fix: remove runtime imports (tinygrad#1982)
fix: import what is used

probably monkeypatched

fix: import

revert selective import
roelofvandijk committed Oct 7, 2023
1 parent f54959e commit 26fcc8d
Showing 1 changed file with 1 addition and 10 deletions.
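
The "probably monkeypatched" note in the commit message most plausibly refers to requests: with the import hoisted to module scope, test code can swap the dependency by patching the module attribute, rather than relying on the function re-importing it on every call. A minimal standalone sketch of that pattern (illustrative only, not tinygrad code):

# Standalone sketch (illustrative, not tinygrad code): monkeypatching a module-level import.
from types import SimpleNamespace
from unittest import mock
import requests  # module scope: requests is now an attribute of this module

def download_file(url):
  r = requests.get(url, stream=True)
  return r.status_code

# A test patches this module's requests attribute; the function body needs no change.
with mock.patch(f"{__name__}.requests") as fake:
  fake.get.return_value = SimpleNamespace(status_code=200)
  assert download_file("http://example.com/weights.bin") == 200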
11 changes: 1 addition & 10 deletions extra/utils.py
@@ -1,8 +1,7 @@
 # type: ignore
-import pickle
+import pickle, hashlib, zipfile, io, requests, struct, tempfile, platform, concurrent.futures
 import numpy as np
 from tqdm import tqdm
-import tempfile, platform
 from pathlib import Path
 from collections import defaultdict
 from typing import Union
@@ -22,7 +21,6 @@ def fetch(url):
   if url.startswith("/") or url.startswith("."):
     with open(url, "rb") as f:
       return f.read()
-  import hashlib
   fp = temp(hashlib.md5(url.encode('utf-8')).hexdigest())
   download_file(url, fp, skip_if_exists=not getenv("NOCACHE"))
   with open(fp, "rb") as f:
@@ -32,13 +30,11 @@ def fetch_as_file(url):
   if url.startswith("/") or url.startswith("."):
     with open(url, "rb") as f:
       return f.read()
-  import hashlib
   fp = temp(hashlib.md5(url.encode('utf-8')).hexdigest())
   download_file(url, fp, skip_if_exists=not getenv("NOCACHE"))
   return fp
 
 def download_file(url, fp, skip_if_exists=True):
-  import requests
   if skip_if_exists and Path(fp).is_file() and Path(fp).stat().st_size > 0:
     return
   r = requests.get(url, stream=True)
@@ -143,8 +139,6 @@ def _read(lna):
 
 def fake_torch_load_zipped(fb0, load_weights=True, multithreaded=True):
   if Device.DEFAULT in ["TORCH", "GPU", "CUDA"]: multithreaded = False # multithreaded doesn't work with CUDA or TORCH. for GPU it's a wash with _mmap
-
-  import zipfile
   with zipfile.ZipFile(fb0, 'r') as myzip:
     base_name = myzip.namelist()[0].split('/', 1)[0]
     with myzip.open(f'{base_name}/data.pkl') as myfile:
@@ -155,7 +149,6 @@ def load_weight(k, vv):
           for v in vv:
             load_single_weight(v[2], myfile, v[3], v[4], v[0], v[5], mmap_allowed=True)
       if multithreaded:
-        import concurrent.futures
         # 2 seems fastest
         with concurrent.futures.ThreadPoolExecutor(max_workers=2) as executor:
           futures = {executor.submit(load_weight, k, v):k for k,v in ret[1].items()}
@@ -170,8 +163,6 @@ def load_weight(k, vv):
   return ret[0]
 
 def fake_torch_load(b0):
-  import io
-  import struct
 
   # convert it to a file
   fb0 = io.BytesIO(b0)
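For context on the concurrent.futures hunk above: fake_torch_load_zipped fans weight groups out to a small thread pool and then waits on the futures. A self-contained sketch of that pattern, with a print-based stand-in for the real zip-backed loader:

# Standalone sketch of the loading pattern in fake_torch_load_zipped; load_weight is a stand-in.
import concurrent.futures

def load_weight(k, vv):
  print(f"loading {k}: {len(vv)} tensors")

weights = {"layer0.weight": [1, 2], "layer1.weight": [3]}
with concurrent.futures.ThreadPoolExecutor(max_workers=2) as executor:  # "2 seems fastest" per the diff
  futures = {executor.submit(load_weight, k, v): k for k, v in weights.items()}
  for f in concurrent.futures.as_completed(futures):
    f.result()  # re-raises any exception from a worker thread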

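On what "runtime imports" cost: a function-local import re-executes the import statement on every call. The module is cached in sys.modules after the first load, so correctness is unaffected, but each call still pays for the lookup and the local rebind. A quick standalone way to see the difference (numbers vary by machine; the motivation for this commit is arguably consistency as much as speed):

# Micro-benchmark sketch: function-local import vs. module-level import.
import timeit
import hashlib

def digest_local(url):
  import hashlib  # cached after the first load, but looked up and rebound on every call
  return hashlib.md5(url.encode('utf-8')).hexdigest()

def digest_module(url):
  return hashlib.md5(url.encode('utf-8')).hexdigest()

for fn in (digest_local, digest_module):
  print(fn.__name__, timeit.timeit(lambda: fn("http://example.com"), number=100_000))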