
refactor/ci: delete many # type: ignore (tinygrad#2281)
* refactor/ci: delete many `# type: ignore`

* replace `axis.__class__ is int` with `isinstance(axis, int)` to make mypy happy
* add `--warn-unused-ignores` to mypy flag

refs tinygrad#2240

* ci: move `--warn-unused-ignores` flag to mypy config

refs tinygrad#2240
76616c6172 committed Nov 12, 2023
1 parent 2e2154a commit 123ea05
Showing 14 changed files with 21 additions and 20 deletions.
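
For context on the second bullet in the commit message: mypy narrows a union type through `isinstance`, but it did not narrow on the `axis.__class__ is int` identity check, which is why the old line in `Tensor._reduce` needed a `# type: ignore`. A standalone sketch of the difference, using illustrative functions that are not part of this commit:

```python
from typing import Union

def describe(x: Union[int, str]) -> str:
  if isinstance(x, int):  # mypy narrows x to int in this branch
    return f"int with {x.bit_length()} bits"
  return x.upper()        # and to str here, so no ignore is needed

def describe_old(x: Union[int, str]) -> str:
  if x.__class__ is int:  # no narrowing: mypy still sees x as Union[int, str]
    return f"int with {x.bit_length()} bits"  # flagged unless silenced with an ignore
  return x.upper()                            # likewise flagged
```
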
2 changes: 1 addition & 1 deletion extra/helpers.py
@@ -1,5 +1,5 @@
import multiprocessing, subprocess
-import cloudpickle # type: ignore
+import cloudpickle
from typing import Any

def _early_exec_process(qin, qout):
1 change: 1 addition & 0 deletions mypy.ini
@@ -6,3 +6,4 @@ check_untyped_defs = True
explicit_package_bases = True
warn_unreachable = True
warn_redundant_casts = True
+warn_unused_ignores = True
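
With `warn_unused_ignores` enabled (the config equivalent of the `--warn-unused-ignores` flag mentioned in the commit message), mypy reports every `# type: ignore` that no longer suppresses an error, which is how stale ignores like the ones deleted below are surfaced. A hypothetical illustration, where `demo.py` is not part of this commit:

```python
# demo.py
import json  # type: ignore  # json ships with type information, so this ignore suppresses nothing

data = json.loads("[1, 2, 3]")
```

Running `mypy demo.py` against this config prints something like `demo.py:2: error: Unused "type: ignore" comment`, after which the comment can simply be deleted.
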
2 changes: 1 addition & 1 deletion tinygrad/graph.py
@@ -1,6 +1,6 @@
import os, atexit, functools
try:
-import networkx as nx # type: ignore
+import networkx as nx
except ImportError:
nx = None # graph won't work
from collections import defaultdict
2 changes: 1 addition & 1 deletion tinygrad/lazy.py
@@ -229,7 +229,7 @@ def e(self:LazyBuffer, op:Union[UnaryOps, BinaryOps, TernaryOps], *srcs:LazyBuff

if MERGE_ELEMENTWISE_OPS:
# remove the buffers from any (childless) BinaryOps that feed into this
-_srcs = tuple([x.op if x.optype == BinaryOps and not x.children and not x.realized else x for x in srcs]) # type: ignore
+_srcs = tuple([x.op if x.optype == BinaryOps and not x.children and not x.realized else x for x in srcs])
# TODO: needs general merge limiting
if out_device != "WEBGPU" or len(dedup([x.base for _src in _srcs for x in _src.buffers if not x.is_unrealized_const()])) < 7: srcs = _srcs # type: ignore

2 changes: 1 addition & 1 deletion tinygrad/renderer/llvmir.py
@@ -1,5 +1,5 @@
from typing import Final, Dict, Callable, Any, List, Optional, Tuple
-from llvmlite import ir # type: ignore
+from llvmlite import ir
from tinygrad.codegen.linearizer import UOps, UOp
from tinygrad.helpers import dtypes
from tinygrad.ops import Op, UnaryOps, BinaryOps, TernaryOps
2 changes: 1 addition & 1 deletion tinygrad/renderer/triton.py
@@ -3,7 +3,7 @@
from tinygrad.ops import UnaryOps, BinaryOps, TernaryOps, Op
from tinygrad.helpers import DType, dtypes, ImageDType, DEBUG, getenv
from tinygrad.codegen.linearizer import UOp, UOps
-from triton.compiler import compile as triton_compile # type: ignore
+from triton.compiler import compile as triton_compile
import linecache
import math
import re
2 changes: 1 addition & 1 deletion tinygrad/runtime/lib.py
@@ -38,7 +38,7 @@ def fromCPU(cls, x:np.ndarray, **kwargs):
class RawBufferMapped(RawBufferCopyIn):
def _buffer(self) -> memoryview: raise NotImplementedError("must be implemented")
# NOTE: this metadata prevents the backing buffer from being freed. hack can be removed with PEP688
-def buffer_view(self) -> np.ndarray: return np.frombuffer(self._buffer(), dtype=np.dtype(self.dtype.np, metadata={"backing": self}), count=self.size) # type: ignore
+def buffer_view(self) -> np.ndarray: return np.frombuffer(self._buffer(), dtype=np.dtype(self.dtype.np, metadata={"backing": self}), count=self.size)
def toCPU(self) -> np.ndarray: return self.buffer_view().copy() # Need a copy, since jit will write to the same buffer.
def _copyin(self, x:np.ndarray) -> None: np.copyto(self.buffer_view(), x.reshape(-1))

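
The NOTE in this hunk relies on a real numpy behavior: `np.dtype` accepts a `metadata` dict, the returned array keeps a reference to its dtype, and the dtype keeps the dict, so stashing `self` there ties the owning buffer object's lifetime to every view created from it. A rough standalone sketch of the idea, where `MappedBuffer` is a hypothetical stand-in rather than tinygrad's `RawBufferMapped`:

```python
import ctypes
import numpy as np

class MappedBuffer:
  """Hypothetical buffer whose backing memory goes away when the object is collected."""
  def __init__(self, size: int):
    self.size = size
    self._mem = (ctypes.c_float * size)()
  def _buffer(self) -> memoryview:
    return memoryview(self._mem).cast("B")
  def buffer_view(self) -> np.ndarray:
    # attaching self via dtype metadata pins this object to the lifetime of the returned array
    return np.frombuffer(self._buffer(), dtype=np.dtype(np.float32, metadata={"backing": self}), count=self.size)

view = MappedBuffer(4).buffer_view()  # the temporary MappedBuffer survives through view.dtype.metadata
view[:] = 1.0
print(view, view.dtype.metadata["backing"].size)
```
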
8 changes: 4 additions & 4 deletions tinygrad/runtime/ops_cuda.py
@@ -2,7 +2,7 @@
from pathlib import Path
from typing import Optional, Tuple
import numpy as np
-from pycuda.compiler import compile as cuda_compile # type: ignore
+from pycuda.compiler import compile as cuda_compile
from tinygrad.helpers import DEBUG, getenv, colored, diskcache
from tinygrad.ops import Compiled
from tinygrad.runtime.lib import RawBufferCopyInOut, RawMallocBuffer, LRUAllocator
@@ -42,18 +42,18 @@ class context:
class device:
compute_capability = lambda: (3,5) # pylint: disable=unnecessary-lambda # noqa: E731
get_device = lambda: context.device # pylint: disable=unnecessary-lambda # noqa: E731
-import pycuda.driver # type: ignore
+import pycuda.driver
pycuda.driver.Context = context
RawCUDABuffer = RawMallocBuffer
else:
-import pycuda.autoprimaryctx # type: ignore # pylint: disable=unused-import # noqa: F401
+import pycuda.autoprimaryctx # pylint: disable=unused-import # noqa: F401
import pycuda.driver as cuda # type: ignore
class CUDAAllocator(LRUAllocator):
def __init__(self): super().__init__(self._get_cur_free_space(None))
def _do_alloc(self, size, dtype, device, **kwargs): return cuda.mem_alloc(size * dtype.itemsize) # type: ignore
def _cached_bufkey(self, size, dtype, device): return (device, size*dtype.itemsize) # Buffers of the same length could be reused, no matter what dtype.
def _get_cur_free_space(self, device): return cuda.mem_get_info()[0] # type: ignore
-CUDAAlloc = CUDAAllocator() # type: ignore
+CUDAAlloc = CUDAAllocator()
class RawCUDABuffer(RawBufferCopyInOut): # type: ignore
def __init__(self, size, dtype): super().__init__(size, dtype, allocator=CUDAAlloc)
def _copyin(self, x:np.ndarray, stream:Optional[cuda.Stream]=None): cuda.memcpy_htod_async(self._buf, x.ravel(), stream) # type: ignore
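
The comment on `_cached_bufkey` above records a small design choice worth spelling out: cached buffers are keyed by device and byte size rather than by dtype, so a freed float32 buffer can be handed back out for, say, an int32 request of the same byte length. A hypothetical, simplified sketch of that pooling idea (not tinygrad's actual `LRUAllocator`, which also tracks free space and eviction):

```python
from collections import defaultdict

class ByteSizeBufferCache:
  """Pool freed buffers by (device, byte size) so any same-sized request can reuse them."""
  def __init__(self, alloc_fn):
    self.alloc_fn, self.pool = alloc_fn, defaultdict(list)
  def alloc(self, device: str, size: int, itemsize: int):
    key = (device, size * itemsize)
    return self.pool[key].pop() if self.pool[key] else self.alloc_fn(size * itemsize)
  def free(self, device: str, size: int, itemsize: int, buf) -> None:
    self.pool[(device, size * itemsize)].append(buf)

cache = ByteSizeBufferCache(bytearray)  # stand-in allocator for the example
a = cache.alloc("cuda:0", 256, 4)       # 256 x 4-byte elements -> one 1024-byte allocation
cache.free("cuda:0", 256, 4, a)
b = cache.alloc("cuda:0", 256, 4)       # same byte size, so the allocation is reused
assert a is b
```
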
4 changes: 2 additions & 2 deletions tinygrad/runtime/ops_gpu.py
@@ -3,7 +3,7 @@
os.environ['PYOPENCL_NO_CACHE'] = '1'
import pathlib
import numpy as np
-import pyopencl as cl # type: ignore
+import pyopencl as cl
from typing import Optional, List, Tuple
from tinygrad.helpers import DEBUG, getenv, prod, ImageDType, OSX, fromimport, diskcache
from tinygrad.ops import Compiled
@@ -71,7 +71,7 @@ def compile_gpu(prg:str) -> bytes:

class CLProgram:
def __init__(self, name:str, prg:bytes, argdtypes=None, options=None):
-self.name, self.clprograms = name, [cl.Program(ctx, ctx.devices, [prg]*len(ctx.devices)) for ctx in CL.cl_ctxs] # type: ignore
+self.name, self.clprograms = name, [cl.Program(ctx, ctx.devices, [prg]*len(ctx.devices)) for ctx in CL.cl_ctxs]
self._clprgs = [clprogram.build(options=options) for clprogram in self.clprograms]
self.clprgs = [clprg.__getattr__(name) for clprg in self._clprgs]
if DEBUG >= 5 and not OSX:
2 changes: 1 addition & 1 deletion tinygrad/runtime/ops_llvm.py
@@ -7,7 +7,7 @@
from tinygrad.renderer.llvmir import uops_to_llvm_ir
from tinygrad.runtime.lib import RawMallocBuffer

-import llvmlite.binding as llvm # type: ignore
+import llvmlite.binding as llvm

LLVMOPT = bool(getenv("LLVMOPT"))

2 changes: 1 addition & 1 deletion tinygrad/runtime/ops_metal.py
@@ -1,6 +1,6 @@
# pip3 install pyobjc-framework-Metal pyobjc-framework-Cocoa pyobjc-framework-libdispatch
import os, subprocess, pathlib, ctypes, tempfile
-import Metal, Cocoa, libdispatch # type: ignore
+import Metal, Cocoa, libdispatch
from typing import List, Any, Tuple
from tinygrad.codegen.kernel import LinearizerOptions
from tinygrad.helpers import prod, getenv, DEBUG, DType, dtypes, diskcache
4 changes: 2 additions & 2 deletions tinygrad/runtime/ops_shm.py
@@ -1,5 +1,5 @@
import os, mmap
-try: import _posixshmem # type: ignore
+try: import _posixshmem
except Exception: pass
from typing import Callable, Dict
from tinygrad.helpers import DType, OSX
@@ -16,7 +16,7 @@ def __init__(self, size, dtype:DType, device:str):
fd = _posixshmem.shm_open(device, os.O_RDWR, 0o600)
# TODO: these flags are somewhat platform specific, but python doesn't expose the ones we need
shm = mmap.mmap(fd, size * dtype.itemsize, flags=mmap.MAP_SHARED | 0x2000 | 0x008000)
-shm.madvise(mmap.MADV_HUGEPAGE) # type: ignore
+shm.madvise(mmap.MADV_HUGEPAGE)
os.close(fd)

super().__init__(size, dtype, shm)
4 changes: 2 additions & 2 deletions tinygrad/runtime/ops_webgpu.py
@@ -1,13 +1,13 @@
import numpy as np
import functools
-from wgpu.utils._device import get_default_device # type: ignore
+from wgpu.utils._device import get_default_device
from tinygrad.runtime.lib import RawBufferCopyIn, LRUAllocator
from tinygrad.helpers import dtypes, DType
from tinygrad.ops import Compiled
from tinygrad.codegen.kernel import LinearizerOptions
from tinygrad.renderer.cstyle import uops_to_cstyle
from tinygrad.renderer.wgsl import WGSLLanguage
-import wgpu # type: ignore
+import wgpu

wgpu_device = get_default_device()

4 changes: 2 additions & 2 deletions tinygrad/tensor.py
@@ -109,7 +109,7 @@ def assign(self, x) -> Tensor:
# TODO: this is a hack for writing to DISK
if self.device.startswith("DISK"):
if x.__class__ is not Tensor: x = Tensor(x, device="CPU", dtype=self.dtype)
-self.contiguous().realize().lazydata.realized._copyin(x.numpy()) # type: ignore
+self.contiguous().realize().lazydata.realized._copyin(x.numpy())
return self
if x.__class__ is not Tensor: x = Tensor(x, device=self.device, dtype=self.dtype)
assert self.shape == x.shape and self.device == x.device, f"assign shape mismatch {self.shape} != {x.shape} or device mismatch {self.device} != {x.device}"
@@ -427,7 +427,7 @@ def flatten(self, start_dim=0): return self.reshape(shape=self.shape[:start_dim]
# ***** reduce ops *****

def _reduce(self, fxn:Type[Function], axis:Optional[Union[int, Tuple[int, ...]]]=None, keepdim=False) -> Tensor:
-axis_: List[int] = list(range(len(self.shape))) if axis is None else ([axis] if axis.__class__ is int else list(axis)) # type: ignore
+axis_: List[int] = list(range(len(self.shape))) if axis is None else ([axis] if isinstance(axis, int) else list(axis))
axis_ = [x if x >= 0 else x+len(self.shape) for x in axis_]
shape = [s for i,s in enumerate(self.shape) if i not in axis_]
ret = fxn.apply(self, new_shape=tuple([1 if i in axis_ else s for i,s in enumerate(self.shape)]))
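
To make the axis bookkeeping in this hunk concrete, the same three steps can be run on example values; this is a standalone re-derivation for illustration, not code from the commit:

```python
from typing import List, Optional, Tuple, Union

def reduce_shapes(shape: Tuple[int, ...], axis: Optional[Union[int, Tuple[int, ...]]]):
  # mirrors Tensor._reduce: normalize axis to a list, wrap negatives,
  # then build the kept shape and the shape with 1s on the reduced axes
  axis_: List[int] = list(range(len(shape))) if axis is None else ([axis] if isinstance(axis, int) else list(axis))
  axis_ = [x if x >= 0 else x + len(shape) for x in axis_]
  kept = [s for i, s in enumerate(shape) if i not in axis_]
  new_shape = tuple(1 if i in axis_ else s for i, s in enumerate(shape))
  return axis_, kept, new_shape

print(reduce_shapes((2, 3, 4), -1))      # ([2], [2, 3], (2, 3, 1))
print(reduce_shapes((2, 3, 4), None))    # ([0, 1, 2], [], (1, 1, 1))
print(reduce_shapes((2, 3, 4), (0, 2)))  # ([0, 2], [3], (2, 1, 3))
```
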
