Properties

Basic¤

shape property ¤

shape: tuple[sint, ...]

dtype property ¤

dtype: DType

device property ¤

device: Union[str, tuple[str, ...]]
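
A minimal usage sketch (assuming the standard `from tinygrad import Tensor` import) reading these three properties; the exact dtype and device printed depend on your defaults:

```python
from tinygrad import Tensor

t = Tensor([[1, 2], [3, 4]])
print(t.shape)   # (2, 2): a tuple of ints (or symbolic sints)
print(t.dtype)   # the DType picked for Python int data (the default int dtype)
print(t.device)  # the default device string, e.g. "CPU", "METAL", "CUDA", ...
```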

ndim property ¤

ndim: int

Returns the number of dimensions in the tensor.

t = Tensor([[1, 2], [3, 4]])
print(t.ndim)
2

numel ¤

numel() -> sint

Returns the total number of elements in the tensor.

t = Tensor([[[1, 2], [3, 4]], [[5, 6], [7, 8]]])
print(t.numel())
8
Source code in tinygrad/tensor.py
def numel(self) -> sint:
  """
  Returns the total number of elements in the tensor.

  ```python exec="true" source="above" session="tensor" result="python"
  t = Tensor([[[1, 2], [3, 4]], [[5, 6], [7, 8]]])
  print(t.numel())
  ```
  """
  return prod(self.shape)

element_size ¤

element_size() -> int

Returns the size in bytes of an individual element in the tensor.

t = Tensor([5], dtype=dtypes.int16)
print(t.element_size())
2
Source code in tinygrad/tensor.py
def element_size(self) -> int:
  """
  Returns the size in bytes of an individual element in the tensor.

  ```python exec="true" source="above" session="tensor" result="python"
  t = Tensor([5], dtype=dtypes.int16)
  print(t.element_size())
  ```
  """
  return self.dtype.itemsize

nbytes ¤

nbytes() -> int

Returns the total number of bytes of all elements in the tensor.

t = Tensor([8, 9], dtype=dtypes.float)
print(t.nbytes())
8
Source code in tinygrad/tensor.py
def nbytes(self) -> int:
  """
  Returns the total number of bytes of all elements in the tensor.

  ```python exec="true" source="above" session="tensor" result="python"
  t = Tensor([8, 9], dtype=dtypes.float)
  print(t.nbytes())
  ```
  """
  return self.numel() * self.element_size()

is_floating_point ¤

is_floating_point() -> bool

Returns True if the tensor contains floating point types, i.e. its dtype is one of dtypes.float64, dtypes.float32, dtypes.float16, dtypes.bfloat16.

t = Tensor([8, 9], dtype=dtypes.float32)
print(t.is_floating_point())
True
Source code in tinygrad/tensor.py
def is_floating_point(self) -> bool:
  """
  Returns `True` if the tensor contains floating point types, i.e. is one of `dtypes.float64`, `dtypes.float32`,
  `dtypes.float16`, `dtypes.bfloat16`.

  ```python exec="true" source="above" session="tensor" result="python"
  t = Tensor([8, 9], dtype=dtypes.float32)
  print(t.is_floating_point())
  ```
  """
  return dtypes.is_float(self.dtype)

size ¤

size(
    dim: Optional[int] = None,
) -> Union[sint, tuple[sint, ...]]

Return the size of the tensor. If dim is specified, return the length along dimension dim. Otherwise return the shape of the tensor.

t = Tensor([[4, 5, 6], [7, 8, 9]])
print(t.size())
(2, 3)
print(t.size(dim=1))
3

Source code in tinygrad/tensor.py
def size(self, dim:Optional[int]=None) -> Union[sint, tuple[sint, ...]]:
  """
  Return the size of the tensor. If `dim` is specified, return the length along dimension `dim`. Otherwise return the shape of the tensor.

  ```python exec="true" source="above" session="tensor" result="python"
  t = Tensor([[4, 5, 6], [7, 8, 9]])
  print(t.size())
  ```
  ```python exec="true" source="above" session="tensor" result="python"
  print(t.size(dim=1))
  ```
  """
  return self.shape if dim is None else self.shape[dim]

Data Access¤

data ¤

data() -> memoryview

Returns the data of this tensor as a memoryview.

t = Tensor([1, 2, 3, 4])
print(np.frombuffer(t.data(), dtype=np.int32))
[1 2 3 4]
Source code in tinygrad/tensor.py
def data(self) -> memoryview:
  """
  Returns the data of this tensor as a memoryview.

  ```python exec="true" source="above" session="tensor" result="python"
  t = Tensor([1, 2, 3, 4])
  print(np.frombuffer(t.data(), dtype=np.int32))
  ```
  """
  assert self.dtype.base.fmt is not None, f"no fmt dtype for {self.dtype.base}"
  assert all_int(self.shape), f"no data if shape is symbolic, {self.shape=}"
  if TYPE_CHECKING or sys.version_info < (3, 12): assert self.dtype.base.fmt != "e"
  return cast(memoryview, self._data().cast(self.dtype.base.fmt) if 0 in self.shape else self._data().cast(self.dtype.base.fmt, self.shape))

item ¤

item() -> ConstType

Returns the value of this tensor as a standard Python number.

t = Tensor(42)
print(t.item())
42
Source code in tinygrad/tensor.py
def item(self) -> ConstType:
  """
  Returns the value of this tensor as a standard Python number.

  ```python exec="true" source="above" session="tensor" result="python"
  t = Tensor(42)
  print(t.item())
  ```
  """
  assert self.numel() == 1, "must have one element for item"
  return self.data()[(0,) * len(self.shape)]

tolist ¤

tolist() -> Union[Sequence[ConstType], ConstType]

Returns the value of this tensor as a nested list. Returns a single value for a const tensor.

t = Tensor([1, 2, 3, 4])
print(t.tolist())
[1, 2, 3, 4]
t = Tensor(5)
print(t.tolist())
5

Source code in tinygrad/tensor.py
def tolist(self) -> Union[Sequence[ConstType], ConstType]:
  """
  Returns the value of this tensor as a nested list.
  Returns single value for const tensor.

  ```python exec="true" source="above" session="tensor" result="python"
  t = Tensor([1, 2, 3, 4])
  print(t.tolist())
  ```
  ```python exec="true" source="above" session="tensor" result="python"
  t = Tensor(5)
  print(t.tolist())
  ```
  """
  return self.data().tolist()

numpy ¤

numpy() -> 'np.ndarray'

Returns the value of this tensor as a numpy.ndarray.

t = Tensor([1, 2, 3, 4])
print(repr(t.numpy()))
array([1, 2, 3, 4], dtype=int32)
Source code in tinygrad/tensor.py
def numpy(self) -> 'np.ndarray':  # type: ignore [name-defined] # noqa: F821
  """
  Returns the value of this tensor as a `numpy.ndarray`.

  ```python exec="true" source="above" session="tensor" result="python"
  t = Tensor([1, 2, 3, 4])
  print(repr(t.numpy()))
  ```
  """
  import numpy as np
  if self.dtype.base == dtypes.bfloat16: return self.float().numpy()
  assert _to_np_dtype(self.dtype.base) is not None, f"no np dtype for {self.dtype.base}"
  assert all_int(self.shape), f"no data if shape is symbolic, {self.shape=}"
  return np.frombuffer(self._data(), dtype=_to_np_dtype(self.dtype.base)).reshape(self.shape)

tinygrad ops¤

schedule_with_vars ¤

schedule_with_vars(
    *lst: Tensor,
) -> tuple[list[ScheduleItem], dict[Variable, int]]

Creates the schedule needed to realize these Tensor(s), with Variables.

Note

A Tensor can only be scheduled once.
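
An illustrative sketch (not from the docstring) of calling it directly; the number of ScheduleItems depends on how the graph is scheduled, and `var_vals` is empty when no symbolic Variables are involved:

```python
from tinygrad import Tensor

a = Tensor([1.0, 2.0, 3.0])
b = (a + 1).sum()
schedule, var_vals = b.schedule_with_vars()  # list[ScheduleItem], dict[Variable, int]
print(len(schedule))  # graph-dependent kernel/copy count
print(var_vals)       # {} here: no Variables in this graph
```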

Source code in tinygrad/tensor.py
def schedule_with_vars(self, *lst:Tensor) -> tuple[list[ScheduleItem], dict[Variable, int]]:
  """
  Creates the schedule needed to realize these Tensor(s), with Variables.

  NOTE: A Tensor can only be scheduled once.
  """
  big_sink = UOp.sink(*[x.lazydata for x in (self,)+lst])

  # TODO: move this to scheduler tensor_map pass
  if any(x.op is Ops.MULTI for x in big_sink.toposort):
    # multi fixup
    _apply_map_to_tensors(get_multi_map(big_sink))
    big_sink = UOp.sink(*flatten([x.lazydata.src if x.lazydata.op is Ops.MULTI else [x.lazydata] for x in (self,)+lst]))

  # verify Tensors match the spec
  if __debug__: type_verify(list(big_sink.toposort), tensor_uop_spec)

  schedule, var_vals, becomes_map = create_schedule_with_vars(big_sink)
  _apply_map_to_tensors(becomes_map)
  return memory_planner(schedule), var_vals

schedule ¤

schedule(*lst: Tensor) -> list[ScheduleItem]

Creates the schedule needed to realize these Tensor(s).
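
A hedged sketch of typical usage; `realize` runs the schedule produced here, and the item count depends on fusion:

```python
from tinygrad import Tensor

x = Tensor.ones(4, 4)
y = (x * 2 + 1).sum()
sched = y.schedule()  # list[ScheduleItem]; asserts there are no unbound Variables
print(len(sched))     # scheduling/fusion dependent
```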

Source code in tinygrad/tensor.py
def schedule(self, *lst:Tensor) -> list[ScheduleItem]:
  """Creates the schedule needed to realize these Tensor(s)."""
  schedule, var_vals = self.schedule_with_vars(*lst)
  assert len(var_vals) == 0
  return schedule

realize ¤

realize(*lst: Tensor, do_update_stats=True) -> Tensor

Triggers the computation needed to create these Tensor(s).
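
An illustrative sketch: `realize` forces the lazy graph to execute and returns the same Tensor, so it chains cleanly:

```python
from tinygrad import Tensor

t = (Tensor([1.0, 2.0]) + Tensor([3.0, 4.0])).realize()  # computation happens here
print(t.numpy())  # [4. 6.]
```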

Source code in tinygrad/tensor.py
def realize(self, *lst:Tensor, do_update_stats=True) -> Tensor:
  """Triggers the computation needed to create these Tensor(s)."""
  run_schedule(*self.schedule_with_vars(*lst), do_update_stats=do_update_stats)
  return self

replace ¤

replace(x: Tensor, allow_shape_mismatch=False) -> Tensor

Replaces the data of this tensor with the data of another tensor. Only the shapes of the tensors must match; the dtype and device may differ.
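
A minimal sketch (not from the docstring); `replace` is used for swapping in a new version of a tensor, e.g. loaded weights:

```python
from tinygrad import Tensor

w = Tensor([1.0, 2.0, 3.0])
w.replace(Tensor([4.0, 5.0, 6.0]))  # same shape, so the replace is allowed
print(w.tolist())  # [4.0, 5.0, 6.0]
```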

Source code in tinygrad/tensor.py
def replace(self, x:Tensor, allow_shape_mismatch=False) -> Tensor:
  """
  Replaces the data of this tensor with the data of another tensor. Only the shape of the tensors must match.
  """
  # used for replacing a Tensor with a new version of it (potentially with a different device and dtype)
  assert self.shape == x.shape or allow_shape_mismatch, f"replace shape mismatch {self.shape} != {x.shape}"
  self.lazydata = x.lazydata
  return self

assign ¤

assign(x) -> Tensor
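
`assign` has no docstring in the source; the sketch below (illustrative, not from the docs) shows the common in-place update pattern it supports. The assigned value must match in shape, dtype, and device:

```python
from tinygrad import Tensor

w = Tensor.ones(2, 2).contiguous().realize()  # a realized buffer to assign into
w.assign(w + 1)                               # queue an in-place update
w.realize()                                   # run it
print(w.numpy())                              # [[2. 2.] [2. 2.]]
```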
Source code in tinygrad/tensor.py
def assign(self, x) -> Tensor:
  # TODO: this is a hack for writing to DISK. remove with working assign
  if isinstance(self.device, str) and self.device.startswith("DISK"):
    if x.__class__ is not Tensor: x = Tensor(x, device="CPU", dtype=self.dtype)
    self.contiguous().realize().lazydata.base.realized.ensure_allocated().copyin(x._data())
    return self
  if x.__class__ is not Tensor: x = Tensor(x, device=self.device, dtype=self.dtype)
  if self.lazydata is x.lazydata: return self  # a self assign is a NOOP
  # NOTE: we allow cross device assign
  assert self.shape == x.shape, f"assign shape mismatch {self.shape} != {x.shape}"
  assert self.device == x.device, f"assign device mismatch {self.device} != {x.device}"
  assert self.dtype == x.dtype, f"assign dtype mismatch {self.dtype} != {x.dtype}"
  assert not x.requires_grad  # self requires_grad is okay?
  if not self.lazydata.is_realized: return self.replace(x)
  self.lazydata = self.lazydata.assign(x.lazydata)
  return self

detach ¤

detach() -> Tensor

Returns a new tensor with the same data as this tensor, but detached from the autograd graph.
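
A quick sketch: the detached tensor can still be used in computation, but no gradient flows through it:

```python
from tinygrad import Tensor

a = Tensor([2.0, 3.0], requires_grad=True)
b = a.detach()
print(b.requires_grad)   # False
print((b * 2).tolist())  # [4.0, 6.0], computed outside the autograd graph
```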

Source code in tinygrad/tensor.py
def detach(self) -> Tensor:
  """
  Returns a new tensor with the same data as this tensor, but detached from the autograd graph.
  """
  return Tensor(self.lazydata.detach(), device=self.device, requires_grad=False)

to ¤

to(device: Optional[Union[str, tuple[str, ...]]]) -> Tensor

Moves the tensor to the given device.
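
A hedged sketch; available device names depend on your backends, and "CPU" is only used here as an example target:

```python
from tinygrad import Tensor

t = Tensor([1, 2, 3])
t_cpu = t.to("CPU")    # returns a Tensor on the target device (self if already there)
print(t_cpu.device)    # CPU
print(t_cpu.tolist())  # [1, 2, 3]
```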

Source code in tinygrad/tensor.py
def to(self, device:Optional[Union[str, tuple[str, ...]]]) -> Tensor:
  """
  Moves the tensor to the given device.
  """
  device = tuple(Device.canonicalize(x) for x in device) if isinstance(device, (tuple, list)) else Device.canonicalize(device)
  if device == self.device: return self
  if not isinstance(device, str): return self.shard(device)
  ret = Tensor(self.lazydata, device, requires_grad=self.requires_grad)
  if self.grad is not None: ret.grad = self.grad.to(device)
  return ret

to_ ¤

to_(device: Optional[Union[str, tuple[str, ...]]])

Moves the tensor to the given device in place.

Source code in tinygrad/tensor.py
def to_(self, device:Optional[Union[str, tuple[str, ...]]]):
  """
  Moves the tensor to the given device in place.
  """
  real = self.to(device)
  if self.grad is not None and real.grad is not None: self.grad.replace(real.grad)
  return self.replace(real)

shard ¤

shard(
    devices: tuple[str, ...], axis: Optional[int] = None
) -> Tensor

Shards the tensor across the given devices. Optionally specify which axis to shard on.

t = Tensor.empty(2, 4)
print(t.shard((t.device, t.device), axis=1).lazydata)
UOp(Ops.MULTI, dtypes.float, arg=(1, (True, True)), src=(
  UOp(Ops.CONTIGUOUS, dtypes.float, arg=None, src=(
    UOp(Ops.COPY, dtypes.float, arg=False, src=(
      x2:=UOp(Ops.DEVICE, dtypes.void, arg='CPU', src=()),
      UOp(Ops.CONTIGUOUS, dtypes.float, arg=None, src=(
        UOp(Ops.SHRINK, dtypes.float, arg=((0, 2), (0, 2)), src=(
          x5:=UOp(Ops.RESHAPE, dtypes.float, arg=(2, 4), src=(
            UOp(Ops.BUFFER, dtypes.float, arg=8, src=(
               x2,
              UOp(Ops.UNIQUE, dtypes.void, arg=1073, src=()),)),)),)),)),)),)),
  UOp(Ops.CONTIGUOUS, dtypes.float, arg=None, src=(
    UOp(Ops.COPY, dtypes.float, arg=False, src=(
       x2,
      UOp(Ops.CONTIGUOUS, dtypes.float, arg=None, src=(
        UOp(Ops.SHRINK, dtypes.float, arg=((0, 2), (2, 4)), src=(
           x5,)),)),)),)),))
Source code in tinygrad/tensor.py
def shard(self, devices:tuple[str, ...], axis:Optional[int]=None) -> Tensor:
  """
  Shards the tensor across the given devices. Optionally specify which axis to shard on.

  ```python exec="true" source="above" session="tensor" result="python"
  t = Tensor.empty(2, 4)
  print(t.shard((t.device, t.device), axis=1).lazydata)
  ```
  """
  assert isinstance(self.device, str), "can't shard a MultiLazyBuffer"
  devices = tuple(Device.canonicalize(x) for x in devices)
  mlb = self.lazydata.shard(devices, self._resolve_dim(axis) if axis is not None else None)
  return Tensor(mlb, device=devices, requires_grad=self.requires_grad)

shard_ ¤

shard_(
    devices: tuple[str, ...], axis: Optional[int] = None
)

Shards the tensor across the given devices in place.

Source code in tinygrad/tensor.py
def shard_(self, devices:tuple[str, ...], axis:Optional[int]=None):
  """
  Shards the tensor across the given devices in place.
  """
  return self.replace(self.shard(devices, axis))

contiguous ¤

contiguous()

Returns a contiguous tensor.
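
An illustrative sketch: `contiguous` materializes a view (such as a permute) into its own buffer; the values are unchanged:

```python
from tinygrad import Tensor

t = Tensor([[1, 2], [3, 4]]).permute(1, 0)  # a non-contiguous view
c = t.contiguous()                          # gets its own buffer when realized
print(c.numpy())  # [[1 3] [2 4]]
```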

Source code in tinygrad/tensor.py
def contiguous(self):
  """
  Returns a contiguous tensor.
  """
  return self._apply_uop(UOp.contiguous)

contiguous_backward ¤

contiguous_backward()

Inserts a contiguous operation in the backward pass.
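
A minimal, hedged sketch: the forward value is unchanged, while the gradient flowing back through this point is forced contiguous:

```python
from tinygrad import Tensor

x = Tensor([1.0, 2.0, 3.0], requires_grad=True)
y = x.contiguous_backward() * 2  # forward behaves like identity
y.sum().backward()               # the incoming gradient is made contiguous here
print(x.grad.numpy())            # [2. 2. 2.]
```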

Source code in tinygrad/tensor.py
def contiguous_backward(self):
  """
  Inserts a contiguous operation in the backward pass.
  """
  return self._apply_uop(UOp.contiguous_backward)

Gradient¤

gradient ¤

gradient(
    *targets: Tensor,
    gradient: Optional[Tensor] = None,
    materialize_grads=False
) -> list[Tensor]

Compute the gradient of the targets with respect to self.

x = Tensor.eye(3)
y = Tensor([[2.0,0,-2.0]])
z = y.matmul(x).sum()
dx, dy = z.gradient(x, y)

print(dx.tolist())  # dz/dx
print(dy.tolist())  # dz/dy
[[2.0, 2.0, 2.0], [0.0, 0.0, 0.0], [-2.0, -2.0, -2.0]]
[[1.0, 1.0, 1.0]]
Source code in tinygrad/tensor.py
def gradient(self, *targets:Tensor, gradient:Optional[Tensor]=None, materialize_grads=False) -> list[Tensor]:
  """
  Compute the gradient of the targets with respect to self.

  ```python exec="true" source="above" session="tensor" result="python"
  x = Tensor.eye(3)
  y = Tensor([[2.0,0,-2.0]])
  z = y.matmul(x).sum()
  dx, dy = z.gradient(x, y)

  print(dx.tolist())  # dz/dx
  print(dy.tolist())  # dz/dy
  ```
  """
  assert gradient is not None or self.shape == tuple(), "when no gradient is provided, backward must be called on a scalar tensor"
  if gradient is None: gradient = Tensor(1.0, dtype=self.dtype, device=self.device, requires_grad=False)
  rets = []
  target_uops = [x.lazydata for x in targets]
  grads = compute_gradient(self.lazydata, gradient.lazydata, set(target_uops))
  ret = []
  for x in target_uops:
    if (y:=grads.get(x)) is None:
      if materialize_grads: y = x.const_like(0)
      else: raise RuntimeError(f"{x}\n\nnot found in\n\n{self.lazydata}")
    ret.append(y)
  rets.append(ret)
  # create returned Tensors
  return [Tensor(u, device=t.device) for t,u in zip(targets, rets[0])]

backward ¤

backward(gradient: Optional[Tensor] = None) -> Tensor

Propagates the gradient of a tensor backwards through the computation graph. If the 'gradient' argument is not provided, the tensor must be a scalar, and the gradient is implicitly set to 1.0.

t = Tensor([1.0, 2.0, 3.0, 4.0], requires_grad=True)
t.sum().backward()
print(t.grad.numpy())
[1. 1. 1. 1.]

Source code in tinygrad/tensor.py
def backward(self, gradient:Optional[Tensor]=None) -> Tensor:
  """
  Propagates the gradient of a tensor backwards through the computation graph.
  If the 'gradient' argument is not provided, the tensor must be a scalar, and the gradient is implicitly set to 1.0.
  ```python exec="true" source="above" session="tensor" result="python"
  t = Tensor([1.0, 2.0, 3.0, 4.0], requires_grad=True)
  t.sum().backward()
  print(t.grad.numpy())
  ```
  """
  all_uops = self.lazydata.toposort
  tensors_need_grad: list[Tensor] = [t for tref in all_tensors if (t:=tref()) is not None and \
                                     t.lazydata in all_uops and t.requires_grad and not Tensor.no_grad]
  # clear contexts
  for t,g in zip(tensors_need_grad, self.gradient(*tensors_need_grad, gradient=gradient, materialize_grads=True)):
    assert g.shape == t.shape, f"grad shape must match tensor shape, {g.shape!r} != {t.shape!r}"
    t.grad = g if t.grad is None else (t.grad + g)
  return self