Properties

Basic¤

shape property ¤

shape: tuple[sint, ...]

dtype property ¤

dtype: DType

device property ¤

device: str | tuple[str, ...]
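A minimal usage sketch for these properties (the dtype and device shown in the comments depend on your install's defaults):

from tinygrad import Tensor
t = Tensor([[1, 2], [3, 4]])
print(t.shape)   # (2, 2)
print(t.dtype)   # the default integer dtype, e.g. dtypes.int
print(t.device)  # the default backend, e.g. CPU or METAL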

ndim property ¤

ndim: int

Returns the number of dimensions in the tensor.

t = Tensor([[1, 2], [3, 4]])
print(t.ndim)
2

numel ¤

numel() -> sint

Returns the total number of elements in the tensor.

t = Tensor([[[1, 2], [3, 4]], [[5, 6], [7, 8]]])
print(t.numel())
8
Source code in tinygrad/mixin/movement.py
def numel(self) -> sint:
  """
  Returns the total number of elements in the tensor.

  ```python exec="true" source="above" session="tensor" result="python"
  t = Tensor([[[1, 2], [3, 4]], [[5, 6], [7, 8]]])
  print(t.numel())
  ```
  """
  return prod(self.shape)

element_size ¤

element_size() -> int

Returns the size in bytes of an individual element in the tensor.

t = Tensor([5], dtype=dtypes.int16)
print(t.element_size())
2
Source code in tinygrad/tensor.py
def element_size(self) -> int:
  """
  Returns the size in bytes of an individual element in the tensor.

  ```python exec="true" source="above" session="tensor" result="python"
  t = Tensor([5], dtype=dtypes.int16)
  print(t.element_size())
  ```
  """
  return self.dtype.itemsize

nbytes ¤

nbytes() -> int

Returns the total number of bytes of all elements in the tensor.

t = Tensor([8, 9], dtype=dtypes.float)
print(t.nbytes())
8
Source code in tinygrad/tensor.py
def nbytes(self) -> int:
  """
  Returns the total number of bytes of all elements in the tensor.

  ```python exec="true" source="above" session="tensor" result="python"
  t = Tensor([8, 9], dtype=dtypes.float)
  print(t.nbytes())
  ```
  """
  return int(self.numel()) * self.element_size()

is_floating_point ¤

is_floating_point() -> bool

Returns True if the tensor contains floating point types, i.e. is one of dtypes.float64, dtypes.float32, dtypes.float16, dtypes.bfloat16.

t = Tensor([8, 9], dtype=dtypes.float32)
print(t.is_floating_point())
True
Source code in tinygrad/tensor.py
def is_floating_point(self) -> bool:
  """
  Returns `True` if the tensor contains floating point types, i.e. is one of `dtypes.float64`, `dtypes.float32`,
  `dtypes.float16`, `dtypes.bfloat16`.

  ```python exec="true" source="above" session="tensor" result="python"
  t = Tensor([8, 9], dtype=dtypes.float32)
  print(t.is_floating_point())
  ```
  """
  return dtypes.is_float(self.dtype)

size ¤

size(dim: int | None = None) -> sint | tuple[sint, ...]

Returns the size of the tensor. If dim is specified, return the length along dimension dim. Otherwise return the shape of the tensor.

t = Tensor([[4, 5, 6], [7, 8, 9]])
print(t.size())
(2, 3)
print(t.size(dim=1))
3

Source code in tinygrad/tensor.py
def size(self, dim:int|None=None) -> sint|tuple[sint, ...]:
  """
  Returns the size of the tensor. If `dim` is specified, return the length along dimension `dim`. Otherwise return the shape of the tensor.

  ```python exec="true" source="above" session="tensor" result="python"
  t = Tensor([[4, 5, 6], [7, 8, 9]])
  print(t.size())
  ```
  ```python exec="true" source="above" session="tensor" result="python"
  print(t.size(dim=1))
  ```
  """
  return self.shape if dim is None else self.shape[dim]

Data Access¤

data ¤

data() -> memoryview

Returns the data of this tensor as a memoryview.

t = Tensor([1, 2, 3, 4])
print(np.frombuffer(t.data(), dtype=np.int32))
[1 2 3 4]
Source code in tinygrad/tensor.py
def data(self) -> memoryview:
  """
  Returns the data of this tensor as a memoryview.

  ```python exec="true" source="above" session="tensor" result="python"
  t = Tensor([1, 2, 3, 4])
  print(np.frombuffer(t.data(), dtype=np.int32))
  ```
  """
  if 0 in self.shape: return memoryview(bytearray(0)).cast(self.dtype.base.fmt)
  assert all_int(self.shape), f"no data if shape is symbolic, {self.shape=}"
  assert self.dtype.base.fmt is not None, f"no fmt dtype for {self.dtype.base}"
  assert self.dtype.base.fmt != "e" or sys.version_info >= (3, 12)
  return self._buffer().as_memoryview().cast(self.dtype.base.fmt, self.shape)

item ¤

item() -> PyConst

Returns the value of this tensor as a standard Python number.

t = Tensor(42)
print(t.item())
42
Source code in tinygrad/tensor.py
def item(self) -> PyConst:
  """
  Returns the value of this tensor as a standard Python number.

  ```python exec="true" source="above" session="tensor" result="python"
  t = Tensor(42)
  print(t.item())
  ```
  """
  assert self.numel() == 1, "must have one element for item"
  return self.data()[(0,) * len(self.shape)]

tolist ¤

tolist() -> PyConst | list[Any]

Returns the value of this tensor as a nested list. Returns a single value for a const tensor.

t = Tensor([1, 2, 3, 4])
print(t.tolist())
[1, 2, 3, 4]
t = Tensor(5)
print(t.tolist())
5

Source code in tinygrad/tensor.py
def tolist(self) -> PyConst|list[Any]:
  """
  Returns the value of this tensor as a nested list.
  Returns single value for const tensor.

  ```python exec="true" source="above" session="tensor" result="python"
  t = Tensor([1, 2, 3, 4])
  print(t.tolist())
  ```
  ```python exec="true" source="above" session="tensor" result="python"
  t = Tensor(5)
  print(t.tolist())
  ```
  """
  # TODO: remove half once minimum python supports it
  if self.dtype in (dtypes.half, dtypes.bfloat16, *dtypes.fp8s): return self.cast(dtypes.float32).tolist()
  return self.data().tolist()

numpy ¤

numpy() -> 'numpy.ndarray'

Returns the value of this tensor as a numpy.ndarray.

t = Tensor([1, 2, 3, 4])
print(repr(t.numpy()))
array([1, 2, 3, 4], dtype=int32)
Source code in tinygrad/tensor.py
def numpy(self) -> 'numpy.ndarray':
  """
  Returns the value of this tensor as a `numpy.ndarray`.

  ```python exec="true" source="above" session="tensor" result="python"
  t = Tensor([1, 2, 3, 4])
  print(repr(t.numpy()))
  ```
  """
  assert all_int(self.shape), f"no data if shape is symbolic, {self.shape=}"
  import numpy as np
  if self.dtype.base in { dtypes.bfloat16, *dtypes.fp8s }: return self.float().numpy()
  if 0 in self.shape: return np.empty(self.shape, dtype=_to_np_dtype(self.dtype.base))
  return self._buffer().numpy().reshape(self.shape)

tinygrad ops¤

schedule_with_vars ¤

schedule_with_vars(
    *lst: Tensor,
) -> tuple[list[ExecItem], dict[str, int]]

Creates the schedule needed to realize these Tensor(s), with Variables.

Note

A Tensor can only be scheduled once.
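A minimal sketch of inspecting a schedule before running it (the number of ExecItems depends on how kernels are grouped, so the comments stay hedged):

from tinygrad import Tensor
a = Tensor([1.0, 2.0, 3.0]) + 1
schedule, var_vals = a.schedule_with_vars()
print(len(schedule))  # how many ExecItems are needed to realize `a`
print(var_vals)       # bound Variable values; {} when no Variables are involved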

Source code in tinygrad/tensor.py
def schedule_with_vars(self, *lst:Tensor) -> tuple[list[ExecItem], dict[str, int]]:
  """
  Creates the schedule needed to realize these Tensor(s), with Variables.

  NOTE: A Tensor can only be scheduled once.
  """
  big_sink = UOp.sink(*[x.uop for x in (self,)+lst])

  # this is where the schedule cache should go
  becomes_map, schedule, var_vals = complete_create_schedule_with_vars(big_sink)
  _apply_map_to_tensors(becomes_map, name="Apply Schedule Map")
  return schedule, var_vals

schedule ¤

schedule(*lst: Tensor) -> list[ExecItem]

Creates the schedule needed to realize these Tensor(s).
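A minimal sketch (the kernel count varies with fusion, so it is not asserted here):

from tinygrad import Tensor
out = Tensor([1.0, 2.0]) * 2 + 1
sched = out.schedule()
print(len(sched))  # ExecItems that would realize `out`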

Source code in tinygrad/tensor.py
def schedule(self, *lst:Tensor) -> list[ExecItem]:
  """Creates the schedule needed to realize these Tensor(s)."""
  schedule, var_vals = self.schedule_with_vars(*lst)
  assert len(var_vals) == 0
  return schedule

realize ¤

realize(*lst: Tensor, do_update_stats=True) -> Tensor

Triggers the computation needed to create these Tensor(s).
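A minimal sketch; realize forces the lazy computation to actually run on the device:

from tinygrad import Tensor
t = (Tensor([1.0, 2.0, 3.0]) * 2).realize()
print(t.tolist())  # [2.0, 4.0, 6.0]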

Source code in tinygrad/tensor.py
@disable_gc()
def realize(self, *lst:Tensor, do_update_stats=True) -> Tensor:
  """Triggers the computation needed to create these Tensor(s)."""
  # side-realize pending assigns for buffers referenced by these tensors
  if _pending_assigns:
    def _realize_pending(buf):
      for assign_uop in _pending_assigns.pop(buf, []):
        # recursively realize pending assigns that this assign's value depends on
        for u in assign_uop.toposort():
          if u.op is Ops.BUFFER and u in _pending_assigns: _realize_pending(u)
        becomes_map, schedule, var_vals = complete_create_schedule_with_vars(UOp.sink(assign_uop))
        _apply_map_to_tensors(becomes_map, name="Apply Pending Assign")
        run_schedule(schedule, var_vals, do_update_stats=do_update_stats)
        # update remaining pending assigns so they reference realized buffers instead of stale lazy graphs
        if becomes_map:
          for assigns in _pending_assigns.values():
            for i in range(len(assigns)): assigns[i] = assigns[i].substitute(becomes_map)
    for buf in {u for t in (self,)+lst for u in t.uop.toposort() if u.op is Ops.BUFFER}:
      if buf in _pending_assigns: _realize_pending(buf)
  if len(to_realize:=[x for x in (self,)+lst if not x.uop.has_buffer_identity()]):
    run_schedule(*Tensor.schedule_with_vars(*to_realize), do_update_stats=do_update_stats)
  return self

replace ¤

replace(x: Tensor) -> Tensor

Replaces the data of this tensor with the data of another tensor. Only the shapes of the tensors must match.
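A minimal sketch, assuming a replacement tensor of the same shape:

from tinygrad import Tensor
t = Tensor([1.0, 2.0, 3.0])
t.replace(Tensor([4.0, 5.0, 6.0]))
print(t.tolist())  # [4.0, 5.0, 6.0]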

Source code in tinygrad/tensor.py
def replace(self, x:Tensor) -> Tensor:
  """
  Replaces the data of this tensor with the data of another tensor. Only the shape of the tensors must match.
  """
  # used for replacing a Tensor with a new version of it (potentially with a different device and dtype)
  assert self.shape == x.shape, f"replace shape mismatch {self.shape} != {x.shape}"
  self.uop = x.uop
  return self

assign ¤

assign(x: Tensor | PyConst | list | tuple) -> Tensor
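Judging from the source below, assign writes the values of x into this tensor in place; shapes, dtypes, and devices must match. A minimal sketch:

from tinygrad import Tensor
t = Tensor.zeros(3).contiguous().realize()
t.assign(Tensor([7.0, 8.0, 9.0]))
print(t.tolist())  # [7.0, 8.0, 9.0]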
Source code in tinygrad/tensor.py
def assign(self, x:Tensor|PyConst|list|tuple) -> Tensor:
  is_disk = isinstance(self.device, str) and self.device.startswith("DISK")
  if not isinstance(x, Tensor): x = Tensor(x, device="CPU" if is_disk else self.device, dtype=self.dtype)
  if self.uop is x.uop: return self  # a self assign is a NOOP
  # broadcast x (shape only, dtype must match)
  if self.shape != x.shape: x = x._broadcast_to(self.shape)
  if self.shape != x.shape: raise RuntimeError(f"assign shape mismatch {self.shape} != {x.shape}")
  if not is_disk and self.device != x.device: raise RuntimeError(f"assign device mismatch {self.device} != {x.device}")
  if self.dtype != x.dtype: raise RuntimeError(f"assign dtype mismatch {self.dtype} != {x.dtype}")
  if isinstance(self.device, tuple) and self.uop.axis != x.uop.axis: raise RuntimeError(f"multi axis mismatch {self.uop.axis} != {x.uop.axis}")

  # TODO: this is a hack for writing to DISK. remove with working assign
  if is_disk:
    self._buffer().copyin(x._data())
    return self
  result = self._apply_uop(UOp.assign, x)
  # track view assigns (not full-buffer or assign-chain) so they can be side-realized when the buffer is read
  if (buf_uop:=self.uop.base).op is Ops.BUFFER and self.uop.op is not Ops.ASSIGN and not self.uop.has_buffer_identity():
    # deduplicate: if the value is already a pending assign for this buffer (e.g. __iadd__ in __setitem__), remove it
    if x.uop in _pending_assigns.get(buf_uop, []): _pending_assigns[buf_uop].remove(x.uop)
    _pending_assigns.setdefault(buf_uop, []).append(result.uop)
  return self.replace(result)

detach ¤

detach() -> Tensor

Returns a new tensor with the same data as this tensor, but detached from the autograd graph.
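A minimal sketch; the detached tensor shares the data but is excluded from gradient tracking:

from tinygrad import Tensor
x = Tensor([2.0, 3.0], requires_grad=True)
d = x.detach()
print(d.requires_grad)  # False: no gradient flows back to x through d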

Source code in tinygrad/tensor.py
def detach(self) -> Tensor:
  """
  Returns a new tensor with the same data as this tensor, but detached from the autograd graph.
  """
  return Tensor(self.uop.detach(), device=self.device, requires_grad=False)

clone ¤

clone() -> Tensor

Creates a clone of this tensor allocating a separate buffer for the data.
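A minimal sketch; the clone is backed by its own buffer rather than aliasing the original:

from tinygrad import Tensor
t = Tensor([1.0, 2.0]).realize()
c = t.clone()
print(c.tolist())  # [1.0, 2.0], held in a separate buffer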

Source code in tinygrad/tensor.py
def clone(self) -> Tensor:
  """
  Creates a clone of this tensor allocating a separate buffer for the data.
  """
  ret = Tensor.empty(self.shape, device=self.device, dtype=self.dtype)
  if self.grad is not None: ret.grad = self.grad.clone()
  return ret.assign(self)

to ¤

to(device: str | tuple[str, ...] | None) -> Tensor

Moves the tensor to the given device.
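A minimal sketch, assuming a CPU backend is available (substitute your own device string):

from tinygrad import Tensor
t = Tensor([1, 2, 3])
t_cpu = t.to("CPU")
print(t_cpu.device)  # CPU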

Source code in tinygrad/tensor.py
def to(self, device:str|tuple[str, ...]|None) -> Tensor:
  """
  Moves the tensor to the given device.
  """
  device = canonicalize_device(device)
  if device == self.device: return self
  if not isinstance(device, str): return self.shard(device)
  ret = Tensor(self.uop, device, requires_grad=self.requires_grad)
  if self.grad is not None: ret.grad = self.grad.to(device)
  return ret

to_ ¤

to_(device: str | tuple[str, ...] | None) -> Tensor

Moves the tensor to the given device in place.
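The in-place variant mutates this tensor instead of returning a moved copy; a minimal sketch, again assuming a CPU backend:

from tinygrad import Tensor
t = Tensor([1, 2, 3])
t.to_("CPU")
print(t.device)  # CPU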

Source code in tinygrad/tensor.py
def to_(self, device:str|tuple[str, ...]|None) -> Tensor:
  """
  Moves the tensor to the given device in place.
  """
  real = self.to(device)
  if self.grad is not None and real.grad is not None: self.grad.replace(real.grad)
  return self.replace(real)

shard ¤

shard(
    devices: tuple[str, ...], axis: int | None = None
) -> Tensor

Shards the tensor across the given devices. Optionally specify which axis to shard on.

t = Tensor.empty(2, 4)
print(t.shard((t.device, t.device), axis=1).uop)
UOp(Ops.MULTI, dtypes.float, arg=1, src=(
  UOp(Ops.SHRINK, dtypes.float, arg=None, src=(
    UOp(Ops.COPY, dtypes.float, arg=None, src=(
      UOp(Ops.RESHAPE, dtypes.float, arg=None, src=(
        UOp(Ops.BUFFER, dtypes.float, arg=8, src=(
          UOp(Ops.UNIQUE, dtypes.void, arg=1516, src=()),
          UOp(Ops.DEVICE, dtypes.void, arg='CPU', src=()),)),
        UOp(Ops.VCONST, dtypes.index.vec(2), arg=(2, 4), src=()),)),
      UOp(Ops.DEVICE, dtypes.void, arg=('CPU', 'CPU'), src=()),)),
    UOp(Ops.VECTORIZE, dtypes.index.vec(2), arg=None, src=(
      UOp(Ops.CONST, dtypes.index, arg=0, src=()),
      x10:=UOp(Ops.MUL, dtypes.index, arg=None, src=(
        UOp(Ops.DEFINE_VAR, dtypes.index, arg=('_device_num', 0, 1), src=()),
        x12:=UOp(Ops.CONST, dtypes.index, arg=2, src=()),)),)),
    UOp(Ops.VECTORIZE, dtypes.index.vec(2), arg=None, src=(
      x12,
      UOp(Ops.ADD, dtypes.index, arg=None, src=(
        x10,
        x12,)),)),)),))
Source code in tinygrad/tensor.py
def shard(self, devices:tuple[str, ...], axis:int|None=None) -> Tensor:
  """
  Shards the tensor across the given devices. Optionally specify which axis to shard on.

  ```python exec="true" source="above" session="tensor" result="python"
  t = Tensor.empty(2, 4)
  print(t.shard((t.device, t.device), axis=1).uop)
  ```
  """
  if not isinstance(self.device, str): raise RuntimeError("can't shard a multi-device tensor")
  if len(devices) == 1: return self.to(devices[0])
  devices = cast(tuple[str, ...], canonicalize_device(devices))
  mlb = self.uop.shard(devices, self._resolve_dim(axis)) if axis is not None else self.uop.copy_to_device(devices)
  return Tensor(mlb, device=devices, requires_grad=self.requires_grad)

shard_ ¤

shard_(
    devices: tuple[str, ...], axis: int | None = None
) -> Tensor

Shards the tensor across the given devices in place.
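Like shard, but replaces this tensor in place; a minimal sketch using two copies of the current device (the printed tuple depends on the default backend):

from tinygrad import Tensor
t = Tensor.empty(2, 4)
t.shard_((t.device, t.device), axis=1)
print(t.device)  # e.g. ('CPU', 'CPU')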

Source code in tinygrad/tensor.py
def shard_(self, devices:tuple[str, ...], axis:int|None=None) -> Tensor:
  """
  Shards the tensor across the given devices in place.
  """
  return self.replace(self.shard(devices, axis))

contiguous ¤

contiguous(*args, **kwargs) -> Tensor

Returns a contiguous tensor.
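A minimal sketch; contiguous materializes the tensor in standard layout instead of keeping it as a lazy view:

from tinygrad import Tensor
t = Tensor([[1, 2], [3, 4]]).T.contiguous()  # the transposed view gets its own buffer when realized
print(t.tolist())  # [[1, 3], [2, 4]]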

Source code in tinygrad/tensor.py
def contiguous(self, *args, **kwargs) -> Tensor:
  """
  Returns a contiguous tensor.
  """
  return self._apply_uop(UOp.contiguous, extra_args=args, **kwargs)

contiguous_backward ¤

contiguous_backward() -> Tensor

Inserts a contiguous operation in the backward pass.
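A minimal sketch; the forward value is unchanged, only the gradient flowing back through this point is made contiguous:

from tinygrad import Tensor
x = Tensor([[1.0, 2.0], [3.0, 4.0]], requires_grad=True)
y = x.T.contiguous_backward().sum()
y.backward()
print(x.grad.tolist())  # [[1.0, 1.0], [1.0, 1.0]]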

Source code in tinygrad/tensor.py
def contiguous_backward(self) -> Tensor:
  """
  Inserts a contiguous operation in the backward pass.
  """
  return self._apply_uop(UOp.contiguous_backward)

Gradient¤

gradient ¤

gradient(
    *targets: Tensor,
    gradient: Tensor | None = None,
    materialize_grads=False
) -> list[Tensor]

Computes the gradient of the targets with respect to self.

x = Tensor.eye(3)
y = Tensor([[2.0,0,-2.0]])
z = y.matmul(x).sum()
dx, dy = z.gradient(x, y)

print(dx.tolist())  # dz/dx
print(dy.tolist())  # dz/dy
[[2.0, 2.0, 2.0], [0.0, 0.0, 0.0], [-2.0, -2.0, -2.0]]
[[1.0, 1.0, 1.0]]
Source code in tinygrad/tensor.py
def gradient(self, *targets:Tensor, gradient:Tensor|None=None, materialize_grads=False) -> list[Tensor]:
  """
  Computes the gradient of the targets with respect to self.

  ```python exec="true" source="above" session="tensor" result="python"
  x = Tensor.eye(3)
  y = Tensor([[2.0,0,-2.0]])
  z = y.matmul(x).sum()
  dx, dy = z.gradient(x, y)

  print(dx.tolist())  # dz/dx
  print(dy.tolist())  # dz/dy
  ```
  """
  assert gradient is not None or self.shape == tuple(), "when no gradient is provided, backward must be called on a scalar tensor"
  if not (self.is_floating_point() and all(t.is_floating_point() for t in targets)): raise RuntimeError("only float Tensors have gradient")
  if gradient is None: gradient = Tensor(1.0, dtype=self.dtype, device=self.device, requires_grad=False)
  target_uops = [x.uop for x in targets]
  grads = compute_gradient(self.uop, gradient.uop, set(target_uops))
  ret = []
  for x in target_uops:
    if (y:=grads.get(x)) is None:
      if materialize_grads: y = x.const_like(0)
      else: raise RuntimeError(f"{x}\n\nnot found in\n\n{self.uop}")
    ret.append(y)
  # create returned Tensors
  return [Tensor(u, device=t.device) for t,u in zip(targets, ret)]

backward ¤

backward(gradient: Tensor | None = None) -> Tensor

Propagates the gradient of a tensor backwards through the computation graph. If the 'gradient' argument is not provided, the tensor must be a scalar, and the gradient is implicitly set to 1.0.

t = Tensor([1.0, 2.0, 3.0, 4.0], requires_grad=True)
t.sum().backward()
print(t.grad.numpy())
[1. 1. 1. 1.]

Source code in tinygrad/tensor.py
def backward(self, gradient:Tensor|None=None) -> Tensor:
  """
  Propagates the gradient of a tensor backwards through the computation graph.
  If the 'gradient' argument is not provided, the tensor must be a scalar, and the gradient is implicitly set to 1.0.
  ```python exec="true" source="above" session="tensor" result="python"
  t = Tensor([1.0, 2.0, 3.0, 4.0], requires_grad=True)
  t.sum().backward()
  print(t.grad.numpy())
  ```
  """
  all_uops = self.uop.toposort()
  tensors_need_grad: list[Tensor] = [t for tref in all_tensors if (t:=tref()) is not None and \
                                     t.uop in all_uops and t.requires_grad]
  # clear contexts
  for t,g in zip(tensors_need_grad, self.gradient(*tensors_need_grad, gradient=gradient, materialize_grads=True)):
    assert g.shape == t.shape, f"grad shape must match tensor shape, {g.shape!r} != {t.shape!r}"
    if t.grad is None: t.grad = g
    else: t.grad.assign(t.grad + g)
  return self