Skip to content

Movement

Movement (low level)¤

view ¤

view(shape, *args) -> Self

.view is an alias for .reshape.

Source code in tinygrad/mixin/movement.py
256
257
258
def view(self, shape, *args) -> Self:
  """`.view` is an alias for `.reshape`; all arguments are forwarded unchanged."""
  return self.reshape(shape, *args)

reshape ¤

reshape(shape, *args) -> Self

Returns a tensor with the same data as the original tensor but with a different shape. shape can be passed as a tuple or as separate arguments.

t = Tensor.arange(6)
print(t.reshape(2, 3).numpy())
[[0 1 2]
 [3 4 5]]
Source code in tinygrad/mixin/movement.py
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
def reshape(self, shape, *args) -> Self:
  """
  Returns a tensor with the same data as the original tensor but with a different shape.
  `shape` can be passed as a tuple or as separate arguments.

  A dimension given as `None` keeps the corresponding dimension of `self`; at most one
  dimension may be `-1`, which is inferred from the remaining dimensions.

  ```python exec="true" source="above" session="tensor" result="python"
  t = Tensor.arange(6)
  print(t.reshape(2, 3).numpy())
  ```
  """
  # resolve None and args: a None entry takes the current size at that index
  new_shape = tuple([s if s is not None else self.shape[i] for i, s in enumerate(argfix(shape, *args))])
  # resolve -1: only a single dimension may be inferred
  if (c := new_shape.count(-1)) > 1:
    raise RuntimeError(f"only one dimension can be inferred using -1, getting {new_shape}")
  if c:
    # prod(new_shape) is negative here (the single -1 is still in it), so the double
    # negation computes total_elements // known_elements with floor division; any
    # remainder (non-divisible case) is caught by the size check just below
    new_shape = tuple([-prod(self.shape) // prod(new_shape) if s == -1 else s for s in new_shape])
  if prod(self.shape) != prod(new_shape):
    raise ValueError(f"size mismatch, can't reshape ({self.shape}) -> ({new_shape})")
  ret = self._mop(Ops.RESHAPE, arg=new_shape)
  # a no-op reshape returns self rather than adding a redundant movement op
  return self if ret.shape == self.shape else ret

expand ¤

expand(shape, *args) -> Self

Returns a tensor that is expanded to the shape that is specified. Expand can also increase the number of dimensions that a tensor has.

Passing a -1 or None to a dimension means that its size will not be changed.

t = Tensor([1, 2, 3])
print(t.expand(4, -1).numpy())
[[1 2 3]
 [1 2 3]
 [1 2 3]
 [1 2 3]]
Source code in tinygrad/mixin/movement.py
141
142
143
144
145
146
147
148
149
150
151
152
153
154
def expand(self, shape, *args) -> Self:
  """
  Returns a tensor expanded to the specified shape.
  Expanding may also add leading dimensions to the tensor.

  A `-1` or `None` entry leaves the size of that dimension unchanged.

  ```python exec="true" source="above" session="tensor" result="python"
  t = Tensor([1, 2, 3])
  print(t.expand(4, -1).numpy())
  ```
  """
  src_dims, dst_dims = _align_left(self.shape, argfix(shape, *args))
  target = tuple(s if d == -1 or d is None else d for s, d in zip(src_dims, dst_dims))
  return self._broadcast_to(target)

permute ¤

permute(order, *args) -> Self

Returns a tensor that is a permutation of the original tensor. The new tensor has the same data as the original tensor but with the dimensions permuted according to the order specified. order can be passed as a tuple or as separate arguments.

t = Tensor.empty(2, 3, 5)
print(t.shape)
(2, 3, 5)
print(t.permute(2, 0, 1).shape)
(5, 2, 3)

Source code in tinygrad/mixin/movement.py
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
def permute(self, order, *args) -> Self:
  """
  Returns a tensor with its dimensions permuted.
  The result holds the same data as the original tensor, with axes rearranged
  according to `order`, which can be passed as a tuple or as separate arguments.

  ```python exec="true" source="above" session="tensor" result="python"
  t = Tensor.empty(2, 3, 5)
  print(t.shape)
  ```
  ```python exec="true" source="above" session="tensor" result="python"
  print(t.permute(2, 0, 1).shape)
  ```
  """
  resolved = tuple(self._resolve_dim(d) for d in argfix(order, *args))
  if sorted(resolved) != list(range(self.ndim)):
    raise RuntimeError(f"order is not a valid permutation, getting {resolved}")
  # the identity permutation is a no-op; skip emitting a movement op
  if resolved == tuple(range(self.ndim)): return self
  return self._mop(Ops.PERMUTE, arg=resolved)

flip ¤

flip(axis, *args) -> Self

Returns a tensor that reverses the order of the original tensor along given axis. axis can be passed as a tuple or as separate arguments.

t = Tensor.arange(6).reshape(2, 3)
print(t.numpy())
[[0 1 2]
 [3 4 5]]
print(t.flip(0).numpy())
[[3 4 5]
 [0 1 2]]
print(t.flip((0, 1)).numpy())
[[5 4 3]
 [2 1 0]]

Source code in tinygrad/mixin/movement.py
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
def flip(self, axis, *args) -> Self:
  """
  Returns a tensor with the element order reversed along each given `axis`.
  `axis` can be passed as a tuple or as separate arguments.

  ```python exec="true" source="above" session="tensor" result="python"
  t = Tensor.arange(6).reshape(2, 3)
  print(t.numpy())
  ```
  ```python exec="true" source="above" session="tensor" result="python"
  print(t.flip(0).numpy())
  ```
  ```python exec="true" source="above" session="tensor" result="python"
  print(t.flip((0, 1)).numpy())
  ```
  """
  axes = tuple(self._resolve_dim(a) for a in argfix(axis, *args))
  assert all(not isinstance(a, bool) and 0 <= a < self.ndim for a in axes), f"flip args must be axis ints {axes}"
  if len(dedup(axes)) != len(axes):
    raise RuntimeError(f"dim can appear at most once, getting {axes}")
  # boolean mask per dimension: True where the axis should be reversed
  mask = tuple(d in axes for d in range(len(self.shape)))
  return self if not any(mask) else self._mop(Ops.FLIP, arg=mask)

shrink ¤

shrink(arg: tuple[tuple[sint, sint] | None, ...]) -> Self

Returns a tensor that shrinks each axis based on the input arg. arg must have the same length as self.ndim. For each axis, it can be None, which means no shrink, or a tuple (start, end) that works the same as a Python slice.

t = Tensor.arange(9).reshape(3, 3)
print(t.numpy())
[[0 1 2]
 [3 4 5]
 [6 7 8]]
print(t.shrink(((None, (1, 3)))).numpy())
[[1 2]
 [4 5]
 [7 8]]
print(t.shrink((((0, 2), (0, 2)))).numpy())
[[0 1]
 [3 4]]

Source code in tinygrad/mixin/movement.py
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
def shrink(self, arg: tuple[tuple[sint, sint] | None, ...]) -> Self:
  """
  Returns a tensor with each axis shrunk based on the input `arg`.
  `arg` must have the same length as `self.ndim`.
  Each entry is either `None` (no shrink on that axis) or a `(start, end)` tuple
  behaving like a Python slice.

  ```python exec="true" source="above" session="tensor" result="python"
  t = Tensor.arange(9).reshape(3, 3)
  print(t.numpy())
  ```
  ```python exec="true" source="above" session="tensor" result="python"
  print(t.shrink(((None, (1, 3)))).numpy())
  ```
  ```python exec="true" source="above" session="tensor" result="python"
  print(t.shrink((((0, 2), (0, 2)))).numpy())
  ```
  """
  if len(arg) != self.ndim:
    raise ValueError(f"{self.ndim=} != {len(arg)=}")
  # None entries become the full-extent slice (0, size)
  bounds = [(0, size) if window is None else window for window, size in zip(arg, self.shape)]
  out = self._mop(Ops.SHRINK, arg=bounds)
  # no-op shrink returns self instead of a redundant movement op
  return out if out.shape != self.shape else self

pad ¤

pad(
    padding: (
        Sequence[sint] | Sequence[tuple[sint, sint] | None]
    ),
    mode: str = "constant",
    value: float = 0.0,
) -> Tensor

Returns a tensor with padding applied based on the input padding.

padding supports two padding structures:

  1. Flat padding: (padding_left, padding_right, padding_top, padding_bottom, ...)

    • This structure matches PyTorch's pad.
    • padding length must be even.
  2. Group padding: (..., (padding_top, padding_bottom), (padding_left, padding_right))

    • This structure matches pad for JAX, NumPy, TensorFlow, and others.
    • For each axis, padding can be None, meaning no padding, or a tuple (start, end).
    • padding must have the same length as self.ndim.

Padding values can be negative, resulting in dimension shrinks that work similarly to Python negative slices. Padding mode is selected with mode, which supports constant, reflect and replicate.

t = Tensor.arange(9).reshape(1, 1, 3, 3)
print(t.numpy())
[[[[0 1 2]
   [3 4 5]
   [6 7 8]]]]
print(t.pad((1, 2, 0, -1)).numpy())
[[[[0 0 1 2 0 0]
   [0 3 4 5 0 0]]]]
print(t.pad(((None, None, (0, -1), (1, 2)))).numpy())
[[[[0 0 1 2 0 0]
   [0 3 4 5 0 0]]]]
print(t.pad((1, 2, 0, -1), value=-float('inf')).numpy())
[[[[-inf   0.   1.   2. -inf -inf]
   [-inf   3.   4.   5. -inf -inf]]]]

Source code in tinygrad/tensor.py
933
934
935
936
937
938
939
940
941
942
943
944
945
946
947
948
949
950
951
952
953
954
955
956
957
958
959
960
961
962
963
964
965
966
967
968
969
970
971
972
973
974
975
976
def pad(self, padding:Sequence[sint]|Sequence[tuple[sint, sint]|None], mode:str="constant", value:float=0.0) -> Tensor:
  """
  Returns a tensor with padding applied based on the input `padding`.

  `padding` supports two padding structures:

  1. Flat padding: `(padding_left, padding_right, padding_top, padding_bottom, ...)`
      - This structure matches PyTorch's pad.
      - `padding` length must be even.

  2. Group padding: `(..., (padding_top, padding_bottom), (padding_left, padding_right))`
      - This structure matches pad for JAX, NumPy, TensorFlow, and others.
      - For each axis, padding can be `None`, meaning no padding, or a tuple `(start, end)`.
      - `padding` must have the same length as `self.ndim`.

  Padding values can be negative, resulting in dimension shrinks that work similarly to Python negative slices.
  Padding mode is selected with `mode`, which supports `constant`, `reflect`, `replicate` and `circular`.

  ```python exec="true" source="above" session="tensor" result="python"
  t = Tensor.arange(9).reshape(1, 1, 3, 3)
  print(t.numpy())
  ```
  ```python exec="true" source="above" session="tensor" result="python"
  print(t.pad((1, 2, 0, -1)).numpy())
  ```
  ```python exec="true" source="above" session="tensor" result="python"
  print(t.pad(((None, None, (0, -1), (1, 2)))).numpy())
  ```
  ```python exec="true" source="above" session="tensor" result="python"
  print(t.pad((1, 2, 0, -1), value=-float('inf')).numpy())
  ```
  """
  # normalize to grouped format
  # all-int input is flat (PyTorch) order: last dims first, so missing leading dims get (0,0)
  if all(isinstance(p, (int,UOp)) for p in padding):
    if len(padding)%2 != 0: raise ValueError("Flat padding must have even number of pads")
    pX = ((0,0),)*(self.ndim - len(padding)//2) + flat_to_grouped(cast(Sequence[sint], padding))
  else: pX = tuple((0,0) if p is None else p for p in cast(Sequence[tuple[sint, sint]|None], padding))
  if len(pX) != self.ndim: raise ValueError(f"padding length is improper, {padding=} {self.ndim=}")
  # dispatch
  if mode == "constant": return self._pad_constant(pX, value)
  # non-constant modes require concrete (non-symbolic) shapes
  assert all_int(self.shape), f"does not support symbolic shape {self.shape}"
  if mode == "circular": return self._pad_circular(pX)
  if mode in {"reflect", "replicate"}: return self._pad_reflect_replicate(pX, mode)
  raise NotImplementedError(f"{mode=} is not supported")

Movement (high level)¤

__getitem__ ¤

__getitem__(indices) -> Tensor

Retrieves a sub-tensor using indexing.

Supported Index Types: int | slice | Tensor | None | list | tuple | Ellipsis

Examples:

t = Tensor.arange(12).reshape(3, 4)
print(t.numpy())
[[ 0  1  2  3]
 [ 4  5  6  7]
 [ 8  9 10 11]]

  • Int Indexing: Select an element or sub-tensor using integers for each dimension.

    print(t[1, 2].numpy())
    
    6
    

  • Slice Indexing: Select a range of elements using slice notation (start:end:stride).

    print(t[0:2, ::2].numpy())
    
    [[0 2]
     [4 6]]
    

  • Tensor Indexing: Use another tensor as indices for advanced indexing. Using tuple or list here also works.

    print(t[Tensor([2, 0, 1]), Tensor([1, 2, 3])].numpy())
    
    [9 2 7]
    

  • None Indexing: Add a new dimension to the tensor.

    print(t[:, None].shape)
    
    (3, 1, 4)
    

Note

Out-of-bounds indexing results in a value of 0.

t = Tensor([1, 2, 3])
print(t[Tensor([4, 3, 2])].numpy())
[0 0 3]

Source code in tinygrad/tensor.py
1067
1068
1069
1070
1071
1072
1073
1074
1075
1076
1077
1078
1079
1080
1081
1082
1083
1084
1085
1086
1087
1088
1089
1090
1091
1092
1093
1094
1095
1096
1097
1098
1099
1100
1101
1102
1103
1104
1105
def __getitem__(self, indices) -> Tensor:
  """
  Retrieves a sub-tensor using indexing.

  Supported Index Types: `int | slice | Tensor | None | list | tuple | Ellipsis`

  Examples:
  ```python exec="true" source="above" session="tensor" result="python"
  t = Tensor.arange(12).reshape(3, 4)
  print(t.numpy())
  ```

  - Int Indexing: Select an element or sub-tensor using integers for each dimension.
    ```python exec="true" source="above" session="tensor" result="python"
    print(t[1, 2].numpy())
    ```

  - Slice Indexing: Select a range of elements using slice notation (`start:end:stride`).
    ```python exec="true" source="above" session="tensor" result="python"
    print(t[0:2, ::2].numpy())
    ```

  - Tensor Indexing: Use another tensor as indices for advanced indexing. Using `tuple` or `list` here also works.
    ```python exec="true" source="above" session="tensor" result="python"
    print(t[Tensor([2, 0, 1]), Tensor([1, 2, 3])].numpy())
    ```

  - `None` Indexing: Add a new dimension to the tensor.
    ```python exec="true" source="above" session="tensor" result="python"
    print(t[:, None].shape)
    ```

  NOTE: Out-of-bounds indexing results in a value of `0`.
  ```python exec="true" source="above" session="tensor" result="python"
  t = Tensor([1, 2, 3])
  print(t[Tensor([4, 3, 2])].numpy())
  ```
  """
  # all index normalization and advanced-indexing logic lives in _getitem
  return self._getitem(indices)

gather ¤

gather(dim: int, index: Self) -> Self

Gathers values along an axis specified by dim.

t = Tensor([[1, 2], [3, 4]])
print(t.numpy())
[[1 2]
 [3 4]]
print(t.gather(1, Tensor([[0, 0], [1, 0]])).numpy())
[[1 1]
 [4 3]]

Source code in tinygrad/mixin/__init__.py
816
817
818
819
820
821
822
823
824
825
826
827
828
829
830
831
832
833
def gather(self, dim:int, index:Self) -> Self:
  """
  Gathers values along an axis specified by `dim`.

  ```python exec="true" source="above" session="tensor" result="python"
  t = Tensor([[1, 2], [3, 4]])
  print(t.numpy())
  ```
  ```python exec="true" source="above" session="tensor" result="python"
  print(t.gather(1, Tensor([[0, 0], [1, 0]])).numpy())
  ```
  """
  if index.device != self.device: raise RuntimeError(f"expected index and self on the same device, {index.device=}, {self.device=}")
  assert index.ndim == self.ndim, f"self.ndim must equal index.ndim, {self.ndim=}, {index.ndim=}"
  dim = self._resolve_dim(dim)
  assert all(s >= i for d,(s,i) in enumerate(zip(self.shape, index.shape)) if d != dim), "requires self.shape[d] >= index.shape[d] for all d != dim"
  # clip the non-gather dims of self down to index's extents (None presumably keeps
  # the gather dim whole — confirm against shrink_to), move the gather dim to a new
  # trailing axis so it can be selected elementwise
  x = self.shrink_to(tuple(i if d != dim else None for d,i in enumerate(index.shape))).unsqueeze(-1).transpose(-1, dim)
  # one-hot the indices along the trailing axis, mask-select, then sum the axis out;
  # dtype is pinned so the sum doesn't promote
  return (index.unsqueeze(-1)._one_hot_along_dim(self.shape[dim]).where(x, 0)).sum(-1, dtype=self.dtype)

cat ¤

cat(*args: Self, dim: int = 0) -> Self

Concatenates self with other tensors in args along an axis specified by dim. All tensors must have the same shape except in the concatenating dimension.

t0, t1, t2 = Tensor([[1, 2]]), Tensor([[3, 4]]), Tensor([[5, 6]])
print(t0.cat(t1, t2, dim=0).numpy())
[[1 2]
 [3 4]
 [5 6]]
print(t0.cat(t1, t2, dim=1).numpy())
[[1 2 3 4 5 6]]

Source code in tinygrad/mixin/__init__.py
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
def cat(self, *args:Self, dim:int=0) -> Self:
  """
  Concatenates self with other tensors in `args` along an axis specified by `dim`.
  All tensors must have the same shape except in the concatenating dimension.

  ```python exec="true" source="above" session="tensor" result="python"
  t0, t1, t2 = Tensor([[1, 2]]), Tensor([[3, 4]]), Tensor([[5, 6]])
  print(t0.cat(t1, t2, dim=0).numpy())
  ```
  ```python exec="true" source="above" session="tensor" result="python"
  print(t0.cat(t1, t2, dim=1).numpy())
  ```
  """
  dim = self._resolve_dim(dim)
  # every tensor must match self's shape on all axes except dim
  for arg in args: assert arg.ndim==self.ndim and all(ti==ai for i,(ti,ai) in enumerate(zip(self.shape, arg.shape)) if i!=dim)
  tensors = [self, *args]
  # running offsets along dim: dim_cumsum[i] is where tensor i starts in the output
  dim_cumsum = list(itertools.accumulate([t.shape[dim] for t in tensors], initial=0))
  # zero-pad each tensor to the full output extent so the pieces occupy disjoint slots
  padded = [t.pad(tuple((dim_cumsum[i], dim_cumsum[-1]-dim_cumsum[i+1]) if j==dim else None for j in range(t.ndim))) for i,t in enumerate(tensors)]
  # summing the disjointly-padded tensors assembles the concatenation
  return padded[0].usum(*padded[1:])

stack ¤

stack(*args: Self, dim: int = 0) -> Self

Concatenates self with other tensors in args along a new dimension specified by dim.

t0, t1, t2 = Tensor([1, 2]), Tensor([3, 4]), Tensor([5, 6])
print(t0.stack(t1, t2, dim=0).numpy())
[[1 2]
 [3 4]
 [5 6]]
print(t0.stack(t1, t2, dim=1).numpy())
[[1 3 5]
 [2 4 6]]

Source code in tinygrad/mixin/__init__.py
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
def stack(self, *args:Self, dim:int=0) -> Self:
  """
  Stacks self together with the tensors in `args` along a new dimension `dim`.

  ```python exec="true" source="above" session="tensor" result="python"
  t0, t1, t2 = Tensor([1, 2]), Tensor([3, 4]), Tensor([5, 6])
  print(t0.stack(t1, t2, dim=0).numpy())
  ```
  ```python exec="true" source="above" session="tensor" result="python"
  print(t0.stack(t1, t2, dim=1).numpy())
  ```
  """
  # shape and ndim validation is delegated to cat
  parts = [t.unsqueeze(dim) for t in argfix(self, *args)]
  first, *rest = parts
  return first.cat(*rest, dim=dim)

repeat ¤

repeat(repeats, *args) -> Self

Repeats tensor number of times along each dimension specified by repeats. repeats can be passed as a tuple or as separate arguments.

t = Tensor([1, 2, 3])
print(t.repeat(4, 2).numpy())
[[1 2 3 1 2 3]
 [1 2 3 1 2 3]
 [1 2 3 1 2 3]
 [1 2 3 1 2 3]]
print(t.repeat(4, 2, 1).shape)
(4, 2, 3)

Source code in tinygrad/mixin/movement.py
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
def repeat(self, repeats, *args) -> Self:
  """
  Tiles the tensor along each dimension the number of times given by `repeats`.
  `repeats` can be passed as a tuple or as separate arguments.

  ```python exec="true" source="above" session="tensor" result="python"
  t = Tensor([1, 2, 3])
  print(t.repeat(4, 2).numpy())
  ```
  ```python exec="true" source="above" session="tensor" result="python"
  print(t.repeat(4, 2, 1).shape)
  ```
  """
  reps = argfix(repeats, *args)
  base_shape = _align_left(self.shape, reps)[0]
  # for every repeated axis, insert a unit dim before it (reshape), grow that unit
  # dim to r (expand), then merge the pair back into one axis (final reshape)
  with_units, expanded = [], []
  for r, s in zip(reps, base_shape):
    if r == 1:
      with_units.append(s)
      expanded.append(s)
    else:
      with_units += [1, s]
      expanded += [r, s]
  merged = [r * s for r, s in zip(reps, base_shape)]
  return self.reshape(with_units).expand(expanded).reshape(merged)

repeat_interleave ¤

repeat_interleave(
    repeats: int, dim: int | None = None
) -> Self

Repeats elements of a tensor.

t = Tensor([1, 2, 3])
print(t.repeat_interleave(2).numpy())
[1 1 2 2 3 3]
Source code in tinygrad/mixin/movement.py
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
def repeat_interleave(self, repeats: int, dim: int | None = None) -> Self:
  """
  Repeats each element of the tensor `repeats` times along `dim`
  (over the flattened tensor when `dim` is None).

  ```python exec="true" source="above" session="tensor" result="python"
  t = Tensor([1, 2, 3])
  print(t.repeat_interleave(2).numpy())
  ```
  """
  if dim is None:
    t, axis = self.flatten(), 0
  else:
    t, axis = self, self._resolve_dim(dim)
  s = t.shape
  # add a unit dim right after axis, broadcast it to `repeats`, then fold it back in
  widened = t.reshape(*s[:axis + 1], 1, *s[axis + 1:]).expand(*s[:axis + 1], repeats, *s[axis + 1:])
  return widened.reshape(*s[:axis], s[axis] * repeats, *s[axis + 1:])

split ¤

split(
    sizes: int | Sequence[int], dim: int = 0
) -> tuple[Self, ...]

Splits the tensor into chunks along the dimension specified by dim. If sizes is an integer, it splits into equally sized chunks if possible, otherwise the last chunk will be smaller. If sizes is a list, it splits into len(sizes) chunks with size in dim according to sizes.

t = Tensor.arange(10).reshape(5, 2)
print(t.numpy())
[[0 1]
 [2 3]
 [4 5]
 [6 7]
 [8 9]]
split = t.split(2)
print("\n".join([repr(x.numpy()) for x in split]))
array([[0, 1],
       [2, 3]], dtype=int32)
array([[4, 5],
       [6, 7]], dtype=int32)
array([[8, 9]], dtype=int32)
split = t.split([1, 4])
print("\n".join([repr(x.numpy()) for x in split]))
array([[0, 1]], dtype=int32)
array([[2, 3],
       [4, 5],
       [6, 7],
       [8, 9]], dtype=int32)

Source code in tinygrad/mixin/movement.py
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
def split(self, sizes:int|Sequence[int], dim:int=0) -> tuple[Self, ...]:
  """
  Splits the tensor into chunks along the dimension specified by `dim`.
  If `sizes` is an integer, it splits into equally sized chunks if possible, otherwise the last chunk will be smaller.
  If `sizes` is a list, it splits into `len(sizes)` chunks with size in `dim` according to `sizes`.

  ```python exec="true" source="above" session="tensor" result="python"
  t = Tensor.arange(10).reshape(5, 2)
  print(t.numpy())
  ```
  ```python exec="true" source="above" session="tensor" result="python"
  split = t.split(2)
  print("\\n".join([repr(x.numpy()) for x in split]))
  ```
  ```python exec="true" source="above" session="tensor" result="python"
  split = t.split([1, 4])
  print("\\n".join([repr(x.numpy()) for x in split]))
  ```
  """
  dim = self._resolve_dim(dim)
  dim_sz = self.shape[dim]
  assert isinstance(dim_sz, int), f"does not support symbolic shape in split dimension {dim}: {self.shape}"
  if isinstance(sizes, int): sizes = [min(sizes, dim_sz-i) for i in range(0, max(1, dim_sz), max(1, sizes))]
  assert sum(sizes) == dim_sz, f"expect sizes to sum exactly to {dim_sz}, but got {sum(sizes)}"
  # compute chunk boundaries once (O(n)) instead of re-summing prefixes per chunk (O(n^2))
  bounds, acc = [0], 0
  for s in sizes:
    acc += s
    bounds.append(acc)
  return tuple(self.shrink(tuple((bounds[i], bounds[i+1]) if j == dim else None for j in range(self.ndim))) for i in range(len(sizes)))

chunk ¤

chunk(chunks: int, dim: int = 0) -> list[Self]

Splits the tensor into chunks number of chunks along the dimension dim. If the tensor size along dim is not divisible by chunks, all returned chunks will be the same size except the last one. The function may return fewer than the specified number of chunks.

chunked = Tensor.arange(11).chunk(6)
print("\n".join([repr(x.numpy()) for x in chunked]))
array([0, 1], dtype=int32)
array([2, 3], dtype=int32)
array([4, 5], dtype=int32)
array([6, 7], dtype=int32)
array([8, 9], dtype=int32)
array([10], dtype=int32)
chunked = Tensor.arange(12).chunk(6)
print("\n".join([repr(x.numpy()) for x in chunked]))
array([0, 1], dtype=int32)
array([2, 3], dtype=int32)
array([4, 5], dtype=int32)
array([6, 7], dtype=int32)
array([8, 9], dtype=int32)
array([10, 11], dtype=int32)
chunked = Tensor.arange(13).chunk(6)
print("\n".join([repr(x.numpy()) for x in chunked]))
array([0, 1, 2], dtype=int32)
array([3, 4, 5], dtype=int32)
array([6, 7, 8], dtype=int32)
array([ 9, 10, 11], dtype=int32)
array([12], dtype=int32)

Source code in tinygrad/mixin/movement.py
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
def chunk(self, chunks:int, dim:int=0) -> list[Self]:
  """
  Splits the tensor into `chunks` pieces along the dimension `dim`.
  When the size along `dim` is not divisible by `chunks`, every returned chunk has the
  same size except the last one, and fewer than `chunks` pieces may be returned.

  ```python exec="true" source="above" session="tensor" result="python"
  chunked = Tensor.arange(11).chunk(6)
  print("\\n".join([repr(x.numpy()) for x in chunked]))
  ```
  ```python exec="true" source="above" session="tensor" result="python"
  chunked = Tensor.arange(12).chunk(6)
  print("\\n".join([repr(x.numpy()) for x in chunked]))
  ```
  ```python exec="true" source="above" session="tensor" result="python"
  chunked = Tensor.arange(13).chunk(6)
  print("\\n".join([repr(x.numpy()) for x in chunked]))
  ```
  """
  dim = self._resolve_dim(dim)
  size_along_dim = self.shape[dim]
  assert isinstance(size_along_dim, int), f"does not support symbolic shape in split dimension {dim}: {self.shape}"
  assert chunks > 0, f"expect chunks to be greater than 0, got: {chunks}"
  # a zero-size dim still yields `chunks` empty pieces
  per_chunk = ceildiv(size_along_dim, chunks) if size_along_dim else [0]*chunks
  return list(self.split(per_chunk, dim=dim))

unfold ¤

unfold(dim: int, size, step: int) -> Self

Unfolds the tensor along dimension dim into overlapping windows.

Each window has length size and begins every step elements of self. Returns the input tensor with dimension dim replaced by dims (n_windows, size) where n_windows = (self.shape[dim] - size) // step + 1.

unfolded = Tensor.arange(8).unfold(0,2,2)
print("\n".join([repr(x.numpy()) for x in unfolded]))
array([0, 1], dtype=int32)
array([2, 3], dtype=int32)
array([4, 5], dtype=int32)
array([6, 7], dtype=int32)
unfolded = Tensor.arange(27).reshape(3,3,3).unfold(-1,2,3)
print("\n".join([repr(x.numpy()) for x in unfolded]))
array([[[0, 1]],

       [[3, 4]],

       [[6, 7]]], dtype=int32)
array([[[ 9, 10]],

       [[12, 13]],

       [[15, 16]]], dtype=int32)
array([[[18, 19]],

       [[21, 22]],

       [[24, 25]]], dtype=int32)

Source code in tinygrad/mixin/movement.py
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
def unfold(self, dim:int, size, step:int) -> Self:
  """
  Unfolds the tensor along dimension `dim` into overlapping windows.

  Each window has length `size` and begins every `step` elements of `self`.
  Returns the input tensor with dimension `dim` replaced by dims `(n_windows, size)`
  where `n_windows = (self.shape[dim] - size) // step + 1`.

  ```python exec="true" source="above" session="tensor" result="python"
  unfolded = Tensor.arange(8).unfold(0,2,2)
  print("\\n".join([repr(x.numpy()) for x in unfolded]))
  ```
  ```python exec="true" source="above" session="tensor" result="python"
  unfolded = Tensor.arange(27).reshape(3,3,3).unfold(-1,2,3)
  print("\\n".join([repr(x.numpy()) for x in unfolded]))
  ```
  """
  if size < 0: raise RuntimeError(f'size must be >= 0 but got {size=}')
  if step <= 0: raise RuntimeError(f'step must be > 0 but got {step=}')
  # NOTE(review): this check runs before dim is resolved, so a negative dim relies on
  # Python's negative tuple indexing into self.shape (works, but the error message shows
  # the unresolved dim)
  if size > self.shape[dim]: raise RuntimeError(f'maximum size for tensor at dimension {dim} is {self.shape[dim]} but size is {size}')
  dim = self._resolve_dim(dim)
  # move dim last, let _pool extract the sliding windows (presumably appending a window
  # axis — confirm against _pool), then invert the permutation and keep the window axis
  # trailing via argsort of the forward permutation
  perm_to_last = tuple(i for i in range(self.ndim) if i != dim) + (dim,)
  return self.permute(perm_to_last)._pool((size,), step).permute(argsort(perm_to_last) + (self.ndim,))

meshgrid ¤

meshgrid(*args, indexing: str = 'ij') -> tuple[Self, ...]

Generates coordinate matrices from coordinate vectors. Input tensors can be scalars or 1D tensors.

indexing determines how the output grids are aligned. ij indexing follows matrix-style indexing and xy indexing follows Cartesian-style indexing.

x, y = Tensor([1, 2, 3]), Tensor([4, 5, 6])
grid_x, grid_y = x.meshgrid(y)
print(grid_x.numpy())
print(grid_y.numpy())
[[1 1 1]
 [2 2 2]
 [3 3 3]]
[[4 5 6]
 [4 5 6]
 [4 5 6]]
grid_x, grid_y = x.meshgrid(y, indexing="xy")
print(grid_x.numpy())
print(grid_y.numpy())
[[1 2 3]
 [1 2 3]
 [1 2 3]]
[[4 4 4]
 [5 5 5]
 [6 6 6]]

Source code in tinygrad/mixin/movement.py
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
def meshgrid(self, *args, indexing:str="ij") -> tuple[Self, ...]:
  """
  Generates coordinate matrices from coordinate vectors.
  Input tensors can be scalars or 1D tensors.

  `indexing` determines how the output grids are aligned.
  `ij` indexing follows matrix-style indexing and `xy` indexing follows Cartesian-style indexing.

  ```python exec="true" source="above" session="tensor" result="python"
  x, y = Tensor([1, 2, 3]), Tensor([4, 5, 6])
  grid_x, grid_y = x.meshgrid(y)
  print(grid_x.numpy())
  print(grid_y.numpy())
  ```
  ```python exec="true" source="above" session="tensor" result="python"
  grid_x, grid_y = x.meshgrid(y, indexing="xy")
  print(grid_x.numpy())
  print(grid_y.numpy())
  ```
  """
  if indexing not in ("ij", "xy"): raise RuntimeError(f'indexing must be in ("ij", "xy"), got {indexing}')
  # a single tensor is returned as-is
  if len(tensors:=(self, *args)) == 1: return tensors
  # basis assigns each tensor its output axis; "xy" swaps the first two axes
  basis = tuple(range(len(tensors))) if indexing == "ij" else (1, 0) + tuple(range(2, len(tensors)))
  # give each tensor a distinct broadcast axis (its values along axis basis[i], size-1 elsewhere)
  tensors = tuple(t.reshape((-1,) + (1,)*(len(args) - i)) for i,t in zip(basis, tensors))
  # broadcast all reshaped tensors to a common grid shape
  output_shape = _broadcast_shape(*(t.shape for t in tensors))
  return tuple(t._broadcast_to(output_shape) for t in tensors)

squeeze ¤

squeeze(dim: int | None = None) -> Self

Returns a tensor with the specified size-1 dimensions removed. If dim is not specified, all dimensions with size 1 are removed.

t = Tensor.zeros(2, 1, 2, 1, 2)
print(t.squeeze().shape)
(2, 2, 2)
print(t.squeeze(0).shape)
(2, 1, 2, 1, 2)
print(t.squeeze(1).shape)
(2, 2, 1, 2)

Source code in tinygrad/mixin/movement.py
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
def squeeze(self, dim: int | None = None) -> Self:
  """
  Returns a tensor with the specified size-1 dimensions removed.
  When `dim` is not given, every dimension of size 1 is removed.

  ```python exec="true" source="above" session="tensor" result="python"
  t = Tensor.zeros(2, 1, 2, 1, 2)
  print(t.squeeze().shape)
  ```
  ```python exec="true" source="above" session="tensor" result="python"
  print(t.squeeze(0).shape)
  ```
  ```python exec="true" source="above" session="tensor" result="python"
  print(t.squeeze(1).shape)
  ```
  """
  if dim is None:
    # drop every size-1 axis at once
    return self.reshape(tuple(s for s in self.shape if s != 1))
  dim = self._resolve_dim(dim)
  # nothing to do for 0-d tensors or when the target axis isn't size 1
  if not self.ndim or self.shape[dim] != 1: return self
  return self.reshape(self.shape[:dim] + self.shape[dim + 1:])

unsqueeze ¤

unsqueeze(dim: int) -> Self

Returns a tensor with a new dimension of size 1 inserted at the specified dim.

t = Tensor([1, 2, 3, 4])
print(t.unsqueeze(0).numpy())
[[1 2 3 4]]
print(t.unsqueeze(1).numpy())
[[1]
 [2]
 [3]
 [4]]

Source code in tinygrad/mixin/movement.py
281
282
283
284
285
286
287
288
289
290
291
292
293
294
def unsqueeze(self, dim: int) -> Self:
  """
  Inserts a new dimension of size 1 at position `dim` and returns the result.

  ```python exec="true" source="above" session="tensor" result="python"
  t = Tensor([1, 2, 3, 4])
  print(t.unsqueeze(0).numpy())
  ```
  ```python exec="true" source="above" session="tensor" result="python"
  print(t.unsqueeze(1).numpy())
  ```
  """
  # extra=True lets dim reference the position one past the last axis
  dim = self._resolve_dim(dim, extra=True)
  widened = (*self.shape[:dim], 1, *self.shape[dim:])
  return self.reshape(widened)

T property ¤

T: Self

.T is an alias for .transpose().

transpose ¤

transpose(dim0=1, dim1=0) -> Self

Returns a tensor that is a transposed version of the original tensor. The given dimensions dim0 and dim1 are swapped.

t = Tensor.arange(6).reshape(2, 3)
print(t.numpy())
[[0 1 2]
 [3 4 5]]
print(t.transpose(0, 1).numpy())
[[0 3]
 [1 4]
 [2 5]]

Source code in tinygrad/mixin/movement.py
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
def transpose(self, dim0=1, dim1=0) -> Self:
  """
  Returns a transposed view of the tensor, with dimensions `dim0` and `dim1` swapped.

  ```python exec="true" source="above" session="tensor" result="python"
  t = Tensor.arange(6).reshape(2, 3)
  print(t.numpy())
  ```
  ```python exec="true" source="above" session="tensor" result="python"
  print(t.transpose(0, 1).numpy())
  ```
  """
  # start from the identity permutation and swap the two requested axes
  perm = list(range(self.ndim))
  perm[dim0], perm[dim1] = perm[dim1], perm[dim0]
  return self.permute(perm)

flatten ¤

flatten(start_dim=0, end_dim=-1) -> Self

Flattens the tensor by reshaping it into a one-dimensional tensor. If start_dim or end_dim are passed, only dimensions starting with start_dim and ending with end_dim are flattened.

t = Tensor.arange(8).reshape(2, 2, 2)
print(t.flatten().numpy())
[0 1 2 3 4 5 6 7]
print(t.flatten(start_dim=1).numpy())
[[0 1 2 3]
 [4 5 6 7]]

Source code in tinygrad/mixin/movement.py
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
def flatten(self, start_dim=0, end_dim=-1) -> Self:
  """
  Flattens the tensor by reshaping it into a one-dimensional tensor.
  If `start_dim` or `end_dim` are passed, only dimensions starting with `start_dim` and ending with `end_dim` are flattened.

  ```python exec="true" source="above" session="tensor" result="python"
  t = Tensor.arange(8).reshape(2, 2, 2)
  print(t.flatten().numpy())
  ```
  ```python exec="true" source="above" session="tensor" result="python"
  print(t.flatten(start_dim=1).numpy())
  ```
  """
  start_dim, end_dim = self._resolve_dim(start_dim), self._resolve_dim(end_dim)
  return self.reshape(self.shape[:start_dim] + (prod(self.shape[start_dim : end_dim + 1]),) + self.shape[end_dim + 1 :])

unflatten ¤

unflatten(dim: int, sizes: tuple[int, ...]) -> Self

Unflattens dimension dim of the tensor into multiple dimensions specified by sizes. Tensor.flatten() is the inverse of this function.

print(Tensor.ones(3, 4, 1).unflatten(1, (2, 2)).shape)
(3, 2, 2, 1)
print(Tensor.ones(3, 4, 1).unflatten(1, (-1, 2)).shape)
(3, 2, 2, 1)
print(Tensor.ones(5, 12, 3).unflatten(-2, (2, 2, 3, 1, 1)).shape)
(5, 2, 2, 3, 1, 1, 3)

Source code in tinygrad/mixin/movement.py
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
def unflatten(self, dim: int, sizes: tuple[int, ...]) -> Self:
  """
  Unflattens dimension `dim` of the tensor into multiple dimensions specified by `sizes`. `Tensor.flatten()` is the inverse of this function.

  ```python exec="true" source="above" session="tensor" result="python"
  print(Tensor.ones(3, 4, 1).unflatten(1, (2, 2)).shape)
  ```
  ```python exec="true" source="above" session="tensor" result="python"
  print(Tensor.ones(3, 4, 1).unflatten(1, (-1, 2)).shape)
  ```
  ```python exec="true" source="above" session="tensor" result="python"
  print(Tensor.ones(5, 12, 3).unflatten(-2, (2, 2, 3, 1, 1)).shape)
  ```
  """
  dim = self._resolve_dim(dim)
  return self.reshape(self.shape[:dim] + sizes + self.shape[dim + 1 :])

diag ¤

diag() -> Self

Returns a 2-D square tensor with the elements of input as the main diagonal.

print(Tensor([1, 2, 3]).diag().numpy())
[[1 0 0]
 [0 2 0]
 [0 0 3]]
Source code in tinygrad/mixin/movement.py
474
475
476
477
478
479
480
481
482
483
def diag(self) -> Self:
  """
  Builds a 2-D square tensor whose main diagonal holds the elements of the 1-D input.

  ```python exec="true" source="above" session="tensor" result="python"
  print(Tensor([1, 2, 3]).diag().numpy())
  ```
  """
  if self.ndim != 1: raise ValueError(f"expect input to be 1-D, getting {self.ndim}-D")
  n = self.shape[0]
  # pad each element out to a row of length n+1: flattened, consecutive elements land n+1 apart,
  # which is exactly the main-diagonal stride of an (n, n) matrix
  padded = self.unsqueeze(-1).pad_to((None, 1 + n))
  return padded.flatten().shrink_to((n * n,)).reshape(n, n)

diagonal ¤

diagonal(
    offset: int = 0, dim1: int = 0, dim2: int = 1
) -> Self

Returns a view of the diagonal elements with respect to dim1 and dim2. offset controls which diagonal: 0 is main, positive is above, negative is below.

t = Tensor.arange(9).reshape(3, 3)
print(t.numpy())
[[0 1 2]
 [3 4 5]
 [6 7 8]]
print(t.diagonal().numpy())
[0 4 8]
print(t.diagonal(offset=1).numpy())
[1 5]

Source code in tinygrad/mixin/movement.py
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
def diagonal(self, offset:int=0, dim1:int=0, dim2:int=1) -> Self:
  """
  Returns a view of the diagonal elements with respect to `dim1` and `dim2`.
  `offset` controls which diagonal: 0 is main, positive is above, negative is below.

  ```python exec="true" source="above" session="tensor" result="python"
  t = Tensor.arange(9).reshape(3, 3)
  print(t.numpy())
  ```
  ```python exec="true" source="above" session="tensor" result="python"
  print(t.diagonal().numpy())
  ```
  ```python exec="true" source="above" session="tensor" result="python"
  print(t.diagonal(offset=1).numpy())
  ```
  """
  if (dim1:=self._resolve_dim(dim1)) == (dim2:=self._resolve_dim(dim2)): raise RuntimeError("dim1 and dim2 cannot be the same dimension")
  # move dim1/dim2 to the last two axes so the diagonal is taken over a 2-D tail
  x = self.permute(*[i for i in range(self.ndim) if i != dim1 and i != dim2], dim1, dim2)
  # shift to the requested diagonal: positive offset drops leading columns, negative drops leading rows
  if offset >= 0: x = x.shrink(tuple(None for _ in x.shape[:-1]) + ((offset, x.shape[-1]),))
  else: x = x.shrink(tuple(None for _ in x.shape[:-2]) + ((-offset, x.shape[-2]), None))
  # d = diagonal length; if the offset pushed the diagonal off the matrix, return a size-0 trailing axis
  if (d := min(int(x.shape[-2]), int(x.shape[-1]))) <= 0: return x.reshape(*x.shape[:-2], 0)
  nones, x = tuple(None for _ in x.shape[:-2]), x.shrink_to(tuple(None for _ in x.shape[:-2]) + (d, d))
  # flatten the (d, d) tail, pad to d*(d+1) and reshape to (d, d+1): diagonal elements line up in column 0
  return x.flatten(-2).pad_to(nones+(d*(d+1),)).unflatten(-1, (d, d+1)).shrink_to(nones+(None, 1)).squeeze(-1)

roll ¤

roll(
    shifts: int | tuple[int, ...],
    dims: int | tuple[int, ...] | None = None,
) -> Self

Rolls the tensor along specified dimension(s). The rolling operation is circular, meaning that elements that go beyond the edge are wrapped around to the beginning of the dimension.

t = Tensor.arange(4)
print(t.roll(shifts=1, dims=0).numpy())
[3 0 1 2]
print(t.roll(shifts=-1, dims=0).numpy())
[1 2 3 0]

Source code in tinygrad/mixin/movement.py
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
def roll(self, shifts:int|tuple[int, ...], dims:int|tuple[int, ...]|None=None) -> Self:
  """
  Shifts the tensor circularly along the given dimension(s): elements pushed past one edge
  reappear at the opposite edge of that dimension.

  ```python exec="true" source="above" session="tensor" result="python"
  t = Tensor.arange(4)
  print(t.roll(shifts=1, dims=0).numpy())
  ```
  ```python exec="true" source="above" session="tensor" result="python"
  print(t.roll(shifts=-1, dims=0).numpy())
  ```
  """
  # no dims: roll the flattened tensor, then restore the original shape
  if dims is None: return self.flatten().roll(shifts, 0).reshape(self.shape)
  dims = tuple(self._resolve_dim(d) for d in make_tuple(dims, 1))
  shifts = make_tuple(shifts, 1)
  if len(dims) != len(shifts): raise RuntimeError(f"{len(dims)=} != {len(shifts)=}")
  # for each rolled axis, pick a shape[d]-wide window out of the axis tiled twice
  windows: list[tuple[sint, sint]|None] = [None] * self.ndim
  for d, s in zip(dims, shifts):
    start = self.shape[d] - s % self.shape[d]
    windows[d] = (start, start + self.shape[d])
  doubled = self.repeat(*tuple(2 if i in dims else 1 for i in range(self.ndim)))
  return doubled.shrink(tuple(windows))

rearrange ¤

rearrange(formula: str, **sizes) -> Self

Rearranges the input according to a formula.

See: https://einops.rocks/api/rearrange/

x = Tensor([[1, 2], [3, 4]])
print(Tensor.rearrange(x, "batch channel -> (batch channel)").numpy())
[1 2 3 4]
Source code in tinygrad/mixin/movement.py
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
def rearrange(self, formula: str, **sizes) -> Self:
  """
  Rearranges input according to formula

  See: https://einops.rocks/api/rearrange/

  ```python exec="true" source="above" session="tensor" result="python"
  x = Tensor([[1, 2], [3, 4]])
  print(Tensor.rearrange(x, "batch channel -> (batch channel)").numpy())
  ```
  """

  def parse_side(s: str) -> tuple[list[str], list[tuple[int, int]]]:
    """Parse one side of formula into (axis_names, dims) where dims are (start, end) index pairs for parens."""
    # pad/split so "(" and ")" become standalone tokens; a literal "1" axis becomes an empty group "( )"
    tokens = f" {s} ".replace("…", "...").replace("(", " ( ").replace(")", " ) ").replace(" ", "  ").replace(" 1 ", " ( ) ").split()
    lparens, rparens = [i for i, tok in enumerate(tokens) if tok == "("], [i for i, tok in enumerate(tokens) if tok == ")"]
    pairs = list(zip(lparens, rparens))
    # sorted-flatten check rejects nested or crossing parens (only flat groups are valid)
    assert len(lparens) == len(rparens) and sorted(flatten(pairs)) == flatten(pairs), "bracket mismatch"
    # the -2*i / -1-2*i corrections map token indices to axis-name indices after the parens are removed
    return [tok for tok in tokens if tok not in ("(", ")")], [(lp - 2*i, rp - 1 - 2*i) for i, (lp, rp) in enumerate(pairs)]

  assert formula.count("->") == 1, 'need exactly one "->" in formula'
  (lhs, unflatten_dims), (rhs, flatten_dims) = map(parse_side, formula.split("->"))

  for name in sizes: assert name in lhs, f"axis {name} is not used in transform"
  assert sorted(lhs) == sorted(rhs) and len(lhs) == len(set(lhs)), f"name mismatch in {formula}"
  for name in lhs+rhs: assert name == "..." or (name.isidentifier() and "_" not in (name[0], name[-1])), f"invalid axis name {name}"
  assert "..." not in flatten([lhs[s:e] for s, e in unflatten_dims]), f"cannot have collapsed ellipsis (...) in lhs of {formula}"
  assert lhs.count("...") <= 1, f"too many ellipses in {formula}"

  # resolve ellipsis: replace "..." with ell_len fresh axis names "...0", "...1", ... on both sides
  if "..." in lhs:
    ell_len = len(self.shape) - len(lhs) + 1 + sum(e - s - 1 for s, e in unflatten_dims)
    lhs, rhs = map(lambda l: l[:(i := l.index("..."))] + [f"...{j}" for j in range(ell_len)] + l[i + 1:] if "..." in l else l, (lhs, rhs))
    # shift recorded group boundaries that sit after the expanded ellipsis
    def newdims(side, s, e): return (s + (ell_len - 1 if "...0" in side[:s] else 0), e + (ell_len - 1 if "...0" in side[:e] else 0))
    unflatten_dims, flatten_dims = [newdims(lhs, s, e) for s, e in unflatten_dims], [newdims(rhs, s, e) for s, e in flatten_dims]

  # unflatten -> permute -> flatten
  t = self
  # lhs groups: split each grouped input axis into its named components (-1 lets reshape infer unspecified sizes)
  for start, end in unflatten_dims: t = t.unflatten(start, tuple(sizes.get(lhs[i], -1) for i in range(start, end)))
  for i, name in enumerate(lhs):
    if name in sizes: assert sizes[name] == t.shape[i], f"size provided for dimension {name} incorrect"
  t = t.permute([lhs.index(name) for name in rhs])
  # rhs groups: merge each group back into one axis; an empty group ("1" axis) becomes an unsqueeze
  for start, end in reversed(flatten_dims): t = t.flatten(start, end - 1) if start < end else t.unsqueeze(start)
  return t