Elementwise

Elementwise ops operate on a per-element basis. They don't change the shape of the tensor.
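
For example, an op documented below maps each element independently and returns a tensor of the same shape (a minimal sketch):

from tinygrad import Tensor

t = Tensor([[1., -2.], [3., -4.]])
print(t.relu().shape == t.shape)  # True: same shape in, same shape out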

Unary Ops (math)¤

logical_not ¤

logical_not() -> Tensor

Computes the logical NOT of the tensor element-wise.

print(Tensor([False, True]).logical_not().numpy())
[ True False]
Source code in tinygrad/tensor.py
def logical_not(self) -> Tensor:
  """
  Computes the logical NOT of the tensor element-wise.

  ```python exec="true" source="above" session="tensor" result="python"
  print(Tensor([False, True]).logical_not().numpy())
  ```
  """
  return self.cast(dtypes.bool).ne(True)

neg ¤

neg() -> Tensor

Negates the tensor element-wise.

print(Tensor([-3., -2., -1., 0., 1., 2., 3.]).neg().numpy())
[ 3.  2.  1. -0. -1. -2. -3.]
Source code in tinygrad/tensor.py
def neg(self) -> Tensor:
  """
  Negates the tensor element-wise.

  ```python exec="true" source="above" session="tensor" result="python"
  print(Tensor([-3., -2., -1., 0., 1., 2., 3.]).neg().numpy())
  ```
  """
  return self*-1 if self.dtype != dtypes.bool else self.logical_not()

log ¤

log() -> Self

Computes the natural logarithm element-wise.

See: https://en.wikipedia.org/wiki/Logarithm

print(Tensor([1., 2., 4., 8.]).log().numpy())
[0.     0.6931 1.3863 2.0794]
Source code in tinygrad/mixin/elementwise.py
def log(self) -> Self:
  """
  Computes the natural logarithm element-wise.

  See: https://en.wikipedia.org/wiki/Logarithm

  ```python exec="true" source="above" session="tensor" result="python"
  print(Tensor([1., 2., 4., 8.]).log().numpy())
  ```
  """
  return self.log2()*math.log(2)

log2 ¤

log2() -> Self

Computes the base-2 logarithm element-wise.

See: https://en.wikipedia.org/wiki/Logarithm

print(Tensor([1., 2., 4., 8.]).log2().numpy())
[0. 1. 2. 3.]
Source code in tinygrad/mixin/elementwise.py
def log2(self) -> Self:
  """
  Computes the base-2 logarithm element-wise.

  See: https://en.wikipedia.org/wiki/Logarithm

  ```python exec="true" source="above" session="tensor" result="python"
  print(Tensor([1., 2., 4., 8.]).log2().numpy())
  ```
  """
  return self._ensure_float().alu(Ops.LOG2)

log10 ¤

log10() -> Self

Computes the base-10 logarithm element-wise.

See: https://en.wikipedia.org/wiki/Logarithm

print(Tensor([1., 2., 4., 8.]).log10().numpy())
[0.     0.301  0.6021 0.9031]
Source code in tinygrad/mixin/elementwise.py
def log10(self) -> Self:
  """
  Computes the base-10 logarithm element-wise.

  See: https://en.wikipedia.org/wiki/Logarithm

  ```python exec="true" source="above" session="tensor" result="python"
  print(Tensor([1., 2., 4., 8.]).log10().numpy())
  ```
  """
  return self.log2()*math.log10(2)

exp ¤

exp() -> Self

Computes the exponential function element-wise.

See: https://en.wikipedia.org/wiki/Exponential_function

print(Tensor([0., 1., 2., 3.]).exp().numpy())
[ 1.      2.7183  7.3891 20.0855]
Source code in tinygrad/mixin/elementwise.py
def exp(self) -> Self:
  """
  Computes the exponential function element-wise.

  See: https://en.wikipedia.org/wiki/Exponential_function

  ```python exec="true" source="above" session="tensor" result="python"
  print(Tensor([0., 1., 2., 3.]).exp().numpy())
  ```
  """
  if self.is_floating_point():
    return self.cast(least_upper_dtype(self.dtype, dtypes.float32)).mul(1/math.log(2)).exp2().cast(self.dtype)
  return self.mul(1/math.log(2)).exp2()

exp2 ¤

exp2() -> Self

Computes the base-2 exponential function element-wise.

See: https://en.wikipedia.org/wiki/Exponential_function

print(Tensor([0., 1., 2., 3.]).exp2().numpy())
[1. 2. 4. 8.]
Source code in tinygrad/mixin/elementwise.py
def exp2(self) -> Self:
  """
  Computes the base-2 exponential function element-wise.

  See: https://en.wikipedia.org/wiki/Exponential_function

  ```python exec="true" source="above" session="tensor" result="python"
  print(Tensor([0., 1., 2., 3.]).exp2().numpy())
  ```
  """
  return self._ensure_float().alu(Ops.EXP2)

sqrt ¤

sqrt() -> Self

Computes the square root of the tensor element-wise.

print(Tensor([1., 2., 3., 4.]).sqrt().numpy())
[1.     1.4142 1.7321 2.    ]
Source code in tinygrad/mixin/elementwise.py
def sqrt(self) -> Self:
  """
  Computes the square root of the tensor element-wise.

  ```python exec="true" source="above" session="tensor" result="python"
  print(Tensor([1., 2., 3., 4.]).sqrt().numpy())
  ```
  """
  return self._ensure_float().alu(Ops.SQRT)

rsqrt ¤

rsqrt() -> Self

Computes the reciprocal of the square root of the tensor element-wise.

print(Tensor([1., 2., 3., 4.]).rsqrt().numpy())
[1.     0.7071 0.5774 0.5   ]
Source code in tinygrad/mixin/elementwise.py
def rsqrt(self) -> Self:
  """
  Computes the reciprocal of the square root of the tensor element-wise.

  ```python exec="true" source="above" session="tensor" result="python"
  print(Tensor([1., 2., 3., 4.]).rsqrt().numpy())
  ```
  """
  return self.sqrt().reciprocal()

sin ¤

sin() -> Self

Computes the sine of the tensor element-wise.

print(Tensor([0., math.pi/2, math.pi, 3*math.pi/2, 2*math.pi]).sin().numpy())
[ 0.  1. -0. -1.  0.]
Source code in tinygrad/mixin/elementwise.py
def sin(self) -> Self:
  """
  Computes the sine of the tensor element-wise.

  ```python exec="true" source="above" session="tensor" result="python"
  print(Tensor([0., math.pi/2, math.pi, 3*math.pi/2, 2*math.pi]).sin().numpy())
  ```
  """
  return self._ensure_float().alu(Ops.SIN)

cos ¤

cos() -> Self

Computes the cosine of the tensor element-wise.

print(Tensor([0., math.pi/2, math.pi, 3*math.pi/2, 2*math.pi]).cos().numpy())
[ 1.0000e+00  0.0000e+00 -1.0000e+00 -2.3842e-07  1.0000e+00]
Source code in tinygrad/mixin/elementwise.py
def cos(self) -> Self:
  """
  Computes the cosine of the tensor element-wise.

  ```python exec="true" source="above" session="tensor" result="python"
  print(Tensor([0., math.pi/2, math.pi, 3*math.pi/2, 2*math.pi]).cos().numpy())
  ```
  """
  if self.is_floating_point(): return ((math.pi/2)-self.cast(least_upper_dtype(self.dtype, dtypes.float32))).sin().cast(self.dtype)
  return ((math.pi/2)-self).sin()

tan ¤

tan() -> Self

Computes the tangent of the tensor element-wise.

print(Tensor([0., math.pi/4, math.pi/2, 3*math.pi/4, math.pi]).tan().numpy())
[ 0.  1. inf -1.  0.]
Source code in tinygrad/mixin/elementwise.py
def tan(self) -> Self:
  """
  Computes the tangent of the tensor element-wise.

  ```python exec="true" source="above" session="tensor" result="python"
  print(Tensor([0., math.pi/4, math.pi/2, 3*math.pi/4, math.pi]).tan().numpy())
  ```
  """
  return self.sin() / self.cos()

asin ¤

asin() -> Self

Computes the inverse sine (arcsine) of the tensor element-wise.

print(Tensor([-0.9, -0.6, -0.3, 0., 0.3, 0.6, 0.9]).asin().numpy())
[-1.1198 -0.6435 -0.3047  0.      0.3047  0.6435  1.1198]
Source code in tinygrad/mixin/elementwise.py
def asin(self) -> Self:
  """
  Computes the inverse sine (arcsine) of the tensor element-wise.

  ```python exec="true" source="above" session="tensor" result="python"
  print(Tensor([-0.9, -0.6, -0.3, 0., 0.3, 0.6, 0.9]).asin().numpy())
  ```
  """
  # https://personal.math.ubc.ca/~cbm/aands/page_81.htm 4.4.46
  coefficients = [-0.0012624911, 0.0066700901, -0.0170881256, 0.0308918810, -0.0501743046, 0.0889789874, -0.2145988016, 1.5707963050]
  x = math.pi / 2 - (1.0 - self.abs()).sqrt() * polyN(self.abs(), coefficients)
  return self.sign() * x

acos ¤

acos() -> Self

Computes the inverse cosine (arccosine) of the tensor element-wise.

print(Tensor([-0.9, -0.6, -0.3, 0., 0.3, 0.6, 0.9]).acos().numpy())
[2.6906 2.2143 1.8755 1.5708 1.2661 0.9273 0.451 ]
Source code in tinygrad/mixin/elementwise.py
def acos(self) -> Self:
  """
  Computes the inverse cosine (arccosine) of the tensor element-wise.

  ```python exec="true" source="above" session="tensor" result="python"
  print(Tensor([-0.9, -0.6, -0.3, 0., 0.3, 0.6, 0.9]).acos().numpy())
  ```
  """
  return math.pi / 2 - self.asin()

atan ¤

atan() -> Self

Computes the inverse tangent (arctan) of the tensor element-wise.

print(Tensor([-3., -2., -1., 0., 1., 2., 3.]).atan().numpy())
[-1.249  -1.1071 -0.7854  0.      0.7854  1.1071  1.249 ]
Source code in tinygrad/mixin/elementwise.py
def atan(self) -> Self:
  """
  Computes the inverse tangent (arctan) of the tensor element-wise.

  ```python exec="true" source="above" session="tensor" result="python"
  print(Tensor([-3., -2., -1., 0., 1., 2., 3.]).atan().numpy())
  ```
  """
  return (self / (1 + self * self).sqrt()).asin()

trunc ¤

trunc() -> Self

Truncates the tensor element-wise.

print(Tensor([-3.5, -2.5, -1.5, -0.5, 0.5, 1.5, 2.5, 3.5]).trunc().numpy())
[-3. -2. -1. -0.  0.  1.  2.  3.]
Source code in tinygrad/mixin/elementwise.py
def trunc(self) -> Self:
  """
  Truncates the tensor element-wise.

  ```python exec="true" source="above" session="tensor" result="python"
  print(Tensor([-3.5, -2.5, -1.5, -0.5, 0.5, 1.5, 2.5, 3.5]).trunc().numpy())
  ```
  """
  return self.alu(Ops.TRUNC)

ceil ¤

ceil() -> Self

Rounds the tensor element-wise towards positive infinity.

print(Tensor([-3.5, -2.5, -1.5, -0.5, 0.5, 1.5, 2.5, 3.5]).ceil().numpy())
[-3. -2. -1. -0.  1.  2.  3.  4.]
Source code in tinygrad/mixin/elementwise.py
def ceil(self) -> Self:
  """
  Rounds the tensor element-wise towards positive infinity.

  ```python exec="true" source="above" session="tensor" result="python"
  print(Tensor([-3.5, -2.5, -1.5, -0.5, 0.5, 1.5, 2.5, 3.5]).ceil().numpy())
  ```
  """
  return (self > (b := self.trunc())).where(b+1, b)

floor ¤

floor() -> Self

Rounds the tensor element-wise towards negative infinity.

print(Tensor([-3.5, -2.5, -1.5, -0.5, 0.5, 1.5, 2.5, 3.5]).floor().numpy())
[-4. -3. -2. -1.  0.  1.  2.  3.]
Source code in tinygrad/mixin/elementwise.py
def floor(self) -> Self:
  """
  Rounds the tensor element-wise towards negative infinity.

  ```python exec="true" source="above" session="tensor" result="python"
  print(Tensor([-3.5, -2.5, -1.5, -0.5, 0.5, 1.5, 2.5, 3.5]).floor().numpy())
  ```
  """
  return (self < (b := self.trunc())).where(b-1, b)

round ¤

round() -> Self

Rounds the tensor element-wise with rounding half to even.

print(Tensor([-3.5, -2.5, -1.5, -0.5, 0.5, 1.5, 2.5, 3.5]).round().numpy())
[-4. -2. -2.  0.  0.  2.  2.  4.]
Source code in tinygrad/mixin/elementwise.py
def round(self) -> Self:
  """
  Rounds the tensor element-wise with rounding half to even.

  ```python exec="true" source="above" session="tensor" result="python"
  print(Tensor([-3.5, -2.5, -1.5, -0.5, 0.5, 1.5, 2.5, 3.5]).round().numpy())
  ```
  """
  return ((self > 0).eq((b := self.trunc() / 2.0).trunc().eq(b))).where((self - 0.5).ceil(), (self + 0.5).floor())
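
Half-to-even ("banker's") rounding differs from the common add-0.5-then-floor scheme at exact halves; a quick comparison, with expected values following from the definition above:

t = Tensor([0.5, 1.5, 2.5, 3.5])
print(t.round().numpy())          # expected: [0. 2. 2. 4.] -- ties go to the even neighbor
print((t + 0.5).floor().numpy())  # expected: [1. 2. 3. 4.] -- ties always round up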

isinf ¤

isinf(
    detect_positive: bool = True,
    detect_negative: bool = True,
) -> Self

Checks the tensor element-wise to return True where the element is infinity, otherwise returns False.

print(Tensor([1, float('inf'), 2, float('-inf'), float('nan')]).isinf().numpy())
[False  True False  True False]
Source code in tinygrad/mixin/elementwise.py
def isinf(self, detect_positive: bool = True, detect_negative: bool = True) -> Self:
  """
  Checks the tensor element-wise to return True where the element is infinity, otherwise returns False

  ```python exec="true" source="above" session="tensor" result="python"
  print(Tensor([1, float('inf'), 2, float('-inf'), float('nan')]).isinf().numpy())
  ```
  """
  return self.eq(float("inf")) * detect_positive + self.eq(float("-inf")) * detect_negative
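
The detect_positive and detect_negative flags restrict the check to one sign of infinity, as the source above shows; a minimal sketch with expected results:

t = Tensor([1, float('inf'), 2, float('-inf'), float('nan')])
print(t.isinf(detect_negative=False).numpy())  # expected: [False  True False False False]
print(t.isinf(detect_positive=False).numpy())  # expected: [False False False  True False]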

isnan ¤

isnan() -> Self

Checks the tensor element-wise to return True where the element is NaN, otherwise returns False.

print(Tensor([1, float('inf'), 2, float('-inf'), float('nan')]).isnan().numpy())
[False False False False  True]
Source code in tinygrad/mixin/elementwise.py
def isnan(self) -> Self:
  """
  Checks the tensor element-wise to return True where the element is NaN, otherwise returns False

  ```python exec="true" source="above" session="tensor" result="python"
  print(Tensor([1, float('inf'), 2, float('-inf'), float('nan')]).isnan().numpy())
  ```
  """
  return self != self

isfinite ¤

isfinite() -> Self

Checks the tensor element-wise to return True where the element is finite, otherwise returns False.

print(Tensor([1, float('inf'), 2, float('-inf'), float('nan')]).isfinite().numpy())
[ True False  True False False]
Source code in tinygrad/mixin/elementwise.py
def isfinite(self) -> Self:
  """
  Checks the tensor element-wise to return True where the element is finite, otherwise returns False

  ```python exec="true" source="above" session="tensor" result="python"
  print(Tensor([1, float('inf'), 2, float('-inf'), float('nan')]).isfinite().numpy())
  ```
  """
  return (self.isinf() | self.isnan()).logical_not()

lerp ¤

lerp(end: Tensor, weight: Tensor | float) -> Tensor

Linearly interpolates between self and end by weight.

print(Tensor([1., 2., 3.]).lerp(Tensor([4., 5., 6.]), 0.5).numpy())
[2.5 3.5 4.5]
Source code in tinygrad/tensor.py
def lerp(self, end:Tensor, weight:Tensor|float) -> Tensor:
  """
  Linearly interpolates between `self` and `end` by `weight`.

  ```python exec="true" source="above" session="tensor" result="python"
  print(Tensor([1., 2., 3.]).lerp(Tensor([4., 5., 6.]), 0.5).numpy())
  ```
  """
  if self.dtype == dtypes.uint8 and isinstance(weight, Tensor):
    w_i = (weight * (1<<(W_PREC:=7)) + 0.5).cast(dtypes.int16)
    return (self+(((end - self).cast(dtypes.int8) * w_i + (1<<W_PREC-1)).cast(dtypes.uint16) >> W_PREC)).cast(dtypes.uint8)
  return self + (end - self) * weight
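
weight may also be a Tensor, broadcasting against self and end to interpolate per element; the uint8 branch above computes the same thing in fixed-point arithmetic. A minimal sketch:

start, end = Tensor([0., 10., 20.]), Tensor([100., 100., 100.])
print(start.lerp(end, Tensor([0.0, 0.5, 1.0])).numpy())  # expected: [  0.  55. 100.]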

square ¤

square() -> Self

Squares the tensor element-wise. Equivalent to self*self.

print(Tensor([-3., -2., -1., 0., 1., 2., 3.]).square().numpy())
[9. 4. 1. 0. 1. 4. 9.]
Source code in tinygrad/mixin/elementwise.py
def square(self) -> Self:
  """
  Squares the tensor element-wise.
  Equivalent to `self*self`.

  ```python exec="true" source="above" session="tensor" result="python"
  print(Tensor([-3., -2., -1., 0., 1., 2., 3.]).square().numpy())
  ```
  """
  return self * self

clamp ¤

clamp(min_=None, max_=None) -> Self

Clips (clamps) the values in the tensor between min_ and max_ element-wise. If min_ is None, there is no lower bound. If max_ is None, there is no upper bound.

print(Tensor([-3., -2., -1., 0., 1., 2., 3.]).clip(-1, 1).numpy())
[-1. -1. -1.  0.  1.  1.  1.]
Source code in tinygrad/mixin/elementwise.py
def clamp(self, min_=None, max_=None) -> Self:
  """
  Clips (clamps) the values in the tensor between `min_` and `max_` element-wise.
  If `min_` is `None`, there is no lower bound. If `max_` is None, there is no upper bound.

  ```python exec="true" source="above" session="tensor" result="python"
  print(Tensor([-3., -2., -1., 0., 1., 2., 3.]).clip(-1, 1).numpy())
  ```
  """
  if min_ is None and max_ is None: raise RuntimeError("at least one of 'min_' or 'max_' must not be None")
  ret = (self < min_).where(min_, self) if min_ is not None else self
  return (ret > max_).where(max_, ret) if max_ is not None else ret
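
Passing only one bound clips on that side alone, per the None handling above:

t = Tensor([-3., -2., -1., 0., 1., 2., 3.])
print(t.clamp(min_=0).numpy())  # expected: [0. 0. 0. 0. 1. 2. 3.]
print(t.clamp(max_=0).numpy())  # expected: [-3. -2. -1.  0.  0.  0.  0.]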

clip ¤

clip(min_=None, max_=None) -> Self

Alias for Tensor.clamp.

Source code in tinygrad/mixin/elementwise.py
def clip(self, min_=None, max_=None) -> Self:
  """Alias for `Tensor.clamp`."""
  return self.clamp(min_, max_)

sign ¤

sign() -> Self

Returns the sign of the tensor element-wise.

print(Tensor([-3., -2., -1., 0., 1., 2., 3.]).sign().numpy())
[-1. -1. -1.  0.  1.  1.  1.]
Source code in tinygrad/mixin/elementwise.py
def sign(self) -> Self:
  """
  Returns the sign of the tensor element-wise.

  ```python exec="true" source="above" session="tensor" result="python"
  print(Tensor([-3., -2., -1., 0., 1., 2., 3.]).sign().numpy())
  ```
  """
  return self.ne(0).where((self < 0).where(self.const_like(-1), self.const_like(1)), self.const_like(0)) + self * 0

abs ¤

abs() -> Self

Computes the absolute value of the tensor element-wise.

print(Tensor([-3., -2., -1., 0., 1., 2., 3.]).abs().numpy())
[3. 2. 1. 0. 1. 2. 3.]
Source code in tinygrad/mixin/elementwise.py
def abs(self) -> Self:
  """
  Computes the absolute value of the tensor element-wise.

  ```python exec="true" source="above" session="tensor" result="python"
  print(Tensor([-3., -2., -1., 0., 1., 2., 3.]).abs().numpy())
  ```
  """
  return self * self.sign()

reciprocal ¤

reciprocal() -> Self

Computes 1/x element-wise.

print(Tensor([1., 2., 3., 4.]).reciprocal().numpy())
[1.     0.5    0.3333 0.25  ]
Source code in tinygrad/mixin/elementwise.py
def reciprocal(self) -> Self:
  """
  Computes `1/x` element-wise.

  ```python exec="true" source="above" session="tensor" result="python"
  print(Tensor([1., 2., 3., 4.]).reciprocal().numpy())
  ```
  """
  return self._ensure_float().alu(Ops.RECIPROCAL)

Unary Ops (activation)¤

relu ¤

relu() -> Self

Applies the Rectified Linear Unit (ReLU) function element-wise.

print(Tensor([-3., -2., -1., 0., 1., 2., 3.]).relu().numpy())
[0. 0. 0. 0. 1. 2. 3.]
Source code in tinygrad/mixin/elementwise.py
def relu(self) -> Self:
  """
  Applies the Rectified Linear Unit (ReLU) function element-wise.

  ```python exec="true" source="above" session="tensor" result="python"
  print(Tensor([-3., -2., -1., 0., 1., 2., 3.]).relu().numpy())
  ```
  """
  # NOTE: if you write this as self.maximum(0) the gradient is wrong, passing through half when self is 0
  return (self > 0).where(self, 0)

sigmoid ¤

sigmoid() -> Self

Applies the Sigmoid function element-wise.

print(Tensor([-3., -2., -1., 0., 1., 2., 3.]).sigmoid().numpy())
[0.0474 0.1192 0.2689 0.5    0.7311 0.8808 0.9526]
Source code in tinygrad/mixin/elementwise.py
def sigmoid(self) -> Self:
  """
  Applies the Sigmoid function element-wise.

  - Described: https://en.wikipedia.org/wiki/Sigmoid_function

  ```python exec="true" source="above" session="tensor" result="python"
  print(Tensor([-3., -2., -1., 0., 1., 2., 3.]).sigmoid().numpy())
  ```
  """
  return (1 + (self * (-1/math.log(2))).exp2()).reciprocal()

logsigmoid ¤

logsigmoid() -> Tensor

Applies the LogSigmoid function element-wise.

print(Tensor([-3., -2., -1., 0., 1., 2., 3.]).logsigmoid().numpy())
[-3.0486 -2.1269 -1.3133 -0.6931 -0.3133 -0.1269 -0.0486]
Source code in tinygrad/tensor.py
def logsigmoid(self) -> Tensor:
  """
  Applies the LogSigmoid function element-wise.

  - See: https://docs.pytorch.org/docs/stable/generated/torch.nn.functional.logsigmoid.html

  ```python exec="true" source="above" session="tensor" result="python"
  print(Tensor([-3., -2., -1., 0., 1., 2., 3.]).logsigmoid().numpy())
  ```
  """
  return -(-self).softplus()

hardsigmoid ¤

hardsigmoid(
    alpha: float = 1 / 6, beta: float = 0.5
) -> Self

Applies the Hardsigmoid function element-wise. NOTE: default alpha and beta values are taken from torch.

print(Tensor([-3., -2., -1., 0., 1., 2., 3.]).hardsigmoid().numpy())
[0.     0.1667 0.3333 0.5    0.6667 0.8333 1.    ]
Source code in tinygrad/mixin/elementwise.py
def hardsigmoid(self, alpha: float = 1/6, beta: float = 0.5) -> Self:
  """
  Applies the Hardsigmoid function element-wise.
  NOTE: default `alpha` and `beta` values are taken from torch

  - See: https://pytorch.org/docs/stable/generated/torch.nn.functional.hardsigmoid.html

  ```python exec="true" source="above" session="tensor" result="python"
  print(Tensor([-3., -2., -1., 0., 1., 2., 3.]).hardsigmoid().numpy())
  ```
  """
  return (alpha * self + beta).relu() - (alpha * self + beta - 1).relu()

elu ¤

elu(alpha=1.0) -> Self

Applies the Exponential Linear Unit (ELU) function element-wise.

print(Tensor([-3., -2., -1., 0., 1., 2., 3.]).elu().numpy())
[-0.9502 -0.8647 -0.6321  0.      1.      2.      3.    ]
Source code in tinygrad/mixin/elementwise.py
def elu(self, alpha=1.0) -> Self:
  """
  Applies the Exponential Linear Unit (ELU) function element-wise.

  - Paper: https://arxiv.org/abs/1511.07289v5

  ```python exec="true" source="above" session="tensor" result="python"
  print(Tensor([-3., -2., -1., 0., 1., 2., 3.]).elu().numpy())
  ```
  """
  return self.relu() - alpha*(1-self.exp()).relu()

celu ¤

celu(alpha=1.0) -> Self

Applies the Continuously differentiable Exponential Linear Unit (CELU) function element-wise.

print(Tensor([-3., -2., -1., 0., 1., 2., 3.]).celu().numpy())
[-0.9502 -0.8647 -0.6321  0.      1.      2.      3.    ]
Source code in tinygrad/mixin/elementwise.py
def celu(self, alpha=1.0) -> Self:
  """
  Applies the Continuously differentiable Exponential Linear Unit (CELU) function element-wise.

  - Paper: https://arxiv.org/abs/1704.07483

  ```python exec="true" source="above" session="tensor" result="python"
  print(Tensor([-3., -2., -1., 0., 1., 2., 3.]).celu().numpy())
  ```
  """
  return self.maximum(0) + (alpha * ((self / alpha).exp() - 1)).minimum(0)

selu ¤

selu(alpha=1.67326, gamma=1.0507) -> Tensor

Applies the Scaled Exponential Linear Unit (SELU) function element-wise.

print(Tensor([-3., -2., -1., 0., 1., 2., 3.]).selu().numpy())
[-1.6706 -1.5202 -1.1113  0.      1.0507  2.1014  3.1521]
Source code in tinygrad/tensor.py
def selu(self, alpha=1.67326, gamma=1.0507) -> Tensor:
  """
  Applies the Scaled Exponential Linear Unit (SELU) function element-wise.

  - Paper: https://arxiv.org/abs/1706.02515v5

  ```python exec="true" source="above" session="tensor" result="python"
  print(Tensor([-3., -2., -1., 0., 1., 2., 3.]).selu().numpy())
  ```
  """
  return gamma * (self >= 0).detach().where(self, alpha * (self.exp() - 1))

swish ¤

swish() -> Self

See .silu()

print(Tensor([-3., -2., -1., 0., 1., 2., 3.]).swish().numpy())
[-0.1423 -0.2384 -0.2689  0.      0.7311  1.7616  2.8577]
Source code in tinygrad/mixin/elementwise.py
def swish(self) -> Self:
  """
  See `.silu()`

  - Paper: https://arxiv.org/abs/1710.05941v1

  ```python exec="true" source="above" session="tensor" result="python"
  print(Tensor([-3., -2., -1., 0., 1., 2., 3.]).swish().numpy())
  ```
  """
  return self * self.sigmoid()

silu ¤

silu() -> Self

Applies the Sigmoid Linear Unit (SiLU) function element-wise.

print(Tensor([-3., -2., -1., 0., 1., 2., 3.]).silu().numpy())
[-0.1423 -0.2384 -0.2689  0.      0.7311  1.7616  2.8577]
Source code in tinygrad/mixin/elementwise.py
def silu(self) -> Self:
  """
  Applies the Sigmoid Linear Unit (SiLU) function element-wise.

  - Paper: https://arxiv.org/abs/1606.08415

  ```python exec="true" source="above" session="tensor" result="python"
  print(Tensor([-3., -2., -1., 0., 1., 2., 3.]).silu().numpy())
  ```
  """
  return self.swish()  # The SiLU function is also known as the swish function.

relu6 ¤

relu6() -> Self

Applies the ReLU6 function element-wise.

print(Tensor([-9., -6., -3., 0., 3., 6., 9.]).relu6().numpy())
[0. 0. 0. 0. 3. 6. 6.]
Source code in tinygrad/mixin/elementwise.py
def relu6(self) -> Self:
  """
  Applies the ReLU6 function element-wise.

  - Paper: https://arxiv.org/abs/1704.04861v1

  ```python exec="true" source="above" session="tensor" result="python"
  print(Tensor([-9., -6., -3., 0., 3., 6., 9.]).relu6().numpy())
  ```
  """
  return self.relu() - (self-6).relu()

hardswish ¤

hardswish() -> Self

Applies the Hardswish function element-wise.

print(Tensor([-3., -2., -1., 0., 1., 2., 3.]).hardswish().numpy())
[-0.     -0.3333 -0.3333  0.      0.6667  1.6667  3.    ]
Source code in tinygrad/mixin/elementwise.py
def hardswish(self) -> Self:
  """
  Applies the Hardswish function element-wise.

  - Paper: https://arxiv.org/abs/1905.02244v5

  ```python exec="true" source="above" session="tensor" result="python"
  print(Tensor([-3., -2., -1., 0., 1., 2., 3.]).hardswish().numpy())
  ```
  """
  return self * (self+3).relu6() * (1/6)

tanh ¤

tanh() -> Self

Applies the Hyperbolic Tangent (tanh) function element-wise.

print(Tensor([-3., -2., -1., 0., 1., 2., 3.]).tanh().numpy())
[-0.9951 -0.964  -0.7616  0.      0.7616  0.964   0.9951]
Source code in tinygrad/mixin/elementwise.py
def tanh(self) -> Self:
  """
  Applies the Hyperbolic Tangent (tanh) function element-wise.

  - Described: https://en.wikipedia.org/wiki/Hyperbolic_functions#Tanh

  ```python exec="true" source="above" session="tensor" result="python"
  print(Tensor([-3., -2., -1., 0., 1., 2., 3.]).tanh().numpy())
  ```
  """
  return 2.0 * ((2.0 * self).sigmoid()) - 1.0

sinh ¤

sinh() -> Self

Applies the Hyperbolic Sine (sinh) function element-wise.

print(Tensor([-3., -2., -1., 0., 1., 2., 3.]).sinh().numpy())
[-10.0179  -3.6269  -1.1752   0.       1.1752   3.6269  10.0179]
Source code in tinygrad/mixin/elementwise.py
def sinh(self) -> Self:
  """
  Applies the Hyperbolic Sine (sinh) function element-wise.

  - Described: https://en.wikipedia.org/wiki/Hyperbolic_functions#Sinh

  ```python exec="true" source="above" session="tensor" result="python"
  print(Tensor([-3., -2., -1., 0., 1., 2., 3.]).sinh().numpy())
  ```
  """
  return (self.exp() - self.neg().exp()) / 2

cosh ¤

cosh() -> Self

Applies the Hyperbolic Cosine (cosh) function element-wise.

print(Tensor([-3., -2., -1., 0., 1., 2., 3.]).cosh().numpy())
[10.0677  3.7622  1.5431  1.      1.5431  3.7622 10.0677]
Source code in tinygrad/mixin/elementwise.py
def cosh(self) -> Self:
  """
  Applies the Hyperbolic Cosine (cosh) function element-wise.

  - Described: https://en.wikipedia.org/wiki/Hyperbolic_functions#Cosh

  ```python exec="true" source="above" session="tensor" result="python"
  print(Tensor([-3., -2., -1., 0., 1., 2., 3.]).cosh().numpy())
  ```
  """
  return (self.exp() + self.neg().exp()) / 2

atanh ¤

atanh() -> Self

Applies the Inverse Hyperbolic Tangent (atanh) function element-wise.

print(Tensor([-0.9, -0.6, -0.3, 0., 0.3, 0.6, 0.9]).atanh().numpy())
[-1.4722 -0.6931 -0.3095  0.      0.3095  0.6931  1.4722]
Source code in tinygrad/mixin/elementwise.py
def atanh(self) -> Self:
  """
  Applies the Inverse Hyperbolic Tangent (atanh) function element-wise.

  - Described: https://en.wikipedia.org/wiki/Inverse_hyperbolic_functions#atanh

  ```python exec="true" source="above" session="tensor" result="python"
  print(Tensor([-0.9, -0.6, -0.3, 0., 0.3, 0.6, 0.9]).atanh().numpy())
  ```
  """
  return ((1 + self)/(1 - self)).log() / 2

asinh ¤

asinh() -> Self

Applies the Inverse Hyperbolic Sine (asinh) function element-wise.

print(Tensor([-3., -2., -1., 0., 1., 2., 3.]).asinh().numpy())
[-1.8184 -1.4436 -0.8814  0.      0.8814  1.4436  1.8184]
Source code in tinygrad/mixin/elementwise.py
def asinh(self) -> Self:
  """
  Applies the Inverse Hyperbolic Sine (asinh) function element-wise.

  - Described: https://en.wikipedia.org/wiki/Inverse_hyperbolic_functions#asinh

  ```python exec="true" source="above" session="tensor" result="python"
  print(Tensor([-3., -2., -1., 0., 1., 2., 3.]).asinh().numpy())
  ```
  """
  return (self + (self.square() + 1).sqrt()).log()

acosh ¤

acosh() -> Self

Applies the Inverse Hyperbolic Cosine (acosh) function element-wise.

print(Tensor([-3., -2., -1., 0., 1., 2., 3.]).acosh().numpy())
[   nan    nan    nan    nan 0.     1.317  1.7627]
Source code in tinygrad/mixin/elementwise.py
def acosh(self) -> Self:
  """
  Applies the Inverse Hyperbolic Cosine (acosh) function element-wise.

  - Described: https://en.wikipedia.org/wiki/Inverse_hyperbolic_functions#acosh

  ```python exec="true" source="above" session="tensor" result="python"
  print(Tensor([-3., -2., -1., 0., 1., 2., 3.]).acosh().numpy())
  ```
  """
  return (self + (self.square() - 1).sqrt()).log()

hardtanh ¤

hardtanh(min_val=-1, max_val=1) -> Self

Applies the Hardtanh function element-wise.

print(Tensor([-1.5, -1.0, -0.5, 0., 0.5, 1.0, 1.5]).hardtanh().numpy())
[-1.  -1.  -0.5  0.   0.5  1.   1. ]
Source code in tinygrad/mixin/elementwise.py
def hardtanh(self, min_val=-1, max_val=1) -> Self:
  """
  Applies the Hardtanh function element-wise.

  ```python exec="true" source="above" session="tensor" result="python"
  print(Tensor([-1.5, -1.0, -0.5, 0., 0.5, 1.0, 1.5]).hardtanh().numpy())
  ```
  """
  return self.clip(min_val, max_val)

erf ¤

erf() -> Self

Applies error function element-wise.

print(Tensor([-1.5, -1.0, -0.5, 0., 0.5, 1.0, 1.5]).erf().numpy())
[-0.9661 -0.8427 -0.5205  0.      0.5205  0.8427  0.9661]
Source code in tinygrad/mixin/elementwise.py
def erf(self) -> Self:
  """
  Applies error function element-wise.

  - Described: https://en.wikipedia.org/wiki/Error_function

  ```python exec="true" source="above" session="tensor" result="python"
  print(Tensor([-1.5, -1.0, -0.5, 0., 0.5, 1.0, 1.5]).erf().numpy())
  ```
  """
  # https://personal.math.ubc.ca/~cbm/aands/page_299.htm 7.1.26
  t = 1.0 / (1.0 + 0.3275911 * self.abs())
  return self.sign() * (1.0 - t * polyN(t, [1.061405429, -1.453152027, 1.421413741, -0.284496736, 0.254829592]) * (-self.square()).exp())

gelu ¤

gelu() -> Self

Applies the Gaussian Error Linear Unit (GELU) function element-wise.

print(Tensor([-3., -2., -1., 0., 1., 2., 3.]).gelu().numpy())
[-0.0036 -0.0454 -0.1588  0.      0.8412  1.9546  2.9964]
Source code in tinygrad/mixin/elementwise.py
def gelu(self) -> Self:
  """
  Applies the Gaussian Error Linear Unit (GELU) function element-wise.

  - Paper: https://arxiv.org/abs/1606.08415v5

  ```python exec="true" source="above" session="tensor" result="python"
  print(Tensor([-3., -2., -1., 0., 1., 2., 3.]).gelu().numpy())
  ```
  """
  return 0.5 * self * (1 + (math.sqrt(2 / math.pi) * (self + 0.044715 * self ** 3)).tanh())

quick_gelu ¤

quick_gelu() -> Self

Applies the Sigmoid GELU approximation element-wise.

print(Tensor([-3., -2., -1., 0., 1., 2., 3.]).quick_gelu().numpy())
[-0.0181 -0.0643 -0.1542  0.      0.8458  1.9357  2.9819]
Source code in tinygrad/mixin/elementwise.py
def quick_gelu(self) -> Self:
  """
  Applies the Sigmoid GELU approximation element-wise.

  ```python exec="true" source="above" session="tensor" result="python"
  print(Tensor([-3., -2., -1., 0., 1., 2., 3.]).quick_gelu().numpy())
  ```
  """
  return self * (self * 1.702).sigmoid()

leaky_relu ¤

leaky_relu(neg_slope=0.01) -> Self

Applies the Leaky ReLU function element-wise.

print(Tensor([-3., -2., -1., 0., 1., 2., 3.]).leaky_relu().numpy())
[-0.03 -0.02 -0.01  0.    1.    2.    3.  ]
print(Tensor([-3., -2., -1., 0., 1., 2., 3.]).leaky_relu(neg_slope=0.42).numpy())
[-1.26 -0.84 -0.42  0.    1.    2.    3.  ]

Source code in tinygrad/mixin/elementwise.py
def leaky_relu(self, neg_slope=0.01) -> Self:
  """
  Applies the Leaky ReLU function element-wise.

  ```python exec="true" source="above" session="tensor" result="python"
  print(Tensor([-3., -2., -1., 0., 1., 2., 3.]).leaky_relu().numpy())
  ```
  ```python exec="true" source="above" session="tensor" result="python"
  print(Tensor([-3., -2., -1., 0., 1., 2., 3.]).leaky_relu(neg_slope=0.42).numpy())
  ```
  """
  return (self < 0).where(neg_slope*self, self)

mish ¤

mish() -> Tensor

Applies the Mish function element-wise.

print(Tensor([-3., -2., -1., 0., 1., 2., 3.]).mish().numpy())
[-0.1456 -0.2525 -0.3034  0.      0.8651  1.944   2.9865]
Source code in tinygrad/tensor.py
def mish(self) -> Tensor:
  """
  Applies the Mish function element-wise.

  - Paper: https://arxiv.org/abs/1908.08681v3

  ```python exec="true" source="above" session="tensor" result="python"
  print(Tensor([-3., -2., -1., 0., 1., 2., 3.]).mish().numpy())
  ```
  """
  return self * self.softplus().tanh()

softplus ¤

softplus(beta=1.0) -> Tensor

Applies the Softplus function element-wise.

print(Tensor([-3., -2., -1., 0., 1., 2., 3.]).softplus().numpy())
[0.0486 0.1269 0.3133 0.6931 1.3133 2.1269 3.0486]
Source code in tinygrad/tensor.py
def softplus(self, beta=1.0) -> Tensor:
  """
  Applies the Softplus function element-wise.

  ```python exec="true" source="above" session="tensor" result="python"
  print(Tensor([-3., -2., -1., 0., 1., 2., 3.]).softplus().numpy())
  ```
  """
  return (1/beta) * (self*beta).logaddexp(0.0)
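
From the source above, softplus(x, beta) = log(1 + exp(beta*x)) / beta, since logaddexp(x, 0) = log(exp(x) + 1); larger beta pushes the curve toward relu. A sketch with a non-default beta:

print(Tensor([-3., -2., -1., 0., 1., 2., 3.]).softplus(beta=2.0).numpy())
# expected: roughly [0.0012 0.0091 0.0635 0.3466 1.0635 2.0091 3.0012]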

softsign ¤

softsign() -> Self

Applies the Softsign function element-wise.

print(Tensor([-3., -2., -1., 0., 1., 2., 3.]).softsign().numpy())
[-0.75   -0.6667 -0.5     0.      0.5     0.6667  0.75  ]
Source code in tinygrad/mixin/elementwise.py
def softsign(self) -> Self:
  """
  Applies the Softsign function element-wise.

  ```python exec="true" source="above" session="tensor" result="python"
  print(Tensor([-3., -2., -1., 0., 1., 2., 3.]).softsign().numpy())
  ```
  """
  return self / (1 + self.abs())

Elementwise Ops (broadcasted)¤

add ¤

add(x: Self | ConstType, reverse: bool = False) -> Self

Adds self and x. Equivalent to self + x. Supports broadcasting to a common shape, type promotion, and integer, float, and boolean inputs.

Tensor.manual_seed(42)
t = Tensor.randn(4)
print(t.numpy())
[-0.5144  1.085   0.9089 -0.0841]
print(t.add(20).numpy())
[19.4856 21.085  20.9089 19.9159]
print(t.add(Tensor([[2.0], [3.5]])).numpy())
[[1.4856 3.085  2.9089 1.9159]
 [2.9856 4.585  4.4089 3.4159]]

Source code in tinygrad/mixin/elementwise.py
def add(self, x: Self | ConstType, reverse: bool = False) -> Self:
  """
  Adds `self` and `x`.
  Equivalent to `self + x`.
  Supports broadcasting to a common shape, type promotion, and integer, float, boolean inputs.
  ```python exec="true" source="above" session="tensor" result="python"
  Tensor.manual_seed(42)
  t = Tensor.randn(4)
  print(t.numpy())
  ```
  ```python exec="true" source="above" session="tensor" result="python"
  print(t.add(20).numpy())
  ```
  ```python exec="true" source="above" session="tensor" result="python"
  print(t.add(Tensor([[2.0], [3.5]])).numpy())
  ```
  """
  return self._binop(Ops.ADD, x, reverse)

sub ¤

sub(x: Tensor | ConstType, reverse=False) -> Tensor

Subtracts x from self. Equivalent to self - x. Supports broadcasting to a common shape, type promotion, and integer, float, and boolean inputs.

Tensor.manual_seed(42)
t = Tensor.randn(4)
print(t.numpy())
[-0.5144  1.085   0.9089 -0.0841]
print(t.sub(20).numpy())
[-20.5144 -18.915  -19.0911 -20.0841]
print(t.sub(Tensor([[2.0], [3.5]])).numpy())
[[-2.5144 -0.915  -1.0911 -2.0841]
 [-4.0144 -2.415  -2.5911 -3.5841]]

Source code in tinygrad/tensor.py
def sub(self, x:Tensor|ConstType, reverse=False) -> Tensor:
  """
  Subtracts `x` from `self`.
  Equivalent to `self - x`.
  Supports broadcasting to a common shape, type promotion, and integer, float, boolean inputs.

  ```python exec="true" source="above" session="tensor" result="python"
  Tensor.manual_seed(42)
  t = Tensor.randn(4)
  print(t.numpy())
  ```
  ```python exec="true" source="above" session="tensor" result="python"
  print(t.sub(20).numpy())
  ```
  ```python exec="true" source="above" session="tensor" result="python"
  print(t.sub(Tensor([[2.0], [3.5]])).numpy())
  ```
  """
  a, b = self._broadcasted(x, reverse)
  return a + (-b)

mul ¤

mul(x: Self | ConstType, reverse: bool = False) -> Self

Multiplies self and x. Equivalent to self * x. Supports broadcasting to a common shape, type promotion, and integer, float, and boolean inputs.

Tensor.manual_seed(42)
t = Tensor.randn(4)
print(t.numpy())
[-0.5144  1.085   0.9089 -0.0841]
print(t.mul(3).numpy())
[-1.5431  3.2549  2.7267 -0.2523]
print(t.mul(Tensor([[-1.0], [2.0]])).numpy())
[[ 0.5144 -1.085  -0.9089  0.0841]
 [-1.0287  2.17    1.8178 -0.1682]]

Source code in tinygrad/mixin/elementwise.py
def mul(self, x: Self | ConstType, reverse: bool = False) -> Self:
  """
  Multiplies `self` and `x`.
  Equivalent to `self * x`.
  Supports broadcasting to a common shape, type promotion, and integer, float, boolean inputs.

  ```python exec="true" source="above" session="tensor" result="python"
  Tensor.manual_seed(42)
  t = Tensor.randn(4)
  print(t.numpy())
  ```
  ```python exec="true" source="above" session="tensor" result="python"
  print(t.mul(3).numpy())
  ```
  ```python exec="true" source="above" session="tensor" result="python"
  print(t.mul(Tensor([[-1.0], [2.0]])).numpy())
  ```
  """
  return self._binop(Ops.MUL, x, reverse)

div ¤

div(
    x: Tensor | ConstType,
    reverse=False,
    rounding_mode: Literal["trunc", "floor"] | None = None,
) -> Tensor

Divides self by x. Equivalent to self / x. Supports broadcasting to a common shape, type promotion, and integer, float, and boolean inputs. div performs true division.

Tensor.manual_seed(42)
t = Tensor.randn(4)
print(t.numpy())
[-0.5144  1.085   0.9089 -0.0841]
print(t.div(3).numpy())
[-0.1715  0.3617  0.303  -0.028 ]
print(Tensor([1, 4, 10]).div(Tensor([2, 3, 4])).numpy())
[0.5    1.3333 2.5   ]

Source code in tinygrad/tensor.py
def div(self, x:Tensor|ConstType, reverse=False, rounding_mode:Literal["trunc", "floor"]|None=None) -> Tensor:
  """
  Divides `self` by `x`.
  Equivalent to `self / x`.
  Supports broadcasting to a common shape, type promotion, and integer, float, boolean inputs.
  `div` performs true division.

  ```python exec="true" source="above" session="tensor" result="python"
  Tensor.manual_seed(42)
  t = Tensor.randn(4)
  print(t.numpy())
  ```
  ```python exec="true" source="above" session="tensor" result="python"
  print(t.div(3).numpy())
  ```
  ```python exec="true" source="above" session="tensor" result="python"
  print(Tensor([1, 4, 10]).div(Tensor([2, 3, 4])).numpy())
  ```
  """
  numerator, denominator = self._broadcasted(x, reverse)
  d = numerator.cast(least_upper_float(numerator.dtype)) * denominator.cast(least_upper_float(denominator.dtype)).reciprocal()
  output_dtype = numerator.dtype if dtypes.is_int(numerator.dtype) else d.dtype
  if dtypes.is_int(dt:=least_upper_dtype(numerator.dtype, denominator.dtype)) and rounding_mode is not None:
    numerator, denominator = numerator.cast(dt), denominator.cast(dt)
    if rounding_mode == "trunc": return numerator.idiv(denominator)
    if rounding_mode == "floor":
      truncate_div, truncate_mod = numerator.idiv(denominator), numerator._binop(Ops.MOD, denominator, False)
      opposite_sign = ((numerator>0)&(denominator<0)) | ((numerator<0)&(denominator>0))
      return (opposite_sign&(truncate_mod!=0)).where(truncate_div-1, truncate_div)
  if rounding_mode == "trunc": return d.trunc().cast(output_dtype)
  if rounding_mode == "floor": return d.floor().cast(output_dtype)
  if rounding_mode is not None: raise RuntimeError(f"{rounding_mode=} is not supported")
  return d
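
On integer inputs, rounding_mode="trunc" matches idiv below and rounding_mode="floor" matches Python's // semantics; expected values for the same operands as the idiv example:

a, b = Tensor([-4, 7, 5, 4, -7, 8]), Tensor([2, -3, 8, -2, 3, 5])
print(a.div(b, rounding_mode="trunc").numpy())  # expected: [-2 -2  0 -2 -2  1]
print(a.div(b, rounding_mode="floor").numpy())  # expected: [-2 -3  0 -2 -3  1]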

idiv ¤

idiv(x: Self | ConstType, reverse: bool = False) -> Self

Divides self by x. Equivalent to self // x. Supports broadcasting to a common shape, type promotion, and integer inputs. idiv performs integer division (truncate towards zero).

print(Tensor([-4, 7, 5, 4, -7, 8]).idiv(Tensor([2, -3, 8, -2, 3, 5])).numpy())
[-2 -2  0 -2 -2  1]
Source code in tinygrad/mixin/elementwise.py
def idiv(self, x: Self | ConstType, reverse: bool = False) -> Self:
  """
  Divides `self` by `x`.
  Equivalent to `self // x`.
  Supports broadcasting to a common shape, type promotion, and integer inputs.
  `idiv` performs integer division (truncate towards zero).

  ```python exec="true" source="above" session="tensor" result="python"
  print(Tensor([-4, 7, 5, 4, -7, 8]).idiv(Tensor([2, -3, 8, -2, 3, 5])).numpy())
  ```
  """
  return self._binop(Ops.IDIV, x, reverse)

mod ¤

mod(x: Tensor | ConstType, reverse=False) -> Tensor

Mod self by x. Equivalent to self % x. Supports broadcasting to a common shape, type promotion, and integer inputs.

print(Tensor([-4, 7, 5, 4, -7, 8]).mod(Tensor([2, -3, 8, -2, 3, 5])).numpy())
[ 0 -2  5  0  2  3]
Source code in tinygrad/tensor.py
def mod(self, x:Tensor|ConstType, reverse=False) -> Tensor:
  """
  Mod `self` by `x`.
  Equivalent to `self % x`.
  Supports broadcasting to a common shape, type promotion, and integer inputs.

  ```python exec="true" source="above" session="tensor" result="python"
  print(Tensor([-4, 7, 5, 4, -7, 8]).mod(Tensor([2, -3, 8, -2, 3, 5])).numpy())
  ```
  """
  a, b = self._broadcasted(x, reverse)
  return a - a.div(b, rounding_mode="floor") * b

bitwise_xor ¤

bitwise_xor(
    x: Self | ConstType, reverse: bool = False
) -> Self

Computes bitwise xor of self and x. Equivalent to self ^ x. Supports broadcasting to a common shape, type promotion, and integer and boolean inputs.

print(Tensor([-1, -2, 3]).bitwise_xor(Tensor([1, 0, 3])).numpy())
[-2 -2  0]
print(Tensor([True, True, False, False]).bitwise_xor(Tensor([True, False, True, False])).numpy())
[False  True  True False]

Source code in tinygrad/mixin/elementwise.py
def bitwise_xor(self, x: Self | ConstType, reverse: bool = False) -> Self:
  """
  Computes bitwise xor of `self` and `x`.
  Equivalent to `self ^ x`.
  Supports broadcasting to a common shape, type promotion, and integer, boolean inputs.

  ```python exec="true" source="above" session="tensor" result="python"
  print(Tensor([-1, -2, 3]).bitwise_xor(Tensor([1, 0, 3])).numpy())
  ```
  ```python exec="true" source="above" session="tensor" result="python"
  print(Tensor([True, True, False, False]).bitwise_xor(Tensor([True, False, True, False])).numpy())
  ```
  """
  self._check_dtype()
  return self._binop(Ops.XOR, x, reverse)

bitwise_and ¤

bitwise_and(
    x: Self | ConstType, reverse: bool = False
) -> Self

Computes the bitwise AND of self and x. Equivalent to self & x. Supports broadcasting to a common shape, type promotion, and integer and boolean inputs.

print(Tensor([2, 5, 255]).bitwise_and(Tensor([3, 14, 16])).numpy())
[ 2  4 16]
print(Tensor([True, True, False, False]).bitwise_and(Tensor([True, False, True, False])).numpy())
[ True False False False]

Source code in tinygrad/mixin/elementwise.py
def bitwise_and(self, x: Self | ConstType, reverse: bool = False) -> Self:
  """
  Computes the bitwise AND of `self` and `x`.
  Equivalent to `self & x`.
  Supports broadcasting to a common shape, type promotion, and integer, boolean inputs.
  ```python exec="true" source="above" session="tensor" result="python"
  print(Tensor([2, 5, 255]).bitwise_and(Tensor([3, 14, 16])).numpy())
  ```
  ```python exec="true" source="above" session="tensor" result="python"
  print(Tensor([True, True, False, False]).bitwise_and(Tensor([True, False, True, False])).numpy())
  ```
  """
  self._check_dtype()
  return self._binop(Ops.AND, x, reverse)

bitwise_or ¤

bitwise_or(
    x: Self | ConstType, reverse: bool = False
) -> Self

Computes the bitwise OR of self and x. Equivalent to self | x. Supports broadcasting to a common shape, type promotion, and integer and boolean inputs.

print(Tensor([2, 5, 255]).bitwise_or(Tensor([4, 4, 4])).numpy())
[  6   5 255]
print(Tensor([True, True, False, False]).bitwise_or(Tensor([True, False, True, False])).numpy())
[ True  True  True False]

Source code in tinygrad/mixin/elementwise.py
def bitwise_or(self, x: Self | ConstType, reverse: bool = False) -> Self:
  """
  Computes the bitwise OR of `self` and `x`.
  Equivalent to `self | x`.
  Supports broadcasting to a common shape, type promotion, and integer, boolean inputs.
  ```python exec="true" source="above" session="tensor" result="python"
  print(Tensor([2, 5, 255]).bitwise_or(Tensor([4, 4, 4])).numpy())
  ```
  ```python exec="true" source="above" session="tensor" result="python"
  print(Tensor([True, True, False, False]).bitwise_or(Tensor([True, False, True, False])).numpy())
  ```
  """
  self._check_dtype()
  return self._binop(Ops.OR, x, reverse)

bitwise_not ¤

bitwise_not() -> Self

Computes the bitwise NOT of self. Equivalent to ~self.

print(Tensor([0, 2, 5, 255], dtype="int8").bitwise_not().numpy())
[-1 -3 -6  0]
print(Tensor([True, False]).bitwise_not().numpy())
[False  True]

Source code in tinygrad/mixin/elementwise.py
def bitwise_not(self) -> Self:
  """
  Computes the bitwise NOT of `self`.
  Equivalent to `~self`.
  ```python exec="true" source="above" session="tensor" result="python"
  print(Tensor([0, 2, 5, 255], dtype="int8").bitwise_not().numpy())
  ```
  ```python exec="true" source="above" session="tensor" result="python"
  print(Tensor([True, False]).bitwise_not().numpy())
  ```
  """
  if self.dtype != dtypes.bool and not dtypes.is_int(self.dtype): raise RuntimeError(f"{self.dtype} is not supported")
  return self.logical_not() if self.dtype == dtypes.bool else self ^ -1

lshift ¤

lshift(x: Tensor | int, reverse=False) -> Tensor

Computes left arithmetic shift of self by x bits. self must have unsigned dtype. Equivalent to self << x.

print(Tensor([1, 3, 31], dtype=dtypes.uint8).lshift(2).numpy())
[  4  12 124]
Source code in tinygrad/tensor.py
def lshift(self, x:Tensor|int, reverse=False) -> Tensor:
  """
  Computes left arithmetic shift of `self` by `x` bits. `self` must have unsigned dtype.
  Equivalent to `self << x`.

  ```python exec="true" source="above" session="tensor" result="python"
  print(Tensor([1, 3, 31], dtype=dtypes.uint8).lshift(2).numpy())
  ```
  """
  assert dtypes.is_unsigned(self.dtype) and isinstance(x, int) and x >= 0 and not reverse, f"not supported {self.dtype=} {x=}"
  return self.mul(2 ** x, reverse)

rshift ¤

rshift(x: Tensor | int, reverse=False) -> Tensor

Computes right arithmetic shift of self by x bits. self must have unsigned dtype. Equivalent to self >> x.

print(Tensor([4, 13, 125], dtype=dtypes.uint8).rshift(2).numpy())
[ 1  3 31]
Source code in tinygrad/tensor.py
def rshift(self, x:Tensor|int, reverse=False) -> Tensor:
  """
  Computes right arithmetic shift of `self` by `x` bits. `self` must have unsigned dtype.
  Equivalent to `self >> x`.

  ```python exec="true" source="above" session="tensor" result="python"
  print(Tensor([4, 13, 125], dtype=dtypes.uint8).rshift(2).numpy())
  ```
  """
  assert dtypes.is_unsigned(self.dtype) and isinstance(x, int) and x >= 0 and not reverse, f"not supported {self.dtype=} {x=}"
  return self.idiv(2 ** x, reverse)

pow ¤

pow(x: Tensor | ConstType, reverse=False) -> Tensor

Raises self to the power of x. Equivalent to self ** x.

print(Tensor([-1, 2, 3]).pow(2.0).numpy())
[1 4 9]
print(Tensor([-1, 2, 3]).pow(Tensor([-1.5, 0.5, 1.5])).numpy())
[-2147483648           1           5]
print((2.0 ** Tensor([-1, 2, 3])).numpy())
[0.5 4.  8. ]

Source code in tinygrad/tensor.py
def pow(self, x:Tensor|ConstType, reverse=False) -> Tensor:
  """
  Computes power of `self` with `x`.
  Equivalent to `self ** x`.

  ```python exec="true" source="above" session="tensor" result="python"
  print(Tensor([-1, 2, 3]).pow(2.0).numpy())
  ```
  ```python exec="true" source="above" session="tensor" result="python"
  print(Tensor([-1, 2, 3]).pow(Tensor([-1.5, 0.5, 1.5])).numpy())
  ```
  ```python exec="true" source="above" session="tensor" result="python"
  print((2.0 ** Tensor([-1, 2, 3])).numpy())
  ```
  """
  base, exponent = self._broadcasted(x, reverse=reverse)
  # TODO: int pow
  if not base.is_floating_point() and not (isinstance(x, int) and x >= 0): raise RuntimeError("base needs to be float")

  ret = base._apply_uop(UOp.pow, exponent)
  # NOTE: pow(int, float) -> int
  return ret.round().cast(self.dtype) if not reverse and not dtypes.is_float(self.dtype) and dtypes.is_float(exponent.dtype) else ret

maximum ¤

maximum(x: Self | ConstType) -> Self

Computes element-wise maximum of self and x.

print(Tensor([-1, 2, 3]).maximum(1).numpy())
[1 2 3]
print(Tensor([-1, 2, 3]).maximum(Tensor([-4, -2, 9])).numpy())
[-1  2  9]

Source code in tinygrad/mixin/elementwise.py
def maximum(self, x: Self | ConstType) -> Self:
  """
  Computes element-wise maximum of `self` and `x`.

  ```python exec="true" source="above" session="tensor" result="python"
  print(Tensor([-1, 2, 3]).maximum(1).numpy())
  ```
  ```python exec="true" source="above" session="tensor" result="python"
  print(Tensor([-1, 2, 3]).maximum(Tensor([-4, -2, 9])).numpy())
  ```
  """
  return self._binop(Ops.MAX, x, False)

minimum ¤

minimum(x: Tensor | ConstType) -> Tensor

Computes element-wise minimum of self and x.

print(Tensor([-1, 2, 3]).minimum(1).numpy())
[-1  1  1]
print(Tensor([-1, 2, 3]).minimum(Tensor([-4, -2, 9])).numpy())
[-4 -2  3]

Source code in tinygrad/tensor.py
def minimum(self, x:Tensor|ConstType) -> Tensor:
  """
  Computes element-wise minimum of `self` and `x`.

  ```python exec="true" source="above" session="tensor" result="python"
  print(Tensor([-1, 2, 3]).minimum(1).numpy())
  ```
  ```python exec="true" source="above" session="tensor" result="python"
  print(Tensor([-1, 2, 3]).minimum(Tensor([-4, -2, 9])).numpy())
  ```
  """
  t, x = self._broadcasted(x)
  return t._inverse().maximum(x._inverse())._inverse()

where ¤

where(
    x: Tensor | ConstType | sint,
    y: Tensor | ConstType | sint,
) -> Tensor

Returns a tensor of elements selected from either x or y, depending on self. output_i = x_i if self_i else y_i.

cond = Tensor([[True, True, False], [True, False, False]])
print(cond.where(1, 3).numpy())
[[1 1 3]
 [1 3 3]]
Tensor.manual_seed(42)
cond = Tensor.randn(2, 3)
print(cond.numpy())
[[ 0.9779  0.4678  0.5526]
 [-0.3288 -0.8555  0.2753]]
print((cond > 0).where(cond, -float("inf")).numpy())
[[0.9779 0.4678 0.5526]
 [  -inf   -inf 0.2753]]

Source code in tinygrad/tensor.py
def where(self:Tensor, x:Tensor|ConstType|sint, y:Tensor|ConstType|sint) -> Tensor:
  """
  Returns a tensor of elements selected from either `x` or `y`, depending on `self`.
  `output_i = x_i if self_i else y_i`.

  ```python exec="true" source="above" session="tensor" result="python"
  cond = Tensor([[True, True, False], [True, False, False]])
  print(cond.where(1, 3).numpy())
  ```
  ```python exec="true" source="above" session="tensor" result="python"
  Tensor.manual_seed(42)
  cond = Tensor.randn(2, 3)
  print(cond.numpy())
  ```
  ```python exec="true" source="above" session="tensor" result="python"
  print((cond > 0).where(cond, -float("inf")).numpy())
  ```
  """
  if isinstance(x, Tensor): x, y = x._broadcasted(y)
  elif isinstance(y, Tensor): y, x = y._broadcasted(x)
  cond, x = self._broadcasted(x, match_dtype=False)
  cond, y = cond._broadcasted(y, match_dtype=False)
  return cond.cast(dtypes.bool)._apply_uop(UOp.where, *x._broadcasted(y))

copysign ¤

copysign(other) -> Tensor

Returns a tensor with the magnitude of self and the sign of other, elementwise.

Source code in tinygrad/tensor.py
def copysign(self, other) -> Tensor:
  """
  Returns a tensor with the magnitude of `self` and the sign of `other`, elementwise.
  """
  # NOTE: torch always return in float, we return based on the broadcasting rule.
  other = self._broadcasted(other)[1]
  # TODO: remove other.sign()*0?
  # other.sign()*0 keeps other in the gradient graph (gradient=0) without affecting forward (works for inf unlike other*0)
  return self.abs() * ((other < 0) | (other.reciprocal() < 0)).where(-1, 1) + other.sign()*0
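
A minimal usage sketch; expected values follow from the definition (magnitude from self, sign from other):

print(Tensor([1., 2., 3.]).copysign(Tensor([-1., 1., -0.5])).numpy())  # expected: [-1.  2. -3.]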

logaddexp ¤

logaddexp(other) -> Tensor

Calculates (self.exp()+other.exp()).log(), elementwise.

Source code in tinygrad/tensor.py
def logaddexp(self, other) -> Tensor:
  """
  Calculates (self.exp()+other.exp()).log(), elementwise.
  """
  m = self.maximum(other)
  return ((self-m).exp() + (self._broadcasted(other)[1]-m).exp()).log() + m
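
The max-subtraction above is the standard log-sum-exp stabilization: the naive form overflows once exp exceeds the float range, while logaddexp stays finite. A sketch:

a, b = Tensor([100., 1.]), Tensor([100., 2.])
print(a.logaddexp(b).numpy())             # expected: roughly [100.6931   2.3133]
print((a.exp() + b.exp()).log().numpy())  # expected: [   inf 2.3133] in float32, since exp(100) overflows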

Casting Ops¤

cast ¤

cast(dtype: DTypeLike) -> Tensor

Casts self to the given dtype.

t = Tensor([-1, 2.5, 3], dtype=dtypes.float)
print(t.dtype, t.numpy())
dtypes.float [-1.   2.5  3. ]
t = t.cast(dtypes.int32)
print(t.dtype, t.numpy())
dtypes.int [-1  2  3]
t = t.cast(dtypes.uint8)
print(t.dtype, t.numpy())
dtypes.uchar [255   2   3]

Source code in tinygrad/tensor.py
def cast(self, dtype:DTypeLike) -> Tensor:
  """
  Casts `self` to the given `dtype`.

  ```python exec="true" source="above" session="tensor" result="python"
  t = Tensor([-1, 2.5, 3], dtype=dtypes.float)
  print(t.dtype, t.numpy())
  ```
  ```python exec="true" source="above" session="tensor" result="python"
  t = t.cast(dtypes.int32)
  print(t.dtype, t.numpy())
  ```
  ```python exec="true" source="above" session="tensor" result="python"
  t = t.cast(dtypes.uint8)
  print(t.dtype, t.numpy())
  ```
  """
  return self if self.dtype == (dt:=to_dtype(dtype)) else self._apply_uop(UOp.cast, dtype=dt)

bitcast ¤

bitcast(dtype: DTypeLike) -> Tensor

Bitcasts self to the given dtype of the same itemsize.

self must not require a gradient.

t = Tensor([-1, 2, 3], dtype=dtypes.int32)
print(t.dtype, t.numpy())
dtypes.int [-1  2  3]
t = t.bitcast(dtypes.uint32)
print(t.dtype, t.numpy())
dtypes.uint [4294967295          2          3]

Source code in tinygrad/tensor.py
def bitcast(self, dtype:DTypeLike) -> Tensor:
  """
  Bitcasts `self` to the given `dtype` of the same itemsize.

  `self` must not require a gradient.

  ```python exec="true" source="above" session="tensor" result="python"
  t = Tensor([-1, 2, 3], dtype=dtypes.int32)
  print(t.dtype, t.numpy())
  ```
  ```python exec="true" source="above" session="tensor" result="python"
  t = t.bitcast(dtypes.uint32)
  print(t.dtype, t.numpy())
  ```
  """
  if self.requires_grad: raise RuntimeError("can't backprop through bitcast")
  dt = to_dtype(dtype)
  if (ns:=dt.itemsize) != (os:=self.dtype.itemsize) and (self.shape[-1]*os) % ns != 0: raise RuntimeError("unsupported size in bitcast")
  if (not isinstance(self.device, str) or not self.device.startswith("DISK")) and ns != os:
    new_uint, old_uint = to_dtype(f"uint{8*ns}"), to_dtype(f"uint{8*os}")
    tmp = self.bitcast(old_uint)
    if ns > os:
      tmp = tmp.reshape(self.shape[:-1] + (self.shape[-1]//(rate := ns//os), rate))
      nones = (None,) * (tmp.ndim - 1)
      return functools.reduce(Tensor.add, (tmp.shrink(nones + ((i, i+1),)).cast(new_uint)<<8*i*os for i in range(rate))).squeeze(-1).bitcast(dtype)
    return Tensor.stack(*(tmp>>8*i*ns for i in range(os//ns)), dim=-1).flatten(-2).cast(new_uint).bitcast(dtype)
  return self._apply_uop(UOp.bitcast, dtype=dt) if self.dtype != dt else self

float ¤

float() -> Self

Convenience method to cast self to a float32 Tensor.

t = Tensor([-1, 2, 3], dtype=dtypes.int32)
print(t.dtype, t.numpy())
dtypes.int [-1  2  3]
t = t.float()
print(t.dtype, t.numpy())
dtypes.float [-1.  2.  3.]

Source code in tinygrad/mixin/dtype.py
def float(self) -> Self:
  """
  Convenience method to cast `self` to a `float32` Tensor.

  ```python exec="true" source="above" session="tensor" result="python"
  t = Tensor([-1, 2, 3], dtype=dtypes.int32)
  print(t.dtype, t.numpy())
  ```
  ```python exec="true" source="above" session="tensor" result="python"
  t = t.float()
  print(t.dtype, t.numpy())
  ```
  """
  return self.cast(dtypes.float32)

half ¤

half() -> Self

Convenience method to cast self to a float16 Tensor.

t = Tensor([-1, 2, 3], dtype=dtypes.int32)
print(t.dtype, t.numpy())
dtypes.int [-1  2  3]
t = t.half()
print(t.dtype, t.numpy())
dtypes.half [-1.  2.  3.]

Source code in tinygrad/mixin/dtype.py
def half(self) -> Self:
  """
  Convenience method to cast `self` to a `float16` Tensor.

  ```python exec="true" source="above" session="tensor" result="python"
  t = Tensor([-1, 2, 3], dtype=dtypes.int32)
  print(t.dtype, t.numpy())
  ```
  ```python exec="true" source="above" session="tensor" result="python"
  t = t.half()
  print(t.dtype, t.numpy())
  ```
  """
  return self.cast(dtypes.float16)

int ¤

int() -> Self

Convenience method to cast self to an int32 Tensor.

t = Tensor([-1.5, -0.5, 0.0, 0.5, 1.5])
print(t.dtype, t.numpy())
dtypes.float [-1.5 -0.5  0.   0.5  1.5]
t = t.int()
print(t.dtype, t.numpy())
dtypes.int [-1  0  0  0  1]

Source code in tinygrad/mixin/dtype.py
def int(self) -> Self:
  """
  Convenience method to cast `self` to an `int32` Tensor.

  ```python exec="true" source="above" session="tensor" result="python"
  t = Tensor([-1.5, -0.5, 0.0, 0.5, 1.5])
  print(t.dtype, t.numpy())
  ```
  ```python exec="true" source="above" session="tensor" result="python"
  t = t.int()
  print(t.dtype, t.numpy())
  ```
  """
  return self.cast(dtypes.int32)

bool ¤

bool() -> Self

Convenience method to cast self to a bool Tensor.

t = Tensor([-1, 0, 1])
print(t.dtype, t.numpy())
dtypes.int [-1  0  1]
t = t.bool()
print(t.dtype, t.numpy())
dtypes.bool [ True False  True]

Source code in tinygrad/mixin/dtype.py
def bool(self) -> Self:
  """
  Convenience method to cast `self` to a `bool` Tensor.

  ```python exec="true" source="above" session="tensor" result="python"
  t = Tensor([-1, 0, 1])
  print(t.dtype, t.numpy())
  ```
  ```python exec="true" source="above" session="tensor" result="python"
  t = t.bool()
  print(t.dtype, t.numpy())
  ```
  """
  return self.cast(dtypes.bool)

bfloat16 ¤

bfloat16() -> Self
Source code in tinygrad/mixin/dtype.py
def bfloat16(self) -> Self: return self.cast(dtypes.bfloat16)

double ¤

double() -> Self
Source code in tinygrad/mixin/dtype.py
def double(self) -> Self: return self.cast(dtypes.double)

long ¤

long() -> Self
Source code in tinygrad/mixin/dtype.py
def long(self) -> Self: return self.cast(dtypes.long)

short ¤

short() -> Self
Source code in tinygrad/mixin/dtype.py
def short(self) -> Self: return self.cast(dtypes.short)
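
These four are one-line convenience casts without dedicated examples above; a minimal sketch (the printed dtype names assume the repr convention seen in the earlier examples):

t = Tensor([1.5, 2.5])
print(t.long().dtype, t.short().dtype, t.double().dtype, t.bfloat16().dtype)
# expected: dtypes.long dtypes.short dtypes.double dtypes.bfloat16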