Elementwise

Elementwise ops operate on a per-element basis. They don't change the shape of the tensor.
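
For example, chaining elementwise ops leaves the input shape untouched (a minimal sketch, assuming a working tinygrad install):

```python
from tinygrad import Tensor

t = Tensor([[1., 2., 3.], [4., 5., 6.]])  # shape (2, 3)
out = t.relu().sqrt().clip(0, 2)          # a chain of elementwise ops
assert out.shape == t.shape               # still (2, 3)
print(out.numpy())
```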

Unary Ops (math)

logical_not

logical_not() -> Tensor

Computes the logical NOT of the tensor element-wise.

print(Tensor([False, True]).logical_not().numpy())
[ True False]
Source code in tinygrad/tensor.py
def logical_not(self) -> Tensor:
  """
  Computes the logical NOT of the tensor element-wise.

  ```python exec="true" source="above" session="tensor" result="python"
  print(Tensor([False, True]).logical_not().numpy())
  ```
  """
  return self.cast(dtypes.bool)._apply_broadcasted_uop(UOp.ne, True)

neg

neg() -> Tensor

Negates the tensor element-wise.

print(Tensor([-3., -2., -1., 0., 1., 2., 3.]).neg().numpy())
[ 3.  2.  1. -0. -1. -2. -3.]
Source code in tinygrad/tensor.py
def neg(self) -> Tensor:
  """
  Negates the tensor element-wise.

  ```python exec="true" source="above" session="tensor" result="python"
  print(Tensor([-3., -2., -1., 0., 1., 2., 3.]).neg().numpy())
  ```
  """
  return self*-1 if self.dtype != dtypes.bool else self.logical_not()

log

log() -> Tensor

Computes the natural logarithm element-wise.

See: https://en.wikipedia.org/wiki/Logarithm

print(Tensor([1., 2., 4., 8.]).log().numpy())
[0.     0.6931 1.3863 2.0794]
Source code in tinygrad/tensor.py
def log(self) -> Tensor:
  """
  Computes the natural logarithm element-wise.

  See: https://en.wikipedia.org/wiki/Logarithm

  ```python exec="true" source="above" session="tensor" result="python"
  print(Tensor([1., 2., 4., 8.]).log().numpy())
  ```
  """
  return self.log2()*math.log(2)

log2

log2() -> Tensor

Computes the base-2 logarithm element-wise.

See: https://en.wikipedia.org/wiki/Logarithm

print(Tensor([1., 2., 4., 8.]).log2().numpy())
[0. 1. 2. 3.]
Source code in tinygrad/tensor.py
def log2(self) -> Tensor:
  """
  Computes the base-2 logarithm element-wise.

  See: https://en.wikipedia.org/wiki/Logarithm

  ```python exec="true" source="above" session="tensor" result="python"
  print(Tensor([1., 2., 4., 8.]).log2().numpy())
  ```
  """
  return self.cast(least_upper_float(self.dtype))._apply_uop(UOp.log2)

exp

exp() -> Tensor

Computes the exponential function element-wise.

See: https://en.wikipedia.org/wiki/Exponential_function

print(Tensor([0., 1., 2., 3.]).exp().numpy())
[ 1.      2.7183  7.3891 20.0855]
Source code in tinygrad/tensor.py
def exp(self) -> Tensor:
  """
  Computes the exponential function element-wise.

  See: https://en.wikipedia.org/wiki/Exponential_function

  ```python exec="true" source="above" session="tensor" result="python"
  print(Tensor([0., 1., 2., 3.]).exp().numpy())
  ```
  """
  return self.mul(1/math.log(2)).exp2()

exp2

exp2() -> Tensor

Computes the base-2 exponential function element-wise.

See: https://en.wikipedia.org/wiki/Exponential_function

print(Tensor([0., 1., 2., 3.]).exp2().numpy())
[1. 2. 4. 8.]
Source code in tinygrad/tensor.py
def exp2(self) -> Tensor:
  """
  Computes the base-2 exponential function element-wise.

  See: https://en.wikipedia.org/wiki/Exponential_function

  ```python exec="true" source="above" session="tensor" result="python"
  print(Tensor([0., 1., 2., 3.]).exp2().numpy())
  ```
  """
  return self.cast(least_upper_float(self.dtype))._apply_uop(UOp.exp2)

sqrt

sqrt() -> Tensor

Computes the square root of the tensor element-wise.

print(Tensor([1., 2., 3., 4.]).sqrt().numpy())
[1.     1.4142 1.7321 2.    ]
Source code in tinygrad/tensor.py
def sqrt(self) -> Tensor:
  """
  Computes the square root of the tensor element-wise.

  ```python exec="true" source="above" session="tensor" result="python"
  print(Tensor([1., 2., 3., 4.]).sqrt().numpy())
  ```
  """
  return self.cast(least_upper_float(self.dtype))._apply_uop(UOp.sqrt)

rsqrt

rsqrt() -> Tensor

Computes the reciprocal of the square root of the tensor element-wise.

print(Tensor([1., 2., 3., 4.]).rsqrt().numpy())
[1.     0.7071 0.5774 0.5   ]
Source code in tinygrad/tensor.py
def rsqrt(self) -> Tensor:
  """
  Computes the reciprocal of the square root of the tensor element-wise.

  ```python exec="true" source="above" session="tensor" result="python"
  print(Tensor([1., 2., 3., 4.]).rsqrt().numpy())
  ```
  """
  return self.sqrt().reciprocal()

sin

sin() -> Tensor

Computes the sine of the tensor element-wise.

print(Tensor([0., math.pi/2, math.pi, 3*math.pi/2, 2*math.pi]).sin().numpy())
[ 0.  1. -0. -1.  0.]
Source code in tinygrad/tensor.py
def sin(self) -> Tensor:
  """
  Computes the sine of the tensor element-wise.

  ```python exec="true" source="above" session="tensor" result="python"
  print(Tensor([0., math.pi/2, math.pi, 3*math.pi/2, 2*math.pi]).sin().numpy())
  ```
  """
  return self.cast(least_upper_float(self.dtype))._apply_uop(UOp.sin)

cos

cos() -> Tensor

Computes the cosine of the tensor element-wise.

print(Tensor([0., math.pi/2, math.pi, 3*math.pi/2, 2*math.pi]).cos().numpy())
[ 1.0000e+00  0.0000e+00 -1.0000e+00 -2.3842e-07  1.0000e+00]
Source code in tinygrad/tensor.py
def cos(self) -> Tensor:
  """
  Computes the cosine of the tensor element-wise.

  ```python exec="true" source="above" session="tensor" result="python"
  print(Tensor([0., math.pi/2, math.pi, 3*math.pi/2, 2*math.pi]).cos().numpy())
  ```
  """
  return ((math.pi/2)-self).sin()

tan

tan() -> Tensor

Computes the tangent of the tensor element-wise.

print(Tensor([0., math.pi/4, math.pi/2, 3*math.pi/4, math.pi]).tan().numpy())
[ 0.  1. inf -1.  0.]
Source code in tinygrad/tensor.py
def tan(self) -> Tensor:
  """
  Computes the tangent of the tensor element-wise.

  ```python exec="true" source="above" session="tensor" result="python"
  print(Tensor([0., math.pi/4, math.pi/2, 3*math.pi/4, math.pi]).tan().numpy())
  ```
  """
  return self.sin() / self.cos()

asin

asin() -> Tensor

Computes the inverse sine (arcsine) of the tensor element-wise.

print(Tensor([-0.9, -0.6, -0.3, 0., 0.3, 0.6, 0.9]).asin().numpy())
[-1.1198 -0.6435 -0.3047  0.      0.3047  0.6435  1.1198]
Source code in tinygrad/tensor.py
def asin(self) -> Tensor:
  """
  Computes the inverse sine (arcsine) of the tensor element-wise.

  ```python exec="true" source="above" session="tensor" result="python"
  print(Tensor([-0.9, -0.6, -0.3, 0., 0.3, 0.6, 0.9]).asin().numpy())
  ```
  """
  # https://personal.math.ubc.ca/~cbm/aands/page_81.htm 4.4.46
  coefficients = [-0.0012624911, 0.0066700901, -0.0170881256, 0.0308918810, -0.0501743046, 0.0889789874, -0.2145988016, 1.5707963050]
  x = math.pi / 2 - (1.0 - self.abs()).sqrt() * polyN(self.abs(), coefficients)
  return self.sign() * x
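
`polyN` is tinygrad's polynomial evaluator; it takes the highest-order coefficient first. A plain-Python sketch of the same Abramowitz & Stegun 4.4.46 approximation (the `horner` helper is my own illustration, not tinygrad code):

```python
import math

def horner(x, coefficients):
  # evaluates c0*x^(n-1) + c1*x^(n-2) + ... + c[n-1], highest-order coefficient first
  acc = 0.0
  for c in coefficients: acc = acc * x + c
  return acc

coeffs = [-0.0012624911, 0.0066700901, -0.0170881256, 0.0308918810,
          -0.0501743046, 0.0889789874, -0.2145988016, 1.5707963050]
x = 0.6
approx = math.copysign(math.pi/2 - math.sqrt(1.0 - abs(x)) * horner(abs(x), coeffs), x)
print(approx, math.asin(x))  # both ~0.6435
```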

acos

acos() -> Tensor

Computes the inverse cosine (arccosine) of the tensor element-wise.

print(Tensor([-0.9, -0.6, -0.3, 0., 0.3, 0.6, 0.9]).acos().numpy())
[2.6906 2.2143 1.8755 1.5708 1.2661 0.9273 0.451 ]
Source code in tinygrad/tensor.py
def acos(self) -> Tensor:
  """
  Computes the inverse cosine (arccosine) of the tensor element-wise.

  ```python exec="true" source="above" session="tensor" result="python"
  print(Tensor([-0.9, -0.6, -0.3, 0., 0.3, 0.6, 0.9]).acos().numpy())
  ```
  """
  return math.pi / 2 - self.asin()

atan

atan() -> Tensor

Computes the inverse tangent (arctan) of the tensor element-wise.

print(Tensor([-3., -2., -1., 0., 1., 2., 3.]).atan().numpy())
[-1.249  -1.1071 -0.7854  0.      0.7854  1.1071  1.249 ]
Source code in tinygrad/tensor.py
def atan(self) -> Tensor:
  """
  Computes the inverse tangent (arctan) of the tensor element-wise.

  ```python exec="true" source="above" session="tensor" result="python"
  print(Tensor([-3., -2., -1., 0., 1., 2., 3.]).atan().numpy())
  ```
  """
  return (self / (1 + self * self).sqrt()).asin()

trunc

trunc() -> Tensor

Truncates the tensor element-wise.

print(Tensor([-3.5, -2.5, -1.5, -0.5, 0.5, 1.5, 2.5, 3.5]).trunc().numpy())
[-3. -2. -1.  0.  0.  1.  2.  3.]
Source code in tinygrad/tensor.py
def trunc(self: Tensor) -> Tensor:
  """
  Truncates the tensor element-wise.

  ```python exec="true" source="above" session="tensor" result="python"
  print(Tensor([-3.5, -2.5, -1.5, -0.5, 0.5, 1.5, 2.5, 3.5]).trunc().numpy())
  ```
  """
  return self.cast(dtypes.int32).cast(self.dtype)

ceil

ceil() -> Tensor

Rounds the tensor element-wise towards positive infinity.

print(Tensor([-3.5, -2.5, -1.5, -0.5, 0.5, 1.5, 2.5, 3.5]).ceil().numpy())
[-3. -2. -1.  0.  1.  2.  3.  4.]
Source code in tinygrad/tensor.py
def ceil(self: Tensor) -> Tensor:
  """
  Rounds the tensor element-wise towards positive infinity.

  ```python exec="true" source="above" session="tensor" result="python"
  print(Tensor([-3.5, -2.5, -1.5, -0.5, 0.5, 1.5, 2.5, 3.5]).ceil().numpy())
  ```
  """
  return (self > (b := self.trunc())).where(b+1, b)

floor

floor() -> Tensor

Rounds the tensor element-wise towards negative infinity.

print(Tensor([-3.5, -2.5, -1.5, -0.5, 0.5, 1.5, 2.5, 3.5]).floor().numpy())
[-4. -3. -2. -1.  0.  1.  2.  3.]
Source code in tinygrad/tensor.py
def floor(self: Tensor) -> Tensor:
  """
  Rounds the tensor element-wise towards negative infinity.

  ```python exec="true" source="above" session="tensor" result="python"
  print(Tensor([-3.5, -2.5, -1.5, -0.5, 0.5, 1.5, 2.5, 3.5]).floor().numpy())
  ```
  """
  return (self < (b := self.trunc())).where(b-1, b)

round

round() -> Tensor

Rounds the tensor element-wise with rounding half to even.

print(Tensor([-3.5, -2.5, -1.5, -0.5, 0.5, 1.5, 2.5, 3.5]).round().numpy())
[-4. -2. -2.  0.  0.  2.  2.  4.]
Source code in tinygrad/tensor.py
def round(self: Tensor) -> Tensor:
  """
  Rounds the tensor element-wise with rounding half to even.

  ```python exec="true" source="above" session="tensor" result="python"
  print(Tensor([-3.5, -2.5, -1.5, -0.5, 0.5, 1.5, 2.5, 3.5]).round().numpy())
  ```
  """
  return ((self > 0) == ((b := self.cast(dtypes.int32) / 2.0).cast(dtypes.int32) == b)).where((self - 0.5).ceil(), (self + 0.5).floor())
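
The condition couples the input's sign with a parity test on the truncated value; that pairing is what sends exact halves to the even neighbor. NumPy's `np.round` uses the same half-to-even rule, so it can serve as a quick cross-check (a sketch, assuming NumPy is installed):

```python
import numpy as np
from tinygrad import Tensor

vals = [-2.5, -1.5, -0.5, 0.5, 1.5, 2.5]
print(Tensor(vals).round().numpy())  # halves go to the even neighbor
print(np.round(np.array(vals)))      # NumPy rounds half to even as well
```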

isinf

isinf(
    detect_positive: bool = True,
    detect_negative: bool = True,
) -> Tensor

Checks the tensor element-wise, returning True where the element is infinity and False otherwise.

print(Tensor([1, float('inf'), 2, float('-inf'), float('nan')]).isinf().numpy())
[False  True False  True False]
Source code in tinygrad/tensor.py
def isinf(self:Tensor, detect_positive:bool=True, detect_negative:bool=True) -> Tensor:
  """
  Checks the tensor element-wise to return True where the element is infinity, otherwise returns False

  ```python exec="true" source="above" session="tensor" result="python"
  print(Tensor([1, float('inf'), 2, float('-inf'), float('nan')]).isinf().numpy())
  ```
  """
  return (self == float("inf")) * detect_positive + (self == float("-inf")) * detect_negative

isnan

isnan() -> Tensor

Checks the tensor element-wise, returning True where the element is NaN and False otherwise.

print(Tensor([1, float('inf'), 2, float('-inf'), float('nan')]).isnan().numpy())
[False False False False  True]
Source code in tinygrad/tensor.py
def isnan(self:Tensor) -> Tensor:
  """
  Checks the tensor element-wise to return True where the element is NaN, otherwise returns False

  ```python exec="true" source="above" session="tensor" result="python"
  print(Tensor([1, float('inf'), 2, float('-inf'), float('nan')]).isnan().numpy())
  ```
  """
  return self != self

isfinite

isfinite() -> Tensor

Checks the tensor element-wise, returning True where the element is finite and False otherwise.

print(Tensor([1, float('inf'), 2, float('-inf'), float('nan')]).isfinite().numpy())
[ True False  True False False]
Source code in tinygrad/tensor.py
def isfinite(self:Tensor) -> Tensor:
  """
  Checks the tensor element-wise to return True where the element is finite, otherwise returns False

  ```python exec="true" source="above" session="tensor" result="python"
  print(Tensor([1, float('inf'), 2, float('-inf'), float('nan')]).isfinite().numpy())
  ```
  """
  return (self.isinf()|self.isnan()).logical_not()

lerp

lerp(end: Tensor, weight: Tensor | float) -> Tensor

Linearly interpolates between self and end by weight.

print(Tensor([1., 2., 3.]).lerp(Tensor([4., 5., 6.]), 0.5).numpy())
[2.5 3.5 4.5]
Source code in tinygrad/tensor.py
def lerp(self, end:Tensor, weight:Tensor|float) -> Tensor:
  """
  Linearly interpolates between `self` and `end` by `weight`.

  ```python exec="true" source="above" session="tensor" result="python"
  print(Tensor([1., 2., 3.]).lerp(Tensor([4., 5., 6.]), 0.5).numpy())
  ```
  """
  if self.dtype == dtypes.uint8 and isinstance(weight, Tensor):
    w_i = (weight * (1<<(W_PREC:=7)) + 0.5).cast(dtypes.int16)
    return (self+(((end - self).cast(dtypes.int8) * w_i + (1<<W_PREC-1)).cast(dtypes.uint16) >> W_PREC)).cast(dtypes.uint8)
  return self + (end - self) * weight
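
The `uint8` branch avoids intermediate floats by working in fixed point with `W_PREC = 7` fractional bits: the weight becomes an integer, half an ULP is added to round the product, and a right shift scales back down. The same arithmetic on one scalar (a plain-Python sketch, my own illustration):

```python
W_PREC = 7                            # fractional bits, as in the uint8 path above
a, b, w = 10, 20, 0.25                # exact lerp is 10 + (20-10)*0.25 = 12.5
w_i = int(w * (1 << W_PREC) + 0.5)    # weight in fixed point: 32
out = a + (((b - a) * w_i + (1 << (W_PREC - 1))) >> W_PREC)  # +64 rounds to nearest
print(out)                            # 13
```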

square

square() -> Tensor

Squares the tensor element-wise. Equivalent to self*self.

print(Tensor([-3., -2., -1., 0., 1., 2., 3.]).square().numpy())
[9. 4. 1. 0. 1. 4. 9.]
Source code in tinygrad/tensor.py
def square(self) -> Tensor:
  """
  Squares the tensor element-wise.
  Equivalent to `self*self`.

  ```python exec="true" source="above" session="tensor" result="python"
  print(Tensor([-3., -2., -1., 0., 1., 2., 3.]).square().numpy())
  ```
  """
  return self*self

clamp

clamp(min_=None, max_=None) -> Tensor

Clips (clamps) the values in the tensor between min_ and max_ element-wise. If min_ is None, there is no lower bound. If max_ is None, there is no upper bound.

print(Tensor([-3., -2., -1., 0., 1., 2., 3.]).clip(-1, 1).numpy())
[-1. -1. -1.  0.  1.  1.  1.]
Source code in tinygrad/tensor.py
def clamp(self, min_=None, max_=None) -> Tensor:
  """
  Clips (clamps) the values in the tensor between `min_` and `max_` element-wise.
  If `min_` is `None`, there is no lower bound. If `max_` is None, there is no upper bound.

  ```python exec="true" source="above" session="tensor" result="python"
  print(Tensor([-3., -2., -1., 0., 1., 2., 3.]).clip(-1, 1).numpy())
  ```
  """
  if min_ is None and max_ is None: raise RuntimeError("at least one of 'min_' or 'max_' must not be None")
  ret = self.maximum(min_) if min_ is not None else self
  return ret.minimum(max_) if max_ is not None else ret

clip

clip(min_=None, max_=None) -> Tensor

Alias for Tensor.clamp.

Source code in tinygrad/tensor.py
def clip(self, min_=None, max_=None) -> Tensor:
  """
  Alias for `Tensor.clamp`.
  """
  return self.clamp(min_, max_)

sign

sign() -> Tensor

Returns the sign of the tensor element-wise.

print(Tensor([-3., -2., -1., 0., 1., 2., 3.]).sign().numpy())
[-1. -1. -1.  0.  1.  1.  1.]
Source code in tinygrad/tensor.py
def sign(self) -> Tensor:
  """
  Returns the sign of the tensor element-wise.

  ```python exec="true" source="above" session="tensor" result="python"
  print(Tensor([-3., -2., -1., 0., 1., 2., 3.]).sign().numpy())
  ```
  """
  return self.ne(0).where((self<0).where(self.full_like(-1), self.full_like(1)), self.full_like(0)) + self*0

abs

abs() -> Tensor

Computes the absolute value of the tensor element-wise.

print(Tensor([-3., -2., -1., 0., 1., 2., 3.]).abs().numpy())
[3. 2. 1. 0. 1. 2. 3.]
Source code in tinygrad/tensor.py
def abs(self) -> Tensor:
  """
  Computes the absolute value of the tensor element-wise.

  ```python exec="true" source="above" session="tensor" result="python"
  print(Tensor([-3., -2., -1., 0., 1., 2., 3.]).abs().numpy())
  ```
  """
  return self * self.sign()

reciprocal

reciprocal() -> Tensor

Computes 1/x element-wise.

print(Tensor([1., 2., 3., 4.]).reciprocal().numpy())
[1.     0.5    0.3333 0.25  ]
Source code in tinygrad/tensor.py
def reciprocal(self) -> Tensor:
  """
  Computes `1/x` element-wise.

  ```python exec="true" source="above" session="tensor" result="python"
  print(Tensor([1., 2., 3., 4.]).reciprocal().numpy())
  ```
  """
  return self.cast(least_upper_float(self.dtype))._apply_uop(UOp.reciprocal)

Unary Ops (activation)

relu

relu() -> Tensor

Applies the Rectified Linear Unit (ReLU) function element-wise.

print(Tensor([-3., -2., -1., 0., 1., 2., 3.]).relu().numpy())
[0. 0. 0. 0. 1. 2. 3.]
Source code in tinygrad/tensor.py
def relu(self) -> Tensor:
  """
  Applies the Rectified Linear Unit (ReLU) function element-wise.

  - Described: https://paperswithcode.com/method/relu

  ```python exec="true" source="above" session="tensor" result="python"
  print(Tensor([-3., -2., -1., 0., 1., 2., 3.]).relu().numpy())
  ```
  """
  # NOTE: if you write this as self.maximum(0) the gradient is wrong, passing through half when self is 0
  return (self>0).where(self, 0)

sigmoid

sigmoid() -> Tensor

Applies the Sigmoid function element-wise.

print(Tensor([-3., -2., -1., 0., 1., 2., 3.]).sigmoid().numpy())
[0.0474 0.1192 0.2689 0.5    0.7311 0.8808 0.9526]
Source code in tinygrad/tensor.py
def sigmoid(self) -> Tensor:
  """
  Applies the Sigmoid function element-wise.

  - Described: https://en.wikipedia.org/wiki/Sigmoid_function

  ```python exec="true" source="above" session="tensor" result="python"
  print(Tensor([-3., -2., -1., 0., 1., 2., 3.]).sigmoid().numpy())
  ```
  """
  return (1 + (self * (-1/math.log(2))).exp2()).reciprocal()
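
The usual form `1/(1 + e^(-x))` is rewritten through `exp2`, using the identity `e^(-x) = 2^(-x/ln 2)`. A scalar check (sketch):

```python
import math

x = 1.0
via_exp2 = 1.0 / (1.0 + 2.0 ** (x * (-1.0 / math.log(2))))
print(via_exp2, 1.0 / (1.0 + math.exp(-x)))  # both ~0.7311
```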

logsigmoid

logsigmoid() -> Tensor

Applies the LogSigmoid function element-wise.

print(Tensor([-3., -2., -1., 0., 1., 2., 3.]).logsigmoid().numpy())
[-3.0486 -2.1269 -1.3133 -0.6931 -0.3133 -0.1269 -0.0486]
Source code in tinygrad/tensor.py
def logsigmoid(self) -> Tensor:
  """
  Applies the LogSigmoid function element-wise.

  - See: https://docs.pytorch.org/docs/stable/generated/torch.nn.functional.logsigmoid.html

  ```python exec="true" source="above" session="tensor" result="python"
  print(Tensor([-3., -2., -1., 0., 1., 2., 3.]).logsigmoid().numpy())
  ```
  """
  return -(-self).softplus()

hardsigmoid

hardsigmoid(
    alpha: float = 1 / 6, beta: float = 0.5
) -> Tensor

Applies the Hardsigmoid function element-wise. NOTE: the default alpha and beta values are taken from torch.

print(Tensor([-3., -2., -1., 0., 1., 2., 3.]).hardsigmoid().numpy())
[0.     0.1667 0.3333 0.5    0.6667 0.8333 1.    ]
Source code in tinygrad/tensor.py
def hardsigmoid(self, alpha:float=1/6, beta:float=0.5) -> Tensor:
  """
  Applies the Hardsigmoid function element-wise.
  NOTE: default `alpha` and `beta` values are taken from torch

  - Described: https://paperswithcode.com/method/hard-sigmoid
  - See: https://pytorch.org/docs/stable/generated/torch.nn.functional.hardsigmoid.html

  ```python exec="true" source="above" session="tensor" result="python"
  print(Tensor([-3., -2., -1., 0., 1., 2., 3.]).hardsigmoid().numpy())
  ```
  """
  return (alpha * self + beta).relu() - (alpha * self + beta - 1).relu()

elu

elu(alpha=1.0) -> Tensor

Applies the Exponential Linear Unit (ELU) function element-wise.

print(Tensor([-3., -2., -1., 0., 1., 2., 3.]).elu().numpy())
[-0.9502 -0.8647 -0.6321  0.      1.      2.      3.    ]
Source code in tinygrad/tensor.py
def elu(self, alpha=1.0) -> Tensor:
  """
  Applies the Exponential Linear Unit (ELU) function element-wise.

  - Described: https://paperswithcode.com/method/elu
  - Paper: https://arxiv.org/abs/1511.07289v5

  ```python exec="true" source="above" session="tensor" result="python"
  print(Tensor([-3., -2., -1., 0., 1., 2., 3.]).elu().numpy())
  ```
  """
  return self.relu() - alpha*(1-self.exp()).relu()

celu

celu(alpha=1.0) -> Tensor

Applies the Continuously differentiable Exponential Linear Unit (CELU) function element-wise.

print(Tensor([-3., -2., -1., 0., 1., 2., 3.]).celu().numpy())
[-0.9502 -0.8647 -0.6321  0.      1.      2.      3.    ]
Source code in tinygrad/tensor.py
def celu(self, alpha=1.0) -> Tensor:
  """
  Applies the Continuously differentiable Exponential Linear Unit (CELU) function element-wise.

  - Described: https://paperswithcode.com/method/celu
  - Paper: https://arxiv.org/abs/1704.07483

  ```python exec="true" source="above" session="tensor" result="python"
  print(Tensor([-3., -2., -1., 0., 1., 2., 3.]).celu().numpy())
  ```
  """
  return self.maximum(0) + (alpha * ((self / alpha).exp() - 1)).minimum(0)

selu

selu(alpha=1.67326, gamma=1.0507) -> Tensor

Applies the Scaled Exponential Linear Unit (SELU) function element-wise.

print(Tensor([-3., -2., -1., 0., 1., 2., 3.]).selu().numpy())
[-1.6706 -1.5202 -1.1113  0.      1.0507  2.1014  3.1521]
Source code in tinygrad/tensor.py
def selu(self, alpha=1.67326, gamma=1.0507) -> Tensor:
  """
  Applies the Scaled Exponential Linear Unit (SELU) function element-wise.

  - Described: https://paperswithcode.com/method/selu
  - Paper: https://arxiv.org/abs/1706.02515v5

  ```python exec="true" source="above" session="tensor" result="python"
  print(Tensor([-3., -2., -1., 0., 1., 2., 3.]).selu().numpy())
  ```
  """
  return gamma * (self >= 0).detach().where(self, alpha * (self.exp() - 1))

swish

swish() -> Tensor

See .silu()

print(Tensor([-3., -2., -1., 0., 1., 2., 3.]).swish().numpy())
[-0.1423 -0.2384 -0.2689  0.      0.7311  1.7616  2.8577]
Source code in tinygrad/tensor.py
def swish(self) -> Tensor:
  """
  See `.silu()`

  - Paper: https://arxiv.org/abs/1710.05941v1

  ```python exec="true" source="above" session="tensor" result="python"
  print(Tensor([-3., -2., -1., 0., 1., 2., 3.]).swish().numpy())
  ```
  """
  return self * self.sigmoid()

silu

silu() -> Tensor

Applies the Sigmoid Linear Unit (SiLU) function element-wise.

print(Tensor([-3., -2., -1., 0., 1., 2., 3.]).silu().numpy())
[-0.1423 -0.2384 -0.2689  0.      0.7311  1.7616  2.8577]
Source code in tinygrad/tensor.py
def silu(self) -> Tensor:
  """
  Applies the Sigmoid Linear Unit (SiLU) function element-wise.

  - Described: https://paperswithcode.com/method/silu
  - Paper: https://arxiv.org/abs/1606.08415

  ```python exec="true" source="above" session="tensor" result="python"
  print(Tensor([-3., -2., -1., 0., 1., 2., 3.]).silu().numpy())
  ```
  """
  return self.swish()   # The SiLU function is also known as the swish function.

relu6

relu6() -> Tensor

Applies the ReLU6 function element-wise.

print(Tensor([-9., -6., -3., 0., 3., 6., 9.]).relu6().numpy())
[0. 0. 0. 0. 3. 6. 6.]
Source code in tinygrad/tensor.py
def relu6(self) -> Tensor:
  """
  Applies the ReLU6 function element-wise.

  - Described: https://paperswithcode.com/method/relu6
  - Paper: https://arxiv.org/abs/1704.04861v1

  ```python exec="true" source="above" session="tensor" result="python"
  print(Tensor([-9., -6., -3., 0., 3., 6., 9.]).relu6().numpy())
  ```
  """
  return self.relu() - (self-6).relu()

hardswish

hardswish() -> Tensor

Applies the Hardswish function element-wise.

print(Tensor([-3., -2., -1., 0., 1., 2., 3.]).hardswish().numpy())
[-0.     -0.3333 -0.3333  0.      0.6667  1.6667  3.    ]
Source code in tinygrad/tensor.py
def hardswish(self) -> Tensor:
  """
  Applies the Hardswish function element-wise.

  - Described: https://paperswithcode.com/method/hard-swish
  - Paper: https://arxiv.org/abs/1905.02244v5

  ```python exec="true" source="above" session="tensor" result="python"
  print(Tensor([-3., -2., -1., 0., 1., 2., 3.]).hardswish().numpy())
  ```
  """
  return self * (self+3).relu6() * (1/6)

tanh

tanh() -> Tensor

Applies the Hyperbolic Tangent (tanh) function element-wise.

print(Tensor([-3., -2., -1., 0., 1., 2., 3.]).tanh().numpy())
[-0.9951 -0.964  -0.7616  0.      0.7616  0.964   0.9951]
Source code in tinygrad/tensor.py
def tanh(self) -> Tensor:
  """
  Applies the Hyperbolic Tangent (tanh) function element-wise.

  - Described: https://en.wikipedia.org/wiki/Hyperbolic_functions#Tanh

  ```python exec="true" source="above" session="tensor" result="python"
  print(Tensor([-3., -2., -1., 0., 1., 2., 3.]).tanh().numpy())
  ```
  """
  return 2.0 * ((2.0 * self).sigmoid()) - 1.0
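
tanh is built on sigmoid through the identity `tanh(x) = 2*sigmoid(2x) - 1`. A scalar check (sketch):

```python
import math

sigmoid = lambda z: 1.0 / (1.0 + math.exp(-z))
x = 1.0
print(2.0 * sigmoid(2.0 * x) - 1.0, math.tanh(x))  # both ~0.7616
```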

sinh

sinh() -> Tensor

Applies the Hyperbolic Sine (sinh) function element-wise.

print(Tensor([-3., -2., -1., 0., 1., 2., 3.]).sinh().numpy())
[-10.0179  -3.6269  -1.1752   0.       1.1752   3.6269  10.0179]
Source code in tinygrad/tensor.py
def sinh(self) -> Tensor:
  """
  Applies the Hyperbolic Sine (sinh) function element-wise.

  - Described: https://en.wikipedia.org/wiki/Hyperbolic_functions#Sinh

  ```python exec="true" source="above" session="tensor" result="python"
  print(Tensor([-3., -2., -1., 0., 1., 2., 3.]).sinh().numpy())
  ```
  """
  return (self.exp() - self.neg().exp()) / 2

cosh

cosh() -> Tensor

Applies the Hyperbolic Cosine (cosh) function element-wise.

print(Tensor([-3., -2., -1., 0., 1., 2., 3.]).cosh().numpy())
[10.0677  3.7622  1.5431  1.      1.5431  3.7622 10.0677]
Source code in tinygrad/tensor.py
def cosh(self) -> Tensor:
  """
  Applies the Hyperbolic Cosine (cosh) function element-wise.

  - Described: https://en.wikipedia.org/wiki/Hyperbolic_functions#Cosh

  ```python exec="true" source="above" session="tensor" result="python"
  print(Tensor([-3., -2., -1., 0., 1., 2., 3.]).cosh().numpy())
  ```
  """
  return (self.exp() + self.neg().exp()) / 2

atanh

atanh() -> Tensor

Applies the Inverse Hyperbolic Tangent (atanh) function element-wise.

print(Tensor([-0.9, -0.6, -0.3, 0., 0.3, 0.6, 0.9]).atanh().numpy())
[-1.4722 -0.6931 -0.3095  0.      0.3095  0.6931  1.4722]
Source code in tinygrad/tensor.py
def atanh(self) -> Tensor:
  """
  Applies the Inverse Hyperbolic Tangent (atanh) function element-wise.

  - Described: https://en.wikipedia.org/wiki/Inverse_hyperbolic_functions#atanh

  ```python exec="true" source="above" session="tensor" result="python"
  print(Tensor([-0.9, -0.6, -0.3, 0., 0.3, 0.6, 0.9]).atanh().numpy())
  ```
  """
  return ((1 + self)/(1 - self)).log() / 2

asinh

asinh() -> Tensor

Applies the Inverse Hyperbolic Sine (asinh) function element-wise.

print(Tensor([-3., -2., -1., 0., 1., 2., 3.]).asinh().numpy())
[-1.8184 -1.4436 -0.8814  0.      0.8814  1.4436  1.8184]
Source code in tinygrad/tensor.py
def asinh(self) -> Tensor:
  """
  Applies the Inverse Hyperbolic Sine (asinh) function element-wise.

  - Described: https://en.wikipedia.org/wiki/Inverse_hyperbolic_functions#asinh

  ```python exec="true" source="above" session="tensor" result="python"
  print(Tensor([-3., -2., -1., 0., 1., 2., 3.]).asinh().numpy())
  ```
  """
  return (self + (self.square() + 1).sqrt()).log()

acosh

acosh() -> Tensor

Applies the Inverse Hyperbolic Cosine (acosh) function element-wise.

print(Tensor([-3., -2., -1., 0., 1., 2., 3.]).acosh().numpy())
[   nan    nan    nan    nan 0.     1.317  1.7627]
Source code in tinygrad/tensor.py
def acosh(self) -> Tensor:
  """
  Applies the Inverse Hyperbolic Cosine (acosh) function element-wise.

  - Described: https://en.wikipedia.org/wiki/Inverse_hyperbolic_functions#acosh

  ```python exec="true" source="above" session="tensor" result="python"
  print(Tensor([-3., -2., -1., 0., 1., 2., 3.]).acosh().numpy())
  ```
  """
  return (self + (self.square() - 1).sqrt()).log()

hardtanh

hardtanh(min_val=-1, max_val=1) -> Tensor

Applies the Hardtanh function element-wise.

print(Tensor([-1.5, -1.0, -0.5, 0., 0.5, 1.0, 1.5]).hardtanh().numpy())
[-1.  -1.  -0.5  0.   0.5  1.   1. ]
Source code in tinygrad/tensor.py
def hardtanh(self, min_val=-1, max_val=1) -> Tensor:
  """
  Applies the Hardtanh function element-wise.

  - Described: https://paperswithcode.com/method/hardtanh-activation

  ```python exec="true" source="above" session="tensor" result="python"
  print(Tensor([-1.5, -1.0, -0.5, 0., 0.5, 1.0, 1.5]).hardtanh().numpy())
  ```
  """
  return self.clip(min_val, max_val)

erf

erf() -> Tensor

Applies error function element-wise.

print(Tensor([-1.5, -1.0, -0.5, 0., 0.5, 1.0, 1.5]).erf().numpy())
[-0.9661 -0.8427 -0.5205  0.      0.5205  0.8427  0.9661]
Source code in tinygrad/tensor.py
def erf(self) -> Tensor:
  """
  Applies error function element-wise.

  - Described: https://en.wikipedia.org/wiki/Error_function

  ```python exec="true" source="above" session="tensor" result="python"
  print(Tensor([-1.5, -1.0, -0.5, 0., 0.5, 1.0, 1.5]).erf().numpy())
  ```
  """
  # https://personal.math.ubc.ca/~cbm/aands/page_299.htm 7.1.26
  t = 1.0 / (1.0 + 0.3275911 * self.abs())
  return self.sign() * (1.0 - t * polyN(t, [1.061405429, -1.453152027, 1.421413741, -0.284496736, 0.254829592]) * (-self.square()).exp())
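
A scalar check of the 7.1.26 approximation against `math.erf`, with the `polyN` call expanded into explicit Horner form (plain-Python sketch):

```python
import math

x = 0.5
t = 1.0 / (1.0 + 0.3275911 * abs(x))
poly = ((((1.061405429*t - 1.453152027)*t + 1.421413741)*t - 0.284496736)*t + 0.254829592)
approx = math.copysign(1.0 - t * poly * math.exp(-x*x), x)
print(approx, math.erf(x))  # both ~0.5205
```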

gelu

gelu() -> Tensor

Applies the Gaussian Error Linear Unit (GELU) function element-wise.

print(Tensor([-3., -2., -1., 0., 1., 2., 3.]).gelu().numpy())
[-0.0036 -0.0454 -0.1588  0.      0.8412  1.9546  2.9964]
Source code in tinygrad/tensor.py
def gelu(self) -> Tensor:
  """
  Applies the Gaussian Error Linear Unit (GELU) function element-wise.

  - Described: https://paperswithcode.com/method/gelu
  - Paper: https://arxiv.org/abs/1606.08415v5

  ```python exec="true" source="above" session="tensor" result="python"
  print(Tensor([-3., -2., -1., 0., 1., 2., 3.]).gelu().numpy())
  ```
  """
  return 0.5 * self * (1 + (math.sqrt(2 / math.pi) * (self + 0.044715 * self ** 3)).tanh())

quick_gelu

quick_gelu() -> Tensor

Applies the Sigmoid GELU approximation element-wise.

print(Tensor([-3., -2., -1., 0., 1., 2., 3.]).quick_gelu().numpy())
[-0.0181 -0.0643 -0.1542  0.      0.8458  1.9357  2.9819]
Source code in tinygrad/tensor.py
def quick_gelu(self) -> Tensor:
  """
  Applies the Sigmoid GELU approximation element-wise.

  - Described: https://paperswithcode.com/method/gelu

  ```python exec="true" source="above" session="tensor" result="python"
  print(Tensor([-3., -2., -1., 0., 1., 2., 3.]).quick_gelu().numpy())
  ```
  """
  return self * (self * 1.702).sigmoid()

leaky_relu

leaky_relu(neg_slope=0.01) -> Tensor

Applies the Leaky ReLU function element-wise.

print(Tensor([-3., -2., -1., 0., 1., 2., 3.]).leaky_relu().numpy())
[-0.03 -0.02 -0.01  0.    1.    2.    3.  ]
print(Tensor([-3., -2., -1., 0., 1., 2., 3.]).leaky_relu(neg_slope=0.42).numpy())
[-1.26 -0.84 -0.42  0.    1.    2.    3.  ]

Source code in tinygrad/tensor.py
def leaky_relu(self, neg_slope=0.01) -> Tensor:
  """
  Applies the Leaky ReLU function element-wise.

  - Described: https://paperswithcode.com/method/leaky-relu

  ```python exec="true" source="above" session="tensor" result="python"
  print(Tensor([-3., -2., -1., 0., 1., 2., 3.]).leaky_relu().numpy())
  ```
  ```python exec="true" source="above" session="tensor" result="python"
  print(Tensor([-3., -2., -1., 0., 1., 2., 3.]).leaky_relu(neg_slope=0.42).numpy())
  ```
  """
  return (self<0).where(neg_slope*self, self)

mish

mish() -> Tensor

Applies the Mish function element-wise.

print(Tensor([-3., -2., -1., 0., 1., 2., 3.]).mish().numpy())
[-0.1456 -0.2525 -0.3034  0.      0.8651  1.944   2.9865]
Source code in tinygrad/tensor.py
def mish(self) -> Tensor:
  """
  Applies the Mish function element-wise.

  - Described: https://paperswithcode.com/method/mish
  - Paper: https://arxiv.org/abs/1908.08681v3

  ```python exec="true" source="above" session="tensor" result="python"
  print(Tensor([-3., -2., -1., 0., 1., 2., 3.]).mish().numpy())
  ```
  """
  return self * self.softplus().tanh()

softplus

softplus(beta=1) -> Tensor

Applies the Softplus function element-wise.

print(Tensor([-3., -2., -1., 0., 1., 2., 3.]).softplus().numpy())
[0.0486 0.1269 0.3133 0.6931 1.3133 2.1269 3.0486]
Source code in tinygrad/tensor.py
def softplus(self, beta=1) -> Tensor:
  """
  Applies the Softplus function element-wise.

  - Described: https://paperswithcode.com/method/softplus

  ```python exec="true" source="above" session="tensor" result="python"
  print(Tensor([-3., -2., -1., 0., 1., 2., 3.]).softplus().numpy())
  ```
  """
  return (1/beta) * (1 + (self*beta).exp()).log()

softsign

softsign() -> Tensor

Applies the Softsign function element-wise.

print(Tensor([-3., -2., -1., 0., 1., 2., 3.]).softsign().numpy())
[-0.75   -0.6667 -0.5     0.      0.5     0.6667  0.75  ]
Source code in tinygrad/tensor.py
def softsign(self) -> Tensor:
  """
  Applies the Softsign function element-wise.

  - Described: https://paperswithcode.com/method/softsign

  ```python exec="true" source="above" session="tensor" result="python"
  print(Tensor([-3., -2., -1., 0., 1., 2., 3.]).softsign().numpy())
  ```
  """
  return self / (1 + self.abs())

Elementwise Ops (broadcasted)

add

add(x, reverse=False)

Adds self and x. Equivalent to self + x. Supports broadcasting to a common shape, type promotion, and integer, float, boolean inputs.

Tensor.manual_seed(42)
t = Tensor.randn(4)
print(t.numpy())
[-0.5144  1.085   0.9089 -0.0841]
print(t.add(20).numpy())
[19.4856 21.085  20.9089 19.9159]
print(t.add(Tensor([[2.0], [3.5]])).numpy())
[[1.4856 3.085  2.9089 1.9159]
 [2.9856 4.585  4.4089 3.4159]]

Source code in tinygrad/uop/mathtraits.py
def add(self, x, reverse=False):
  """
  Adds `self` and `x`.
  Equivalent to `self + x`.
  Supports broadcasting to a common shape, type promotion, and integer, float, boolean inputs.
  ```python exec="true" source="above" session="tensor" result="python"
  Tensor.manual_seed(42)
  t = Tensor.randn(4)
  print(t.numpy())
  ```
  ```python exec="true" source="above" session="tensor" result="python"
  print(t.add(20).numpy())
  ```
  ```python exec="true" source="above" session="tensor" result="python"
  print(t.add(Tensor([[2.0], [3.5]])).numpy())
  ```
  """
  return self._binop(Ops.ADD, x, reverse)

sub

sub(x: Tensor | ConstType, reverse=False) -> Tensor

Subtracts x from self. Equivalent to self - x. Supports broadcasting to a common shape, type promotion, and integer, float, boolean inputs.

Tensor.manual_seed(42)
t = Tensor.randn(4)
print(t.numpy())
[-0.5144  1.085   0.9089 -0.0841]
print(t.sub(20).numpy())
[-20.5144 -18.915  -19.0911 -20.0841]
print(t.sub(Tensor([[2.0], [3.5]])).numpy())
[[-2.5144 -0.915  -1.0911 -2.0841]
 [-4.0144 -2.415  -2.5911 -3.5841]]

Source code in tinygrad/tensor.py
def sub(self, x:Tensor|ConstType, reverse=False) -> Tensor:
  """
  Subtracts `x` from `self`.
  Equivalent to `self - x`.
  Supports broadcasting to a common shape, type promotion, and integer, float, boolean inputs.

  ```python exec="true" source="above" session="tensor" result="python"
  Tensor.manual_seed(42)
  t = Tensor.randn(4)
  print(t.numpy())
  ```
  ```python exec="true" source="above" session="tensor" result="python"
  print(t.sub(20).numpy())
  ```
  ```python exec="true" source="above" session="tensor" result="python"
  print(t.sub(Tensor([[2.0], [3.5]])).numpy())
  ```
  """
  a, b = self._broadcasted(x, reverse)
  return a + (-b)

mul

mul(x, reverse=False)

Multiplies self and x. Equivalent to self * x. Supports broadcasting to a common shape, type promotion, and integer, float, boolean inputs.

Tensor.manual_seed(42)
t = Tensor.randn(4)
print(t.numpy())
[-0.5144  1.085   0.9089 -0.0841]
print(t.mul(3).numpy())
[-1.5431  3.2549  2.7267 -0.2523]
print(t.mul(Tensor([[-1.0], [2.0]])).numpy())
[[ 0.5144 -1.085  -0.9089  0.0841]
 [-1.0287  2.17    1.8178 -0.1682]]

Source code in tinygrad/uop/mathtraits.py
def mul(self, x, reverse=False):
  """
  Multiplies `self` and `x`.
  Equivalent to `self * x`.
  Supports broadcasting to a common shape, type promotion, and integer, float, boolean inputs.

  ```python exec="true" source="above" session="tensor" result="python"
  Tensor.manual_seed(42)
  t = Tensor.randn(4)
  print(t.numpy())
  ```
  ```python exec="true" source="above" session="tensor" result="python"
  print(t.mul(3).numpy())
  ```
  ```python exec="true" source="above" session="tensor" result="python"
  print(t.mul(Tensor([[-1.0], [2.0]])).numpy())
  ```
  """
  return self._binop(Ops.MUL, x, reverse)

div

div(
    x: Tensor | ConstType,
    reverse=False,
    rounding_mode: Literal["trunc", "floor"] | None = None,
) -> Tensor

Divides self by x. Equivalent to self / x. Supports broadcasting to a common shape, type promotion, and integer, float, boolean inputs. div performs true division.

Tensor.manual_seed(42)
t = Tensor.randn(4)
print(t.numpy())
[-0.5144  1.085   0.9089 -0.0841]
print(t.div(3).numpy())
[-0.1715  0.3617  0.303  -0.028 ]
print(Tensor([1, 4, 10]).div(Tensor([2, 3, 4])).numpy())
[0.5    1.3333 2.5   ]

Source code in tinygrad/tensor.py
def div(self, x:Tensor|ConstType, reverse=False, rounding_mode:Literal["trunc", "floor"]|None=None) -> Tensor:
  """
  Divides `self` by `x`.
  Equivalent to `self / x`.
  Supports broadcasting to a common shape, type promotion, and integer, float, boolean inputs.
  `div` performs true division.

  ```python exec="true" source="above" session="tensor" result="python"
  Tensor.manual_seed(42)
  t = Tensor.randn(4)
  print(t.numpy())
  ```
  ```python exec="true" source="above" session="tensor" result="python"
  print(t.div(3).numpy())
  ```
  ```python exec="true" source="above" session="tensor" result="python"
  print(Tensor([1, 4, 10]).div(Tensor([2, 3, 4])).numpy())
  ```
  """
  numerator, denominator = self._broadcasted(x, reverse)
  d = numerator.cast(least_upper_float(numerator.dtype)) * denominator.cast(least_upper_float(denominator.dtype)).reciprocal()
  output_dtype = numerator.dtype if dtypes.is_int(numerator.dtype) else d.dtype
  if dtypes.is_int(dt:=least_upper_dtype(numerator.dtype, denominator.dtype)) and rounding_mode is not None:
    numerator, denominator = numerator.cast(dt), denominator.cast(dt)
    if rounding_mode == "trunc": return numerator.idiv(denominator)
    if rounding_mode == "floor":
      truncate_div, truncate_mod = numerator.idiv(denominator), numerator._apply_broadcasted_uop(UOp.mod, denominator)
      opposite_sign = ((numerator>0)&(denominator<0)) | ((numerator<0)&(denominator>0))
      return (opposite_sign&(truncate_mod!=0)).where(truncate_div-1, truncate_div)
  if rounding_mode == "trunc": return d.trunc().cast(output_dtype)
  if rounding_mode == "floor": return d.floor().cast(output_dtype)
  if rounding_mode is not None: raise RuntimeError(f"{rounding_mode=} is not supported")
  return d
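
The three modes only disagree when the quotient is not exact, which is easiest to see with mixed signs (a sketch using the API above):

```python
from tinygrad import Tensor

a, b = Tensor([7, -7, 7, -7]), Tensor([2, 2, -2, -2])
print(a.div(b).numpy())                          # true division: [ 3.5 -3.5 -3.5  3.5]
print(a.div(b, rounding_mode="trunc").numpy())   # toward zero:   [ 3 -3 -3  3]
print(a.div(b, rounding_mode="floor").numpy())   # toward -inf:   [ 3 -4 -4  3]
```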

idiv

idiv(x, reverse=False)

Divides self by x. Equivalent to self // x. Supports broadcasting to a common shape, type promotion, and integer inputs. idiv performs integer division (truncate towards zero).

print(Tensor([-4, 7, 5, 4, -7, 8]).idiv(Tensor([2, -3, 8, -2, 3, 5])).numpy())
[-2 -2  0 -2 -2  1]
Source code in tinygrad/uop/mathtraits.py
def idiv(self, x, reverse=False):
  """
  Divides `self` by `x`.
  Equivalent to `self // x`.
  Supports broadcasting to a common shape, type promotion, and integer inputs.
  `idiv` performs integer division (truncate towards zero).

  ```python exec="true" source="above" session="tensor" result="python"
  print(Tensor([-4, 7, 5, 4, -7, 8]).idiv(Tensor([2, -3, 8, -2, 3, 5])).numpy())
  ```
  """
  return self._binop(Ops.IDIV, x, reverse)

mod

mod(x: Tensor | ConstType, reverse=False) -> Tensor

Mod self by x. Equivalent to self % x. Supports broadcasting to a common shape, type promotion, and integer inputs.

print(Tensor([-4, 7, 5, 4, -7, 8]).mod(Tensor([2, -3, 8, -2, 3, 5])).numpy())
[ 0 -2  5  0  2  3]
Source code in tinygrad/tensor.py
def mod(self, x:Tensor|ConstType, reverse=False) -> Tensor:
  """
  Mod `self` by `x`.
  Equivalent to `self % x`.
  Supports broadcasting to a common shape, type promotion, and integer inputs.

  ```python exec="true" source="above" session="tensor" result="python"
  print(Tensor([-4, 7, 5, 4, -7, 8]).mod(Tensor([2, -3, 8, -2, 3, 5])).numpy())
  ```
  """
  a, b = self._broadcasted(x, reverse)
  return a - a.div(b, rounding_mode="floor") * b

bitwise_xor

bitwise_xor(x, reverse=False)

Computes bitwise xor of self and x. Equivalent to self ^ x. Supports broadcasting to a common shape, type promotion, and integer, boolean inputs.

print(Tensor([-1, -2, 3]).bitwise_xor(Tensor([1, 0, 3])).numpy())
[-2 -2  0]
print(Tensor([True, True, False, False]).bitwise_xor(Tensor([True, False, True, False])).numpy())
[False  True  True False]

Source code in tinygrad/uop/mathtraits.py
def bitwise_xor(self, x, reverse=False):
  """
  Computes bitwise xor of `self` and `x`.
  Equivalent to `self ^ x`.
  Supports broadcasting to a common shape, type promotion, and integer, boolean inputs.

  ```python exec="true" source="above" session="tensor" result="python"
  print(Tensor([-1, -2, 3]).bitwise_xor(Tensor([1, 0, 3])).numpy())
  ```
  ```python exec="true" source="above" session="tensor" result="python"
  print(Tensor([True, True, False, False]).bitwise_xor(Tensor([True, False, True, False])).numpy())
  ```
  """
  self._check_dtype()
  return self._binop(Ops.XOR, x, reverse)

bitwise_and

bitwise_and(x, reverse=False)

Computes the bitwise AND of self and x. Equivalent to self & x. Supports broadcasting to a common shape, type promotion, and integer, boolean inputs.

print(Tensor([2, 5, 255]).bitwise_and(Tensor([3, 14, 16])).numpy())
[ 2  4 16]
print(Tensor([True, True, False, False]).bitwise_and(Tensor([True, False, True, False])).numpy())
[ True False False False]

Source code in tinygrad/uop/mathtraits.py
def bitwise_and(self, x, reverse=False):
  """
  Computes the bitwise AND of `self` and `x`.
  Equivalent to `self & x`.
  Supports broadcasting to a common shape, type promotion, and integer, boolean inputs.
  ```python exec="true" source="above" session="tensor" result="python"
  print(Tensor([2, 5, 255]).bitwise_and(Tensor([3, 14, 16])).numpy())
  ```
  ```python exec="true" source="above" session="tensor" result="python"
  print(Tensor([True, True, False, False]).bitwise_and(Tensor([True, False, True, False])).numpy())
  ```
  """
  self._check_dtype()
  return self._binop(Ops.AND, x, reverse)

bitwise_or

bitwise_or(x, reverse=False)

Computes the bitwise OR of self and x. Equivalent to self | x. Supports broadcasting to a common shape, type promotion, and integer, boolean inputs.

print(Tensor([2, 5, 255]).bitwise_or(Tensor([4, 4, 4])).numpy())
[  6   5 255]
print(Tensor([True, True, False, False]).bitwise_or(Tensor([True, False, True, False])).numpy())
[ True  True  True False]

Source code in tinygrad/uop/mathtraits.py
def bitwise_or(self, x, reverse=False):
  """
  Computes the bitwise OR of `self` and `x`.
  Equivalent to `self | x`.
  Supports broadcasting to a common shape, type promotion, and integer, boolean inputs.
  ```python exec="true" source="above" session="tensor" result="python"
  print(Tensor([2, 5, 255]).bitwise_or(Tensor([4, 4, 4])).numpy())
  ```
  ```python exec="true" source="above" session="tensor" result="python"
  print(Tensor([True, True, False, False]).bitwise_or(Tensor([True, False, True, False])).numpy())
  ```
  """
  self._check_dtype()
  return self._binop(Ops.OR, x, reverse)

bitwise_not

bitwise_not() -> Tensor

Computes the bitwise NOT of self. Equivalent to ~self.

print(Tensor([0, 2, 5, 255], dtype="int8").bitwise_not().numpy())
[-1 -3 -6  0]
print(Tensor([True, False]).bitwise_not().numpy())
[False  True]

Source code in tinygrad/tensor.py
def bitwise_not(self) -> Tensor:
  """
  Computes the bitwise NOT of `self`.
  Equivalent to `~self`.
  ```python exec="true" source="above" session="tensor" result="python"
  print(Tensor([0, 2, 5, 255], dtype="int8").bitwise_not().numpy())
  ```
  ```python exec="true" source="above" session="tensor" result="python"
  print(Tensor([True, False]).bitwise_not().numpy())
  ```
  """
  if self.dtype != dtypes.bool and not dtypes.is_int(self.dtype): raise RuntimeError(f"{self.dtype} is not supported")
  return self.logical_not() if self.dtype == dtypes.bool else self ^ -1
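
Since integer NOT is implemented as `self ^ -1`, it obeys the two's-complement identity `~x == -x - 1` (a quick sketch):

```python
from tinygrad import Tensor

t = Tensor([0, 2, 5], dtype="int8")
print(t.bitwise_not().numpy())  # [-1 -3 -6]
print((-t - 1).numpy())         # identical: ~x == -x - 1 in two's complement
```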

lshift

lshift(x: int, reverse=False) -> Tensor

Computes left arithmetic shift of self by x bits. self must have unsigned dtype. Equivalent to self << x.

print(Tensor([1, 3, 31], dtype=dtypes.uint8).lshift(2).numpy())
[  4  12 124]
Source code in tinygrad/tensor.py
def lshift(self, x:int, reverse=False) -> Tensor:
  """
  Computes left arithmetic shift of `self` by `x` bits. `self` must have unsigned dtype.
  Equivalent to `self << x`.

  ```python exec="true" source="above" session="tensor" result="python"
  print(Tensor([1, 3, 31], dtype=dtypes.uint8).lshift(2).numpy())
  ```
  """
  assert dtypes.is_unsigned(self.dtype) and isinstance(x, int) and x >= 0 and not reverse, f"not supported {self.dtype=} {x=}"
  return self.mul(2 ** x, reverse)

rshift

rshift(x: int, reverse=False) -> Tensor

Computes right arithmetic shift of self by x bits. self must have unsigned dtype. Equivalent to self >> x.

print(Tensor([4, 13, 125], dtype=dtypes.uint8).rshift(2).numpy())
[ 1  3 31]
Source code in tinygrad/tensor.py
def rshift(self, x:int, reverse=False) -> Tensor:
  """
  Computes right arithmetic shift of `self` by `x` bits. `self` must have unsigned dtype.
  Equivalent to `self >> x`.

  ```python exec="true" source="above" session="tensor" result="python"
  print(Tensor([4, 13, 125], dtype=dtypes.uint8).rshift(2).numpy())
  ```
  """
  assert dtypes.is_unsigned(self.dtype) and isinstance(x, int) and x >= 0 and not reverse, f"not supported {self.dtype=} {x=}"
  return self.idiv(2 ** x, reverse)

pow

pow(x: Tensor | ConstType, reverse=False) -> Tensor

Computes power of self with x. Equivalent to self ** x.

print(Tensor([-1, 2, 3]).pow(2.0).numpy())
[1 4 9]
print(Tensor([-1, 2, 3]).pow(Tensor([-1.5, 0.5, 1.5])).numpy())
[-2147483648           1           5]
print((2.0 ** Tensor([-1, 2, 3])).numpy())
[0.5 4.  8. ]

Source code in tinygrad/tensor.py
def pow(self, x:Tensor|ConstType, reverse=False) -> Tensor:
  """
  Computes power of `self` with `x`.
  Equivalent to `self ** x`.

  ```python exec="true" source="above" session="tensor" result="python"
  print(Tensor([-1, 2, 3]).pow(2.0).numpy())
  ```
  ```python exec="true" source="above" session="tensor" result="python"
  print(Tensor([-1, 2, 3]).pow(Tensor([-1.5, 0.5, 1.5])).numpy())
  ```
  ```python exec="true" source="above" session="tensor" result="python"
  print((2.0 ** Tensor([-1, 2, 3])).numpy())
  ```
  """
  base, exponent = self._broadcasted(x, reverse=reverse)
  # TODO: int pow
  if not base.is_floating_point(): raise RuntimeError("base needs to be float")

  ret = base._apply_uop(UOp.pow, exponent)
  # NOTE: pow(int, float) -> int
  return ret.round().cast(self.dtype) if not reverse and not dtypes.is_float(self.dtype) else ret

maximum

maximum(x: Tensor | ConstType) -> Tensor

Computes element-wise maximum of self and x.

print(Tensor([-1, 2, 3]).maximum(1).numpy())
[1 2 3]
print(Tensor([-1, 2, 3]).maximum(Tensor([-4, -2, 9])).numpy())
[-1  2  9]

Source code in tinygrad/tensor.py
def maximum(self, x:Tensor|ConstType) -> Tensor:
  """
  Computes element-wise maximum of `self` and `x`.

  ```python exec="true" source="above" session="tensor" result="python"
  print(Tensor([-1, 2, 3]).maximum(1).numpy())
  ```
  ```python exec="true" source="above" session="tensor" result="python"
  print(Tensor([-1, 2, 3]).maximum(Tensor([-4, -2, 9])).numpy())
  ```
  """
  return self._apply_broadcasted_uop(UOp.maximum, x)

minimum

minimum(x: Tensor | ConstType) -> Tensor

Computes element-wise minimum of self and x.

print(Tensor([-1, 2, 3]).minimum(1).numpy())
[-1  1  1]
print(Tensor([-1, 2, 3]).minimum(Tensor([-4, -2, 9])).numpy())
[-4 -2  3]

Source code in tinygrad/tensor.py
def minimum(self, x:Tensor|ConstType) -> Tensor:
  """
  Computes element-wise minimum of `self` and `x`.

  ```python exec="true" source="above" session="tensor" result="python"
  print(Tensor([-1, 2, 3]).minimum(1).numpy())
  ```
  ```python exec="true" source="above" session="tensor" result="python"
  print(Tensor([-1, 2, 3]).minimum(Tensor([-4, -2, 9])).numpy())
  ```
  """
  t, x = self._broadcasted(x)
  return t._inverse().maximum(x._inverse())._inverse()

where

where(
    x: Tensor | ConstType | sint,
    y: Tensor | ConstType | sint,
) -> Tensor

Returns a tensor of elements selected from either x or y, depending on self. output_i = x_i if self_i else y_i.

cond = Tensor([[True, True, False], [True, False, False]])
print(cond.where(1, 3).numpy())
[[1 1 3]
 [1 3 3]]
Tensor.manual_seed(42)
cond = Tensor.randn(2, 3)
print(cond.numpy())
[[ 0.9779  0.4678  0.5526]
 [-0.3288 -0.8555  0.2753]]
print((cond > 0).where(cond, -float("inf")).numpy())
[[0.9779 0.4678 0.5526]
 [  -inf   -inf 0.2753]]

Source code in tinygrad/tensor.py
def where(self:Tensor, x:Tensor|ConstType|sint, y:Tensor|ConstType|sint) -> Tensor:
  """
  Returns a tensor of elements selected from either `x` or `y`, depending on `self`.
  `output_i = x_i if self_i else y_i`.

  ```python exec="true" source="above" session="tensor" result="python"
  cond = Tensor([[True, True, False], [True, False, False]])
  print(cond.where(1, 3).numpy())
  ```
  ```python exec="true" source="above" session="tensor" result="python"
  Tensor.manual_seed(42)
  cond = Tensor.randn(2, 3)
  print(cond.numpy())
  ```
  ```python exec="true" source="above" session="tensor" result="python"
  print((cond > 0).where(cond, -float("inf")).numpy())
  ```
  """
  if isinstance(x, Tensor): x, y = x._broadcasted(y)
  elif isinstance(y, Tensor): y, x = y._broadcasted(x)
  cond, x = self._broadcasted(x, match_dtype=False)
  cond, y = cond._broadcasted(y, match_dtype=False)
  return cond.cast(dtypes.bool)._apply_uop(UOp.where, *x._broadcasted(y))

copysign

copysign(other) -> Tensor

Returns a tensor with the magnitude of self and the sign of other, element-wise.

Source code in tinygrad/tensor.py
def copysign(self, other) -> Tensor:
  """
  Returns a tensor with the magnitude of `self` and the sign of `other`, elementwise.
  """
  # NOTE: torch always return in float, we return based on the broadcasting rule.
  other = self._broadcasted(other)[1]
  # TODO: remove other*0?
  return (other < 0).where(-self.abs(), self.abs()) + other*0
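
This entry lacks the usual rendered example; a minimal usage sketch (assuming the semantics documented above):

```python
from tinygrad import Tensor

# magnitude from the first tensor, sign from the second
print(Tensor([-1., 2., -3.]).copysign(Tensor([1., -1., 1.])).numpy())  # [ 1. -2.  3.]
```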

Casting Ops

cast

cast(dtype: DTypeLike) -> Tensor

Casts self to the given dtype.

t = Tensor([-1, 2.5, 3], dtype=dtypes.float)
print(t.dtype, t.numpy())
dtypes.float [-1.   2.5  3. ]
t = t.cast(dtypes.int32)
print(t.dtype, t.numpy())
dtypes.int [-1  2  3]
t = t.cast(dtypes.uint8)
print(t.dtype, t.numpy())
dtypes.uchar [255   2   3]

Source code in tinygrad/tensor.py
def cast(self, dtype:DTypeLike) -> Tensor:
  """
  Casts `self` to the given `dtype`.

  ```python exec="true" source="above" session="tensor" result="python"
  t = Tensor([-1, 2.5, 3], dtype=dtypes.float)
  print(t.dtype, t.numpy())
  ```
  ```python exec="true" source="above" session="tensor" result="python"
  t = t.cast(dtypes.int32)
  print(t.dtype, t.numpy())
  ```
  ```python exec="true" source="above" session="tensor" result="python"
  t = t.cast(dtypes.uint8)
  print(t.dtype, t.numpy())
  ```
  """
  if (dt:=to_dtype(dtype)) in {dtypes.uint8, dtypes.uint16} and dtypes.is_float(self.dtype):
    # NOTE: values within the int32 range and outside the unsigned dtype range will cause values to wrap around
    return self._apply_uop(UOp.cast, dtype=dtypes.int32)._apply_uop(UOp.cast, dtype=dt)
  return self if self.dtype == dt else self._apply_uop(UOp.cast, dtype=dt)

bitcast

bitcast(dtype: DTypeLike) -> Tensor

Bitcasts self to the given dtype of the same itemsize.

self must not require a gradient.

t = Tensor([-1, 2, 3], dtype=dtypes.int32)
print(t.dtype, t.numpy())
dtypes.int [-1  2  3]
t = t.bitcast(dtypes.uint32)
print(t.dtype, t.numpy())
dtypes.uint [4294967295          2          3]

Source code in tinygrad/tensor.py
def bitcast(self, dtype:DTypeLike) -> Tensor:
  """
  Bitcasts `self` to the given `dtype` of the same itemsize.

  `self` must not require a gradient.

  ```python exec="true" source="above" session="tensor" result="python"
  t = Tensor([-1, 2, 3], dtype=dtypes.int32)
  print(t.dtype, t.numpy())
  ```
  ```python exec="true" source="above" session="tensor" result="python"
  t = t.bitcast(dtypes.uint32)
  print(t.dtype, t.numpy())
  ```
  """
  if self.requires_grad: raise RuntimeError("can't backprop through bitcast")
  dt = to_dtype(dtype)
  if (ns:=dt.itemsize) != (os:=self.dtype.itemsize) and (self.shape[-1]*os) % ns != 0: raise RuntimeError("unsupported size in bitcast")
  if (not isinstance(self.device, str) or not self.device.startswith("DISK")) and ns != os:
    new_uint, old_uint = to_dtype(f"uint{8*ns}"), to_dtype(f"uint{8*os}")
    tmp = self.bitcast(old_uint)
    if ns > os:
      tmp = tmp.reshape(self.shape[:-1] + (self.shape[-1]//(rate := ns//os), rate))
      nones = (None,) * (tmp.ndim - 1)
      return functools.reduce(Tensor.add, (tmp.shrink(nones + ((i, i+1),)).cast(new_uint)<<8*i*os for i in range(rate))).squeeze(-1).bitcast(dtype)
    return Tensor.stack(*(tmp>>8*i*ns for i in range(os//ns)), dim=-1).flatten(-2).cast(new_uint).bitcast(dtype)
  return self._apply_uop(UOp.bitcast, dtype=dt) if self.dtype != dt else self
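
When the itemsize changes, the last axis is split or fused so the total byte count stays the same; each element's bytes are exactly what a reinterpreting view would give. A scalar illustration with `struct` (a sketch; assumes a little-endian host, which is the byte order the shift-based code above produces):

```python
import struct

# one float32 is four bytes: 1.0 == 0x3F800000
raw = struct.pack("<f", 1.0)
print(list(raw))                                       # [0, 0, 128, 63]
print(struct.unpack("<f", bytes([0, 0, 128, 63]))[0])  # 1.0 again
```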

float

float() -> Tensor

Convenience method to cast self to a float32 Tensor.

t = Tensor([-1, 2, 3], dtype=dtypes.int32)
print(t.dtype, t.numpy())
dtypes.int [-1  2  3]
t = t.float()
print(t.dtype, t.numpy())
dtypes.float [-1.  2.  3.]

Source code in tinygrad/tensor.py
def float(self) -> Tensor:
  """
  Convenience method to cast `self` to a `float32` Tensor.

  ```python exec="true" source="above" session="tensor" result="python"
  t = Tensor([-1, 2, 3], dtype=dtypes.int32)
  print(t.dtype, t.numpy())
  ```
  ```python exec="true" source="above" session="tensor" result="python"
  t = t.float()
  print(t.dtype, t.numpy())
  ```
  """
  return self.cast(dtypes.float32)

half

half() -> Tensor

Convenience method to cast self to a float16 Tensor.

t = Tensor([-1, 2, 3], dtype=dtypes.int32)
print(t.dtype, t.numpy())
dtypes.int [-1  2  3]
t = t.half()
print(t.dtype, t.numpy())
dtypes.half [-1.  2.  3.]

Source code in tinygrad/tensor.py
def half(self) -> Tensor:
  """
  Convenience method to cast `self` to a `float16` Tensor.

  ```python exec="true" source="above" session="tensor" result="python"
  t = Tensor([-1, 2, 3], dtype=dtypes.int32)
  print(t.dtype, t.numpy())
  ```
  ```python exec="true" source="above" session="tensor" result="python"
  t = t.half()
  print(t.dtype, t.numpy())
  ```
  """
  return self.cast(dtypes.float16)

int

int() -> Tensor

Convenience method to cast self to an int32 Tensor.

t = Tensor([-1.5, -0.5, 0.0, 0.5, 1.5])
print(t.dtype, t.numpy())
dtypes.float [-1.5 -0.5  0.   0.5  1.5]
t = t.int()
print(t.dtype, t.numpy())
dtypes.int [-1  0  0  0  1]

Source code in tinygrad/tensor.py
def int(self) -> Tensor:
  """
  Convenience method to cast `self` to an `int32` Tensor.

  ```python exec="true" source="above" session="tensor" result="python"
  t = Tensor([-1.5, -0.5, 0.0, 0.5, 1.5])
  print(t.dtype, t.numpy())
  ```
  ```python exec="true" source="above" session="tensor" result="python"
  t = t.int()
  print(t.dtype, t.numpy())
  ```
  """
  return self.cast(dtypes.int32)

bool

bool() -> Tensor

Convenience method to cast self to a bool Tensor.

t = Tensor([-1, 0, 1])
print(t.dtype, t.numpy())
dtypes.int [-1  0  1]
t = t.bool()
print(t.dtype, t.numpy())
dtypes.bool [ True False  True]

Source code in tinygrad/tensor.py
def bool(self) -> Tensor:
  """
  Convenience method to cast `self` to a `bool` Tensor.

  ```python exec="true" source="above" session="tensor" result="python"
  t = Tensor([-1, 0, 1])
  print(t.dtype, t.numpy())
  ```
  ```python exec="true" source="above" session="tensor" result="python"
  t = t.bool()
  print(t.dtype, t.numpy())
  ```
  """
  return self.cast(dtypes.bool)