Elementwise

Elementwise ops operate on a per-element basis: they don't change the shape of the tensor.
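
As a quick illustration (a minimal sketch, assuming tinygrad is installed and importable as below), the output of an elementwise op always has the same shape as its input:

```python
from tinygrad import Tensor

t = Tensor([[1., -2.], [-3., 4.]])
# relu and exp act per element, so every shape stays (2, 2)
print(t.shape, t.relu().shape, t.exp().shape)
```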

Unary Ops (math)¤

logical_not ¤

logical_not()

Computes the logical NOT of the tensor element-wise.

print(Tensor([False, True]).logical_not().numpy())
[ True False]
Source code in tinygrad/tensor.py
def logical_not(self):
  """
  Computes the logical NOT of the tensor element-wise.

  ```python exec="true" source="above" session="tensor" result="python"
  print(Tensor([False, True]).logical_not().numpy())
  ```
  """
  return F.Neq.apply(*self.cast(dtypes.bool)._broadcasted(True))
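
Note that, per the `cast(dtypes.bool)` in the source, non-bool inputs are cast to bool first. A small sketch of that behavior:

```python
from tinygrad import Tensor

# nonzero values become True under the bool cast, then get inverted
print(Tensor([0, 1, 2]).logical_not().numpy())  # expected: [ True False False]
```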

neg ¤

neg()

Negates the tensor element-wise.

print(Tensor([-3., -2., -1., 0., 1., 2., 3.]).neg().numpy())
[ 3.  2.  1. -0. -1. -2. -3.]
Source code in tinygrad/tensor.py
def neg(self):
  """
  Negates the tensor element-wise.

  ```python exec="true" source="above" session="tensor" result="python"
  print(Tensor([-3., -2., -1., 0., 1., 2., 3.]).neg().numpy())
  ```
  """
  return self*-1 if self.dtype != dtypes.bool else self.logical_not()
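
As the dtype check in the source shows, negating a bool tensor falls through to `logical_not` rather than multiplying by -1. A small sketch:

```python
from tinygrad import Tensor

# bool input: neg is logical NOT, not multiplication by -1
print(Tensor([True, False]).neg().numpy())  # expected: [False  True]
```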

log ¤

log()

Computes the natural logarithm element-wise.

See: https://en.wikipedia.org/wiki/Logarithm

print(Tensor([1., 2., 4., 8.]).log().numpy())
[0.     0.6931 1.3863 2.0794]
Source code in tinygrad/tensor.py
def log(self):
  """
  Computes the natural logarithm element-wise.

  See: https://en.wikipedia.org/wiki/Logarithm

  ```python exec="true" source="above" session="tensor" result="python"
  print(Tensor([1., 2., 4., 8.]).log().numpy())
  ```
  """
  return F.Log.apply(self.cast(least_upper_float(self.dtype)))

log2 ¤

log2()

Computes the base-2 logarithm element-wise.

See: https://en.wikipedia.org/wiki/Logarithm

print(Tensor([1., 2., 4., 8.]).log2().numpy())
[0. 1. 2. 3.]
Source code in tinygrad/tensor.py
def log2(self):
  """
  Computes the base-2 logarithm element-wise.

  See: https://en.wikipedia.org/wiki/Logarithm

  ```python exec="true" source="above" session="tensor" result="python"
  print(Tensor([1., 2., 4., 8.]).log2().numpy())
  ```
  """
  return self.log()/math.log(2)

exp ¤

exp()

Computes the exponential function element-wise.

See: https://en.wikipedia.org/wiki/Exponential_function

print(Tensor([0., 1., 2., 3.]).exp().numpy())
[ 1.      2.7183  7.3891 20.0855]
Source code in tinygrad/tensor.py
def exp(self):
  """
  Computes the exponential function element-wise.

  See: https://en.wikipedia.org/wiki/Exponential_function

  ```python exec="true" source="above" session="tensor" result="python"
  print(Tensor([0., 1., 2., 3.]).exp().numpy())
  ```
  """
  return F.Exp.apply(self.cast(least_upper_float(self.dtype)))

exp2 ¤

exp2()

Computes the base-2 exponential function element-wise.

See: https://en.wikipedia.org/wiki/Exponential_function

print(Tensor([0., 1., 2., 3.]).exp2().numpy())
[1. 2. 4. 8.]
Source code in tinygrad/tensor.py
def exp2(self):
  """
  Computes the base-2 exponential function element-wise.

  See: https://en.wikipedia.org/wiki/Exponential_function

  ```python exec="true" source="above" session="tensor" result="python"
  print(Tensor([0., 1., 2., 3.]).exp2().numpy())
  ```
  """
  return F.Exp.apply(self*math.log(2))

sqrt ¤

sqrt()

Computes the square root of the tensor element-wise.

print(Tensor([1., 2., 3., 4.]).sqrt().numpy())
[1.     1.4142 1.7321 2.    ]
Source code in tinygrad/tensor.py
def sqrt(self):
  """
  Computes the square root of the tensor element-wise.

  ```python exec="true" source="above" session="tensor" result="python"
  print(Tensor([1., 2., 3., 4.]).sqrt().numpy())
  ```
  """
  return F.Sqrt.apply(self.cast(least_upper_float(self.dtype)))

rsqrt ¤

rsqrt()

Computes the reciprocal of the square root of the tensor element-wise.

print(Tensor([1., 2., 3., 4.]).rsqrt().numpy())
[1.     0.7071 0.5774 0.5   ]
Source code in tinygrad/tensor.py
def rsqrt(self):
  """
  Computes the reciprocal of the square root of the tensor element-wise.

  ```python exec="true" source="above" session="tensor" result="python"
  print(Tensor([1., 2., 3., 4.]).rsqrt().numpy())
  ```
  """
  return self.reciprocal().sqrt()

sin ¤

sin()

Computes the sine of the tensor element-wise.

print(Tensor([0., math.pi/2, math.pi, 3*math.pi/2, 2*math.pi]).sin().numpy())
[ 0.  1. -0. -1.  0.]
Source code in tinygrad/tensor.py
def sin(self):
  """
  Computes the sine of the tensor element-wise.

  ```python exec="true" source="above" session="tensor" result="python"
  print(Tensor([0., math.pi/2, math.pi, 3*math.pi/2, 2*math.pi]).sin().numpy())
  ```
  """
  return F.Sin.apply(self.cast(least_upper_float(self.dtype)))

cos ¤

cos()

Computes the cosine of the tensor element-wise.

print(Tensor([0., math.pi/2, math.pi, 3*math.pi/2, 2*math.pi]).cos().numpy())
[ 1.0000e+00  0.0000e+00 -1.0000e+00 -2.3842e-07  1.0000e+00]
Source code in tinygrad/tensor.py
def cos(self):
  """
  Computes the cosine of the tensor element-wise.

  ```python exec="true" source="above" session="tensor" result="python"
  print(Tensor([0., math.pi/2, math.pi, 3*math.pi/2, 2*math.pi]).cos().numpy())
  ```
  """
  return ((math.pi/2)-self).sin()

tan ¤

tan()

Computes the tangent of the tensor element-wise.

print(Tensor([0., math.pi/4, math.pi/2, 3*math.pi/4, math.pi]).tan().numpy())
[ 0.  1. inf -1.  0.]
Source code in tinygrad/tensor.py
def tan(self):
  """
  Computes the tangent of the tensor element-wise.

  ```python exec="true" source="above" session="tensor" result="python"
  print(Tensor([0., math.pi/4, math.pi/2, 3*math.pi/4, math.pi]).tan().numpy())
  ```
  """
  return self.sin() / self.cos()

trunc ¤

trunc() -> Tensor

Truncates the tensor element-wise.

print(Tensor([-3.5, -2.5, -1.5, -0.5, 0.5, 1.5, 2.5, 3.5]).trunc().numpy())
[-3. -2. -1.  0.  0.  1.  2.  3.]
Source code in tinygrad/tensor.py
def trunc(self: Tensor) -> Tensor:
  """
  Truncates the tensor element-wise.

  ```python exec="true" source="above" session="tensor" result="python"
  print(Tensor([-3.5, -2.5, -1.5, -0.5, 0.5, 1.5, 2.5, 3.5]).trunc().numpy())
  ```
  """
  return self.cast(dtypes.int32).cast(self.dtype)

ceil ¤

ceil() -> Tensor

Rounds the tensor element-wise towards positive infinity.

print(Tensor([-3.5, -2.5, -1.5, -0.5, 0.5, 1.5, 2.5, 3.5]).ceil().numpy())
[-3. -2. -1.  0.  1.  2.  3.  4.]
Source code in tinygrad/tensor.py
def ceil(self: Tensor) -> Tensor:
  """
  Rounds the tensor element-wise towards positive infinity.

  ```python exec="true" source="above" session="tensor" result="python"
  print(Tensor([-3.5, -2.5, -1.5, -0.5, 0.5, 1.5, 2.5, 3.5]).ceil().numpy())
  ```
  """
  return (self > (b := self.trunc())).where(b+1, b)

floor ¤

floor() -> Tensor

Rounds the tensor element-wise towards negative infinity.

print(Tensor([-3.5, -2.5, -1.5, -0.5, 0.5, 1.5, 2.5, 3.5]).floor().numpy())
[-4. -3. -2. -1.  0.  1.  2.  3.]
Source code in tinygrad/tensor.py
def floor(self: Tensor) -> Tensor:
  """
  Rounds the tensor element-wise towards negative infinity.

  ```python exec="true" source="above" session="tensor" result="python"
  print(Tensor([-3.5, -2.5, -1.5, -0.5, 0.5, 1.5, 2.5, 3.5]).floor().numpy())
  ```
  """
  return (self < (b := self.trunc())).where(b-1, b)

round ¤

round() -> Tensor

Rounds the tensor element-wise with rounding half to even.

print(Tensor([-3.5, -2.5, -1.5, -0.5, 0.5, 1.5, 2.5, 3.5]).round().numpy())
[-4. -2. -2.  0.  0.  2.  2.  4.]
Source code in tinygrad/tensor.py
def round(self: Tensor) -> Tensor:
  """
  Rounds the tensor element-wise with rounding half to even.

  ```python exec="true" source="above" session="tensor" result="python"
  print(Tensor([-3.5, -2.5, -1.5, -0.5, 0.5, 1.5, 2.5, 3.5]).round().numpy())
  ```
  """
  return ((self > 0) == ((b := self.cast(dtypes.int32) / 2.0).cast(dtypes.int32) == b)).where((self - 0.5).ceil(), (self + 0.5).floor())
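
Half-to-even ("banker's") rounding sends exact halves to the nearest even integer, which differs from the round-half-away-from-zero convention. A small sketch:

```python
from tinygrad import Tensor

# exact .5 values round to the nearest even integer
print(Tensor([0.5, 1.5, 2.5]).round().numpy())  # expected: [0. 2. 2.]
```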

lerp ¤

lerp(end: Tensor, weight: Union[Tensor, float]) -> Tensor

Linearly interpolates between self and end by weight.

print(Tensor([1., 2., 3.]).lerp(Tensor([4., 5., 6.]), 0.5).numpy())
[2.5 3.5 4.5]
Source code in tinygrad/tensor.py
def lerp(self, end: Tensor, weight: Union[Tensor, float]) -> Tensor:
  """
  Linearly interpolates between `self` and `end` by `weight`.

  ```python exec="true" source="above" session="tensor" result="python"
  print(Tensor([1., 2., 3.]).lerp(Tensor([4., 5., 6.]), 0.5).numpy())
  ```
  """
  if self.dtype == dtypes.uint8 and isinstance(weight, Tensor):
    w_i = (weight * (1<<(W_PREC:=7)) + 0.5).cast(dtypes.int16)
    return (self+(((end - self).cast(dtypes.int8) * w_i + (1<<W_PREC-1)).cast(dtypes.uint16) >> W_PREC)).cast(dtypes.uint8)
  return self + (end - self) * weight
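
The `uint8` branch above is a fixed-point fast path: the weight is quantized to 7 fractional bits (`W_PREC=7`), so results can differ from float `lerp` by roughly 1/128. A hedged sketch of that path:

```python
from tinygrad import Tensor, dtypes

a = Tensor([0, 100, 200], dtype=dtypes.uint8)
b = Tensor([10, 110, 210], dtype=dtypes.uint8)
# a Tensor weight on uint8 inputs takes the fixed-point branch
print(a.lerp(b, Tensor([0.5, 0.5, 0.5])).numpy())  # expected: [  5 105 205]
```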

square ¤

square()

Squares the tensor element-wise. Equivalent to self*self.

print(Tensor([-3., -2., -1., 0., 1., 2., 3.]).square().numpy())
[9. 4. 1. 0. 1. 4. 9.]
Source code in tinygrad/tensor.py
def square(self):
  """
  Squares the tensor element-wise.
  Equivalent to `self*self`.

  ```python exec="true" source="above" session="tensor" result="python"
  print(Tensor([-3., -2., -1., 0., 1., 2., 3.]).square().numpy())
  ```
  """
  return self*self

clamp ¤

clamp(min_=None, max_=None)

Clips (clamps) the values in the tensor between min_ and max_ element-wise. If min_ is None, there is no lower bound. If max_ is None, there is no upper bound.

print(Tensor([-3., -2., -1., 0., 1., 2., 3.]).clip(-1, 1).numpy())
[-1. -1. -1.  0.  1.  1.  1.]
Source code in tinygrad/tensor.py
def clamp(self, min_=None, max_=None):
  """
  Clips (clamps) the values in the tensor between `min_` and `max_` element-wise.
  If `min_` is `None`, there is no lower bound. If `max_` is None, there is no upper bound.

  ```python exec="true" source="above" session="tensor" result="python"
  print(Tensor([-3., -2., -1., 0., 1., 2., 3.]).clip(-1, 1).numpy())
  ```
  """
  if min_ is None and max_ is None: raise RuntimeError("at least one of 'min_' or 'max_' must not be None")
  ret = self.maximum(min_) if min_ is not None else self
  return ret.minimum(max_) if max_ is not None else ret
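
Since either bound may be `None`, passing only one of them gives a one-sided clamp, as in this small sketch:

```python
from tinygrad import Tensor

# only a lower bound: values are clipped from below, unbounded above
print(Tensor([-3., -1., 0., 2.]).clamp(min_=0).numpy())  # expected: [0. 0. 0. 2.]
```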

clip ¤

clip(min_=None, max_=None)

Alias for Tensor.clamp.

Source code in tinygrad/tensor.py
def clip(self, min_=None, max_=None):
  """
  Alias for `Tensor.clamp`.
  """
  return self.clamp(min_, max_)

sign ¤

sign()

Returns the sign of the tensor element-wise.

print(Tensor([-3., -2., -1., 0., 1., 2., 3.]).sign().numpy())
[-1. -1. -1.  0.  1.  1.  1.]
Source code in tinygrad/tensor.py
def sign(self):
  """
  Returns the sign of the tensor element-wise.

  ```python exec="true" source="above" session="tensor" result="python"
  print(Tensor([-3., -2., -1., 0., 1., 2., 3.]).sign().numpy())
  ```
  """
  return F.Sign.apply(self)

abs ¤

abs()

Computes the absolute value of the tensor element-wise.

print(Tensor([-3., -2., -1., 0., 1., 2., 3.]).abs().numpy())
[3. 2. 1. 0. 1. 2. 3.]
Source code in tinygrad/tensor.py
def abs(self):
  """
  Computes the absolute value of the tensor element-wise.

  ```python exec="true" source="above" session="tensor" result="python"
  print(Tensor([-3., -2., -1., 0., 1., 2., 3.]).abs().numpy())
  ```
  """
  return self * self.sign()

reciprocal ¤

reciprocal()

Compute 1/x element-wise.

print(Tensor([1., 2., 3., 4.]).reciprocal().numpy())
[1.     0.5    0.3333 0.25  ]
Source code in tinygrad/tensor.py
def reciprocal(self):
  """
  Compute `1/x` element-wise.

  ```python exec="true" source="above" session="tensor" result="python"
  print(Tensor([1., 2., 3., 4.]).reciprocal().numpy())
  ```
  """
  return F.Reciprocal.apply(self.cast(least_upper_float(self.dtype)))

Unary Ops (activation)¤

relu ¤

relu()

Applies the Rectified Linear Unit (ReLU) function element-wise.

print(Tensor([-3., -2., -1., 0., 1., 2., 3.]).relu().numpy())
[0. 0. 0. 0. 1. 2. 3.]
Source code in tinygrad/tensor.py
def relu(self):
  """
  Applies the Rectified Linear Unit (ReLU) function element-wise.

  - Described: https://paperswithcode.com/method/relu

  ```python exec="true" source="above" session="tensor" result="python"
  print(Tensor([-3., -2., -1., 0., 1., 2., 3.]).relu().numpy())
  ```
  """
  return F.Relu.apply(self)

sigmoid ¤

sigmoid()

Applies the Sigmoid function element-wise.

print(Tensor([-3., -2., -1., 0., 1., 2., 3.]).sigmoid().numpy())
[0.0474 0.1192 0.2689 0.5    0.7311 0.8808 0.9526]
Source code in tinygrad/tensor.py
def sigmoid(self):
  """
  Applies the Sigmoid function element-wise.

  - Described: https://en.wikipedia.org/wiki/Sigmoid_function

  ```python exec="true" source="above" session="tensor" result="python"
  print(Tensor([-3., -2., -1., 0., 1., 2., 3.]).sigmoid().numpy())
  ```
  """
  return F.Sigmoid.apply(self.cast(least_upper_float(self.dtype)))

elu ¤

elu(alpha=1.0)

Applies the Exponential Linear Unit (ELU) function element-wise.

print(Tensor([-3., -2., -1., 0., 1., 2., 3.]).elu().numpy())
[-0.9502 -0.8647 -0.6321  0.      1.      2.      3.    ]
Source code in tinygrad/tensor.py
def elu(self, alpha=1.0):
  """
  Applies the Exponential Linear Unit (ELU) function element-wise.

  - Described: https://paperswithcode.com/method/elu
  - Paper: https://arxiv.org/abs/1511.07289v5

  ```python exec="true" source="above" session="tensor" result="python"
  print(Tensor([-3., -2., -1., 0., 1., 2., 3.]).elu().numpy())
  ```
  """
  return self.relu() - alpha*(1-self.exp()).relu()

celu ¤

celu(alpha=1.0)

Applies the Continuously differentiable Exponential Linear Unit (CELU) function element-wise.

print(Tensor([-3., -2., -1., 0., 1., 2., 3.]).celu().numpy())
[-0.9502 -0.8647 -0.6321  0.      1.      2.      3.    ]
Source code in tinygrad/tensor.py
def celu(self, alpha=1.0):
  """
  Applies the Continuously differentiable Exponential Linear Unit (CELU) function element-wise.

  - Described: https://paperswithcode.com/method/celu
  - Paper: https://arxiv.org/abs/1704.07483

  ```python exec="true" source="above" session="tensor" result="python"
  print(Tensor([-3., -2., -1., 0., 1., 2., 3.]).celu().numpy())
  ```
  """
  return self.maximum(0) + (alpha * ((self / alpha).exp() - 1)).minimum(0)

swish ¤

swish()

See .silu()

print(Tensor([-3., -2., -1., 0., 1., 2., 3.]).swish().numpy())
[-0.1423 -0.2384 -0.2689  0.      0.7311  1.7616  2.8577]
Source code in tinygrad/tensor.py
def swish(self):
  """
  See `.silu()`

  - Paper: https://arxiv.org/abs/1710.05941v1

  ```python exec="true" source="above" session="tensor" result="python"
  print(Tensor([-3., -2., -1., 0., 1., 2., 3.]).swish().numpy())
  ```
  """
  return self * self.sigmoid()

silu ¤

silu()

Applies the Sigmoid Linear Unit (SiLU) function element-wise.

print(Tensor([-3., -2., -1., 0., 1., 2., 3.]).silu().numpy())
[-0.1423 -0.2384 -0.2689  0.      0.7311  1.7616  2.8577]
Source code in tinygrad/tensor.py
def silu(self):
  """
  Applies the Sigmoid Linear Unit (SiLU) function element-wise.

  - Described: https://paperswithcode.com/method/silu
  - Paper: https://arxiv.org/abs/1606.08415

  ```python exec="true" source="above" session="tensor" result="python"
  print(Tensor([-3., -2., -1., 0., 1., 2., 3.]).silu().numpy())
  ```
  """
  return self.swish()   # The SiLU function is also known as the swish function.

relu6 ¤

relu6()

Applies the ReLU6 function element-wise.

print(Tensor([-9., -6., -3., 0., 3., 6., 9.]).relu6().numpy())
[0. 0. 0. 0. 3. 6. 6.]
Source code in tinygrad/tensor.py
def relu6(self):
  """
  Applies the ReLU6 function element-wise.

  - Described: https://paperswithcode.com/method/relu6
  - Paper: https://arxiv.org/abs/1704.04861v1

  ```python exec="true" source="above" session="tensor" result="python"
  print(Tensor([-9., -6., -3., 0., 3., 6., 9.]).relu6().numpy())
  ```
  """
  return self.relu() - (self-6).relu()

hardswish ¤

hardswish()

Applies the Hardswish function element-wise.

print(Tensor([-3., -2., -1., 0., 1., 2., 3.]).hardswish().numpy())
[-0.     -0.3333 -0.3333  0.      0.6667  1.6667  3.    ]
Source code in tinygrad/tensor.py
def hardswish(self):
  """
  Applies the Hardswish function element-wise.

  - Described: https://paperswithcode.com/method/hard-swish
  - Paper: https://arxiv.org/abs/1905.02244v5

  ```python exec="true" source="above" session="tensor" result="python"
  print(Tensor([-3., -2., -1., 0., 1., 2., 3.]).hardswish().numpy())
  ```
  """
  return self * (self+3).relu6() * (1/6)

tanh ¤

tanh()

Applies the Hyperbolic Tangent (tanh) function element-wise.

print(Tensor([-3., -2., -1., 0., 1., 2., 3.]).tanh().numpy())
[-0.9951 -0.964  -0.7616  0.      0.7616  0.964   0.9951]
Source code in tinygrad/tensor.py
def tanh(self):
  """
  Applies the Hyperbolic Tangent (tanh) function element-wise.

  - Described: https://en.wikipedia.org/wiki/Hyperbolic_functions#Tanh

  ```python exec="true" source="above" session="tensor" result="python"
  print(Tensor([-3., -2., -1., 0., 1., 2., 3.]).tanh().numpy())
  ```
  """
  return 2.0 * ((2.0 * self).sigmoid()) - 1.0
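
The source uses the identity tanh(x) = 2·sigmoid(2x) - 1. A quick numeric check with plain Python floats (not the tinygrad API):

```python
import math

sigmoid = lambda z: 1 / (1 + math.exp(-z))
x = 0.7
# both print ~0.6044, confirming the identity the source relies on
print(math.tanh(x), 2 * sigmoid(2 * x) - 1)
```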

sinh ¤

sinh()

Applies the Hyperbolic Sine (sinh) function element-wise.

print(Tensor([-3., -2., -1., 0., 1., 2., 3.]).sinh().numpy())
[-10.0179  -3.6269  -1.1752   0.       1.1752   3.6269  10.0179]
Source code in tinygrad/tensor.py
def sinh(self):
  """
  Applies the Hyperbolic Sine (sinh) function element-wise.

  - Described: https://en.wikipedia.org/wiki/Hyperbolic_functions#Sinh

  ```python exec="true" source="above" session="tensor" result="python"
  print(Tensor([-3., -2., -1., 0., 1., 2., 3.]).sinh().numpy())
  ```
  """
  return (self.exp() - self.neg().exp()) / 2

cosh ¤

cosh()

Applies the Hyperbolic Cosine (cosh) function element-wise.

print(Tensor([-3., -2., -1., 0., 1., 2., 3.]).cosh().numpy())
[10.0677  3.7622  1.5431  1.      1.5431  3.7622 10.0677]
Source code in tinygrad/tensor.py
def cosh(self):
  """
  Applies the Hyperbolic Cosine (cosh) function element-wise.

  - Described: https://en.wikipedia.org/wiki/Hyperbolic_functions#Cosh

  ```python exec="true" source="above" session="tensor" result="python"
  print(Tensor([-3., -2., -1., 0., 1., 2., 3.]).cosh().numpy())
  ```
  """
  return (self.exp() + self.neg().exp()) / 2

atanh ¤

atanh()

Applies the Inverse Hyperbolic Tangent (atanh) function element-wise.

print(Tensor([-0.9, -0.6, -0.3, 0., 0.3, 0.6, 0.9]).atanh().numpy())
[-1.4722 -0.6931 -0.3095  0.      0.3095  0.6931  1.4722]
Source code in tinygrad/tensor.py
def atanh(self):
  """
  Applies the Inverse Hyperbolic Tangent (atanh) function element-wise.

  - Described: https://en.wikipedia.org/wiki/Inverse_hyperbolic_functions#atanh

  ```python exec="true" source="above" session="tensor" result="python"
  print(Tensor([-0.9, -0.6, -0.3, 0., 0.3, 0.6, 0.9]).atanh().numpy())
  ```
  """
  return ((1 + self)/(1 - self)).log() / 2

asinh ¤

asinh()

Applies the Inverse Hyperbolic Sine (asinh) function element-wise.

print(Tensor([-3., -2., -1., 0., 1., 2., 3.]).asinh().numpy())
[-1.8184 -1.4436 -0.8814  0.      0.8814  1.4436  1.8184]
Source code in tinygrad/tensor.py
def asinh(self):
  """
  Applies the Inverse Hyperbolic Sine (asinh) function element-wise.

  - Described: https://en.wikipedia.org/wiki/Inverse_hyperbolic_functions#asinh

  ```python exec="true" source="above" session="tensor" result="python"
  print(Tensor([-3., -2., -1., 0., 1., 2., 3.]).asinh().numpy())
  ```
  """
  return (self + (self.square() + 1).sqrt()).log()

acosh ¤

acosh()

Applies the Inverse Hyperbolic Cosine (acosh) function element-wise.

print(Tensor([-3., -2., -1., 0., 1., 2., 3.]).acosh().numpy())
[   nan    nan    nan    nan 0.     1.317  1.7627]
Source code in tinygrad/tensor.py
def acosh(self):
  """
  Applies the Inverse Hyperbolic Cosine (acosh) function element-wise.

  - Described: https://en.wikipedia.org/wiki/Inverse_hyperbolic_functions#acosh

  ```python exec="true" source="above" session="tensor" result="python"
  print(Tensor([-3., -2., -1., 0., 1., 2., 3.]).acosh().numpy())
  ```
  """
  return (self + (self.square() - 1).sqrt()).log()

hardtanh ¤

hardtanh(min_val=-1, max_val=1)

Applies the Hardtanh function element-wise.

print(Tensor([-1.5, -1.0, -0.5, 0., 0.5, 1.0, 1.5]).hardtanh().numpy())
[-1.  -1.  -0.5  0.   0.5  1.   1. ]
Source code in tinygrad/tensor.py
def hardtanh(self, min_val=-1, max_val=1):
  """
  Applies the Hardtanh function element-wise.

  - Described: https://paperswithcode.com/method/hardtanh-activation

  ```python exec="true" source="above" session="tensor" result="python"
  print(Tensor([-1.5, -1.0, -0.5, 0., 0.5, 1.0, 1.5]).hardtanh().numpy())
  ```
  """
  return self.clip(min_val, max_val)
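
With explicit bounds, hardtanh is just a clip to `[min_val, max_val]`, as in this small sketch:

```python
from tinygrad import Tensor

# custom bounds: equivalent to clip(-2, 2)
print(Tensor([-3., -1., 0., 1., 3.]).hardtanh(min_val=-2, max_val=2).numpy())  # expected: [-2. -1.  0.  1.  2.]
```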

gelu ¤

gelu()

Applies the Gaussian Error Linear Unit (GELU) function element-wise.

print(Tensor([-3., -2., -1., 0., 1., 2., 3.]).gelu().numpy())
[-0.0036 -0.0454 -0.1588  0.      0.8412  1.9546  2.9964]
Source code in tinygrad/tensor.py
def gelu(self):
  """
  Applies the Gaussian Error Linear Unit (GELU) function element-wise.

  - Described: https://paperswithcode.com/method/gelu
  - Paper: https://arxiv.org/abs/1606.08415v5

  ```python exec="true" source="above" session="tensor" result="python"
  print(Tensor([-3., -2., -1., 0., 1., 2., 3.]).gelu().numpy())
  ```
  """
  return 0.5 * self * (1 + (math.sqrt(2 / math.pi) * (self + 0.044715 * self ** 3)).tanh())
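
The source is the common tanh approximation of GELU; the exact definition uses the Gaussian CDF, 0.5·x·(1 + erf(x/√2)). A quick comparison at x = 1.0 with plain Python floats:

```python
import math

x = 1.0
approx = 0.5 * x * (1 + math.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * x**3)))
exact = 0.5 * x * (1 + math.erf(x / math.sqrt(2)))
# the two agree to ~4 decimal places (~0.8412 vs ~0.8413)
print(approx, exact)
```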

quick_gelu ¤

quick_gelu()

Applies the Sigmoid GELU approximation element-wise.

print(Tensor([-3., -2., -1., 0., 1., 2., 3.]).quick_gelu().numpy())
[-0.0181 -0.0643 -0.1542  0.      0.8458  1.9357  2.9819]
Source code in tinygrad/tensor.py
def quick_gelu(self):
  """
  Applies the Sigmoid GELU approximation element-wise.

  - Described: https://paperswithcode.com/method/gelu

  ```python exec="true" source="above" session="tensor" result="python"
  print(Tensor([-3., -2., -1., 0., 1., 2., 3.]).quick_gelu().numpy())
  ```
  """
  return self * (self * 1.702).sigmoid()

leakyrelu ¤

leakyrelu(neg_slope=0.01)

Applies the Leaky ReLU function element-wise.

print(Tensor([-3., -2., -1., 0., 1., 2., 3.]).leakyrelu().numpy())
[-0.03 -0.02 -0.01  0.    1.    2.    3.  ]
print(Tensor([-3., -2., -1., 0., 1., 2., 3.]).leakyrelu(neg_slope=0.42).numpy())
[-1.26 -0.84 -0.42  0.    1.    2.    3.  ]

Source code in tinygrad/tensor.py
def leakyrelu(self, neg_slope=0.01):
  """
  Applies the Leaky ReLU function element-wise.

  - Described: https://paperswithcode.com/method/leaky-relu

  ```python exec="true" source="above" session="tensor" result="python"
  print(Tensor([-3., -2., -1., 0., 1., 2., 3.]).leakyrelu().numpy())
  ```
  ```python exec="true" source="above" session="tensor" result="python"
  print(Tensor([-3., -2., -1., 0., 1., 2., 3.]).leakyrelu(neg_slope=0.42).numpy())
  ```
  """
  return self.relu() - (-neg_slope*self).relu()

mish ¤

mish()

Applies the Mish function element-wise.

print(Tensor([-3., -2., -1., 0., 1., 2., 3.]).mish().numpy())
[-0.1456 -0.2525 -0.3034  0.      0.8651  1.944   2.9865]
Source code in tinygrad/tensor.py
def mish(self):
  """
  Applies the Mish function element-wise.

  - Described: https://paperswithcode.com/method/mish
  - Paper: https://arxiv.org/abs/1908.08681v3

  ```python exec="true" source="above" session="tensor" result="python"
  print(Tensor([-3., -2., -1., 0., 1., 2., 3.]).mish().numpy())
  ```
  """
  return self * self.softplus().tanh()

softplus ¤

softplus(beta=1)

Applies the Softplus function element-wise.

print(Tensor([-3., -2., -1., 0., 1., 2., 3.]).softplus().numpy())
[0.0486 0.1269 0.3133 0.6931 1.3133 2.1269 3.0486]
Source code in tinygrad/tensor.py
def softplus(self, beta=1):
  """
  Applies the Softplus function element-wise.

  - Described: https://paperswithcode.com/method/softplus

  ```python exec="true" source="above" session="tensor" result="python"
  print(Tensor([-3., -2., -1., 0., 1., 2., 3.]).softplus().numpy())
  ```
  """
  return (1/beta) * (1 + (self*beta).exp()).log()
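
Per the formula in the source, softplus(x) = (1/β)·log(1 + exp(βx)), so a larger `beta` sharpens the curve toward relu. A hedged sketch:

```python
from tinygrad import Tensor

# beta=10: output hugs relu, values roughly [0, 0.0693, 1.0]
print(Tensor([-1., 0., 1.]).softplus(beta=10).numpy())
```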

softsign ¤

softsign()

Applies the Softsign function element-wise.

print(Tensor([-3., -2., -1., 0., 1., 2., 3.]).softsign().numpy())
[-0.75   -0.6667 -0.5     0.      0.5     0.6667  0.75  ]
Source code in tinygrad/tensor.py
def softsign(self):
  """
  Applies the Softsign function element-wise.

  - Described: https://paperswithcode.com/method/softsign

  ```python exec="true" source="above" session="tensor" result="python"
  print(Tensor([-3., -2., -1., 0., 1., 2., 3.]).softsign().numpy())
  ```
  """
  return self / (1 + self.abs())

Elementwise Ops (broadcasted)¤

add ¤

add(x: Union[Tensor, ConstType], reverse=False) -> Tensor

Adds self and x. Equivalent to self + x. Supports broadcasting to a common shape, type promotion, and integer, float, boolean inputs.

Tensor.manual_seed(42)
t = Tensor.randn(4)
print(t.numpy())
[-1.3113 -0.1159 -0.6549  1.84  ]
print(t.add(20).numpy())
[18.6887 19.8841 19.3451 21.84  ]
print(t.add(Tensor([[2.0], [3.5]])).numpy())
[[0.6887 1.8841 1.3451 3.84  ]
 [2.1887 3.3841 2.8451 5.34  ]]

Source code in tinygrad/tensor.py
def add(self, x:Union[Tensor, ConstType], reverse=False) -> Tensor:
  """
  Adds `self` and `x`.
  Equivalent to `self + x`.
  Supports broadcasting to a common shape, type promotion, and integer, float, boolean inputs.

  ```python exec="true" source="above" session="tensor" result="python"
  Tensor.manual_seed(42)
  t = Tensor.randn(4)
  print(t.numpy())
  ```
  ```python exec="true" source="above" session="tensor" result="python"
  print(t.add(20).numpy())
  ```
  ```python exec="true" source="above" session="tensor" result="python"
  print(t.add(Tensor([[2.0], [3.5]])).numpy())
  ```
  """
  return F.Add.apply(*self._broadcasted(x, reverse))

sub ¤

sub(x: Union[Tensor, ConstType], reverse=False) -> Tensor

Subtracts x from self. Equivalent to self - x. Supports broadcasting to a common shape, type promotion, and integer, float, boolean inputs.

Tensor.manual_seed(42)
t = Tensor.randn(4)
print(t.numpy())
[-1.3113 -0.1159 -0.6549  1.84  ]
print(t.sub(20).numpy())
[-21.3113 -20.1159 -20.6549 -18.16  ]
print(t.sub(Tensor([[2.0], [3.5]])).numpy())
[[-3.3113 -2.1159 -2.6549 -0.16  ]
 [-4.8113 -3.6159 -4.1549 -1.66  ]]

Source code in tinygrad/tensor.py
def sub(self, x:Union[Tensor, ConstType], reverse=False) -> Tensor:
  """
  Subtracts `x` from `self`.
  Equivalent to `self - x`.
  Supports broadcasting to a common shape, type promotion, and integer, float, boolean inputs.

  ```python exec="true" source="above" session="tensor" result="python"
  Tensor.manual_seed(42)
  t = Tensor.randn(4)
  print(t.numpy())
  ```
  ```python exec="true" source="above" session="tensor" result="python"
  print(t.sub(20).numpy())
  ```
  ```python exec="true" source="above" session="tensor" result="python"
  print(t.sub(Tensor([[2.0], [3.5]])).numpy())
  ```
  """
  a, b = self._broadcasted(x, reverse)
  return a + (-b)
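
The `reverse` flag swaps the operands before broadcasting, which is how expressions like `2 - t` are implemented. A small sketch:

```python
from tinygrad import Tensor

t = Tensor([1., 2., 3.])
# reverse=True computes 2 - t rather than t - 2
print(t.sub(2, reverse=True).numpy())  # expected: [ 1.  0. -1.]
```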

mul ¤

mul(x: Union[Tensor, ConstType], reverse=False) -> Tensor

Multiplies self and x. Equivalent to self * x. Supports broadcasting to a common shape, type promotion, and integer, float, boolean inputs.

Tensor.manual_seed(42)
t = Tensor.randn(4)
print(t.numpy())
[-1.3113 -0.1159 -0.6549  1.84  ]
print(t.mul(3).numpy())
[-3.934  -0.3476 -1.9646  5.5201]
print(t.mul(Tensor([[-1.0], [2.0]])).numpy())
[[ 1.3113  0.1159  0.6549 -1.84  ]
 [-2.6226 -0.2318 -1.3097  3.6801]]

Source code in tinygrad/tensor.py
def mul(self, x:Union[Tensor, ConstType], reverse=False) -> Tensor:
  """
  Multiplies `self` and `x`.
  Equivalent to `self * x`.
  Supports broadcasting to a common shape, type promotion, and integer, float, boolean inputs.

  ```python exec="true" source="above" session="tensor" result="python"
  Tensor.manual_seed(42)
  t = Tensor.randn(4)
  print(t.numpy())
  ```
  ```python exec="true" source="above" session="tensor" result="python"
  print(t.mul(3).numpy())
  ```
  ```python exec="true" source="above" session="tensor" result="python"
  print(t.mul(Tensor([[-1.0], [2.0]])).numpy())
  ```
  """
  return F.Mul.apply(*self._broadcasted(x, reverse))

div ¤

div(
    x: Union[Tensor, ConstType], reverse=False, upcast=True
) -> Tensor

Divides self by x. Equivalent to self / x. Supports broadcasting to a common shape, type promotion, and integer, float, boolean inputs. By default, div performs true division. Set upcast to False for integer division.

Tensor.manual_seed(42)
t = Tensor.randn(4)
print(t.numpy())
[-1.3113 -0.1159 -0.6549  1.84  ]
print(t.div(3).numpy())
[-0.4371 -0.0386 -0.2183  0.6133]
print(Tensor([1, 4, 10]).div(Tensor([2, 3, 4])).numpy())
[0.5    1.3333 2.5   ]
print(Tensor([1, 4, 10]).div(Tensor([2, 3, 4]), upcast=False).numpy())
[0 1 2]

Source code in tinygrad/tensor.py
def div(self, x:Union[Tensor, ConstType], reverse=False, upcast=True) -> Tensor:
  """
  Divides `self` by `x`.
  Equivalent to `self / x`.
  Supports broadcasting to a common shape, type promotion, and integer, float, boolean inputs.
  By default, `div` performs true division. Set `upcast` to `False` for integer division.

  ```python exec="true" source="above" session="tensor" result="python"
  Tensor.manual_seed(42)
  t = Tensor.randn(4)
  print(t.numpy())
  ```
  ```python exec="true" source="above" session="tensor" result="python"
  print(t.div(3).numpy())
  ```
  ```python exec="true" source="above" session="tensor" result="python"
  print(Tensor([1, 4, 10]).div(Tensor([2, 3, 4])).numpy())
  ```
  ```python exec="true" source="above" session="tensor" result="python"
  print(Tensor([1, 4, 10]).div(Tensor([2, 3, 4]), upcast=False).numpy())
  ```
  """
  numerator, denominator = self._broadcasted(x, reverse)
  if upcast: numerator, denominator = numerator.cast(least_upper_float(numerator.dtype)), denominator.cast(least_upper_float(denominator.dtype))
  return (numerator * denominator.reciprocal()) if dtypes.is_float(numerator.dtype) else F.IDiv.apply(numerator, denominator)

xor ¤

xor(x: Union[Tensor, ConstType], reverse=False) -> Tensor

Computes bitwise xor of self and x. Equivalent to self ^ x. Supports broadcasting to a common shape, type promotion, and integer, boolean inputs.

print(Tensor([-1, -2, 3]).xor(Tensor([1, 0, 3])).numpy())
[-2 -2  0]
print(Tensor([True, True, False, False]).xor(Tensor([True, False, True, False])).numpy())
[False  True  True False]

Source code in tinygrad/tensor.py
def xor(self, x:Union[Tensor, ConstType], reverse=False) -> Tensor:
  """
  Computes bitwise xor of `self` and `x`.
  Equivalent to `self ^ x`.
  Supports broadcasting to a common shape, type promotion, and integer, boolean inputs.

  ```python exec="true" source="above" session="tensor" result="python"
  print(Tensor([-1, -2, 3]).xor(Tensor([1, 0, 3])).numpy())
  ```
  ```python exec="true" source="above" session="tensor" result="python"
  print(Tensor([True, True, False, False]).xor(Tensor([True, False, True, False])).numpy())
  ```
  """
  return F.Xor.apply(*self._broadcasted(x, reverse))

lshift ¤

lshift(x: int)

Computes left arithmetic shift of self by x bits. self must have unsigned dtype. Equivalent to self << x.

print(Tensor([1, 3, 31], dtype=dtypes.uint8).lshift(2).numpy())
[  4  12 124]
Source code in tinygrad/tensor.py
def lshift(self, x:int):
  """
  Computes left arithmetic shift of `self` by `x` bits. `self` must have unsigned dtype.
  Equivalent to `self << x`.

  ```python exec="true" source="above" session="tensor" result="python"
  print(Tensor([1, 3, 31], dtype=dtypes.uint8).lshift(2).numpy())
  ```
  """
  assert dtypes.is_unsigned(self.dtype) and isinstance(x, int) and x >= 0, f"not supported {self.dtype=} {x=}"
  return self.mul(2 ** x)

rshift ¤

rshift(x: int)

Computes right arithmetic shift of self by x bits. self must have unsigned dtype. Equivalent to self >> x.

print(Tensor([4, 13, 125], dtype=dtypes.uint8).rshift(2).numpy())
[ 1  3 31]
Source code in tinygrad/tensor.py
def rshift(self, x:int):
  """
  Computes right arithmetic shift of `self` by `x` bits. `self` must have unsigned dtype.
  Equivalent to `self >> x`.

  ```python exec="true" source="above" session="tensor" result="python"
  print(Tensor([4, 13, 125], dtype=dtypes.uint8).rshift(2).numpy())
  ```
  """
  assert dtypes.is_unsigned(self.dtype) and isinstance(x, int) and x >= 0, f"not supported {self.dtype=} {x=}"
  return self.div(2 ** x, upcast=False)

pow ¤

pow(x: Union[Tensor, ConstType], reverse=False) -> Tensor

Computes power of self with x. Equivalent to self ** x.

print(Tensor([-1, 2, 3]).pow(2).numpy())
[1 4 9]
print(Tensor([-1, 2, 3]).pow(Tensor([-1.5, 0.5, 1.5])).numpy())
[   nan 1.4142 5.1962]
print((2 ** Tensor([-1, 2, 3])).numpy())
[0.5 4.  8. ]

Source code in tinygrad/tensor.py
def pow(self, x:Union[Tensor, ConstType], reverse=False) -> Tensor:
  """
  Computes power of `self` with `x`.
  Equivalent to `self ** x`.

  ```python exec="true" source="above" session="tensor" result="python"
  print(Tensor([-1, 2, 3]).pow(2).numpy())
  ```
  ```python exec="true" source="above" session="tensor" result="python"
  print(Tensor([-1, 2, 3]).pow(Tensor([-1.5, 0.5, 1.5])).numpy())
  ```
  ```python exec="true" source="above" session="tensor" result="python"
  print((2 ** Tensor([-1, 2, 3])).numpy())
  ```
  """
  x = self._to_const_val(x)
  if not isinstance(x, Tensor) and not reverse:
    # simple pow identities
    if x < 0: return self.reciprocal().pow(-x)
    if x == 0: return 1 + self * 0
    if int(x - 0.5) + 0.5 == x: return self.pow(int(x - 0.5)) * self.sqrt()
    if int(x) == x: return self.pow(x // 2).square() * (1 if x % 2 == 0 else self)

  # positive const ** self
  if not isinstance(x, Tensor) and reverse and x > 0: return self.mul(math.log(x)).exp()

  base, exponent = self._broadcasted(x, reverse=reverse)
  # start with b ** e = exp(e * log(b))
  ret = base.abs().log().mul(exponent).exp()
  # correct sign of negative base with odd exponent (cos has a period of 2pi so we use it here to get the oddness of the exponent)
  negative_base = (base < 0).detach().where(1, 0)
  # 1 for non-negative base or negative even exponent, -1 for negative odd exponent, don't care about non-integer exponent
  correct_sign = 1 + negative_base * ((exponent * math.pi).cos() - 1)
  # inject nan for negative base and non-integer exponent
  inject_nan = (negative_base * (exponent != exponent.trunc())).detach().where(math.nan, 1)
  # apply correct_sign inject_nan, and fix 0 ** 0 = 1
  return ((base == 0) * (exponent == 0)).detach().where(1, ret * correct_sign * inject_nan)
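
Tracing the sign-correction logic above: a negative base keeps its sign under an odd integer exponent, while a non-integer exponent injects nan. A hedged sketch:

```python
from tinygrad import Tensor

# negative base: odd integer exponent -> negative result, fractional -> nan
print(Tensor([-2.0, -2.0]).pow(Tensor([3.0, 0.5])).numpy())  # expected: [-8. nan]
```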

maximum ¤

maximum(x: Union[Tensor, ConstType]) -> Tensor

Computes element-wise maximum of self and x.

print(Tensor([-1, 2, 3]).maximum(1).numpy())
[1 2 3]
print(Tensor([-1, 2, 3]).maximum(Tensor([-4, -2, 9])).numpy())
[-1  2  9]

Source code in tinygrad/tensor.py
def maximum(self, x:Union[Tensor, ConstType]) -> Tensor:
  """
  Computes element-wise maximum of `self` and `x`.

  ```python exec="true" source="above" session="tensor" result="python"
  print(Tensor([-1, 2, 3]).maximum(1).numpy())
  ```
  ```python exec="true" source="above" session="tensor" result="python"
  print(Tensor([-1, 2, 3]).maximum(Tensor([-4, -2, 9])).numpy())
  ```
  """
  return (self<x).detach().where(x, (self==x).detach().where(((self * 0.5 + x * 0.5).cast(self.dtype)), self))

minimum ¤

minimum(x: Union[Tensor, ConstType]) -> Tensor

Computes element-wise minimum of self and x.

print(Tensor([-1, 2, 3]).minimum(1).numpy())
[-1  1  1]
print(Tensor([-1, 2, 3]).minimum(Tensor([-4, -2, 9])).numpy())
[-4 -2  3]

Source code in tinygrad/tensor.py
def minimum(self, x:Union[Tensor, ConstType]) -> Tensor:
  """
  Computes element-wise minimum of `self` and `x`.

  ```python exec="true" source="above" session="tensor" result="python"
  print(Tensor([-1, 2, 3]).minimum(1).numpy())
  ```
  ```python exec="true" source="above" session="tensor" result="python"
  print(Tensor([-1, 2, 3]).minimum(Tensor([-4, -2, 9])).numpy())
  ```
  """
  return -((-self).maximum(-x))

where ¤

where(x: Union[Tensor, ConstType], y: Union[Tensor, ConstType])

Return a tensor of elements selected from either x or y, depending on self. output_i = x_i if self_i else y_i.

cond = Tensor([[True, True, False], [True, False, False]])
print(cond.where(1, 3).numpy())
[[1 1 3]
 [1 3 3]]
Tensor.manual_seed(42)
cond = Tensor.randn(2, 3)
print(cond.numpy())
[[-0.8042 -1.1013 -0.9095]
 [ 1.2802 -2.2883  0.7078]]
print((cond > 0).where(cond, -float("inf")).numpy())
[[  -inf   -inf   -inf]
 [1.2802   -inf 0.7078]]

Source code in tinygrad/tensor.py
def where(self:Tensor, x:Union[Tensor, ConstType], y:Union[Tensor, ConstType]):
  """
  Return a tensor of elements selected from either `x` or `y`, depending on `self`.
  `output_i = x_i if self_i else y_i`.

  ```python exec="true" source="above" session="tensor" result="python"
  cond = Tensor([[True, True, False], [True, False, False]])
  print(cond.where(1, 3).numpy())
  ```
  ```python exec="true" source="above" session="tensor" result="python"
  Tensor.manual_seed(42)
  cond = Tensor.randn(2, 3)
  print(cond.numpy())
  ```
  ```python exec="true" source="above" session="tensor" result="python"
  print((cond > 0).where(cond, -float("inf")).numpy())
  ```
  """
  if isinstance(x, Tensor): x, y = x._broadcasted(y)
  elif isinstance(y, Tensor): y, x = y._broadcasted(x)
  cond, x = self._broadcasted(x, match_dtype=False)
  cond, y = cond._broadcasted(y, match_dtype=False)
  return F.Where.apply(cond.cast(dtypes.bool), *x._broadcasted(y))

Casting Ops¤

cast ¤

cast(dtype: DTypeLike) -> Tensor

Casts self to the given dtype.

t = Tensor([-1, 2.5, 3], dtype=dtypes.float)
print(t.dtype, t.numpy())
dtypes.float [-1.   2.5  3. ]
t = t.cast(dtypes.int32)
print(t.dtype, t.numpy())
dtypes.int [-1  2  3]

Source code in tinygrad/tensor.py
def cast(self, dtype:DTypeLike) -> Tensor:
  """
  Casts `self` to the given `dtype`.

  ```python exec="true" source="above" session="tensor" result="python"
  t = Tensor([-1, 2.5, 3], dtype=dtypes.float)
  print(t.dtype, t.numpy())
  ```
  ```python exec="true" source="above" session="tensor" result="python"
  t = t.cast(dtypes.int32)
  print(t.dtype, t.numpy())
  ```
  """
  return self if self.dtype == (dt:=to_dtype(dtype)) else F.Cast.apply(self, dtype=dt)

bitcast ¤

bitcast(dtype: DTypeLike) -> Tensor

Bitcasts self to the given dtype of the same itemsize.

self must not require a gradient.

t = Tensor([-1, 2, 3], dtype=dtypes.int32)
print(t.dtype, t.numpy())
dtypes.int [-1  2  3]
t = t.bitcast(dtypes.uint32)
print(t.dtype, t.numpy())
dtypes.uint [4294967295          2          3]

Source code in tinygrad/tensor.py
def bitcast(self, dtype:DTypeLike) -> Tensor:
  """
  Bitcasts `self` to the given `dtype` of the same itemsize.

  `self` must not require a gradient.

  ```python exec="true" source="above" session="tensor" result="python"
  t = Tensor([-1, 2, 3], dtype=dtypes.int32)
  print(t.dtype, t.numpy())
  ```
  ```python exec="true" source="above" session="tensor" result="python"
  t = t.bitcast(dtypes.uint32)
  print(t.dtype, t.numpy())
  ```
  """
  if self.requires_grad: raise RuntimeError("can't backprop through bitcast")
  dt = to_dtype(dtype)
  if (not isinstance(self.device, str) or not self.device.startswith("DISK")) and (ns:=dt.itemsize) != (os:=self.dtype.itemsize):
    if (self.shape[-1]*os) % ns != 0: raise RuntimeError("unsupported size in bitcast")
    new_uint, old_uint = to_dtype(f"uint{8*ns}"), to_dtype(f"uint{8*os}")
    tmp = self.bitcast(old_uint)
    if ns > os: return functools.reduce(Tensor.add, (tmp[..., i::ns//os].cast(new_uint) << 8*i*os for i in range(ns//os))).bitcast(dtype)
    return Tensor.stack(*(tmp>>8*i*ns for i in range(os//ns)), dim=-1).flatten(-2).cast(new_uint).bitcast(dtype)
  return F.Cast.apply(self, dtype=dt, bitcast=True) if self.dtype != dt else self
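
When the itemsizes differ, the branch above rebuilds the value bytewise, so the last axis is rescaled by the itemsize ratio and the byte order is little-endian by construction. A hedged sketch (expected output inferred from the code above, not verified on every backend):

```python
from tinygrad import Tensor, dtypes

t = Tensor([1, 256], dtype=dtypes.int32)
# one int32 becomes four uint8s (low byte first), shape (2,) -> (8,)
print(t.bitcast(dtypes.uint8).numpy())  # expected: [1 0 0 0 0 1 0 0]
```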

float ¤

float() -> Tensor

Convenience method to cast self to a float32 Tensor.

t = Tensor([-1, 2, 3], dtype=dtypes.int32)
print(t.dtype, t.numpy())
dtypes.int [-1  2  3]
t = t.float()
print(t.dtype, t.numpy())
dtypes.float [-1.  2.  3.]

Source code in tinygrad/tensor.py
def float(self) -> Tensor:
  """
  Convenience method to cast `self` to a `float32` Tensor.

  ```python exec="true" source="above" session="tensor" result="python"
  t = Tensor([-1, 2, 3], dtype=dtypes.int32)
  print(t.dtype, t.numpy())
  ```
  ```python exec="true" source="above" session="tensor" result="python"
  t = t.float()
  print(t.dtype, t.numpy())
  ```
  """
  return self.cast(dtypes.float32)

half ¤

half() -> Tensor

Convenience method to cast self to a float16 Tensor.

t = Tensor([-1, 2, 3], dtype=dtypes.int32)
print(t.dtype, t.numpy())
dtypes.int [-1  2  3]
t = t.half()
print(t.dtype, t.numpy())
dtypes.half [-1.  2.  3.]

Source code in tinygrad/tensor.py
def half(self) -> Tensor:
  """
  Convenience method to cast `self` to a `float16` Tensor.

  ```python exec="true" source="above" session="tensor" result="python"
  t = Tensor([-1, 2, 3], dtype=dtypes.int32)
  print(t.dtype, t.numpy())
  ```
  ```python exec="true" source="above" session="tensor" result="python"
  t = t.half()
  print(t.dtype, t.numpy())
  ```
  """
  return self.cast(dtypes.float16)

int ¤

int() -> Tensor

Convenience method to cast self to a int32 Tensor.

t = Tensor([-1.5, -0.5, 0.0, 0.5, 1.5])
print(t.dtype, t.numpy())
dtypes.float [-1.5 -0.5  0.   0.5  1.5]
t = t.int()
print(t.dtype, t.numpy())
dtypes.int [-1  0  0  0  1]

Source code in tinygrad/tensor.py
def int(self) -> Tensor:
  """
  Convenience method to cast `self` to a `int32` Tensor.

  ```python exec="true" source="above" session="tensor" result="python"
  t = Tensor([-1.5, -0.5, 0.0, 0.5, 1.5])
  print(t.dtype, t.numpy())
  ```
  ```python exec="true" source="above" session="tensor" result="python"
  t = t.int()
  print(t.dtype, t.numpy())
  ```
  """
  return self.cast(dtypes.int32)

bool ¤

bool() -> Tensor

Convenience method to cast self to a bool Tensor.

t = Tensor([-1, 0, 1])
print(t.dtype, t.numpy())
dtypes.int [-1  0  1]
t = t.bool()
print(t.dtype, t.numpy())
dtypes.bool [ True False  True]

Source code in tinygrad/tensor.py
def bool(self) -> Tensor:
  """
  Convenience method to cast `self` to a `bool` Tensor.

  ```python exec="true" source="above" session="tensor" result="python"
  t = Tensor([-1, 0, 1])
  print(t.dtype, t.numpy())
  ```
  ```python exec="true" source="above" session="tensor" result="python"
  t = t.bool()
  print(t.dtype, t.numpy())
  ```
  """
  return self.cast(dtypes.bool)