    #tensorBroadcastAxis = getbroadcastAxid(tensor1, gradientForTensor1)
    #if tensorBroadcastAxis is not None:
    #    gradientForTensor1 = np.sum(gradientForTensor1, axis=tuple(tensorBroadcastAxis), keepdims=True)
    tensor1.gradient = np.multiply(bools, gradient)
    if tensor1.gradientFunc:
        tensor1.gradientFunc(tensor1.gradient)

    #tensorBroadcastAxis = getbroadcastAxid(tensor2, gradientForTensor2)
    #if tensorBroadcastAxis is not None:
    #    gradientForTensor2 = np.sum(gradientForTensor2, axis=tuple(tensorBroadcastAxis), keepdims=True)
    tensor2.gradient = np.multiply(bools, gradient)
    if tensor2.gradientFunc:
        tensor2.gradientFunc(tensor2.gradient)
def flattenForward(tensor: Tensor) -> Tensor:
    tensor = tensor if isinstance(tensor, Tensor) else Tensor(tensor)
    data = np.reshape(tensor.data, -1)
    gradientFunc = partial(flattenBackward, tensor) if tensor.requireGradient else None
    return Tensor(data, requireGradient=tensor.requireGradient, gradientFunc=gradientFunc)


def flattenBackward(tensor: Tensor, gradient: np.ndarray) -> None:
    if tensor.requireGradient:
        tensor.gradient = np.reshape(gradient, tensor.shape)
        if tensor.gradientFunc:
            tensor.gradientFunc(tensor.gradient)
def reshapeForward(tensor: Tensor, *args, **kwargs) -> Tensor:
    tensor = tensor if isinstance(tensor, Tensor) else Tensor(tensor)
    data = np.reshape(tensor.data, *args, **kwargs)
    gradientFunc = partial(reshapeBackward, tensor) if tensor.requireGradient else None
    return Tensor(data, requireGradient=tensor.requireGradient, gradientFunc=gradientFunc)


def reshapeBackward(tensor: Tensor, gradient: np.ndarray) -> None:
    if tensor.requireGradient:
        tensor.gradient = np.reshape(gradient, tensor.shape)
        if tensor.gradientFunc:
            tensor.gradientFunc(tensor.gradient)
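# A quick sanity check of the forward/backward convention above. This is a
# sketch, assuming the Tensor class defined earlier in the post exposes
# .data, .gradient, .requireGradient and .gradientFunc as used throughout;
# the _demo helper itself is illustrative only, not part of the library.
def _demoFlattenGradient() -> None:
    x = Tensor(np.arange(6.0).reshape(2, 3), requireGradient=True)
    y = flattenForward(x)
    # Seed the backward pass with ones, as d(sum(y))/dy would.
    y.gradientFunc(np.ones_like(y.data))
    assert x.gradient.shape == (2, 3)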
#
# Broadcasting
#
def repeatForward(tensor: Tensor, repeats: ArrayLike, axis: int = None) -> Tensor:
    tensor = tensor if isinstance(tensor, Tensor) else Tensor(tensor)
    data = np.repeat(tensor.data, repeats=repeats, axis=axis)
    # The backward pass needs repeats and axis, so bind them here.
    gradientFunc = partial(repeatBackward, tensor, repeats, axis) if tensor.requireGradient else None
    return Tensor(data, requireGradient=tensor.requireGradient, gradientFunc=gradientFunc)


def repeatBackward(tensor: Tensor, repeats: int, axis: int, gradient: np.ndarray) -> None:
    # Each input element was copied `repeats` times, so its gradient is the
    # sum over those copies (assumes a scalar `repeats`).
    if axis is None:
        # np.repeat with axis=None flattens first; fold the copies into a
        # trailing axis, sum it out, then restore the original shape.
        grad = np.reshape(gradient, (-1, repeats)).sum(axis=1)
        tensor.gradient = np.reshape(grad, tensor.shape)
    else:
        shape = list(tensor.shape)
        shape.insert(axis + 1, repeats)
        tensor.gradient = np.reshape(gradient, shape).sum(axis=axis + 1)
    if tensor.gradientFunc:
        tensor.gradientFunc(tensor.gradient)
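# Sanity check for the summed (not averaged) repeat gradient, under the same
# Tensor assumptions as the demo above (again an illustrative helper only):
def _demoRepeatGradient() -> None:
    x = Tensor(np.array([1.0, 2.0]), requireGradient=True)
    y = repeatForward(x, repeats=3)  # y.data == [1, 1, 1, 2, 2, 2]
    y.gradientFunc(np.ones(6))
    # Each input element collects the gradient of its three copies.
    assert np.array_equal(x.gradient, np.array([3.0, 3.0]))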
def tileForward(tensor: Tensor, reps: ArrayLike) -> Tensor:
    tensor = tensor if isinstance(tensor, Tensor) else Tensor(tensor)
    data = np.tile(tensor.data, reps=reps)
    gradientFunc = partial(tileBackward, tensor, reps) if tensor.requireGradient else None
    return Tensor(data, requireGradient=tensor.requireGradient, gradientFunc=gradientFunc)


def tileBackward(tensor: Tensor, reps: ArrayLike, gradient: np.ndarray) -> None:
    # Interleave (rep, dim) pairs, then sum out the repetition axes.
    # Assumes len(reps) <= tensor.ndim, so tiling did not add dimensions.
    reps = (reps,) if isinstance(reps, int) else tuple(reps)
    reps = (1,) * (tensor.ndim - len(reps)) + reps
    shape = [x for pair in zip(reps, tensor.shape) for x in pair]
    tensor.gradient = np.reshape(gradient, shape).sum(axis=tuple(range(0, 2 * tensor.ndim, 2)))
    if tensor.gradientFunc:
        tensor.gradientFunc(tensor.gradient)
def concatenateForward(tensors: list, axis=0, out=None, dtype=None, casting='same_kind') -> Tensor:
    tensors = [checkTensor(tensor) for tensor in tensors]
    data = np.concatenate([tensor.data for tensor in tensors], axis=axis, out=out, dtype=dtype, casting=casting)
    requireGradient = any(tensor.requireGradient for tensor in tensors)
    if requireGradient:
        shapes = [tensor.shape for tensor in tensors]
        gradfunc = partial(concatenateBackward, tensors, shapes, axis, out, dtype, casting)
        return Tensor(data, requireGradient=True, gradientFunc=gradfunc)
    return Tensor(data, requireGradient=False, gradientFunc=None)
def concatenateBackward(tensors: list, shapes, axis=0, out=None, dtype=None, casting='same_kind', gradient: np.ndarray = None) -> None:
    # Split the incoming gradient at the original section boundaries.
    grads = np.split(gradient, np.cumsum([shape[axis] for shape in shapes[:-1]]), axis=axis)
    for tensor, grad in zip(tensors, grads):
        if tensor.requireGradient:
            tensor.gradient = grad
            if tensor.gradientFunc:
                tensor.gradientFunc(tensor.gradient)
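# Sketch of the concatenate round trip: the backward pass splits the incoming
# gradient at the recorded boundaries and hands each piece back. Assumes the
# checkTensor helper from earlier in the post passes Tensor instances through;
# the _demo helper is illustrative only.
def _demoConcatenateGradient() -> None:
    a = Tensor(np.ones((2, 3)), requireGradient=True)
    b = Tensor(np.ones((4, 3)), requireGradient=True)
    c = concatenateForward([a, b], axis=0)
    c.gradientFunc(np.ones_like(c.data))
    assert a.gradient.shape == (2, 3) and b.gradient.shape == (4, 3)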
# These wrappers assume inputs of at least 2-D (np.hstack itself falls back
# to axis 0 for 1-D arrays).
def hstackForward(tensors: list, dtype=None, casting='same_kind') -> Tensor:
    return concatenateForward(tensors, axis=1, out=None, dtype=dtype, casting=casting)


def vstackForward(tensors: list, dtype=None, casting='same_kind') -> Tensor:
    return concatenateForward(tensors, axis=0, out=None, dtype=dtype, casting=casting)


def dstackForward(tensors: list, dtype=None, casting='same_kind') -> Tensor:
    return concatenateForward(tensors, axis=2, out=None, dtype=dtype, casting=casting)
def splitForward(tensor: Tensor, indices_or_sections, axis=0) -> list:
    tensor = tensor if isinstance(tensor, Tensor) else Tensor(tensor)
    data = np.split(tensor.data, indices_or_sections, axis)
    if tensor.requireGradient:
        gradfunc = partial(splitBackward, tensor, axis)
        return [Tensor(datum, requireGradient=True, gradientFunc=gradfunc) for datum in data]
    return [Tensor(datum, requireGradient=False, gradientFunc=None) for datum in data]


def splitBackward(tensor: Tensor, axis=0, gradient=None) -> None:
    # Expects the gradients of all split pieces together, so they can be
    # concatenated back into the source tensor's shape.
    tensor.gradient = np.concatenate(gradient, axis=axis)
    if tensor.gradientFunc:
        tensor.gradientFunc(tensor.gradient)
def hsplitForward(tensor: Tensor, indices_or_sections) -> list:
    return splitForward(tensor, indices_or_sections, axis=1)


def vsplitForward(tensor: Tensor, indices_or_sections) -> list:
    return splitForward(tensor, indices_or_sections, axis=0)


def dsplitForward(tensor: Tensor, indices_or_sections) -> list:
    return splitForward(tensor, indices_or_sections, axis=2)
def sumForward(tensor: Tensor, axis=None, dtype=None, keepdims=False, **kwargs) -> Tensor:
    tensor = tensor if isinstance(tensor, Tensor) else Tensor(tensor)
    data = np.sum(tensor.data, axis=axis, dtype=dtype, keepdims=keepdims, **kwargs)
    gradientFunc = partial(sumBackward, tensor, axis, keepdims) if tensor.requireGradient else None
    return Tensor(data, requireGradient=tensor.requireGradient, gradientFunc=gradientFunc)


def sumBackward(tensor: Tensor, axis, keepdims, gradient: np.ndarray) -> None:
    if tensor.requireGradient:
        # Re-insert the summed axes so the gradient broadcasts back cleanly.
        if axis is not None and not keepdims:
            gradient = np.expand_dims(gradient, axis)
        tensor.gradient = np.broadcast_to(gradient, tensor.shape)
        if tensor.gradientFunc:
            tensor.gradientFunc(tensor.gradient)
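# Worked example of the re-expansion in sumBackward: reducing a (2, 3) tensor
# over axis=1 gives a (2,) gradient, which must become (2, 1) before it can
# broadcast back to (2, 3). Same Tensor assumptions as the demos above.
def _demoSumGradient() -> None:
    x = Tensor(np.arange(6.0).reshape(2, 3), requireGradient=True)
    y = sumForward(x, axis=1)
    y.gradientFunc(np.array([1.0, 2.0]))
    # Each row's gradient is replicated across its three summed elements.
    assert np.array_equal(x.gradient, np.array([[1.0, 1.0, 1.0], [2.0, 2.0, 2.0]]))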
def prodForward(tensor: Tensor, axis=None, dtype=None, keepdims=False) -> Tensor:
    tensor = tensor if isinstance(tensor, Tensor) else Tensor(tensor)
    data = np.prod(tensor.data, axis=axis, dtype=dtype, keepdims=keepdims)
    gradientFunc = partial(prodBackward, tensor, axis, dtype, keepdims) if tensor.requireGradient else None
    return Tensor(data, requireGradient=tensor.requireGradient, gradientFunc=gradientFunc)


def prodBackward(tensor: Tensor, axis, dtype, keepdims, gradient: np.ndarray) -> None:
    # d prod / d x_i = prod / x_i; substitute 1 for zeros to avoid dividing by 0.
    if axis is not None and not keepdims:
        gradient = np.expand_dims(gradient, axis)
    tensorNonZero = np.where(tensor.data != 0, tensor.data, 1)
    tensor.gradient = np.multiply(gradient, np.divide(np.prod(tensor.data, axis=axis, dtype=dtype, keepdims=True), tensorNonZero))
    if tensor.gradientFunc:
        tensor.gradientFunc(tensor.gradient)
def maximumForward(tensor1: Tensor, tensor2: Tensor, out=None, where=True, casting='same_kind', order='K', dtype=None, subok=True) -> Tensor:
    tensor1 = tensor1 if isinstance(tensor1, Tensor) else Tensor(tensor1)
    tensor2 = tensor2 if isinstance(tensor2, Tensor) else Tensor(tensor2)
    data = np.maximum(tensor1.data, tensor2.data, out=out, where=where, casting=casting, order=order, dtype=dtype, subok=subok)
    requireGradient = tensor1.requireGradient or tensor2.requireGradient
    gradientFunc = partial(maximumBackward, tensor1, tensor2, data) if requireGradient else None
    return Tensor(data, requireGradient=requireGradient, gradientFunc=gradientFunc)


def maximumBackward(tensor1: Tensor, tensor2: Tensor, data: np.ndarray, gradient: np.ndarray) -> None:
    # The gradient flows to whichever input supplied the maximum.
    if tensor1.requireGradient:
        tensor1.gradient = np.multiply(gradient, tensor1.data == data)
        if tensor1.gradientFunc:
            tensor1.gradientFunc(tensor1.gradient)
    if tensor2.requireGradient:
        tensor2.gradient = np.multiply(gradient, tensor2.data == data)
        if tensor2.gradientFunc:
            tensor2.gradientFunc(tensor2.gradient)
def minimumForward(tensor1: Tensor, tensor2: Tensor, out=None, where=True, casting='same_kind', order='K', dtype=None, subok=True) -> Tensor:
    tensor1 = tensor1 if isinstance(tensor1, Tensor) else Tensor(tensor1)
    tensor2 = tensor2 if isinstance(tensor2, Tensor) else Tensor(tensor2)
    data = np.minimum(tensor1.data, tensor2.data, out=out, where=where, casting=casting, order=order, dtype=dtype, subok=subok)
    requireGradient = tensor1.requireGradient or tensor2.requireGradient
    gradientFunc = partial(minimumBackward, tensor1, tensor2, data) if requireGradient else None
    return Tensor(data, requireGradient=requireGradient, gradientFunc=gradientFunc)


def minimumBackward(tensor1: Tensor, tensor2: Tensor, data: np.ndarray, gradient: np.ndarray) -> None:
    # Mirror image of maximumBackward: gradient flows to the smaller input.
    if tensor1.requireGradient:
        tensor1.gradient = np.multiply(gradient, tensor1.data == data)
        if tensor1.gradientFunc:
            tensor1.gradientFunc(tensor1.gradient)
    if tensor2.requireGradient:
        tensor2.gradient = np.multiply(gradient, tensor2.data == data)
        if tensor2.gradientFunc:
            tensor2.gradientFunc(tensor2.gradient)
def maxForward(tensor: Tensor, axis=None, keepdims=False) -> Tensor:
    tensor = tensor if isinstance(tensor, Tensor) else Tensor(tensor)
    data = np.max(tensor.data, axis=axis, keepdims=keepdims)
    if tensor.requireGradient:
        # Build the mask against a keepdims result so it broadcasts.
        mask = (tensor.data == np.max(tensor.data, axis=axis, keepdims=True))
        gradfunc = partial(maxBackward, tensor, mask, axis, keepdims)
        return Tensor(data, requireGradient=True, gradientFunc=gradfunc)
    return Tensor(data, requireGradient=False, gradientFunc=None)


def maxBackward(tensor: Tensor, mask: np.ndarray, axis, keepdims, gradient: np.ndarray) -> None:
    if tensor.requireGradient:
        if axis is not None and not keepdims:
            gradient = np.expand_dims(gradient, axis)
        tensor.gradient = np.multiply(mask, gradient)
        if tensor.gradientFunc:
            tensor.gradientFunc(tensor.gradient)
def minForward(tensor: Tensor, axis=None, keepdims=False) -> Tensor:
    tensor = tensor if isinstance(tensor, Tensor) else Tensor(tensor)
    data = np.min(tensor.data, axis=axis, keepdims=keepdims)
    if tensor.requireGradient:
        mask = (tensor.data == np.min(tensor.data, axis=axis, keepdims=True))
        gradfunc = partial(minBackward, tensor, mask, axis, keepdims)
        return Tensor(data, requireGradient=True, gradientFunc=gradfunc)
    return Tensor(data, requireGradient=False, gradientFunc=None)


def minBackward(tensor: Tensor, mask: np.ndarray, axis, keepdims, gradient: np.ndarray) -> None:
    if tensor.requireGradient:
        if axis is not None and not keepdims:
            gradient = np.expand_dims(gradient, axis)
        tensor.gradient = np.multiply(mask, gradient)
        if tensor.gradientFunc:
            tensor.gradientFunc(tensor.gradient)
def meanForward(tensor: Tensor, axis=None, keepdims=False) -> Tensor:
    tensor = tensor if isinstance(tensor, Tensor) else Tensor(tensor)
    data = np.mean(tensor.data, axis=axis, keepdims=keepdims)
    if tensor.requireGradient:
        if axis is None:
            divisor = np.prod(tensor.shape)
        elif isinstance(axis, int):
            divisor = tensor.shape[axis]
        else:
            divisor = np.prod([tensor.shape[i] for i in axis])
        gradfunc = partial(meanBackward, tensor, divisor, axis, keepdims)
        return Tensor(data, requireGradient=True, gradientFunc=gradfunc)
    return Tensor(data, requireGradient=False, gradientFunc=None)


def meanBackward(tensor: Tensor, divisor, axis, keepdims, gradient: np.ndarray) -> None:
    if tensor.requireGradient:
        if axis is not None and not keepdims:
            gradient = np.expand_dims(gradient, axis)
        # Spread the gradient evenly over the elements that were averaged.
        tensor.gradient = np.broadcast_to(np.divide(gradient, divisor), tensor.shape)
        if tensor.gradientFunc:
            tensor.gradientFunc(tensor.gradient)
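# Mean behaves like sum scaled by the element count, so each averaged element
# receives gradient / N. Same Tensor assumptions; illustrative helper only.
def _demoMeanGradient() -> None:
    x = Tensor(np.ones((2, 3)), requireGradient=True)
    y = meanForward(x, axis=1)
    y.gradientFunc(np.array([3.0, 6.0]))
    assert np.array_equal(x.gradient, np.array([[1.0, 1.0, 1.0], [2.0, 2.0, 2.0]]))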
def varForward(tensor: Tensor, axis=None, ddof=0, keepdims=False) -> Tensor:
    tensor = tensor if isinstance(tensor, Tensor) else Tensor(tensor)
    data = np.var(tensor.data, axis=axis, ddof=ddof, keepdims=keepdims)
    if tensor.requireGradient:
        # keepdims=True so the mean broadcasts against tensor.data.
        diff = np.subtract(tensor.data, np.mean(tensor.data, axis=axis, keepdims=True))
        if axis is None:
            divisor = np.prod(tensor.shape)
        elif isinstance(axis, int):
            divisor = tensor.shape[axis]
        else:
            divisor = np.prod([tensor.shape[i] for i in axis])
        gradfunc = partial(varBackward, tensor, divisor - ddof, diff, axis, keepdims)
        return Tensor(data, requireGradient=True, gradientFunc=gradfunc)
    return Tensor(data, requireGradient=False, gradientFunc=None)


def varBackward(tensor: Tensor, divisor, diff: np.ndarray, axis, keepdims, gradient: np.ndarray) -> None:
    if tensor.requireGradient:
        if axis is not None and not keepdims:
            gradient = np.expand_dims(gradient, axis)
        # d var / d x_i = 2 * (x_i - mean) / (N - ddof)
        tensor.gradient = np.multiply(np.multiply(np.divide(2.0, divisor), diff), gradient)
        if tensor.gradientFunc:
            tensor.gradientFunc(tensor.gradient)
def stdForward(tensor: Tensor, axis=None, keepdims=False) -> Tensor:
    tensor = tensor if isinstance(tensor, Tensor) else Tensor(tensor)
    data = np.std(tensor.data, axis=axis, keepdims=keepdims)
    if tensor.requireGradient:
        diff = np.subtract(tensor.data, np.mean(tensor.data, axis=axis, keepdims=True))
        if axis is None:
            divisor = np.prod(tensor.shape)
        elif isinstance(axis, int):
            divisor = tensor.shape[axis]
        else:
            divisor = np.prod([tensor.shape[i] for i in axis])
        # Keep a broadcastable copy of the result for the backward pass.
        std = np.std(tensor.data, axis=axis, keepdims=True)
        gradfunc = partial(stdBackward, tensor, divisor, diff, std, axis, keepdims)
        return Tensor(data, requireGradient=True, gradientFunc=gradfunc)
    return Tensor(data, requireGradient=False, gradientFunc=None)


def stdBackward(tensor: Tensor, divisor, diff: np.ndarray, std: np.ndarray, axis, keepdims, gradient: np.ndarray) -> None:
    if tensor.requireGradient:
        if axis is not None and not keepdims:
            gradient = np.expand_dims(gradient, axis)
        # d std / d x_i = (x_i - mean) / (N * std)
        tensor.gradient = np.multiply(gradient, np.divide(diff, np.multiply(divisor, std)))
        if tensor.gradientFunc:
            tensor.gradientFunc(tensor.gradient)
def padForward(tensor: Tensor, pad_width, mode='constant', constant_values=0) -> Tensor:
    tensor = tensor if isinstance(tensor, Tensor) else Tensor(tensor)
    data = np.pad(tensor.data, pad_width, mode=mode, constant_values=constant_values)
    gradientFunc = partial(padBackward, tensor, pad_width) if tensor.requireGradient else None
    return Tensor(data, requireGradient=tensor.requireGradient, gradientFunc=gradientFunc)


def padBackward(tensor: Tensor, pad_width, gradient: np.ndarray) -> None:
    if tensor.requireGradient:
        # Slice the padding back off; assumes pad_width is a sequence of
        # (before, after) pairs, one per axis.
        slices = tuple(slice(pad[0], -pad[1] if pad[1] != 0 else None) for pad in pad_width)
        tensor.gradient = gradient[slices]
        if tensor.gradientFunc:
            tensor.gradientFunc(tensor.gradient)
def insertForward(tensor: Tensor, values: Tensor, index: ArrayLike) -> Tensor:
    tensor = tensor if isinstance(tensor, Tensor) else Tensor(tensor)
    values = values if isinstance(values, Tensor) else Tensor(values)
    data = np.insert(tensor.data, index, values.data)
    requireGradient = tensor.requireGradient or values.requireGradient
    gradientFunc = partial(insertBackward, tensor, values, index) if requireGradient else None
    return Tensor(data, requireGradient=requireGradient, gradientFunc=gradientFunc)


def insertBackward(tensor: Tensor, values: Tensor, index: ArrayLike, gradient: np.ndarray) -> None:
    # Assumes a single insertion point, so np.delete/indexing undo np.insert.
    if tensor.requireGradient:
        tensor.gradient = np.delete(gradient, index)
        if tensor.gradientFunc:
            tensor.gradientFunc(tensor.gradient)
    if values.requireGradient:
        values.gradient = gradient[index]
        if values.gradientFunc:
            values.gradientFunc(values.gradient)
def transposeForward(tensor: Tensor) -> Tensor:
    tensor = tensor if isinstance(tensor, Tensor) else Tensor(tensor)
    data = np.transpose(tensor.data)
    gradientFunc = partial(transposeBackward, tensor) if tensor.requireGradient else None
    return Tensor(data, requireGradient=tensor.requireGradient, gradientFunc=gradientFunc)


def transposeBackward(tensor: Tensor, gradient: np.ndarray) -> None:
    if tensor.requireGradient:
        # Transposing again undoes the forward transpose.
        tensor.gradient = np.transpose(gradient)
        if tensor.gradientFunc:
            tensor.gradientFunc(tensor.gradient)
def whereForward(condition, tensor1: Tensor, tensor2: Tensor) -> Tensor:
    tensor1 = tensor1 if isinstance(tensor1, Tensor) else Tensor(tensor1)
    tensor2 = tensor2 if isinstance(tensor2, Tensor) else Tensor(tensor2)
    data = np.where(condition, tensor1.data, tensor2.data)
    requireGradient = tensor1.requireGradient or tensor2.requireGradient
    gradientFunc = partial(whereBackward, condition, tensor1, tensor2) if requireGradient else None
    return Tensor(data, requireGradient=requireGradient, gradientFunc=gradientFunc)


def whereBackward(condition, tensor1: Tensor, tensor2: Tensor, gradient: np.ndarray) -> None:
    if tensor1.requireGradient:
        tensor1.gradient = np.multiply(gradient, condition)
        if tensor1.gradientFunc:
            tensor1.gradientFunc(tensor1.gradient)
    if tensor2.requireGradient:
        tensor2.gradient = np.multiply(gradient, np.logical_not(condition))
        if tensor2.gradientFunc:
            tensor2.gradientFunc(tensor2.gradient)
def cumsumForward(tensor: Tensor, axis, *args, **kwargs) -> Tensor:
    tensor = tensor if isinstance(tensor, Tensor) else Tensor(tensor)
    data = np.cumsum(tensor.data, axis, *args, **kwargs)
    gradientFunc = partial(cumsumBackward, tensor, axis) if tensor.requireGradient else None
    return Tensor(data, requireGradient=tensor.requireGradient, gradientFunc=gradientFunc)


def cumsumBackward(tensor: Tensor, axis, gradient: np.ndarray) -> None:
    if tensor.requireGradient:
        # Reverse cumulative sum: flip, accumulate, flip back.
        grad = np.flip(np.cumsum(np.flip(gradient, axis=axis), axis=axis), axis=axis)
        tensor.gradient = np.reshape(grad, tensor.shape)
        if tensor.gradientFunc:
            tensor.gradientFunc(tensor.gradient)
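# Tiny check of the reverse-cumsum rule: for y = cumsum(x),
# dL/dx_i = sum over j >= i of dL/dy_j. Illustrative helper only.
def _demoCumsumGradient() -> None:
    x = Tensor(np.array([1.0, 2.0, 3.0]), requireGradient=True)
    y = cumsumForward(x, 0)
    y.gradientFunc(np.ones(3))
    assert np.array_equal(x.gradient, np.array([3.0, 2.0, 1.0]))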
def cumprodForward(tensor: Tensor, axis, *args, **kwargs) -> Tensor:
    tensor = tensor if isinstance(tensor, Tensor) else Tensor(tensor)
    data = np.cumprod(tensor.data, axis, *args, **kwargs)
    gradientFunc = partial(cumprodBackward, tensor, axis) if tensor.requireGradient else None
    return Tensor(data, requireGradient=tensor.requireGradient, gradientFunc=gradientFunc)


def cumprodBackward(tensor: Tensor, axis, gradient: np.ndarray) -> None:
    if tensor.requireGradient:
        # d cumprod / d x_i collects every downstream term; assumes no zeros
        # in tensor.data so the division is safe.
        output = np.cumprod(tensor.data, axis=axis)
        grad = np.flip(np.cumsum(np.flip(np.multiply(gradient, output), axis=axis), axis=axis), axis=axis)
        tensor.gradient = np.reshape(np.divide(grad, np.reshape(tensor.data, grad.shape)), tensor.shape)
        if tensor.gradientFunc:
            tensor.gradientFunc(tensor.gradient)
#
# Not working correctly
#
def asStridedForward(tensor: Tensor, shape=None, strides=None, subok=False) -> Tensor:
    tensor = tensor if isinstance(tensor, Tensor) else Tensor(tensor)
    patches = np.lib.stride_tricks.as_strided(tensor.data, shape=shape, strides=strides, subok=subok)
    if tensor.requireGradient:
        gradfunc = partial(asStridedBackward, tensor)
        return Tensor(data=patches, requireGradient=True, gradientFunc=gradfunc)
    return Tensor(data=patches, requireGradient=False, gradientFunc=None)


def asStridedBackward(tensor: Tensor, gradient: np.ndarray) -> None:
    # Summing the leading window axes ignores overlap, which is why this
    # section is flagged as not working correctly.
    tensor.gradient = gradient.sum(tuple(np.arange(gradient.ndim - tensor.ndim)))
    if tensor.gradientFunc:
        tensor.gradientFunc(tensor.gradient)
def slidingWindowForward(tensor: Tensor, window_shape=None, axis=None, *, subok=False, writeable=True) -> Tensor:
    tensor = tensor if isinstance(tensor, Tensor) else Tensor(tensor)
    patches = np.lib.stride_tricks.sliding_window_view(tensor.data, window_shape=window_shape, axis=axis, subok=subok, writeable=writeable)
    if tensor.requireGradient:
        gradfunc = partial(slidingWindowBackward, tensor)
        return Tensor(data=patches, requireGradient=True, gradientFunc=gradfunc)
    return Tensor(data=patches, requireGradient=False, gradientFunc=None)


def slidingWindowBackward(tensor: Tensor, gradient: np.ndarray) -> None:
    # Same caveat as asStridedBackward: overlapping windows are only summed,
    # not scattered back to their source positions.
    tensor.gradient = gradient.sum(tuple(np.arange(gradient.ndim - tensor.data.ndim)))
    if tensor.gradientFunc:
        tensor.gradientFunc(tensor.gradient)
def einsumForward(tensor1: Tensor, tensor2: Tensor, optimize=False) -> Tensor:
    tensor1 = tensor1 if isinstance(tensor1, Tensor) else Tensor(tensor1)
    tensor2 = tensor2 if isinstance(tensor2, Tensor) else Tensor(tensor2)
    # Hard-coded subscripts: a batched 2-D convolution contraction.
    einsums = np.einsum('bihwkl,oikl->bohw', tensor1.data, tensor2.data, optimize=optimize)
    requireGradient = tensor1.requireGradient or tensor2.requireGradient
    if requireGradient:
        gradfunc = partial(einsumBackward, tensor1, tensor2, optimize)
        return Tensor(einsums, requireGradient=True, gradientFunc=gradfunc)
    return Tensor(einsums, requireGradient=False, gradientFunc=None)


def einsumBackward(tensor1: Tensor, tensor2: Tensor, optimize, gradient: np.ndarray) -> None:
    if tensor1.requireGradient:
        tensor1.gradient = np.lib.stride_tricks.as_strided(gradient, shape=(*tensor1.data.shape, *tensor2.data.shape[-2:]), strides=(*tensor1.data.strides, 0, 0))
        if tensor1.gradientFunc:
            tensor1.gradientFunc(tensor1.gradient)
    if tensor2.requireGradient:
        tensor2.gradient = np.lib.stride_tricks.as_strided(gradient, shape=(*tensor2.data.shape[:-2], *tensor1.data.shape[-2:]), strides=(0, 0, *tensor1.data.strides[-2:]))
        if tensor2.gradientFunc:
            tensor2.gradientFunc(tensor2.gradient)
#
# Mapping from Numpy to Tensor
#
ufuncMap = {
    np.add: addForward,
    np.subtract: subtractForward,
    np.multiply: multiplyForward,
    np.divide: divideForward,
    np.matmul: matmulForward,
    np.power: powerForward,
    np.square: squareForward,
    np.sqrt: sqrtForward,
    np.log: logForward,
    np.exp: expForward,
    np.sin: sinForward,
    np.cos: cosForward,
    np.tan: tanForward,
    np.sinh: sinhForward,
    np.cosh: coshForward,
    np.tanh: tanhForward,
    np.abs: absForward,
    np.sign: signForward,
    np.positive: positiveForward,
    np.negative: negativeForward,
    np.equal: equalForward,
    np.not_equal: notEqualForward,
    np.less: lessForward,
    np.less_equal: lessEqualForward,
    np.greater: greaterForward,
    np.greater_equal: greaterEqualForward,
    np.maximum: maximumForward,
    np.minimum: minimumForward,
}

# Non-ufunc NumPy functions get their own table (this dict's name is a guess;
# the original listing is truncated here).
funcMap = {
    np.sum: sumForward,
    np.prod: prodForward,
    np.repeat: repeatForward,
    np.tile: tileForward,
    np.max: maxForward,
    np.min: minForward,
    np.mean: meanForward,
    np.var: varForward,
    np.std: stdForward,
    np.reshape: reshapeForward,
    np.transpose: transposeForward,
    np.concatenate: concatenateForward,
    np.hstack: hstackForward,
    np.vstack: vstackForward,
    np.dstack: dstackForward,
    np.split: splitForward,
    np.hsplit: hsplitForward,
    np.vsplit: vsplitForward,
    np.dsplit: dsplitForward,
    np.pad: padForward,
    np.insert: insertForward,
    np.where: whereForward,
    np.cumsum: cumsumForward,
    np.cumprod: cumprodForward,
    np.einsum: einsumForward,
}
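# One way these tables can be wired into NumPy's dispatch hooks. This is a
# sketch: it assumes the Tensor class from earlier implements
# __array_ufunc__/__array_function__ and delegates to helpers like these
# (the helper names, like the funcMap name above, are assumptions, not part
# of the original listing).
def dispatchUfunc(ufunc, method, *inputs, **kwargs):
    # Called from Tensor.__array_ufunc__: routes np.add(x, y) etc. to the
    # gradient-aware forward, or lets NumPy keep looking for a handler.
    if method == '__call__' and ufunc in ufuncMap:
        return ufuncMap[ufunc](*inputs, **kwargs)
    return NotImplemented


def dispatchFunc(func, args, kwargs):
    # Called from Tensor.__array_function__: the same idea for non-ufuncs
    # such as np.sum or np.concatenate.
    if func in funcMap:
        return funcMap[func](*args, **kwargs)
    return NotImplemented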