def lessEqualForward(tensor1: Tensor, tensor2: Tensor, *args, **kwargs) -> Tensor:
    if not isinstance(tensor1, Tensor):
        tensor1 = Tensor(tensor1)
    if not isinstance(tensor2, Tensor):
        tensor2 = Tensor(tensor2)
    data = np.less_equal(tensor1.data, tensor2.data, *args, **kwargs)
    if tensor1.requireGradient or tensor2.requireGradient:
        gradfunc = partial(lessEqualBackward, tensor1, tensor2, data)
        return Tensor(data, requireGradient=True, gradientFunc=gradfunc)
    return Tensor(data, requireGradient=False, gradientFunc=None)

def lessEqualBackward(tensor1: Tensor, tensor2: Tensor, bools: np.ndarray, gradient: np.ndarray, *args, **kwargs) -> None:
    if tensor1.requireGradient:
        gradientForTensor1 = np.copy(gradient)
        tensorBroadcastAxis = getbroadcastAxid(tensor1, gradientForTensor1)
        if tensorBroadcastAxis is not None:
            gradientForTensor1 = np.sum(gradientForTensor1, axis=tuple(tensorBroadcastAxis), keepdims=True)
        tensor1.gradient = gradientForTensor1
        if tensor1.gradientFunc:
            tensor1.gradientFunc(tensor1.gradient)
    if tensor2.requireGradient:
        gradientForTensor2 = np.copy(gradient)
        tensorBroadcastAxis = getbroadcastAxid(tensor2, gradientForTensor2)
        if tensorBroadcastAxis is not None:
            gradientForTensor2 = np.sum(gradientForTensor2, axis=tuple(tensorBroadcastAxis), keepdims=True)
        tensor2.gradient = gradientForTensor2
        if tensor2.gradientFunc:
            tensor2.gradientFunc(tensor2.gradient)

def greaterForward(tensor1: Tensor, tensor2: Tensor, *args, **kwargs) -> Tensor:
    if not isinstance(tensor1, Tensor):
        tensor1 = Tensor(tensor1)
    if not isinstance(tensor2, Tensor):
        tensor2 = Tensor(tensor2)
    data = np.greater(tensor1.data, tensor2.data, *args, **kwargs)
    if tensor1.requireGradient or tensor2.requireGradient:
        gradfunc = partial(greaterBackward, tensor1, tensor2, data)
        return Tensor(data, requireGradient=True, gradientFunc=gradfunc)
    return Tensor(data, requireGradient=False, gradientFunc=None)

def greaterBackward(tensor1: Tensor, tensor2: Tensor, bools: np.ndarray, gradient: np.ndarray, *args, **kwargs) -> None:
    if tensor1.requireGradient:
        gradientForTensor1 = np.copy(gradient)
        tensorBroadcastAxis = getbroadcastAxid(tensor1, gradientForTensor1)
        if tensorBroadcastAxis is not None:
            gradientForTensor1 = np.sum(gradientForTensor1, axis=tuple(tensorBroadcastAxis), keepdims=True)
        tensor1.gradient = gradientForTensor1
        if tensor1.gradientFunc:
            tensor1.gradientFunc(tensor1.gradient)
    if tensor2.requireGradient:
        gradientForTensor2 = np.copy(gradient)
        tensorBroadcastAxis = getbroadcastAxid(tensor2, gradientForTensor2)
        if tensorBroadcastAxis is not None:
            gradientForTensor2 = np.sum(gradientForTensor2, axis=tuple(tensorBroadcastAxis), keepdims=True)
        tensor2.gradient = gradientForTensor2
        if tensor2.gradientFunc:
            tensor2.gradientFunc(tensor2.gradient)

def greaterEqualForward(tensor1: Tensor, tensor2: Tensor, *args, **kwargs) -> Tensor:
    if not isinstance(tensor1, Tensor):
        tensor1 = Tensor(tensor1)
    if not isinstance(tensor2, Tensor):
        tensor2 = Tensor(tensor2)
    data = np.greater_equal(tensor1.data, tensor2.data, *args, **kwargs)
    if tensor1.requireGradient or tensor2.requireGradient:
        gradfunc = partial(greaterEqualBackward, tensor1, tensor2, data)
        return Tensor(data, requireGradient=True, gradientFunc=gradfunc)
    return Tensor(data, requireGradient=False, gradientFunc=None)

def greaterEqualBackward(tensor1: Tensor, tensor2: Tensor, bools: np.ndarray, gradient: np.ndarray, *args, **kwargs) -> None:
    if tensor1.requireGradient:
        gradientForTensor1 = np.copy(gradient)
        tensorBroadcastAxis = getbroadcastAxid(tensor1, gradientForTensor1)
        if tensorBroadcastAxis is not None:
            gradientForTensor1 = np.sum(gradientForTensor1, axis=tuple(tensorBroadcastAxis), keepdims=True)
        tensor1.gradient = gradientForTensor1
        if tensor1.gradientFunc:
            tensor1.gradientFunc(tensor1.gradient)
    if tensor2.requireGradient:
        gradientForTensor2 = np.copy(gradient)
        tensorBroadcastAxis = getbroadcastAxid(tensor2, gradientForTensor2)
        if tensorBroadcastAxis is not None:
            gradientForTensor2 = np.sum(gradientForTensor2, axis=tuple(tensorBroadcastAxis), keepdims=True)
        tensor2.gradient = gradientForTensor2
        if tensor2.gradientFunc:
            tensor2.gradientFunc(tensor2.gradient)

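# A small self-check (not part of the original API) of the broadcast bookkeeping
# shared by the comparison backwards above: summing the gradient over the axes
# along which an operand was stretched restores that operand's shape.
# `broadcastAxes` is a hypothetical stand-in for getbroadcastAxid.
def _broadcastReductionCheck():
    def broadcastAxes(shape, gradShape):
        padded = (1,) * (len(gradShape) - len(shape)) + tuple(shape)
        return tuple(i for i, (s, g) in enumerate(zip(padded, gradShape)) if s == 1 and g > 1)

    operand = np.ones((3, 1))                             # was broadcast to (2, 3, 4)
    gradient = np.ones((2, 3, 4))                         # output-shaped gradient
    axes = broadcastAxes(operand.shape, gradient.shape)   # -> (0, 2)
    reduced = np.sum(gradient, axis=axes, keepdims=True)  # shape (1, 3, 1)
    assert reduced.reshape(operand.shape).shape == operand.shape
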
def flattenForward(tensor: Tensor) -> Tensor:
    if not isinstance(tensor, Tensor):
        tensor = Tensor(tensor)
    data = np.reshape(tensor.data, -1)
    if tensor.requireGradient:
        gradfunc = partial(flattenBackward, tensor)
        return Tensor(data, requireGradient=True, gradientFunc=gradfunc)
    return Tensor(data, requireGradient=False, gradientFunc=None)

def flattenBackward(tensor: Tensor, gradient: np.ndarray) -> None:
    if tensor.requireGradient:
        tensor.gradient = np.reshape(gradient, tensor.shape)
        if tensor.gradientFunc:
            tensor.gradientFunc(tensor.gradient)

def reshapeForward(tensor: Tensor, *args, **kwargs) -> Tensor:
    if not isinstance(tensor, Tensor):
        tensor = Tensor(tensor)
    data = np.reshape(tensor.data, *args, **kwargs)
    if tensor.requireGradient:
        gradfunc = partial(reshapeBackward, tensor)
        return Tensor(data, requireGradient=True, gradientFunc=gradfunc)
    return Tensor(data, requireGradient=False, gradientFunc=None)

def reshapeBackward(tensor: Tensor, gradient: np.ndarray) -> None:
    if tensor.requireGradient:
        tensor.gradient = np.reshape(gradient, tensor.shape)
        if tensor.gradientFunc:
            tensor.gradientFunc(tensor.gradient)

#
# Broadcasting
#
def repeatForward(tensor: Tensor, repeats: ArrayLike, axis: int = None) -> Tensor:
    if not isinstance(tensor, Tensor):
        tensor = Tensor(tensor)
    data = np.repeat(tensor.data, repeats=repeats, axis=axis)
    if tensor.requireGradient:
        gradfunc = partial(repeatBackward, tensor, repeats, axis)
        return Tensor(data, requireGradient=True, gradientFunc=gradfunc)
    return Tensor(data, requireGradient=False, gradientFunc=None)

def repeatBackward(tensor: Tensor, repeats: int, axis: int, gradient: np.ndarray) -> None:
    # Assumes scalar repeats: fold each element's copies into one axis and sum it out.
    if axis is None:
        # np.repeat flattened the input first.
        grad = np.reshape(gradient, (-1, repeats)).sum(axis=-1)
    else:
        newShape = tensor.shape[:axis] + (tensor.shape[axis], repeats) + tensor.shape[axis + 1:]
        grad = np.reshape(gradient, newShape).sum(axis=axis + 1)
    tensor.gradient = np.reshape(grad, tensor.shape)
    if tensor.gradientFunc:
        tensor.gradientFunc(tensor.gradient)

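# Sketch of why repeatBackward's reshape-and-sum works (assuming scalar repeats,
# as the backward above does): each input element feeds `repeats` consecutive
# outputs, so folding those copies into one axis and summing recovers its gradient.
def _repeatGradientCheck():
    x = np.arange(6.0).reshape(2, 3)
    repeats, axis = 4, 1
    gradient = np.ones((2, 3 * repeats))      # gradient of np.repeat(x, 4, axis=1)
    newShape = x.shape[:axis] + (x.shape[axis], repeats) + x.shape[axis + 1:]
    grad = np.reshape(gradient, newShape).sum(axis=axis + 1)
    assert np.array_equal(grad, np.full_like(x, repeats))
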
def tileForward(tensor: Tensor, reps: ArrayLike) -> Tensor:
    if not isinstance(tensor, Tensor):
        tensor = Tensor(tensor)
    data = np.tile(tensor.data, reps=reps)
    if tensor.requireGradient:
        gradfunc = partial(tileBackward, tensor, reps)
        return Tensor(data, requireGradient=True, gradientFunc=gradfunc)
    return Tensor(data, requireGradient=False, gradientFunc=None)

def tileBackward(tensor: Tensor, reps: ArrayLike, gradient: np.ndarray) -> None:
    # Split every tiled axis into a (rep, size) pair, then sum out the rep axes.
    reps = (1,) * (tensor.ndim - np.size(reps)) + tuple(np.atleast_1d(reps))
    shape = (1,) * (len(reps) - tensor.ndim) + tuple(tensor.shape)
    pairs = [d for r, s in zip(reps, shape) for d in (r, s)]
    grad = np.reshape(gradient, pairs).sum(axis=tuple(range(0, len(pairs), 2)))
    tensor.gradient = np.reshape(grad, tensor.shape)
    if tensor.gradientFunc:
        tensor.gradientFunc(tensor.gradient)

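# Companion check for tileBackward: tiling copies the whole block, so splitting
# every axis into a (rep, size) pair and summing the rep axes counts each
# element prod(reps) times. Illustrative only; not part of the original file.
def _tileGradientCheck():
    x = np.arange(6.0).reshape(2, 3)
    reps = (2, 2)
    gradient = np.ones((4, 6))                # gradient of np.tile(x, (2, 2))
    pairs = [d for r, s in zip(reps, x.shape) for d in (r, s)]   # [2, 2, 2, 3]
    grad = np.reshape(gradient, pairs).sum(axis=(0, 2))
    assert np.array_equal(grad, np.full_like(x, 4.0))
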
def concatenateForward(tensors: list[Tensor], axis=0, out=None, dtype=None, casting='same_kind') -> Tensor:
    tensors = [checkTensor(tensor) for tensor in tensors]
    data = np.concatenate([tensor.data for tensor in tensors], axis=axis, out=out, dtype=dtype, casting=casting)
    requireGradient = any(tensor.requireGradient for tensor in tensors)
    if requireGradient:
        shapes = [tensor.shape for tensor in tensors]
        gradfunc = partial(concatenateBackward, tensors, shapes, axis, out, dtype, casting)
        return Tensor(data, requireGradient=True, gradientFunc=gradfunc)
    return Tensor(data, requireGradient=False, gradientFunc=None)

def concatenateBackward(tensors: list[Tensor], shapes, axis=0, out=None, dtype=None, casting='same_kind', gradient: np.ndarray = None) -> None:
    # Split the incoming gradient at the boundaries between the original pieces.
    grads = np.split(gradient, np.cumsum([shape[axis] for shape in shapes[:-1]]), axis=axis)
    for tensor, grad in zip(tensors, grads):
        if tensor.requireGradient:
            tensor.gradient = grad
            if tensor.gradientFunc:
                tensor.gradientFunc(tensor.gradient)

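# The cumsum of the leading piece sizes gives the cut points np.split needs to
# undo the concatenation; a minimal illustration (not from the original file):
def _concatenateGradientCheck():
    shapes = [(2, 3), (2, 5), (2, 1)]
    gradient = np.ones((2, 9))                        # gradient of the concatenated result
    bounds = np.cumsum([s[1] for s in shapes[:-1]])   # [3, 8]
    pieces = np.split(gradient, bounds, axis=1)
    assert [p.shape for p in pieces] == [(2, 3), (2, 5), (2, 1)]
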
def hstackForward(tensors: list[Tensor], dtype=None, casting='same_kind') -> Tensor:
    return concatenateForward(tensors, axis=1, out=None, dtype=dtype, casting=casting)

def vstackForward(tensors: list[Tensor], dtype=None, casting='same_kind') -> Tensor:
    return concatenateForward(tensors, axis=0, out=None, dtype=dtype, casting=casting)

def dstackForward(tensors: list[Tensor], dtype=None, casting='same_kind') -> Tensor:
    return concatenateForward(tensors, axis=2, out=None, dtype=dtype, casting=casting)

def splitForward(tensor: Tensor, indices_or_sections, axis=0) -> list[Tensor]:
    if not isinstance(tensor, Tensor):
        tensor = Tensor(tensor)
    data = np.split(tensor.data, indices_or_sections, axis)
    if tensor.requireGradient:
        gradfunc = partial(splitBackward, tensor, axis)
        return [Tensor(datum, requireGradient=True, gradientFunc=gradfunc) for datum in data]
    return [Tensor(datum, requireGradient=False, gradientFunc=None) for datum in data]

def splitBackward(tensor: Tensor, axis=0, gradient=None) -> None:
    # Expects the per-piece gradients as a sequence; stitching them back
    # together undoes the split.
    tensor.gradient = np.concatenate(gradient, axis=axis)
    if tensor.gradientFunc:
        tensor.gradientFunc(tensor.gradient)

def hsplitForward(tensor: Tensor, indices_or_sections) -> list[Tensor]:
    return splitForward(tensor, indices_or_sections, axis=1)

def vsplitForward(tensor: Tensor, indices_or_sections) -> list[Tensor]:
    return splitForward(tensor, indices_or_sections, axis=0)

def dsplitForward(tensor: Tensor, indices_or_sections) -> list[Tensor]:
    return splitForward(tensor, indices_or_sections, axis=2)

def sumForward(tensor: Tensor, axis=None, dtype=None, keepdims=False, **kwargs) -> Tensor:
    if not isinstance(tensor, Tensor):
        tensor = Tensor(tensor)
    data = np.sum(tensor.data, axis=axis, dtype=dtype, keepdims=keepdims, **kwargs)
    if tensor.requireGradient:
        gradfunc = partial(sumBackward, tensor, axis, dtype, keepdims)
        return Tensor(data, requireGradient=True, gradientFunc=gradfunc)
    return Tensor(data, requireGradient=False, gradientFunc=None)

def sumBackward(tensor: Tensor, axis=None, dtype=None, keepdims=False, gradient=None) -> None:
    if tensor.requireGradient:
        # Reinsert the reduced axis so the gradient broadcasts over the input.
        if axis is not None and not keepdims:
            gradient = np.expand_dims(gradient, axis)
        tensor.gradient = np.broadcast_to(gradient, tensor.shape)
        if tensor.gradientFunc:
            tensor.gradientFunc(tensor.gradient)

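# Why sumBackward expands before broadcasting: with keepdims=False the reduced
# axis is gone, so it must be reinserted for the upstream gradient to stretch
# back over the input. Illustrative check only.
def _sumGradientCheck():
    x = np.arange(12.0).reshape(3, 4)
    gradient = np.ones(3)                     # gradient of x.sum(axis=1)
    grad = np.broadcast_to(np.expand_dims(gradient, 1), x.shape)
    assert np.array_equal(grad, np.ones_like(x))
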
def prodForward(tensor: Tensor, axis=None, dtype=None, keepdims=False) -> Tensor:
    if not isinstance(tensor, Tensor):
        tensor = Tensor(tensor)
    data = np.prod(tensor.data, axis=axis, dtype=dtype, keepdims=keepdims)
    if tensor.requireGradient:
        gradfunc = partial(prodBackward, tensor, axis, dtype, keepdims)
        return Tensor(data, requireGradient=True, gradientFunc=gradfunc)
    return Tensor(data, requireGradient=False, gradientFunc=None)

def prodBackward(tensor: Tensor, axis=None, dtype=None, keepdims=False, gradient=None) -> None:
    if tensor.requireGradient:
        if axis is not None and not keepdims:
            gradient = np.expand_dims(gradient, axis)
        # Replace zeros by ones so the quotient prod / x stays finite.
        tensorNoneZero = np.where(tensor.data != 0, tensor.data, 1)
        prod = np.prod(tensor.data, axis=axis, dtype=dtype, keepdims=True)
        tensor.gradient = np.multiply(gradient, np.divide(prod, tensorNoneZero))
        if tensor.gradientFunc:
            tensor.gradientFunc(tensor.gradient)

def maximumForward(tensor1: Tensor, tensor2: Tensor, out=None, where=True, casting='same_kind', order='K', dtype=None, subok=True) -> Tensor:
    if not isinstance(tensor1, Tensor):
        tensor1 = Tensor(tensor1)
    if not isinstance(tensor2, Tensor):
        tensor2 = Tensor(tensor2)
    data = np.maximum(tensor1.data, tensor2.data, out=out, where=where, casting=casting, order=order, dtype=dtype, subok=subok)
    if tensor1.requireGradient or tensor2.requireGradient:
        gradfunc = partial(maximumBackward, tensor1, tensor2, data)
        return Tensor(data, requireGradient=True, gradientFunc=gradfunc)
    return Tensor(data, requireGradient=False, gradientFunc=None)

def maximumBackward(tensor1: Tensor, tensor2: Tensor, data: np.ndarray, gradient: np.ndarray) -> None:
    # Route the gradient to whichever operand supplied the maximum.
    if tensor1.requireGradient:
        tensor1.gradient = np.multiply(gradient, tensor1.data == data)
        if tensor1.gradientFunc:
            tensor1.gradientFunc(tensor1.gradient)
    if tensor2.requireGradient:
        tensor2.gradient = np.multiply(gradient, tensor2.data == data)
        if tensor2.gradientFunc:
            tensor2.gradientFunc(tensor2.gradient)

def minimumForward(tensor1: Tensor, tensor2: Tensor, out=None, where=True, casting='same_kind', order='K', dtype=None, subok=True) -> Tensor:
    if not isinstance(tensor1, Tensor):
        tensor1 = Tensor(tensor1)
    if not isinstance(tensor2, Tensor):
        tensor2 = Tensor(tensor2)
    data = np.minimum(tensor1.data, tensor2.data, out=out, where=where, casting=casting, order=order, dtype=dtype, subok=subok)
    if tensor1.requireGradient or tensor2.requireGradient:
        gradfunc = partial(minimumBackward, tensor1, tensor2, data)
        return Tensor(data, requireGradient=True, gradientFunc=gradfunc)
    return Tensor(data, requireGradient=False, gradientFunc=None)

def minimumBackward(tensor1: Tensor, tensor2: Tensor, data: np.ndarray, gradient: np.ndarray) -> None:
    # Route the gradient to whichever operand supplied the minimum.
    if tensor1.requireGradient:
        tensor1.gradient = np.multiply(gradient, tensor1.data == data)
        if tensor1.gradientFunc:
            tensor1.gradientFunc(tensor1.gradient)
    if tensor2.requireGradient:
        tensor2.gradient = np.multiply(gradient, tensor2.data == data)
        if tensor2.gradientFunc:
            tensor2.gradientFunc(tensor2.gradient)

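# How the equality masks in maximumBackward/minimumBackward route gradients:
# the winning operand gets the upstream gradient, and at ties both operands
# receive it in full (a common subgradient convention). Illustrative only.
def _maximumGradientCheck():
    a = np.array([1.0, 5.0, 3.0])
    b = np.array([4.0, 5.0, 2.0])
    out = np.maximum(a, b)
    gradient = np.array([10.0, 10.0, 10.0])
    gradA = np.multiply(gradient, a == out)   # [ 0., 10., 10.]
    gradB = np.multiply(gradient, b == out)   # [10., 10.,  0.]
    assert np.array_equal(gradA, [0.0, 10.0, 10.0])
    assert np.array_equal(gradB, [10.0, 10.0, 0.0])
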
def maxForward(tensor: Tensor, axis=None, keepdims=False) -> Tensor:
    if not isinstance(tensor, Tensor):
        tensor = Tensor(tensor)
    data = np.max(tensor.data, axis=axis, keepdims=keepdims)
    if tensor.requireGradient:
        # keepdims=True keeps the mask broadcastable against the input.
        mask = (tensor.data == np.max(tensor.data, axis=axis, keepdims=True))
        gradfunc = partial(maxBackward, tensor, mask)
        return Tensor(data, requireGradient=True, gradientFunc=gradfunc)
    return Tensor(data, requireGradient=False, gradientFunc=None)

def maxBackward(tensor: Tensor, mask: np.ndarray, gradient: np.ndarray) -> None:
    if tensor.requireGradient:
        tensor.gradient = np.multiply(mask, gradient)
        if tensor.gradientFunc:
            tensor.gradientFunc(tensor.gradient)

def minForward(tensor: Tensor, axis=None, keepdims=False) -> Tensor:
    if not isinstance(tensor, Tensor):
        tensor = Tensor(tensor)
    data = np.min(tensor.data, axis=axis, keepdims=keepdims)
    if tensor.requireGradient:
        # keepdims=True keeps the mask broadcastable against the input.
        mask = (tensor.data == np.min(tensor.data, axis=axis, keepdims=True))
        gradfunc = partial(minBackward, tensor, mask)
        return Tensor(data, requireGradient=True, gradientFunc=gradfunc)
    return Tensor(data, requireGradient=False, gradientFunc=None)

def minBackward(tensor: Tensor, mask: np.ndarray, gradient: np.ndarray) -> None:
    if tensor.requireGradient:
        tensor.gradient = np.multiply(mask, gradient)
        if tensor.gradientFunc:
            tensor.gradientFunc(tensor.gradient)

def meanForward(tensor: Tensor, axis=None, keepdims=False) -> Tensor:
    if not isinstance(tensor, Tensor):
        tensor = Tensor(tensor)
    data = np.mean(tensor.data, axis=axis, keepdims=keepdims)
    if tensor.requireGradient:
        if axis is None:
            divisor = np.prod(tensor.shape)
        elif isinstance(axis, int):
            divisor = tensor.shape[axis]
        else:
            divisor = np.prod([tensor.shape[i] for i in axis])
        gradfunc = partial(meanBackward, tensor, divisor)
        return Tensor(data, requireGradient=True, gradientFunc=gradfunc)
    return Tensor(data, requireGradient=False, gradientFunc=None)

def meanBackward(tensor: Tensor, divisor: int, gradient: np.ndarray) -> None:
    if tensor.requireGradient:
        # Spread the averaged gradient evenly back over the input.
        tensor.gradient = np.broadcast_to(np.divide(gradient, divisor), tensor.shape)
        if tensor.gradientFunc:
            tensor.gradientFunc(tensor.gradient)

def varForward(tensor: Tensor, axis=None, ddof=0, keepdims=False) -> Tensor:
    if not isinstance(tensor, Tensor):
        tensor = Tensor(tensor)
    data = np.var(tensor.data, axis=axis, ddof=ddof, keepdims=keepdims)
    if tensor.requireGradient:
        # keepdims=True so the centred values broadcast against the input.
        diff = np.subtract(tensor.data, np.mean(tensor.data, axis=axis, keepdims=True))
        if axis is None:
            divisor = np.prod(tensor.shape)
        elif isinstance(axis, int):
            divisor = tensor.shape[axis]
        else:
            divisor = np.prod([tensor.shape[i] for i in axis])
        gradfunc = partial(varBackward, tensor, divisor, diff)
        return Tensor(data, requireGradient=True, gradientFunc=gradfunc)
    return Tensor(data, requireGradient=False, gradientFunc=None)

def varBackward(tensor: Tensor, divisor: int, diff: np.ndarray, gradient: np.ndarray) -> None:
    if tensor.requireGradient:
        tensor.gradient = np.multiply(np.multiply(np.divide(2.0, divisor), diff), gradient)
        if tensor.gradientFunc:
            tensor.gradientFunc(tensor.gradient)

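# Numerical sanity check for varBackward's formula d var/dx_i = 2 (x_i - mean) / N
# (ddof=0, axis=None), compared against central finite differences. Not part of
# the original file.
def _varGradientCheck():
    x = np.array([1.0, 2.0, 4.0])
    n = x.size
    analytic = 2.0 / n * (x - x.mean())
    eps = 1e-6
    numeric = np.array([
        (np.var(x + eps * np.eye(n)[i]) - np.var(x - eps * np.eye(n)[i])) / (2 * eps)
        for i in range(n)
    ])
    assert np.allclose(analytic, numeric, atol=1e-5)
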
def stdForward(tensor: Tensor, axis=None, keepdims=False) -> Tensor:
    if not isinstance(tensor, Tensor):
        tensor = Tensor(tensor)
    data = np.std(tensor.data, axis=axis, keepdims=keepdims)
    if tensor.requireGradient:
        diff = np.subtract(tensor.data, np.mean(tensor.data, axis=axis, keepdims=True))
        if axis is None:
            divisor = np.prod(tensor.shape)
        elif isinstance(axis, int):
            divisor = tensor.shape[axis]
        else:
            divisor = np.prod([tensor.shape[i] for i in axis])
        std = np.std(tensor.data, axis=axis, keepdims=True)
        gradfunc = partial(stdBackward, tensor, divisor, diff, std)
        return Tensor(data, requireGradient=True, gradientFunc=gradfunc)
    return Tensor(data, requireGradient=False, gradientFunc=None)

def stdBackward(tensor: Tensor, divisor: int, diff: np.ndarray, std: np.ndarray, gradient: np.ndarray) -> None:
    if tensor.requireGradient:
        # d std / dx = (x - mean) / (N * std); the original divided by the raw
        # input instead of the standard deviation.
        tensor.gradient = np.multiply(gradient, np.divide(diff, np.multiply(divisor, std)))
        if tensor.gradientFunc:
            tensor.gradientFunc(tensor.gradient)

def padForward(tensor: Tensor, pad_width, mode='constant', constant_values=0) -> Tensor:
    if not isinstance(tensor, Tensor):
        tensor = Tensor(tensor)
    data = np.pad(tensor.data, pad_width=pad_width, mode=mode, constant_values=constant_values)
    if tensor.requireGradient:
        gradfunc = partial(padBackward, tensor, pad_width)
        return Tensor(data, requireGradient=True, gradientFunc=gradfunc)
    return Tensor(data, requireGradient=False, gradientFunc=None)

def padBackward(tensor: Tensor, pad_width, gradient: np.ndarray) -> None:
    if tensor.requireGradient:
        # Crop the padded border off the incoming gradient.
        slices = tuple(slice(pad[0], -pad[1] if pad[1] != 0 else None) for pad in pad_width)
        tensor.gradient = gradient[slices]
        if tensor.gradientFunc:
            tensor.gradientFunc(tensor.gradient)

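# The slice trick in padBackward simply crops the padded border off the
# gradient; a stop of None handles zero padding on the trailing side
# (since gradient[a:-0] would be empty). Illustrative check only.
def _padGradientCheck():
    pad_width = ((1, 2), (0, 3))
    x = np.arange(6.0).reshape(2, 3)
    gradient = np.ones((5, 6))                # gradient of np.pad(x, pad_width)
    slices = tuple(slice(p[0], -p[1] if p[1] != 0 else None) for p in pad_width)
    assert gradient[slices].shape == x.shape
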
def insertForward(tensor: Tensor, values: Tensor, index: ArrayLike) -> Tensor:
    if not isinstance(tensor, Tensor):
        tensor = Tensor(tensor)
    if not isinstance(values, Tensor):
        values = Tensor(values)
    data = np.insert(tensor.data, index, values.data)
    if tensor.requireGradient or values.requireGradient:
        gradfunc = partial(insertBackward, tensor, values, index)
        return Tensor(data, requireGradient=True, gradientFunc=gradfunc)
    return Tensor(data, requireGradient=False, gradientFunc=None)

def insertBackward(tensor: Tensor, values: Tensor, index: ArrayLike, gradient: np.ndarray) -> None:
    if tensor.requireGradient:
        tensor.gradient = np.delete(gradient, index)
        if tensor.gradientFunc:
            tensor.gradientFunc(tensor.gradient)
    if values.requireGradient:
        values.gradient = gradient[index]
        if values.gradientFunc:
            values.gradientFunc(values.gradient)

def transposeForward(tensor: Tensor) -> Tensor:
    if not isinstance(tensor, Tensor):
        tensor = Tensor(tensor)
    data = np.transpose(tensor.data)
    if tensor.requireGradient:
        gradfunc = partial(transposeBackward, tensor)
        return Tensor(data, requireGradient=True, gradientFunc=gradfunc)
    return Tensor(data, requireGradient=False, gradientFunc=None)

def transposeBackward(tensor: Tensor, gradient: np.ndarray) -> None:
    if tensor.requireGradient:
        tensor.gradient = np.transpose(gradient)
        if tensor.gradientFunc:
            tensor.gradientFunc(tensor.gradient)

def whereForward(condition, tensor1: Tensor, tensor2: Tensor) -> Tensor:
    if not isinstance(tensor1, Tensor):
        tensor1 = Tensor(tensor1)
    if not isinstance(tensor2, Tensor):
        tensor2 = Tensor(tensor2)
    data = np.where(condition, tensor1.data, tensor2.data)
    if tensor1.requireGradient or tensor2.requireGradient:
        gradfunc = partial(whereBackward, condition, tensor1, tensor2)
        return Tensor(data, requireGradient=True, gradientFunc=gradfunc)
    return Tensor(data, requireGradient=False, gradientFunc=None)

def whereBackward(condition, tensor1: Tensor, tensor2: Tensor, gradient: np.ndarray) -> None:
    if tensor1.requireGradient:
        tensor1.gradient = np.multiply(gradient, condition)
        if tensor1.gradientFunc:
            tensor1.gradientFunc(tensor1.gradient)
    if tensor2.requireGradient:
        tensor2.gradient = np.multiply(gradient, np.logical_not(condition))
        if tensor2.gradientFunc:
            tensor2.gradientFunc(tensor2.gradient)

def cumsumForward(tensor: Tensor, axis, *args, **kwargs) -> Tensor:
    if not isinstance(tensor, Tensor):
        tensor = Tensor(tensor)
    data = np.cumsum(tensor.data, axis, *args, **kwargs)
    if tensor.requireGradient:
        gradfunc = partial(cumsumBackward, tensor, axis)
        return Tensor(data, requireGradient=True, gradientFunc=gradfunc)
    return Tensor(data, requireGradient=False, gradientFunc=None)

def cumsumBackward(tensor: Tensor, axis, gradient: np.ndarray) -> None:
    if tensor.requireGradient:
        # Reverse, accumulate, reverse: applies the transposed (lower-triangular) Jacobian.
        grad = np.flip(np.cumsum(np.flip(gradient, axis=axis), axis=axis), axis=axis)
        tensor.gradient = np.reshape(grad, tensor.shape)
        if tensor.gradientFunc:
            tensor.gradientFunc(tensor.gradient)

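# cumsum's Jacobian is lower-triangular ones, so the backward pass is a
# *reversed* cumulative sum: grad_i = sum_{k >= i} g_k. Check against the
# explicit Jacobian-transpose product (illustrative, not from the original):
def _cumsumGradientCheck():
    gradient = np.array([1.0, 10.0, 100.0, 1000.0])
    grad = np.flip(np.cumsum(np.flip(gradient)))      # [1111, 1110, 1100, 1000]
    jacobian = np.tril(np.ones((4, 4)))
    assert np.array_equal(grad, jacobian.T @ gradient)
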
def cumprodForward(tensor: Tensor, axis, *args, **kwargs) -> Tensor:
    if not isinstance(tensor, Tensor):
        tensor = Tensor(tensor)
    data = np.cumprod(tensor.data, axis, *args, **kwargs)
    if tensor.requireGradient:
        gradfunc = partial(cumprodBackward, tensor, axis)
        return Tensor(data, requireGradient=True, gradientFunc=gradfunc)
    return Tensor(data, requireGradient=False, gradientFunc=None)

def cumprodBackward(tensor: Tensor, axis, gradient: np.ndarray) -> None:
    if tensor.requireGradient:
        # grad_i = (1 / x_i) * sum_{k >= i} g_k * cumprod_k  (assumes nonzero inputs).
        weighted = np.multiply(gradient, np.cumprod(tensor.data, axis=axis))
        summed = np.flip(np.cumsum(np.flip(weighted, axis=axis), axis=axis), axis=axis)
        tensor.gradient = np.divide(summed, tensor.data)
        if tensor.gradientFunc:
            tensor.gradientFunc(tensor.gradient)

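# Finite-difference check of the cumprod gradient used above (illustrative,
# assumes nonzero inputs; not part of the original file):
def _cumprodGradientCheck():
    x = np.array([2.0, 3.0, 4.0])
    gradient = np.ones_like(x)                # upstream gradient of np.cumprod(x)
    weighted = gradient * np.cumprod(x)
    analytic = np.flip(np.cumsum(np.flip(weighted))) / x      # [16., 10., 6.]
    eps = 1e-6
    numeric = np.array([
        (np.cumprod(x + eps * np.eye(3)[i]).sum() - np.cumprod(x - eps * np.eye(3)[i]).sum()) / (2 * eps)
        for i in range(3)
    ])
    assert np.allclose(analytic, numeric, atol=1e-4)
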
#
# Not working correctly
#
def asStridedForward(tensor: Tensor, shape=None, strides=None, subok=False) -> Tensor:
    if not isinstance(tensor, Tensor):
        tensor = Tensor(tensor)
    patches = np.lib.stride_tricks.as_strided(tensor.data, shape=shape, strides=strides, subok=subok)
    if tensor.requireGradient:
        gradfunc = partial(asStridedBackward, tensor)
        return Tensor(data=patches, requireGradient=True, gradientFunc=gradfunc)
    return Tensor(data=patches, requireGradient=False, gradientFunc=None)

def asStridedBackward(tensor: Tensor, gradient: np.ndarray) -> None:
    tensor.gradient = gradient.sum(axis=tuple(range(gradient.ndim - tensor.ndim)))
    if tensor.gradientFunc:
        tensor.gradientFunc(tensor.gradient)

def slidingWindowForward(tensor: Tensor, window_shape=None, axis=None, *, subok=False, writeable=True) -> Tensor:
    if not isinstance(tensor, Tensor):
        tensor = Tensor(tensor)
    patches = np.lib.stride_tricks.sliding_window_view(tensor.data, window_shape=window_shape, axis=axis, subok=subok, writeable=writeable)
    if tensor.requireGradient:
        gradfunc = partial(slidingWindowBackward, tensor)
        return Tensor(data=patches, requireGradient=True, gradientFunc=gradfunc)
    return Tensor(data=patches, requireGradient=False, gradientFunc=None)

def slidingWindowBackward(tensor: Tensor, gradient: np.ndarray) -> None:
    tensor.gradient = gradient.sum(axis=tuple(range(gradient.ndim - tensor.data.ndim)))
    if tensor.gradientFunc:
        tensor.gradientFunc(tensor.gradient)

def einsumForward(tensor1: Tensor, tensor2: Tensor, optimize=False) -> Tensor:
    if not isinstance(tensor1, Tensor):
        tensor1 = Tensor(tensor1)
    if not isinstance(tensor2, Tensor):
        tensor2 = Tensor(tensor2)
    # Subscripts are hardcoded for the convolution case this was written for.
    einsums = np.einsum('bihwkl,oikl->bohw', tensor1.data, tensor2.data, optimize=optimize)
    if tensor1.requireGradient or tensor2.requireGradient:
        gradfunc = partial(einsumBackward, tensor1, tensor2, optimize)
        return Tensor(einsums, requireGradient=True, gradientFunc=gradfunc)
    return Tensor(einsums, requireGradient=False, gradientFunc=None)

def einsumBackward(tensor1: Tensor, tensor2: Tensor, optimize, gradient: np.ndarray) -> None:
    if tensor1.requireGradient:
        tensor1.gradient = np.lib.stride_tricks.as_strided(gradient, shape=(*tensor1.data.shape, *tensor2.data.shape[-2:]), strides=(*tensor1.data.strides, 0, 0))
        if tensor1.gradientFunc:
            tensor1.gradientFunc(tensor1.gradient)
    if tensor2.requireGradient:
        tensor2.gradient = np.lib.stride_tricks.as_strided(gradient, shape=(*tensor2.data.shape[:-2], *tensor1.data.shape[-2:]), strides=(0, 0, *tensor1.data.strides[-2:]))
        if tensor2.gradientFunc:
            tensor2.gradientFunc(tensor2.gradient)

#
# Mapping from Numpy to Tensor
#
ufuncMap = {
    np.add: addForward,
    np.subtract: subtractForward,
    np.multiply: multiplyForward,
    np.divide: divideForward,
    np.matmul: matmulForward,
    np.power: powerForward,
    np.square: squareForward,
    np.sqrt: sqrtForward,
    np.log: logForward,
    np.exp: expForward,
    np.sin: sinForward,
    np.cos: cosForward,
    np.tan: tanForward,
    np.sinh: sinhForward,
    np.cosh: coshForward,
    np.tanh: tanhForward,
    np.abs: absForward,
    np.sign: signForward,
    np.positive: positiveForward,
    np.negative: negativeForward,
    np.equal: equalForward,
    np.not_equal: notEqualForward,
    np.less: lessForward,
    np.less_equal: lessEqualForward,
    np.greater: greaterForward,
    np.greater_equal: greaterEqualForward,
    np.maximum: maximumForward,
    np.minimum: minimumForward
}

# Second dispatch table, for non-ufunc array functions ("funcMap" name assumed).
funcMap = {
    np.sum: sumForward,
    np.prod: prodForward,
    np.repeat: repeatForward,
    np.tile: tileForward,
    np.max: maxForward,
    np.min: minForward,
    np.mean: meanForward,
    np.var: varForward,
    np.std: stdForward,
    np.reshape: reshapeForward,
    np.transpose: transposeForward,
    np.concatenate: concatenateForward,
    np.hstack: hstackForward,
    np.vstack: vstackForward,
    np.dstack: dstackForward,
    np.split: splitForward,
    np.hsplit: hsplitForward,
    np.vsplit: vsplitForward,
    np.dsplit: dsplitForward,
    np.pad: padForward,
    np.insert: insertForward,
    np.where: whereForward,
    np.cumsum: cumsumForward,
    np.cumprod: cumprodForward,
    np.einsum: einsumForward
}
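
# How these tables are presumably consumed: the Tensor class can implement
# NumPy's dispatch protocols and look the called function up here, so that
# np.less_equal(t1, t2) lands in lessEqualForward. A minimal sketch of such
# hooks (an assumption for illustration, not code from this file):
class _TensorDispatchSketch:
    def __array_ufunc__(self, ufunc, method, *inputs, **kwargs):
        # e.g. ufunc=np.less_equal resolves to lessEqualForward
        forward = ufuncMap.get(ufunc)
        if forward is None or method != '__call__':
            return NotImplemented
        return forward(*inputs, **kwargs)

    def __array_function__(self, func, types, args, kwargs):
        # e.g. func=np.sum resolves to sumForward
        forward = funcMap.get(func)
        if forward is None:
            return NotImplemented
        return forward(*args, **kwargs)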