diff --git a/tfjs-backend-cpu/src/kernels/Cumprod.ts b/tfjs-backend-cpu/src/kernels/Cumprod.ts new file mode 100644 index 00000000000..f3baa72d601 --- /dev/null +++ b/tfjs-backend-cpu/src/kernels/Cumprod.ts @@ -0,0 +1,88 @@ +/** + * @license + * Copyright 2022 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + +import {backend_util, Cumprod, CumprodAttrs, CumprodInputs, KernelConfig, KernelFunc, TensorInfo, TypedArray, upcastType, util} from '@tensorflow/tfjs-core'; + +import {MathBackendCPU} from '../backend_cpu'; +import {assertNotComplex} from '../cpu_util'; +import {transpose} from './Transpose'; + +export function cumprod( + args: {inputs: CumprodInputs, backend: MathBackendCPU, + attrs: CumprodAttrs}): TensorInfo { + const {inputs, backend, attrs} = args; + const {x} = inputs; + const {axis, exclusive, reverse} = attrs; + + assertNotComplex(x, 'cumprod'); + + const permutation = backend_util.getAxesPermutation([axis], x.shape.length); + let $x = x; + if (permutation != null) { + $x = transpose({inputs: {x}, backend, attrs: {perm: permutation}}); + } + const permutedAxis = backend_util.getInnerMostAxes(1, x.shape.length)[0]; + + if (permutedAxis !== $x.shape.length - 1) { + throw new Error( + `backend.cumprod in CPU expects an inner-most ` + + `axis=${$x.shape.length - 1} but got axis=${permutedAxis}`); + } + + const resultDtype = upcastType($x.dtype, 'int32'); + const vals = util.makeOnesTypedArray( + util.sizeFromShape($x.shape), resultDtype) as TypedArray; + + const aVals = backend.data.get($x.dataId).values as TypedArray; + const finalDim = $x.shape[$x.shape.length - 1]; + const indexAdjuster = reverse ? + (i: number, j: number) => i + finalDim - j - 1 : + (i: number, j: number) => i + j; + for (let i = 0; i < aVals.length; i += finalDim) { + for (let j = 0; j < finalDim; j++) { + const idx = indexAdjuster(i, j); + if (j === 0) { + vals[idx] = exclusive ? 1 : aVals[idx]; + } else { + const prevIdx = indexAdjuster(i, j - 1); + vals[idx] = exclusive ? 
aVals[prevIdx] * vals[prevIdx] : + aVals[idx] * vals[prevIdx]; + } + } + } + + const result = backend.makeTensorInfo($x.shape, resultDtype, vals); + + if (permutation != null) { + const reversePermutation = backend_util.getUndoAxesPermutation(permutation); + const reverseTransposedResult = transpose( + {inputs: {x: result}, backend, attrs: {perm: reversePermutation}}); + + backend.disposeIntermediateTensorInfo(result); + backend.disposeIntermediateTensorInfo($x); + + return reverseTransposedResult; + } + + return result; +} + +export const cumprodConfig: KernelConfig = { + kernelName: Cumprod, + backendName: 'cpu', + kernelFunc: cumprod as {} as KernelFunc +}; diff --git a/tfjs-backend-cpu/src/register_all_kernels.ts b/tfjs-backend-cpu/src/register_all_kernels.ts index 3c9f6be5ba6..6ffd9570fdb 100644 --- a/tfjs-backend-cpu/src/register_all_kernels.ts +++ b/tfjs-backend-cpu/src/register_all_kernels.ts @@ -58,6 +58,7 @@ import {conv3DBackpropInputV2Config} from './kernels/Conv3DBackpropInputV2'; import {cosConfig} from './kernels/Cos'; import {coshConfig} from './kernels/Cosh'; import {cropAndResizeConfig} from './kernels/CropAndResize'; +import {cumprodConfig} from './kernels/Cumprod'; import {cumsumConfig} from './kernels/Cumsum'; import {denseBincountConfig} from './kernels/DenseBincount'; import {depthToSpaceConfig} from './kernels/DepthToSpace'; @@ -224,6 +225,7 @@ const kernelConfigs: KernelConfig[] = [ cosConfig, coshConfig, cropAndResizeConfig, + cumprodConfig, cumsumConfig, denseBincountConfig, depthToSpaceConfig, diff --git a/tfjs-backend-wasm/src/cc/BUILD b/tfjs-backend-wasm/src/cc/BUILD index 113dfc2456f..6f149a03ce9 100644 --- a/tfjs-backend-wasm/src/cc/BUILD +++ b/tfjs-backend-wasm/src/cc/BUILD @@ -285,6 +285,7 @@ tfjs_cc_library( ":Cos", ":Cosh", ":CropAndResize", + ":Cumprod", ":Cumsum", ":DepthToSpace", ":DepthwiseConv2dNative", @@ -536,6 +537,15 @@ tfjs_cc_library( ], ) +tfjs_cc_library( + name = "Cumprod", + srcs = ["kernels/Cumprod.cc"], + deps = [ + ":backend", + ":util", + ], +) + tfjs_cc_library( name = "Cumsum", srcs = ["kernels/Cumsum.cc"], diff --git a/tfjs-backend-wasm/src/cc/kernels/Cumprod.cc b/tfjs-backend-wasm/src/cc/kernels/Cumprod.cc new file mode 100644 index 00000000000..13780a17bc5 --- /dev/null +++ b/tfjs-backend-wasm/src/cc/kernels/Cumprod.cc @@ -0,0 +1,74 @@ +/* Copyright 2022 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * ===========================================================================*/
+
+#ifdef __EMSCRIPTEN__
+#include <emscripten.h>
+#endif
+
+#include <cmath>
+#include <cstddef>
+
+#include "tfjs-backend-wasm/src/cc/backend.h"
+#include "tfjs-backend-wasm/src/cc/util.h"
+
+namespace tfjs {
+
+template <typename T>
+void cumprod(const size_t x_id, const size_t exclusive, const size_t reverse,
+             const size_t final_dim, const size_t out_id) {
+  auto& x_info = backend::get_tensor_info(x_id);
+  auto& out_info = backend::get_tensor_info_out(out_id);
+
+  const T* x_buf = reinterpret_cast<const T*>(x_info.memory_offset);
+  T* out_buf = reinterpret_cast<T*>(out_info.memory_offset);
+
+  for (size_t i = 0; i < x_info.size; i += final_dim) {
+    for (size_t j = 0; j < final_dim; ++j) {
+      const size_t idx = reverse ? i + final_dim - j - 1 : i + j;
+      if (j == 0) {
+        out_buf[idx] = exclusive ? 1 : x_buf[idx];
+      } else {
+        const size_t prev_idx = reverse ? idx + 1 : idx - 1;
+        out_buf[idx] = exclusive ? x_buf[prev_idx] * out_buf[prev_idx] :
+                                   x_buf[idx] * out_buf[prev_idx];
+      }
+    }
+  }
+}
+
+namespace wasm {
+// We use C-style API to interface with Javascript.
+extern "C" {
+
+#ifdef __EMSCRIPTEN__
+EMSCRIPTEN_KEEPALIVE
+#endif
+void Cumprod(const size_t x_id, const size_t exclusive, const size_t reverse,
+             const size_t final_dim, const size_t out_id, const DType dtype) {
+  switch (dtype) {
+    case DType::float32:
+      cumprod<float>(x_id, exclusive, reverse, final_dim, out_id);
+      break;
+    case DType::int32:
+      cumprod<int32_t>(x_id, exclusive, reverse, final_dim, out_id);
+      break;
+    default:
+      util::warn("Cumprod for tensor id %d failed. Unsupported dtype %d",
+                 x_id, dtype);
+  }
+}
+
+}  // extern "C"
+}  // namespace wasm
+}  // namespace tfjs
diff --git a/tfjs-backend-wasm/src/kernels/Cumprod.ts b/tfjs-backend-wasm/src/kernels/Cumprod.ts
new file mode 100644
index 00000000000..705862dfdc5
--- /dev/null
+++ b/tfjs-backend-wasm/src/kernels/Cumprod.ts
@@ -0,0 +1,83 @@
+/**
+ * @license
+ * Copyright 2022 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * ============================================================================= + */ + +import {backend_util, KernelConfig, KernelFunc, Cumprod, CumprodAttrs, CumprodInputs, TensorInfo, util} from '@tensorflow/tfjs-core'; + +import {BackendWasm} from '../backend_wasm'; + +import {CppDType} from './types'; + +import {transpose} from './Transpose'; + +let wasmCumprod: (xId: number, exclusive: number, reverse: number, + finalDim: number, outId: number, dtype: CppDType) => void; + +function setup(backend: BackendWasm) { + wasmCumprod = backend.wasm.cwrap(Cumprod, null /* void */, [ + 'number', // x_id + 'number', // exclusive + 'number', // reverse + 'number', // final_dim + 'number', // out_id + 'number' // dtype + ]); +} + +export function cumprod( + args: {inputs: CumprodInputs, backend: BackendWasm, attrs: CumprodAttrs}): +TensorInfo { + const {inputs, backend, attrs} = args; + const {x} = inputs; + const {axis, exclusive, reverse} = attrs; + const xRank = x.shape.length; + + util.assert(x.dtype === 'float32' || x.dtype === 'int32', + () => `cumprod does not support ${x.dtype} tensors in the WASM backend`); + // permute required axis to inner most axis + const permutation = backend_util.getAxesPermutation([axis], xRank); + let permutedX = x; + if (permutation !== null) { + permutedX = transpose({inputs: {x}, attrs: {perm: permutation}, backend}); + } + const permutedAxis = backend_util.getInnerMostAxes(1, xRank)[0]; + backend_util.assertAxesAreInnerMostDims('cumprod', [permutedAxis], xRank); + + const permutedOut = backend.makeOutput(permutedX.shape, permutedX.dtype); + const finalDim = permutedX.shape[permutedAxis]; + const permutedXId = backend.dataIdMap.get(permutedX.dataId).id; + const permutedOutId = backend.dataIdMap.get(permutedOut.dataId).id; + wasmCumprod(permutedXId, exclusive ? 1 : 0, reverse ? 
1 : 0, finalDim, + permutedOutId, CppDType[x.dtype]); + + // transpose data back if permuted + let out = permutedOut; + if (permutation !== null) { + const undoPermutation = backend_util.getUndoAxesPermutation(permutation); + out = transpose( + {inputs: {x: permutedOut}, attrs: {perm: undoPermutation}, backend}); + backend.disposeData(permutedX.dataId); + backend.disposeData(permutedOut.dataId); + } + return out; +} + +export const cumprodConfig: KernelConfig = { + kernelName: Cumprod, + backendName: 'wasm', + setupFunc: setup, + kernelFunc: cumprod as {} as KernelFunc +}; diff --git a/tfjs-backend-wasm/src/register_all_kernels.ts b/tfjs-backend-wasm/src/register_all_kernels.ts index 94e2492cf99..43c2ac59e10 100644 --- a/tfjs-backend-wasm/src/register_all_kernels.ts +++ b/tfjs-backend-wasm/src/register_all_kernels.ts @@ -38,6 +38,7 @@ import {conv2DBackpropInputConfig} from './kernels/Conv2DBackpropInput'; import {cosConfig} from './kernels/Cos'; import {coshConfig} from './kernels/Cosh'; import {cropAndResizeConfig} from './kernels/CropAndResize'; +import {cumprodConfig} from './kernels/Cumprod'; import {cumsumConfig} from './kernels/Cumsum'; import {depthToSpaceConfig} from './kernels/DepthToSpace'; import {depthwiseConv2dNativeConfig} from './kernels/DepthwiseConv2dNative'; @@ -141,6 +142,7 @@ const kernelConfigs: KernelConfig[] = [ cosConfig, coshConfig, cropAndResizeConfig, + cumprodConfig, cumsumConfig, depthToSpaceConfig, depthwiseConv2dNativeConfig, diff --git a/tfjs-backend-wasm/src/setup_test.ts b/tfjs-backend-wasm/src/setup_test.ts index 06fa911a12d..6761adfebdb 100644 --- a/tfjs-backend-wasm/src/setup_test.ts +++ b/tfjs-backend-wasm/src/setup_test.ts @@ -291,6 +291,7 @@ const TEST_FILTERS: TestFilter[] = [ }, {startsWith: 'reverse'}, {startsWith: 'sum '}, + {startsWith: 'cumprod'}, {startsWith: 'cumsum'}, {startsWith: 'logicalAnd '}, { diff --git a/tfjs-backend-webgl/src/cumprod_gpu.ts b/tfjs-backend-webgl/src/cumprod_gpu.ts new file mode 100644 index 00000000000..bfcd3e825dd --- /dev/null +++ b/tfjs-backend-webgl/src/cumprod_gpu.ts @@ -0,0 +1,87 @@ +/** + * @license + * Copyright 2022 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ +import {GPGPUProgram} from './gpgpu_math'; +import {getCoordsDataType, UniformType} from './shader_compiler'; + +export class CumProdProgram implements GPGPUProgram { + variableNames = ['x']; + outputShape: number[]; + userCode: string; + customUniforms = [{name: 'index', type: 'float' as UniformType}]; + + constructor(shape: number[], exclusive: boolean, reverse: boolean) { + this.outputShape = shape; + const rank = shape.length; + const val = exclusive ? 
'1.0' : `getX(${getCoords(rank, 'coords')})`; + const length = shape[shape.length - 1]; + let condition = ''; + let idxString = ''; + // When exclusive is set, the cumprod op becomes roll op that copies the + // value from the previous index based on the direction specified by the + // reverse flag. + if (exclusive) { + condition = reverse ? `end != ${length - 1}` : 'end != 0'; + idxString = reverse ? 'end + 1' : 'end - 1'; + } else { + condition = reverse ? `end + pow2 < ${length}` : 'end >= pow2'; + idxString = (reverse ? 'end + pow2' : 'end - pow2'); + } + + this.userCode = ` + void main() { + ${getCoordsDataType(rank)} coords = getOutputCoords(); + int end = ${getFinalCoord(rank, 'coords')}; + float val = ${val}; + int pow2 = int(pow(2.0, index)); + if (${condition}) { + int idx = ${idxString}; + ${getFinalCoord(rank, 'coords')} = idx; + val *= getX(${getCoords(rank, 'coords')}); + } + setOutput(val); + } + `; + } +} + +function getCoords(rank: number, name: string): string { + if (rank === 1) { + return `${name}`; + } else if (rank === 2) { + return `${name}.x, ${name}.y`; + } else if (rank === 3) { + return `${name}.x, ${name}.y, ${name}.z`; + } else if (rank === 4) { + return `${name}.x, ${name}.y, ${name}.z, ${name}.w`; + } else { + throw Error(`Cumulative product for rank ${rank} is not yet supported`); + } +} + +function getFinalCoord(rank: number, name: string): string { + if (rank === 1) { + return `${name}`; + } else if (rank === 2) { + return `${name}.y`; + } else if (rank === 3) { + return `${name}.z`; + } else if (rank === 4) { + return `${name}.w`; + } else { + throw Error(`Cumulative product for rank ${rank} is not yet supported`); + } +} diff --git a/tfjs-backend-webgl/src/kernels/Cumprod.ts b/tfjs-backend-webgl/src/kernels/Cumprod.ts new file mode 100644 index 00000000000..060addbf389 --- /dev/null +++ b/tfjs-backend-webgl/src/kernels/Cumprod.ts @@ -0,0 +1,90 @@ +/** + * @license + * Copyright 2022 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * =============================================================================
+ */
+
+import {backend_util, Cumprod, CumprodAttrs, CumprodInputs, KernelConfig, KernelFunc, TensorInfo} from '@tensorflow/tfjs-core';
+
+import {MathBackendWebGL} from '../backend_webgl';
+import {CumProdProgram} from '../cumprod_gpu';
+
+import {identity} from './Identity';
+import {transpose} from './Transpose';
+
+export function cumprod(
+    args: {inputs: CumprodInputs, backend: MathBackendWebGL,
+           attrs: CumprodAttrs}):
+    TensorInfo {
+  const {inputs, backend, attrs} = args;
+  const {x} = inputs;
+  const {axis, exclusive, reverse} = attrs;
+
+  const xRank = x.shape.length;
+  const permutation = backend_util.getAxesPermutation([axis], xRank);
+  let permutedX = x;
+  if (permutation != null) {
+    permutedX = transpose({inputs: {x}, backend, attrs: {perm: permutation}});
+  }
+  const permutedAxis = backend_util.getInnerMostAxes(1, xRank)[0];
+
+  if (permutedAxis !== xRank - 1) {
+    throw new Error(
+        `WebGL cumprod shader expects an inner-most axis=${
+            x.shape.length - 1} ` +
+        `but got axis=${axis}`);
+  }
+  const size = permutedX.shape[permutedAxis];
+  let result = identity({inputs: {x: permutedX}, backend});
+  // Use the parallel prefix-scan algorithm, inspired by:
+  // https://developer.nvidia.com/gpugems/gpugems3/part-vi-gpu-computing/chapter-39-parallel-prefix-sum-scan-cuda
+  // Note: although the algorithm there is described as a sum, it works for any
+  // associative operator with an identity.
+
+  for (let i = 0; i <= Math.ceil(Math.log2(size)) - 1; i++) {
+    const program = new CumProdProgram(permutedX.shape, false, reverse);
+    const customValues = [[i]];
+    const prevResult = result;
+    result =
+        backend.runWebGLProgram(program, [result], result.dtype, customValues);
+    backend.disposeIntermediateTensorInfo(prevResult);
+  }
+  // For exclusive cumprod, shift the running result one step in the scan
+  // direction and set the first element along the axis to 1, the
+  // multiplicative identity.
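+  // For example, an inclusive forward scan [a, ab, abc, abcd] becomes the
+  // exclusive scan [1, a, ab, abc] after this pass.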
+ if (exclusive) { + const program = new CumProdProgram(permutedX.shape, exclusive, reverse); + const prevResult = result; + result = backend.runWebGLProgram(program, [result], result.dtype); + backend.disposeIntermediateTensorInfo(prevResult); + } + + if (permutation != null) { + const reversePermutation = backend_util.getUndoAxesPermutation(permutation); + const reverseTransposedResult = transpose( + {inputs: {x: result}, backend, attrs: {perm: reversePermutation}}); + + backend.disposeIntermediateTensorInfo(result); + backend.disposeIntermediateTensorInfo(permutedX); + + return reverseTransposedResult; + } + + return result; +} + +export const cumprodConfig: KernelConfig = { + kernelName: Cumprod, + backendName: 'webgl', + kernelFunc: cumprod as {} as KernelFunc +}; diff --git a/tfjs-backend-webgl/src/register_all_kernels.ts b/tfjs-backend-webgl/src/register_all_kernels.ts index 4f3bb065de1..908a64cde5e 100644 --- a/tfjs-backend-webgl/src/register_all_kernels.ts +++ b/tfjs-backend-webgl/src/register_all_kernels.ts @@ -55,6 +55,7 @@ import {conv3DBackpropInputConfig} from './kernels/Conv3DBackpropInputV2'; import {cosConfig} from './kernels/Cos'; import {coshConfig} from './kernels/Cosh'; import {cropAndResizeConfig} from './kernels/CropAndResize'; +import {cumprodConfig} from './kernels/Cumprod'; import {cumsumConfig} from './kernels/Cumsum'; import {denseBincountConfig} from './kernels/DenseBincount'; import {depthToSpaceConfig} from './kernels/DepthToSpace'; @@ -220,6 +221,7 @@ const kernelConfigs: KernelConfig[] = [ cosConfig, coshConfig, cropAndResizeConfig, + cumprodConfig, cumsumConfig, denseBincountConfig, depthToSpaceConfig, diff --git a/tfjs-converter/docs/supported_ops.md b/tfjs-converter/docs/supported_ops.md index ac88786bd08..508d7ac290f 100644 --- a/tfjs-converter/docs/supported_ops.md +++ b/tfjs-converter/docs/supported_ops.md @@ -291,6 +291,7 @@ |Tensorflow Op Name|Tensorflow.js Op Name| |---|---| +|Cumprod|cumprod| |Cumsum|cumsum| ## Operations - Segment diff --git a/tfjs-converter/python/tensorflowjs/op_list/reduction.json b/tfjs-converter/python/tensorflowjs/op_list/reduction.json index e1a70c9ee10..bbb0cfdae1e 100644 --- a/tfjs-converter/python/tensorflowjs/op_list/reduction.json +++ b/tfjs-converter/python/tensorflowjs/op_list/reduction.json @@ -241,6 +241,34 @@ } ] }, + { + "tfOpName": "Cumprod", + "category": "reduction", + "inputs": [ + { + "start": 0, + "name": "x", + "type": "tensor" + }, + { + "start": 1, + "name": "axis", + "type": "number" + } + ], + "attrs": [ + { + "tfName": "exclusive", + "name": "exclusive", + "type": "bool" + }, + { + "tfName": "reverse", + "name": "reverse", + "type": "bool" + } + ] + }, { "tfOpName": "Cumsum", "category": "reduction", @@ -269,4 +297,4 @@ } ] } -] \ No newline at end of file +] diff --git a/tfjs-converter/src/operations/executors/reduction_executor.ts b/tfjs-converter/src/operations/executors/reduction_executor.ts index 2803bed5603..912537dc862 100644 --- a/tfjs-converter/src/operations/executors/reduction_executor.ts +++ b/tfjs-converter/src/operations/executors/reduction_executor.ts @@ -104,6 +104,17 @@ export const executeOp: InternalOpExecutor = getParamValue('x', node, tensorMap, context) as Tensor, axis, keepDims)]; } + case 'Cumprod': { + const axis = + getParamValue('axis', node, tensorMap, context) as number; + const exclusive = + getParamValue('exclusive', node, tensorMap, context) as boolean; + const reverse = + getParamValue('reverse', node, tensorMap, context) as boolean; + return [tfOps.cumprod( + 
getParamValue('x', node, tensorMap, context) as Tensor, axis,
+            exclusive, reverse)];
+      }
       case 'Cumsum': {
         const axis =
             getParamValue('axis', node, tensorMap, context) as number;
diff --git a/tfjs-converter/src/operations/executors/reduction_executor_test.ts b/tfjs-converter/src/operations/executors/reduction_executor_test.ts
index 64d81006c2f..a2c31c11825 100644
--- a/tfjs-converter/src/operations/executors/reduction_executor_test.ts
+++ b/tfjs-converter/src/operations/executors/reduction_executor_test.ts
@@ -77,6 +77,20 @@ describe('reduction', () => {
       expect(tfOps.argMin).toHaveBeenCalledWith(input1[0], 1);
     });
   });
+  describe('Cumprod', () => {
+    it('should call tfOps.cumprod', () => {
+      spyOn(tfOps, 'cumprod');
+      node.op = 'Cumprod';
+      node.attrParams.exclusive = createBoolAttr(true);
+      node.attrParams.reverse = createBoolAttr(false);
+      node.inputNames = ['input1', 'input2'];
+      node.inputParams.axis = createNumberAttrFromIndex(1);
+      const input2 = [tfOps.scalar(2)];
+      executeOp(node, {input1, input2}, context);
+
+      expect(tfOps.cumprod).toHaveBeenCalledWith(input1[0], 2, true, false);
+    });
+  });
   describe('Cumsum', () => {
     it('should call tfOps.cumsum', () => {
       spyOn(tfOps, 'cumsum');
diff --git a/tfjs-core/src/kernel_names.ts b/tfjs-core/src/kernel_names.ts
index 2d58403d479..4efeeed296b 100644
--- a/tfjs-core/src/kernel_names.ts
+++ b/tfjs-core/src/kernel_names.ts
@@ -238,6 +238,14 @@ export type CosInputs = UnaryInputs;
 export const Cosh = 'Cosh';
 export type CoshInputs = UnaryInputs;
 
+export const Cumprod = 'Cumprod';
+export type CumprodInputs = Pick<NamedTensorMap, 'x'>;
+export interface CumprodAttrs {
+  axis: number;
+  exclusive: boolean;
+  reverse: boolean;
+}
+
 export const Cumsum = 'Cumsum';
 export type CumsumInputs = Pick<NamedTensorMap, 'x'>;
 export interface CumsumAttrs {
diff --git a/tfjs-core/src/ops/cumprod.ts b/tfjs-core/src/ops/cumprod.ts
new file mode 100644
index 00000000000..42f355b621c
--- /dev/null
+++ b/tfjs-core/src/ops/cumprod.ts
@@ -0,0 +1,69 @@
+/**
+ * @license
+ * Copyright 2022 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the 'License');
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an 'AS IS' BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+
+import { ENGINE } from '../engine';
+import { Cumprod, CumprodAttrs, CumprodInputs } from '../kernel_names';
+import { NamedAttrMap } from '../kernel_registry';
+import { Tensor } from '../tensor';
+import { NamedTensorMap } from '../tensor_types';
+import { convertToTensor } from '../tensor_util_env';
+import { TensorLike } from '../types';
+
+import { op } from './operation';
+
+/**
+ * Computes the cumulative product of a `tf.Tensor` along `axis`.
+ *
+ * ```js
+ * const x = tf.tensor([1, 2, 3, 4]);
+ * x.cumprod().print();
+ * ```
+ * ```js
+ * const x = tf.tensor([[1, 2], [3, 4]]);
+ * x.cumprod().print();
+ * ```
+ *
+ * @param x The input tensor to cumulatively multiply.
+ * @param axis The axis along which to multiply. Optional. Defaults to 0.
+ * @param exclusive Whether to perform exclusive cumulative product. Optional.
+ *     Defaults to false. If set to true, the product at each tensor entry
+ *     does not include its own value, but only the values previous to it
+ *     along the specified axis.
+ * @param reverse Whether to multiply in the opposite direction. Optional.
+ *     Defaults to false.
+ *
+ * @doc {heading: 'Operations', subheading: 'Scan'}
+ */
+function cumprod_<T extends Tensor>(
+  x: Tensor | TensorLike,
+  axis = 0,
+  exclusive = false,
+  reverse = false
+): T {
+  const $x = convertToTensor(x, 'x', 'cumprod');
+
+  const inputs: CumprodInputs = { x: $x };
+  const attrs: CumprodAttrs = { axis, exclusive, reverse };
+
+  return ENGINE.runKernel(
+    Cumprod,
+    inputs as {} as NamedTensorMap,
+    attrs as {} as NamedAttrMap
+  );
+}
+
+export const cumprod = op({ cumprod_ });
diff --git a/tfjs-core/src/ops/cumprod_test.ts b/tfjs-core/src/ops/cumprod_test.ts
new file mode 100644
index 00000000000..4e9d2a54a38
--- /dev/null
+++ b/tfjs-core/src/ops/cumprod_test.ts
@@ -0,0 +1,167 @@
+/**
+ * @license
+ * Copyright 2022 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the 'License');
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an 'AS IS' BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+
+import * as tf from '../index';
+import { ALL_ENVS, describeWithFlags } from '../jasmine_util';
+import { expectArraysClose } from '../test_util';
+
+describeWithFlags('cumprod', ALL_ENVS, () => {
+  it('1D standard', async () => {
+    const res = tf.tensor1d([1, 2, 3, 4]).cumprod();
+    expect(res.shape).toEqual([4]);
+    expectArraysClose(await res.data(), [1, 2, 6, 24]);
+  });
+
+  it('1D reverse', async () => {
+    const reverse = true;
+    const exclusive = false;
+    const res = tf.tensor1d([1, 2, 3, 4]).cumprod(0, exclusive, reverse);
+    expect(res.shape).toEqual([4]);
+    expectArraysClose(await res.data(), [24, 24, 12, 4]);
+  });
+
+  it('1D exclusive', async () => {
+    const exclusive = true;
+    const res = tf.tensor1d([1, 2, 3, 4]).cumprod(0, exclusive);
+    expect(res.shape).toEqual([4]);
+    expectArraysClose(await res.data(), [1, 1, 2, 6]);
+  });
+
+  it('1D exclusive reverse', async () => {
+    const reverse = true;
+    const exclusive = true;
+    const res = tf.tensor1d([1, 2, 3, 4]).cumprod(0, exclusive, reverse);
+    expect(res.shape).toEqual([4]);
+    expectArraysClose(await res.data(), [24, 12, 4, 1]);
+  });
+
+  // TODO: once gradients are implemented, create tests something like this.
+ // it('gradient: 1D', async () => { + // const a = tf.tensor1d([1, 2, 3]); + // const dy = tf.tensor1d([4, 5, 6]); + // const da = tf.grad((x) => tf.cumprod(x))(a, dy); + + // expect(da.shape).toEqual([3]); + // expectArraysClose(await da.data(), [15, 11, 6]); + // }); + + // it('gradient with clones', async () => { + // const a = tf.tensor1d([1, 2, 3]); + // const dy = tf.tensor1d([4, 5, 6]); + // const da = tf.grad((x) => tf.cumprod(x.clone()).clone())(a, dy); + + // expect(da.shape).toEqual([3]); + // expectArraysClose(await da.data(), [15, 11, 6]); + // }); + + it('2D standard', async () => { + const res = tf + .tensor2d([ + [1, 2], + [3, 4], + ]) + .cumprod(1); + expect(res.shape).toEqual([2, 2]); + expectArraysClose(await res.array(), [[1, 2], [3, 12]]); + }); + + it('2D reverse exclusive', async () => { + const reverse = true; + const exclusive = true; + const res = tf + .tensor2d([ + [1, 2], + [3, 4], + ]) + .cumprod(1, exclusive, reverse); + expect(res.shape).toEqual([2, 2]); + expectArraysClose(await res.array(), [[2, 1], [4, 1]]); + }); + + it('2D axis=0', async () => { + const res = tf + .tensor2d([ + [1, 2], + [3, 4], + ]) + .cumprod(); + expect(res.shape).toEqual([2, 2]); + expectArraysClose(await res.array(), [[1, 2], [3, 8]]); + }); + + it('3D standard', async () => { + const res = tf + .tensor3d([ + [ + [0, 1], + [2, 3], + ], + [ + [4, 5], + [6, 7], + ], + ]) + .cumprod(2); + expect(res.shape).toEqual([2, 2, 2]); + expectArraysClose(await res.array(), [ + [ + [0, 0 * 1], + [2, 2 * 3] + ], + [ + [4, 4 * 5], + [6, 6 * 7] + ] + ]); + }); + + it('4d axis=2', async () => { + const input = tf.add(tf.ones([1, 32, 8, 4]), tf.ones([1, 32, 8, 4])); + const res = tf.cumprod(input, 2, false, false); + + expect(res.shape).toEqual([1, 32, 8, 4]); + + const earlySlice = tf.slice(res, [0, 0, 0, 0], [1, 1, 8, 1]); + const lateSlice = tf.slice(res, [0, 31, 0, 0], [1, 1, 8, 1]); + const expectedDataInEachSlice = [2, 4, 8, 16, 32, 64, 128, 256]; + expectArraysClose(await earlySlice.data(), expectedDataInEachSlice); + expectArraysClose(await lateSlice.data(), expectedDataInEachSlice); + }); + + it('handle permutation properly', async () => { + const res = tf.ones([1, 240, 1, 10]).cumprod(1); + expect(res.shape).toEqual([1, 240, 1, 10]); + }); + + it('throws when passed a non-tensor', () => { + expect(() => tf.cumprod({} as tf.Tensor)).toThrowError( + /Argument 'x' passed to 'cumprod' must be a Tensor/ + ); + }); + + it('accepts a tensor-like object', async () => { + const res = tf.cumprod([1, 2, 3, 4]); + expect(res.shape).toEqual([4]); + expectArraysClose(await res.data(), [1, 2, 6, 24]); + }); + + it('throws error for string tensor', () => { + expect(() => tf.cumprod(['a', 'b', 'c'])).toThrowError( + /Argument 'x' passed to 'cumprod' must be numeric tensor/ + ); + }); +}); diff --git a/tfjs-core/src/ops/ops.ts b/tfjs-core/src/ops/ops.ts index 96a7acdf608..0209352d706 100644 --- a/tfjs-core/src/ops/ops.ts +++ b/tfjs-core/src/ops/ops.ts @@ -59,6 +59,7 @@ export {conv3d} from './conv3d'; export {conv3dTranspose} from './conv3d_transpose'; export {cos} from './cos'; export {cosh} from './cosh'; +export {cumprod} from './cumprod'; export {cumsum} from './cumsum'; export {denseBincount} from './dense_bincount'; export {depthToSpace} from './depth_to_space'; diff --git a/tfjs-core/src/public/chained_ops/cumprod.ts b/tfjs-core/src/public/chained_ops/cumprod.ts new file mode 100644 index 00000000000..2f7b7a6ae94 --- /dev/null +++ b/tfjs-core/src/public/chained_ops/cumprod.ts @@ -0,0 +1,38 @@ +/** + 
* @license
+ * Copyright 2022 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the 'License');
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an 'AS IS' BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+import { cumprod } from '../../ops/cumprod';
+import { getGlobalTensorClass, Tensor } from '../../tensor';
+import { Rank } from '../../types';
+
+declare module '../../tensor' {
+  interface Tensor<R extends Rank = Rank> {
+    cumprod<T extends Tensor>(
+      axis?: number,
+      exclusive?: boolean,
+      reverse?: boolean
+    ): T;
+  }
+}
+
+getGlobalTensorClass().prototype.cumprod = function <T extends Tensor>(
+  axis?: number,
+  exclusive?: boolean,
+  reverse?: boolean
+): T {
+  this.throwIfDisposed();
+  return cumprod(this, axis, exclusive, reverse);
+};
diff --git a/tfjs-core/src/public/chained_ops/register_all_chained_ops.ts b/tfjs-core/src/public/chained_ops/register_all_chained_ops.ts
index 66e092d2e17..2cf81f78646 100644
--- a/tfjs-core/src/public/chained_ops/register_all_chained_ops.ts
+++ b/tfjs-core/src/public/chained_ops/register_all_chained_ops.ts
@@ -48,6 +48,7 @@ import './conv2d_transpose';
 import './conv2d';
 import './cos';
 import './cosh';
+import './cumprod';
 import './cumsum';
 import './depth_to_space';
 import './depthwise_conv2d';
diff --git a/tfjs-core/src/public/chained_ops/register_all_chained_ops_test.ts b/tfjs-core/src/public/chained_ops/register_all_chained_ops_test.ts
index 71dc145b541..0a5c21079bc 100644
--- a/tfjs-core/src/public/chained_ops/register_all_chained_ops_test.ts
+++ b/tfjs-core/src/public/chained_ops/register_all_chained_ops_test.ts
@@ -59,6 +59,7 @@ const CHAINED_OPS = [
   'conv2dTranspose',
   'cos',
   'cosh',
+  'cumprod',
   'cumsum',
   'depthToSpace',
   'depthwiseConv2d',
diff --git a/tfjs-node/src/kernels/Cumprod.ts b/tfjs-node/src/kernels/Cumprod.ts
new file mode 100644
index 00000000000..94642a52c0a
--- /dev/null
+++ b/tfjs-node/src/kernels/Cumprod.ts
@@ -0,0 +1,41 @@
+/**
+ * @license
+ * Copyright 2022 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * ============================================================================= + */ + +import {Cumprod, CumprodAttrs, CumprodInputs, KernelConfig, scalar} from '@tensorflow/tfjs'; + +import {createTensorsTypeOpAttr, NodeJSKernelBackend} from '../nodejs_kernel_backend'; + +export const cumprodConfig: KernelConfig = { + kernelName: Cumprod, + backendName: 'tensorflow', + kernelFunc: (args) => { + const {x} = args.inputs as CumprodInputs; + const backend = args.backend as NodeJSKernelBackend; + const {axis, exclusive, reverse} = args.attrs as {} as CumprodAttrs; + + const axisTensor = scalar(axis, 'int32'); + const opAttrs = [ + {name: 'exclusive', type: backend.binding.TF_ATTR_BOOL, value: exclusive}, + {name: 'reverse', type: backend.binding.TF_ATTR_BOOL, value: reverse}, + createTensorsTypeOpAttr('T', x.dtype), + createTensorsTypeOpAttr('Tidx', 'int32') + ]; + const res = backend.executeSingleOutput(Cumprod, opAttrs, [x, axisTensor]); + axisTensor.dispose(); + return res; + } +}; diff --git a/tfjs-node/src/register_all_kernels.ts b/tfjs-node/src/register_all_kernels.ts index 27af75991ec..56d9d3b1d3b 100644 --- a/tfjs-node/src/register_all_kernels.ts +++ b/tfjs-node/src/register_all_kernels.ts @@ -57,6 +57,7 @@ import {conv3DBackpropInputV2Config} from './kernels/Conv3DBackpropInputV2'; import {cosConfig} from './kernels/Cos'; import {coshConfig} from './kernels/Cosh'; import {cropAndResizeConfig} from './kernels/CropAndResize'; +import {cumprodConfig} from './kernels/Cumprod'; import {cumsumConfig} from './kernels/Cumsum'; import {depthToSpaceConfig} from './kernels/DepthToSpace'; import {depthwiseConv2dNativeConfig} from './kernels/DepthwiseConv2dNative'; @@ -214,6 +215,7 @@ const kernelConfigs: KernelConfig[] = [ cosConfig, coshConfig, cropAndResizeConfig, + cumprodConfig, cumsumConfig, bincountConfig, depthToSpaceConfig,
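
A minimal usage sketch of the op this patch adds (the `tf` import path is illustrative); the expected values follow the 1D cases in `cumprod_test.ts` above:

```js
const tf = require('@tensorflow/tfjs');

async function demo() {
  const x = tf.tensor1d([1, 2, 3, 4]);

  // Inclusive product along axis 0.
  console.log(await tf.cumprod(x).data());                  // [1, 2, 6, 24]

  // Exclusive: each entry is the product of the values before it.
  console.log(await tf.cumprod(x, 0, true).data());         // [1, 1, 2, 6]

  // Reverse: scan from the end of the axis.
  console.log(await tf.cumprod(x, 0, false, true).data());  // [24, 24, 12, 4]

  // Chained form registered via chained_ops/cumprod.ts.
  x.cumprod(0, true, true).print();                         // [24, 12, 4, 1]
}

demo();
```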