Initial version of cumprod #6211
Merged · 5 commits · Mar 11, 2022
Changes from all commits
88 changes: 88 additions & 0 deletions tfjs-backend-cpu/src/kernels/Cumprod.ts
@@ -0,0 +1,88 @@
/**
* @license
* Copyright 2022 Google LLC. All Rights Reserved.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* =============================================================================
*/

import {backend_util, Cumprod, CumprodAttrs, CumprodInputs, KernelConfig, KernelFunc, TensorInfo, TypedArray, upcastType, util} from '@tensorflow/tfjs-core';

import {MathBackendCPU} from '../backend_cpu';
import {assertNotComplex} from '../cpu_util';
import {transpose} from './Transpose';

export function cumprod(
args: {inputs: CumprodInputs, backend: MathBackendCPU,
attrs: CumprodAttrs}): TensorInfo {
const {inputs, backend, attrs} = args;
const {x} = inputs;
const {axis, exclusive, reverse} = attrs;

assertNotComplex(x, 'cumprod');

const permutation = backend_util.getAxesPermutation([axis], x.shape.length);
let $x = x;
if (permutation != null) {
$x = transpose({inputs: {x}, backend, attrs: {perm: permutation}});
}
const permutedAxis = backend_util.getInnerMostAxes(1, x.shape.length)[0];

if (permutedAxis !== $x.shape.length - 1) {
throw new Error(
`backend.cumprod in CPU expects an inner-most ` +
`axis=${$x.shape.length - 1} but got axis=${permutedAxis}`);
}

const resultDtype = upcastType($x.dtype, 'int32');
const vals = util.makeOnesTypedArray(
util.sizeFromShape($x.shape), resultDtype) as TypedArray;

const aVals = backend.data.get($x.dataId).values as TypedArray;
const finalDim = $x.shape[$x.shape.length - 1];
const indexAdjuster = reverse ?
(i: number, j: number) => i + finalDim - j - 1 :
(i: number, j: number) => i + j;
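  // Scan each length-`finalDim` slice of the flattened data. `indexAdjuster`
  // maps the logical scan position j to a flat offset, walking the slice
  // back-to-front when `reverse` is set.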
for (let i = 0; i < aVals.length; i += finalDim) {
for (let j = 0; j < finalDim; j++) {
const idx = indexAdjuster(i, j);
if (j === 0) {
vals[idx] = exclusive ? 1 : aVals[idx];
} else {
const prevIdx = indexAdjuster(i, j - 1);
vals[idx] = exclusive ? aVals[prevIdx] * vals[prevIdx] :
aVals[idx] * vals[prevIdx];
}
}
}

const result = backend.makeTensorInfo($x.shape, resultDtype, vals);

if (permutation != null) {
const reversePermutation = backend_util.getUndoAxesPermutation(permutation);
const reverseTransposedResult = transpose(
{inputs: {x: result}, backend, attrs: {perm: reversePermutation}});

backend.disposeIntermediateTensorInfo(result);
backend.disposeIntermediateTensorInfo($x);

return reverseTransposedResult;
}

return result;
}

export const cumprodConfig: KernelConfig = {
kernelName: Cumprod,
backendName: 'cpu',
kernelFunc: cumprod as {} as KernelFunc
};
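
For reference, a minimal usage sketch that exercises this kernel through the public API. It assumes the corresponding tf.cumprod op is exported from tfjs-core with the same (x, axis, exclusive, reverse) signature as tf.cumsum; the core-side changes are not among the files shown here.

import * as tf from '@tensorflow/tfjs';

const x = tf.tensor1d([2, 3, 4]);
// Inclusive scan: [2, 2*3, 2*3*4].
tf.cumprod(x).print();                                 // [2, 6, 24]
// Exclusive scan starts from the multiplicative identity: [1, 2, 2*3].
tf.cumprod(x, 0, /* exclusive= */ true).print();       // [1, 2, 6]
// Reverse scans from the end of the axis: [2*3*4, 3*4, 4].
tf.cumprod(x, 0, false, /* reverse= */ true).print();  // [24, 12, 4]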
2 changes: 2 additions & 0 deletions tfjs-backend-cpu/src/register_all_kernels.ts
@@ -58,6 +58,7 @@ import {conv3DBackpropInputV2Config} from './kernels/Conv3DBackpropInputV2';
import {cosConfig} from './kernels/Cos';
import {coshConfig} from './kernels/Cosh';
import {cropAndResizeConfig} from './kernels/CropAndResize';
import {cumprodConfig} from './kernels/Cumprod';
import {cumsumConfig} from './kernels/Cumsum';
import {denseBincountConfig} from './kernels/DenseBincount';
import {depthToSpaceConfig} from './kernels/DepthToSpace';
@@ -224,6 +225,7 @@ const kernelConfigs: KernelConfig[] = [
cosConfig,
coshConfig,
cropAndResizeConfig,
cumprodConfig,
cumsumConfig,
denseBincountConfig,
depthToSpaceConfig,
10 changes: 10 additions & 0 deletions tfjs-backend-wasm/src/cc/BUILD
@@ -285,6 +285,7 @@ tfjs_cc_library(
":Cos",
":Cosh",
":CropAndResize",
":Cumprod",
":Cumsum",
":DepthToSpace",
":DepthwiseConv2dNative",
@@ -536,6 +537,15 @@
],
)

tfjs_cc_library(
name = "Cumprod",
srcs = ["kernels/Cumprod.cc"],
deps = [
":backend",
":util",
],
)

tfjs_cc_library(
name = "Cumsum",
srcs = ["kernels/Cumsum.cc"],
74 changes: 74 additions & 0 deletions tfjs-backend-wasm/src/cc/kernels/Cumprod.cc
@@ -0,0 +1,74 @@
/* Copyright 2022 Google LLC. All Rights Reserved.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* ===========================================================================*/

#ifdef __EMSCRIPTEN__
#include <emscripten.h>
#endif

#include <cstddef>
#include <cstdint>

#include "tfjs-backend-wasm/src/cc/backend.h"
#include "tfjs-backend-wasm/src/cc/util.h"

namespace tfjs {

template <typename T>
void cumprod(const size_t x_id, const size_t exclusive, const size_t reverse,
const size_t final_dim, const size_t out_id) {
auto& x_info = backend::get_tensor_info(x_id);
auto& out_info = backend::get_tensor_info_out(out_id);

const T* x_buf = reinterpret_cast<const T*>(x_info.memory_offset);
T* out_buf = reinterpret_cast<T*>(out_info.memory_offset);

for (size_t i = 0; i < x_info.size; i += final_dim) {
for (size_t j = 0; j < final_dim; ++j) {
const size_t idx = reverse ? i + final_dim - j - 1 : i + j;
if (j == 0) {
out_buf[idx] = exclusive ? 1 : x_buf[idx];
} else {
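        // The element produced on the previous iteration sits at idx + 1 when
        // scanning in reverse (the slice is walked from its last element
        // toward its first), and at idx - 1 otherwise.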
const size_t prev_idx = reverse ? idx + 1 : idx - 1;
out_buf[idx] = exclusive ? x_buf[prev_idx] * out_buf[prev_idx] :
x_buf[idx] * out_buf[prev_idx];
}
}
}
}

namespace wasm {
// We use a C-style API to interface with JavaScript.
extern "C" {

#ifdef __EMSCRIPTEN__
EMSCRIPTEN_KEEPALIVE
#endif
void Cumprod(const size_t x_id, const size_t exclusive, const size_t reverse,
const size_t final_dim, const size_t out_id, const DType dtype) {
switch (dtype) {
case DType::float32:
cumprod<float>(x_id, exclusive, reverse, final_dim, out_id);
break;
case DType::int32:
cumprod<int32_t>(x_id, exclusive, reverse, final_dim, out_id);
break;
default:
util::warn("Cumprod for tensor id %d failed. Unsupported dtype %d",
x_id, dtype);
}
}

} // extern "C"
} // namespace wasm
} // namespace tfjs
83 changes: 83 additions & 0 deletions tfjs-backend-wasm/src/kernels/Cumprod.ts
@@ -0,0 +1,83 @@
/**
* @license
* Copyright 2022 Google LLC. All Rights Reserved.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* =============================================================================
*/

import {backend_util, KernelConfig, KernelFunc, Cumprod, CumprodAttrs, CumprodInputs, TensorInfo, util} from '@tensorflow/tfjs-core';

import {BackendWasm} from '../backend_wasm';

import {CppDType} from './types';

import {transpose} from './Transpose';

let wasmCumprod: (xId: number, exclusive: number, reverse: number,
finalDim: number, outId: number, dtype: CppDType) => void;

function setup(backend: BackendWasm) {
wasmCumprod = backend.wasm.cwrap(Cumprod, null /* void */, [
'number', // x_id
'number', // exclusive
'number', // reverse
'number', // final_dim
'number', // out_id
'number' // dtype
]);
}

export function cumprod(
args: {inputs: CumprodInputs, backend: BackendWasm, attrs: CumprodAttrs}):
TensorInfo {
const {inputs, backend, attrs} = args;
const {x} = inputs;
const {axis, exclusive, reverse} = attrs;
const xRank = x.shape.length;

util.assert(x.dtype === 'float32' || x.dtype === 'int32',
() => `cumprod does not support ${x.dtype} tensors in the WASM backend`);
  // Permute the requested axis to the innermost position.
const permutation = backend_util.getAxesPermutation([axis], xRank);
let permutedX = x;
if (permutation !== null) {
permutedX = transpose({inputs: {x}, attrs: {perm: permutation}, backend});
}
const permutedAxis = backend_util.getInnerMostAxes(1, xRank)[0];
backend_util.assertAxesAreInnerMostDims('cumprod', [permutedAxis], xRank);

const permutedOut = backend.makeOutput(permutedX.shape, permutedX.dtype);
const finalDim = permutedX.shape[permutedAxis];
const permutedXId = backend.dataIdMap.get(permutedX.dataId).id;
const permutedOutId = backend.dataIdMap.get(permutedOut.dataId).id;
wasmCumprod(permutedXId, exclusive ? 1 : 0, reverse ? 1 : 0, finalDim,
permutedOutId, CppDType[x.dtype]);

// transpose data back if permuted
let out = permutedOut;
if (permutation !== null) {
const undoPermutation = backend_util.getUndoAxesPermutation(permutation);
out = transpose(
{inputs: {x: permutedOut}, attrs: {perm: undoPermutation}, backend});
backend.disposeData(permutedX.dataId);
backend.disposeData(permutedOut.dataId);
}
return out;
}

export const cumprodConfig: KernelConfig = {
kernelName: Cumprod,
backendName: 'wasm',
setupFunc: setup,
kernelFunc: cumprod as {} as KernelFunc
};
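
Both the CPU and WASM kernels reduce the general case to an innermost-axis scan: transpose the requested axis to the last position, scan, then undo the transpose. A small illustration of the helpers involved, with return values stated as expectations rather than verified outputs:

import {backend_util} from '@tensorflow/tfjs-core';

// Rank-3 input, scan along axis 0: move axis 0 to the innermost slot.
const perm = backend_util.getAxesPermutation([0], 3);  // expected: [1, 2, 0]
if (perm != null) {
  // Inverse permutation used to restore the original layout afterwards.
  const undo = backend_util.getUndoAxesPermutation(perm);  // expected: [2, 0, 1]
  console.log(perm, undo);
}
// Axis 2 is already innermost, so no transpose is needed.
console.log(backend_util.getAxesPermutation([2], 3));  // expected: null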
2 changes: 2 additions & 0 deletions tfjs-backend-wasm/src/register_all_kernels.ts
@@ -38,6 +38,7 @@ import {conv2DBackpropInputConfig} from './kernels/Conv2DBackpropInput';
import {cosConfig} from './kernels/Cos';
import {coshConfig} from './kernels/Cosh';
import {cropAndResizeConfig} from './kernels/CropAndResize';
import {cumprodConfig} from './kernels/Cumprod';
import {cumsumConfig} from './kernels/Cumsum';
import {depthToSpaceConfig} from './kernels/DepthToSpace';
import {depthwiseConv2dNativeConfig} from './kernels/DepthwiseConv2dNative';
@@ -141,6 +142,7 @@ const kernelConfigs: KernelConfig[] = [
cosConfig,
coshConfig,
cropAndResizeConfig,
cumprodConfig,
cumsumConfig,
depthToSpaceConfig,
depthwiseConv2dNativeConfig,
1 change: 1 addition & 0 deletions tfjs-backend-wasm/src/setup_test.ts
@@ -291,6 +291,7 @@ const TEST_FILTERS: TestFilter[] = [
},
{startsWith: 'reverse'},
{startsWith: 'sum '},
{startsWith: 'cumprod'},
{startsWith: 'cumsum'},
{startsWith: 'logicalAnd '},
{
87 changes: 87 additions & 0 deletions tfjs-backend-webgl/src/cumprod_gpu.ts
@@ -0,0 +1,87 @@
/**
* @license
* Copyright 2022 Google LLC. All Rights Reserved.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* =============================================================================
*/
import {GPGPUProgram} from './gpgpu_math';
import {getCoordsDataType, UniformType} from './shader_compiler';

export class CumProdProgram implements GPGPUProgram {
variableNames = ['x'];
outputShape: number[];
userCode: string;
customUniforms = [{name: 'index', type: 'float' as UniformType}];

constructor(shape: number[], exclusive: boolean, reverse: boolean) {
this.outputShape = shape;
const rank = shape.length;
const val = exclusive ? '1.0' : `getX(${getCoords(rank, 'coords')})`;
const length = shape[shape.length - 1];
let condition = '';
let idxString = '';
    // When exclusive is set, the cumprod op becomes a roll op that copies the
    // value from the previous index, in the direction given by the reverse
    // flag.
if (exclusive) {
condition = reverse ? `end != ${length - 1}` : 'end != 0';
idxString = reverse ? 'end + 1' : 'end - 1';
} else {
condition = reverse ? `end + pow2 < ${length}` : 'end >= pow2';
idxString = (reverse ? 'end + pow2' : 'end - pow2');
}

this.userCode = `
void main() {
${getCoordsDataType(rank)} coords = getOutputCoords();
int end = ${getFinalCoord(rank, 'coords')};
float val = ${val};
int pow2 = int(pow(2.0, index));
if (${condition}) {
int idx = ${idxString};
${getFinalCoord(rank, 'coords')} = idx;
val *= getX(${getCoords(rank, 'coords')});
}
setOutput(val);
}
`;
}
}

function getCoords(rank: number, name: string): string {
if (rank === 1) {
return `${name}`;
} else if (rank === 2) {
return `${name}.x, ${name}.y`;
} else if (rank === 3) {
return `${name}.x, ${name}.y, ${name}.z`;
} else if (rank === 4) {
return `${name}.x, ${name}.y, ${name}.z, ${name}.w`;
} else {
throw Error(`Cumulative product for rank ${rank} is not yet supported`);
}
}

function getFinalCoord(rank: number, name: string): string {
if (rank === 1) {
return `${name}`;
} else if (rank === 2) {
return `${name}.y`;
} else if (rank === 3) {
return `${name}.z`;
} else if (rank === 4) {
return `${name}.w`;
} else {
throw Error(`Cumulative product for rank ${rank} is not yet supported`);
}
}
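
The shader above encodes one step of a doubling (Hillis-Steele) scan: each element is multiplied by its partner pow2 = 2^index positions away. The WebGL backend kernel that drives the program is not among the files shown, but it presumably runs the program roughly ceil(log2(n)) times with index = 0, 1, 2, ... A plain TypeScript sketch of the non-exclusive, non-reverse case shows why the repeated passes produce the full prefix product:

// Model of repeatedly applying CumProdProgram's per-pass step
// (exclusive/reverse omitted). Each pass reads the previous pass's output.
function doublingCumprod(input: number[]): number[] {
  let vals = input.slice();
  for (let index = 0; Math.pow(2, index) < vals.length; index++) {
    const pow2 = Math.pow(2, index);
    const next = vals.slice();
    for (let end = 0; end < vals.length; end++) {
      // Mirrors the shader: `if (end >= pow2) { val *= getX(end - pow2); }`.
      if (end >= pow2) {
        next[end] = vals[end] * vals[end - pow2];
      }
    }
    vals = next;
  }
  return vals;
}

console.log(doublingCumprod([2, 3, 4, 5]));  // [2, 6, 24, 120]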