diff --git a/binding/tfjs_backend.cc b/binding/tfjs_backend.cc index 6233ccc7..50ba3317 100644 --- a/binding/tfjs_backend.cc +++ b/binding/tfjs_backend.cc @@ -59,7 +59,10 @@ TFE_TensorHandle *CreateTFE_TensorHandleFromTypedArray(napi_env env, width = sizeof(float); break; case napi_int32_array: - if (dtype != TF_INT32) { + if (dtype != TF_INT32 && dtype != TF_INT64) { + // Currently, both int32- and int64-type Tensors are represented + // as Int32Arrays in JavaScript. See int64_tensors.ts for details + // about the latter. NAPI_THROW_ERROR(env, "Tensor type does not match Int32Array"); return nullptr; } @@ -78,12 +81,26 @@ TFE_TensorHandle *CreateTFE_TensorHandleFromTypedArray(napi_env env, } // Double check that width matches TF data type size: - if (width != TF_DataTypeSize(dtype)) { - NAPI_THROW_ERROR(env, - "Byte size of elements differs between JavaScript VM " - "(%zu) and TensorFlow (%zu)", - width, TF_DataTypeSize(dtype)); - return nullptr; + if (dtype == TF_INT64) { + // Currently, int64-type Tensors are represented as Int32Arrays. So the + // logic for comparing the byte size of the typed-array representation and + // the byte size of the tensor dtype needs to be special-cased for int64. + if (width * 2 != TF_DataTypeSize(dtype)) { + NAPI_THROW_ERROR( + env, + "Byte size of elements differs between JavaScript VM " + "(%zu * 2 = %zu) and TensorFlow (%zu) for int64-type tensor", + width, width * 2, TF_DataTypeSize(dtype)); + return nullptr; + } + } else { + if (width != TF_DataTypeSize(dtype)) { + NAPI_THROW_ERROR(env, + "Byte size of elements differs between JavaScript VM " + "(%zu) and TensorFlow (%zu)", + width, TF_DataTypeSize(dtype)); + return nullptr; + } } // Determine the size of the buffer based on the dimensions. @@ -93,16 +110,35 @@ TFE_TensorHandle *CreateTFE_TensorHandleFromTypedArray(napi_env env, } // Ensure the shape matches the length of the passed in typed-array. 
- if (num_elements != array_length) { - NAPI_THROW_ERROR(env, - "Shape does not match typed-array in bindData() " - "(num_elements=%zu, array_length=%zu)", - num_elements, array_length); - return nullptr; + if (dtype == TF_INT64) { + // Currently, int64-type Tensors are represented as Int32Arrays. + // To represent an int64-type Tensor of `n` elements, an Int32Array of + // length `2 * n` is required. This is why the length-match checking + // logic is special-cased for int64. + if (array_length != num_elements * 2) { + NAPI_THROW_ERROR( + env, + "Shape does not match two times typed-array in bindData() " + "(num_elements * 2 = %zu, array_length=%zu) for int64 data type", + num_elements * 2, array_length); + return nullptr; + } + } else { + if (num_elements != array_length) { + NAPI_THROW_ERROR(env, + "Shape does not match typed-array in bindData() " + "(num_elements=%zu, array_length=%zu)", + num_elements, array_length); + return nullptr; + } } // Allocate and memcpy JS data to Tensor. - const size_t byte_size = num_elements * width; + // Currently, int64-type Tensors are represented as Int32Arrays. + // So the logic for comparing the byte size of the typed-array representation + // and the byte size of the tensor dtype needs to be special-cased for int64. + const size_t byte_size = + dtype == TF_INT64 ? num_elements * width * 2 : num_elements * width; TF_AutoTensor tensor( TF_AllocateTensor(dtype, shape, shape_length, byte_size)); memcpy(TF_TensorData(tensor.tensor), array_data, byte_size); @@ -252,8 +288,8 @@ void CopyTFE_TensorHandleDataToTypedArray(napi_env env, &array_buffer_value); ENSURE_NAPI_OK(env, nstatus); - // TFE_TensorHandleResolve can use a shared data pointer, memcpy() the current - // value to the newly allocated NAPI buffer. + // TFE_TensorHandleResolve can use a shared data pointer, memcpy() the + // current value to the newly allocated NAPI buffer. 
memcpy(array_buffer_data, TF_TensorData(tensor.tensor), byte_length); nstatus = napi_create_typedarray(env, array_type, num_elements, @@ -324,6 +360,56 @@ void CopyTFE_TensorHandleDataToStringArray(napi_env env, } } +void CopyTFE_TensorHandleDataToResourceArray( + napi_env env, TFE_Context *tfe_context, TFE_TensorHandle *tfe_tensor_handle, + napi_value *result) { + TF_AutoStatus tf_status; + + TF_AutoTensor tensor( + TFE_TensorHandleResolve(tfe_tensor_handle, tf_status.status)); + ENSURE_TF_OK(env, tf_status); + + if (TF_TensorType(tensor.tensor) != TF_RESOURCE) { + NAPI_THROW_ERROR(env, "Tensor is not of type TF_RESOURCE"); + return; + } + + void *tensor_data = TF_TensorData(tensor.tensor); + ENSURE_VALUE_IS_NOT_NULL(env, tensor_data); + + size_t num_elements = GetTensorNumElements(tensor.tensor); + if (num_elements != 1) { + NAPI_THROW_ERROR(env, + "For DT_RESOURCE tensors, Node.js binding currently " + "supports only exactly 1 element, but encountered " + "DT_RESOURCE tensor with %zu elements.", + num_elements); + } + + TF_AutoStatus status; + + // Create a JS string to stash the resource handle into. + napi_status nstatus; + size_t byte_length = TF_TensorByteSize(tensor.tensor); + nstatus = napi_create_array_with_length(env, byte_length, result); + ENSURE_NAPI_OK(env, nstatus); + + napi_value array_buffer_value; + void *array_buffer_data = nullptr; + nstatus = napi_create_arraybuffer(env, byte_length, &array_buffer_data, + &array_buffer_value); + ENSURE_NAPI_OK(env, nstatus); + + // TFE_TensorHandleResolve can use a shared data pointer, memcpy() the + // current value to the newly allocated NAPI buffer. + memcpy(array_buffer_data, tensor_data, byte_length); + + // This method will only return uint8 arrays. + nstatus = napi_create_typedarray(env, napi_uint8_array, byte_length, + array_buffer_value, 0, result); + ENSURE_NAPI_OK(env, nstatus); +} + // Handles converting the stored TF_Tensor data into the correct JS value. 
void CopyTFE_TensorHandleDataToJSData(napi_env env, TFE_Context *tfe_context, TFE_TensorHandle *tfe_tensor_handle, @@ -340,6 +426,7 @@ void CopyTFE_TensorHandleDataToJSData(napi_env env, TFE_Context *tfe_context, // Determine the type of the array napi_typedarray_type typed_array_type; bool is_string = false; + bool is_resource = false; TF_DataType tensor_data_type = TFE_TensorHandleDataType(tfe_tensor_handle); switch (tensor_data_type) { case TF_COMPLEX64: @@ -355,6 +442,11 @@ void CopyTFE_TensorHandleDataToJSData(napi_env env, TFE_Context *tfe_context, case TF_STRING: is_string = true; break; + case TF_RESOURCE: + // We currently represent a resource handle as an `Uint8Array`. + typed_array_type = napi_uint8_array; + is_resource = true; + break; default: REPORT_UNKNOWN_TF_DATA_TYPE(env, TFE_TensorHandleDataType(tfe_tensor_handle)); @@ -364,6 +456,9 @@ void CopyTFE_TensorHandleDataToJSData(napi_env env, TFE_Context *tfe_context, if (is_string) { CopyTFE_TensorHandleDataToStringArray(env, tfe_context, tfe_tensor_handle, result); + } else if (is_resource) { + CopyTFE_TensorHandleDataToResourceArray(env, tfe_context, tfe_tensor_handle, + result); } else { CopyTFE_TensorHandleDataToTypedArray(env, tfe_context, tfe_tensor_handle, tensor_data_type, typed_array_type, @@ -426,8 +521,9 @@ void AssignOpAttr(napi_env env, TFE_Op *tfe_op, napi_value attr_value) { nstatus = GetStringParam(env, attr_name_value, attr_name_string); ENSURE_NAPI_OK(env, nstatus); - // OpAttr will be used beyond the scope of this function call. Stash ops in a - // set for re-use instead of dynamically reallocating strings for operations. + // OpAttr will be used beyond the scope of this function call. Stash ops in + // a set for re-use instead of dynamically reallocating strings for + // operations. 
const char *attr_name = ATTR_NAME_SET.insert(attr_name_string.c_str()).first->c_str(); @@ -761,7 +857,8 @@ napi_value TFJSBackend::ExecuteOp(napi_env env, napi_value op_name_value, nstatus = napi_get_value_int32(env, num_output_values, &num_outputs); ENSURE_NAPI_OK_RETVAL(env, nstatus, nullptr); - // Push `nullptr` to get a valid pointer in the call to `TFE_Execute()` below. + // Push `nullptr` to get a valid pointer in the call to `TFE_Execute()` + // below. std::vector result_handles(num_outputs, nullptr); int size = result_handles.size(); diff --git a/binding/tfjs_binding.cc b/binding/tfjs_binding.cc index cf42a1d4..bcf66850 100644 --- a/binding/tfjs_binding.cc +++ b/binding/tfjs_binding.cc @@ -163,9 +163,11 @@ static napi_value InitTFNodeJSBinding(napi_env env, napi_value exports) { // Types EXPORT_INT_PROPERTY(TF_FLOAT); EXPORT_INT_PROPERTY(TF_INT32); + EXPORT_INT_PROPERTY(TF_INT64); EXPORT_INT_PROPERTY(TF_BOOL); EXPORT_INT_PROPERTY(TF_COMPLEX64); EXPORT_INT_PROPERTY(TF_STRING); + EXPORT_INT_PROPERTY(TF_RESOURCE); // Op AttrType EXPORT_INT_PROPERTY(TF_ATTR_STRING); diff --git a/package.json b/package.json index 13be6788..5e67367f 100644 --- a/package.json +++ b/package.json @@ -34,6 +34,8 @@ "clang-format": "~1.2.2", "jasmine": "~3.1.0", "nyc": "^12.0.2", + "shelljs": "^0.8.3", + "tmp": "^0.0.33", "ts-node": "^5.0.1", "tslint": "~5.9.1", "typescript": "~2.9.2", diff --git a/src/index.ts b/src/index.ts index 3c021a97..9ca078f7 100644 --- a/src/index.ts +++ b/src/index.ts @@ -36,6 +36,7 @@ export const io = { ...tf.io, ...nodeIo }; + // Export all union package symbols export * from '@tensorflow/tfjs'; @@ -61,3 +62,5 @@ tf.io.registerLoadRouter(nodeHTTPRequestRouter); import {ProgbarLogger} from './callbacks'; // Register the ProgbarLogger for Model.fit() at verbosity level 1. 
tf.registerCallbackConstructor(1, ProgbarLogger); + +export * from './node'; diff --git a/src/int64_tensors.ts b/src/int64_tensors.ts new file mode 100644 index 00000000..e8bf45e5 --- /dev/null +++ b/src/int64_tensors.ts @@ -0,0 +1,83 @@ +/** + * @license + * Copyright 2018 Google Inc. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + +import {Shape, util} from '@tensorflow/tfjs'; +import {endianness} from 'os'; + +const INT32_MAX = 2147483648; + +/** + * Node.js-specific tensor type: int64-type scalar. + * + * This class is created for a specific purpose: to support + * writing `step`s to TensorBoard via op-kernel bindings. + * `step` is required to have an int64 dtype, but TensorFlow.js + * (tfjs-core) doesn't have a built-in int64 dtype. This is + * related to a lack of `Int64Array` or `Uint64Array` typed + * array in basic JavaScript. + * + * This class is introduced as a workaround. + */ +export class Int64Scalar { + readonly dtype: string = 'int64'; + readonly rank: number = 1; + private valueArray_: Int32Array; + + private static endiannessOkay_: boolean; + + constructor(readonly value: number) { + // The reason why we need to check endianness of the machine here is + // negative int64 values and the way in which we represent them + // using Int32Arrays in JavaScript. 
We represent each int64 value with + // two consecutive elements of an Int32Array. For positive values, + // the high part is simply zero; for negative values, the high part + // should be -1. The ordering of the low and high parts assumes + // little endian (i.e., least significant digits appear first). + // This assumption is checked by the lines below. + if (Int64Scalar.endiannessOkay_ == null) { + if (endianness() !== 'LE') { + throw new Error( + `Int64Scalar does not support endianness of this machine: ` + + `${endianness()}`); + } + Int64Scalar.endiannessOkay_ = true; + } + + util.assert( + value > -INT32_MAX && value < INT32_MAX - 1, + `Got a value outside of the bound of values supported for int64 ` + + `dtype ([-${INT32_MAX}, ${INT32_MAX - 1}]): ${value}`); + util.assert( + Number.isInteger(value), + `Expected value to be an integer, but got ${value}`); + + // We use two int32 elements to represent a int64 value. This assumes + // little endian, which is checked above. + const highPart = value >= 0 ? 0 : -1; + const lowPart = value % INT32_MAX; + this.valueArray_ = new Int32Array([lowPart, highPart]); + } + + get shape(): Shape { + return []; + } + + /** Get the Int32Array that represents the int64 value. */ + get valueArray(): Int32Array { + return this.valueArray_; + } +} diff --git a/src/int64_tensors_test.ts b/src/int64_tensors_test.ts new file mode 100644 index 00000000..e4d171f6 --- /dev/null +++ b/src/int64_tensors_test.ts @@ -0,0 +1,61 @@ +/** + * @license + * Copyright 2018 Google Inc. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + +import {Int64Scalar} from './int64_tensors'; + +describe('int64 tensors', () => { + it('positive value', () => { + const x = new Int64Scalar(42); + expect(x.dtype).toEqual('int64'); + const valueArray = x.valueArray; + expect(valueArray.constructor.name).toEqual('Int32Array'); + expect(valueArray.length).toEqual(2); + expect(valueArray[0]).toEqual(42); + expect(valueArray[1]).toEqual(0); + }); + + it('zero value', () => { + const x = new Int64Scalar(0); + expect(x.dtype).toEqual('int64'); + const valueArray = x.valueArray; + expect(valueArray.constructor.name).toEqual('Int32Array'); + expect(valueArray.length).toEqual(2); + expect(valueArray[0]).toEqual(0); + expect(valueArray[1]).toEqual(0); + }); + + it('negative value', () => { + const x = new Int64Scalar(-3); + expect(x.dtype).toEqual('int64'); + const valueArray = x.valueArray; + expect(valueArray.constructor.name).toEqual('Int32Array'); + expect(valueArray.length).toEqual(2); + expect(valueArray[0]).toEqual(-3); + expect(valueArray[1]).toEqual(-1); + }); + + it('Non-integer value leads to error', () => { + expect(() => new Int64Scalar(0.4)).toThrowError(/integer/); + expect(() => new Int64Scalar(-3.2)).toThrowError(/integer/); + }); + + it('Out-of-bound value leads to error', () => { + expect(() => new Int64Scalar(2147483648)).toThrowError(/bound/); + expect(() => new Int64Scalar(2147483648 * 2)).toThrowError(/bound/); + expect(() => new Int64Scalar(-2147483648 - 1)).toThrowError(/bound/); + }); +}); 
diff --git a/src/node.ts b/src/node.ts new file mode 100644 index 00000000..a35f993f --- /dev/null +++ b/src/node.ts @@ -0,0 +1,24 @@ +/** + * @license + * Copyright 2018 Google Inc. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + +/** + * Public API symbols under the tf.node.* namespace. + */ + +import {summaryFileWriter} from './tensorboard'; + +export const node = {summaryFileWriter}; diff --git a/src/nodejs_kernel_backend.ts b/src/nodejs_kernel_backend.ts index 5d061cb9..c979cfe8 100644 --- a/src/nodejs_kernel_backend.ts +++ b/src/nodejs_kernel_backend.ts @@ -16,12 +16,13 @@ */ // tslint:disable-next-line:max-line-length -import {BackendTimingInfo, DataMover, DataType, fill, KernelBackend, ones, Rank, rsqrt, scalar, ShapeMap, Tensor, Tensor1D, tensor1d, Tensor2D, tensor2d, Tensor3D, tensor3d, Tensor4D} from '@tensorflow/tfjs-core'; +import {BackendTimingInfo, DataMover, DataType, fill, KernelBackend, ones, Rank, rsqrt, Scalar, scalar, ShapeMap, Tensor, Tensor1D, tensor1d, Tensor2D, tensor2d, Tensor3D, tensor3d, Tensor4D, tidy, util} from '@tensorflow/tfjs-core'; import {Conv2DInfo, Conv3DInfo} from '@tensorflow/tfjs-core/dist/ops/conv_util'; import {Tensor5D} from '@tensorflow/tfjs-core/dist/tensor'; import {upcastType} from '@tensorflow/tfjs-core/dist/types'; import {isNullOrUndefined} from 'util'; +import {Int64Scalar} from './int64_tensors'; // 
tslint:disable-next-line:max-line-length import {createTensorsTypeOpAttr, createTypeOpAttr, getTFDType} from './ops/op_utils'; import {TensorMetadata, TFEOpAttr, TFJSBinding} from './tfjs_binding'; @@ -50,6 +51,23 @@ export class NodeJSKernelBackend extends KernelBackend { // TODO(kreeger, smilkov): Implement this. } + private typeAttributeFromTensor(value: Tensor): number { + switch (value.dtype) { + case 'float32': + return this.binding.TF_FLOAT; + case 'int32': + return this.binding.TF_INT32; + case 'bool': + return this.binding.TF_BOOL; + case 'complex64': + return this.binding.TF_COMPLEX64; + case 'string': + return this.binding.TF_STRING; + default: + throw new Error(`Unsupported dtype ${value.dtype}`); + } + } + // Creates a new Tensor and maps the dataId to the passed in ID. private createOutputTensor(metadata: TensorMetadata): Tensor { const newId = {}; @@ -78,6 +96,11 @@ export class NodeJSKernelBackend extends KernelBackend { case this.binding.TF_STRING: dtype = 'string'; break; + case this.binding.TF_RESOURCE: + // NOTE(cais): We currently represent resource-type Tensors + // as string of ubytes. + dtype = 'string'; + break; default: throw new Error(`Unknown dtype enum ${metadata.dtype}`); } @@ -85,20 +108,30 @@ export class NodeJSKernelBackend extends KernelBackend { } // Prepares Tensor instances for Op execution. - private getInputTensorIds(tensors: Tensor[]): number[] { + private getInputTensorIds(tensors: Array): number[] { const ids: number[] = []; for (let i = 0; i < tensors.length; i++) { - const info = this.tensorMap.get(tensors[i].dataId); - // TODO - what about ID in this case? Handle in write()?? - if (info.values != null) { - // Values were delayed to write into the TensorHandle. Do that before Op - // execution and clear stored values. 
- info.id = - this.binding.createTensor(info.shape, info.dtype, info.values); - info.values = null; - this.tensorMap.set(tensors[i].dataId, info); + if (tensors[i] instanceof Tensor) { + const info = this.tensorMap.get((tensors[i] as Tensor).dataId); + // TODO - what about ID in this case? Handle in write()?? + if (info.values != null) { + // Values were delayed to write into the TensorHandle. Do that before + // Op execution and clear stored values. + info.id = + this.binding.createTensor(info.shape, info.dtype, info.values); + info.values = null; + this.tensorMap.set((tensors[i] as Tensor).dataId, info); + } + ids.push(info.id); + } else if (tensors[i] instanceof Int64Scalar) { + // Then `tensors[i]` is a Int64Scalar, which we currently represent + // using an `Int32Array`. + const value = (tensors[i] as Int64Scalar).valueArray; + const id = this.binding.createTensor([], this.binding.TF_INT64, value); + ids.push(id); + } else { + throw new Error(`Invalid Tensor type: ${typeof tensors[i]}`); } - ids.push(info.id); } return ids; } @@ -1461,6 +1494,74 @@ export class NodeJSKernelBackend extends KernelBackend { return tensor3d(values, outShape, 'int32'); } + // ------------------------------------------------------------ + // TensorBoard-related (tfjs-node-specific) backend kernels. + + summaryWriter(logdir: string): Tensor1D { + const opAttrs = [ + { + name: 'shared_name', + type: this.binding.TF_ATTR_STRING, + value: `logdir:${logdir}` + }, + {name: 'container', type: this.binding.TF_ATTR_STRING, value: ''} + ]; + const writerResource = + this.executeSingleOutput('SummaryWriter', opAttrs, []); + return writerResource as Tensor1D; + } + + createSummaryFileWriter( + resourceHandle: Tensor, logdir: string, maxQueue?: number, + flushMillis?: number, filenameSuffix?: string): void { + const inputArgs = [ + resourceHandle, scalar(logdir), + scalar(maxQueue == null ? 10 : maxQueue, 'int32'), + scalar(flushMillis == null ? 
2 * 60 * 1000 : flushMillis, 'int32'), + scalar(filenameSuffix == null ? '.v2' : filenameSuffix) + ]; + this.executeMultipleOutputs('CreateSummaryFileWriter', [], inputArgs, 0); + } + + writeScalarSummary( + resourceHandle: Tensor, step: number, name: string, + value: Scalar|number): void { + tidy(() => { + util.assert( + Number.isInteger(step), + `step is expected to be an integer, but is instead ${step}`); + const inputArgs: Array = + [resourceHandle, new Int64Scalar(step), scalar(name, 'string')]; + + let typeAttr: number; + if (typeof value === 'number') { + inputArgs.push(scalar(value)); + typeAttr = this.binding.TF_FLOAT; + } else { + // `value` is a Scalar. + util.assert( + value.rank === 0, + `A non-scalar tensor (rank ${value.rank}) is passed to ` + + `writeScalarSummary()`); + inputArgs.push(value); + typeAttr = this.typeAttributeFromTensor(value); + } + const opAttrs: TFEOpAttr[] = + [{name: 'T', type: this.binding.TF_ATTR_TYPE, value: typeAttr}]; + + this.binding.executeOp( + 'WriteScalarSummary', opAttrs, this.getInputTensorIds(inputArgs), 0); + }); + } + + flushSummaryWriter(resourceHandle: Tensor): void { + const inputArgs: Tensor[] = [resourceHandle]; + this.executeMultipleOutputs('FlushSummaryWriter', [], inputArgs, 0); + } + + // ~ TensorBoard-related (tfjs-node-specific) backend kernels. + // ------------------------------------------------------------ + memory() { // Due to automatic garbage collection, the numbers are unreliable. // TODO(kreeger): Since there is finalization in C, count the true diff --git a/src/ops/op_utils.ts b/src/ops/op_utils.ts index 9968367e..a0a65a96 100644 --- a/src/ops/op_utils.ts +++ b/src/ops/op_utils.ts @@ -45,6 +45,13 @@ export function getTFDType(dataType: tfc.DataType): number { return binding.TF_COMPLEX64; case 'string': return binding.TF_STRING; + // tslint:disable-next-line:no-any + case 'int64' as any: + // int64 is not a generally supported dtype in TensorFlow.js + // (tfjs-core). 
However, it needs to be included here for the purpose of + // writing the `step` value to TensorBoard via WriteScalarSummary and + // other op kernels. + return binding.TF_INT64; default: const errorMessage = `Unknown dtype: ${dataType}`; throw new Error(errorMessage); diff --git a/src/tensorboard.ts b/src/tensorboard.ts new file mode 100644 index 00000000..8b929681 --- /dev/null +++ b/src/tensorboard.ts @@ -0,0 +1,106 @@ + +/** + * @license + * Copyright 2019 Google Inc. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + +import {Scalar, Tensor, util} from '@tensorflow/tfjs'; +import {NodeJSKernelBackend} from './nodejs_kernel_backend'; +import {nodeBackend} from './ops/op_utils'; + +export class SummaryFileWriter { + backend: NodeJSKernelBackend; + + constructor(private readonly resourceHandle: Tensor) { + this.backend = nodeBackend(); + } + + /** + * Write a scalar summary. + * + * @param name A name of the summary. The summary tag for TensorBoard will be + * this name. + * @param value A real numeric scalar value, as `tf.Scalar` or a JavaScript + * `number`. + * @param step Required `int64`-castable, monotonically-increasing step value. + * @param description Optional long-form description for this summary, as a + * `string`. *Not implemented yet*. 
+ */ + scalar( + name: string, value: Scalar|number, step: number, description?: string) { + // N.B.: Unlike the Python TensorFlow API, step is a required parameter, + // because the construct of global step does not exist in TensorFlow.js. + if (description != null) { + throw new Error('scalar() does not support description yet'); + } + + this.backend.writeScalarSummary(this.resourceHandle, step, name, value); + } + + /** + * Force summary writer to send all buffered data to storage. + */ + flush() { + this.backend.flushSummaryWriter(this.resourceHandle); + } +} + +/** + * Use a cache for `SummaryFileWriter` instance. + * + * Using multiple instances of `SummaryFileWriter` pointing to the same + * logdir has potential problems. Using this cache avoids those problems. + */ +const summaryFileWriterCache: {[logdir: string]: SummaryFileWriter} = {}; + +/** + * Create a summary file writer for TensorBoard. + * + * Example: + * ```javascript + * const tf = require('@tensorflow/tfjs-node'); + * + * const summaryWriter = tf.node.summaryFileWriter('/tmp/tfjs_tb_logdir'); + * + * for (let step = 0; step < 100; ++step) { + * summaryWriter.scalar('dummyValue', Math.sin(2 * Math.PI * step / 8), step); + * } + * ``` + * + * @param logdir Log directory in which the summary data will be written. + * @param maxQueue Maximum queue length (default: `10`). + * @param flushMillis Flush every __ milliseconds (default: `120e3`, i.e, + * `120` seconds). + * @param filenameSuffix Suffix of the protocol buffer file names to be + * written in the `logdir` (default: `.v2`). + * @returns An instance of `SummaryFileWriter`. + */ +export function summaryFileWriter( + logdir: string, maxQueue = 10, flushMillis = 120000, + filenameSuffix = '.v2'): SummaryFileWriter { + util.assert( + logdir != null && typeof logdir === 'string' && logdir.length > 0, + `Invalid logdir: ${logdir}. 
Expected a non-empty string for logdir.`); + if (!(logdir in summaryFileWriterCache)) { + const backend = nodeBackend(); + const writerResource = backend.summaryWriter(logdir); + + backend.createSummaryFileWriter( + writerResource, logdir, maxQueue, flushMillis, filenameSuffix); + + summaryFileWriterCache[logdir] = new SummaryFileWriter(writerResource); + } + return summaryFileWriterCache[logdir]; +} diff --git a/src/tensorboard_test.ts b/src/tensorboard_test.ts new file mode 100644 index 00000000..32f1b477 --- /dev/null +++ b/src/tensorboard_test.ts @@ -0,0 +1,145 @@ +/** + * @license + * Copyright 2018 Google Inc. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * ============================================================================= + */ + +import {scalar} from '@tensorflow/tfjs'; +import * as fs from 'fs'; +import * as path from 'path'; +import * as tfn from './index'; + +// tslint:disable-next-line:no-require-imports +const shelljs = require('shelljs'); +// tslint:disable-next-line:no-require-imports +const tmp = require('tmp'); + +describe('tensorboard', () => { + let tmpLogDir: string; + + beforeEach(() => { + tmpLogDir = tmp.dirSync().name; + }); + + afterEach(() => { + if (tmpLogDir != null) { + shelljs.rm('-rf', tmpLogDir); + } + }); + + it('Create summaryFileWriter and write scalar', () => { + const writer = tfn.node.summaryFileWriter(tmpLogDir); + writer.scalar('foo', 42, 0); + writer.flush(); + + // Currently, we only verify that the file exists and the size + // increases in a sensible way as we write more scalars to it. + // The difficulty is in reading the protobuf contents of the event + // file in JavaScript/TypeScript. + const fileNames = fs.readdirSync(tmpLogDir); + expect(fileNames.length).toEqual(1); + const eventFilePath = path.join(tmpLogDir, fileNames[0]); + const fileSize0 = fs.statSync(eventFilePath).size; + + writer.scalar('foo', 43, 1); + writer.flush(); + const fileSize1 = fs.statSync(eventFilePath).size; + const incrementPerScalar = fileSize1 - fileSize0; + expect(incrementPerScalar).toBeGreaterThan(0); + + writer.scalar('foo', 44, 2); + writer.scalar('foo', 45, 3); + writer.flush(); + const fileSize2 = fs.statSync(eventFilePath).size; + expect(fileSize2 - fileSize1).toEqual(2 * incrementPerScalar); + }); + + it('Writing tf.Scalar works', () => { + const writer = tfn.node.summaryFileWriter(tmpLogDir); + writer.scalar('foo', scalar(42), 0); + writer.flush(); + + // Currently, we only verify that the file exists and the size + // increases in a sensible way as we write more scalars to it. 
+ // The difficulty is in reading the protobuf contents of the event + // file in JavaScript/TypeScript. + const fileNames = fs.readdirSync(tmpLogDir); + expect(fileNames.length).toEqual(1); + }); + + it('No crosstalk between two summary writers', () => { + const logDir1 = path.join(tmpLogDir, '1'); + const writer1 = tfn.node.summaryFileWriter(logDir1); + writer1.scalar('foo', 42, 0); + writer1.flush(); + + const logDir2 = path.join(tmpLogDir, '2'); + const writer2 = tfn.node.summaryFileWriter(logDir2); + writer2.scalar('foo', 1.337, 0); + writer2.flush(); + + // Currently, we only verify that the file exists and the size + // increases in a sensible way as we write more scalars to it. + // The difficulty is in reading the protobuf contents of the event + // file in JavaScript/TypeScript. + let fileNames = fs.readdirSync(logDir1); + expect(fileNames.length).toEqual(1); + const eventFilePath1 = path.join(logDir1, fileNames[0]); + const fileSize1Num0 = fs.statSync(eventFilePath1).size; + + fileNames = fs.readdirSync(logDir2); + expect(fileNames.length).toEqual(1); + const eventFilePath2 = path.join(logDir2, fileNames[0]); + const fileSize2Num0 = fs.statSync(eventFilePath2).size; + expect(fileSize2Num0).toBeGreaterThan(0); + + writer1.scalar('foo', 43, 1); + writer1.flush(); + const fileSize1Num1 = fs.statSync(eventFilePath1).size; + const incrementPerScalar = fileSize1Num1 - fileSize1Num0; + expect(incrementPerScalar).toBeGreaterThan(0); + + writer1.scalar('foo', 44, 2); + writer1.scalar('foo', 45, 3); + writer1.flush(); + const fileSize1Num2 = fs.statSync(eventFilePath1).size; + expect(fileSize1Num2 - fileSize1Num1).toEqual(2 * incrementPerScalar); + + const fileSize2Num1 = fs.statSync(eventFilePath2).size; + expect(fileSize2Num1).toEqual(fileSize2Num0); + + writer2.scalar('foo', 1.336, 1); + writer2.scalar('foo', 1.335, 2); + writer2.flush(); + + const fileSize1Num3 = fs.statSync(eventFilePath1).size; + expect(fileSize1Num3).toEqual(fileSize1Num2); + const 
fileSize2Num2 = fs.statSync(eventFilePath2).size; + expect(fileSize2Num2 - fileSize2Num1).toEqual(2 * incrementPerScalar); + }); + + it('Writing into existing directory works', () => { + shelljs.mkdir('-p', tmpLogDir); + const writer = tfn.node.summaryFileWriter(path.join(tmpLogDir, '22')); + writer.scalar('foo', 42, 0); + writer.flush(); + + const fileNames = fs.readdirSync(tmpLogDir); + expect(fileNames.length).toEqual(1); + }); + + it('empty logdir leads to error', () => { + expect(() => tfn.node.summaryFileWriter('')).toThrowError(/empty string/); + }); +}); diff --git a/src/tfjs_binding.d.ts b/src/tfjs_binding.d.ts index 656ef4d1..bd92811c 100644 --- a/src/tfjs_binding.d.ts +++ b/src/tfjs_binding.d.ts @@ -50,9 +50,11 @@ export interface TFJSBinding { // TF Types TF_FLOAT: number; TF_INT32: number; + TF_INT64: number; TF_BOOL: number; TF_COMPLEX64: number; TF_STRING: number; + TF_RESOURCE: number; // TF OpAttrTypes TF_ATTR_STRING: number; @@ -61,6 +63,7 @@ export interface TFJSBinding { TF_ATTR_BOOL: number; TF_ATTR_TYPE: number; TF_ATTR_SHAPE: number; + TF_ATTR_RESOURCE: number; TF_Version: string; } diff --git a/yarn.lock b/yarn.lock index 72b2f10e..de79fdc8 100644 --- a/yarn.lock +++ b/yarn.lock @@ -1095,6 +1095,11 @@ inherits@2, inherits@~2.0.0: resolved "https://registry.yarnpkg.com/inherits/-/inherits-2.0.3.tgz#633c2c83e3da42a502f52466022480f4208261de" integrity sha1-Yzwsg+PaQqUC9SRmAiSA9CCCYd4= +interpret@^1.0.0: + version "1.2.0" + resolved "https://registry.yarnpkg.com/interpret/-/interpret-1.2.0.tgz#d5061a6224be58e8083985f5014d844359576296" + integrity sha512-mT34yGKMNceBQUoVn7iCDKDntA7SC6gycMAWzGx1z/CMCTV7b2AAtXlo3nRyHZ1FelRkQbQjprHSYGwzLtkVbw== + invariant@^2.2.0: version "2.2.4" resolved "https://registry.yarnpkg.com/invariant/-/invariant-2.2.4.tgz#610f3c92c9359ce1db616e538008d23ff35158e6" @@ -1756,6 +1761,11 @@ os-locale@^2.0.0: lcid "^1.0.0" mem "^1.1.0" +os-tmpdir@~1.0.2: + version "1.0.2" + resolved 
"https://registry.yarnpkg.com/os-tmpdir/-/os-tmpdir-1.0.2.tgz#bbe67406c79aa85c5cfec766fe5734555dfa1274" + integrity sha1-u+Z0BseaqFxc/sdm/lc0VV36EnQ= + p-finally@^1.0.0: version "1.0.0" resolved "https://registry.yarnpkg.com/p-finally/-/p-finally-1.0.0.tgz#3fbcfb15b899a44123b34b6dcc18b724336a2cae" @@ -1970,6 +1980,13 @@ read-pkg@^3.0.0: normalize-package-data "^2.3.2" path-type "^3.0.0" +rechoir@^0.6.2: + version "0.6.2" + resolved "https://registry.yarnpkg.com/rechoir/-/rechoir-0.6.2.tgz#85204b54dba82d5742e28c96756ef43af50e3384" + integrity sha1-hSBLVNuoLVdC4oyWdW70OvUOM4Q= + dependencies: + resolve "^1.1.6" + regex-not@^1.0.0, regex-not@^1.0.2: version "1.0.2" resolved "https://registry.yarnpkg.com/regex-not/-/regex-not-1.0.2.tgz#1f4ece27e00b0b65e0247a6810e6a85d83a5752c" @@ -2098,6 +2115,15 @@ shebang-regex@^1.0.0: resolved "https://registry.yarnpkg.com/shebang-regex/-/shebang-regex-1.0.0.tgz#da42f49740c0b42db2ca9728571cb190c98efea3" integrity sha1-2kL0l0DAtC2yypcoVxyxkMmO/qM= +shelljs@^0.8.3: + version "0.8.3" + resolved "https://registry.yarnpkg.com/shelljs/-/shelljs-0.8.3.tgz#a7f3319520ebf09ee81275b2368adb286659b097" + integrity sha512-fc0BKlAWiLpwZljmOvAOTE/gXawtCoNrP5oaY7KIaQbbyHeQVg01pSEuEGvGh3HEdBU4baCD7wQBwADmM/7f7A== + dependencies: + glob "^7.0.0" + interpret "^1.0.0" + rechoir "^0.6.2" + signal-exit@^3.0.0, signal-exit@^3.0.1, signal-exit@^3.0.2: version "3.0.2" resolved "https://registry.yarnpkg.com/signal-exit/-/signal-exit-3.0.2.tgz#b5fdc08f1287ea1178628e415e25132b73646c6d" @@ -2327,6 +2353,13 @@ test-exclude@^4.2.0: read-pkg-up "^3.0.0" require-main-filename "^1.0.1" +tmp@^0.0.33: + version "0.0.33" + resolved "https://registry.yarnpkg.com/tmp/-/tmp-0.0.33.tgz#6d34335889768d21b2bcda0aa277ced3b1bfadf9" + integrity sha512-jRCJlojKnZ3addtTOjdIqoRuPEKBvNXcGYqzO6zWZX8KfKEpnGY5jfggJQ3EjKuu8D4bJRr0y+cYJFmYbImXGw== + dependencies: + os-tmpdir "~1.0.2" + to-fast-properties@^2.0.0: version "2.0.0" resolved 
"https://registry.yarnpkg.com/to-fast-properties/-/to-fast-properties-2.0.0.tgz#dc5e698cbd079265bc73e0377681a4e4e83f616e"