
Commit

Pr 2.10.0 (#14)
* update schema.fbs and tflite module to v2.10.0

* Auto-generate tflite/utils.py with the new opcode-to-name mapping

* Auto-generate tflite/__init__.py from the new schema.fbs

* update api doc for v2.10.0

* update my mail

* version to 2.10.0

* add the missing docs

* make packaging tool happy

* fix links in readme

* Fix test for the BuiltinCode() API compatibility issue in TensorFlow

The builtin operator code was extended from 8 bits to 32 bits in TensorFlow 2.4.x.
See this example of how to handle it gracefully in your code:
https://github.com/apache/tvm/blob/b20b7c4ad4ad3774a42f47614245f8eeabe875cb/python/tvm/relay/frontend/tflite.py#L297-L316
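A rough sketch of that graceful handling, in the spirit of the linked TVM code (the method names below come from the flatc-generated bindings; treat the helper itself as an illustration, not part of this commit):

```python
# Sketch: resolve the builtin operator code in a way that keeps working across
# the 8-bit to 32-bit extension introduced in TensorFlow 2.4.x.
def get_builtin_code(op_code):
    """op_code is a parsed tflite.OperatorCode object."""
    try:
        # Since schema v3a the old 8-bit value lives in DeprecatedBuiltinCode();
        # for codes that fit in 8 bits both fields are set, so take the larger one.
        return max(op_code.BuiltinCode(), op_code.DeprecatedBuiltinCode())
    except AttributeError:
        # Older generated bindings expose only BuiltinCode().
        return op_code.BuiltinCode()
```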
zhenhuaw-me authored Nov 12, 2022
1 parent 6bdbf07 commit c7331fe
Showing 311 changed files with 25,647 additions and 1,272 deletions.
182 changes: 171 additions & 11 deletions 3rdparty/schema.fbs
@@ -19,6 +19,8 @@
// Version 3: Move buffer data from Model.Subgraph.Tensors to Model.Buffers.
// Version 3a: Add new builtin op code field. Has backward compatibility with
// version 3.
// Version 3b: Rename fields in SignatureDef. Has backward compatibility with
// version 3 and 3a.

namespace tflite;

@@ -44,6 +46,14 @@ enum TensorType : byte {
INT8 = 9,
FLOAT64 = 10,
COMPLEX128 = 11,
UINT64 = 12,
// Experimental: Resource and variant types are experimental and subject
// to change. Do not implement custom kernels using resource & variant types
// now.
RESOURCE = 13,
VARIANT = 14,
UINT32 = 15,
UINT16 = 16
}

// Custom quantization parameters for experimenting with new quantization
@@ -210,13 +220,17 @@ table Tensor {
// Encodes `shape` with unknown dimensions. Unknown dimensions are
// represented with -1.
shape_signature:[int]; // Optional.

// If false, the rank or the number of tensor dimensions is unknown.
// If false, "shape" must be [].
has_rank: bool = false;
}
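(Editorial aside, not part of the diff: the two fields above would typically be inspected through the flatc-generated Python bindings roughly as sketched below; the accessor names follow flatc's camel-case convention and the model path is a placeholder.)

```python
# Sketch: distinguish "rank unknown" from "rank known but some dims dynamic".
import tflite

with open("model.tflite", "rb") as f:  # placeholder path
    model = tflite.Model.GetRootAsModel(f.read(), 0)

tensor = model.Subgraphs(0).Tensors(0)
if not tensor.HasRank() and tensor.ShapeLength() == 0:
    print("rank unknown")
elif tensor.ShapeSignatureLength() > 0:
    # In shape_signature, -1 marks a dimension whose size is only known at runtime.
    print("shape signature:", tensor.ShapeSignatureAsNumpy())
else:
    print("static shape:", tensor.ShapeAsNumpy())
```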

// A list of builtin operators. Builtin operators are slightly faster than custom
// ones, but not by much. Moreover, while custom operators accept an opaque
// object containing configuration parameters, builtins have a predetermined
// set of acceptable options.

// LINT.IfChange
enum BuiltinOperator : int32 {
ADD = 0,
AVERAGE_POOL_2D = 1,
@@ -352,9 +366,37 @@ enum BuiltinOperator : int32 {
SEGMENT_SUM = 125,
BATCH_MATMUL = 126,
PLACEHOLDER_FOR_GREATER_OP_CODES = 127,
CUMSUM = 128
}

CUMSUM = 128,
CALL_ONCE = 129,
BROADCAST_TO = 130,
RFFT2D = 131,
CONV_3D = 132,
IMAG = 133,
REAL = 134,
COMPLEX_ABS = 135,
HASHTABLE = 136,
HASHTABLE_FIND = 137,
HASHTABLE_IMPORT = 138,
HASHTABLE_SIZE = 139,
REDUCE_ALL = 140,
CONV_3D_TRANSPOSE = 141,
VAR_HANDLE = 142,
READ_VARIABLE = 143,
ASSIGN_VARIABLE = 144,
BROADCAST_ARGS = 145,
RANDOM_STANDARD_NORMAL = 146,
BUCKETIZE = 147,
RANDOM_UNIFORM = 148,
MULTINOMIAL = 149,
GELU = 150,
DYNAMIC_UPDATE_SLICE = 151,
RELU_0_TO_1 = 152,
UNSORTED_SEGMENT_PROD = 153,
UNSORTED_SEGMENT_MAX = 154,
UNSORTED_SEGMENT_SUM = 155,
ATAN2 = 156
}
// LINT.ThenChange(nnapi_linter/linter.proto)

// Options for the builtin operators.
union BuiltinOptions {
@@ -460,10 +502,32 @@ union BuiltinOptions {
SegmentSumOptions,
BatchMatMulOptions,
CumsumOptions,
}

CallOnceOptions,
BroadcastToOptions,
Rfft2dOptions,
Conv3DOptions,
HashtableOptions,
HashtableFindOptions,
HashtableImportOptions,
HashtableSizeOptions,
VarHandleOptions,
ReadVariableOptions,
AssignVariableOptions,
RandomOptions,
BucketizeOptions,
GeluOptions,
DynamicUpdateSliceOptions,
UnsortedSegmentProdOptions,
UnsortedSegmentMaxOptions,
UnsortedSegmentSumOptions,
ATan2Options
}

// LINT.IfChange
enum Padding : byte { SAME, VALID }
// LINT.ThenChange(//tensorflow/compiler/mlir/lite/ir/tfl_op_enums.td)

// LINT.IfChange
enum ActivationFunctionType : byte {
NONE = 0,
RELU = 1,
@@ -472,6 +536,7 @@ enum ActivationFunctionType : byte {
TANH = 4,
SIGN_BIT = 5,
}
// LINT.ThenChange(//tensorflow/compiler/mlir/lite/ir/tfl_op_enums.td)

table Conv2DOptions {
padding:Padding;
@@ -482,6 +547,18 @@ table Conv2DOptions {
dilation_h_factor:int = 1;
}

// Options for both Conv3D and Conv3DTranspose.
table Conv3DOptions {
padding:Padding;
stride_d:int;
stride_w:int;
stride_h:int;
fused_activation_function:ActivationFunctionType;
dilation_d_factor:int = 1;
dilation_w_factor:int = 1;
dilation_h_factor:int = 1;
}

table Pool2DOptions {
padding:Padding;
stride_w:int;
@@ -551,10 +628,12 @@ table BidirectionalSequenceRNNOptions {
asymmetric_quantize_inputs:bool;
}

// LINT.IfChange
enum FullyConnectedOptionsWeightsFormat: byte {
DEFAULT = 0,
SHUFFLED4x16INT8 = 1,
}
// LINT.ThenChange(//tensorflow/compiler/mlir/lite/ir/tfl_op_enums.td)

// An implementation of TensorFlow fully_connected (a.k.a Dense) layer.
table FullyConnectedOptions {
@@ -587,7 +666,7 @@ table ConcatenationOptions {

table AddOptions {
fused_activation_function:ActivationFunctionType;
// Parameters supported by version 4.
// Parameters supported by version 3.
pot_scale_int16:bool = true;
}

@@ -596,6 +675,7 @@ table MulOptions {
}

table L2NormOptions {
// This field is currently ignored in the L2 Norm Op.
fused_activation_function:ActivationFunctionType;
}

@@ -606,12 +686,14 @@ table LocalResponseNormalizationOptions {
beta:float;
}

// LINT.IfChange
enum LSTMKernelType : byte {
// Full LSTM kernel which supports peephole and projection.
FULL = 0,
// Basic LSTM kernels. Equivalent to TensorFlow BasicLSTMCell.
BASIC = 1,
}
// LINT.ThenChange(//tensorflow/compiler/mlir/lite/ir/tfl_op_enums.td)

// An implementation of TensorFlow LSTMCell and CoupledInputForgetGateLSTMCell
table LSTMOptions {
@@ -733,6 +815,8 @@ table EmbeddingLookupSparseOptions {

table GatherOptions {
axis: int;
// Parameters for Gather version 5 or above.
batch_dims: int = 0;
}

table TransposeOptions {
@@ -909,12 +993,14 @@ table LeakyReluOptions {
table SquaredDifferenceOptions {
}

// LINT.IfChange
enum MirrorPadMode : byte {
// Doesn't include borders.
REFLECT = 0,
// Includes borders.
SYMMETRIC = 1,
}
// LINT.ThenChange(//tensorflow/compiler/mlir/lite/ir/tfl_op_enums.td)

table MirrorPadOptions {
mode:MirrorPadMode;
@@ -955,6 +1041,10 @@ table IfOptions {
else_subgraph_index:int;
}

table CallOnceOptions {
init_subgraph_index:int;
}

table WhileOptions {
cond_subgraph_index:int;
body_subgraph_index:int;
@@ -981,13 +1071,80 @@ table SegmentSumOptions {
table BatchMatMulOptions {
adj_x:bool;
adj_y:bool;
// Parameters for BatchMatMul version 4 or above.
// If set to true, then weights-only op will use asymmetric quantization for
// inputs.
asymmetric_quantize_inputs: bool;
}

table CumsumOptions {
exclusive:bool;
reverse:bool;
}

table BroadcastToOptions {
}

table Rfft2dOptions {
}

table HashtableOptions {
// The identity of hash tables. This identity will be used across different
// subgraphs in the same interpreter instance.
table_id:int;
key_dtype:TensorType;
value_dtype:TensorType;
}

table HashtableFindOptions {
}

table HashtableImportOptions {
}

table HashtableSizeOptions {
}

table VarHandleOptions {
container:string;
shared_name:string;
}

table ReadVariableOptions {
}

table AssignVariableOptions {
}

table RandomOptions {
seed: long;
seed2: long;
}

table BucketizeOptions {
boundaries: [float]; // The bucket boundaries.
}

table GeluOptions {
approximate: bool;
}

table DynamicUpdateSliceOptions {
}

table UnsortedSegmentProdOptions {
}

table UnsortedSegmentMaxOptions {
}

table UnsortedSegmentSumOptions {
}

table ATan2Options {
}


// An OperatorCode can be an enum value (BuiltinOperator) if the operator is a
// builtin, or a string if the operator is custom.
table OperatorCode {
@@ -1103,11 +1260,14 @@ table SignatureDef {
// Named outputs for this signature.
outputs:[TensorMap];

// Exported method name for this signature.
method_name:string;

// Key value which was in the Tensorflow SavedModel SignatureDef map.
key:string;
signature_key:string;

// Model tag, deprecated.
deprecated_tag:string (deprecated);

// Index of subgraphs that corresponds to the exported method.
subgraph_index:uint;
}
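(Editorial aside, not part of the diff: the renamed signature_key field would be read through the flatc-generated Python bindings roughly as below; the accessor names are inferred from the schema and the model path is a placeholder.)

```python
# Sketch: list each signature's key, the field that replaces method_name/key in v3b.
import tflite

with open("model.tflite", "rb") as f:  # placeholder path
    model = tflite.Model.GetRootAsModel(f.read(), 0)

for i in range(model.SignatureDefsLength()):
    sig = model.SignatureDefs(i)
    print(sig.SignatureKey(), "-> subgraph", sig.SubgraphIndex())
```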

table Model {
4 changes: 2 additions & 2 deletions README.md
@@ -50,8 +50,8 @@ As the operator definition may change across different TensorFlow versions, this
2. [Update](scripts/gen-op-list.py) the builtin operator mapping.
3. [Update](scripts/update-importing.py) the classes and functions import of submodules.
4. [Update](scripts/gen-doc.sh) the API document.
5. Update the versioning in [setup.py](setup.py).
6. [Build](scripts/build.sh) and [Test](tests) (simply `pytest`) around. Don't forget to re-install the newly built `tflite` package before testing it.
5. Update the versioning in [`__init__.py`](tflite/__init__.py).
6. [Build](scripts/build-wheel.sh) and [test](tests) (simply `pytest`) around. Don't forget to re-install the newly built `tflite` package before testing it.
4. Push your change and open [Pull Request](https://help.github.com/en/github/collaborating-with-issues-and-pull-requests/about-pull-requests).
5. The maintainer will take the responsibility to upload change to PyPI when merged.
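As an aside on step 6 above, a minimal post-install smoke test might look like the sketch below. It assumes a local `model.tflite` file and that the regenerated package still exposes `tflite.opcode2name()` alongside the flatc-generated `Model` bindings.

```python
# Sketch: walk a model with the freshly re-installed package and name every operator.
import tflite

with open("model.tflite", "rb") as f:  # placeholder path
    model = tflite.Model.GetRootAsModel(f.read(), 0)

graph = model.Subgraphs(0)
for i in range(graph.OperatorsLength()):
    op = graph.Operators(i)
    op_code = model.OperatorCodes(op.OpcodeIndex())
    # Prefer the wider of the two builtin-code fields (see the BuiltinCode() note above).
    code = max(op_code.BuiltinCode(), op_code.DeprecatedBuiltinCode())
    print(i, tflite.opcode2name(code))
```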

