Added versioning for ADD/SUB with a new option in schema.fbs
schema_generated.h is edited manually.
wwwind committed Apr 3, 2020
1 parent dd0d9e8 commit abae3fd
Showing 8 changed files with 56 additions and 60 deletions.
4 changes: 4 additions & 0 deletions tensorflow/lite/c/builtin_op_data.h
@@ -181,6 +181,8 @@ typedef struct {

typedef struct {
TfLiteFusedActivation activation;
// Parameter added for version 4.
bool pot_scale_int16;
} TfLiteAddParams;

typedef struct {
@@ -197,6 +199,8 @@ typedef struct {

typedef struct {
TfLiteFusedActivation activation;
// Parameter added for version 5.
bool pot_scale_int16;
} TfLiteSubParams;

typedef struct {
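For reference, a minimal sketch (not part of this commit) of filling TfLiteAddParams by hand, for example in a test or custom registration path; the struct and field names come from the diff above, while the helper name and allocation strategy are illustrative assumptions.

#include <cstdlib>

#include "tensorflow/lite/c/builtin_op_data.h"

// Illustrative only: allocate and fill TfLiteAddParams, requesting the
// general-rescaling int16 path by clearing pot_scale_int16.
TfLiteAddParams* MakeAddParams() {
  auto* params =
      static_cast<TfLiteAddParams*>(malloc(sizeof(TfLiteAddParams)));
  params->activation = kTfLiteActNone;
  params->pot_scale_int16 = false;  // int16 tensors with arbitrary scales
  return params;
}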
2 changes: 2 additions & 0 deletions tensorflow/lite/core/api/flatbuffer_conversions.cc
@@ -377,6 +377,7 @@ TfLiteStatus ParseOpData(const Operator* op, BuiltinOperator op_type,
if (const auto* schema_params = op->builtin_options_as_AddOptions()) {
params->activation =
parse_activation(schema_params->fused_activation_function());
params->pot_scale_int16 = schema_params->pot_scale_int16();
}
*builtin_data = reinterpret_cast<void*>(params.release());
break;
@@ -395,6 +396,7 @@ TfLiteStatus ParseOpData(const Operator* op, BuiltinOperator op_type,
if (const auto* schema_params = op->builtin_options_as_SubOptions()) {
params->activation =
parse_activation(schema_params->fused_activation_function());
params->pot_scale_int16 = schema_params->pot_scale_int16();
}
*builtin_data = reinterpret_cast<void*>(params.release());
break;
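The parsed struct reaches the runtime through node->builtin_data. A hypothetical consumer could read it back as sketched below; note that in this commit the ADD/SUB kernels still recompute the flag from the tensors' quantization parameters in Prepare (see add.cc below), so the parsed value is mainly consumed by the versioning tooling at the end of this diff. The function name and structure here are illustrative.

#include "tensorflow/lite/c/builtin_op_data.h"
#include "tensorflow/lite/c/common.h"

// Hypothetical consumer sketch; not how add.cc uses the flag in this commit.
TfLiteStatus ReadAddParamsSketch(TfLiteNode* node) {
  const auto* params =
      reinterpret_cast<const TfLiteAddParams*>(node->builtin_data);
  if (params != nullptr && !params->pot_scale_int16) {
    // General-rescaling int16 path requested.
  }
  return kTfLiteOk;
}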
20 changes: 10 additions & 10 deletions tensorflow/lite/kernels/add.cc
@@ -62,7 +62,7 @@ struct OpData {
// This parameter is used to indicate whether
// parameter scale is power of two.
// It is used in 16-bit -> 16-bit quantization.
bool pot_scale_16bit;
bool pot_scale_int16;
};

void* Init(TfLiteContext* context, const char* buffer, size_t length) {
@@ -99,8 +99,8 @@ TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
}

// 8bit -> 8bit general quantized path, with general rescalings
// as well as, 16bit -> 16bit with general rescalings
bool pot_scale_16bit = true;
// as well as, int16 -> int16 with general rescalings
bool pot_scale_int16 = true;

bool input1_scale_is_pot = false;
bool input2_scale_is_pot = false;
@@ -116,7 +116,7 @@ TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
// the scale parameter is a general number
// the scale parameter is POT and
// zero_point is zero for inputs/output.
pot_scale_16bit = (input1->params.zero_point == 0) &&
pot_scale_int16 = (input1->params.zero_point == 0) &&
(input2->params.zero_point == 0) &&
(output->params.zero_point == 0);

@@ -129,14 +129,14 @@ TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
output_scale_is_pot =
CheckedLog2(output->params.scale, &output_scale_log2_rounded);

pot_scale_16bit &=
input1_scale_is_pot && input2_scale_is_pot && output_scale_is_pot;
pot_scale_int16 &= input1_scale_is_pot && input2_scale_is_pot &&
output_scale_is_pot;
}

data->pot_scale_16bit = pot_scale_16bit;
data->pot_scale_int16 = pot_scale_int16;

if (output->type == kTfLiteUInt8 || output->type == kTfLiteInt8 ||
!pot_scale_16bit) {
!pot_scale_int16) {
// 8bit -> 8bit general quantized path, with general rescalings
// as well as, 16bit -> 16bit with general rescalings
data->input1_offset = -input1->params.zero_point;
@@ -146,7 +146,7 @@ TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
// The shift is set to 15 for 16-bit and 20 in case of 8-bit, accordingly.
// In case of 16-bit we have 65535 << 15 which is less than 1 << 31,
// therefore the addition will still fit in a 32 bit accumulator.
data->left_shift = !pot_scale_16bit ? 15 : 20;
data->left_shift = !pot_scale_int16 ? 15 : 20;
const double twice_max_input_scale =
2 * std::max(input1->params.scale, input2->params.scale);
const double real_input1_multiplier =
@@ -259,7 +259,7 @@ TfLiteStatus EvalAddQuantized(TfLiteContext* context, TfLiteNode* node,
const TfLiteTensor* input2,
TfLiteTensor* output) {
if (output->type == kTfLiteUInt8 || output->type == kTfLiteInt8 ||
!data->pot_scale_16bit) {
!data->pot_scale_int16) {
tflite::ArithmeticParams op_params;
op_params.left_shift = data->left_shift;
op_params.input1_offset = data->input1_offset;
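Two notes on the Prepare logic above. The overflow comment checks out: with a left shift of 15, the largest shifted 16-bit magnitude is 65535 * 2^15 = 2,147,450,880, still below 2^31 = 2,147,483,648, so the sum fits a 32-bit accumulator. And for readers unfamiliar with CheckedLog2, here is a rough sketch of what a power-of-two scale test amounts to; it is an illustration under an assumed rounding tolerance, not the actual implementation in kernels/internal.

#include <cmath>

// Illustrative power-of-two test: round log2(scale) to the nearest integer
// and accept the scale only if 2^rounded reproduces it within a small
// relative tolerance (assumed here; the real CheckedLog2 may differ).
bool ScaleIsPowerOfTwoSketch(double scale, int* log2_rounded) {
  if (scale <= 0.0) return false;
  *log2_rounded = static_cast<int>(std::round(std::log2(scale)));
  return std::abs(std::ldexp(1.0, *log2_rounded) - scale) < 1e-7 * scale;
}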
16 changes: 8 additions & 8 deletions tensorflow/lite/kernels/sub.cc
@@ -64,7 +64,7 @@ struct OpData {
// This parameter is used to indicate whether
// parameter scale is power of two.
// It is used in 16-bit -> 16-bit quantization.
bool pot_scale_16bit;
bool pot_scale_int16;
};

void* Init(TfLiteContext* context, const char* buffer, size_t length) {
@@ -225,7 +225,7 @@ TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {

// 8bit -> 8bit general quantized path, with general rescalings
// as well as, 16bit -> 16bit with general rescalings
bool pot_scale_16bit = true;
bool pot_scale_int16 = true;

bool input1_scale_is_pot = false;
bool input2_scale_is_pot = false;
@@ -241,7 +241,7 @@ TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
// the scale parameter is a general number
// the scale parameter is POT and
// zero_point is zero for inputs/output.
pot_scale_16bit = (input1->params.zero_point == 0) &&
pot_scale_int16 = (input1->params.zero_point == 0) &&
(input2->params.zero_point == 0) &&
(output->params.zero_point == 0);

@@ -254,14 +254,14 @@ TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
output_scale_is_pot =
CheckedLog2(output->params.scale, &output_scale_log2_rounded);

pot_scale_16bit &=
input1_scale_is_pot && input2_scale_is_pot && output_scale_is_pot;
pot_scale_int16 &= input1_scale_is_pot && input2_scale_is_pot &&
output_scale_is_pot;
}

data->pot_scale_16bit = pot_scale_16bit;
data->pot_scale_int16 = pot_scale_int16;

if (output->type == kTfLiteUInt8 || output->type == kTfLiteInt8 ||
!pot_scale_16bit) {
!pot_scale_int16) {
TF_LITE_ENSURE_OK(context, PrepareGeneralSubOp(context, input1, input2,
output, params, data, -1));
} else if (output->type == kTfLiteInt16) {
@@ -355,7 +355,7 @@ void EvalQuantized(TfLiteContext* context, TfLiteNode* node,
} else {
TF_LITE_SUB(reference_integer_ops, Add, int8_t);
}
} else if (!data->pot_scale_16bit) {
} else if (!data->pot_scale_int16) {
if (need_broadcast) {
TF_LITE_SUB(reference_ops, BroadcastAdd4DSlow, int16_t);
} else {
4 changes: 4 additions & 0 deletions tensorflow/lite/schema/schema.fbs
@@ -570,6 +570,8 @@ table ConcatenationOptions {

table AddOptions {
fused_activation_function:ActivationFunctionType;
// Parameters supported by version 4.
pot_scale_int16:bool = true;
}

table MulOptions {
@@ -681,6 +683,8 @@ table DepthToSpaceOptions {

table SubOptions {
fused_activation_function:ActivationFunctionType;
// Parameters supported by version 5.
pot_scale_int16:bool = true;
}

table DivOptions {
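The = true default is what keeps older models working: a model serialized before this change has no pot_scale_int16 entry, so readers fall back to the default and keep the existing power-of-two int16 behaviour. A small reader-side sketch of that fallback follows; the helper name is illustrative, while the accessors are the ones generated in schema_generated.h below.

#include "tensorflow/lite/schema/schema_generated.h"

// Sketch: fetch the flag for an ADD operator; an absent field resolves to the
// schema default (true), and a missing options table is treated the same way.
bool GetAddPotScaleInt16(const tflite::Operator* op) {
  const auto* opts = op->builtin_options_as_AddOptions();
  return opts ? opts->pot_scale_int16() : true;
}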
22 changes: 18 additions & 4 deletions tensorflow/lite/schema/schema_generated.h
@@ -4676,22 +4676,29 @@ flatbuffers::Offset<ConcatenationOptions> CreateConcatenationOptions(flatbuffers

struct AddOptionsT : public flatbuffers::NativeTable {
typedef AddOptions TableType;
bool pot_scale_int16;
tflite::ActivationFunctionType fused_activation_function;
AddOptionsT()
: fused_activation_function(tflite::ActivationFunctionType_NONE) {
: pot_scale_int16(true),
fused_activation_function(tflite::ActivationFunctionType_NONE) {
}
};

struct AddOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
typedef AddOptionsT NativeTableType;
enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
VT_FUSED_ACTIVATION_FUNCTION = 4
VT_FUSED_ACTIVATION_FUNCTION = 4,
VT_POT_SCALE_INT16 = 6
};
bool pot_scale_int16() const {
return GetField<uint8_t>(VT_POT_SCALE_INT16, 1) != 0;
}
tflite::ActivationFunctionType fused_activation_function() const {
return static_cast<tflite::ActivationFunctionType>(GetField<int8_t>(VT_FUSED_ACTIVATION_FUNCTION, 0));
}
bool Verify(flatbuffers::Verifier &verifier) const {
return VerifyTableStart(verifier) &&
VerifyField<uint8_t>(verifier, VT_POT_SCALE_INT16) &&
VerifyField<int8_t>(verifier, VT_FUSED_ACTIVATION_FUNCTION) &&
verifier.EndTable();
}
@@ -5793,22 +5800,29 @@ flatbuffers::Offset<DepthToSpaceOptions> CreateDepthToSpaceOptions(flatbuffers::

struct SubOptionsT : public flatbuffers::NativeTable {
typedef SubOptions TableType;
bool pot_scale_int16;
tflite::ActivationFunctionType fused_activation_function;
SubOptionsT()
: fused_activation_function(tflite::ActivationFunctionType_NONE) {
: pot_scale_int16(true),
fused_activation_function(tflite::ActivationFunctionType_NONE) {
}
};

struct SubOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
typedef SubOptionsT NativeTableType;
enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
VT_FUSED_ACTIVATION_FUNCTION = 4
VT_FUSED_ACTIVATION_FUNCTION = 4,
VT_POT_SCALE_INT16 = 6
};
bool pot_scale_int16() const {
return GetField<uint8_t>(VT_POT_SCALE_INT16, 1) != 0;
}
tflite::ActivationFunctionType fused_activation_function() const {
return static_cast<tflite::ActivationFunctionType>(GetField<int8_t>(VT_FUSED_ACTIVATION_FUNCTION, 0));
}
bool Verify(flatbuffers::Verifier &verifier) const {
return VerifyTableStart(verifier) &&
VerifyField<uint8_t>(verifier, VT_POT_SCALE_INT16) &&
VerifyField<int8_t>(verifier, VT_FUSED_ACTIVATION_FUNCTION) &&
verifier.EndTable();
}
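For completeness, a writer-side sketch using the object API declared above; AddOptions::Pack is part of the usual flatc-generated object-API surface in this header, and the helper name and chosen values are illustrative.

#include "flatbuffers/flatbuffers.h"
#include "tensorflow/lite/schema/schema_generated.h"

// Illustrative: serialize AddOptions requesting the general-rescaling int16
// path (pot_scale_int16 = false overrides the schema default of true).
flatbuffers::Offset<tflite::AddOptions> PackAddOptionsSketch(
    flatbuffers::FlatBufferBuilder& builder) {
  tflite::AddOptionsT options;
  options.fused_activation_function = tflite::ActivationFunctionType_NONE;
  options.pot_scale_int16 = false;
  return tflite::AddOptions::Pack(builder, &options);
}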
1 change: 0 additions & 1 deletion tensorflow/lite/tools/versioning/BUILD
@@ -22,7 +22,6 @@ cc_library(
"//tensorflow/core:tflite_portable_logging",
"//tensorflow/lite:minimal_logging",
"//tensorflow/lite/kernels/internal:compatibility",
"//tensorflow/lite/kernels/internal:quantization_util",
"//tensorflow/lite/schema:schema_fbs",
"//tensorflow/lite/schema:schema_fbs_with_mutable",
"@com_google_absl//absl/memory",
47 changes: 10 additions & 37 deletions tensorflow/lite/tools/versioning/op_version.cc
@@ -24,7 +24,6 @@ limitations under the License.
#include "absl/strings/str_split.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/lite/kernels/internal/compatibility.h"
#include "tensorflow/lite/kernels/internal/quantization_util.h"

namespace tflite {
namespace {
@@ -509,42 +508,17 @@ OpSignature GetOpSignature(const OperatorCode* op_code, const Operator* op,
}
} break;

case BuiltinOperator_ADD:
case BuiltinOperator_SUB: {
op_sig.options.addsub.pot_scale_int16 = false;
const Tensor* input1_tensor =
subgraph->tensors()->Get(op->inputs()->Get(0));
const Tensor* input2_tensor =
subgraph->tensors()->Get(op->inputs()->Get(1));
const Tensor* output_tensor =
subgraph->tensors()->Get(op->outputs()->Get(0));
const QuantizationParameters* input1_quant =
input1_tensor->quantization();
const QuantizationParameters* input2_quant =
input2_tensor->quantization();
const QuantizationParameters* output_quant =
output_tensor->quantization();
if (input1_quant && input1_quant->scale() &&
input1_quant->scale()->Length() && input2_quant &&
input2_quant->scale() && input2_quant->scale()->Length() &&
output_quant && output_quant->scale() &&
output_quant->scale()->Length()) {
float input1_scale = input1_quant->scale()->Get(0);
float input2_scale = input2_quant->scale()->Get(0);
float output_scale = output_quant->scale()->Get(0);

int scale_log2_rounded = 0;
bool input1_scale_is_pot =
CheckedLog2(input1_scale, &scale_log2_rounded);

bool input2_scale_is_pot =
CheckedLog2(input2_scale, &scale_log2_rounded);

bool output_scale_is_pot =
CheckedLog2(output_scale, &scale_log2_rounded);
case BuiltinOperator_ADD: {
auto add_option = op->builtin_options_as_AddOptions();
if (add_option) {
op_sig.options.addsub.pot_scale_int16 = add_option->pot_scale_int16();
}
} break;

op_sig.options.addsub.pot_scale_int16 =
input1_scale_is_pot && input2_scale_is_pot && output_scale_is_pot;
case BuiltinOperator_SUB: {
auto sub_option = op->builtin_options_as_SubOptions();
if (sub_option) {
op_sig.options.addsub.pot_scale_int16 = sub_option->pot_scale_int16();
}

if (op_code->builtin_code() == BuiltinOperator_SUB) {
@@ -553,7 +527,6 @@ OpSignature GetOpSignature(const OperatorCode* op_code, const Operator* op,
op_sig.options.broadcast.num_dims =
std::max(GetNumDims(subgraph, op, 0), GetNumDims(subgraph, op, 1));
}

} break;

case BuiltinOperator_LSTM: {
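The version switch itself (GetBuiltinOperatorVersion) is outside this hunk; below is a sketch of how the signature bit would typically drive the choice, with the version numbers taken from the comments added in builtin_op_data.h. The function name and the fallback value are illustrative, not the actual selection logic.

#include "tensorflow/lite/tools/versioning/op_version.h"

// Sketch only: non-power-of-two int16 scaling needs the new kernel path, so it
// bumps ADD to version 4 (SUB would go to version 5); otherwise fall back to
// the pre-existing selection, elided here.
int AddVersionSketch(const tflite::OpSignature& op_sig) {
  if (!op_sig.options.addsub.pot_scale_int16) {
    return 4;
  }
  return 1;  // placeholder for the earlier-version logic
}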
