Reland (Attempt #3) PR tensorflow#35985: [TFLite int16] 16-bit version of ADD/SUB reference kernel operators

Imported from GitHub PR tensorflow#35985

This PR is one of the steps to extend 8-bit quantization to support symmetric 16-bit activations.

Each activation is of type int16 and symmetric around zero. The weight tensors remain at 8-bit signed precision, and the bias is held at int64 precision.

In this PR we introduce the implementation and tests for the ADD/SUB reference kernel functions.
The specification of these operators is as follows (a brief quantization sketch follows the specification):

SUB
  Input 0:
    data_type  : int16
    range      : [-32768, 32767]
    granularity: per-tensor, zero_point=0
  Input 1:
    data_type  : int16
    range      : [-32768, 32767]
    granularity: per-tensor, zero_point=0
  Output 0:
    data_type  : int16
    range      : [-32768, 32767]
    granularity: per-tensor, zero_point=0

ADD
  Input 0:
    data_type  : int16
    range      : [-32768, 32767]
    granularity: per-tensor, zero_point=0
  Input 1:
    data_type  : int16
    range      : [-32768, 32767]
    granularity: per-tensor, zero_point=0
  Output 0:
    data_type  : int16
    range      : [-32768, 32767]
    granularity: per-tensor, zero_point=0
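
To make the specification concrete, here is a minimal quantize/dequantize sketch for the symmetric int16 scheme above (per-tensor scale, zero_point fixed at 0). The helper names are illustrative only and are not part of this PR.

#include <algorithm>
#include <cmath>
#include <cstdint>

// Illustrative helper: map a real value to the symmetric int16 scheme used
// above (per-tensor scale, zero_point == 0), clamping to [-32768, 32767].
inline int16_t QuantizeSymmetricInt16(float value, float scale) {
  const int32_t q = static_cast<int32_t>(std::round(value / scale));
  return static_cast<int16_t>(std::min(32767, std::max(-32768, q)));
}

// Dequantization is a single multiply, since the zero point is zero.
inline float DequantizeSymmetricInt16(int16_t q, float scale) {
  return static_cast<float>(q) * scale;
}
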
Copybara import of the project:

--
b94cb47 by Elena Zhelezina <elena.zhelezina@arm.com>:

Added 16-bit version of ADD/SUB operators. Broadcasting is included.

--
924d0b7 by Elena Zhelezina <elena.zhelezina@arm.com>:

Addressed reviewer comments.

--
dd0d9e8 by Elena Zhelezina <elena.zhelezina@arm.com>:

Added versioning to ADD/SUB + some rework of the existing code.

--
abae3fd by Elena Zhelezina <elena.zhelezina@arm.com>:

Added versioning for ADD/SUB with new option in the schema.fbs
schema_generated.h is edited manually.

--
24f3f55 by Elena Zhelezina <elena.zhelezina@arm.com>:

Fix for broken build.

--
d252fe1 by Elena Zhelezina <elena.zhelezina@arm.com>:

Fix for the failing internal test for NN delegates.

--
2223a5c by Elena Zhelezina <elena.zhelezina@arm.com>:

Fix for asan failures.

Change-Id: I2cf421ddda7f9e802202239136ab062bcd63b4aa

--
3c219a4 by Elena Zhelezina <elena.zhelezina@arm.com>:

Added broadcast params to addsub structure.

Change-Id: I61d7d4a94087d052a782890799211031f6ed3015

--
9131a38 by Elena Zhelezina <elena.zhelezina@arm.com>:

Corrected defaults.

Change-Id: I9ea50c75014cc03ac91fdef0f5b4fe11395f7074
PiperOrigin-RevId: 324865496
jdduke authored and tensorflower-gardener committed Aug 4, 2020
1 parent beab9b8 commit 6be604a
Showing 16 changed files with 321 additions and 73 deletions.
4 changes: 4 additions & 0 deletions tensorflow/lite/c/builtin_op_data.h
@@ -199,6 +199,8 @@ typedef struct {

typedef struct {
TfLiteFusedActivation activation;
// Parameter added for version 4.
bool pot_scale_int16;
} TfLiteAddParams;

typedef struct {
@@ -220,6 +222,8 @@ typedef struct {

typedef struct {
TfLiteFusedActivation activation;
// Parameter added for version 5.
bool pot_scale_int16;
} TfLiteSubParams;

typedef struct {
2 changes: 2 additions & 0 deletions tensorflow/lite/core/api/flatbuffer_conversions.cc
@@ -896,6 +896,7 @@ TfLiteStatus ParseAdd(const Operator* op, ErrorReporter* error_reporter,
if (schema_params != nullptr) {
params->activation =
ConvertActivation(schema_params->fused_activation_function());
params->pot_scale_int16 = schema_params->pot_scale_int16();
} else {
// TODO(b/157480169): We should either return kTfLiteError or fill in some
// reasonable defaults in the params struct. We are not doing so until we
@@ -1631,6 +1632,7 @@ TfLiteStatus ParseSub(const Operator* op, ErrorReporter* error_reporter,
if (schema_params != nullptr) {
params->activation =
ConvertActivation(schema_params->fused_activation_function());
params->pot_scale_int16 = schema_params->pot_scale_int16();
} else {
// TODO(b/157480169): We should either return kTfLiteError or fill in some
// reasonable defaults in the params struct. We are not doing so until we
4 changes: 4 additions & 0 deletions tensorflow/lite/experimental/writer/writer_lib_test.cc
@@ -47,6 +47,7 @@ TEST(Writer, FloatModelTest) {
TfLiteAddParams* builtin_data =
reinterpret_cast<TfLiteAddParams*>(malloc(sizeof(TfLiteAddParams)));
builtin_data->activation = kTfLiteActNone;
builtin_data->pot_scale_int16 = false;
const TfLiteRegistration* reg = resolver.FindOp(BuiltinOperator_ADD, 1);
interpreter.AddNodeWithParameters({0, 1}, {2}, initial_data, 0,
reinterpret_cast<void*>(builtin_data), reg);
@@ -84,6 +85,7 @@ TEST(Writer, CustomInputOutputTest) {
TfLiteAddParams* builtin_data =
reinterpret_cast<TfLiteAddParams*>(malloc(sizeof(TfLiteAddParams)));
builtin_data->activation = kTfLiteActNone;
builtin_data->pot_scale_int16 = false;
const TfLiteRegistration* reg = resolver.FindOp(BuiltinOperator_ADD, 1);
interpreter.AddNodeWithParameters({0, 1}, {2}, initial_data, 0,
reinterpret_cast<void*>(builtin_data), reg);
@@ -131,6 +133,7 @@ TEST(Writer, CustomInputOutputErrorCasesTest) {
TfLiteAddParams* builtin_data =
reinterpret_cast<TfLiteAddParams*>(malloc(sizeof(TfLiteAddParams)));
builtin_data->activation = kTfLiteActNone;
builtin_data->pot_scale_int16 = false;
const TfLiteRegistration* reg = resolver.FindOp(BuiltinOperator_ADD, 1);
interpreter.AddNodeWithParameters({0, 1}, {2}, initial_data, 0,
reinterpret_cast<void*>(builtin_data), reg);
@@ -173,6 +176,7 @@ TEST(Writer, PerTensorQuantizedModelTest) {
TfLiteAddParams* builtin_data =
reinterpret_cast<TfLiteAddParams*>(malloc(sizeof(TfLiteAddParams)));
builtin_data->activation = kTfLiteActNone;
builtin_data->pot_scale_int16 = false;
const TfLiteRegistration* reg = resolver.FindOp(BuiltinOperator_ADD, 1);
interpreter.AddNodeWithParameters({0, 1}, {2}, initial_data, 0,
reinterpret_cast<void*>(builtin_data), reg);
85 changes: 66 additions & 19 deletions tensorflow/lite/kernels/add.cc
@@ -68,6 +68,11 @@ struct OpData {
int32 input1_offset;
int32 input2_offset;
int32 output_offset;

// This parameter indicates whether the scale parameter
// is a power of two.
// It is used in the 16-bit -> 16-bit quantization case.
bool pot_scale_int16;
};

void* Init(TfLiteContext* context, const char* buffer, size_t length) {
@@ -103,12 +108,55 @@ TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
output_size = TfLiteIntArrayCopy(input1->dims);
}

if (output->type == kTfLiteUInt8 || output->type == kTfLiteInt8) {
// 8bit -> 8bit general quantized path, with general rescalings
// as well as, int16 -> int16 with general rescalings
bool pot_scale_int16 = true;

bool input1_scale_is_pot = false;
bool input2_scale_is_pot = false;
bool output_scale_is_pot = false;

int input1_scale_log2_rounded{0};
int input2_scale_log2_rounded{0};
int output_scale_log2_rounded{0};

if (input1->type == kTfLiteInt16 && input2->type == kTfLiteInt16 &&
output->type == kTfLiteInt16) {
// In the case of 16-bit, there are two implementations:
// the scale parameter is a general number, or
// the scale parameter is POT and
// zero_point is zero for inputs/output.
pot_scale_int16 = (input1->params.zero_point == 0) &&
(input2->params.zero_point == 0) &&
(output->params.zero_point == 0);

input1_scale_is_pot =
CheckedLog2(input1->params.scale, &input1_scale_log2_rounded);

input2_scale_is_pot =
CheckedLog2(input2->params.scale, &input2_scale_log2_rounded);

output_scale_is_pot =
CheckedLog2(output->params.scale, &output_scale_log2_rounded);

pot_scale_int16 &=
input1_scale_is_pot && input2_scale_is_pot && output_scale_is_pot;
}

data->pot_scale_int16 = pot_scale_int16;

if (output->type == kTfLiteUInt8 || output->type == kTfLiteInt8 ||
!pot_scale_int16) {
// 8bit -> 8bit general quantized path, with general rescalings
// as well as, 16bit -> 16bit with general rescalings
data->input1_offset = -input1->params.zero_point;
data->input2_offset = -input2->params.zero_point;
data->output_offset = output->params.zero_point;
data->left_shift = 20;

// The shift is set to 15 for the 16-bit case and 20 for the 8-bit case.
// In the 16-bit case we have 65535 << 15, which is less than 1 << 31,
// so the addition still fits in a 32-bit accumulator.
data->left_shift = !pot_scale_int16 ? 15 : 20;
const double twice_max_input_scale =
2 * std::max(input1->params.scale, input2->params.scale);
const double real_input1_multiplier =
@@ -144,19 +192,8 @@ TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
TF_LITE_ENSURE_EQ(context, input2->params.zero_point, 0);
TF_LITE_ENSURE_EQ(context, output->params.zero_point, 0);

int input1_scale_log2_rounded;
bool input1_scale_is_pot =
CheckedLog2(input1->params.scale, &input1_scale_log2_rounded);
TF_LITE_ENSURE(context, input1_scale_is_pot);

int input2_scale_log2_rounded;
bool input2_scale_is_pot =
CheckedLog2(input2->params.scale, &input2_scale_log2_rounded);
TF_LITE_ENSURE(context, input2_scale_is_pot);

int output_scale_log2_rounded;
bool output_scale_is_pot =
CheckedLog2(output->params.scale, &output_scale_log2_rounded);
TF_LITE_ENSURE(context, output_scale_is_pot);

data->input1_shift = input1_scale_log2_rounded - output_scale_log2_rounded;
@@ -231,7 +268,8 @@ TfLiteStatus EvalAddQuantized(TfLiteContext* context, TfLiteNode* node,
const TfLiteTensor* input1,
const TfLiteTensor* input2,
TfLiteTensor* output) {
if (output->type == kTfLiteUInt8 || output->type == kTfLiteInt8) {
if (output->type == kTfLiteUInt8 || output->type == kTfLiteInt8 ||
!data->pot_scale_int16) {
tflite::ArithmeticParams op_params;
op_params.left_shift = data->left_shift;
op_params.input1_offset = data->input1_offset;
Expand Down Expand Up @@ -266,6 +304,15 @@ TfLiteStatus EvalAddQuantized(TfLiteContext* context, TfLiteNode* node,
TF_LITE_ADD(optimized_integer_ops, Add, int8_t);
}
}
} else if (output->type == kTfLiteInt16) {
if (need_broadcast) {
TF_LITE_ADD(reference_ops, BroadcastAdd4DSlow, int16_t);
} else {
reference_ops::Add(
op_params, GetTensorShape(input1), GetTensorData<int16_t>(input1),
GetTensorShape(input2), GetTensorData<int16_t>(input2),
GetTensorShape(output), GetTensorData<int16_t>(output), false);
}
} else {
if (kernel_type == kReference) {
if (need_broadcast) {
@@ -283,12 +330,12 @@ TfLiteStatus EvalAddQuantized(TfLiteContext* context, TfLiteNode* node,
}
#undef TF_LITE_ADD
} else if (output->type == kTfLiteInt16) {
tflite::ArithmeticParams op_params;
op_params.input1_shift = data->input1_shift;
op_params.input2_shift = data->input2_shift;
SetActivationParams(data->output_activation_min,
data->output_activation_max, &op_params);
#define TF_LITE_ADD(type, opname) \
tflite::ArithmeticParams op_params; \
op_params.input1_shift = data->input1_shift; \
op_params.input2_shift = data->input2_shift; \
SetActivationParams(data->output_activation_min, \
data->output_activation_max, &op_params); \
type::opname(op_params, GetTensorShape(input1), \
GetTensorData<int16_t>(input1), GetTensorShape(input2), \
GetTensorData<int16_t>(input2), GetTensorShape(output), \
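
A note on the left_shift choice in Prepare() above: the comment's accumulator argument can be checked at compile time. The 16-bit bound (65535 << 15 is below 1 << 31) is stated in the code; the 8-bit bound below is our own rough estimate (|offset + input| <= 510) and is only an illustration.

// Compile-time sanity check of the accumulator reasoning (illustrative only).
static_assert(65535LL * (1LL << 15) < (1LL << 31),
              "16-bit inputs shifted by 15 still fit a 32-bit accumulator");
static_assert(510LL * (1LL << 20) < (1LL << 31),
              "8-bit inputs (offset applied) shifted by 20 also fit");
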
31 changes: 23 additions & 8 deletions tensorflow/lite/kernels/add_test.cc
@@ -310,15 +310,18 @@ TEST(QuantizedAddOpModel, QuantizedTestsNoActivationInt16) {
const float kMin = -1.f;
const float kMax = 32767.f / 32768.f;
float kQuantizedTolerance = GetToleranceInt16(kMin, kMax);
std::vector<std::vector<float>> inputs1 = {
{0.1, 0.2, 0.3, 0.4}, {-0.8, 0.2, 0.4, 0.7}, {-0.8, 0.2, 0.7, 0.3}};
std::vector<std::vector<float>> inputs2 = {
{0.6, 0.4, 0.3, 0.1}, {0.6, 0.4, 0.5, -0.8}, {0.6, 0.4, -0.8, 0.5}};
std::vector<std::vector<float>> results = {
{0.7, 0.6, 0.6, 0.5}, {-0.2, 0.6, 0.9, -0.1}, {-0.2, 0.6, -0.1, 0.8}};
std::vector<std::vector<float>> inputs1 = {{0.1, 0.2, 0.3, 0.4, 0.9, 0.7},
{-0.8, 0.2, 0.4, 0.7, 0.1, 0.0},
{-0.8, 0.2, 0.7, 0.3, 0.9, 0.1}};
std::vector<std::vector<float>> inputs2 = {{0.6, 0.4, 0.3, 0.1, -0.1, 0.3},
{0.6, 0.4, 0.5, -0.8, 0.0, -1.0},
{0.6, 0.4, -0.8, 0.5, -0.9, 0.1}};
std::vector<std::vector<float>> results = {{0.7, 0.6, 0.6, 0.5, 0.8, 1.0},
{-0.2, 0.6, 0.9, -0.1, 0.1, -1.0},
{-0.2, 0.6, -0.1, 0.8, 0.0, 0.2}};
for (size_t i = 0; i < inputs1.size(); ++i) {
QuantizedAddOpModel m({TensorType_INT16, {1, 2, 2, 1}, kMin, kMax},
{TensorType_INT16, {1, 2, 2, 1}, kMin, kMax},
QuantizedAddOpModel m({TensorType_INT16, {1, 2, 3, 1}, kMin, kMax},
{TensorType_INT16, {1, 2, 3, 1}, kMin, kMax},
{TensorType_INT16, {}, kMin, kMax},
ActivationFunctionType_NONE);
m.QuantizeAndPopulate<int16_t>(m.input1(), inputs1[i]);
@@ -439,6 +442,10 @@ TEST(QuantizedAddOpModel, QuantizedWithScalarBroadcastInt8) {
QuantizedWithScalarBroadcast<TensorType_INT8, int8_t>();
}

TEST(QuantizedAddOpModel, QuantizedWithScalarBroadcastInt16) {
QuantizedWithScalarBroadcast<TensorType_INT16, int16_t>();
}

template <enum TensorType tensor_type, typename integer_dtype>
void QuantizedWithMixedBroadcast() {
float kQuantizedTolerance = GetTolerance(-3.f, 3.f);
@@ -501,6 +508,10 @@ TEST(QuantizedAddOpModel, QuantizedWithMixedBroadcastInt8) {
QuantizedWithMixedBroadcast<TensorType_INT8, int8_t>();
}

TEST(QuantizedAddOpModel, QuantizedWithMixedBroadcastInt16) {
QuantizedWithMixedBroadcast<TensorType_INT16, int16_t>();
}

template <enum TensorType tensor_type, typename integer_dtype>
void QuantizedWithGenericBroadcast() {
float kQuantizedTolerance = GetTolerance(-1.0, 1.0);
Expand All @@ -527,5 +538,9 @@ TEST(QuantizedAddOpModel, QuantizedWithGenericdBroadcastInt8) {
QuantizedWithGenericBroadcast<TensorType_INT8, int8_t>();
}

TEST(QuantizedAddOpModel, QuantizedWithGenericdBroadcastInt16) {
QuantizedWithGenericBroadcast<TensorType_INT16, int16_t>();
}

} // namespace
} // namespace tflite
66 changes: 50 additions & 16 deletions tensorflow/lite/kernels/internal/reference/add.h
@@ -51,13 +51,18 @@ inline void Add(const ArithmeticParams& params,

// Element-wise add that can often be used for inner loop of broadcast add as
// well as the non-broadcast add.

// This function is used for 8-bit as well as for 16-bit, and the accumulator
// is 32-bit in both cases. Overflow cannot occur thanks to the
// choice of the shift (20 or 15, respectively - see add.cc for more comments).
template <typename T>
inline void AddElementwise(int size, const ArithmeticParams& params,
const uint8_t* input1_data,
const uint8_t* input2_data, uint8_t* output_data) {
TFLITE_DCHECK_GT(params.input1_offset, -256);
TFLITE_DCHECK_GT(params.input2_offset, -256);
TFLITE_DCHECK_LT(params.input1_offset, 256);
TFLITE_DCHECK_LT(params.input2_offset, 256);
const T* input1_data, const T* input2_data,
T* output_data) {
TFLITE_DCHECK_GT(params.input1_offset, -std::numeric_limits<T>::max());
TFLITE_DCHECK_GT(params.input2_offset, -std::numeric_limits<T>::max());
TFLITE_DCHECK_LT(params.input1_offset, std::numeric_limits<T>::max());
TFLITE_DCHECK_LT(params.input2_offset, std::numeric_limits<T>::max());

for (int i = 0; i < size; ++i) {
const int32_t input1_val = params.input1_offset + input1_data[i];
Expand All @@ -78,7 +83,7 @@ inline void AddElementwise(int size, const ArithmeticParams& params,
const int32_t clamped_output =
std::min(params.quantized_activation_max,
std::max(params.quantized_activation_min, raw_output));
output_data[i] = static_cast<uint8_t>(clamped_output);
output_data[i] = static_cast<T>(clamped_output);
}
}

Expand Down Expand Up @@ -132,10 +137,38 @@ inline void Add(const ArithmeticParams& params,
AddElementwise(flat_size, params, input1_data, input2_data, output_data);
}

inline void AddGeneralParamScale(const ArithmeticParams& params,
const RuntimeShape& input1_shape,
const int16_t* input1_data,
const RuntimeShape& input2_shape,
const int16_t* input2_data,
const RuntimeShape& output_shape,
int16_t* output_data) {
TFLITE_DCHECK_LE(params.quantized_activation_min,
params.quantized_activation_max);
const int flat_size =
MatchingElementsSize(input1_shape, input2_shape, output_shape);

int max_value = std::numeric_limits<int16_t>::max();

TFLITE_DCHECK_GT(params.input1_offset, -max_value);
TFLITE_DCHECK_GT(params.input2_offset, -max_value);
TFLITE_DCHECK_LT(params.input1_offset, max_value);
TFLITE_DCHECK_LT(params.input2_offset, max_value);
AddElementwise(flat_size, params, input1_data, input2_data, output_data);
}

inline void Add(const ArithmeticParams& params,
const RuntimeShape& input1_shape, const int16_t* input1_data,
const RuntimeShape& input2_shape, const int16_t* input2_data,
const RuntimeShape& output_shape, int16_t* output_data) {
const RuntimeShape& output_shape, int16_t* output_data,
bool pot_scale = true) {
if (!pot_scale) {
AddGeneralParamScale(params, input1_shape, input1_data, input2_shape,
input2_data, output_shape, output_data);
return;
}

TFLITE_DCHECK_LE(params.quantized_activation_min,
params.quantized_activation_max);

@@ -258,13 +291,14 @@ inline void BroadcastAdd4DSlow(const ArithmeticParams& params,
}
}

inline void BroadcastAdd4DSlow(const ArithmeticParams& params,
const RuntimeShape& input1_shape,
const uint8_t* input1_data,
const RuntimeShape& input2_shape,
const uint8_t* input2_data,
const RuntimeShape& output_shape,
uint8_t* output_data) {
// This function is used for 8-bit as well as for 16-bit, and the accumulator
// is 32-bit in both cases. Overflow cannot occur thanks to the
// choice of the shift (20 or 15, respectively - see add.cc for more comments).
template <typename T>
inline void BroadcastAdd4DSlow(
const ArithmeticParams& params, const RuntimeShape& input1_shape,
const T* input1_data, const RuntimeShape& input2_shape,
const T* input2_data, const RuntimeShape& output_shape, T* output_data) {
NdArrayDesc<4> desc1;
NdArrayDesc<4> desc2;
NdArrayDescsForElementwiseBroadcast(input1_shape, input2_shape, &desc1,
Expand Down Expand Up @@ -314,7 +348,7 @@ inline void BroadcastAdd4DSlow(const ArithmeticParams& params,
std::min(params.quantized_activation_max,
std::max(params.quantized_activation_min, raw_output));
output_data[Offset(extended_output_shape, b, y, x, c)] =
static_cast<uint8_t>(clamped_output);
static_cast<T>(clamped_output);
}
}
}
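
For reference, the real-valued computation that the general-rescaling path above (AddElementwise / AddGeneralParamScale) approximates per element is sketched below. This is derived from the multipliers set up in add.cc and is illustrative only; in the int16 path all three zero points are 0.

#include <algorithm>
#include <cmath>
#include <cstdint>

// Illustrative reference: q_out = clamp((s1*(q1 - z1) + s2*(q2 - z2)) / s_out + z_out),
// where s1, s2, s_out are per-tensor scales and z1, z2, z_out are zero points.
inline int32_t QuantizedAddReference(int32_t q1, float s1, int32_t z1,
                                     int32_t q2, float s2, int32_t z2,
                                     float s_out, int32_t z_out,
                                     int32_t act_min, int32_t act_max) {
  const float real_sum = s1 * (q1 - z1) + s2 * (q2 - z2);
  const int32_t raw =
      static_cast<int32_t>(std::round(real_sum / s_out)) + z_out;
  return std::min(act_max, std::max(act_min, raw));
}
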
6 changes: 3 additions & 3 deletions tensorflow/lite/kernels/register.cc
@@ -89,8 +89,8 @@ BuiltinOpResolver::BuiltinOpResolver() {
/* min_version = */ 1,
/* max_version = */ 3);
AddBuiltin(BuiltinOperator_ADD, Register_ADD(),
/* min_version = */ 1,
/* max_version = */ 2);
/* min_version */ 1,
/* max_version */ 4);
AddBuiltin(BuiltinOperator_SPACE_TO_BATCH_ND, Register_SPACE_TO_BATCH_ND(),
/* min_version = */ 1,
/* max_version = */ 3);
@@ -143,7 +143,7 @@ BuiltinOpResolver::BuiltinOpResolver() {
/* max_version */ 2);
AddBuiltin(BuiltinOperator_SUB, Register_SUB(),
/* min_version = */ 1,
/* max_version = */ 4);
/* max_version = */ 5);
AddBuiltin(BuiltinOperator_SPLIT, Register_SPLIT(),
/* min_version = */ 1,
/* max_version = */ 4);
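
With the ADD max_version raised to 4 above (the version that carries the new pot_scale_int16 option, per builtin_op_data.h), the new kernel can be looked up through the standard built-in resolver. A small usage sketch, mirroring how the writer tests above resolve version 1; illustrative only:

#include "tensorflow/lite/kernels/register.h"

// Sketch: resolve the version-4 ADD registration.
void ResolveAddV4() {
  tflite::ops::builtin::BuiltinOpResolver resolver;
  const TfLiteRegistration* reg =
      resolver.FindOp(tflite::BuiltinOperator_ADD, /*version=*/4);
  // reg is non-null because Register_ADD() is now registered up to version 4.
  (void)reg;
}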