Merge branch 'test/int16' into 'master'
test: generate int16 test cases

See merge request ai/esp-dl!112
sun-xiangyu committed Dec 20, 2024
2 parents a383981 + 470ab4e · commit 3292cbd
Showing 17 changed files with 120 additions and 54 deletions.
4 changes: 2 additions & 2 deletions .gitlab/ci/gen_test_cases.yml
@@ -28,11 +28,11 @@ gen_espdl_ops_cases:
       - IMAGE: [python:3.11]
         TORCH: ["torch==2.5.0"]
         TARGET: [esp32p4]
-        BITS: [8]
+        BITS: [8, 16]
       - IMAGE: [python:3.10]
         TORCH: [torch]
         TARGET: [esp32s3]
-        BITS: [8]
+        BITS: [8, 16]
   variables:
     MODEL_PATH: test_apps/esp-dl/models
     CONFIG_FILE: tools/ops_test/config/op_cfg.toml
4 changes: 2 additions & 2 deletions esp-dl/dl/model/include/dl_model_base.hpp
@@ -56,8 +56,8 @@ class Model {
      * The address of model data while location is MODEL_LOCATION_IN_FLASH_RODATA.
      * The label of partition while location is MODEL_LOCATION_IN_FLASH_PARTITION.
      * The path of model while location is MODEL_LOCATION_IN_SDCARD.
-     * @param location The model location.
      * @param model_index The model index of packed models.
+     * @param location The model location.
      * @param internal_size Internal ram size, in bytes
      * @param mm_type Type of memory manager
      * @param key The key of encrypted model.
@@ -76,8 +76,8 @@ class Model {
      * The address of model data while location is MODEL_LOCATION_IN_FLASH_RODATA.
      * The label of partition while location is MODEL_LOCATION_IN_FLASH_PARTITION.
      * The path of model while location is MODEL_LOCATION_IN_SDCARD.
-     * @param location The model location.
      * @param model_name The model name of packed models.
+     * @param location The model location.
      * @param internal_size Internal ram size, in bytes
      * @param mm_type Type of memory manager
      * @param key The key of encrypted model.
11 changes: 7 additions & 4 deletions esp-dl/dl/module/include/dl_module_clip.hpp
@@ -103,12 +103,15 @@ class Clip : public Module {
     Module *op = nullptr;
     quant_type_t quant_type;
     fbs_model->get_operation_attribute(node_name, "quant_type", quant_type);
-    TensorBase *table = fbs_model->get_operation_lut(node_name);

     // Create module
-    if (table != NULL) {
-        op = new LUT(node_name.c_str(), table, MODULE_INPLACE_CHANGED_BUFFER, quant_type);
-    } else {
+    if (quant_type == QUANT_TYPE_SYMM_8BIT) {
+        TensorBase *table = fbs_model->get_operation_lut(node_name);
+        if (table) {
+            op = new LUT(node_name.c_str(), table, MODULE_INPLACE_CHANGED_BUFFER, quant_type);
+        }
+    }
+    if (op == nullptr) {
         TensorBase *min = fbs_model->get_operation_parameter(node_name, 1);
         TensorBase *max = fbs_model->get_operation_parameter(node_name, 2);
         assert(min->exponent == max->exponent);
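The same rework is applied to every activation module in this commit: the look-up-table fast path is now built only for 8-bit tensors. That gate is what makes a LUT practical in the first place, since an int8 input has only 256 possible values, while an int16 table would need 65,536 entries per operator. Below is a minimal sketch of the lookup idea, using a hypothetical apply_lut helper; the +128 indexing scheme is an assumption for illustration, not the repo's LUT module.

#include <cstddef>
#include <cstdint>

// Element-wise activation as a single table lookup: `table` holds f(x)
// precomputed for all 256 possible int8 inputs; the +128 shift maps the
// signed range [-128, 127] onto the index range [0, 255].
static void apply_lut(const int8_t *input, int8_t *output, size_t n, const int8_t table[256])
{
    for (size_t i = 0; i < n; i++) {
        output[i] = table[(uint8_t)(input[i] + 128)];
    }
}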
11 changes: 7 additions & 4 deletions esp-dl/dl/module/include/dl_module_exp.hpp
@@ -85,15 +85,18 @@ class Exp : public Module {
     Module *op = nullptr;
     quant_type_t quant_type;
     fbs_model->get_operation_attribute(node_name, "quant_type", quant_type);
-    TensorBase *table = fbs_model->get_operation_lut(node_name);
-
     // Create module
-    if (table != NULL) {
-        op = new LUT(node_name.c_str(), table, MODULE_INPLACE_CHANGED_BUFFER, quant_type);
+    if (quant_type == QUANT_TYPE_SYMM_8BIT) {
+        TensorBase *table = fbs_model->get_operation_lut(node_name);
+        if (table) {
+            op = new LUT(node_name.c_str(), table, MODULE_INPLACE_CHANGED_BUFFER, quant_type);
+        } else {
+            op = new Exp(node_name.c_str(), MODULE_INPLACE_CHANGED_BUFFER, quant_type);
+        }
     } else {
         op = new Exp(node_name.c_str(), MODULE_INPLACE_CHANGED_BUFFER, quant_type);
     }
     op->print();

     return op;
 }
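For 16-bit tensors these modules now always take the direct-computation branch (here, dl::module::Exp) instead of a table. A rough sketch of what such a direct int16 path can look like under esp-dl's power-of-two exponent quantization follows; the rounding and saturation details are assumptions, not the repo's kernel.

#include <algorithm>
#include <cmath>
#include <cstdint>

// Dequantize with the input exponent, apply the float op, requantize with
// the output exponent, and saturate to the int16 range.
static int16_t exp_int16(int16_t x, int in_exp, int out_exp)
{
    float real = std::ldexp((float)x, in_exp);                  // x * 2^in_exp
    float y = std::exp(real);
    int32_t q = (int32_t)std::lround(std::ldexp(y, -out_exp)); // y / 2^out_exp
    return (int16_t)std::clamp(q, (int32_t)INT16_MIN, (int32_t)INT16_MAX);
}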
10 changes: 7 additions & 3 deletions esp-dl/dl/module/include/dl_module_hard_sigmoid.hpp
@@ -90,11 +90,15 @@ class HardSigmoid : public Module {
     fbs_model->get_operation_attribute(node_name, "quant_type", quant_type);
     fbs_model->get_operation_attribute(node_name, "alpha", alpha);
     fbs_model->get_operation_attribute(node_name, "beta", beta);
-    TensorBase *table = fbs_model->get_operation_lut(node_name);

     // Create module
-    if (table != NULL) {
-        op = new LUT(node_name.c_str(), table, MODULE_INPLACE_CHANGED_BUFFER, quant_type);
+    if (quant_type == QUANT_TYPE_SYMM_8BIT) {
+        TensorBase *table = fbs_model->get_operation_lut(node_name);
+        if (table) {
+            op = new LUT(node_name.c_str(), table, MODULE_INPLACE_CHANGED_BUFFER, quant_type);
+        } else {
+            op = new HardSigmoid(node_name.c_str(), alpha, beta, MODULE_INPLACE_CHANGED_BUFFER, quant_type);
+        }
     } else {
         op = new HardSigmoid(node_name.c_str(), alpha, beta, MODULE_INPLACE_CHANGED_BUFFER, quant_type);
     }
11 changes: 7 additions & 4 deletions esp-dl/dl/module/include/dl_module_hard_swish.hpp
@@ -78,15 +78,18 @@ class HardSwish : public Module {
     Module *op = nullptr;
     quant_type_t quant_type;
     fbs_model->get_operation_attribute(node_name, "quant_type", quant_type);
-    TensorBase *table = fbs_model->get_operation_lut(node_name);
-
     // Create module
-    if (table != NULL) {
-        op = new LUT(node_name.c_str(), table, MODULE_INPLACE_CHANGED_BUFFER, quant_type);
+    if (quant_type == QUANT_TYPE_SYMM_8BIT) {
+        TensorBase *table = fbs_model->get_operation_lut(node_name);
+        if (table) {
+            op = new LUT(node_name.c_str(), table, MODULE_INPLACE_CHANGED_BUFFER, quant_type);
+        } else {
+            op = new HardSwish(node_name.c_str(), MODULE_INPLACE_CHANGED_BUFFER, quant_type);
+        }
     } else {
         op = new HardSwish(node_name.c_str(), MODULE_INPLACE_CHANGED_BUFFER, quant_type);
     }
     op->print();

     return op;
 }
10 changes: 7 additions & 3 deletions esp-dl/dl/module/include/dl_module_leaky_relu.hpp
@@ -88,11 +88,15 @@ class LeakyRelu : public Module {
     float alpha = 0.01;
     fbs_model->get_operation_attribute(node_name, "quant_type", quant_type);
     fbs_model->get_operation_attribute(node_name, "alpha", alpha);
-    TensorBase *table = fbs_model->get_operation_lut(node_name);

     // Create module
-    if (table != NULL) {
-        op = new LUT(node_name.c_str(), table, MODULE_INPLACE_CHANGED_BUFFER, quant_type);
+    if (quant_type == QUANT_TYPE_SYMM_8BIT) {
+        TensorBase *table = fbs_model->get_operation_lut(node_name);
+        if (table) {
+            op = new LUT(node_name.c_str(), table, MODULE_INPLACE_CHANGED_BUFFER, quant_type);
+        } else {
+            op = new LeakyRelu(node_name.c_str(), alpha, MODULE_INPLACE_CHANGED_BUFFER, quant_type);
+        }
     } else {
         op = new LeakyRelu(node_name.c_str(), alpha, MODULE_INPLACE_CHANGED_BUFFER, quant_type);
     }
10 changes: 7 additions & 3 deletions esp-dl/dl/module/include/dl_module_log.hpp
@@ -84,11 +84,15 @@ class Log : public Module {
     Module *op = nullptr;
     quant_type_t quant_type;
     fbs_model->get_operation_attribute(node_name, "quant_type", quant_type);
-    TensorBase *table = fbs_model->get_operation_lut(node_name);

     // Create module
-    if (table != NULL) {
-        op = new LUT(node_name.c_str(), table, MODULE_INPLACE_CHANGED_BUFFER, quant_type);
+    if (quant_type == QUANT_TYPE_SYMM_8BIT) {
+        TensorBase *table = fbs_model->get_operation_lut(node_name);
+        if (table) {
+            op = new LUT(node_name.c_str(), table, MODULE_INPLACE_CHANGED_BUFFER, quant_type);
+        } else {
+            op = new Log(node_name.c_str(), MODULE_INPLACE_CHANGED_BUFFER, quant_type);
+        }
     } else {
         op = new Log(node_name.c_str(), MODULE_INPLACE_CHANGED_BUFFER, quant_type);
     }
39 changes: 28 additions & 11 deletions esp-dl/dl/module/include/dl_module_prelu.hpp
@@ -15,7 +15,7 @@ namespace module {
  */
 class PRelu : public Module {
 private:
-    TensorBase *alpha;
+    TensorBase *m_alpha;

 public:
     /**
@@ -29,19 +29,31 @@ class PRelu : public Module {
           TensorBase *alpha = NULL,
           module_inplace_t inplace = MODULE_NON_INPLACE,
           quant_type_t quant_type = QUANT_TYPE_NONE) :
-        Module(name, inplace, quant_type), alpha(alpha)
+        Module(name, inplace, quant_type), m_alpha(alpha)
     {
     }

     /**
      * @brief Destroy the PRelu object.
      */
-    ~PRelu() { delete this->alpha; }
+    ~PRelu() { delete m_alpha; }

     std::vector<std::vector<int>> get_output_shape(std::vector<std::vector<int>> &input_shapes)
     {
         assert(input_shapes.size() == 1);
-        assert(input_shapes[0][3] == this->alpha->shape[0]);
+        if (m_alpha->shape[0] != input_shapes[0][3]) {
+            TensorBase *new_alpha = new TensorBase(
+                {input_shapes[0][3], 1, 1}, nullptr, m_alpha->exponent, m_alpha->dtype, true, m_alpha->caps);
+            if (m_alpha->get_dtype() == DATA_TYPE_INT16) {
+                int16_t alpha_value = m_alpha->get_element<int16_t>(0);
+                int16_t *alpha_ptr = new_alpha->get_element_ptr<int16_t>();
+                for (int i = 0; i < input_shapes[0][3]; i++) {
+                    alpha_ptr[i] = alpha_value;
+                }
+                delete m_alpha;
+                m_alpha = new_alpha;
+            }
+        }
         std::vector<std::vector<int>> output_shapes(1, input_shapes[0]);
         return output_shapes;
     }
@@ -73,7 +85,7 @@ class PRelu : public Module {
     TensorBase *input = tensors[m_inputs_index[0]];
     TensorBase *output = tensors[m_outputs_index[0]];

-    std::vector<base::ArgsType<T>> m_args = base::get_activation_args<T>(output, input, PReLU, alpha, mode);
+    std::vector<base::ArgsType<T>> m_args = base::get_activation_args<T>(output, input, PReLU, m_alpha, mode);
     int task_size = m_args.size();
     if (task_size == 1) { // single task
         forward_args((void *)&m_args[0]);
@@ -93,19 +105,24 @@ class PRelu : public Module {
     quant_type_t quant_type;
     fbs_model->get_operation_attribute(node_name, "quant_type", quant_type);
     TensorBase *alpha = fbs_model->get_operation_parameter(node_name, 1);
-    TensorBase *table = fbs_model->get_operation_lut(node_name);
     // [c, 1, 1]
     assert(alpha->shape.size() == 3);

     // Create module
-    if (table != NULL) {
-        op = new LUT(node_name.c_str(), table, MODULE_INPLACE_CHANGED_BUFFER, quant_type);
-        if (alpha != nullptr) {
-            delete alpha;
-    } else if (quant_type == QUANT_TYPE_SYMM_8BIT || quant_type == QUANT_TYPE_SYMM_16BIT) {
+    if (quant_type == QUANT_TYPE_SYMM_8BIT) {
+        TensorBase *table = fbs_model->get_operation_lut(node_name);
+        if (table) {
+            op = new LUT(node_name.c_str(), table, MODULE_INPLACE_CHANGED_BUFFER, quant_type);
+            if (alpha != nullptr) {
+                delete alpha;
+            }
+        } else {
+            op = new PRelu(node_name.c_str(), alpha, MODULE_INPLACE_CHANGED_BUFFER, quant_type);
+        }
+    } else {
         op = new PRelu(node_name.c_str(), alpha, MODULE_INPLACE_CHANGED_BUFFER, quant_type);
     }

     return op;
 }

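Note the new broadcast in get_output_shape above: when the stored slope has fewer channels than the input (for example a scalar alpha exported as shape [1, 1, 1]), the int16 branch tiles it to [C, 1, 1] so the kernel can index one slope per channel; as committed, only the DATA_TYPE_INT16 case is expanded. The core of that expansion as a standalone sketch (broadcast_alpha is a hypothetical helper, not repo code):

#include <cstdint>
#include <vector>

// Tile a scalar per-tensor slope into a per-channel slope buffer, mirroring
// what the int16 branch does with TensorBase internally.
static std::vector<int16_t> broadcast_alpha(int16_t alpha_value, int channels)
{
    return std::vector<int16_t>((size_t)channels, alpha_value);
}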
10 changes: 7 additions & 3 deletions esp-dl/dl/module/include/dl_module_relu.hpp
@@ -104,11 +104,15 @@ class Relu : public Module {
     Module *op = nullptr;
     quant_type_t quant_type;
     fbs_model->get_operation_attribute(node_name, "quant_type", quant_type);
-    TensorBase *table = fbs_model->get_operation_lut(node_name);

     // Create module
-    if (table != NULL) {
-        op = new LUT(node_name.c_str(), table, MODULE_INPLACE_CHANGED_BUFFER, quant_type);
+    if (quant_type == QUANT_TYPE_SYMM_8BIT) {
+        TensorBase *table = fbs_model->get_operation_lut(node_name);
+        if (table) {
+            op = new LUT(node_name.c_str(), table, MODULE_INPLACE_CHANGED_BUFFER, quant_type);
+        } else {
+            op = new Relu(node_name.c_str(), MODULE_INPLACE_CHANGED_BUFFER, quant_type);
+        }
     } else {
         op = new Relu(node_name.c_str(), MODULE_INPLACE_CHANGED_BUFFER, quant_type);
     }
10 changes: 7 additions & 3 deletions esp-dl/dl/module/include/dl_module_sigmoid.hpp
@@ -88,11 +88,15 @@ class Sigmoid : public Module {
     Module *op = nullptr;
     quant_type_t quant_type;
     fbs_model->get_operation_attribute(node_name, "quant_type", quant_type);
-    TensorBase *table = fbs_model->get_operation_lut(node_name);

     // Create module
-    if (table != NULL) {
-        op = new LUT(node_name.c_str(), table, MODULE_INPLACE_CHANGED_BUFFER, quant_type);
+    if (quant_type == QUANT_TYPE_SYMM_8BIT) {
+        TensorBase *table = fbs_model->get_operation_lut(node_name);
+        if (table) {
+            op = new LUT(node_name.c_str(), table, MODULE_INPLACE_CHANGED_BUFFER, quant_type);
+        } else {
+            op = new Sigmoid(node_name.c_str(), MODULE_INPLACE_CHANGED_BUFFER, quant_type);
+        }
     } else {
         op = new Sigmoid(node_name.c_str(), MODULE_INPLACE_CHANGED_BUFFER, quant_type);
     }
11 changes: 7 additions & 4 deletions esp-dl/dl/module/include/dl_module_sqrt.hpp
@@ -84,15 +84,18 @@ class Sqrt : public Module {
     Module *op = nullptr;
     quant_type_t quant_type;
     fbs_model->get_operation_attribute(node_name, "quant_type", quant_type);
-    TensorBase *table = fbs_model->get_operation_lut(node_name);
-
     // Create module
-    if (table != NULL) {
-        op = new LUT(node_name.c_str(), table, MODULE_INPLACE_CHANGED_BUFFER, quant_type);
+    if (quant_type == QUANT_TYPE_SYMM_8BIT) {
+        TensorBase *table = fbs_model->get_operation_lut(node_name);
+        if (table) {
+            op = new LUT(node_name.c_str(), table, MODULE_INPLACE_CHANGED_BUFFER, quant_type);
+        } else {
+            op = new Sqrt(node_name.c_str(), MODULE_INPLACE_CHANGED_BUFFER, quant_type);
+        }
     } else {
         op = new Sqrt(node_name.c_str(), MODULE_INPLACE_CHANGED_BUFFER, quant_type);
     }
     op->print();

     return op;
 }
10 changes: 7 additions & 3 deletions esp-dl/dl/module/include/dl_module_tanh.hpp
@@ -87,11 +87,15 @@ class Tanh : public Module {
     Module *op = nullptr;
     quant_type_t quant_type;
     fbs_model->get_operation_attribute(node_name, "quant_type", quant_type);
-    TensorBase *table = fbs_model->get_operation_lut(node_name);

     // Create module
-    if (table != NULL) {
-        op = new LUT(node_name.c_str(), table, MODULE_INPLACE_CHANGED_BUFFER, quant_type);
+    if (quant_type == QUANT_TYPE_SYMM_8BIT) {
+        TensorBase *table = fbs_model->get_operation_lut(node_name);
+        if (table) {
+            op = new LUT(node_name.c_str(), table, MODULE_INPLACE_CHANGED_BUFFER, quant_type);
+        } else {
+            op = new Tanh(node_name.c_str(), MODULE_INPLACE_CHANGED_BUFFER, quant_type);
+        }
     } else {
         op = new Tanh(node_name.c_str(), MODULE_INPLACE_CHANGED_BUFFER, quant_type);
     }
1 change: 1 addition & 0 deletions esp-dl/dl/tensor/include/dl_tensor_base.hpp
@@ -459,6 +459,7 @@ class TensorBase {
      * @brief print the information of TensorBase
      *
      * @param print_data Whether print the data
+     * @return This function does not return any value.
      */
     virtual void print(bool print_data = false);
 };
5 changes: 4 additions & 1 deletion esp-dl/dl/tensor/src/dl_tensor_base.cpp
@@ -743,6 +743,7 @@ template bool TensorBase::compare_elements<uint16_t>(const uint16_t *gt_elements
 template bool TensorBase::compare_elements<int32_t>(const int32_t *gt_elements, float epsilon, bool verbose);
 template bool TensorBase::compare_elements<uint32_t>(const uint32_t *gt_elements, float epsilon, bool verbose);
 template bool TensorBase::compare_elements<float>(const float *gt_elements, float epsilon, bool verbose);
+template bool TensorBase::compare_elements<double>(const double *gt_elements, float epsilon, bool verbose);

 bool TensorBase::is_same_shape(TensorBase *tensor)
 {
@@ -765,7 +766,7 @@ bool TensorBase::equal(TensorBase *tensor, float epsilon, bool verbose)

     // compare data type
     dtype_t type1 = this->get_dtype();
-    dtype_t type2 = this->get_dtype();
+    dtype_t type2 = tensor->get_dtype();
     if (type1 != type2) {
         if (verbose) {
             ESP_LOGE(__FUNCTION__, "data type not equal: %s != %s", dtype_to_string(type1), dtype_to_string(type2));
@@ -807,6 +808,8 @@ bool TensorBase::equal(TensorBase *tensor, float epsilon, bool verbose)
         return this->compare_elements<int32_t>((int32_t *)tensor->get_element_ptr(), epsilon, verbose);
     } else if (type1 == DATA_TYPE_UINT32) {
         return this->compare_elements<uint32_t>((uint32_t *)tensor->get_element_ptr(), epsilon, verbose);
+    } else if (type1 == DATA_TYPE_DOUBLE) {
+        return this->compare_elements<double>((double *)tensor->get_element_ptr(), epsilon, verbose);
     }
 } else {
     return (memcmp(this->get_element_ptr(), tensor->get_element_ptr(), this->get_bytes()) == 0);
15 changes: 12 additions & 3 deletions test_apps/esp-dl/main/test_dl_model.cpp
@@ -27,9 +27,18 @@ void compare_test_outputs(Model *model, std::map<std::string, TensorBase *> infe
     if (infer_output) {
         TensorBase *ground_truth_tensor = fbs_model_instance->get_test_output_tensor(infer_output_name, true);
         TEST_ASSERT_EQUAL_MESSAGE(true, ground_truth_tensor != nullptr, "The test output tensor is not found");
-        TEST_ASSERT_EQUAL_MESSAGE(true,
-                                  infer_output->equal(ground_truth_tensor, 1e-5, true),
-                                  "The output tensor is not equal to the ground truth");
+        if (ground_truth_tensor->get_dtype() == DATA_TYPE_INT16 ||
+            ground_truth_tensor->get_dtype() == DATA_TYPE_UINT16) {
+            // The int16 quantization cannot be fully aligned, and there may be rounding errors of +-1.
+            TEST_ASSERT_EQUAL_MESSAGE(true,
+                                      infer_output->equal(ground_truth_tensor, 1 + 1e-5, true),
+                                      "The output tensor is not equal to the ground truth");
+        } else {
+            TEST_ASSERT_EQUAL_MESSAGE(true,
+                                      infer_output->equal(ground_truth_tensor, 1e-5, true),
+                                      "The output tensor is not equal to the ground truth");
+        }
+
         delete ground_truth_tensor;
     }
 }
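The widened epsilon of 1 + 1e-5 in the int16 branch encodes a one-LSB allowance: a quantized output that differs from the Python-generated ground truth by a single quantization step still passes, while other dtypes keep the strict 1e-5 threshold. Restated as a standalone check (a hedged sketch, not the repo's comparison code):

#include <cstdint>
#include <cstdlib>

// An int16 result passes if it is within one quantization step of the
// expected value; the +-1 slack covers the rounding mismatch noted in
// the test comment above.
static bool within_int16_tolerance(int16_t got, int16_t expected)
{
    return std::abs((int)got - (int)expected) <= 1;
}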
2 changes: 1 addition & 1 deletion tools/ops_test/config/op_cfg.toml
@@ -864,7 +864,7 @@

 [ops_test.Resize]
 class_name = "RESIZE2D_TEST"
-quant_bits = ["int8", "int16"]
+quant_bits = ["int8"]
 description = "Only support nearest and do not support roi"
 [[ops_test.Resize.cfg]]
 input_shape = [1, 96, 5, 5]
