diff --git a/backends/cadence/aot/functions_hifi.yaml b/backends/cadence/aot/functions_hifi.yaml index 3bdbb33d59b..524c7a8e540 100644 --- a/backends/cadence/aot/functions_hifi.yaml +++ b/backends/cadence/aot/functions_hifi.yaml @@ -469,6 +469,11 @@ - arg_meta: null kernel_name: impl::HiFi::quantized_linear_per_tensor_out +- func: cadence::im2row.out(Tensor input, int[2] kernel_size, int[2] dilation, int[2] padding, int[2] stride, Tensor in_zero_point, bool channel_last=False, *, Tensor(a!) out) -> Tensor(a!) + kernels: + - arg_meta: null + kernel_name: cadence::impl::HiFi::im2row_out + - func: cadence::quantized_linear_asym8sxasym8s_asym8s.per_tensor_out(Tensor src, Tensor weight, Tensor bias, int src_zero_point, int weight_zero_point, int out_multiplier, int out_shift, int out_zero_point, Tensor? offset, *, Tensor(a!) out) -> Tensor(a!) kernels: - arg_meta: null diff --git a/backends/cadence/hifi/kernels/CMakeLists.txt b/backends/cadence/hifi/kernels/CMakeLists.txt index 936e28e2241..c366cecbe0c 100644 --- a/backends/cadence/hifi/kernels/CMakeLists.txt +++ b/backends/cadence/hifi/kernels/CMakeLists.txt @@ -18,6 +18,7 @@ add_library( ${EXECUTORCH_ROOT}/backends/cadence/hifi/third-party/nnlib/xa_nn_elm_div_mode_f32_broadcast.c ${EXECUTORCH_ROOT}/backends/cadence/hifi/third-party/nnlib/xa_nn_elm_fmod_broadcast_f32.c ${EXECUTORCH_ROOT}/backends/cadence/hifi/third-party/nnlib/xa_nn_greater_lesser_equal_f32.c + ${EXECUTORCH_ROOT}/backends/cadence/hifi/third-party/nnlib/xa_nn_im2row.c ${EXECUTORCH_ROOT}/backends/cadence/hifi/third-party/nnlib/xa_nn_elm_logicalxor_bool_bool.c ${EXECUTORCH_ROOT}/backends/cadence/hifi/third-party/nnlib/xa_nn_elm_minimum_maximum_f32.c ${EXECUTORCH_ROOT}/backends/cadence/hifi/third-party/nnlib/xa_nn_elm_mul_f32_broadcast.c diff --git a/backends/cadence/hifi/kernels/kernels.h b/backends/cadence/hifi/kernels/kernels.h index 08343e2528b..6a3dcd1d245 100644 --- a/backends/cadence/hifi/kernels/kernels.h +++ b/backends/cadence/hifi/kernels/kernels.h @@ -196,6 +196,28 @@ extern "C" WORD32 xa_nn_elm_where_broadcast_4D_f32xf32_f32( const unsigned char* __restrict__ p_condition, const WORD32* const p_condition_shape); +extern "C" WORD32 xa_nn_im2row_quantized( + const WORD8* __restrict__ data_im, + const WORD32 in_zero_point, + /* input parameters*/ + const WORD32 channels, + const WORD32 height, + const WORD32 width, + /* output parameters */ + const WORD32 out_height, + const WORD32 out_width, + /* convolution parameters */ + const WORD32 kernel_h, + const WORD32 kernel_w, + const WORD32 pad_h, + const WORD32 pad_w, + const WORD32 stride_h, + const WORD32 stride_w, + const WORD32 dilation_h, + const WORD32 dilation_w, + WORD8* __restrict__ data_col, + WORD32 channels_last); + extern "C" WORD32 xa_nn_reduce_mean_4D_f32_f32( FLOAT32* __restrict__ p_out, const WORD32* const p_out_shape, diff --git a/backends/cadence/hifi/operators/CMakeLists.txt b/backends/cadence/hifi/operators/CMakeLists.txt index 26555da9760..e9369596893 100644 --- a/backends/cadence/hifi/operators/CMakeLists.txt +++ b/backends/cadence/hifi/operators/CMakeLists.txt @@ -16,6 +16,7 @@ include(${EXECUTORCH_ROOT}/tools/cmake/Codegen.cmake) # ATen compliant ops that are needed to run this model. 
set(_aten_ops__srcs + "${EXECUTORCH_ROOT}/backends/cadence/hifi/operators/im2row_out.cpp" "${EXECUTORCH_ROOT}/backends/cadence/hifi/operators/op_add.cpp" "${EXECUTORCH_ROOT}/backends/cadence/hifi/operators/op_atan2.cpp" "${EXECUTORCH_ROOT}/backends/cadence/hifi/operators/op_bitwise_and.cpp" diff --git a/backends/cadence/hifi/operators/im2row_out.cpp b/backends/cadence/hifi/operators/im2row_out.cpp new file mode 100644 index 00000000000..4793a36fec4 --- /dev/null +++ b/backends/cadence/hifi/operators/im2row_out.cpp @@ -0,0 +1,431 @@ +// (c) Meta Platforms, Inc. and affiliates. Confidential and proprietary. + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#define ALIGN_PTR(x, bytes) ((((unsigned)(x)) + (bytes - 1)) & (~(bytes - 1))) + +using ::executorch::aten::IntArrayRef; +using ::executorch::aten::ScalarType; +using ::executorch::aten::Tensor; +using ::executorch::runtime::KernelRuntimeContext; + +namespace cadence { +namespace impl { +namespace HiFi { +namespace native { + +template +__attribute__((always_inline)) void im2row_( + const T* __restrict__ data_im, + const int32_t in_zero_point, + /* input parameters*/ + const int32_t channels, + const int32_t height, + const int32_t width, + /* output parameters */ + const int32_t out_height, + const int32_t out_width, + /* convolution parameters */ + const int32_t kernel_h, + const int32_t kernel_w, + const int32_t pad_h, + const int32_t pad_w, + const int32_t stride_h, + const int32_t stride_w, + const int32_t dilation_h, + const int32_t dilation_w, + T* __restrict__ data_col, + bool channels_last) { + // Consider convolving the input image of dimensions channels * height * width + // (or height * width * channels for NHWC layout) with a filter of dimensions + // channels * kernels_h * kernels_w. Assume that this convolution will produce + // an output of dimensinos out_height x out_width. For each point the output, + // im2row takes the data from the input that is used in the computation of + // that output point, and flattens it into a vector of size channels_col = + // channels * kernel_h * kernel_w. The output of im2row will therefore be a 2D + // array of size (out_height * out_width) x channels_col + const int32_t channels_col = channels * kernel_h * kernel_w; + + // If the layout is NHWC, we can copy 'channels' worth of contiguous data + // points when performing im2row. + if (channels_last) { + // Iterate over the output domain + for (int _h = 0; _h < out_height; ++_h) { + for (int _w = 0; _w < out_width; ++_w) { + int32_t i_col = _h * out_width + _w; + // Each point in the output domain is the result of applying a filter of + // size kernel_h x kernel_w x channels on the input. But since channels + // is contiguous, we will not explicitly have a loop for it. + for (int _kh = 0; _kh < kernel_h; ++_kh) { + int32_t h_im = _h * stride_h - pad_h + _kh * dilation_h; + for (int _kw = 0; _kw < kernel_w; ++_kw) { + int32_t w_im = _w * stride_w - pad_w + _kw * dilation_w; + + // h_im and w_im are the actual height and width coordinates of the + // input tensor from where we need to copy 'channels' points. + const T* __restrict__ slice_im = + data_im + (h_im * width + w_im) * channels; + T* __restrict__ slice_col = data_col + i_col * channels_col + + (_kh * kernel_w + _kw) * channels; + // If the coordinates were within the input domain, we copy + // 'channels' contiguous values. Otherwise we will fill the output + // with 0's. 
+ if (h_im >= 0 && w_im >= 0 && h_im < height && w_im < width) { + std::memcpy(slice_col, slice_im, channels * sizeof(T)); + } else { + std::fill_n(slice_col, channels, T(in_zero_point)); + } + } + } + } + } + } else { + // Iterate over the output domain + for (int _h = 0; _h < out_height; ++_h) { + for (int _w = 0; _w < out_width; ++_w) { + int32_t i_col = _h * out_width + _w; + + // Each point in the output domain is the result of applying a filter + // of size chanenls * kernel_h x kernel_w on the input + for (int _c = 0; _c < channels; ++_c) { + for (int _kh = 0; _kh < kernel_h; ++_kh) { + for (int _kw = 0; _kw < kernel_w; ++_kw) { + // c_col is the linearized access in the channels_col vector. + int32_t c_col = (_c * kernel_h + _kh) * kernel_w + _kw; + // h_im and w_im are the actual height and width coordinates of + // the input tensor that we need to copy to the output. + int32_t h_im = _h * stride_h - pad_h + _kh * dilation_h; + int32_t w_im = _w * stride_w - pad_w + _kw * dilation_w; + // If the current data access is within the input tensor, copy the + // value + data_col[i_col * channels_col + c_col] = + (h_im >= 0 && w_im >= 0 && h_im < height && w_im < width) + ? data_im[(_c * height + h_im) * width + w_im] + : static_cast(in_zero_point); + } + } + } + } + } + } +} + +void im2row_out( + __ET_UNUSED KernelRuntimeContext& ctx, + const Tensor& input, + IntArrayRef kernel_size, + IntArrayRef dilation, + IntArrayRef padding, + IntArrayRef stride, + const Tensor& in_zero_point, + bool channel_last, + Tensor& out) { + // Compute the input tensor's dims + bool unit_height = input.dim() == 3; + const int32_t batch_size = input.size(0); + const int32_t in_c = + channel_last ? input.size(3 - unit_height) : input.size(1); + const int32_t in_h = + unit_height ? 1 : (channel_last ? input.size(1) : input.size(2)); + const int32_t in_w = + channel_last ? input.size(2 - unit_height) : input.size(3 - unit_height); + + // Get the kernel parameters + int32_t kernel_h = kernel_size[0]; + int32_t kernel_w = kernel_size[1]; + int32_t dilation_h = dilation[0]; + int32_t dilation_w = dilation[1]; + int32_t pad_h = padding[0]; + int32_t pad_w = padding[1]; + int32_t stride_h = stride[0]; + int32_t stride_w = stride[1]; + + // If we were to apply a convolution on the input tensor, compute the output + // height and width. + int32_t out_h = + (in_h + 2 * pad_h - dilation_h * (kernel_h - 1) - 1) / stride_h + 1; + int32_t out_w = + (in_w + 2 * pad_w - dilation_w * (kernel_w - 1) - 1) / stride_w + 1; + + ET_DCHECK_MSG( + (out_h * out_w) == out.size(1), "dimension mismatch for output"); + ET_DCHECK_MSG( + (kernel_h * kernel_w * in_c) == out.size(2), + "dimension mismatch for output"); + // Check if the input is per-tensor quantized or per-channel quantized. The + // zero point for each batch could differ for per-channel quantized input. 
+ bool per_tensor_quantized = in_zero_point.numel() == 1; + + bool optimized = false; + if (input.scalar_type() == ScalarType::Char || + input.scalar_type() == ScalarType::Byte) + optimized = true; + + if (!optimized) { + WORD8* ptr1 = (WORD8*)kernels::allocate_temp_memory( + ctx, ((batch_size * in_c * in_h * in_w) + 8) * sizeof(WORD8)); + + WORD8* pin = (WORD8*)ALIGN_PTR(ptr1, 8); + + WORD32 p_inp_shape[4]; + p_inp_shape[0] = input.size(0); + p_inp_shape[1] = in_c; + p_inp_shape[2] = in_h; + p_inp_shape[3] = in_w; + + WORD32 p_out_shape[4]; + p_out_shape[0] = input.size(0); + p_out_shape[1] = in_h; + p_out_shape[2] = in_w; + p_out_shape[3] = in_c; + + WORD32 p_permute_vec[4] = {0, 2, 3, 1}; + + WORD8* __restrict__ p_inp = + (WORD8* __restrict__)input.const_data_ptr(); + + xa_nn_transpose_8_8( + pin, + p_out_shape, + p_inp, + p_inp_shape, + p_permute_vec, + 4, // input dimensions + 4); // output dimensions + + const int8_t* __restrict__ in_data = pin; + int8_t* __restrict__ out_data = out.mutable_data_ptr(); + const int32_t* __restrict__ zero_point = + in_zero_point.const_data_ptr(); + int32_t in_plane = in_c * in_h * in_w; + int32_t out_plane = kernel_h * kernel_w * in_c * out_h * out_w; + for (size_t n = 0; n < batch_size; ++n) { + xa_nn_im2row_quantized( + &in_data[n * in_plane], + per_tensor_quantized ? zero_point[0] : zero_point[n], + in_c, + in_h, + in_w, + out_h, + out_w, + kernel_h, + kernel_w, + pad_h, + pad_w, + stride_h, + stride_w, + dilation_h, + dilation_w, + &out_data[n * out_plane], + 1 /*channel_last*/); + } + } else { +#define typed_im2row(dtype, ctype) \ + case ScalarType::dtype: { \ + const ctype* __restrict__ in_data = input.const_data_ptr(); \ + ctype* __restrict__ out_data = out.mutable_data_ptr(); \ + const int32_t* __restrict__ zero_point = \ + in_zero_point.const_data_ptr(); \ + int32_t in_plane = in_c * in_h * in_w; \ + int32_t out_plane = kernel_h * kernel_w * in_c * out_h * out_w; \ + for (size_t n = 0; n < batch_size; ++n) { \ + im2row_( \ + &in_data[n * in_plane], \ + per_tensor_quantized ? zero_point[0] : zero_point[n], \ + in_c, \ + in_h, \ + in_w, \ + out_h, \ + out_w, \ + kernel_h, \ + kernel_w, \ + pad_h, \ + pad_w, \ + stride_h, \ + stride_w, \ + dilation_h, \ + dilation_w, \ + &out_data[n * out_plane], \ + channel_last); \ + } \ + break; \ + } + + ScalarType dtype = input.scalar_type(); + switch (dtype) { + typed_im2row(Float, float); + typed_im2row(Byte, uint8_t); + typed_im2row(Char, int8_t); + default: + ET_DCHECK_MSG( + false, + "im2row not implemented for dtype %s", + torch::executor::toString(dtype)); + } +#undef typed_im2row + } +} + +void im2row_per_tensor_out( + __ET_UNUSED KernelRuntimeContext& ctx, + const Tensor& input, + IntArrayRef kernel_size, + IntArrayRef dilation, + IntArrayRef padding, + IntArrayRef stride, + int64_t in_zero_point, + bool channel_last, + Tensor& out) { + // Compute the input tensor's dims + bool unit_height = input.dim() == 3; + const int32_t batch_size = input.size(0); + const int32_t in_c = + channel_last ? input.size(3 - unit_height) : input.size(1); + const int32_t in_h = + unit_height ? 1 : (channel_last ? input.size(1) : input.size(2)); + const int32_t in_w = + channel_last ? 
input.size(2 - unit_height) : input.size(3 - unit_height); + + // Get the kernel parameters + int32_t kernel_h = kernel_size[0]; + int32_t kernel_w = kernel_size[1]; + int32_t dilation_h = dilation[0]; + int32_t dilation_w = dilation[1]; + int32_t pad_h = padding[0]; + int32_t pad_w = padding[1]; + int32_t stride_h = stride[0]; + int32_t stride_w = stride[1]; + + // If we were to apply a convolution on the input tensor, compute the output + // height and width. + int32_t out_h = + (in_h + 2 * pad_h - dilation_h * (kernel_h - 1) - 1) / stride_h + 1; + int32_t out_w = + (in_w + 2 * pad_w - dilation_w * (kernel_w - 1) - 1) / stride_w + 1; + + ET_DCHECK_MSG( + (out_h * out_w) == out.size(1), "dimension mismatch for output"); + ET_DCHECK_MSG( + (kernel_h * kernel_w * in_c) == out.size(2), + "dimension mismatch for output"); + + bool optimized = false; + if (input.scalar_type() == ScalarType::Char || + input.scalar_type() == ScalarType::Byte) + optimized = true; + + if (!optimized) { + WORD8* ptr1 = (WORD8*)kernels::allocate_temp_memory( + ctx, ((batch_size * in_c * in_h * in_w) + 8) * sizeof(WORD8)); + + WORD8* pin = (WORD8*)ALIGN_PTR(ptr1, 8); + + WORD32 p_inp_shape[4]; + p_inp_shape[0] = input.size(0); + p_inp_shape[1] = in_c; + p_inp_shape[2] = in_h; + p_inp_shape[3] = in_w; + + WORD32 p_out_shape[4]; + p_out_shape[0] = input.size(0); + p_out_shape[1] = in_h; + p_out_shape[2] = in_w; + p_out_shape[3] = in_c; + + WORD32 p_permute_vec[4] = {0, 2, 3, 1}; + + WORD8* __restrict__ p_inp = + (WORD8* __restrict__)input.const_data_ptr(); + + xa_nn_transpose_8_8( + pin, + p_out_shape, + p_inp, + p_inp_shape, + p_permute_vec, + 4, // input dimensions + 4); // output dimensions + + const int8_t* __restrict__ in_data = pin; + int8_t* __restrict__ out_data = out.mutable_data_ptr(); + int32_t in_plane = in_c * in_h * in_w; + int32_t out_plane = kernel_h * kernel_w * in_c * out_h * out_w; + for (size_t n = 0; n < batch_size; ++n) { + xa_nn_im2row_quantized( + &in_data[n * in_plane], + (int32_t)in_zero_point, + in_c, + in_h, + in_w, + out_h, + out_w, + kernel_h, + kernel_w, + pad_h, + pad_w, + stride_h, + stride_w, + dilation_h, + dilation_w, + &out_data[n * out_plane], + 1 /*channel_last*/); + } + } else { +#define typed_im2row_per_tensor(dtype, ctype) \ + case ScalarType::dtype: { \ + const ctype* __restrict__ in_data = input.const_data_ptr(); \ + ctype* __restrict__ out_data = out.mutable_data_ptr(); \ + int32_t in_plane = in_c * in_h * in_w; \ + int32_t out_plane = kernel_h * kernel_w * in_c * out_h * out_w; \ + for (size_t n = 0; n < batch_size; ++n) { \ + im2row_( \ + &in_data[n * in_plane], \ + in_zero_point, \ + in_c, \ + in_h, \ + in_w, \ + out_h, \ + out_w, \ + kernel_h, \ + kernel_w, \ + pad_h, \ + pad_w, \ + stride_h, \ + stride_w, \ + dilation_h, \ + dilation_w, \ + &out_data[n * out_plane], \ + channel_last); \ + } \ + break; \ + } + + ScalarType dtype = input.scalar_type(); + switch (dtype) { + typed_im2row_per_tensor(Float, float); + typed_im2row_per_tensor(Byte, uint8_t); + typed_im2row_per_tensor(Char, int8_t); + default: + ET_DCHECK_MSG( + false, + "im2row.per_tensor not implemented for dtype %s", + torch::executor::toString(dtype)); + } +#undef typed_im2row_per_tensor + } +} + +} // namespace native +} // namespace HiFi +} // namespace impl +} // namespace cadence \ No newline at end of file diff --git a/backends/cadence/hifi/third-party/nnlib/xa_nn_greater_lesser_equal_f32.c b/backends/cadence/hifi/third-party/nnlib/xa_nn_greater_lesser_equal_f32.c index 792b152e1fa..35581a42471 100644 
--- a/backends/cadence/hifi/third-party/nnlib/xa_nn_greater_lesser_equal_f32.c +++ b/backends/cadence/hifi/third-party/nnlib/xa_nn_greater_lesser_equal_f32.c @@ -2,6 +2,7 @@ #include "xa_nnlib_common_fpu.h" #include "xa_nn_common.h" #include "xa_nnlib_err_chk.h" +//#include "xa_nn_basic_state.h" #include "xa_nnlib_kernels_api.h" diff --git a/backends/cadence/hifi/third-party/nnlib/xa_nn_im2row.c b/backends/cadence/hifi/third-party/nnlib/xa_nn_im2row.c new file mode 100644 index 00000000000..3746991d430 --- /dev/null +++ b/backends/cadence/hifi/third-party/nnlib/xa_nn_im2row.c @@ -0,0 +1,133 @@ +#include "xa_type_def.h" +#include "xa_nnlib_common_fpu.h" +#include "xa_nn_common.h" +#include "xa_nnlib_err_chk.h" +//#include "xa_nn_basic_state.h" +#include "xa_nnlib_kernels_api.h" + +WORD32 xa_nn_im2row_quantized( + const WORD8* __restrict__ data_im, + const WORD32 in_zero_point, + /* input parameters*/ + const WORD32 channels, + const WORD32 height, + const WORD32 width, + /* output parameters */ + const WORD32 out_height, + const WORD32 out_width, + /* convolution parameters */ + const WORD32 kernel_h, + const WORD32 kernel_w, + const WORD32 pad_h, + const WORD32 pad_w, + const WORD32 stride_h, + const WORD32 stride_w, + const WORD32 dilation_h, + const WORD32 dilation_w, + WORD8* __restrict__ data_col, + WORD32 channels_last) +{ + const WORD32 channels_col = channels * kernel_h * kernel_w; + + // If the layout is NHWC, we can copy 'channels' worth of contiguous data + // points when performing im2row. + if (channels_last) { + // Iterate over the output domain + for (int _h = 0; _h < out_height; ++_h) { + for (int _w = 0; _w < out_width; ++_w) { + int32_t i_col = _h * out_width + _w; + // Each point in the output domain is the result of applying a filter of + // size kernel_h x kernel_w x channels on the input. But since channels + // is contiguous, we will not explicitly have a loop for it. + for (int _kh = 0; _kh < kernel_h; ++_kh) { + int32_t h_im = _h * stride_h - pad_h + _kh * dilation_h; + for (int _kw = 0; _kw < kernel_w; ++_kw) { + int32_t w_im = _w * stride_w - pad_w + _kw * dilation_w; + + // h_im and w_im are the actual height and width coordinates of the + // input tensor from where we need to copy 'channels' points. + const int8_t* __restrict__ slice_im = + data_im + (h_im * width + w_im) * channels; + int8_t* __restrict__ slice_col = data_col + i_col * channels_col + + (_kh * kernel_w + _kw) * channels; + // If the coordinates were within the input domain, we copy + // 'channels' contiguous values. Otherwise we will fill the output + // with 0's. 
+          if (h_im >= 0 && w_im >= 0 && h_im < height && w_im < width) {
+            const ae_int24x2 *pae_inp = (const ae_int24x2 *)slice_im;
+            ae_int24x2 *pae_out = (ae_int24x2 *)slice_col;
+            ae_valign inp_a, out_a;
+            inp_a = AE_LA64_PP(pae_inp);
+            out_a = AE_ZALIGN64();
+
+            int ic;
+            for(ic = 0; ic + 6 <= channels; ic += 6)
+            {
+              ae_int24x2 d0;
+              AE_LA24X2_IP(d0, inp_a, pae_inp);
+              AE_SA24X2_IP(d0, out_a, pae_out);
+            }
+            AE_SA64POS_FP(out_a, pae_out);
+            for(int i = ic; i < channels; i++)
+            {
+              slice_col[i] = slice_im[i];
+            }
+          }
+          else {
+            ae_int24x2 *pae_out = (ae_int24x2 *)slice_col;
+            ae_valign out_a;
+            out_a = AE_ZALIGN64();
+
+            ae_int32x2 tmp = AE_MOVDA32(in_zero_point);
+            ae_int32x2 in_zero_point32x2 = AE_SLLI32(tmp, 8);
+            in_zero_point32x2 = AE_OR32(in_zero_point32x2, tmp);
+            in_zero_point32x2 = AE_SLLI32(in_zero_point32x2, 8);
+            in_zero_point32x2 = AE_OR32(in_zero_point32x2, tmp);
+
+            ae_int24x2 d0 = AE_MOVINT24X2_FROMINT32X2(in_zero_point32x2);
+            int ic;
+            for(ic = 0; ic + 6 <= channels; ic += 6)
+            {
+              AE_SA24X2_IP(d0, out_a, pae_out);
+            }
+            AE_SA64POS_FP(out_a, pae_out);
+            for(int i = ic; i < channels; i++)
+            {
+              slice_col[i] = (int8_t)(in_zero_point);
+            }
+          }
+        }
+      }
+    }
+  }
+  } else {
+    // Iterate over the output domain
+    for (int _h = 0; _h < out_height; ++_h) {
+      for (int _w = 0; _w < out_width; ++_w) {
+        int32_t i_col = _h * out_width + _w;
+
+        // Each point in the output domain is the result of applying a filter
+        // of size channels * kernel_h x kernel_w on the input
+        for (int _c = 0; _c < channels; ++_c) {
+          for (int _kh = 0; _kh < kernel_h; ++_kh) {
+            for (int _kw = 0; _kw < kernel_w; ++_kw) {
+              // c_col is the linearized access in the channels_col vector.
+              int32_t c_col = (_c * kernel_h + _kh) * kernel_w + _kw;
+              // h_im and w_im are the actual height and width coordinates of
+              // the input tensor that we need to copy to the output.
+              int32_t h_im = _h * stride_h - pad_h + _kh * dilation_h;
+              int32_t w_im = _w * stride_w - pad_w + _kw * dilation_w;
+              // If the current data access is within the input tensor, copy the
+              // value
+              data_col[i_col * channels_col + c_col] =
+                  (h_im >= 0 && w_im >= 0 && h_im < height && w_im < width)
+                  ? data_im[(_c * height + h_im) * width + w_im]
+                  : (int8_t)(in_zero_point);
+            }
+          }
+        }
+      }
+    }
+  }
+  return 0;
+}
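
Illustrative sketch (not part of the patch; the shape and zero point below are made up for illustration): the comment in im2row_() above describes the mapping this change implements, a (out_height * out_width) x (channels * kernel_h * kernel_w) matrix whose out-of-range taps are filled with the input zero point. The standalone C++ snippet below mirrors the generic NCHW fallback path and the output-geometry formula from im2row_out(), and can serve as a host-side reference when spot-checking xa_nn_im2row_quantized on small inputs.

#include <cstdint>
#include <cstdio>
#include <vector>

int main() {
  // Hypothetical toy configuration: 2x4x4 (CHW) input, 3x3 kernel,
  // stride 1, padding 1, dilation 1, zero point -128.
  const int32_t channels = 2, height = 4, width = 4;
  const int32_t kernel_h = 3, kernel_w = 3;
  const int32_t pad_h = 1, pad_w = 1;
  const int32_t stride_h = 1, stride_w = 1;
  const int32_t dilation_h = 1, dilation_w = 1;
  const int8_t zero_point = -128;

  // Same output-geometry formula as im2row_out()/im2row_per_tensor_out().
  const int32_t out_h =
      (height + 2 * pad_h - dilation_h * (kernel_h - 1) - 1) / stride_h + 1;
  const int32_t out_w =
      (width + 2 * pad_w - dilation_w * (kernel_w - 1) - 1) / stride_w + 1;
  const int32_t channels_col = channels * kernel_h * kernel_w;

  std::vector<int8_t> im(channels * height * width);
  for (size_t i = 0; i < im.size(); ++i) {
    im[i] = static_cast<int8_t>(i);
  }
  std::vector<int8_t> col(out_h * out_w * channels_col);

  // Same indexing as the NCHW branch of im2row_(): row i_col of the output
  // gathers every input element feeding output pixel (oh, ow); taps that
  // fall outside the image are filled with the zero point.
  for (int32_t oh = 0; oh < out_h; ++oh) {
    for (int32_t ow = 0; ow < out_w; ++ow) {
      const int32_t i_col = oh * out_w + ow;
      for (int32_t c = 0; c < channels; ++c) {
        for (int32_t kh = 0; kh < kernel_h; ++kh) {
          for (int32_t kw = 0; kw < kernel_w; ++kw) {
            const int32_t c_col = (c * kernel_h + kh) * kernel_w + kw;
            const int32_t h_im = oh * stride_h - pad_h + kh * dilation_h;
            const int32_t w_im = ow * stride_w - pad_w + kw * dilation_w;
            col[i_col * channels_col + c_col] =
                (h_im >= 0 && w_im >= 0 && h_im < height && w_im < width)
                    ? im[(c * height + h_im) * width + w_im]
                    : zero_point;
          }
        }
      }
    }
  }

  std::printf("im2row output: %d x %d\n", (int)(out_h * out_w),
              (int)channels_col);
  return 0;
}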