5 files changed: +26 -29 lines

@@ -95,26 +95,26 @@ Tensor& add_out(
   }
 
   for (int i = 0; i < max_dim; i++) {
-    out_shape[i] = 1; 
+    out_shape[i] = 1;
     inp1_shape[i] = 1;
     inp2_shape[i] = 1;
   }
- 
-  int offset_out = max_dim - out.dim(); 
+
+  int offset_out = max_dim - out.dim();
   int offset_inp1 = max_dim - a.dim();
   int offset_inp2 = max_dim - b.dim();
- 
+
   for (int i = 0; i < out.dim(); i++) {
     out_shape[i + offset_out] = out.size(i);
   }
   for (int i = 0; i < a.dim(); i++) {
     inp1_shape[i + offset_inp1] = a.size(i);
   }
   for (int i = 0; i < b.dim(); i++) {
-    inp2_shape[i + offset_inp2] = b.size(i); 
+    inp2_shape[i + offset_inp2] = b.size(i);
   }
 
-  if ((compute_type == ScalarType::Int) && (optimized)){
+  if ((compute_type == ScalarType::Int) && (optimized)) {
     const int* const inp1_data = a.const_data_ptr<int>();
     const int* const inp2_data = b.const_data_ptr<int>();
     int* const out_data = out.mutable_data_ptr<int>();
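For context, the lines touched here are the broadcast-shape setup shared by add_out and (below) mul_out: every shape buffer is first filled with 1s, then each tensor's real sizes are copied in right-aligned at max_dim - dim(), so a lower-rank operand broadcasts against a higher-rank one. A minimal standalone sketch of that padding step; pad_shape is a hypothetical helper for illustration, not part of the kernel:

#include <cstdio>
#include <vector>

// Hypothetical helper mirroring the loops above: right-align `shape`
// into a rank-`max_dim` buffer, with leading dimensions padded to 1.
std::vector<int> pad_shape(const std::vector<int>& shape, int max_dim) {
  std::vector<int> padded(max_dim, 1);  // out_shape[i] = 1;
  int offset = max_dim - static_cast<int>(shape.size());
  for (int i = 0; i < static_cast<int>(shape.size()); i++) {
    padded[i + offset] = shape[i];      // shape[i + offset] = t.size(i);
  }
  return padded;
}

int main() {
  // Broadcasting a {3, 4} operand against a {2, 3, 4} one: max_dim == 3.
  for (int d : pad_shape({3, 4}, 3)) {
    std::printf("%d ", d);              // prints: 1 3 4
  }
  std::printf("\n");
  return 0;
}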

@@ -87,23 +87,23 @@ Tensor& mul_out(
   }
 
   for (int i = 0; i < max_dim; i++) {
-    out_shape[i] = 1; 
+    out_shape[i] = 1;
     inp1_shape[i] = 1;
     inp2_shape[i] = 1;
   }
- 
-  int offset_out = max_dim - out.dim(); 
+
+  int offset_out = max_dim - out.dim();
   int offset_inp1 = max_dim - a.dim();
   int offset_inp2 = max_dim - b.dim();
- 
+
   for (int i = 0; i < out.dim(); i++) {
     out_shape[i + offset_out] = out.size(i);
   }
   for (int i = 0; i < a.dim(); i++) {
     inp1_shape[i + offset_inp1] = a.size(i);
   }
   for (int i = 0; i < b.dim(); i++) {
-    inp2_shape[i + offset_inp2] = b.size(i); 
+    inp2_shape[i + offset_inp2] = b.size(i);
   }
 
   if ((compute_type == ScalarType::Int) && (optimized)) {

@@ -23,7 +23,6 @@ using torch::executor::apply_binary_elementwise_fn;
 using torch::executor::Error;
 using torch::executor::resize_to_broadcast_target_size;
 
-
 namespace cadence {
 namespace impl {
 namespace HiFi {

@@ -351,4 +351,3 @@ Tensor& pow_Scalar_out(
 } // namespace HiFi
 } // namespace impl
 } // namespace cadence
-

@@ -26,8 +26,7 @@ using ::executorch::aten::Tensor;
 using ::executorch::runtime::getLeadingDims;
 using ::executorch::runtime::KernelRuntimeContext;
 
-
-// The nnlib kernel to compute quantized linear via matmul. 
+// The nnlib kernel to compute quantized linear via matmul.
 
 void _quantized_linear_asym8u(
     const Tensor& in,
@@ -48,22 +47,22 @@ void _quantized_linear_asym8u(
   const int32_t* __restrict__ bias_data = bias.const_data_ptr<int32_t>();
   uint8_t* __restrict__ out_data = out.mutable_data_ptr<uint8_t>();
   int32_t ret = xa_nn_matmul_asym8uxasym8u_asym8u(
-      out_data, 
-      weight_data, 
-      in_data, 
-      bias_data, 
-      out_dim, 
-      in_dim, 
-      in_dim, 
-      leading_dims, 
-      in_dim, 
-      out_dim, 
-      1, 
+      out_data,
+      weight_data,
+      in_data,
+      bias_data,
+      out_dim,
+      in_dim,
+      in_dim,
+      leading_dims,
+      in_dim,
+      out_dim,
+      1,
       -weight_zero_point.const_data_ptr<int32_t>()[0], // mat1_zero_bias
       -in_zero_point, // mat2_zero_bias
-      out_multiplier.const_data_ptr<int32_t>()[0], 
-      out_shift.const_data_ptr<int32_t>()[0], 
-      out_zero_point);
+      out_multiplier.const_data_ptr<int32_t>()[0],
+      out_shift.const_data_ptr<int32_t>()[0],
+      out_zero_point);
   ET_DCHECK_MSG(ret == 0, "HiFi quantized::linear failed");
 }
 
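As context for the argument list being re-indented here: the zero points are passed negated because the nnlib kernel adds mat1_zero_bias and mat2_zero_bias to each element, turning the operands into (weight - weight_zero_point) and (in - in_zero_point) before the uint8 matmul accumulates into int32. The accumulator is then requantized back to uint8 using out_multiplier, out_shift, and out_zero_point. Below is a rough sketch of our reading of that requantization, assuming a Q31 fixed-point multiplier; the name requantize is ours, and the exact rounding and shift convention of the real nnlib kernel may differ:

#include <algorithm>
#include <cstdint>
#include <cstdio>

// Rough per-element requantization sketch (our reading, not nnlib source):
// acc = sum_k (weight[k] - weight_zp) * (in[k] - in_zp) + bias, in int32.
uint8_t requantize(int32_t acc, int32_t out_multiplier, int32_t out_shift,
                   int32_t out_zero_point) {
  // Assumed Q31 fixed-point scaling: widen, multiply, shift back down.
  // A positive out_shift is treated as a left shift of the result.
  int64_t scaled =
      (static_cast<int64_t>(acc) * out_multiplier) >> (31 - out_shift);
  int64_t with_zp = scaled + out_zero_point;
  // Saturate into the uint8 output range.
  return static_cast<uint8_t>(std::clamp<int64_t>(with_zp, 0, 255));
}

int main() {
  // Example: acc = 200 with a Q31 multiplier of 0.5, shift 0, zero point 128.
  std::printf("%d\n", static_cast<int>(
      requantize(200, 1 << 30, 0, 128)));  // 200 * 0.5 + 128 = 228
  return 0;
}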