Skip to content

Commit 07bb097

Browse files
cyyever authored and pytorchmergebot committed
Fix clang-tidy bugprone* warnings (pytorch#148529)
Fixes #ISSUE_NUMBER Pull Request resolved: pytorch#148529 Approved by: https://github.com/ezyang
1 parent 3f920f3 commit 07bb097

28 files changed

+74
-79
lines changed

aten/src/ATen/ParallelNative.cpp

Lines changed: 2 additions & 4 deletions
Original file line number | Diff line number | Diff line change
@@ -222,8 +222,7 @@ void set_num_threads(int nthreads) {
222222
int stored_nthreads = num_intraop_threads.load();
223223
if (stored_nthreads <= 0) {
224224
// plus one because of master thread
225-
// NOLINTNEXTLINE(cppcoreguidelines-narrowing-conversions,bugprone-narrowing-conversions)
226-
stored_nthreads = _get_intraop_pool().size() + 1;
225+
stored_nthreads = static_cast<int>(_get_intraop_pool().size() + 1);
227226
}
228227
if (stored_nthreads != nthreads) {
229228
TORCH_WARN(
@@ -251,8 +250,7 @@ int get_num_threads() {
251250
return intraop_default_num_threads();
252251
} else {
253252
TORCH_INTERNAL_ASSERT(nthreads == CONSUMED);
254-
// NOLINTNEXTLINE(cppcoreguidelines-narrowing-conversions,bugprone-narrowing-conversions)
255-
return _get_intraop_pool().size() + 1;
253+
return static_cast<int>(_get_intraop_pool().size() + 1);
256254
}
257255
#else
258256
caffe2::PThreadPool* const pool = caffe2::pthreadpool();

aten/src/ATen/cuda/CUDABlas.cpp

Lines changed: 3 additions & 0 deletions
Original file line number | Diff line number | Diff line change
@@ -111,12 +111,15 @@ static cublasOperation_t _cublasOpFromChar(char op) {
111111
// NOLINTNEXTLINE(bugprone-switch-missing-default-case)
112112
switch (op) {
113113
case 'n':
114+
[[fallthrough]];
114115
case 'N':
115116
return CUBLAS_OP_N;
116117
case 't':
118+
[[fallthrough]];
117119
case 'T':
118120
return CUBLAS_OP_T;
119121
case 'c':
122+
[[fallthrough]];
120123
case 'C':
121124
return CUBLAS_OP_C;
122125
}

aten/src/ATen/cudnn/Descriptors.cpp

Lines changed: 1 addition & 2 deletions
Original file line number | Diff line number | Diff line change
@@ -156,8 +156,7 @@ void FilterDescriptor::set(const at::Tensor &t, const at::MemoryFormat memory_fo
156156
default:
157157
TORCH_INTERNAL_ASSERT(false, "unsupported memory_format for cuDNN filters");
158158
}
159-
// NOLINTNEXTLINE(*narrowing-conversions)
160-
set(getDataType(t), static_cast<int64_t>(dim), size, filter_format);
159+
set(getDataType(t), static_cast<int>(dim), size, filter_format);
161160
}
162161

163162
std::string cudnnMemoryFormatToString(cudnnTensorFormat_t tformat) {

aten/src/ATen/native/CPUFallback.cpp

Lines changed: 3 additions & 3 deletions
Original file line number | Diff line number | Diff line change
@@ -98,13 +98,13 @@ void cpu_fallback(const c10::OperatorHandle& op, torch::jit::Stack* stack, bool
9898
const auto arguments_begin = stack->size() - num_arguments;
9999

100100
std::vector<at::Tensor> tensor_args;
101-
std::vector<int> tensor_args_indices;
101+
std::vector<size_t> tensor_args_indices;
102102

103103
std::vector<c10::List<at::Tensor>> tensorlist_args;
104-
std::vector<int> tensorlist_args_indices;
104+
std::vector<size_t> tensorlist_args_indices;
105105

106106
std::vector<c10::List<std::optional<at::Tensor>>> optional_tensorlist_args;
107-
std::vector<int> optional_tensorlist_args_indices;
107+
std::vector<size_t> optional_tensorlist_args_indices;
108108

109109
std::optional<c10::Device> tgt_device = std::nullopt;
110110
// save converted cpu tensor for TensorList and optional TensorList

aten/src/ATen/native/mkldnn/Normalization.cpp

Lines changed: 2 additions & 4 deletions
Original file line number | Diff line number | Diff line change
@@ -162,17 +162,15 @@ std::tuple<Tensor, Tensor, Tensor> mkldnn_batch_norm(
162162
ideep::tensor saved_mean;
163163
ideep::tensor saved_var;
164164
ideep::batch_normalization_forward_training::compute(
165-
// NOLINTNEXTLINE(cppcoreguidelines-narrowing-conversions,bugprone-narrowing-conversions)
166-
x, w, b, y, saved_mean, saved_var, momentum, eps);
165+
x, w, b, y, saved_mean, saved_var, static_cast<float>(momentum), static_cast<float>(eps));
167166
if (use_running_stat) {
168167
auto len = x.get_nelems() / w.get_nelems(); // n*h*w
169168
ideep::tensor m = itensor_from_tensor(running_mean);
170169
ideep::tensor v = itensor_from_tensor(running_var);
171170
const std::vector<float> scales_mean{static_cast<float>(1 - momentum),
172171
static_cast<float>(momentum)};
173172
const std::vector<float> scales_var{static_cast<float>(1 - momentum),
174-
// NOLINTNEXTLINE(cppcoreguidelines-narrowing-conversions,bugprone-narrowing-conversions)
175-
static_cast<float>(momentum * len / (len - 1))};
173+
static_cast<float>(momentum * static_cast<double>(len) / (static_cast<double>(len) - 1))};
176174
ideep::sum::compute(scales_mean, {m, saved_mean}, m);
177175
ideep::sum::compute(scales_var, {v, saved_var}, v);
178176
}

aten/src/ATen/native/mkldnn/xpu/detail/QConv.cpp

Lines changed: 3 additions & 2 deletions
Original file line number | Diff line number | Diff line change
@@ -85,8 +85,9 @@ at::Tensor quantized_convolution(
8585
std::optional<std::string_view> unary_attr,
8686
torch::List<std::optional<at::Scalar>> unary_scalars,
8787
std::optional<std::string_view> unary_algorithm) {
88-
Attr attr =
89-
Attr(/*q_scale=*/1.0 / inv_output_scale, /*zp=*/output_zero_point);
88+
Attr attr = Attr(
89+
/*q_scale=*/static_cast<float>(1.0 / inv_output_scale),
90+
/*zp=*/output_zero_point);
9091

9192
auto ndim = act.ndimension();
9293
construct_attr_by_post_op(

aten/src/ATen/native/mkldnn/xpu/detail/QMatmul.cpp

Lines changed: 1 addition & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -112,7 +112,7 @@ void quantized_matmul(
112112
// config we support:
113113
// activation: s8&u8; per tensor calibrated; symmetric&asymmetric
114114
// weight: s8; per_tensor/per_channel calibrated; symmetric
115-
auto attr = Attr(1.0 / output_scale, output_zero_point);
115+
auto attr = Attr(static_cast<float>(1.0 / output_scale), output_zero_point);
116116
construct_attr_by_post_op(
117117
binary_post_op,
118118
binary_alpha,

aten/src/ATen/native/quantized/QTensor.cpp

Lines changed: 9 additions & 12 deletions
Original file line number | Diff line number | Diff line change
@@ -81,8 +81,8 @@ std::vector<Tensor> quantize_per_tensor_list_cpu(
8181
for (const auto i : c10::irange(tensors.size())) {
8282
quantized_tensors.push_back(at::quantize_per_tensor(
8383
tensors[i],
84-
scales[i].item<double>(),
85-
zero_points[i].item<int64_t>(),
84+
scales[static_cast<int64_t>(i)].item<double>(),
85+
zero_points[static_cast<int64_t>(i)].item<int64_t>(),
8686
dtype));
8787
}
8888
return quantized_tensors;
@@ -293,18 +293,16 @@ std::tuple<double, int64_t> _choose_qparams_per_tensor(
293293

294294
static float calculate_quant_loss(
295295
const float* input,
296-
int numel,
296+
int64_t numel,
297297
float xmin,
298298
float xmax,
299299
float* q_input,
300-
int bit_width) {
300+
int64_t bit_width) {
301301
xmin = static_cast<at::Half>(xmin);
302302
float data_range = xmax - xmin;
303-
// NOLINTNEXTLINE(cppcoreguidelines-narrowing-conversions,bugprone-narrowing-conversions)
304-
float qmax = (1 << bit_width) - 1;
303+
float qmax = static_cast<float>((1 << bit_width) - 1);
305304
float scale = data_range == 0
306-
? 1.0
307-
// NOLINTNEXTLINE(cppcoreguidelines-narrowing-conversions,bugprone-narrowing-conversions)
305+
? 1.0f
308306
: static_cast<float>(static_cast<at::Half>(data_range / qmax));
309307
float inverse_scale = scale == 0 ? 1.0f : 1.0f / scale;
310308

@@ -347,10 +345,10 @@ std::tuple<Tensor, Tensor> choose_qparams_optimized(
347345
const float* input_row = input_tensor.const_data_ptr<float>();
348346
float xmin = *std::min_element(input_row, input_row + numel);
349347
float xmax = *std::max_element(input_row, input_row + numel);
348+
float n_bins_float = static_cast<float>(n_bins);
350349

351-
float stepsize = (xmax - xmin) / n_bins;
352-
// NOLINTNEXTLINE(cppcoreguidelines-narrowing-conversions,bugprone-narrowing-conversions)
353-
int min_bins = n_bins * (1.0 - (float) ratio);
350+
float stepsize = (xmax - xmin) / n_bins_float;
351+
float min_bins = static_cast<float>(n_bins_float* (1.0 - ratio));
354352
Tensor input_tensor_contig = input_tensor.contiguous();
355353
const float* input = input_tensor_contig.const_data_ptr<float>();
356354
std::vector<float> q_input(numel);
@@ -363,7 +361,6 @@ std::tuple<Tensor, Tensor> choose_qparams_optimized(
363361
float cur_max = xmax;
364362
float cur_loss = loss;
365363

366-
// NOLINTNEXTLINE(cppcoreguidelines-narrowing-conversions,bugprone-narrowing-conversions)
367364
float thr = min_bins * stepsize;
368365
while (cur_min + thr < cur_max) {
369366
// move left

aten/src/ATen/native/quantized/cpu/LinearUnpackImpl.cpp

Lines changed: 1 addition & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -84,7 +84,7 @@ std::tuple<at::Tensor, std::optional<at::Tensor>> PackedLinearWeightsQnnp::
8484
at::device(c10::kCPU).dtype(c10::kFloat));
8585

8686
at::Tensor zero_points = at::empty(
87-
w_zero_points.size() - kPaddingChannels, at::device(c10::kCPU).dtype(c10::kLong));
87+
static_cast<int64_t>(w_zero_points.size() - kPaddingChannels), at::device(c10::kCPU).dtype(c10::kLong));
8888
for (const auto i : c10::irange(zero_points.numel())) {
8989
zero_points[i] = ((int64_t)w_zero_points[i] - 128);
9090
}

aten/src/ATen/native/quantized/cpu/kernels/QuantizedOpKernels.cpp

Lines changed: 4 additions & 7 deletions
Original file line number | Diff line number | Diff line change
@@ -108,8 +108,7 @@ Tensor qcat_nhwc_kernel(
108108
const int64_t N = qx0.size(0);
109109
const int64_t H = qx0.size(2);
110110
const int64_t W = qx0.size(3);
111-
// NOLINTNEXTLINE(bugprone-narrowing-conversions,cppcoreguidelines-narrowing-conversions)
112-
float inv_scale = 1.0 / scale;
111+
float inv_scale = static_cast<float>(1.0 / scale);
113112

114113
auto output = at::_empty_affine_quantized(
115114
{N, C_out, H, W},
@@ -1282,12 +1281,10 @@ void qelu_kernel(
12821281
template <bool ReLUFused = false>
12831282
void qadd_scalar_kernel(Tensor& out, const Tensor& self, const Scalar& other) {
12841283
int64_t zero_point = out.q_zero_point();
1285-
// NOLINTNEXTLINE(bugprone-narrowing-conversions,cppcoreguidelines-narrowing-conversions)
1286-
float scale = out.q_scale();
1287-
float inv_scale = 1.0f / scale;
1284+
float scale = static_cast<float>(out.q_scale());
1285+
float inv_scale = static_cast<float>(1.0f / scale);
12881286
int64_t self_zero_point = self.q_zero_point();
1289-
// NOLINTNEXTLINE(bugprone-narrowing-conversions,cppcoreguidelines-narrowing-conversions)
1290-
float self_scale = self.q_scale();
1287+
float self_scale = static_cast<float>(self.q_scale());
12911288

12921289
float multiplier = self_scale * inv_scale;
12931290

0 commit comments

Comments (0)