[clang-tidy] NO.3 bugprone-narrowing-conversions,cppcoreguidelines-narrowing-conversions PART 2 #62109

Merged: 1 commit, Mar 1, 2024
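
For context: bugprone-narrowing-conversions (and its cppcoreguidelines alias) flags implicit conversions that can silently lose range or precision, such as size_t to int64_t, double to float, or unsigned to signed. The fixes below follow two patterns: make the conversion explicit with static_cast, or suppress the diagnostic with // NOLINT where the narrowing is intentional and harmless. A minimal standalone sketch of both patterns (illustrative values, not Paddle code):

```cpp
#include <cstddef>
#include <cstdint>
#include <cstdio>

int main() {
  // size_t -> int64_t changes signedness; an explicit cast documents
  // that the value is known to fit.
  size_t numel = 1024;
  int64_t n = static_cast<int64_t>(numel);

  // double -> float can lose precision; cast once at the assignment.
  double scale_to_one = 0.0078125;
  float scale = static_cast<float>(scale_to_one) * 127.0f;

  // Where the narrowing is intentional, suppress the warning on that
  // line instead of sprinkling casts (uint64_t -> double below).
  uint64_t total_inst = 1000000;
  double total_time = 12.5;
  std::printf("%6.2f instances/s\n", total_inst / total_time);  // NOLINT

  std::printf("%lld %f\n", static_cast<long long>(n), scale);
  return 0;
}
```
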
4 changes: 3 additions & 1 deletion paddle/fluid/distributed/collective/process_group_nccl.cc
@@ -528,7 +528,9 @@ std::shared_ptr<ProcessGroup::Task> ProcessGroupNCCL::Gather(
size_t offset = 0;
size_t numel = out_tensor->numel() / size_;
for (auto i = 0; i < size_; i++) {
partial_tensors.push_back(GetPartialTensor(*out_tensor, offset, numel));
partial_tensors.push_back(GetPartialTensor(*out_tensor,
static_cast<int64_t>(offset),
static_cast<int64_t>(numel)));
offset += numel;
}
}
8 changes: 4 additions & 4 deletions paddle/fluid/distributed/test/ctr_accessor_test.cc
@@ -79,7 +79,7 @@ TEST(downpour_feature_value_accessor_test, test_shrink) {

float* value = new float[acc->GetAccessorInfo().dim];
for (auto i = 0u; i < acc->GetAccessorInfo().dim; ++i) {
value[i] = i * 1.0;
value[i] = static_cast<float>(i) * 1.0;
}
ASSERT_TRUE(!acc->Shrink(value));

Expand All @@ -98,7 +98,7 @@ TEST(downpour_feature_value_accessor_test, test_save) {

float* value = new float[acc->GetAccessorInfo().dim];
for (auto i = 0u; i < acc->GetAccessorInfo().dim; ++i) {
value[i] = i * 1.0;
value[i] = static_cast<float>(i) * 1.0;
}

// save all feature
@@ -166,7 +166,7 @@ TEST(downpour_feature_value_accessor_test, test_update) {
for (auto i = 0u; i < item_size; ++i) {
float* p = new float[acc->GetAccessorInfo().update_dim];
for (auto j = 0u; j < acc->GetAccessorInfo().update_dim; ++j) {
p[j] = i + 1;
p[j] = static_cast<float>(i) + 1.0;
}
grad[i] = p;
}
@@ -288,7 +288,7 @@ TEST(downpour_feature_value_accessor_test, test_string_related) {
const int field_size = 15;
float* value = new float[field_size];
for (auto i = 0u; i < field_size; ++i) {
value[i] = i;
value[i] = static_cast<float>(i);
}

auto str = acc->ParseToString(value, 0);
3 changes: 2 additions & 1 deletion paddle/fluid/framework/downpour_lite_worker.cc
@@ -410,7 +410,8 @@ void DownpourLiteWorker::TrainFilesWithProfiler() {
fprintf(stderr,
"push dense time percent: %f\n",
push_dense_time / total_time * 100);
fprintf(stderr, "%6.2f instances/s\n", total_inst / total_time);
fprintf(
stderr, "%6.2f instances/s\n", total_inst / total_time); // NOLINT
}
}
timeline.Start();
5 changes: 3 additions & 2 deletions paddle/fluid/framework/downpour_worker.cc
@@ -334,8 +334,9 @@ void DownpourWorker::AdjustInsWeight() {
}
float ins_weight = 1.0;
if (nid_show >= 0 && nid_show < nid_adjw_threshold) {
ins_weight = log(M_E + (nid_adjw_threshold - nid_show) /
nid_adjw_threshold * nid_adjw_ratio);
ins_weight = static_cast<float>(
log(M_E + (nid_adjw_threshold - nid_show) / nid_adjw_threshold *
nid_adjw_ratio));
// count nid adjw insnum and weight
++nid_adjw_num;
nid_adjw_weight += ins_weight;
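
The weight above is computed in double (std::log over double operands) and narrowed to float only once, by wrapping the whole expression in static_cast<float>, so the intermediate math keeps full precision. A standalone sketch of the same shape (function and parameter names are illustrative, not Paddle's):

```cpp
#include <cmath>
#include <cstdio>

// Compute the adjusted weight in double, narrow once at the return.
// M_E (Euler's number) is POSIX; define it yourself on other toolchains.
float AdjustedWeight(float nid_show, float threshold, float ratio) {
  if (nid_show >= 0 && nid_show < threshold) {
    return static_cast<float>(
        std::log(M_E + (threshold - nid_show) / threshold * ratio));
  }
  return 1.0f;
}

int main() {
  std::printf("%f\n", AdjustedWeight(2.0f, 10.0f, 5.0f));
  return 0;
}
```
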
4 changes: 2 additions & 2 deletions paddle/fluid/framework/fleet/gloo_wrapper.cc
@@ -165,7 +165,7 @@ void HdfsStore::wait(const std::vector<std::string>& keys,
int32_t last_check_rank = -1;
for (size_t i = 0; i < check_key_status.size(); ++i) {
if (!check_key_status[i]) {
last_check_rank = i;
last_check_rank = static_cast<int32_t>(i);
break;
}
}
@@ -252,7 +252,7 @@ void ParallelConnectContext::connectFullMesh(
connect_threads[i].reset(new std::thread(
[&store, &transportContext, total_add_size, this](
size_t thread_idx, size_t thread_num) -> void {
for (int i = thread_idx; i < size; i += thread_num) {
for (int i = thread_idx; i < size; i += thread_num) { // NOLINT
if (i == rank) {
continue;
}
2 changes: 1 addition & 1 deletion paddle/fluid/framework/fleet/metrics.cc
@@ -301,7 +301,7 @@ void BasicAucCalculator::add_uid_unlock_data(double pred,
WuaucRecord record;
record.uid_ = uid;
record.label_ = label;
record.pred_ = pred;
record.pred_ = static_cast<float>(pred);
wuauc_records_.emplace_back(std::move(record));
}

4 changes: 2 additions & 2 deletions paddle/fluid/framework/ir/mkldnn/cpu_bfloat16_pass_tester.cc
@@ -73,9 +73,9 @@ void MainTest(const ProgramDesc& prog,
auto graph = std::make_unique<ir::Graph>(prog);
auto pass = PassRegistry::Instance().Get("cpu_bfloat16_pass");

int original_nodes_num = graph->Nodes().size();
int original_nodes_num = static_cast<int>(graph->Nodes().size());
graph.reset(pass->Apply(graph.release()));
int current_nodes_num = graph->Nodes().size();
int current_nodes_num = static_cast<int>(graph->Nodes().size());

int quantize_nodes_count = 0;
int dequantize_nodes_count = 0;
16 changes: 9 additions & 7 deletions paddle/fluid/framework/ir/mkldnn/cpu_quantize_pass.cc
@@ -94,8 +94,8 @@ void CPUQuantizePass::QuantizeInput(Graph* g,
"Var(%s) isn't the input of the %s operator.",
input_name,
op->Op()->Type()));
unsigned max = is_input_unsigned ? U8_MAX : S8_MAX;
float scale = scale_to_one * max;
unsigned max = is_input_unsigned ? U8_MAX : S8_MAX; // NOLINT
float scale = static_cast<float>(scale_to_one) * max;

// Create quantize output variable
VarDesc quantize_out_desc(patterns::PDNodeName("quantize", "out"));
@@ -175,12 +175,13 @@ void CPUQuantizePass::QuantizeInputs(Graph* g,

double scale_out = GetScaleValueForNode(output);
unsigned max = are_inputs_unsigned ? U8_MAX : S8_MAX;
float scale = scale_out * max;
float scale = static_cast<float>(scale_out) * max;

for (size_t var_id = 0; var_id < unique_var_names.size(); var_id++) {
auto index = -1;
for (size_t it = 0; it < inputs.size(); it++) {
if (inputs[it]->Name() == unique_var_names[var_id]) index = it;
if (inputs[it]->Name() == unique_var_names[var_id])
index = static_cast<int>(it);
}

if (index == -1) {
@@ -249,7 +250,7 @@ void CPUQuantizePass::DequantizeOutput(Graph* g,
output_name,
op->Op()->Type()));
unsigned max = is_unsigned ? U8_MAX : S8_MAX;
float scale = scale_to_one * max;
float scale = static_cast<float>(scale_to_one) * max;

// Create dequantize input variable
VarDesc dequantize_in_desc(patterns::PDNodeName("dequantize", "in"));
@@ -298,12 +299,13 @@ void CPUQuantizePass::DequantizeOutputs(Graph* g,
std::vector<Node*> dequantize_in_nodes(outputs.size());

unsigned max = is_unsigned ? U8_MAX : S8_MAX;
float scale = scale_to_one * max;
float scale = static_cast<float>(scale_to_one) * max;

for (size_t var_id = 0; var_id < var_names.size(); var_id++) {
auto index = -1;
for (size_t it = 0; it < outputs.size(); it++) {
if (outputs[it]->Name() == var_names[var_id]) index = it;
if (outputs[it]->Name() == var_names[var_id])
index = static_cast<int>(it);
}

if (index == -1) {
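
Both QuantizeInputs and DequantizeOutputs locate a variable name in a node list with a size_t loop counter but record the match in a signed index, keeping -1 as the not-found sentinel, hence the explicit narrowing cast on assignment. A minimal sketch of the pattern, assuming the container stays below INT_MAX elements:

```cpp
#include <cstddef>
#include <string>
#include <vector>

// Return the position of `name` in `items`, or -1 if absent.
// The cast is safe while items.size() stays below INT_MAX.
int FindIndex(const std::vector<std::string>& items, const std::string& name) {
  int index = -1;
  for (size_t it = 0; it < items.size(); it++) {
    if (items[it] == name) index = static_cast<int>(it);
  }
  return index;
}

int main() {
  std::vector<std::string> names = {"x", "y", "z"};
  return FindIndex(names, "y");  // returns 1
}
```
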
@@ -137,7 +137,7 @@ void FuseQuantTranspose2DequantOneDNNPass::FuseTranspose2Dequantize(
dequant_op->Op()->HasAttr("Scale")
? PADDLE_GET_CONST(float, dequant_op->Op()->GetAttr("Scale"))
: 1;
float reorder_scale = 1.0 / scale;
float reorder_scale = static_cast<float>(1.0) / scale;
float shift =
dequant_op->Op()->HasAttr("Shift")
? PADDLE_GET_CONST(float, dequant_op->Op()->GetAttr("Shift"))
3 changes: 2 additions & 1 deletion paddle/fluid/framework/ir/trt_skip_layernorm_fuse_pass.cc
@@ -218,7 +218,8 @@ void TrtSkipLayerNormFusePass::ApplyImpl(ir::Graph *graph) const {
}
new_desc.SetAttr("begin_norm_axis", begin_norm_axis);
}
int32_t hidden_size = layer_norm_scale->Var()->GetShape()[0];
int32_t hidden_size =
static_cast<int32_t>(layer_norm_scale->Var()->GetShape()[0]);
new_desc.SetAttr("hidden_size", hidden_size);

auto fused_node = graph->CreateOpNode(&new_desc); // OpDesc will be copied.
@@ -71,7 +71,7 @@ std::vector<std::string> IOVarsFilter(const std::vector<Node*>& nodes) {

void StrToBinaryFile(const std::string& path, const std::string& str) {
std::ofstream file(path.c_str(), std::ios::binary);
file.write(str.c_str(), str.size());
file.write(str.c_str(), str.size()); // NOLINT
file.close();
}

8 changes: 5 additions & 3 deletions paddle/fluid/inference/api/analysis_config.cc
@@ -1217,11 +1217,13 @@ float AnalysisConfig::fraction_of_gpu_memory_for_pool() const {
size_t gpu_total, gpu_available;
platform::SetDeviceId(gpu_device_id_);
platform::GpuMemoryUsage(&gpu_available, &gpu_total);
double total_gpu_memory = gpu_total / 1024. / 1024.;
double total_gpu_memory = static_cast<double>(gpu_total) / 1024. / 1024.;
float fraction_of_gpu_memory =
static_cast<double>(memory_pool_init_size_mb()) / total_gpu_memory;
static_cast<float>(memory_pool_init_size_mb()) /
static_cast<float>(total_gpu_memory);
VLOG(3) << "total_gpu_memory is " << total_gpu_memory
<< "M, gpu_available is " << gpu_available / 1024. / 1024.
<< "M, gpu_available is "
<< static_cast<double>(gpu_available) / 1024. / 1024.
<< "M, memory_pool_init_size is " << memory_pool_init_size_mb()
<< "M.";
return fraction_of_gpu_memory;
2 changes: 1 addition & 1 deletion paddle/fluid/memory/allocation/cuda_managed_allocator.cc
@@ -65,7 +65,7 @@ phi::Allocation* CUDAManagedAllocator::AllocateImpl(size_t size) {

std::string err_msg;
if (UNLIKELY(is_limited)) {
int64_t limit_size_mb = limit_size >> 20;
int64_t limit_size_mb = limit_size >> 20; // NOLINT
err_msg = string::Sprintf(
"Or set environment variable `FLAGS_gpu_memory_limit_mb` to a larger "
"value. Currently `FLAGS_gpu_memory_limit_mb` is %d, so the maximum "
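
The >> 20 above divides a byte count by 2^20, i.e. converts bytes to mebibytes, and the NOLINT covers the unsigned-to-signed narrowing, which cannot overflow for any realistic memory limit. A small sketch of the idiom:

```cpp
#include <cstddef>
#include <cstdint>
#include <cstdio>

int main() {
  size_t limit_size = 512u << 20;            // 512 MiB, expressed in bytes
  int64_t limit_size_mb = limit_size >> 20;  // bytes -> MiB (divide by 2^20)
  std::printf("limit: %lld MB\n", static_cast<long long>(limit_size_mb));
  return 0;
}
```
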
3 changes: 2 additions & 1 deletion paddle/fluid/memory/allocation/system_allocator.cc
@@ -208,7 +208,8 @@ void* CUDAPinnedAllocator::Alloc(size_t* index, size_t size) {
if (size > usable) {
LOG(WARNING) << "Cannot malloc " << size / 1024.0 / 1024.0
<< " MB pinned memory."
<< ", available " << usable / 1024.0 / 1024.0 << " MB";
<< ", available " << usable / 1024.0 / 1024.0
<< " MB"; // NOLINT
return nullptr;
}

2 changes: 1 addition & 1 deletion paddle/fluid/operators/fused/resnet_unit_op.cc
@@ -27,7 +27,7 @@ static framework::DDim GetBitmaskDims(std::vector<int> out_shape) {
std::multiplies<int>()) / // NOLINT
c;
int32_t c_int32_elems = ((c + 63) & ~63) / 32;
int32_t nhw_int32_elems = ((nhw + 31) & ~31);
int32_t nhw_int32_elems = static_cast<int32_t>(((nhw + 31) & ~31));
std::vector<int> bitmask_shape = {nhw_int32_elems, c_int32_elems, 1};
return common::make_ddim(bitmask_shape);
}
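
GetBitmaskDims rounds c up to a multiple of 64 and nhw up to a multiple of 32 using the usual power-of-two trick: (x + k - 1) & ~(k - 1) overshoots by at most k - 1, then masks the low bits back down to the boundary. A worked standalone sketch with illustrative values:

```cpp
#include <cstdint>
#include <cstdio>

// Round x up to the next multiple of a power-of-two k: adding k-1
// overshoots into the next block unless x is already aligned, and
// masking with ~(k-1) clears the low bits back to the boundary.
int64_t RoundUp(int64_t x, int64_t k) { return (x + k - 1) & ~(k - 1); }

int main() {
  int64_t c = 70, nhw = 100;
  int32_t c_int32_elems = static_cast<int32_t>(RoundUp(c, 64) / 32);  // 4
  int32_t nhw_int32_elems = static_cast<int32_t>(RoundUp(nhw, 32));   // 128
  std::printf("%d %d\n", c_int32_elems, nhw_int32_elems);
  return 0;
}
```
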
6 changes: 3 additions & 3 deletions paddle/fluid/operators/mkldnn/reshape_mkldnn_op.cc
@@ -190,7 +190,7 @@ class ReshapeMKLDNNKernel : public framework::OpKernel<T> {
"be -1. But received shape = [%s], shape[%d] is also -1.",
common::make_ddim(shape),
i));
unk_dim_idx = i;
unk_dim_idx = static_cast<int>(i);
} else if (shape[i] == copy_dim_val) {
PADDLE_ENFORCE_LT(
static_cast<int>(i),
@@ -217,9 +217,9 @@ class ReshapeMKLDNNKernel : public framework::OpKernel<T> {
shape[i]));
}

capacity *= (shape[i] ? shape[i] : in_dims[i]);
capacity *= (shape[i] ? shape[i] : in_dims[i]); // NOLINT
output_shape[i] =
(shape[i] ? static_cast<int64_t>(shape[i]) : in_dims[i]);
(shape[i] ? static_cast<int64_t>(shape[i]) : in_dims[i]); // NOLINT
}

if (unk_dim_idx != -1) {
@@ -232,7 +232,7 @@ int OpYamlInfoParser::GetTensorParamIndexByArgsName(
kernel_fn_tensor_params_.end(),
args_name);
if (iter != kernel_fn_tensor_params_.end()) {
return std::distance(kernel_fn_tensor_params_.begin(), iter);
return std::distance(kernel_fn_tensor_params_.begin(), iter); // NOLINT
} else {
return -1;
}
4 changes: 2 additions & 2 deletions paddle/fluid/platform/gen_comm_id_helper.cc
@@ -82,7 +82,7 @@ static int SocketSend(int fd, const char* buffer, int size) {
int offset = 0;
int bytes = 0;
while (offset < size) {
bytes = send(fd, buffer + offset, size - offset, 0);
bytes = send(fd, buffer + offset, size - offset, 0); // NOLINT
if (bytes == -1) {
if (errno != EINTR && errno != EWOULDBLOCK && errno != EAGAIN) {
// send failed
@@ -100,7 +100,7 @@ static int SocketRecv(int fd, char* buffer, int size) {
int offset = 0;
int bytes = 0;
while (offset < size) {
bytes = recv(fd, buffer + offset, size - offset, 0);
bytes = recv(fd, buffer + offset, size - offset, 0); // NOLINT
if (bytes == 0) {
// closed by client, maybe probing alive client
return 0;
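
Both helpers loop because send() and recv() may transfer fewer bytes than requested; the NOLINT covers the int length expression passed to the size_t parameter. A hedged sketch of the send side (POSIX sockets; the return convention is illustrative, simplified from the helper above):

```cpp
#include <cerrno>
#include <sys/socket.h>
#include <sys/types.h>

// Send exactly `size` bytes, retrying partial writes and benign errnos.
// Returns 1 on success, 0 on a hard failure.
static int SocketSendAll(int fd, const char* buffer, int size) {
  int offset = 0;
  while (offset < size) {
    // size - offset is int; send() takes size_t, hence the narrowing the
    // original suppresses. Both operands are small and non-negative here.
    ssize_t bytes = send(fd, buffer + offset, size - offset, 0);
    if (bytes == -1) {
      if (errno != EINTR && errno != EWOULDBLOCK && errno != EAGAIN) return 0;
      continue;  // transient error, retry
    }
    offset += static_cast<int>(bytes);
  }
  return 1;
}
```
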
11 changes: 7 additions & 4 deletions paddle/fluid/platform/profiler/utils.cc
@@ -106,7 +106,8 @@ float CalculateEstOccupancy(uint32_t DeviceId,
float occupancy = 0.0;
std::vector<int> device_ids = GetSelectedDevices();
if (DeviceId < device_ids.size()) {
const gpuDeviceProp& device_property = GetDeviceProperties(DeviceId);
const gpuDeviceProp& device_property =
GetDeviceProperties(static_cast<int>(DeviceId));
cudaOccFuncAttributes occFuncAttr;
occFuncAttr.maxThreadsPerBlock = INT_MAX;
occFuncAttr.numRegs = RegistersPerThread;
@@ -127,11 +128,13 @@ float CalculateEstOccupancy(uint32_t DeviceId,
blockSize,
dynamicSmemSize);
if (status == CUDA_OCC_SUCCESS) {
if (occ_result.activeBlocksPerMultiprocessor < BlocksPerSm) {
BlocksPerSm = occ_result.activeBlocksPerMultiprocessor;
if (static_cast<float>(occ_result.activeBlocksPerMultiprocessor) <
BlocksPerSm) {
BlocksPerSm =
static_cast<float>(occ_result.activeBlocksPerMultiprocessor);
}
occupancy =
BlocksPerSm * blockSize /
BlocksPerSm * static_cast<float>(blockSize) /
static_cast<float>(device_property.maxThreadsPerMultiProcessor);
} else {
LOG(WARNING) << "Failed to calculate estimated occupancy, status = "
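
The estimated occupancy is active blocks per SM times threads per block, divided by the SM's maximum resident threads; the casts make the division happen in float rather than integer arithmetic. A standalone sketch of the formula (names are illustrative):

```cpp
#include <cstdio>

// Fraction of an SM's thread slots filled by the kernel's resident blocks.
float EstOccupancy(int active_blocks_per_sm, int block_size,
                   int max_threads_per_sm) {
  return static_cast<float>(active_blocks_per_sm) *
         static_cast<float>(block_size) /
         static_cast<float>(max_threads_per_sm);
}

int main() {
  // 8 resident blocks of 128 threads on an SM with 2048 slots -> 0.5
  std::printf("%f\n", EstOccupancy(8, 128, 2048));
  return 0;
}
```
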
6 changes: 3 additions & 3 deletions paddle/fluid/pybind/eager_utils.cc
@@ -518,7 +518,7 @@ std::vector<int64_t> CastPyArg2VectorOfInt64(PyObject* obj, size_t arg_pos) {
} else if (obj == Py_None) {
return {};
} else if (PyObject_CheckLongOrConvertToLong(&obj)) {
return {static_cast<int64_t>(PyLong_AsLong(obj))};
return {static_cast<int64_t>(PyLong_AsLong(obj))}; // NOLINT
} else {
PADDLE_THROW(platform::errors::InvalidType(
"argument (position %d) must be "
@@ -566,7 +566,7 @@ std::vector<size_t> CastPyArg2VectorOfSize_t(PyObject* obj, size_t arg_pos) {
} else if (obj == Py_None) {
return {};
} else if (PyObject_CheckLongOrConvertToLong(&obj)) {
return {PyLong_AsSize_t(obj)};
return {PyLong_AsSize_t(obj)}; // NOLINT
} else {
PADDLE_THROW(platform::errors::InvalidType(
"argument (position %d) must be "
@@ -614,7 +614,7 @@ std::vector<float> CastPyArg2VectorOfFloat(PyObject* obj, size_t arg_pos) {
} else if (obj == Py_None) {
return {};
} else if (PyObject_CheckFloatOrConvertToFloat(&obj)) {
return {static_cast<float>(PyFloat_AsDouble(obj))};
return {static_cast<float>(PyFloat_AsDouble(obj))}; // NOLINT
} else {
PADDLE_THROW(platform::errors::InvalidType(
"argument (position %d) must be "
5 changes: 3 additions & 2 deletions paddle/fluid/pybind/imperative.cc
@@ -1357,8 +1357,9 @@ void BindImperative(py::module *m_ptr) {
auto *index_data = index_tensor.data<int64_t>();
auto *buffer_data =
buffer_tensor->mutable_data<float>(buffer_tensor->place());
const int &slice_size = src_tensor.numel() / src_tensor.dims()[0];
const int &copy_bytes = slice_size * sizeof(float);
const int &slice_size =
static_cast<int>(src_tensor.numel()) / src_tensor.dims()[0];
const int &copy_bytes = static_cast<int>(slice_size) * sizeof(float);
int64_t c = 0;
for (int64_t i = 0; i < index_tensor.numel(); i++) {
std::memcpy(buffer_data + c * slice_size,
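
The binding gathers rows of a 2-D float tensor by index: slice_size is the element count of one row (numel / dims[0]) and copy_bytes its byte width, with one memcpy per selected row. A standalone sketch of the same pattern over plain vectors (container types are illustrative, not the pybind API):

```cpp
#include <cstdint>
#include <cstring>
#include <vector>

// Gather rows of a [rows x slice_size] float matrix by index.
std::vector<float> GatherRows(const std::vector<float>& src, int64_t rows,
                              const std::vector<int64_t>& index) {
  const int slice_size =
      static_cast<int>(static_cast<int64_t>(src.size()) / rows);
  const int copy_bytes = slice_size * static_cast<int>(sizeof(float));
  std::vector<float> out(index.size() * slice_size);
  int64_t c = 0;
  for (int64_t i = 0; i < static_cast<int64_t>(index.size()); i++) {
    std::memcpy(out.data() + c * slice_size,
                src.data() + index[i] * slice_size, copy_bytes);
    ++c;
  }
  return out;
}

int main() {
  std::vector<float> src = {0, 1, 2, 3, 4, 5};  // 3 rows x 2 cols
  auto out = GatherRows(src, 3, {2, 0});        // select rows 2 and 0
  return out[0] == 4.0f ? 0 : 1;
}
```
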
8 changes: 4 additions & 4 deletions paddle/phi/api/profiler/device_tracer.cc
@@ -571,10 +571,10 @@ class DeviceTracerImpl : public DeviceTracer {
Event *e = c->second;
Event *parent = e->parent();
while (parent) {
parent->AddCudaElapsedTime(r.start_ns, r.end_ns);
parent->AddCudaElapsedTime(r.start_ns, r.end_ns); // NOLINT
parent = parent->parent();
}
e->AddCudaElapsedTime(r.start_ns, r.end_ns);
e->AddCudaElapsedTime(r.start_ns, r.end_ns); // NOLINT
}
}
for (const auto &r : mem_records_) {
@@ -583,10 +583,10 @@ class DeviceTracerImpl : public DeviceTracer {
Event *e = c->second;
Event *parent = e->parent();
while (parent) {
parent->AddCudaElapsedTime(r.start_ns, r.end_ns);
parent->AddCudaElapsedTime(r.start_ns, r.end_ns); // NOLINT
parent = parent->parent();
}
e->AddCudaElapsedTime(r.start_ns, r.end_ns);
e->AddCudaElapsedTime(r.start_ns, r.end_ns); // NOLINT
}
}
#endif
2 changes: 1 addition & 1 deletion paddle/phi/api/profiler/profiler.cc
@@ -77,7 +77,7 @@ double Event::CpuElapsedMs(const Event &e) const {

double Event::CudaElapsedMs(const Event &e) const {
#ifdef PADDLE_WITH_CUPTI
return gpu_ns_ / 1000000.0;
return static_cast<double>(gpu_ns_) / 1000000.0;
#else
LOG_FIRST_N(WARNING, 1) << "CUDA CUPTI is not enabled";
return 0;
6 changes: 3 additions & 3 deletions paddle/phi/backends/device_base.cc
@@ -215,9 +215,9 @@ size_t DeviceInterface::AllocSize(size_t dev_id, bool realloc) {
size_t flag_mb = realloc ? FLAGS_reallocate_gpu_memory_in_mb
: FLAGS_initial_gpu_memory_in_mb;
size_t alloc_bytes =
(flag_mb > 0ul
? flag_mb << 20
: available_to_alloc * FLAGS_fraction_of_gpu_memory_to_use);
(flag_mb > 0ul ? flag_mb << 20
: available_to_alloc *
FLAGS_fraction_of_gpu_memory_to_use); // NOLINT
PADDLE_ENFORCE_GE(available_to_alloc,
alloc_bytes,
phi::errors::ResourceExhausted(