diff --git a/paddle/fluid/distributed/collective/process_group_nccl.cc b/paddle/fluid/distributed/collective/process_group_nccl.cc
index 82e95204590bd..f38fe1207c199 100644
--- a/paddle/fluid/distributed/collective/process_group_nccl.cc
+++ b/paddle/fluid/distributed/collective/process_group_nccl.cc
@@ -528,7 +528,9 @@ std::shared_ptr<ProcessGroup::Task> ProcessGroupNCCL::Gather(
     size_t offset = 0;
     size_t numel = out_tensor->numel() / size_;
     for (auto i = 0; i < size_; i++) {
-      partial_tensors.push_back(GetPartialTensor(*out_tensor, offset, numel));
+      partial_tensors.push_back(GetPartialTensor(*out_tensor,
+                                                 static_cast<int64_t>(offset),
+                                                 static_cast<int64_t>(numel)));
       offset += numel;
     }
   }
diff --git a/paddle/fluid/distributed/test/ctr_accessor_test.cc b/paddle/fluid/distributed/test/ctr_accessor_test.cc
index 9b71e4524625c..0288a93d71a96 100644
--- a/paddle/fluid/distributed/test/ctr_accessor_test.cc
+++ b/paddle/fluid/distributed/test/ctr_accessor_test.cc
@@ -79,7 +79,7 @@ TEST(downpour_feature_value_accessor_test, test_shrink) {
   float* value = new float[acc->GetAccessorInfo().dim];
   for (auto i = 0u; i < acc->GetAccessorInfo().dim; ++i) {
-    value[i] = i * 1.0;
+    value[i] = static_cast<float>(i) * 1.0;
   }
   ASSERT_TRUE(!acc->Shrink(value));
@@ -98,7 +98,7 @@ TEST(downpour_feature_value_accessor_test, test_save) {
   float* value = new float[acc->GetAccessorInfo().dim];
   for (auto i = 0u; i < acc->GetAccessorInfo().dim; ++i) {
-    value[i] = i * 1.0;
+    value[i] = static_cast<float>(i) * 1.0;
   }
   // save all feature
@@ -166,7 +166,7 @@ TEST(downpour_feature_value_accessor_test, test_update) {
   for (auto i = 0u; i < item_size; ++i) {
     float* p = new float[acc->GetAccessorInfo().update_dim];
     for (auto j = 0u; j < acc->GetAccessorInfo().update_dim; ++j) {
-      p[j] = i + 1;
+      p[j] = static_cast<float>(i) + 1.0;
     }
     grad[i] = p;
   }
@@ -288,7 +288,7 @@ TEST(downpour_feature_value_accessor_test, test_string_related) {
   const int field_size = 15;
   float* value = new float[field_size];
   for (auto i = 0u; i < field_size; ++i) {
-    value[i] = i;
+    value[i] = static_cast<float>(i);
   }
   auto str = acc->ParseToString(value, 0);
diff --git a/paddle/fluid/framework/downpour_lite_worker.cc b/paddle/fluid/framework/downpour_lite_worker.cc
index 3d453c018c1d5..e86856bf1b2ff 100644
--- a/paddle/fluid/framework/downpour_lite_worker.cc
+++ b/paddle/fluid/framework/downpour_lite_worker.cc
@@ -410,7 +410,8 @@ void DownpourLiteWorker::TrainFilesWithProfiler() {
         fprintf(stderr,
                 "push dense time percent: %f\n",
                 push_dense_time / total_time * 100);
-        fprintf(stderr, "%6.2f instances/s\n", total_inst / total_time);
+        fprintf(
+            stderr, "%6.2f instances/s\n", total_inst / total_time);  // NOLINT
       }
     }
     timeline.Start();
diff --git a/paddle/fluid/framework/downpour_worker.cc b/paddle/fluid/framework/downpour_worker.cc
index 6ce2967a08f1f..0d5bd66297c53 100644
--- a/paddle/fluid/framework/downpour_worker.cc
+++ b/paddle/fluid/framework/downpour_worker.cc
@@ -334,8 +334,9 @@ void DownpourWorker::AdjustInsWeight() {
     }
     float ins_weight = 1.0;
     if (nid_show >= 0 && nid_show < nid_adjw_threshold) {
-      ins_weight = log(M_E + (nid_adjw_threshold - nid_show) /
-                                 nid_adjw_threshold * nid_adjw_ratio);
+      ins_weight = static_cast<float>(
+          log(M_E + (nid_adjw_threshold - nid_show) / nid_adjw_threshold *
+                        nid_adjw_ratio));
       // count nid adjw insnum and weight
       ++nid_adjw_num;
       nid_adjw_weight += ins_weight;
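Note: the hunks above all address the same clang-tidy/`-Wconversion` pattern, an unsigned loop counter feeding float arithmetic. A minimal, self-contained illustration of the warning and the fix (not Paddle code; names invented):

```cpp
#include <cstdio>

int main() {
  float value[4];
  for (auto i = 0u; i < 4u; ++i) {
    // value[i] = i;               // implicit unsigned -> float conversion, warns
    value[i] = static_cast<float>(i);  // the explicit form the diff adopts
  }
  std::printf("%.1f %.1f %.1f %.1f\n", value[0], value[1], value[2], value[3]);
  return 0;
}
```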
diff --git a/paddle/fluid/framework/fleet/gloo_wrapper.cc b/paddle/fluid/framework/fleet/gloo_wrapper.cc
index 140de78bf01dc..3b96e73adefb9 100644
--- a/paddle/fluid/framework/fleet/gloo_wrapper.cc
+++ b/paddle/fluid/framework/fleet/gloo_wrapper.cc
@@ -165,7 +165,7 @@ void HdfsStore::wait(const std::vector<std::string>& keys,
     int32_t last_check_rank = -1;
     for (size_t i = 0; i < check_key_status.size(); ++i) {
       if (!check_key_status[i]) {
-        last_check_rank = i;
+        last_check_rank = static_cast<int32_t>(i);
         break;
       }
     }
@@ -252,7 +252,7 @@ void ParallelConnectContext::connectFullMesh(
     connect_threads[i].reset(new std::thread(
         [&store, &transportContext, total_add_size, this](
             size_t thread_idx, size_t thread_num) -> void {
-          for (int i = thread_idx; i < size; i += thread_num) {
+          for (int i = thread_idx; i < size; i += thread_num) {  // NOLINT
            if (i == rank) {
              continue;
            }
diff --git a/paddle/fluid/framework/fleet/metrics.cc b/paddle/fluid/framework/fleet/metrics.cc
index 58e1e195fbab7..5801860f66566 100644
--- a/paddle/fluid/framework/fleet/metrics.cc
+++ b/paddle/fluid/framework/fleet/metrics.cc
@@ -301,7 +301,7 @@ void BasicAucCalculator::add_uid_unlock_data(double pred,
   WuaucRecord record;
   record.uid_ = uid;
   record.label_ = label;
-  record.pred_ = pred;
+  record.pred_ = static_cast<float>(pred);
   wuauc_records_.emplace_back(std::move(record));
 }
diff --git a/paddle/fluid/framework/ir/mkldnn/cpu_bfloat16_pass_tester.cc b/paddle/fluid/framework/ir/mkldnn/cpu_bfloat16_pass_tester.cc
index dfd838895aeb4..951d064364ce3 100644
--- a/paddle/fluid/framework/ir/mkldnn/cpu_bfloat16_pass_tester.cc
+++ b/paddle/fluid/framework/ir/mkldnn/cpu_bfloat16_pass_tester.cc
@@ -73,9 +73,9 @@ void MainTest(const ProgramDesc& prog,
   auto graph = std::make_unique<ir::Graph>(prog);
   auto pass = PassRegistry::Instance().Get("cpu_bfloat16_pass");
-  int original_nodes_num = graph->Nodes().size();
+  int original_nodes_num = static_cast<int>(graph->Nodes().size());
   graph.reset(pass->Apply(graph.release()));
-  int current_nodes_num = graph->Nodes().size();
+  int current_nodes_num = static_cast<int>(graph->Nodes().size());
   int quantize_nodes_count = 0;
   int dequantize_nodes_count = 0;
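The tester change above is the classic `size_t`-to-`int` narrowing: `.size()` returns `size_t`, and storing it in an `int` warns. A hedged sketch of the same pattern, assuming (as the tester does) that the count fits in an `int`:

```cpp
#include <set>

// Explicit narrowing: the node count is known to fit in an int here.
int CountNodes(const std::set<int>& nodes) {
  return static_cast<int>(nodes.size());
}
```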
diff --git a/paddle/fluid/framework/ir/mkldnn/cpu_quantize_pass.cc b/paddle/fluid/framework/ir/mkldnn/cpu_quantize_pass.cc
index 2f1e7e8a53865..0e9c452455de3 100644
--- a/paddle/fluid/framework/ir/mkldnn/cpu_quantize_pass.cc
+++ b/paddle/fluid/framework/ir/mkldnn/cpu_quantize_pass.cc
@@ -94,8 +94,8 @@ void CPUQuantizePass::QuantizeInput(Graph* g,
                         "Var(%s) isn't the input of the %s operator.",
                         input_name,
                         op->Op()->Type()));
-  unsigned max = is_input_unsigned ? U8_MAX : S8_MAX;
-  float scale = scale_to_one * max;
+  unsigned max = is_input_unsigned ? U8_MAX : S8_MAX;  // NOLINT
+  float scale = static_cast<float>(scale_to_one) * max;
   // Create quantize output variable
   VarDesc quantize_out_desc(patterns::PDNodeName("quantize", "out"));
@@ -175,12 +175,13 @@ void CPUQuantizePass::QuantizeInputs(Graph* g,
   double scale_out = GetScaleValueForNode(output);
   unsigned max = are_inputs_unsigned ? U8_MAX : S8_MAX;
-  float scale = scale_out * max;
+  float scale = static_cast<float>(scale_out) * max;
   for (size_t var_id = 0; var_id < unique_var_names.size(); var_id++) {
     auto index = -1;
     for (size_t it = 0; it < inputs.size(); it++) {
-      if (inputs[it]->Name() == unique_var_names[var_id]) index = it;
+      if (inputs[it]->Name() == unique_var_names[var_id])
+        index = static_cast<int>(it);
     }
     if (index == -1) {
@@ -249,7 +250,7 @@ void CPUQuantizePass::DequantizeOutput(Graph* g,
                         output_name,
                         op->Op()->Type()));
   unsigned max = is_unsigned ? U8_MAX : S8_MAX;
-  float scale = scale_to_one * max;
+  float scale = static_cast<float>(scale_to_one) * max;
   // Create dequantize input variable
   VarDesc dequantize_in_desc(patterns::PDNodeName("dequantize", "in"));
@@ -298,12 +299,13 @@ void CPUQuantizePass::DequantizeOutputs(Graph* g,
   std::vector<Node*> dequantize_in_nodes(outputs.size());
   unsigned max = is_unsigned ? U8_MAX : S8_MAX;
-  float scale = scale_to_one * max;
+  float scale = static_cast<float>(scale_to_one) * max;
   for (size_t var_id = 0; var_id < var_names.size(); var_id++) {
     auto index = -1;
     for (size_t it = 0; it < outputs.size(); it++) {
-      if (outputs[it]->Name() == var_names[var_id]) index = it;
+      if (outputs[it]->Name() == var_names[var_id])
+        index = static_cast<int>(it);
     }
     if (index == -1) {
diff --git a/paddle/fluid/framework/ir/mkldnn/quant_transpose2_dequant_onednn_fuse_pass.cc b/paddle/fluid/framework/ir/mkldnn/quant_transpose2_dequant_onednn_fuse_pass.cc
index 09bebfaec99c3..b331cc996fffc 100644
--- a/paddle/fluid/framework/ir/mkldnn/quant_transpose2_dequant_onednn_fuse_pass.cc
+++ b/paddle/fluid/framework/ir/mkldnn/quant_transpose2_dequant_onednn_fuse_pass.cc
@@ -137,7 +137,7 @@ void FuseQuantTranspose2DequantOneDNNPass::FuseTranspose2Dequantize(
         dequant_op->Op()->HasAttr("Scale")
             ? PADDLE_GET_CONST(float, dequant_op->Op()->GetAttr("Scale"))
             : 1;
-    float reorder_scale = 1.0 / scale;
+    float reorder_scale = static_cast<float>(1.0) / scale;
     float shift =
         dequant_op->Op()->HasAttr("Shift")
             ? PADDLE_GET_CONST(float, dequant_op->Op()->GetAttr("Shift"))
diff --git a/paddle/fluid/framework/ir/trt_skip_layernorm_fuse_pass.cc b/paddle/fluid/framework/ir/trt_skip_layernorm_fuse_pass.cc
index 81f96f2fc33f4..0708218dbd07c 100644
--- a/paddle/fluid/framework/ir/trt_skip_layernorm_fuse_pass.cc
+++ b/paddle/fluid/framework/ir/trt_skip_layernorm_fuse_pass.cc
@@ -218,7 +218,8 @@ void TrtSkipLayerNormFusePass::ApplyImpl(ir::Graph *graph) const {
       }
       new_desc.SetAttr("begin_norm_axis", begin_norm_axis);
     }
-    int32_t hidden_size = layer_norm_scale->Var()->GetShape()[0];
+    int32_t hidden_size =
+        static_cast<int32_t>(layer_norm_scale->Var()->GetShape()[0]);
     new_desc.SetAttr("hidden_size", hidden_size);
     auto fused_node = graph->CreateOpNode(&new_desc);  // OpDesc will be copied.
diff --git a/paddle/fluid/inference/analysis/ir_passes/lite_subgraph_pass.cc b/paddle/fluid/inference/analysis/ir_passes/lite_subgraph_pass.cc
index ad95fe3091ce1..41cdbf3fcfd0f 100644
--- a/paddle/fluid/inference/analysis/ir_passes/lite_subgraph_pass.cc
+++ b/paddle/fluid/inference/analysis/ir_passes/lite_subgraph_pass.cc
@@ -71,7 +71,7 @@ std::vector<std::string> IOVarsFilter(const std::vector<Node*>& nodes) {
 void StrToBinaryFile(const std::string& path, const std::string& str) {
   std::ofstream file(path.c_str(), std::ios::binary);
-  file.write(str.c_str(), str.size());
+  file.write(str.c_str(), str.size());  // NOLINT
   file.close();
 }
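The quantize-pass hunks all compute an int8/uint8 scale from a double "scale to one" factor; the cast to float is what the diff makes explicit. An illustrative standalone version (U8_MAX/S8_MAX stand in for 255/127 as in the pass):

```cpp
#include <cstdio>

constexpr unsigned U8_MAX = 255, S8_MAX = 127;

float QuantScale(double scale_to_one, bool is_unsigned) {
  unsigned max = is_unsigned ? U8_MAX : S8_MAX;  // NOLINT
  return static_cast<float>(scale_to_one) * max;  // float * unsigned -> float
}

int main() {
  std::printf("%f\n", QuantScale(0.5, true));  // prints 127.500000
  return 0;
}
```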
diff --git a/paddle/fluid/inference/api/analysis_config.cc b/paddle/fluid/inference/api/analysis_config.cc
index 17841b8be5bad..3429fdfb71c1f 100644
--- a/paddle/fluid/inference/api/analysis_config.cc
+++ b/paddle/fluid/inference/api/analysis_config.cc
@@ -1217,11 +1217,13 @@ float AnalysisConfig::fraction_of_gpu_memory_for_pool() const {
   size_t gpu_total, gpu_available;
   platform::SetDeviceId(gpu_device_id_);
   platform::GpuMemoryUsage(&gpu_available, &gpu_total);
-  double total_gpu_memory = gpu_total / 1024. / 1024.;
+  double total_gpu_memory = static_cast<double>(gpu_total) / 1024. / 1024.;
   float fraction_of_gpu_memory =
-      static_cast<float>(memory_pool_init_size_mb()) / total_gpu_memory;
+      static_cast<float>(memory_pool_init_size_mb()) /
+      static_cast<float>(total_gpu_memory);
   VLOG(3) << "total_gpu_memory is " << total_gpu_memory
-          << "M, gpu_available is " << gpu_available / 1024. / 1024.
+          << "M, gpu_available is "
+          << static_cast<double>(gpu_available) / 1024. / 1024.
           << "M, memory_pool_init_size is " << memory_pool_init_size_mb()
           << "M.";
   return fraction_of_gpu_memory;
diff --git a/paddle/fluid/memory/allocation/cuda_managed_allocator.cc b/paddle/fluid/memory/allocation/cuda_managed_allocator.cc
index 77ca495cacbc7..36659fdbadce2 100644
--- a/paddle/fluid/memory/allocation/cuda_managed_allocator.cc
+++ b/paddle/fluid/memory/allocation/cuda_managed_allocator.cc
@@ -65,7 +65,7 @@ phi::Allocation* CUDAManagedAllocator::AllocateImpl(size_t size) {
   std::string err_msg;
   if (UNLIKELY(is_limited)) {
-    int64_t limit_size_mb = limit_size >> 20;
+    int64_t limit_size_mb = limit_size >> 20;  // NOLINT
    err_msg = string::Sprintf(
        "Or set environment variable `FLAGS_gpu_memory_limit_mb` to a larger "
        "value. Currently `FLAGS_gpu_memory_limit_mb` is %d, so the maximum "
diff --git a/paddle/fluid/memory/allocation/system_allocator.cc b/paddle/fluid/memory/allocation/system_allocator.cc
index 4ca1f21c563fc..8fd7967e9752d 100644
--- a/paddle/fluid/memory/allocation/system_allocator.cc
+++ b/paddle/fluid/memory/allocation/system_allocator.cc
@@ -208,7 +208,8 @@ void* CUDAPinnedAllocator::Alloc(size_t* index, size_t size) {
   if (size > usable) {
     LOG(WARNING) << "Cannot malloc " << size / 1024.0 / 1024.0
                  << " MB pinned memory."
-                 << ", available " << usable / 1024.0 / 1024.0 << " MB";
+                 << ", available " << usable / 1024.0 / 1024.0
+                 << " MB";  // NOLINT
     return nullptr;
   }
diff --git a/paddle/fluid/operators/fused/resnet_unit_op.cc b/paddle/fluid/operators/fused/resnet_unit_op.cc
index f1f2628119c15..5827cd3427dee 100644
--- a/paddle/fluid/operators/fused/resnet_unit_op.cc
+++ b/paddle/fluid/operators/fused/resnet_unit_op.cc
@@ -27,7 +27,7 @@ static framework::DDim GetBitmaskDims(std::vector<int> out_shape) {
                             std::multiplies<int>()) /  // NOLINT
             c;
   int32_t c_int32_elems = ((c + 63) & ~63) / 32;
-  int32_t nhw_int32_elems = ((nhw + 31) & ~31);
+  int32_t nhw_int32_elems = static_cast<int32_t>(((nhw + 31) & ~31));
   std::vector<int> bitmask_shape = {nhw_int32_elems, c_int32_elems, 1};
   return common::make_ddim(bitmask_shape);
 }
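The memory-reporting hunks share one idiom: dividing a `size_t` byte count by a double is an implicit conversion, so the diff casts first. A minimal sketch:

```cpp
#include <cstdio>

double BytesToMB(size_t bytes) {
  return static_cast<double>(bytes) / 1024. / 1024.;
}

int main() {
  std::printf("%.2f MB\n", BytesToMB(512ull << 20));  // 512.00 MB
  return 0;
}
```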
diff --git a/paddle/fluid/operators/mkldnn/reshape_mkldnn_op.cc b/paddle/fluid/operators/mkldnn/reshape_mkldnn_op.cc
index 5e5e2f8c19abe..05183431afd62 100644
--- a/paddle/fluid/operators/mkldnn/reshape_mkldnn_op.cc
+++ b/paddle/fluid/operators/mkldnn/reshape_mkldnn_op.cc
@@ -190,7 +190,7 @@ class ReshapeMKLDNNKernel : public framework::OpKernel<T> {
                 "be -1. But received shape = [%s], shape[%d] is also -1.",
                 common::make_ddim(shape),
                 i));
-        unk_dim_idx = i;
+        unk_dim_idx = static_cast<int>(i);
       } else if (shape[i] == copy_dim_val) {
         PADDLE_ENFORCE_LT(
             static_cast<int>(i),
@@ -217,9 +217,9 @@ class ReshapeMKLDNNKernel : public framework::OpKernel<T> {
                 shape[i]));
       }
-      capacity *= (shape[i] ? shape[i] : in_dims[i]);
+      capacity *= (shape[i] ? shape[i] : in_dims[i]);  // NOLINT
       output_shape[i] =
-          (shape[i] ? static_cast<int64_t>(shape[i]) : in_dims[i]);
+          (shape[i] ? static_cast<int64_t>(shape[i]) : in_dims[i]);  // NOLINT
     }
     if (unk_dim_idx != -1) {
diff --git a/paddle/fluid/pir/dialect/operator/utils/op_yaml_info_parser.cc b/paddle/fluid/pir/dialect/operator/utils/op_yaml_info_parser.cc
index 7f84eac85bdb8..41140053a22f0 100644
--- a/paddle/fluid/pir/dialect/operator/utils/op_yaml_info_parser.cc
+++ b/paddle/fluid/pir/dialect/operator/utils/op_yaml_info_parser.cc
@@ -232,7 +232,7 @@ int OpYamlInfoParser::GetTensorParamIndexByArgsName(
                        kernel_fn_tensor_params_.end(),
                        args_name);
   if (iter != kernel_fn_tensor_params_.end()) {
-    return std::distance(kernel_fn_tensor_params_.begin(), iter);
+    return std::distance(kernel_fn_tensor_params_.begin(), iter);  // NOLINT
   } else {
     return -1;
   }
diff --git a/paddle/fluid/platform/gen_comm_id_helper.cc b/paddle/fluid/platform/gen_comm_id_helper.cc
index 40d80f8ef2cbc..ab10f799f68d1 100644
--- a/paddle/fluid/platform/gen_comm_id_helper.cc
+++ b/paddle/fluid/platform/gen_comm_id_helper.cc
@@ -82,7 +82,7 @@ static int SocketSend(int fd, const char* buffer, int size) {
   int offset = 0;
   int bytes = 0;
   while (offset < size) {
-    bytes = send(fd, buffer + offset, size - offset, 0);
+    bytes = send(fd, buffer + offset, size - offset, 0);  // NOLINT
     if (bytes == -1) {
       if (errno != EINTR && errno != EWOULDBLOCK && errno != EAGAIN) {
         // send failed
@@ -100,7 +100,7 @@ static int SocketRecv(int fd, char* buffer, int size) {
   int offset = 0;
   int bytes = 0;
   while (offset < size) {
-    bytes = recv(fd, buffer + offset, size - offset, 0);
+    bytes = recv(fd, buffer + offset, size - offset, 0);  // NOLINT
     if (bytes == 0) {
       // closed by client, maybe probing alive client
       return 0;
diff --git a/paddle/fluid/platform/profiler/utils.cc b/paddle/fluid/platform/profiler/utils.cc
index 46a94e7fcb23c..17ff0e5cf85bb 100644
--- a/paddle/fluid/platform/profiler/utils.cc
+++ b/paddle/fluid/platform/profiler/utils.cc
@@ -106,7 +106,8 @@ float CalculateEstOccupancy(uint32_t DeviceId,
   float occupancy = 0.0;
   std::vector<int> device_ids = GetSelectedDevices();
   if (DeviceId < device_ids.size()) {
-    const gpuDeviceProp& device_property = GetDeviceProperties(DeviceId);
+    const gpuDeviceProp& device_property =
+        GetDeviceProperties(static_cast<int>(DeviceId));
     cudaOccFuncAttributes occFuncAttr;
     occFuncAttr.maxThreadsPerBlock = INT_MAX;
     occFuncAttr.numRegs = RegistersPerThread;
@@ -127,11 +128,13 @@ float CalculateEstOccupancy(uint32_t DeviceId,
                                blockSize,
                                dynamicSmemSize);
     if (status == CUDA_OCC_SUCCESS) {
-      if (occ_result.activeBlocksPerMultiprocessor < BlocksPerSm) {
-        BlocksPerSm = occ_result.activeBlocksPerMultiprocessor;
+      if (static_cast<float>(occ_result.activeBlocksPerMultiprocessor) <
+          BlocksPerSm) {
+        BlocksPerSm =
+            static_cast<float>(occ_result.activeBlocksPerMultiprocessor);
       }
       occupancy =
-          BlocksPerSm * blockSize /
+          BlocksPerSm * static_cast<float>(blockSize) /
           static_cast<float>(device_property.maxThreadsPerMultiProcessor);
     } else {
       LOG(WARNING) << "Failed to calculate estimated occupancy, status = "
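A compact sketch of the occupancy arithmetic above (names follow the diff, values invented): every integer operand is pulled into float explicitly before dividing, so nothing converts implicitly.

```cpp
#include <cstdio>

float EstOccupancy(int active_blocks, float blocks_per_sm, int block_size,
                   int max_threads_per_sm) {
  // Clamp to what the hardware can actually run concurrently.
  if (static_cast<float>(active_blocks) < blocks_per_sm) {
    blocks_per_sm = static_cast<float>(active_blocks);
  }
  return blocks_per_sm * static_cast<float>(block_size) /
         static_cast<float>(max_threads_per_sm);
}

int main() {
  std::printf("%.3f\n", EstOccupancy(4, 8.0f, 256, 2048));  // 0.500
  return 0;
}
```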
diff --git a/paddle/fluid/pybind/eager_utils.cc b/paddle/fluid/pybind/eager_utils.cc
index d613c008b4958..c6a2db061594b 100644
--- a/paddle/fluid/pybind/eager_utils.cc
+++ b/paddle/fluid/pybind/eager_utils.cc
@@ -518,7 +518,7 @@ std::vector<int64_t> CastPyArg2VectorOfInt64(PyObject* obj, size_t arg_pos) {
   } else if (obj == Py_None) {
     return {};
   } else if (PyObject_CheckLongOrConvertToLong(&obj)) {
-    return {static_cast<int64_t>(PyLong_AsLong(obj))};
+    return {static_cast<int64_t>(PyLong_AsLong(obj))};  // NOLINT
   } else {
     PADDLE_THROW(platform::errors::InvalidType(
         "argument (position %d) must be "
@@ -566,7 +566,7 @@ std::vector<size_t> CastPyArg2VectorOfSize_t(PyObject* obj, size_t arg_pos) {
   } else if (obj == Py_None) {
     return {};
   } else if (PyObject_CheckLongOrConvertToLong(&obj)) {
-    return {PyLong_AsSize_t(obj)};
+    return {PyLong_AsSize_t(obj)};  // NOLINT
   } else {
     PADDLE_THROW(platform::errors::InvalidType(
         "argument (position %d) must be "
@@ -614,7 +614,7 @@ std::vector<float> CastPyArg2VectorOfFloat(PyObject* obj, size_t arg_pos) {
   } else if (obj == Py_None) {
     return {};
   } else if (PyObject_CheckFloatOrConvertToFloat(&obj)) {
-    return {static_cast<float>(PyFloat_AsDouble(obj))};
+    return {static_cast<float>(PyFloat_AsDouble(obj))};  // NOLINT
   } else {
     PADDLE_THROW(platform::errors::InvalidType(
         "argument (position %d) must be "
diff --git a/paddle/fluid/pybind/imperative.cc b/paddle/fluid/pybind/imperative.cc
index c540fe0687d88..288a05d638b73 100644
--- a/paddle/fluid/pybind/imperative.cc
+++ b/paddle/fluid/pybind/imperative.cc
@@ -1357,8 +1357,9 @@ void BindImperative(py::module *m_ptr) {
             auto *index_data = index_tensor.data<int64_t>();
             auto *buffer_data =
                 buffer_tensor->mutable_data<float>(buffer_tensor->place());
-            const int &slice_size = src_tensor.numel() / src_tensor.dims()[0];
-            const int &copy_bytes = slice_size * sizeof(float);
+            const int &slice_size =
+                static_cast<int>(src_tensor.numel()) / src_tensor.dims()[0];
+            const int &copy_bytes = static_cast<int>(slice_size) * sizeof(float);
             int64_t c = 0;
             for (int64_t i = 0; i < index_tensor.numel(); i++) {
               std::memcpy(buffer_data + c * slice_size,
diff --git a/paddle/phi/api/profiler/device_tracer.cc b/paddle/phi/api/profiler/device_tracer.cc
index f15d6bbb88457..748eedff4ee6d 100644
--- a/paddle/phi/api/profiler/device_tracer.cc
+++ b/paddle/phi/api/profiler/device_tracer.cc
@@ -571,10 +571,10 @@ class DeviceTracerImpl : public DeviceTracer {
         Event *e = c->second;
         Event *parent = e->parent();
         while (parent) {
-          parent->AddCudaElapsedTime(r.start_ns, r.end_ns);
+          parent->AddCudaElapsedTime(r.start_ns, r.end_ns);  // NOLINT
           parent = parent->parent();
         }
-        e->AddCudaElapsedTime(r.start_ns, r.end_ns);
+        e->AddCudaElapsedTime(r.start_ns, r.end_ns);  // NOLINT
       }
     }
     for (const auto &r : mem_records_) {
@@ -583,10 +583,10 @@ class DeviceTracerImpl : public DeviceTracer {
         Event *e = c->second;
         Event *parent = e->parent();
         while (parent) {
-          parent->AddCudaElapsedTime(r.start_ns, r.end_ns);
+          parent->AddCudaElapsedTime(r.start_ns, r.end_ns);  // NOLINT
          parent = parent->parent();
        }
-        e->AddCudaElapsedTime(r.start_ns, r.end_ns);
+        e->AddCudaElapsedTime(r.start_ns, r.end_ns);  // NOLINT
       }
     }
 #endif
diff --git a/paddle/phi/api/profiler/profiler.cc b/paddle/phi/api/profiler/profiler.cc
index 6dc419658d3c2..e9c49741a5e6b 100644
--- a/paddle/phi/api/profiler/profiler.cc
+++ b/paddle/phi/api/profiler/profiler.cc
@@ -77,7 +77,7 @@ double Event::CpuElapsedMs(const Event &e) const {
 double Event::CudaElapsedMs(const Event &e) const {
 #ifdef PADDLE_WITH_CUPTI
-  return gpu_ns_ / 1000000.0;
+  return static_cast<double>(gpu_ns_) / 1000000.0;
 #else
   LOG_FIRST_N(WARNING, 1) << "CUDA CUPTI is not enabled";
   return 0;
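For the pybind hunks: `PyLong_AsLong` returns a C `long`, so packing its result into a `std::vector<int64_t>` needs an explicit widening cast (on LLP64 platforms `long` is 32-bit, which is why the diff annotates rather than changes behavior). A hedged sketch, with error handling omitted as in the original:

```cpp
#include <Python.h>
#include <cstdint>
#include <vector>

std::vector<int64_t> ToInt64Vector(PyObject* obj) {
  if (obj == Py_None) return {};
  // PyLong_AsLong yields long; widen explicitly to int64_t.
  return {static_cast<int64_t>(PyLong_AsLong(obj))};  // NOLINT
}
```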
diff --git a/paddle/phi/backends/device_base.cc b/paddle/phi/backends/device_base.cc
index f27919bef05fe..7860d322f1faa 100644
--- a/paddle/phi/backends/device_base.cc
+++ b/paddle/phi/backends/device_base.cc
@@ -215,9 +215,9 @@ size_t DeviceInterface::AllocSize(size_t dev_id, bool realloc) {
   size_t flag_mb = realloc ? FLAGS_reallocate_gpu_memory_in_mb
                            : FLAGS_initial_gpu_memory_in_mb;
   size_t alloc_bytes =
-      (flag_mb > 0ul
-           ? flag_mb << 20
-           : available_to_alloc * FLAGS_fraction_of_gpu_memory_to_use);
+      (flag_mb > 0ul ? flag_mb << 20
+                     : available_to_alloc *
+                           FLAGS_fraction_of_gpu_memory_to_use);  // NOLINT
   PADDLE_ENFORCE_GE(available_to_alloc,
                     alloc_bytes,
                     phi::errors::ResourceExhausted(
diff --git a/paddle/phi/backends/device_code.cc b/paddle/phi/backends/device_code.cc
index 670e0e3781598..e2016ff78b7c3 100644
--- a/paddle/phi/backends/device_code.cc
+++ b/paddle/phi/backends/device_code.cc
@@ -186,7 +186,8 @@ static std::string FindCUDAIncludePath() {
   }
   for (std::string suffix : {"/lib", "/lib64"}) {
     if (EndWith(FLAGS_cuda_dir, suffix)) {
-      cuda_include_path.erase(cuda_include_path.end() - suffix.length());
+      cuda_include_path.erase(cuda_include_path.end() -
+                              suffix.length());  // NOLINT
       break;
     }
   }
diff --git a/paddle/phi/backends/gpu/cuda/cuda_info.cc b/paddle/phi/backends/gpu/cuda/cuda_info.cc
index 0af1beb782fcf..505fc7f3f6cd6 100644
--- a/paddle/phi/backends/gpu/cuda/cuda_info.cc
+++ b/paddle/phi/backends/gpu/cuda/cuda_info.cc
@@ -28,7 +28,7 @@ namespace gpu {
 int DnnVersion() {
   if (!dynload::HasCUDNN()) return -1;
-  return dynload::cudnnGetVersion();
+  return dynload::cudnnGetVersion();  // NOLINT
 }
 static int GetGPUDeviceCountImpl() {
diff --git a/paddle/phi/backends/gpu/gpu_info.cc b/paddle/phi/backends/gpu/gpu_info.cc
index 96048de5c047c..32546f762c39e 100644
--- a/paddle/phi/backends/gpu/gpu_info.cc
+++ b/paddle/phi/backends/gpu/gpu_info.cc
@@ -66,7 +66,7 @@ size_t GpuAvailableMemToAlloc() {
   size_t available = 0;
   memory_utils::GpuMemoryUsage(&available, &total);
   size_t reserving =
-      static_cast<size_t>(fraction_reserve_gpu_memory * available);
+      static_cast<size_t>(fraction_reserve_gpu_memory * available);  // NOLINT
   // If available size is less than minimum chunk size, no usable memory exists
   size_t available_to_alloc = available - reserving;
   size_t min_chunk_size = GpuMinChunkSize();
diff --git a/paddle/phi/infermeta/binary.cc b/paddle/phi/infermeta/binary.cc
index fdef52a5fb6e1..ce47a88c420df 100644
--- a/paddle/phi/infermeta/binary.cc
+++ b/paddle/phi/infermeta/binary.cc
@@ -166,8 +166,8 @@ void ArrayReadInferMeta(const MetaTensor& array,
     out->set_dims({-1});
   } else {
     double index = i.to<double>();
-    out->set_dims(array.dims(index));
-    out->share_lod(array, index);
+    out->set_dims(array.dims(index));  // NOLINT
+    out->share_lod(array, index);      // NOLINT
   }
   out->set_dtype(array.dtype());
   out->set_layout(array.layout());
@@ -3557,8 +3557,8 @@ void WeightDequantizeInferMeta(const MetaTensor& x,
                           dim_scale[0],
                           (x.dims()[1] + (group_size - 1)) / group_size));
   }
-  int n = x.dims()[1];
-  int k = x.dims()[0];
+  int n = static_cast<int>(x.dims()[1]);
+  int k = static_cast<int>(x.dims()[0]);
   out->set_dims(common::make_ddim({n, k}));
   out->set_dtype(out_dtype);
 }
diff --git a/paddle/phi/infermeta/multiary.cc b/paddle/phi/infermeta/multiary.cc
index 978a80674272f..6bd8b0c14a1ca 100644
--- a/paddle/phi/infermeta/multiary.cc
+++ b/paddle/phi/infermeta/multiary.cc
@@ -4648,8 +4648,8 @@ void MaskedMultiheadAttentionInferMeta(const MetaTensor& x,
   int v_num_head = k_num_head;
   int dim_head = static_cast<int>(cache_kv.dims()[4]);
   // below's num_head is q's head actually.
-  int num_head =
-      x.dims()[x.dims().size() - 1] / dim_head - k_num_head - v_num_head;
+  int num_head = x.dims()[x.dims().size() - 1] / dim_head - k_num_head -
+                 v_num_head;  // NOLINT
   PADDLE_ENFORCE_EQ(
       num_head % k_num_head,
diff --git a/paddle/phi/infermeta/spmd_rules/elementwise.cc b/paddle/phi/infermeta/spmd_rules/elementwise.cc
index 3db396de8b613..d558dfa69b7b5 100644
--- a/paddle/phi/infermeta/spmd_rules/elementwise.cc
+++ b/paddle/phi/infermeta/spmd_rules/elementwise.cc
@@ -31,7 +31,7 @@ std::string GetInputBroadcastNotation(const std::vector<int64_t>& shape,
                                       const int max_ndim,
                                       const std::string& alphabet,
                                       std::vector<int>* broadcast_axis_count) {
-  int ndim = shape.size();
+  int ndim = static_cast<int>(shape.size());
   int start_dim = max_ndim - ndim;
   std::string axes_notation = GetBroadcastAxes(ndim, max_ndim, alphabet);
@@ -54,8 +54,8 @@ void GetBinaryNotations(const std::vector<int64_t>& x_shape,
                         std::string* x_axes,
                         std::string* y_axes,
                         std::string* out_axes) {
-  int x_ndim = x_shape.size();
-  int y_ndim = y_shape.size();
+  int x_ndim = static_cast<int>(x_shape.size());
+  int y_ndim = static_cast<int>(y_shape.size());
   int max_ndim = std::max(x_ndim, y_ndim);
   int ninputs = 2;
   std::string alphabet = "abcdefghijklmnopqrstuvwxyz";
@@ -82,7 +82,7 @@ void GetBinaryNotations(const std::vector<int64_t>& x_shape,
 SpmdInfo ElementwiseUnaryInferSpmd(const DistMetaTensor& x) {
   // Step0: Verify Input Args Based on Elementwise Logic
   auto x_shape = common::vectorize(x.dims());
-  int x_ndim = x_shape.size();
+  int x_ndim = static_cast<int>(x_shape.size());
   TensorDistAttr x_dist_attr_src = x.dist_attr();
   std::vector<int64_t> x_dims_mapping = x_dist_attr_src.dims_mapping();
   PADDLE_ENFORCE_EQ(x_ndim,
@@ -129,7 +129,7 @@ SpmdInfo ElementwiseUnaryInferSpmd(const DistMetaTensor& x) {
 SpmdInfo ElementwiseUnaryWithPartialInferSpmd(const DistMetaTensor& x) {
   // Step0: Verify Input Args Based on Elementwise Logic
   auto x_shape = common::vectorize(x.dims());
-  int x_ndim = x_shape.size();
+  int x_ndim = static_cast<int>(x_shape.size());
   TensorDistAttr x_dist_attr_src = x.dist_attr();
   std::vector<int64_t> x_dims_mapping = x_dist_attr_src.dims_mapping();
   PADDLE_ENFORCE_EQ(x_ndim,
@@ -177,9 +177,9 @@ SpmdInfo ElementwiseUnaryInferSpmdReverse(const DistMetaTensor& x,
                                           const DistMetaTensor& out) {
   // Step0: Verify Input Args Based on Elementwise Logic
   auto x_shape = common::vectorize(x.dims());
-  int x_ndim = x_shape.size();
+  int x_ndim = static_cast<int>(x_shape.size());
   auto out_shape = common::vectorize(out.dims());
-  int out_ndim = out_shape.size();
+  int out_ndim = static_cast<int>(out_shape.size());
   TensorDistAttr out_dist_attr_src = out.dist_attr();
   std::vector<int64_t> out_dims_mapping = out_dist_attr_src.dims_mapping();
   PADDLE_ENFORCE_EQ(
@@ -233,9 +233,9 @@ SpmdInfo ElementwiseBinaryInferSpmd(const DistMetaTensor& x,
                                     const DistMetaTensor& y) {
   // Step0: Verify Input Args Based on Elementwise Logic
   auto x_shape = common::vectorize(x.dims());
-  int x_ndim = x_shape.size();
+  int x_ndim = static_cast<int>(x_shape.size());
   auto y_shape = common::vectorize(y.dims());
-  int y_ndim = y_shape.size();
+  int y_ndim = static_cast<int>(y_shape.size());
   TensorDistAttr x_dist_attr_src = x.dist_attr();
   TensorDistAttr y_dist_attr_src = y.dist_attr();
   std::vector<int64_t> x_dims_mapping = x_dist_attr_src.dims_mapping();
@@ -303,11 +303,11 @@ SpmdInfo ElementwiseBinaryInferSpmdReverse(const DistMetaTensor& x,
                                            const DistMetaTensor& out) {
   // Step0: Verify Input Args Based on Elementwise Logic
   auto x_shape = common::vectorize(x.dims());
-  int x_ndim = x_shape.size();
+  int x_ndim = static_cast<int>(x_shape.size());
   auto y_shape = common::vectorize(y.dims());
-  int y_ndim = y_shape.size();
+  int y_ndim = static_cast<int>(y_shape.size());
   auto out_shape = common::vectorize(out.dims());
-  int out_ndim = out_shape.size();
+  int out_ndim = static_cast<int>(out_shape.size());
   int max_ndim = std::max(x_ndim, y_ndim);
   TensorDistAttr out_dist_attr = out.dist_attr();
   std::vector<int64_t> out_dims_mapping = out_dist_attr.dims_mapping();
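The SPMD rules repeatedly take `ndim` from a `size_t` container size and then do signed arithmetic like `max_ndim - ndim`, hence the explicit narrowing. A small self-contained sketch of the broadcast-notation idea (assumes `ndim <= max_ndim`, as the rules enforce):

```cpp
#include <cstdint>
#include <string>
#include <vector>

std::string BroadcastAxes(const std::vector<int64_t>& shape, int max_ndim,
                          const std::string& alphabet) {
  int ndim = static_cast<int>(shape.size());
  // Right-align this tensor's axes inside the max-rank notation,
  // e.g. rank 2 of max rank 3 over "abc..." yields "bc".
  return alphabet.substr(max_ndim - ndim, ndim);
}
```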
diff --git a/paddle/phi/infermeta/spmd_rules/reduction.cc b/paddle/phi/infermeta/spmd_rules/reduction.cc
index 608794d348541..ef5d93a04533e 100644
--- a/paddle/phi/infermeta/spmd_rules/reduction.cc
+++ b/paddle/phi/infermeta/spmd_rules/reduction.cc
@@ -71,7 +71,7 @@ SpmdInfo ReductionInferSpmdBase(const DistMetaTensor& x,
                                 int reduce_type) {
   // Step0: Verify input args based on reduction logic
   auto x_shape = common::vectorize(x.dims());
-  int x_ndim = x_shape.size();
+  int x_ndim = static_cast<int>(x_shape.size());
   auto x_dist_attr_src = x.dist_attr();
   std::vector<int64_t> x_dims_mapping = x_dist_attr_src.dims_mapping();
   PADDLE_ENFORCE_EQ(
@@ -175,8 +175,8 @@ SpmdInfo ReductionInferSpmdReverse(const DistMetaTensor& x,
   // Step0: Verify input args based on reduction logic
   auto x_shape = common::vectorize(x.dims());
   auto out_shape = common::vectorize(out.dims());
-  int x_ndim = x_shape.size();
-  int out_ndim = out_shape.size();
+  int x_ndim = static_cast<int>(x_shape.size());
+  int out_ndim = static_cast<int>(out_shape.size());
   auto out_dist_attr_src = out.dist_attr();
   std::vector<int64_t> out_dims_mapping = out_dist_attr_src.dims_mapping();
   PADDLE_ENFORCE_EQ(
@@ -240,7 +240,7 @@ SpmdInfo ReductionGradInferSpmd(const DistMetaTensor& x,
   for (size_t i = 0; i < axis_value.size(); ++i) {
     if (axis_value[i] < 0) {
-      axis_value[i] += x_dim.size();
+      axis_value[i] += x_dim.size();  // NOLINT
     }
   }
   std::sort(axis_value.begin(), axis_value.end());
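The `ReductionGradInferSpmd` hunk normalizes negative reduce axes; adding a `size_t` rank to a signed axis is the implicit conversion the NOLINT waves through. A fully explicit version of the same step:

```cpp
#include <cstdint>
#include <vector>

void NormalizeAxes(std::vector<int64_t>* axes, size_t rank) {
  for (auto& a : *axes) {
    if (a < 0) a += static_cast<int64_t>(rank);  // e.g. -1 -> rank - 1
  }
}
```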
diff --git a/paddle/phi/infermeta/spmd_rules/replicated.cc b/paddle/phi/infermeta/spmd_rules/replicated.cc
index 8d9c6d0d5be6c..390117862e04e 100644
--- a/paddle/phi/infermeta/spmd_rules/replicated.cc
+++ b/paddle/phi/infermeta/spmd_rules/replicated.cc
@@ -35,8 +35,8 @@ std::vector<int64_t> GetReplicatedDimsMapping(const int ndim) {
 SpmdInfo ReplicatedInferSpmd(const std::vector<const DistMetaTensor*>& ins,
                              const std::vector<const DistMetaTensor*>& outs) {
   // step1: Build Einsum Notation for input tensor's batch axis
-  int64_t ninputs = ins.size();
-  int64_t noutputs = outs.size();
+  int64_t ninputs = static_cast<int64_t>(ins.size());
+  int64_t noutputs = static_cast<int64_t>(outs.size());
   // Step2: Unshard Output's Dims Mapping.
   std::vector<TensorDistAttr> output_dist_attrs;
@@ -94,8 +94,8 @@ SpmdInfo ReplicatedInferSpmdReverse(
     const std::vector<const DistMetaTensor*>& ins,
     const std::vector<const DistMetaTensor*>& outs) {
   // step1: Build Einsum Notation for input tensor's batch axis
-  int64_t ninputs = ins.size();
-  int64_t noutputs = outs.size();
+  int64_t ninputs = static_cast<int64_t>(ins.size());
+  int64_t noutputs = static_cast<int64_t>(outs.size());
   // Step2: Unshard Output's Dims Mapping.
   std::vector<TensorDistAttr> output_dist_attrs;
@@ -145,7 +145,7 @@ SpmdInfo ReplicatedInferDynamic(
     const std::vector<paddle::variant<const DistMetaTensor*,
                                       const std::vector<DistMetaTensor>*>>&
         inputs) {
   std::vector<const DistMetaTensor*> nonnull_inputs;
-  int64_t ninputs = inputs.size();
+  int64_t ninputs = static_cast<int64_t>(inputs.size());
   SpmdInfo spmd_info;
   auto build_tensor_dist_attr =
diff --git a/paddle/phi/infermeta/spmd_rules/softmax.cc b/paddle/phi/infermeta/spmd_rules/softmax.cc
index d86db4d41ae23..b6f886a49468a 100644
--- a/paddle/phi/infermeta/spmd_rules/softmax.cc
+++ b/paddle/phi/infermeta/spmd_rules/softmax.cc
@@ -31,7 +31,7 @@ using phi::distributed::auto_parallel::str_join;
 SpmdInfo SoftmaxInferSpmd(const DistMetaTensor& x, int axis) {
   // Step0: Verify input args based on softmax logic
   auto x_shape = common::vectorize(x.dims());
-  int x_ndim = x_shape.size();
+  int x_ndim = static_cast<int>(x_shape.size());
   auto x_dist_attr_src = x.dist_attr();
   std::vector<int64_t> x_dims_mapping = x_dist_attr_src.dims_mapping();
   PADDLE_ENFORCE_EQ(
@@ -100,8 +100,8 @@ SpmdInfo SoftmaxInferSpmdReverse(const DistMetaTensor& x,
   // Step0: verify input args based on softmax logic
   auto x_shape = common::vectorize(x.dims());
   auto out_shape = common::vectorize(out.dims());
-  int x_ndim = x_shape.size();
-  int out_ndim = out_shape.size();
+  int x_ndim = static_cast<int>(x_shape.size());
+  int out_ndim = static_cast<int>(out_shape.size());
   auto out_dist_attr_src = out.dist_attr();
   std::vector<int64_t> out_dims_mapping = out_dist_attr_src.dims_mapping();
   PADDLE_ENFORCE_EQ(
diff --git a/paddle/phi/infermeta/spmd_rules/unsqueeze.cc b/paddle/phi/infermeta/spmd_rules/unsqueeze.cc
index cbb010fe6c6bf..b1a36fe76bb78 100644
--- a/paddle/phi/infermeta/spmd_rules/unsqueeze.cc
+++ b/paddle/phi/infermeta/spmd_rules/unsqueeze.cc
@@ -93,7 +93,7 @@ SpmdInfo UnsqueezeInferSpmd(const DistMetaTensor& x,
                             const std::vector<int64_t>& axis) {
   // Step0: Verify input args based on unsqueeze logic
   auto x_shape = common::vectorize(x.dims());
-  int x_ndim = x_shape.size();
+  int x_ndim = static_cast<int>(x_shape.size());
   auto x_dist_attr_src = x.dist_attr();
   std::vector<int64_t> x_dims_mapping = x_dist_attr_src.dims_mapping();
   PADDLE_ENFORCE_EQ(
@@ -162,9 +162,9 @@ SpmdInfo UnsqueezeInferSpmdReverse(const DistMetaTensor& x,
                                    const std::vector<int64_t>& axis) {
   // Step0: Verify input args based on unsqueeze logic
   auto x_shape = common::vectorize(x.dims());
-  int x_ndim = x_shape.size();
+  int x_ndim = static_cast<int>(x_shape.size());
   auto out_shape = common::vectorize(out.dims());
-  int out_ndim = out_shape.size();
+  int out_ndim = static_cast<int>(out_shape.size());
   auto out_dist_attr_src = out.dist_attr();
   std::vector<int64_t> out_dims_mapping = out_dist_attr_src.dims_mapping();
   PADDLE_ENFORCE_EQ(
@@ -217,7 +217,7 @@ SpmdInfo UnsqueezeInferSpmdReverse(const DistMetaTensor& x,
   VLOG(4) << "UnsqueezeInferSpmdReverse: Out shape: [" << str_join(out_shape)
           << "] X shape: [" << str_join(x_shape) << "]";
   VLOG(4) << "Transformation from output to input:";
-  for (int64_t i = 0, n = trans.size(); i < n; i++) {
+  for (int64_t i = 0, n = static_cast<int64_t>(trans.size()); i < n; i++) {
     std::shared_ptr<DimTrans> t = trans[i];
     VLOG(4) << "\tX axis[" << i << "]: " << t->to_string();
   }
diff --git a/paddle/phi/infermeta/spmd_rules/utils.cc b/paddle/phi/infermeta/spmd_rules/utils.cc
index b67d7bd251b1b..336924dd5e951 100644
--- a/paddle/phi/infermeta/spmd_rules/utils.cc
+++ b/paddle/phi/infermeta/spmd_rules/utils.cc
@@ -423,13 +423,14 @@ TensorDistAttr FromPlacements(
     auto& placement = placements[mesh_dim];
     if (placement->is_shard()) {
       auto shard_placement = std::dynamic_pointer_cast<Shard>(placement);
-      dims_mapping[shard_placement->get_axis()] = mesh_dim;
+      dims_mapping[shard_placement->get_axis()] =
+          static_cast<int64_t>(mesh_dim);
     }
     if (placement->is_partial()) {
       auto partial_placement = std::dynamic_pointer_cast<Partial>(placement);
       auto reduce_type = partial_placement->get_reduce_type();
-      partial_status[mesh_dim] = reduce_type;
+      partial_status[mesh_dim] = reduce_type;  // NOLINT
     }
   }
   dst_dist_attr.set_dims_mapping(dims_mapping);
@@ -470,7 +471,7 @@ std::vector<int64_t> GetLocalShape(
   for (size_t i = 0; i < n_placement; i++) {
     auto& placement = placements.at(i);
     if (placement->is_shard()) {
-      auto mesh_dim_size = mesh.dim_size(i);
+      auto mesh_dim_size = mesh.dim_size(i);  // NOLINT
       auto shard_dim =
           std::dynamic_pointer_cast<Shard>(placement)->get_axis();
       auto split_size =
diff --git a/paddle/phi/kernels/funcs/jit/gen/blas.cc b/paddle/phi/kernels/funcs/jit/gen/blas.cc
index 8c287efcf5ddd..1e29b7f4953fe 100644
--- a/paddle/phi/kernels/funcs/jit/gen/blas.cc
+++ b/paddle/phi/kernels/funcs/jit/gen/blas.cc
@@ -104,7 +104,7 @@ void VXXJitCode::genCode() {
       } else {
         vmovss(ptr[param3 + offset], xmm_dst);
       }
-      offset += sizeof(float) * block;
+      offset += sizeof(float) * block;  // NOLINT
       rest -= block;
     }
     ret();
diff --git a/paddle/phi/kernels/funcs/jit/gen/gru.cc b/paddle/phi/kernels/funcs/jit/gen/gru.cc
index 599564f431497..33dfaa6cd097c 100644
--- a/paddle/phi/kernels/funcs/jit/gen/gru.cc
+++ b/paddle/phi/kernels/funcs/jit/gen/gru.cc
@@ -39,7 +39,7 @@ void GRUJitCode::genCode() {
     vmovaps(ymm_one, ptr[reg_ptr_tmp + OFFSET_EXP_ONE]);
   }
   int offset = 0;
-  int d = num_ * sizeof(float);
+  int d = num_ * sizeof(float);  // NOLINT
   for (int i = 0; i < num_ / YMM_FLOAT_BLOCK; ++i) {
     ymm_t ymm_u = ymm_t(1);
     ymm_t ymm_r = ymm_t(2);
diff --git a/paddle/phi/kernels/funcs/jit/gen/lstm.cc b/paddle/phi/kernels/funcs/jit/gen/lstm.cc
index e22a5a2880dff..4943989a50c79 100644
--- a/paddle/phi/kernels/funcs/jit/gen/lstm.cc
+++ b/paddle/phi/kernels/funcs/jit/gen/lstm.cc
@@ -42,7 +42,7 @@ void LSTMJitCode::genCode() {
   }
   int offset = 0;
-  int d = num_ * sizeof(float);
+  int d = num_ * sizeof(float);  // NOLINT
   for (int i = 0; i < num_ / YMM_FLOAT_BLOCK; ++i) {
     /* gates: W_ch, W_ih, W_fh, W_oh */
     ymm_t ymm_c = ymm_t(0);
diff --git a/paddle/phi/kernels/fusion/onednn/fused_transpose_kernel.cc b/paddle/phi/kernels/fusion/onednn/fused_transpose_kernel.cc
index a7f9e49e32560..f8a2f4fe0201e 100644
--- a/paddle/phi/kernels/fusion/onednn/fused_transpose_kernel.cc
+++ b/paddle/phi/kernels/fusion/onednn/fused_transpose_kernel.cc
@@ -34,7 +34,7 @@ void SetInMemDescWithSqueeze2FuseSupport(
   int j = 0;
   for (size_t i = 0; i < x_vec_dims.size(); ++i) {
     if (squeeze2_axes_set.count(i) ||
-        squeeze2_axes_set.count(i - x_vec_dims.size())) {
+        squeeze2_axes_set.count(i - x_vec_dims.size())) {  // NOLINT
       PADDLE_ENFORCE_EQ(
           x_vec_dims[i],
           1,
@@ -68,7 +68,7 @@ void FusedTransposeKernel(const Context& dev_ctx,
   if ((x_dims.size() >= 3) &&
       (phi::OneDNNContext::tls().get_cur_paddle_data_layout() ==
        phi::DataLayout::kNHWC)) {
-    int axis_size = axis.size();
+    int axis_size = static_cast<int>(axis.size());
     std::vector<int> formated_axis = axis;
     std::vector<int> count(axis_size, 0);
     for (int i = 0; i < axis_size; i++) {
@@ -85,7 +85,7 @@ void FusedTransposeKernel(const Context& dev_ctx,
     phi::DDim out_dims(x_dims);
     for (size_t i = 0; i < axis.size(); i++) {
-      out_dims[i] = x_dims[formated_axis[i]];
+      out_dims[i] = x_dims[formated_axis[i]];  // NOLINT
     }
     out->Resize(out_dims);
   }
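The JIT hunks keep byte offsets in an `int` while `sizeof()` yields `size_t`, so `sizeof(float) * block` promotes to unsigned and narrows back into the accumulator; the NOLINTs accept that deliberately. An explicit-cast variant of the same bookkeeping:

```cpp
int AdvanceOffset(int offset, int block) {
  // Equivalent to offset += sizeof(float) * block, without the implicit
  // unsigned round-trip.
  offset += static_cast<int>(sizeof(float)) * block;
  return offset;
}
```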
diff --git a/paddle/phi/kernels/onednn/concat_grad_kernel.cc b/paddle/phi/kernels/onednn/concat_grad_kernel.cc
index fc36fa4ab0fd8..9563f73f0ba92 100644
--- a/paddle/phi/kernels/onednn/concat_grad_kernel.cc
+++ b/paddle/phi/kernels/onednn/concat_grad_kernel.cc
@@ -40,7 +40,7 @@ void ConcatGradKernel(const Context& dev_ctx,
   auto out_grad_vec_dims = common::vectorize(out_grad.dims());
-  axis = funcs::ComputeAxis(axis, out_grad_vec_dims.size());
+  axis = static_cast<int>(funcs::ComputeAxis(axis, out_grad_vec_dims.size()));
   std::vector<int64_t> offset(out_grad_vec_dims.size(), 0);
@@ -60,7 +60,7 @@ void ConcatGradKernel(const Context& dev_ctx,
       auto reorder_dst_memory_p = reorder_handler.AcquireDstMemory(
           grad,
           x_grad_vec_dims,
-          funcs::GetPlainOneDNNFormat(x_grad_vec_dims.size()),
+          funcs::GetPlainOneDNNFormat(static_cast<int>(x_grad_vec_dims.size())),
           dev_ctx.GetPlace());
       auto reorder_p =
           reorder_handler.AcquireReorder(reorder_dst_memory_p, slice_mem_p);
diff --git a/paddle/phi/kernels/onednn/expand_grad_kernel.cc b/paddle/phi/kernels/onednn/expand_grad_kernel.cc
index a8b1beb45832f..7de901df9561d 100644
--- a/paddle/phi/kernels/onednn/expand_grad_kernel.cc
+++ b/paddle/phi/kernels/onednn/expand_grad_kernel.cc
@@ -50,7 +50,7 @@ void ExpandGradKernel(const Context& dev_ctx,
     auto reorder_dst_memory_p = reorder_handler.AcquireDstMemory(
         in_grad,
-        funcs::GetPlainOneDNNFormat(in_grad_vec_dims.size()),
+        funcs::GetPlainOneDNNFormat(static_cast<int>(in_grad_vec_dims.size())),
        dev_ctx.GetPlace());
     auto reorder_p = reorder_handler.AcquireReorder(reorder_src_memory_p,
diff --git a/paddle/phi/kernels/onednn/matmul_grad_kernel.cc b/paddle/phi/kernels/onednn/matmul_grad_kernel.cc
index 3866a2d06ae45..46a2a7450d41c 100644
--- a/paddle/phi/kernels/onednn/matmul_grad_kernel.cc
+++ b/paddle/phi/kernels/onednn/matmul_grad_kernel.cc
@@ -51,8 +51,10 @@ void CalculateMatrixDims(const std::vector<int64_t> &x_dims,
   for (size_t i = 0; i < x_bd_dims->size() - 2; ++i) {
     (*out_bd_dims)[i] = std::max((*x_bd_dims)[i], (*y_bd_dims)[i]);
   }
-  int h_idx = trans_x ? x_bd_dims->size() - 1 : x_bd_dims->size() - 2;
-  int w_idx = trans_y ? y_bd_dims->size() - 2 : y_bd_dims->size() - 1;
+  int h_idx =
+      trans_x ? x_bd_dims->size() - 1 : x_bd_dims->size() - 2;  // NOLINT
+  int w_idx =
+      trans_y ? y_bd_dims->size() - 2 : y_bd_dims->size() - 1;  // NOLINT
   (*out_bd_dims)[x_bd_dims->size() - 2] = (*x_bd_dims)[h_idx];
   (*out_bd_dims)[y_bd_dims->size() - 1] = (*y_bd_dims)[w_idx];
diff --git a/paddle/phi/kernels/onednn/matmul_kernel.cc b/paddle/phi/kernels/onednn/matmul_kernel.cc
index b7b31ff479b30..342fce6f2be02 100644
--- a/paddle/phi/kernels/onednn/matmul_kernel.cc
+++ b/paddle/phi/kernels/onednn/matmul_kernel.cc
@@ -124,7 +124,7 @@ void MatmulKernel(const Context &dev_ctx,
   auto x_dims = common::vectorize(x.dims());
   auto y_dims = common::vectorize(y.dims());
-  int ndims = std::max(x_dims.size(), y_dims.size());
+  int ndims = std::max(x_dims.size(), y_dims.size());  // NOLINT
   ndims = std::max(ndims, 3);
   std::vector<int64_t> x_bd_dims(ndims, 1);
@@ -266,7 +266,7 @@ class MulPrimitiveFactory {
     auto scale_out_data = force_fp32_output ? 1.0f : scale_out;
     bool is_multi_channel = scale_y_data.size() > 1;
-    int count = is_multi_channel ? scale_y_data.size() : 1;
+    int count = is_multi_channel ? scale_y_data.size() : 1;  // NOLINT
     std::vector<float> output_shift_scale(count);
     for (int i = 0; i < count; i++) {
       if (scale_y_data[i] == 0.0)
diff --git a/paddle/phi/kernels/onednn/slice_grad_kernel.cc b/paddle/phi/kernels/onednn/slice_grad_kernel.cc
index 7f8f6b815b4f0..a929751433ab9 100644
--- a/paddle/phi/kernels/onednn/slice_grad_kernel.cc
+++ b/paddle/phi/kernels/onednn/slice_grad_kernel.cc
@@ -60,7 +60,7 @@ void SliceGradKernel(const Context& dev_ctx,
   auto reorder_dst_memory_p = reorder_handler.AcquireDstMemory(
       input_grad,
       dx_dims,
-      funcs::GetPlainOneDNNFormat(dx_dims.size()),
+      funcs::GetPlainOneDNNFormat(static_cast<int>(dx_dims.size())),
       dev_ctx.GetPlace());
   memset(input_grad->data(), 0, reorder_dst_memory_p->get_desc().get_size());
diff --git a/paddle/phi/kernels/onednn/slice_kernel.cc b/paddle/phi/kernels/onednn/slice_kernel.cc
index bd59d61c17e79..aeff6168f047c 100644
--- a/paddle/phi/kernels/onednn/slice_kernel.cc
+++ b/paddle/phi/kernels/onednn/slice_kernel.cc
@@ -69,7 +69,7 @@ void SliceKernel(const Context& dev_ctx,
   auto reorder_dst_memory_p = reorder_handler.AcquireDstMemory(
       out,
       slice_dims,
-      funcs::GetPlainOneDNNFormat(x_vec_dims.size()),
+      funcs::GetPlainOneDNNFormat(static_cast<int>(x_vec_dims.size())),
       dev_ctx.GetPlace());
   auto reorder_p =
diff --git a/paddle/phi/kernels/onednn/squeeze_grad_kernel.cc b/paddle/phi/kernels/onednn/squeeze_grad_kernel.cc
index d8ff4e72c1b11..78a3c4dce6bd3 100644
--- a/paddle/phi/kernels/onednn/squeeze_grad_kernel.cc
+++ b/paddle/phi/kernels/onednn/squeeze_grad_kernel.cc
@@ -37,7 +37,7 @@ void SqueezeGradKernel(const Context& dev_ctx,
       dout.mem_desc(), funcs::to_void_cast(dout.data<T>()));
   auto reorder_dst_memory_p = reorder_handler.AcquireDstMemory(
       dx,
-      funcs::GetPlainOneDNNFormat(dout_vec_dims.size()),
+      funcs::GetPlainOneDNNFormat(static_cast<int>(dout_vec_dims.size())),
       dev_ctx.GetPlace());
   auto reorder_p =
       reorder_handler.AcquireReorder(reorder_dst_memory_p, reorder_src_memory_p);
diff --git a/test/cpp/fluid/fused/cudnn_bn_add_relu_test.cc b/test/cpp/fluid/fused/cudnn_bn_add_relu_test.cc
index 770093efdacb4..cad204415174b 100644
--- a/test/cpp/fluid/fused/cudnn_bn_add_relu_test.cc
+++ b/test/cpp/fluid/fused/cudnn_bn_add_relu_test.cc
@@ -764,7 +764,7 @@ class CudnnBNAddReluTester {
     int c = channels_;
     int64_t nhw = ele_count_;
     int32_t c_int32_elems = ((c + 63) & ~63) / 32;
-    int32_t nhw_int32_elems = (nhw + 31) & ~31;
+    int32_t nhw_int32_elems = (static_cast<int32_t>(nhw) + 31) & ~31;
     bitmask.Resize(common::make_ddim({nhw_int32_elems, c_int32_elems, 1}));
     auto data_shape = common::vectorize(x.dims());
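The bitmask sizing above rounds counts up to hardware-friendly multiples with bit tricks (`&~31` and `&~63` clear the low bits after adding the alignment minus one). A compact, verifiable illustration with invented values:

```cpp
#include <cstdint>
#include <cstdio>

int main() {
  int64_t nhw = 100;
  int c = 70;
  int32_t c_int32_elems = ((c + 63) & ~63) / 32;                     // 128 / 32 = 4
  int32_t nhw_int32_elems = (static_cast<int32_t>(nhw) + 31) & ~31;  // 128
  std::printf("%d %d\n", nhw_int32_elems, c_int32_elems);
  return 0;
}
```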
diff --git a/test/cpp/fluid/memory/buddy_allocator_test.cc b/test/cpp/fluid/memory/buddy_allocator_test.cc
index b399e6fc2ade1..7f4f452d0ebc3 100644
--- a/test/cpp/fluid/memory/buddy_allocator_test.cc
+++ b/test/cpp/fluid/memory/buddy_allocator_test.cc
@@ -173,8 +173,8 @@ TEST(BuddyAllocator, FractionRefillPool) {
   // Max chunk size should be same during allocation
   EXPECT_EQ(max_chunk_size, buddy_allocator.GetMaxChunkSize());
-  size_t alloc =
-      platform::GpuAvailableMemToAlloc() * FLAGS_fraction_of_gpu_memory_to_use;
+  size_t alloc = platform::GpuAvailableMemToAlloc() *
+                 FLAGS_fraction_of_gpu_memory_to_use;  // NOLINT
   // Exceed pool trigger refilling size of fraction of avaiable gpu, and should
   // be able to alloc 60% of the remaining GPU
   int* p1 = TestBuddyAllocator(&buddy_allocator,
@@ -184,8 +184,8 @@ TEST(BuddyAllocator, FractionRefillPool) {
   // Max chunk size should be same during allocation
   EXPECT_EQ(max_chunk_size, buddy_allocator.GetMaxChunkSize());
-  alloc =
-      platform::GpuAvailableMemToAlloc() * FLAGS_fraction_of_gpu_memory_to_use;
+  alloc = platform::GpuAvailableMemToAlloc() *
+          FLAGS_fraction_of_gpu_memory_to_use;  // NOLINT
   // Exceed pool trigger refilling size of fraction of avaiable gpu, and should
   // be able to alloc 60% of the remaining GPU
   TestBuddyAllocator(&buddy_allocator,
diff --git a/test/cpp/imperative/test_group.cc b/test/cpp/imperative/test_group.cc
index 2243a24dee90d..287e67c9bcff4 100644
--- a/test/cpp/imperative/test_group.cc
+++ b/test/cpp/imperative/test_group.cc
@@ -73,7 +73,7 @@ void GroupConcatSplit(Place place, size_t size) {
     std::vector<T> value;
     for (size_t j = 0; j < len; ++j) {
-      value.push_back(static_cast<T>(1.0 * j));
+      value.push_back(static_cast<T>(1.0 * j));  // NOLINT
     }
     if (std::is_same<T, float>::value) {
@@ -89,7 +89,7 @@ void GroupConcatSplit(Place place, size_t size) {
     phi::DenseTensor tmp;
     tmp.ShareDataWith(*tensor).Resize({static_cast<int64_t>(len)});
     group.dense_tensors_.push_back(std::move(tmp));
-    group.all_length_ += len;
+    group.all_length_ += static_cast<int64_t>(len);
     group.dtype_ = framework::TransToProtoVarType(tensor->dtype());
   }
diff --git a/test/cpp/inference/api/analyzer_dam_tester.cc b/test/cpp/inference/api/analyzer_dam_tester.cc
index d17f8670adcf4..ea31fe3760b53 100644
--- a/test/cpp/inference/api/analyzer_dam_tester.cc
+++ b/test/cpp/inference/api/analyzer_dam_tester.cc
@@ -193,7 +193,7 @@ void SetInput(std::vector<std::vector<PaddleTensor>> *inputs) {
   DataRecord data(FLAGS_infer_data, FLAGS_batch_size);
   std::vector<PaddleTensor> input_slots;
   int test_batch_num =
-      FLAGS_test_all_data ? data.num_samples / FLAGS_batch_size : 1;
+      FLAGS_test_all_data ? data.num_samples / FLAGS_batch_size : 1;  // NOLINT
   LOG(INFO) << "The number of samples to be test: "
             << test_batch_num * FLAGS_batch_size;
   for (int bid = 0; bid < test_batch_num; ++bid) {
diff --git a/test/cpp/inference/api/analyzer_int8_object_detection_tester.cc b/test/cpp/inference/api/analyzer_int8_object_detection_tester.cc
index 311fb0946ca00..12be843475b74 100644
--- a/test/cpp/inference/api/analyzer_int8_object_detection_tester.cc
+++ b/test/cpp/inference/api/analyzer_int8_object_detection_tester.cc
@@ -43,7 +43,7 @@ std::vector<size_t> ReadObjectsNum(std::ifstream &file,
   file.clear();
   file.seekg(offset);
   file.read(reinterpret_cast<char *>(num_objects.data()),
-            total_images * sizeof(size_t));
+            total_images * sizeof(size_t));  // NOLINT
   if (file.eof()) LOG(ERROR) << "Reached end of stream";
   if (file.fail()) throw std::runtime_error("Failed reading file.");
diff --git a/test/cpp/inference/api/analyzer_lexical_analysis_gru_tester.cc b/test/cpp/inference/api/analyzer_lexical_analysis_gru_tester.cc
index 2a79ce572dda2..2d0355d361b2d 100644
--- a/test/cpp/inference/api/analyzer_lexical_analysis_gru_tester.cc
+++ b/test/cpp/inference/api/analyzer_lexical_analysis_gru_tester.cc
@@ -49,7 +49,7 @@ std::vector<size_t> ReadSentenceLod(std::ifstream &file,
   file.clear();
   file.seekg(offset);
   file.read(reinterpret_cast<char *>(sentence_lod.data()),
-            total_sentences_num * sizeof(size_t));
+            total_sentences_num * sizeof(size_t));  // NOLINT
   if (file.eof()) LOG(ERROR) << "Reached end of stream";
   if (file.fail()) throw std::runtime_error("Failed reading file.");
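Why those `file.read` calls get a NOLINT: `istream::read` takes a signed `std::streamsize`, while `count * sizeof(size_t)` is unsigned. An explicit version of the same pattern (names are placeholders, not the testers' own):

```cpp
#include <fstream>
#include <vector>

std::vector<size_t> ReadCounts(std::ifstream& file, size_t n) {
  std::vector<size_t> counts(n);
  // Cast the byte count to the signed type read() expects.
  file.read(reinterpret_cast<char*>(counts.data()),
            static_cast<std::streamsize>(n * sizeof(size_t)));
  return counts;
}
```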
diff --git a/test/cpp/phi/kernels/test_fused_adam_kernel.cc b/test/cpp/phi/kernels/test_fused_adam_kernel.cc
index 73e1b21ac3120..ec0926508c9e8 100644
--- a/test/cpp/phi/kernels/test_fused_adam_kernel.cc
+++ b/test/cpp/phi/kernels/test_fused_adam_kernel.cc
@@ -445,7 +445,7 @@ static auto GenerateRandomShapes(size_t n, uint64_t low, uint64_t high) {
   std::uniform_int_distribution<uint64_t> dist(low, high);
   std::vector<std::vector<int64_t>> shapes(n);
   for (size_t i = 0; i < n; ++i) {
-    shapes[i].push_back(dist(engine));
+    shapes[i].push_back(static_cast<int64_t>(dist(engine)));
   }
   return shapes;
 }
diff --git a/test/cpp/phi/kernels/test_memcpy_dev_api.cc b/test/cpp/phi/kernels/test_memcpy_dev_api.cc
index 14f5fe15c301b..9a35a1ad99c3f 100644
--- a/test/cpp/phi/kernels/test_memcpy_dev_api.cc
+++ b/test/cpp/phi/kernels/test_memcpy_dev_api.cc
@@ -43,7 +43,7 @@ TEST(DEV_API, memcpy_d2h) {
   auto* x_cpu_data = cpu_ctx->template Alloc<float>(&x_cpu);
   for (int i = 0; i < x_cpu.numel(); i++) {
-    x_cpu_data[i] = i;
+    x_cpu_data[i] = static_cast<float>(i);
   }
   const auto alloc =