[clang-tidy] replenish cppcoreguidelines-narrowing-conversions (#56904)
gouzil authored Sep 7, 2023
1 parent 98baeda commit 54e4475
Showing 118 changed files with 542 additions and 473 deletions.
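For context: clang-tidy's cppcoreguidelines-narrowing-conversions check flags implicit conversions that may lose data, such as size_t to int, long to int, ptrdiff_t to int, or an integer to float. The fix pattern applied throughout this commit is either an explicit static_cast at the point of conversion or a // NOLINT suppression where the value is known to fit. A minimal illustration of the diagnostic and both remedies (an orientation sketch, not code from this commit):

    #include <cstddef>
    #include <vector>

    int CountElements(const std::vector<int>& v) {
      // clang-tidy flags the implicit size_t -> int narrowing in `int n = v.size();`.
      int n = static_cast<int>(v.size());  // remedy 1: make the conversion explicit
      return n;
    }

    int CountElementsSuppressed(const std::vector<int>& v) {
      int n = v.size();  // NOLINT(cppcoreguidelines-narrowing-conversions) -- remedy 2
      return n;
    }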
7 changes: 4 additions & 3 deletions paddle/fluid/framework/block_desc.cc
@@ -143,18 +143,19 @@ void BlockDesc::PrependAllocatedOp(std::unique_ptr<OpDesc> &&op_desc) {

 OpDesc *BlockDesc::InsertOp(size_t index) {
   need_update_ = true;
-  auto it = ops_.begin() + index;
+  auto it = ops_.begin() + index;  // NOLINT
   std::unique_ptr<OpDesc> new_op(new OpDesc(this));
   it = ops_.insert(it, std::move(new_op));
   return (*it).get();
 }

 void BlockDesc::RemoveOp(size_t s, size_t e) {
-  if (ops_.begin() + s >= ops_.end() || ops_.begin() + e > ops_.end()) {
+  if (ops_.begin() + s >= ops_.end() ||  // NOLINT
+      ops_.begin() + e > ops_.end()) {   // NOLINT
     return;
   }
   need_update_ = true;
-  ops_.erase(ops_.begin() + s, ops_.begin() + e);
+  ops_.erase(ops_.begin() + s, ops_.begin() + e);  // NOLINT
 }

 void BlockDesc::RemoveOpInternal(const OpDesc *op_desc) {
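In this file the fixes are NOLINT suppressions rather than casts: adding a size_t to a random-access iterator implicitly converts the operand to the iterator's signed difference_type (std::ptrdiff_t), which the check flags. A cast-based alternative would look like the sketch below (illustrative only; the commit keeps the suppressions):

    auto it = ops_.begin() + static_cast<std::ptrdiff_t>(index);
    ops_.erase(ops_.begin() + static_cast<std::ptrdiff_t>(s),
               ops_.begin() + static_cast<std::ptrdiff_t>(e));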
10 changes: 5 additions & 5 deletions paddle/fluid/framework/custom_operator.cc
@@ -653,8 +653,8 @@ static void RunDefaultInferDtypeFunc(
   if (detail::IsDuplicableVar(pair.first)) {
     size_t size = ctx->InputSize(pair.first);
     for (size_t i = 0; i < size; ++i) {
-      auto dtype = ctx->GetInputDataType(pair.first, i);
-      ctx->SetOutputDataType(pair.second, dtype, i);
+      auto dtype = ctx->GetInputDataType(pair.first, static_cast<int>(i));
+      ctx->SetOutputDataType(pair.second, dtype, static_cast<int>(i));
     }
   } else {
     auto dtype = ctx->GetInputDataType(pair.first);
@@ -681,7 +681,7 @@ static void RunInferDtypeFunc(
     std::vector<DataType> vec_custom_dtype;
     if (ctx->HasInput(in_name)) {  // general inputs
       for (size_t i = 0; i < ctx->InputSize(in_name); ++i) {
-        auto dtype = ctx->GetInputDataType(in_name, i);
+        auto dtype = ctx->GetInputDataType(in_name, static_cast<int>(i));
         vec_custom_dtype.emplace_back(
             paddle::framework::TransToPhiDataType(dtype));
       }
@@ -799,8 +799,8 @@ static void RunInferDtypeFunc(
     if (ctx->HasOutput(out_name)) {
       size_t size = ctx->InputSize(in_name);
       for (size_t i = 0; i < size; ++i) {
-        auto dtype = ctx->GetInputDataType(in_name, i);
-        ctx->SetOutputDataType(out_name, dtype, i);
+        auto dtype = ctx->GetInputDataType(in_name, static_cast<int>(i));
+        ctx->SetOutputDataType(out_name, dtype, static_cast<int>(i));
       }
     } else {
       PADDLE_ENFORCE(
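The recurring pattern in this file keeps the loop index as size_t (matching the container-style InputSize) and narrows explicitly only at APIs that take an int index. In isolation the pattern looks like this (a sketch with a hypothetical int-indexed accessor, not Paddle's API):

    #include <cstddef>
    #include <vector>

    // Hypothetical accessor taking an int index, standing in for the int-indexed API.
    int GetAt(const std::vector<int>& data, int index) {
      return data[static_cast<std::size_t>(index)];
    }

    long SumAll(const std::vector<int>& data) {
      long sum = 0;
      for (std::size_t i = 0; i < data.size(); ++i) {
        sum += GetAt(data, static_cast<int>(i));  // size_t -> int made explicit at the call site
      }
      return sum;
    }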
49 changes: 27 additions & 22 deletions paddle/fluid/framework/data_feed.cc
@@ -45,7 +45,9 @@ class BufferedLineFileReader {
 class FILEReader {
  public:
   explicit FILEReader(FILE* fp) : fp_(fp) {}
-  int read(char* buf, int len) { return fread(buf, sizeof(char), len, fp_); }
+  int read(char* buf, int len) {
+    return static_cast<int>(fread(buf, sizeof(char), len, fp_));
+  }

  private:
   FILE* fp_;
@@ -644,10 +646,11 @@ void MultiSlotDataFeed::Init(
   use_slots_.clear();
   use_slots_is_dense_.clear();
   for (size_t i = 0; i < all_slot_num; ++i) {
-    const auto& slot = multi_slot_desc.slots(i);
+    const auto& slot = multi_slot_desc.slots(static_cast<int>(i));
     all_slots_[i] = slot.name();
     all_slots_type_[i] = slot.type();
-    use_slots_index_[i] = slot.is_used() ? use_slots_.size() : -1;
+    use_slots_index_[i] =
+        static_cast<int>(slot.is_used() ? use_slots_.size() : -1);
     total_dims_without_inductive_[i] = 1;
     inductive_shape_index_[i] = -1;
     if (slot.is_used()) {
@@ -1048,10 +1051,11 @@ void MultiSlotInMemoryDataFeed::Init(
   use_slots_is_dense_.clear();
   slot_conf_.resize(all_slot_num);
   for (size_t i = 0; i < all_slot_num; ++i) {
-    const auto& slot = multi_slot_desc.slots(i);
+    const auto& slot = multi_slot_desc.slots(static_cast<int>(i));
     all_slots_[i] = slot.name();
     all_slots_type_[i] = slot.type();
-    use_slots_index_[i] = slot.is_used() ? use_slots_.size() : -1;
+    use_slots_index_[i] =
+        static_cast<int>(slot.is_used() ? use_slots_.size() : -1);

     slot_conf_[i].name = slot.name();
     slot_conf_[i].type = slot.type();
@@ -1839,21 +1843,21 @@ void PaddleBoxDataFeed::GetRankOffset(const std::vector<PvInstance>& pv_vec,
   int max_rank = 3;  // the value is setting
   int row = ins_number;
   int col = max_rank * 2 + 1;
-  int pv_num = pv_vec.size();
+  int pv_num = static_cast<int>(pv_vec.size());

   std::vector<int> rank_offset_mat(row * col, -1);
   rank_offset_mat.shrink_to_fit();

   for (int i = 0; i < pv_num; i++) {
     auto pv_ins = pv_vec[i];
-    int ad_num = pv_ins->ads.size();
+    int ad_num = static_cast<int>(pv_ins->ads.size());
     int index_start = index;
     for (int j = 0; j < ad_num; ++j) {
       auto ins = pv_ins->ads[j];
       int rank = -1;
       if ((ins->cmatch == 222 || ins->cmatch == 223) &&
           ins->rank <= static_cast<uint32_t>(max_rank) && ins->rank != 0) {
-        rank = ins->rank;
+        rank = static_cast<int>(ins->rank);
       }

       rank_offset_mat[index * col] = rank;
@@ -1864,12 +1868,13 @@ void PaddleBoxDataFeed::GetRankOffset(const std::vector<PvInstance>& pv_vec,
         if ((cur_ins->cmatch == 222 || cur_ins->cmatch == 223) &&
             cur_ins->rank <= static_cast<uint32_t>(max_rank) &&
             cur_ins->rank != 0) {
-          fast_rank = cur_ins->rank;
+          fast_rank = static_cast<int>(cur_ins->rank);
         }

         if (fast_rank > 0) {
           int m = fast_rank - 1;
-          rank_offset_mat[index * col + 2 * m + 1] = cur_ins->rank;
+          rank_offset_mat[index * col + 2 * m + 1] =
+              static_cast<int>(cur_ins->rank);
           rank_offset_mat[index * col + 2 * m + 2] = index_start + k;
         }
       }
@@ -2035,7 +2040,7 @@ void SlotRecordInMemoryDataFeed::Init(const DataFeedDesc& data_feed_desc) {
   float_total_dims_size_ = 0;
   float_total_dims_without_inductives_.clear();
   for (size_t i = 0; i < all_slot_num; ++i) {
-    const auto& slot = multi_slot_desc.slots(i);
+    const auto& slot = multi_slot_desc.slots(static_cast<int>(i));
     all_slots_[i] = slot.name();

     AllSlotInfo& all_slot = all_slots_info_[i];
@@ -2046,7 +2051,7 @@ void SlotRecordInMemoryDataFeed::Init(const DataFeedDesc& data_feed_desc) {

if (slot.is_used()) {
UsedSlotInfo& info = used_slots_info_[use_slot_size_];
info.idx = i;
info.idx = static_cast<int>(i);
info.slot = slot.name();
info.type = slot.type();
info.dense = slot.is_dense();
@@ -2423,20 +2428,20 @@ bool SlotRecordInMemoryDataFeed::ParseOneInstance(const std::string& line,
   slot_uint64_feasigns.resize(uint64_use_slot_size_);

   if (parse_ins_id_) {
-    int num = strtol(&str[pos], &endptr, 10);
+    int num = static_cast<int>(strtol(&str[pos], &endptr, 10));
     CHECK(num == 1);  // NOLINT
-    pos = endptr - str + 1;
+    pos = static_cast<int>(endptr - str + 1);
     size_t len = 0;
     while (str[pos + len] != ' ') {
       ++len;
     }
     rec->ins_id_ = std::string(str + pos, len);
-    pos += len + 1;
+    pos += static_cast<int>(len + 1);
   }
   if (parse_logkey_) {
-    int num = strtol(&str[pos], &endptr, 10);
+    int num = static_cast<int>(strtol(&str[pos], &endptr, 10));
     CHECK(num == 1);  // NOLINT
-    pos = endptr - str + 1;
+    pos = static_cast<int>(endptr - str + 1);
     size_t len = 0;
     while (str[pos + len] != ' ') {
       ++len;
@@ -2452,14 +2457,14 @@ bool SlotRecordInMemoryDataFeed::ParseOneInstance(const std::string& line,
     rec->search_id = search_id;
     rec->cmatch = cmatch;
     rec->rank = rank;
-    pos += len + 1;
+    pos += static_cast<int>(len + 1);
   }

   int float_total_slot_num = 0;
   int uint64_total_slot_num = 0;

   for (auto& info : all_slots_info_) {
-    int num = strtol(&str[pos], &endptr, 10);
+    int num = static_cast<int>(strtol(&str[pos], &endptr, 10));
     PADDLE_ENFORCE(num,
                    "The number of ids can not be zero, you need padding "
                    "it in data generator; or if there is something wrong with "
@@ -2488,7 +2493,7 @@ bool SlotRecordInMemoryDataFeed::ParseOneInstance(const std::string& line,
         ++uint64_total_slot_num;
       }
     }
-    pos = endptr - str;
+    pos = static_cast<int>(endptr - str);
   } else {
     for (int j = 0; j <= num; ++j) {
       // pos = line.find_first_of(' ', pos + 1);
@@ -2565,7 +2570,7 @@ void SlotRecordInMemoryDataFeed::PutToFeedVec(const SlotRecord* ins_vec,
       batch_fea.resize(total_instance + fea_num);
       memcpy(
           &batch_fea[total_instance], slot_values, sizeof(float) * fea_num);
-      total_instance += fea_num;
+      total_instance += static_cast<int>(fea_num);
       slot_offset.push_back(total_instance);
     }

@@ -2588,7 +2593,7 @@ void SlotRecordInMemoryDataFeed::PutToFeedVec(const SlotRecord* ins_vec,
       memcpy(&batch_fea[total_instance],
              slot_values,
              sizeof(uint64_t) * fea_num);
-      total_instance += fea_num;
+      total_instance += static_cast<int>(fea_num);
     }
     if (fea_num == 0) {
       batch_fea.resize(total_instance + fea_num);
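ParseOneInstance shows two further narrowing sources: strtol returns long, and subtracting two char pointers yields std::ptrdiff_t, both of which are stored in int offsets here. The parsing idiom in isolation (a minimal sketch, not the Paddle code):

    #include <cstdlib>

    // Parse an integer at str + pos; return the offset just past it.
    // Both the parsed long and the pointer difference narrow to int explicitly.
    int ParseIntAt(const char* str, int pos, int* out) {
      char* endptr = nullptr;
      *out = static_cast<int>(std::strtol(&str[pos], &endptr, 10));  // long -> int
      return static_cast<int>(endptr - str);                         // ptrdiff_t -> int
    }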
17 changes: 9 additions & 8 deletions paddle/fluid/framework/data_set.cc
@@ -295,7 +295,7 @@ static void compute_batch_num(const int64_t ins_num,
   int thread_batch_num = batch_size * thread_num;
   // less data
   if (static_cast<int64_t>(thread_batch_num) > ins_num) {
-    compute_left_batch_num(ins_num, thread_num, offset, 0);
+    compute_left_batch_num(static_cast<int>(ins_num), thread_num, offset, 0);
     return;
   }

@@ -1271,7 +1271,7 @@ void MultiSlotDataset::PreprocessInstance() {
   input_channel_->Close();
   std::vector<PvInstance> pv_data;
   input_channel_->ReadAll(input_records_);
-  int all_records_num = input_records_.size();
+  int all_records_num = static_cast<int>(input_records_.size());
   std::vector<Record*> all_records;
   all_records.reserve(all_records_num);
   for (int index = 0; index < all_records_num; ++index) {
@@ -1333,7 +1333,7 @@ void MultiSlotDataset::GenerateLocalTablesUnlock(int table_id,
   local_map_tables = fleet_ptr_->GetLocalTable();
   local_map_tables.resize(shard_num);
   // read thread
-  int channel_num = multi_output_channel_.size();
+  int channel_num = static_cast<int>(multi_output_channel_.size());
   if (read_thread_num < channel_num) {
     read_thread_num = channel_num;
   }
@@ -1361,7 +1361,8 @@ void MultiSlotDataset::GenerateLocalTablesUnlock(int table_id,
       this->multi_output_channel_[i]->ReadAll(vec_data);
       for (auto& item : vec_data) {
         for (auto& feature : item.uint64_feasigns_) {
-          int shard = feature.sign().uint64_feasign_ % shard_num;
+          int shard =
+              static_cast<int>(feature.sign().uint64_feasign_ % shard_num);
           task_keys[shard].push_back(feature.sign().uint64_feasign_);
         }
       }
@@ -1634,11 +1635,11 @@ void MultiSlotDataset::PreprocessChannel(
   int out_channel_size = 0;
   if (cur_channel_ == 0) {
     for (auto& item : multi_output_channel_) {
-      out_channel_size += item->Size();
+      out_channel_size += static_cast<int>(item->Size());
     }
   } else {
     for (auto& item : multi_consume_channel_) {
-      out_channel_size += item->Size();
+      out_channel_size += static_cast<int>(item->Size());
     }
   }
   VLOG(2) << "DatasetImpl<T>::SlotsShuffle() begin with input channel size: "
@@ -1724,14 +1725,14 @@ void MultiSlotDataset::PreprocessChannel(
       if (!item) {
         continue;
       }
-      end_size += item->Size();
+      end_size += static_cast<int>(item->Size());
     }
   } else {
     for (auto& item : multi_consume_channel_) {
       if (!item) {
         continue;
       }
-      end_size += item->Size();
+      end_size += static_cast<int>(item->Size());
     }
   }
   CHECK(input_channel_->Size() == 0)
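Accumulating Channel::Size() (a size_t) into an int is only safe while the total stays below INT_MAX; the commit keeps the int accumulators and just makes the narrowing explicit. A defensive alternative would widen the accumulator instead (a sketch, not the commit's choice):

    int64_t out_channel_size = 0;
    for (auto& item : multi_output_channel_) {
      out_channel_size += static_cast<int64_t>(item->Size());  // no truncation possible
    }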
4 changes: 2 additions & 2 deletions paddle/fluid/framework/details/fetch_async_op_handle.cc
@@ -174,7 +174,7 @@ void FetchAsyncOpHandle::FetchMergedLodTensor(

// for 0D tensor, can't concat eath tensor. So stack 0D and concat 1+D tensor
if (rank == 0) {
int src_lodtensor_size = src_lodtensors.size();
int src_lodtensor_size = static_cast<int>(src_lodtensors.size());
new_dim = phi::make_ddim(std::vector<int>({src_lodtensor_size}));
} else {
bool find_first_dims = false;
@@ -211,7 +211,7 @@ void FetchAsyncOpHandle::FetchMergedLodTensor(
   if (rank == 0) {
     end = begin + 1;
   } else {
-    end = begin + src->dims()[0];
+    end = static_cast<int>(begin + src->dims()[0]);
   }

   if (end == begin) {
3 changes: 2 additions & 1 deletion paddle/fluid/framework/details/fused_all_reduce_op_handle.cc
@@ -401,7 +401,8 @@ void FusedAllReduceOpHandle::GetDTypeAndNumel(
"The size of grad tensors of fused_all_reduce_op_handle "
"must be > 0, but got %d.",
len));
*numel += phi::Alignment(len * size_of_dtype, places_[0]) / size_of_dtype;
*numel += static_cast<int64_t>(
phi::Alignment(len * size_of_dtype, places_[0]) / size_of_dtype);
}
}

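Here the aligned byte count is unsigned, so the rounded-up element count is cast back into the signed int64_t accumulator. The arithmetic in isolation (a sketch with a hypothetical AlignUp helper and a fixed 256-byte alignment; the real alignment comes from places_[0]):

    #include <cstddef>
    #include <cstdint>

    // Hypothetical stand-in for phi::Alignment: round bytes up to a multiple of align.
    std::size_t AlignUp(std::size_t bytes, std::size_t align) {
      return (bytes + align - 1) / align * align;
    }

    void AddAlignedNumel(int64_t* numel, std::size_t len, std::size_t size_of_dtype) {
      // size_t result narrows explicitly into the signed accumulator.
      *numel += static_cast<int64_t>(AlignUp(len * size_of_dtype, 256) / size_of_dtype);
    }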
5 changes: 3 additions & 2 deletions paddle/fluid/framework/details/fused_broadcast_op_handle.cc
@@ -49,8 +49,9 @@ void FusedBroadcastOpHandle::RunImpl() {
   for (size_t i = 0; i < in_var_handles.size(); ++i) {
     BroadcastOneVar(
         *in_var_handles[i],
-        std::vector<VarHandle *>(out_var_handles.begin() + i * place_num,
-                                 out_var_handles.begin() + (i + 1) * place_num),
+        std::vector<VarHandle *>(
+            out_var_handles.begin() + i * place_num,         // NOLINT
+            out_var_handles.begin() + (i + 1) * place_num),  // NOLINT
         local_exec_scopes_);
   }
 }
2 changes: 1 addition & 1 deletion paddle/fluid/framework/details/gather_op_handle.cc
@@ -129,7 +129,7 @@ void GatherOpHandle::RunImpl() {
       out_var_handle->place(), [in_tensors, out_tensor, &dev_ctx, t_out_p] {
         int s = 0, e = 0;
         for (const auto &in_tensor : in_tensors) {
-          e += in_tensor.dims()[0];
+          e += static_cast<int>(in_tensor.dims()[0]);
           auto sub_out = out_tensor->Slice(s, e);
           paddle::framework::TensorCopy(in_tensor, t_out_p, *dev_ctx, &sub_out);
           s = e;
4 changes: 2 additions & 2 deletions paddle/fluid/framework/details/gather_op_handle_test.cc
@@ -134,12 +134,12 @@ struct TestGatherOpHandle {
   }

   void TestGatherSelectedRows(size_t output_scope_idx) {
-    int height = kDims[0] * 2;
+    int height = static_cast<int>(kDims[0] * 2);
     std::vector<int64_t> rows{0, 1, 2, 3, 3, 0, 14, 7, 3, 1,
                               2, 4, 6, 3, 1, 1, 1, 1, 3, 7};
     std::vector<float> send_vector(phi::product(kDims));
     for (size_t k = 0; k < send_vector.size(); ++k) {
-      send_vector[k] = k;
+      send_vector[k] = static_cast<float>(k);
     }

     for (size_t input_scope_idx = 0; input_scope_idx < gpu_list_.size();
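Integer-to-floating-point conversions count as narrowing as well: a 64-bit index above 2^24 is no longer exactly representable as a float, which is why even the test assignment send_vector[k] = k needs an explicit cast. In isolation:

    std::size_t k = 16777217;         // 2^24 + 1
    float f = static_cast<float>(k);  // rounds to 16777216.0f; the cast documents the loss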
paddle/fluid/framework/details/scale_loss_grad_op_handle.cc
@@ -33,7 +33,7 @@ ScaleLossGradOpHandle::ScaleLossGradOpHandle(ir::Node *node,
                                            platform::DeviceContext *dev_ctx,
                                            proto::VarType::Type dtype)
     : OpHandleBase(node),
-      coeff_(static_cast<float>(1.0 / num_dev)),
+      coeff_(static_cast<float>(1.0 / num_dev)),  // NOLINT
       scope_(scope),
       place_(place),
       out_dtype_(dtype) {
5 changes: 3 additions & 2 deletions paddle/fluid/framework/details/scope_buffered_monitor.cc
@@ -169,14 +169,15 @@ void ScopeBufferedMonitor::Apply(const std::function<void()> &callback,
   if (VLOG_IS_ON(8)) {
     for (size_t idx = 0; idx < gpu_memory_size_per_gpu.size(); ++idx) {
       VLOG(8) << "history local exec scopes contains "
-              << string::HumanReadableSize(gpu_memory_size_per_gpu.at(idx))
+              << string::HumanReadableSize(
+                     gpu_memory_size_per_gpu.at(idx))  // NOLINT
               << " in " << places_.at(idx);
     }
   }

   if (FLAGS_local_exe_sub_scope_limit > 0) {
     for (size_t idx = 0; idx < gpu_memory_size_per_gpu.size(); ++idx) {
-      if (gpu_memory_size_per_gpu.at(idx) / kMB >=
+      if (gpu_memory_size_per_gpu.at(idx) / kMB >=  // NOLINT
           FLAGS_local_exe_sub_scope_limit) {
         platform::DeviceContextPool::Instance().Get(places_.at(idx))->Wait();
         local_exec_scopes_.at(idx)->DropKids();
(Diffs for the remaining changed files are not shown here.)
