【Hackathon 6th Fundable Projects 2 No.22】cppcoreguidelines-pro-type-member-init_4-part #64037

Merged: 12 commits, May 23, 2024
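This PR is part of a series fixing warnings from the clang-tidy check cppcoreguidelines-pro-type-member-init, which flags constructors that leave data members uninitialized. Every hunk below follows the same pattern: members that a constructor previously left out of its member-initializer list are now value-initialized there. A minimal sketch of the before/after shape, using a hypothetical Widget class that is not part of the diff:

    #include <string>
    #include <vector>

    // Before: the check flags this constructor because count_ is left with
    // an indeterminate value after construction.
    class WidgetBefore {
     public:
      WidgetBefore() {}

     private:
      int count_;
      std::vector<std::string> names_;
    };

    // After: every member appears in the member-initializer list. names_
    // would be default-constructed anyway; the hunks below spell such
    // members out explicitly as well.
    class WidgetAfter {
     public:
      WidgetAfter() : count_(0), names_() {}

     private:
      int count_;
      std::vector<std::string> names_;
    };

Initializing in the member-initializer list rather than in the constructor body, as the AutoCastGuard and AsyncWorkQueue hunks below also do, avoids default-constructing a member and then assigning over it.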
2 changes: 1 addition & 1 deletion paddle/common/ddim.cc
@@ -20,7 +20,7 @@ namespace common {

 DDim::DDim() : rank_(-1) { dim_[0] = 0; }

-DDim::DDim(const DDim& ddim) : dim_() { CopyFrom(ddim); }
+DDim::DDim(const DDim& ddim) : dim_(), rank_(-1) { CopyFrom(ddim); }

 DDim::DDim(const int* d, int n) : rank_(n) {
   dynamic_dim_assign(d, dim_.GetMutable(), n);
4 changes: 2 additions & 2 deletions paddle/fluid/distributed/fleet_executor/fleet_executor.cc
@@ -31,7 +31,7 @@
 namespace paddle {
 namespace distributed {

-FleetExecutor::FleetExecutor(const std::string& exe_desc_str) {
+FleetExecutor::FleetExecutor(const std::string& exe_desc_str) : carrier_ids_() {
   bool parse_flag = exe_desc_.ParseFromString(exe_desc_str);
   PADDLE_ENFORCE(parse_flag,
                  platform::errors::PreconditionNotMet(
@@ -42,7 +42,7 @@ FleetExecutor::FleetExecutor(const std::string& exe_desc_str) {
 }

 FleetExecutor::FleetExecutor(const FleetExecutorDesc& exe_desc)
-    : exe_desc_(exe_desc) {
+    : exe_desc_(exe_desc), carrier_ids_() {
   // Message bus will be created and inited only once
   GlobalVal<MessageBus>::Create();
   InitMessageBus();
3 changes: 2 additions & 1 deletion paddle/fluid/framework/details/eager_deletion_op_handle.cc
@@ -43,7 +43,8 @@ EagerDeletionOpHandle::EagerDeletionOpHandle(
       scope_idx_(scope_idx),
       place_(place),
       var_infos_(vars.begin(), vars.end()),
-      gc_(gc) {
+      gc_(gc),
+      vars_() {
 #if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
   if (platform::is_gpu_place(place)) {
     dev_ctx_ = reinterpret_cast<phi::GPUContext *>(
1 change: 1 addition & 0 deletions paddle/fluid/framework/details/fetch_op_handle.cc
@@ -33,6 +33,7 @@ FetchOpHandle::FetchOpHandle(ir::Node *node,
       offset_(offset),
       local_scopes_(local_scopes),
       local_exec_scopes_(local_exec_scopes),
+      tensors_(),
       return_merged_(return_merged) {}

 FetchOpHandle::~FetchOpHandle() = default;
2 changes: 1 addition & 1 deletion paddle/fluid/framework/executor.cc
@@ -44,7 +44,7 @@ int kProgramId = -1;

 ExecutorPrepareContext::ExecutorPrepareContext(
     const framework::ProgramDesc& prog, size_t block_id)
-    : prog_(prog), block_id_(block_id) {}
+    : prog_(prog), block_id_(block_id), ops_(), unused_vars_() {}

 void ExecutorPrepareContext::PrepareUnusedVars(
     const std::vector<std::string>& keep_vars, bool force_disable_gc) {
@@ -26,7 +26,8 @@ namespace paddle {
 namespace framework {
 namespace ir {

-OpGraphView::OpGraphView(const std::vector<details::OpHandleBase *> &ops) {
+OpGraphView::OpGraphView(const std::vector<details::OpHandleBase *> &ops)
+    : preceding_ops_(), pending_ops_() {
   Build(ops);
 }
@@ -559,7 +559,7 @@ void DependencyBuilder::UpdateVarMinRwOp(
 /// ======================== ///
 ///        For new ir        ///
 /// ======================== ///
-PirDependencyBuilder::PirDependencyBuilder() {
+PirDependencyBuilder::PirDependencyBuilder() : instructions_() {
   is_build_ = false;
   op_downstream_map_ = std::make_shared<std::map<size_t, std::set<size_t>>>();
   op_happens_before_ = std::make_shared<std::vector<std::vector<bool>>>();
@@ -117,10 +117,9 @@ const std::vector<WorkQueueOptions> ConstructWorkQueueOptions(
 AsyncWorkQueue::AsyncWorkQueue(size_t host_num_threads,
                                size_t device_num_threads,
                                EventsWaiter* waiter)
-    : host_num_thread_(host_num_threads) {
-  queue_group_ = CreateWorkQueueGroup(
-      ConstructWorkQueueOptions(host_num_threads, device_num_threads, waiter));
-}
+    : host_num_thread_(host_num_threads),
+      queue_group_(CreateWorkQueueGroup(ConstructWorkQueueOptions(
+          host_num_threads, device_num_threads, waiter))) {}

 void AsyncWorkQueue::AddTask(const OpFuncType& op_func_type,
                              std::function<void()> fn) {
6 changes: 4 additions & 2 deletions paddle/fluid/framework/new_executor/interpretercore.cc
@@ -41,7 +41,8 @@ namespace framework {
 InterpreterCore::InterpreterCore(const platform::Place& place,
                                  const BlockDesc& block,
                                  framework::Scope* scope,
-                                 const ExecutionConfig& execution_config) {
+                                 const ExecutionConfig& execution_config)
+    : impl_(nullptr), fetch_var_names_() {
   VLOG(4) << "InterpreterCore(): " << this << " on " << place;
   impl_ = std::make_unique<ProgramInterpreter>(
       place, block, scope, execution_config);
@@ -52,7 +53,8 @@ InterpreterCore::InterpreterCore(
     const std::vector<std::string>& fetch_var_names,
     const ::pir::Block* ir_block,
     framework::Scope* scope,
-    const ExecutionConfig& execution_config) {
+    const ExecutionConfig& execution_config)
+    : impl_(nullptr), fetch_var_names_() {
   VLOG(4) << "InterpreterCore(): " << this << " on " << place;
   impl_ = std::make_unique<PirInterpreter>(
       place, fetch_var_names, ir_block, scope, execution_config);
21 changes: 18 additions & 3 deletions paddle/fluid/framework/parallel_executor.cc
@@ -77,7 +77,18 @@ class ParallelExecutorPrivate {
  public:
   ParallelExecutorPrivate(const std::vector<platform::Place> &places,
                           Scope *global_scope)
-      : places_(places), global_scope_(global_scope) {
+      : places_(places),
+        local_scopes_(),
+        local_exec_scopes_(),
+        global_scope_(global_scope),
+        executor_(nullptr),
+        is_persistable_(),
+        own_local_scope_(false),
+        use_device_(DeviceType::CPU),
+        use_all_reduce_(false),
+        nranks_(0),
+        mem_opt_var_infos_(),
+        gcs_() {
     if (!FLAGS_pe_profile_fname.empty()) {
       std::call_once(gProfileOnce, [] {
 #ifdef WITH_GPERFTOOLS
@@ -674,7 +685,9 @@ ParallelExecutor::ParallelExecutor(const std::vector<platform::Place> &places,
                                    const ExecutionStrategy &exec_strategy,
                                    const BuildStrategy &build_strategy,
                                    ir::Graph *graph)
-    : member_(new ParallelExecutorPrivate(places, scope)) {
+    : member_(new ParallelExecutorPrivate(places, scope)),
+      async_graphs_(),
+      var_infos_() {
   PADDLE_ENFORCE_EQ(!places.empty(),
                     true,
                     platform::errors::Unavailable(
@@ -747,7 +760,9 @@ ParallelExecutor::ParallelExecutor(const platform::Place &place,
                                    const ExecutionStrategy &exec_strategy,
                                    const BuildStrategy &build_strategy,
                                    ir::Graph *graph)
-    : member_(new ParallelExecutorPrivate({place}, scope)) {
+    : member_(new ParallelExecutorPrivate({place}, scope)),
+      async_graphs_(),
+      var_infos_() {
   // Initialize necessary info of member_ with strategy.
   InitExecutorPrivateMemberInfo(exec_strategy,
                                 build_strategy,
2 changes: 1 addition & 1 deletion paddle/fluid/framework/scope.cc
@@ -28,7 +28,7 @@ COMMON_DECLARE_bool(eager_delete_scope);

 namespace paddle {
 namespace framework {
-Scope::Scope() = default;
+Scope::Scope() : vars_(), kids_() {}
 Scope::~Scope() { DropKids(); }  // NOLINT

 Scope& Scope::NewScope() const {
4 changes: 1 addition & 3 deletions paddle/fluid/imperative/amp_auto_cast.cc
@@ -120,9 +120,7 @@ OpSupportedInfos(const std::string& place,
 }

 AutoCastGuard::AutoCastGuard(std::shared_ptr<AmpAttrs> state, AmpLevel level)
-    : state_(state) {
-  pre_amp_level_ = state_->GetAmpLevel();
-
+    : state_(state), pre_amp_level_(state_->GetAmpLevel()) {
   if (pre_amp_level_ != level) {
     state_->SetAmpLevel(level);
   }
7 changes: 2 additions & 5 deletions paddle/fluid/inference/tensorrt/test_tensorrt.cc
@@ -45,11 +45,8 @@ class Logger : public nvinfer1::ILogger {

 class ScopedWeights {
  public:
-  explicit ScopedWeights(float value) : value_(value) {
-    w.type = nvinfer1::DataType::kFLOAT;
-    w.values = &value_;
-    w.count = 1;
-  }
+  explicit ScopedWeights(float value)
+      : value_(value), w{nvinfer1::DataType::kFLOAT, &value_, 1} {}
   const nvinfer1::Weights& get() { return w; }

  private:
2 changes: 2 additions & 0 deletions paddle/fluid/pir/dialect/operator/ir/op_dialect.cc
@@ -437,6 +437,8 @@ class IdManager {
     return instance;
   }

+  IdManager() : ids_() {}
+
   ~IdManager() {
     for (auto id : ids_) {
       delete id;
2 changes: 1 addition & 1 deletion paddle/fluid/pybind/graph.cc
@@ -229,7 +229,7 @@ void BindNode(py::module *m) {

 class PYBIND11_HIDDEN PassAttrGetterSetterRegistry {
  private:
-  PassAttrGetterSetterRegistry() = default;
+  PassAttrGetterSetterRegistry() : getter_setter_map_() {}
   DISABLE_COPY_AND_ASSIGN(PassAttrGetterSetterRegistry);

   using Getter = std::function<py::object(const framework::ir::Pass & /*pass*/,
2 changes: 1 addition & 1 deletion test/cpp/fluid/test_common_infer_shape_functions.cc
@@ -61,7 +61,7 @@ class DygraphInferShapeTest {
   imperative::NameVarBaseMap outs_;
   framework::AttributeMap attrs_;
   std::string op_type_;
-  std::map<std::string, framework::DDim> expected_dims_;
+  std::map<std::string, framework::DDim> expected_dims_ = {};
 };
 }  // namespace details

2 changes: 1 addition & 1 deletion test/cpp/inference/api/mkldnn_quantizer_tester.cc
@@ -25,7 +25,7 @@ namespace paddle {

 class MkldnnQuantizerTest : public testing::Test {
  public:
-  MkldnnQuantizerTest() {
+  MkldnnQuantizerTest() : predictor(nullptr), mkldnn_quantizer(nullptr) {
     AnalysisConfig config(FLAGS_dirname);
     predictor = CreatePaddlePredictor(config);
     auto* predictor_p = static_cast<AnalysisPredictor*>(predictor.get());
@@ -25,6 +25,7 @@ namespace inference {
 struct Record {
   std::vector<float> data;
   std::vector<int32_t> shape;
+  Record() : data(), shape() {}
 };

 Record ProcessALine(const std::string &line) {
8 changes: 6 additions & 2 deletions test/deprecated/cpp/inference/api/analyzer_rnn2_tester.cc
@@ -27,9 +27,13 @@ struct DataRecord {
   size_t num_samples;  // total number of samples
   size_t batch_iter{0};
   size_t batch_size{1};
-  DataRecord() = default;
+  DataRecord() : link_step_data_all(), lod(), rnn_link_data(), num_samples(0) {}
   explicit DataRecord(const std::string &path, int batch_size = 1)
-      : batch_size(batch_size) {
+      : link_step_data_all(),
+        lod(),
+        rnn_link_data(),
+        num_samples(0),
+        batch_size(batch_size) {
     Load(path);
   }
   DataRecord NextBatch() {