diff --git a/paddle/fluid/framework/device_worker.cc b/paddle/fluid/framework/device_worker.cc
index 1069c893dbd53..579a693503cda 100644
--- a/paddle/fluid/framework/device_worker.cc
+++ b/paddle/fluid/framework/device_worker.cc
@@ -246,7 +246,8 @@ bool CheckValidOutput(phi::DenseTensor* tensor, size_t batch_size) {
 
 void DeviceWorker::DumpParam(const Scope& scope, const int batch_id) {
   std::ostringstream os;
-  int device_id = static_cast<int>(place_.GetDeviceId());
+  int device_id =
+      static_cast<int>(static_cast<unsigned char>(place_.GetDeviceId()));
   for (auto& param : *dump_param_) {
     os.str("");
     Variable* var = scope.FindVar(param);
diff --git a/paddle/fluid/framework/hogwild_worker.cc b/paddle/fluid/framework/hogwild_worker.cc
index 095f35c6ad3c9..aff0a991993d1 100644
--- a/paddle/fluid/framework/hogwild_worker.cc
+++ b/paddle/fluid/framework/hogwild_worker.cc
@@ -309,7 +309,8 @@ int HogwildWorker::IsParameter(const std::string &name, bool full_match) {
   }
 }
 void HogwildWorker::BuildShardingDepends(const ProgramDesc &program) {
-  nccl_rank_id_ = place_.GetDeviceId();
+  nccl_rank_id_ =
+      static_cast<int>(static_cast<unsigned char>(place_.GetDeviceId()));
 #if defined(PADDLE_WITH_CUDA) && defined(PADDLE_WITH_GPU_GRAPH)
   auto gpu_ps = PSGPUWrapper::GetInstance();
   nccl_rank_id_ = gpu_ps->GetNCCLRankId(nccl_rank_id_);
diff --git a/paddle/fluid/operators/collective/c_comm_init_op.cc b/paddle/fluid/operators/collective/c_comm_init_op.cc
index b16d6fb6fe7df..768c60c27b093 100644
--- a/paddle/fluid/operators/collective/c_comm_init_op.cc
+++ b/paddle/fluid/operators/collective/c_comm_init_op.cc
@@ -118,7 +118,8 @@ class CCommInitOp : public framework::OperatorBase {
     int nranks = Attr<int>("nranks");
     int rid = Attr<int>("ring_id");
 
-    int device_id = place.device;
+    int device_id =
+        static_cast<int>(static_cast<unsigned char>(place.device));
    if (Attr<int>("device_id") >= 0) {
       device_id = Attr<int>("device_id");
     }
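
Note on the cast pattern: phi::Place stores the device id as an int8_t, and converting a signed char directly to a wider integer sign-extends it; clang-tidy's bugprone-signed-char-misuse check flags such conversions, and routing the value through unsigned char first is the usual way to satisfy it. Below is a minimal standalone sketch (not Paddle code; the -128 value is a hypothetical bit pattern, not a real device id) of the difference between the two conversion paths:

    #include <cstdint>
    #include <iostream>

    int main() {
      int8_t device = -128;
      // Direct conversion sign-extends the value: prints -128.
      std::cout << static_cast<int>(device) << '\n';
      // Going through unsigned char keeps the raw bit pattern: prints 128.
      std::cout << static_cast<int>(static_cast<unsigned char>(device)) << '\n';
      return 0;
    }

For valid device ids (small non-negative values) both conversion paths yield the same result, so the patch is warning hygiene rather than a change in runtime behavior.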