diff --git a/paddle/fluid/operators/CMakeLists.txt b/paddle/fluid/operators/CMakeLists.txt
index 618d31098563b..6fe18f2479478 100644
--- a/paddle/fluid/operators/CMakeLists.txt
+++ b/paddle/fluid/operators/CMakeLists.txt
@@ -187,6 +187,4 @@ endif()
 
 if(WITH_ASCEND_CL)
 cc_test(gelu_op_npu_test SRCS gelu_op_npu_test.cc DEPS op_registry gelu_op scope device_context enforce executor)
-cc_test(mean_op_npu_test SRCS mean_op_npu_test.cc DEPS op_registry mean_op scope device_context enforce executor)
 endif()
-
diff --git a/paddle/fluid/operators/lookup_table_v2_op_npu.cc b/paddle/fluid/operators/lookup_table_v2_op_npu.cc
index e7cc048ed3ce4..8ab4d70fd3ff6 100644
--- a/paddle/fluid/operators/lookup_table_v2_op_npu.cc
+++ b/paddle/fluid/operators/lookup_table_v2_op_npu.cc
@@ -54,6 +54,7 @@ class LookupTableV2GradNPUKernel : public framework::OpKernel<T> {
     auto *table_t = ctx.Input<framework::LoDTensor>("W");
     auto *table_grad_t =
         ctx.Output<framework::LoDTensor>(framework::GradVarName("W"));
+    table_grad_t->mutable_data<T>(ctx.GetPlace());
 
     framework::NPUAttributeMap attr_input = {{"use_locking", true}};
     auto runner = NpuOpRunner("ScatterAdd", {*table_t, *ids_t, *output_grad_t},
diff --git a/paddle/fluid/operators/softmax_op.cc b/paddle/fluid/operators/softmax_op.cc
index 7d4194227b4cd..a38800da87fd1 100644
--- a/paddle/fluid/operators/softmax_op.cc
+++ b/paddle/fluid/operators/softmax_op.cc
@@ -206,9 +206,10 @@ class SoftmaxOpGrad : public framework::OperatorWithKernel {
     auto input_data_type = OperatorWithKernel::IndicateVarDataType(
         ctx, framework::GradVarName("Out"));
     if (input_data_type == framework::proto::VarType::FP16) {
-      PADDLE_ENFORCE_EQ(platform::is_gpu_place(ctx.GetPlace()), true,
-                        platform::errors::InvalidArgument(
-                            "float16 can only be used on GPU place"));
+      if (!(platform::is_gpu_place(ctx.GetPlace()) ||
+            platform::is_npu_place(ctx.GetPlace())))
+        PADDLE_THROW(platform::errors::InvalidArgument(
+            "float16 can only be used on GPU/NPU place"));
     }
 
     return framework::OpKernelType(input_data_type, ctx.GetPlace(), layout_,
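
Note on the lookup_table_v2_op_npu.cc hunk: the NPU runner writes the ScatterAdd result into the output tensor's existing device buffer and expects that buffer to already be allocated (which is why this fix is needed); calling mutable_data<T>() before Run() performs that allocation. Below is a minimal standalone sketch of the allocate-before-run contract; ToyTensor and RunScatterAddLike are toy stand-ins invented for illustration, not Paddle's actual types.

    #include <cassert>
    #include <cstddef>
    #include <cstdio>
    #include <vector>

    // Toy stand-in for a device tensor: memory is allocated lazily,
    // mirroring the role of Tensor::mutable_data<T>() in the hunk above.
    struct ToyTensor {
      std::vector<float> buf;  // empty until mutable_data() is called
      size_t numel = 0;

      float* mutable_data(size_t n) {  // allocate (or reuse) the buffer
        buf.resize(n);
        numel = n;
        return buf.data();
      }
    };

    // Toy stand-in for the op runner: it accumulates into a
    // pre-allocated output and cannot allocate the output itself.
    void RunScatterAddLike(const ToyTensor& src, ToyTensor* out) {
      assert(out->numel == src.numel && "output must be allocated before Run()");
      for (size_t i = 0; i < src.numel; ++i) out->buf[i] += src.buf[i];
    }

    int main() {
      ToyTensor grad, table_grad;
      grad.mutable_data(4);
      for (size_t i = 0; i < 4; ++i) grad.buf[i] = 1.0f;

      table_grad.mutable_data(4);  // the fix: allocate before running the op
      RunScatterAddLike(grad, &table_grad);
      std::printf("table_grad[0] = %f\n", table_grad.buf[0]);
      return 0;
    }

Without the mutable_data() call the output buffer is empty when the op runs, which is the failure mode the one-line fix above addresses.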
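Note on the softmax_op.cc hunk: PADDLE_ENFORCE_EQ compares one predicate against a single expected value, so it cannot express the new "GPU or NPU" disjunction; the check is therefore rewritten as an explicit if around PADDLE_THROW. A plain-C++ sketch of the equivalent control flow, with an illustrative Place enum and std::invalid_argument standing in for Paddle's error machinery:

    #include <stdexcept>

    enum class Place { kCPU, kGPU, kNPU };

    // Equivalent of the rewritten guard in SoftmaxOpGrad: FP16 gradients
    // are accepted only on GPU or NPU places; anything else throws.
    void CheckFP16Place(Place place) {
      if (!(place == Place::kGPU || place == Place::kNPU)) {
        throw std::invalid_argument("float16 can only be used on GPU/NPU place");
      }
    }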