diff --git a/test/auto_parallel/custom_op/custom_relu_op.cu b/test/auto_parallel/custom_op/custom_relu_op.cu
index 810ff75be5578..ad0ed12e0fb60 100644
--- a/test/auto_parallel/custom_op/custom_relu_op.cu
+++ b/test/auto_parallel/custom_op/custom_relu_op.cu
@@ -14,7 +14,9 @@
 
 #include "paddle/extension.h"
 
-#define CHECK_GPU_INPUT(x) PD_CHECK(x.is_gpu(), #x " must be a GPU Tensor.")
+#define CHECK_GPU_INPUT(x) \
+  PADDLE_ENFORCE_EQ(       \
+      x.is_gpu(), true, common::errors::Fatal(#x " must be a GPU Tensor."))
 
 template <typename data_t>
 __global__ void relu_cuda_forward_kernel(const data_t* x,
@@ -42,7 +44,10 @@ std::vector<paddle::Tensor> relu_cuda_forward(const paddle::Tensor& x) {
   CHECK_GPU_INPUT(x);
   auto out = paddle::empty_like(x);
 
-  PD_CHECK(x.place() == paddle::DefaultGPUPlace());
+  PADDLE_ENFORCE_EQ(
+      x.place() == paddle::DefaultGPUPlace(),
+      true,
+      common::errors::InvalidArgument("Input tensor `x` should be on GPU"));
 
   int64_t numel = x.numel();
   int64_t block = 512;
@@ -64,7 +69,10 @@ std::vector<paddle::Tensor> relu_cuda_backward(const paddle::Tensor& x,
   CHECK_GPU_INPUT(grad_out);
   auto grad_x = paddle::empty_like(x);
 
-  PD_CHECK(x.place() == paddle::DefaultGPUPlace());
+  PADDLE_ENFORCE_EQ(
+      x.place() == paddle::DefaultGPUPlace(),
+      true,
+      common::errors::InvalidArgument("Input tensor `x` should be on GPU"));
 
   int64_t numel = out.numel();
   int64_t block = 512;
diff --git a/test/cpp_extension/custom_relu_forward.cu b/test/cpp_extension/custom_relu_forward.cu
index e0405309f7add..227f32c0b217f 100644
--- a/test/cpp_extension/custom_relu_forward.cu
+++ b/test/cpp_extension/custom_relu_forward.cu
@@ -12,9 +12,12 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
+#include "paddle/common/enforce.h"
 #include "paddle/extension.h"
 
-#define CHECK_GPU_INPUT(x) PD_CHECK(x.is_gpu(), #x " must be a GPU Tensor.")
+#define CHECK_GPU_INPUT(x) \
+  PADDLE_ENFORCE_EQ(       \
+      x.is_gpu(), true, common::errors::Fatal(#x " must be a GPU Tensor."))
 
 template <typename data_t>
 __global__ void relu_cuda_forward_kernel(const data_t* x,
@@ -30,7 +33,10 @@ paddle::Tensor relu_cuda_forward(const paddle::Tensor& x) {
   CHECK_GPU_INPUT(x);
   auto out = paddle::empty_like(x);
 
-  PD_CHECK(x.place() == paddle::DefaultGPUPlace());
+  PADDLE_ENFORCE_EQ(
+      x.place() == paddle::DefaultGPUPlace(),
+      true,
+      common::errors::InvalidArgument("Input tensor `x` should be on GPU"));
 
   int64_t numel = x.numel();
   int64_t block = 512;
diff --git a/test/custom_op/custom_inplace.cu b/test/custom_op/custom_inplace.cu
index 9891045f3bd47..b843520ade9e7 100644
--- a/test/custom_op/custom_inplace.cu
+++ b/test/custom_op/custom_inplace.cu
@@ -18,7 +18,9 @@
 
 #include "paddle/extension.h"
 
-#define CHECK_GPU_INPUT(x) PD_CHECK(x.is_gpu(), #x " must be a GPU Tensor.")
+#define CHECK_GPU_INPUT(x) \
+  PADDLE_ENFORCE_EQ(       \
+      x.is_gpu(), true, common::errors::Fatal(#x " must be a GPU Tensor."))
 
 template <typename data_t>
 __global__ void relu_cuda_forward_kernel(data_t* x, int64_t num) {
@@ -31,7 +33,10 @@ __global__ void relu_cuda_forward_kernel(data_t* x, int64_t num) {
 void ReluForwardInplace(paddle::Tensor& x) {  // NOLINT
   CHECK_GPU_INPUT(x);
 
-  PD_CHECK(x.place() == paddle::DefaultGPUPlace());
+  PADDLE_ENFORCE_EQ(
+      x.place() == paddle::DefaultGPUPlace(),
+      true,
+      common::errors::InvalidArgument("Input tensor `x` should be on GPU"));
 
   int64_t numel = x.numel();
   int64_t block = 512;
diff --git a/test/custom_op/custom_relu_op.cu b/test/custom_op/custom_relu_op.cu
index 49e5d16938eb8..b68e57f08df3c 100644
--- a/test/custom_op/custom_relu_op.cu
+++ b/test/custom_op/custom_relu_op.cu
@@ -14,7 +14,9 @@
 
 #include "paddle/extension.h"
 
-#define CHECK_GPU_INPUT(x) PD_CHECK(x.is_gpu(), #x " must be a GPU Tensor.")
+#define CHECK_GPU_INPUT(x) \
+  PADDLE_ENFORCE_EQ(       \
+      x.is_gpu(), true, common::errors::Fatal(#x " must be a GPU Tensor."))
 
 template <typename data_t>
 __global__ void relu_cuda_forward_kernel(const data_t* x,
@@ -55,7 +57,10 @@ std::vector<paddle::Tensor> relu_cuda_forward(const paddle::Tensor& x) {
   CHECK_GPU_INPUT(x);
   auto out = paddle::empty_like(x);
 
-  PD_CHECK(x.place() == paddle::DefaultGPUPlace());
+  PADDLE_ENFORCE_EQ(
+      x.place() == paddle::DefaultGPUPlace(),
+      true,
+      common::errors::InvalidArgument("Input tensor `x` should be on GPU"));
 
   int64_t numel = x.numel();
   int64_t block = 512;
@@ -77,7 +82,10 @@ std::vector<paddle::Tensor> relu_cuda_backward(const paddle::Tensor& x,
   CHECK_GPU_INPUT(grad_out);
   auto grad_x = paddle::empty_like(x);
 
-  PD_CHECK(x.place() == paddle::DefaultGPUPlace());
+  PADDLE_ENFORCE_EQ(
+      x.place() == paddle::DefaultGPUPlace(),
+      true,
+      common::errors::InvalidArgument("Input tensor `x` should be on GPU"));
 
   int64_t numel = out.numel();
   int64_t block = 512;
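
For readers unfamiliar with the enforce macros this diff migrates to, below is a minimal standalone sketch of the check pattern. The `DEMO_ENFORCE_EQ` macro and `MockTensor` type are simplified stand-ins written for illustration only; they are not Paddle's actual `PADDLE_ENFORCE_EQ` implementation (which lives in `paddle/common/enforce.h` and attaches file/line context plus a typed error such as `common::errors::Fatal` or `common::errors::InvalidArgument`):

```cpp
#include <iostream>
#include <sstream>
#include <stdexcept>

// Simplified stand-in for PADDLE_ENFORCE_EQ: compares two values and
// throws with the supplied message when they differ. Paddle's real macro
// also records file/line information and wraps a typed error object.
#define DEMO_ENFORCE_EQ(lhs, rhs, msg)                        \
  do {                                                        \
    if ((lhs) != (rhs)) {                                     \
      std::ostringstream oss;                                 \
      oss << "EnforceNotMet: " << (msg) << " [" << #lhs       \
          << " != " << #rhs << "]";                           \
      throw std::runtime_error(oss.str());                    \
    }                                                         \
  } while (0)

// Mock tensor used only so the example compiles without Paddle headers.
struct MockTensor {
  bool on_gpu = false;
  bool is_gpu() const { return on_gpu; }
};

// Mirrors the shape of the rewritten CHECK_GPU_INPUT macro in the diff.
#define CHECK_GPU_INPUT(x) \
  DEMO_ENFORCE_EQ((x).is_gpu(), true, #x " must be a GPU Tensor.")

int main() {
  MockTensor cpu_tensor;  // on_gpu == false, so the check fires
  try {
    CHECK_GPU_INPUT(cpu_tensor);
  } catch (const std::exception& e) {
    std::cerr << e.what() << "\n";  // enforce-style failure message
  }
  return 0;
}
```

The practical difference from the old `PD_CHECK` form is that the condition and expected value are passed separately, so the failure report can show both sides of the comparison, and the error class (`Fatal` for a malformed input tensor, `InvalidArgument` for a wrong placement) is stated explicitly at the call site.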