error message optimization in softmax_with_cross_entropy_op (PaddlePaddle#27772)

* error message optimization in softmax_with_cross_entropy_op

* fix some unsuitable comments
yghstill authored and chen-zhiyu committed Oct 15, 2020
1 parent fdbaf2e commit 3bcf055
Showing 1 changed file with 12 additions and 6 deletions.
18 changes: 12 additions & 6 deletions paddle/fluid/operators/softmax_with_cross_entropy_op.cu
@@ -357,7 +357,8 @@ static void HardLabelSoftmaxWithCrossEntropy(
     CALL_HARD_LABEL_SOFTMAX_WITH_CROSS_ENTROPY_FUSED_KERNEL(4);
     CALL_HARD_LABEL_SOFTMAX_WITH_CROSS_ENTROPY_FUSED_KERNEL(2);
     default:
-      PADDLE_THROW("BlockDim must be 2^n in softmax_with_cross_entropy_op");
+      PADDLE_THROW(platform::errors::Unavailable(
+          "Block Dimension must be 2^n in softmax_with_cross_entropy_op."));
       break;
   }
 #undef CALL_HARD_LABEL_SOFTMAX_WITH_CROSS_ENTROPY_FUSED_KERNEL
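
The hunk above, and the identical one that follows, swap the bare-string form of PADDLE_THROW for a message wrapped in a typed error from platform::errors. A minimal sketch of the before/after pattern, assuming the usual Paddle headers (paddle/fluid/platform/enforce.h, paddle/fluid/platform/errors.h) are in scope; the surrounding switch on block_dim is illustrative here, not the operator's actual dispatch code:

    // Old form: a raw message string only, with no structured error type.
    // PADDLE_THROW("BlockDim must be 2^n in softmax_with_cross_entropy_op");

    // New form: the message is wrapped in platform::errors::Unavailable, so
    // the thrown exception carries an error category as well as the text.
    switch (block_dim) {
      // ... power-of-two cases handled by the fused-kernel macro ...
      default:
        PADDLE_THROW(platform::errors::Unavailable(
            "Block Dimension must be 2^n in softmax_with_cross_entropy_op."));
        break;
    }
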
@@ -397,7 +398,8 @@ static void SoftmaxWithCrossEntropyFusedKernel(const T* logits_data,
     CALL_SOFTMAX_WITH_CROSS_ENTROPY_FUSED_KERNEL(4);
     CALL_SOFTMAX_WITH_CROSS_ENTROPY_FUSED_KERNEL(2);
     default:
-      PADDLE_THROW("BlockDim must be 2^n in softmax_with_cross_entropy_op");
+      PADDLE_THROW(platform::errors::Unavailable(
+          "Block Dimension must be 2^n in softmax_with_cross_entropy_op."));
       break;
   }

@@ -408,8 +410,10 @@ template <typename T>
 class SoftmaxWithCrossEntropyCUDAKernel : public framework::OpKernel<T> {
  public:
   void Compute(const framework::ExecutionContext& context) const override {
-    PADDLE_ENFORCE(platform::is_gpu_place(context.GetPlace()),
-                   "This kernel only runs on GPU device.");
+    PADDLE_ENFORCE_EQ(
+        platform::is_gpu_place(context.GetPlace()), true,
+        platform::errors::Unavailable("softmax_with_cross_entropy operator's "
+                                      "CUDA kernel only runs on GPU device."));
     const Tensor* logits = context.Input<Tensor>("Logits");
     const Tensor* labels = context.Input<Tensor>("Label");
     Tensor* softmax = context.Output<Tensor>("Softmax");
@@ -469,8 +473,10 @@ template <typename T>
 class SoftmaxWithCrossEntropyGradCUDAKernel : public framework::OpKernel<T> {
  public:
   void Compute(const framework::ExecutionContext& context) const override {
-    PADDLE_ENFORCE(platform::is_gpu_place(context.GetPlace()),
-                   "This kernel only runs on GPU device.");
+    PADDLE_ENFORCE_EQ(
+        platform::is_gpu_place(context.GetPlace()), true,
+        platform::errors::Unavailable("softmax_with_cross_entropy operator's "
+                                      "CUDA kernel only runs on GPU device."));
     const Tensor* labels = context.Input<Tensor>("Label");
     const T* loss_grad_data =
         context.Input<Tensor>(framework::GradVarName("Loss"))->data<T>();
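
The two kernel-entry checks above follow the same scheme: the two-argument PADDLE_ENFORCE is replaced by PADDLE_ENFORCE_EQ, which compares the place check against true and attaches a typed error. A minimal sketch of the pattern, assuming a framework::ExecutionContext named context as in the kernels above:

    // Old form: a condition plus a bare message string.
    // PADDLE_ENFORCE(platform::is_gpu_place(context.GetPlace()),
    //                "This kernel only runs on GPU device.");

    // New form: an explicit comparison against true plus a typed error, so a
    // failed check reports both operands along with the Unavailable category.
    PADDLE_ENFORCE_EQ(
        platform::is_gpu_place(context.GetPlace()), true,
        platform::errors::Unavailable("softmax_with_cross_entropy operator's "
                                      "CUDA kernel only runs on GPU device."));

Using the comparison macro with a platform::errors constructor, rather than the bare PADDLE_ENFORCE, appears consistent with Paddle's error-message guidelines for new code, which this PR is following.
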
