diff --git a/paddle/fluid/operators/mean_op_xpu.cc b/paddle/fluid/operators/mean_op_xpu.cc
index 3d95609b726f7b1e9ffb9f965cf8f51311a23634..71bcc4be15ce53477751d71a510562df6c826ad5 100644
--- a/paddle/fluid/operators/mean_op_xpu.cc
+++ b/paddle/fluid/operators/mean_op_xpu.cc
@@ -32,8 +32,11 @@ class MeanXPUKernel : public framework::OpKernel<T> {
     const float* x_data = input->data<float>();
     float* y_data = output->data<float>();
     int r = xpu::mean(dev_ctx.x_context(), x_data, y_data, input->numel());
-    PADDLE_ENFORCE_EQ(r, xpu::Error_t::SUCCESS,
-                      platform::errors::InvalidArgument("XPU kernel error!"));
+    PADDLE_ENFORCE_EQ(
+        r, xpu::Error_t::SUCCESS,
+        platform::errors::External(
+            "XPU kernel error, Mean op execution not succeed, error code=%d",
+            r));
   }
 };
 template <typename DeviceContext, typename T>
@@ -49,8 +52,11 @@ class MeanGradXPUKernel : public framework::OpKernel<T> {
     float* dx = IG->data<float>();
     const float* dy = OG->data<float>();
     int r = xpu::mean_grad(dev_ctx.x_context(), dx, dy, IG->numel());
-    PADDLE_ENFORCE_EQ(r, xpu::Error_t::SUCCESS,
-                      platform::errors::InvalidArgument("XPU kernel error!"));
+    PADDLE_ENFORCE_EQ(
+        r, xpu::Error_t::SUCCESS,
+        platform::errors::External(
+            "XPU kernel error. Mean_grad execution not succeed, error code=%d",
+            r));
   }
 };
 
diff --git a/paddle/fluid/operators/softmax_with_cross_entropy_op_xpu.cc b/paddle/fluid/operators/softmax_with_cross_entropy_op_xpu.cc
index 727b9ef812a64fb555ce23623aea5baae0fb89e7..f4f6eb9cdc82d0671eacbb1b75a1a30b0d2149bd 100644
--- a/paddle/fluid/operators/softmax_with_cross_entropy_op_xpu.cc
+++ b/paddle/fluid/operators/softmax_with_cross_entropy_op_xpu.cc
@@ -28,7 +28,7 @@ class SoftmaxWithCrossEntropyXPUKernel : public framework::OpKernel<T> {
   void Compute(const framework::ExecutionContext& context) const override {
     PADDLE_ENFORCE_EQ(
         platform::is_xpu_place(context.GetPlace()), true,
-        platform::errors::InvalidArgument("This kernel only runs on XPU."));
+        platform::errors::PreconditionNotMet("This kernel only runs on XPU."));
     const Tensor* logits = context.Input<Tensor>("Logits");
     const Tensor* labels = context.Input<Tensor>("Label");
     Tensor* softmax = context.Output<Tensor>("Softmax");
@@ -46,8 +46,11 @@ class SoftmaxWithCrossEntropyXPUKernel : public framework::OpKernel<T> {
         context.template device_context<platform::XPUDeviceContext>();
     int r = xpu::softmax2d_forward(dev_ctx.x_context(), logits->data<float>(),
                                    softmax->data<float>(), n, d);
-    PADDLE_ENFORCE_EQ(r, xpu::Error_t::SUCCESS,
-                      platform::errors::InvalidArgument("XPU kernel error!"));
+    PADDLE_ENFORCE_EQ(
+        r, xpu::Error_t::SUCCESS,
+        platform::errors::External("XPU kernel error. Softmax2d_forward "
+                                   "execution not succeed, error code=%d",
+                                   r));
     // cross_entropy
     auto ignore_index = context.Attr<int>("ignore_index");
     const bool soft_label = context.Attr<bool>("soft_label");
@@ -61,10 +64,13 @@ class SoftmaxWithCrossEntropyXPUKernel : public framework::OpKernel<T> {
     int* labels_int32_host =
         reinterpret_cast<int*>(std::malloc(n * sizeof(int)));
     int* labels_int32_device = NULL;
-    PADDLE_ENFORCE_EQ(
-        xpu_malloc(reinterpret_cast<void**>(&labels_int32_device),
-                   n * sizeof(int)),
-        XPU_SUCCESS, platform::errors::InvalidArgument("XPU kernel error!"));
+    int ret = xpu_malloc(reinterpret_cast<void**>(&labels_int32_device),
+                         n * sizeof(int));
+    PADDLE_ENFORCE_EQ(ret, XPU_SUCCESS,
+                      platform::errors::External(
+                          "XPU API return wrong value[%d], please check "
+                          "where Baidu Kunlun Card is properly installed.",
+                          ret));
     dev_ctx.Wait();
     memory::Copy(platform::CPUPlace(), labels_int64_host,
                  BOOST_GET_CONST(platform::XPUPlace, context.GetPlace()),
@@ -78,8 +84,11 @@ class SoftmaxWithCrossEntropyXPUKernel : public framework::OpKernel<T> {
     int r = xpu::cross_entropy_forward(
         dev_ctx.x_context(), n, d, softmax->data<float>(), labels_int32_device,
         loss->data<float>(), nullptr, ignore_index);
-    PADDLE_ENFORCE_EQ(r, xpu::Error_t::SUCCESS,
-                      platform::errors::InvalidArgument("XPU kernel error!"));
+    PADDLE_ENFORCE_EQ(
+        r, xpu::Error_t::SUCCESS,
+        platform::errors::External("XPU kernel error. Cross_entropy_forward "
+                                   "execution not succeed, error code=%d",
+                                   r));
     dev_ctx.Wait();
     std::free(labels_int32_host);
     std::free(labels_int64_host);