Unverified · Commit 5e874cc3 authored by Jacek Czaja, committed by GitHub

- Cosmetic fixes to align with PADDLE_ENFORCE guidelines (#26891)

test=develop
Parent: e6af53b1
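Every hunk below makes the same change: a PADDLE_ENFORCE* call that still passed a bare message string now wraps that message in a typed error from platform::errors (InvalidArgument for bad inputs or state, Unimplemented for unsupported configurations). A rough sketch of the before/after shape follows; the helper CheckSoftmaxAxis and its arguments are hypothetical, and the include paths assume the usual paddle/fluid layout.

    #include "paddle/fluid/platform/enforce.h"
    #include "paddle/fluid/platform/errors.h"

    // Hypothetical helper, for illustration only.
    void CheckSoftmaxAxis(int axis, int rank) {
      // Old style (what the guidelines flag): a bare message string.
      //   PADDLE_ENFORCE_LT(axis, rank, "axis is out of range");
      //
      // New style: wrap the message in a platform::errors type so the failure
      // carries an error code and goes through the common formatting path.
      PADDLE_ENFORCE_LT(axis, rank,
                        paddle::platform::errors::InvalidArgument(
                            "axis (%d) must be less than the tensor rank (%d).",
                            axis, rank));
    }

The hunks that follow apply this transformation to the MKL-DNN activation, batch_norm, conv, conv_transpose, pool, and softmax kernels.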
@@ -33,10 +33,12 @@ class MKLDNNActivationKernel
 public:
  void Compute(const framework::ExecutionContext &ctx) const override {
    const auto *x = ctx.Input<Tensor>("X");
-   PADDLE_ENFORCE_EQ(x->layout(), DataLayout::kMKLDNN,
-                     "Wrong layout set for X tensor");
-   PADDLE_ENFORCE_NE(x->format(), MKLDNNMemoryFormat::undef,
-                     "Wrong format set for X tensor");
+   PADDLE_ENFORCE_EQ(
+       x->layout(), DataLayout::kMKLDNN,
+       platform::errors::InvalidArgument("Wrong layout set for X tensor"));
+   PADDLE_ENFORCE_NE(
+       x->format(), MKLDNNMemoryFormat::undef,
+       platform::errors::InvalidArgument("Wrong format set for X tensor"));
    Functor functor;
    functor(ctx);
@@ -50,9 +52,11 @@ class MKLDNNActivationGradKernel
  void Compute(const framework::ExecutionContext &ctx) const override {
    const auto *diff_y = ctx.Input<Tensor>(framework::GradVarName("Out"));
    PADDLE_ENFORCE_EQ(diff_y->layout(), DataLayout::kMKLDNN,
-                     "Wrong layout set for Input OutGrad tensor");
+                     platform::errors::InvalidArgument(
+                         "Wrong layout set for Input OutGrad tensor"));
    PADDLE_ENFORCE_NE(diff_y->format(), MKLDNNMemoryFormat::undef,
-                     "Wrong format set for Input OutGrad tensor");
+                     platform::errors::InvalidArgument(
+                         "Wrong format set for Input OutGrad tensor"));
    Functor functor;
    functor(ctx);
@@ -82,7 +86,7 @@ void eltwise_forward(const framework::ExecutionContext &ctx,
  PADDLE_ENFORCE(
      x->dims().size() == 2 || x->dims().size() == 3 || x->dims().size() == 4,
-     "Input dim must be with 2, 3 or 4");
+     platform::errors::Unimplemented("Input dim must be with 2, 3 or 4"));
  auto src_tz = framework::vectorize<int64_t>(x->dims());
......
@@ -262,9 +262,11 @@ class BatchNormMKLDNNGradOpKernel : public paddle::framework::OpKernel<T> {
    auto *diff_shift = ctx.Output<Tensor>(framework::GradVarName("Bias"));
    PADDLE_ENFORCE_EQ(diff_y->layout(), DataLayout::kMKLDNN,
-                     "Wrong layout set for Input diff_y tensor");
+                     platform::errors::InvalidArgument(
+                         "Wrong layout set for Input diff_y tensor"));
    PADDLE_ENFORCE_NE(diff_y->format(), MKLDNNMemoryFormat::undef,
-                     "Wrong format set for Input diff_y tensor");
+                     platform::errors::InvalidArgument(
+                         "Wrong format set for Input diff_y tensor"));
    auto src_tz = paddle::framework::vectorize<int64_t>(x->dims());
    auto scale_tz = paddle::framework::vectorize<int64_t>(scale->dims());
......
@@ -30,10 +30,12 @@ using platform::to_void_cast;
static void EnforceLayouts(const std::vector<const Tensor*> inputs) {
  for (auto* input : inputs) {
-   PADDLE_ENFORCE_EQ(input->layout(), DataLayout::kMKLDNN,
-                     "Wrong layout set for Input tensor");
-   PADDLE_ENFORCE_NE(input->format(), MKLDNNMemoryFormat::undef,
-                     "Wrong format set for Input tensor");
+   PADDLE_ENFORCE_EQ(
+       input->layout(), DataLayout::kMKLDNN,
+       platform::errors::InvalidArgument("Wrong layout set for Input tensor"));
+   PADDLE_ENFORCE_NE(
+       input->format(), MKLDNNMemoryFormat::undef,
+       platform::errors::InvalidArgument("Wrong format set for Input tensor"));
  }
}
@@ -49,7 +51,7 @@ static platform::CPUPlace GetCpuPlace(
    const paddle::framework::ExecutionContext& ctx) {
  auto place = ctx.GetPlace();
  PADDLE_ENFORCE(paddle::platform::is_cpu_place(place),
-                "It must use CPUPlace.");
+                platform::errors::InvalidArgument("It must use CPUPlace."));
  return BOOST_GET_CONST(platform::CPUPlace, place);
}
......
@@ -561,7 +561,8 @@ class ConvMKLDNNOpKernel : public paddle::framework::OpKernel<T> {
    PADDLE_ENFORCE_EQ(
        !fuse_residual_conn || !force_fp32_output, true,
-       "residual fusion does not support force output with fp32");
+       platform::errors::Unimplemented(
+           "residual fusion does not support force output with fp32"));
    auto* bias = ctx.HasInput("Bias") ? ctx.Input<Tensor>("Bias") : nullptr;
@@ -625,7 +626,8 @@ class ConvMKLDNNOpKernel : public paddle::framework::OpKernel<T> {
              ? dilations.size() == 3 && dilations[0] == 1 &&
                    dilations[1] == 1 && dilations[2] == 1
              : dilations.size() == 2 && dilations[0] == 1 && dilations[1] == 1,
-         true, "dilation in convolution is not implemented yet");
+         true, platform::errors::Unimplemented(
+                   "dilation in convolution is not implemented yet"));
    const K* filter_data = filter->data<K>();
    auto scale_in_data = ctx.Attr<float>("Scale_in");
@@ -887,7 +889,8 @@ class ConvMKLDNNGradOpKernel : public paddle::framework::OpKernel<T> {
                          "The output_grad tensor's layout should be %d, but got %d.",
                          DataLayout::kMKLDNN, output_grad->layout()));
    PADDLE_ENFORCE_NE(output_grad->format(), MKLDNNMemoryFormat::undef,
-                     "Wrong format set for output_grad tensor");
+                     platform::errors::InvalidArgument(
+                         "Wrong format set for output_grad tensor"));
    PADDLE_ENFORCE_EQ(
        ctx.Attr<bool>("is_test"), false,
......
@@ -117,7 +117,8 @@ class ConvTransposeMKLDNNOpKernel : public paddle::framework::OpKernel<T> {
    PADDLE_ENFORCE(
        dilations.size() == 2 && dilations[0] == 1 && dilations[1] == 1,
-       "dilation in convolution is not implemented yet");
+       platform::errors::Unimplemented(
+           "dilation in convolution is not implemented yet"));
    const T* input_data = input->data<T>();
    const T* filter_data = filter->data<T>();
......
@@ -83,19 +83,24 @@ class PoolMKLDNNGradOpKernel : public paddle::framework::OpKernel<T> {
    const Tensor* out_grad = ctx.Input<Tensor>(framework::GradVarName("Out"));
    Tensor* in_x_grad = ctx.Output<Tensor>(framework::GradVarName("X"));
-   PADDLE_ENFORCE_EQ(in_x->layout(), DataLayout::kMKLDNN,
-                     "Wrong layout set for Input tensor");
-   PADDLE_ENFORCE_NE(in_x->format(), MKLDNNMemoryFormat::undef,
-                     "Wrong format set for Input tensor");
+   PADDLE_ENFORCE_EQ(
+       in_x->layout(), DataLayout::kMKLDNN,
+       platform::errors::InvalidArgument("Wrong layout set for Input tensor"));
+   PADDLE_ENFORCE_NE(
+       in_x->format(), MKLDNNMemoryFormat::undef,
+       platform::errors::InvalidArgument("Wrong format set for Input tensor"));
    PADDLE_ENFORCE_EQ(out_grad->layout(), DataLayout::kMKLDNN,
-                     "Wrong layout set for Input output_grad tensor");
+                     platform::errors::InvalidArgument(
+                         "Wrong layout set for Input output_grad tensor"));
    PADDLE_ENFORCE_NE(out_grad->format(), MKLDNNMemoryFormat::undef,
-                     "Wrong format set for Input output_grad tensor");
+                     platform::errors::InvalidArgument(
+                         "Wrong format set for Input output_grad tensor"));
    PADDLE_ENFORCE_EQ(
        ctx.Attr<bool>("is_test"), false,
-       "is_test attribute should be set to False in training phase.");
+       platform::errors::InvalidArgument(
+           "is_test attribute should be set to False in training phase."));
    std::string pooling_type = ctx.Attr<std::string>("pooling_type");
......
@@ -140,7 +140,8 @@ class SoftmaxMKLDNNGradKernel : public paddle::framework::OpKernel<T> {
    PADDLE_ENFORCE_EQ(
        dout->dims(), dx->dims(),
-       "The shape of softmax_grad's input and output must be identical.");
+       platform::errors::InvalidArgument(
+           "The shape of softmax_grad's input and output must be identical."));
    auto dims = dout->dims();  // input and output share the same shape
    const int axis = CanonicalAxis(ctx.Attr<int>("axis"), dims.size());
......