From f31217fc2e535d0d1079a02895214c2c2f434809 Mon Sep 17 00:00:00 2001
From: wanghaoshuang <wanghaoshuang@baidu.com>
Date: Mon, 11 Sep 2017 14:50:54 +0800
Subject: [PATCH] Fix issues

---
 paddle/operators/pad_op.cc |  5 +++--
 paddle/operators/pad_op.h  | 21 +++++++++++----------
 2 files changed, 14 insertions(+), 12 deletions(-)

diff --git a/paddle/operators/pad_op.cc b/paddle/operators/pad_op.cc
index 894fe2cecf..ef678cf3d3 100644
--- a/paddle/operators/pad_op.cc
+++ b/paddle/operators/pad_op.cc
@@ -27,10 +27,10 @@ class PadOp : public framework::OperatorWithKernel {
   void InferShape(const framework::InferShapeContext &ctx) const override {
     auto x_dim = ctx.Input<Tensor>("X")->dims();
     auto paddings = Attr<std::vector<int>>("paddings");
-    PADDLE_ENFORCE_EQ(x_dim.size() * 2, int(paddings.size()),
+    PADDLE_ENFORCE_EQ(x_dim.size() * 2, int64_t(paddings.size()),
                       "Size of paddings should be equal to 2 * dimension size "
                       "of input tensor.");
-    std::vector<int> out_dims(x_dim.size());
+    std::vector<int64_t> out_dims(x_dim.size());
     for (int i = 0; i < x_dim.size(); ++i) {
       out_dims[i] = x_dim[i] + paddings[i * 2] + paddings[i * 2 + 1];
     }
@@ -95,6 +95,7 @@ class PadOpGrad : public framework::OperatorWithKernel {
                    "Input(Out@GRAD) should not be null");
     auto x_dims = ctx.Input<Tensor>("X")->dims();
     auto *x_grad = ctx.Output<Tensor>(framework::GradVarName("X"));
+    PADDLE_ENFORCE_NOT_NULL(x_grad, "Output(X@GRAD) should not be null");
     x_grad->Resize(x_dims);
   }
 
diff --git a/paddle/operators/pad_op.h b/paddle/operators/pad_op.h
index dcf957b47e..53451f925a 100644
--- a/paddle/operators/pad_op.h
+++ b/paddle/operators/pad_op.h
@@ -28,18 +28,17 @@ using EigenTensor = framework::EigenTensor<T, D, MajorType, IndexType>;
 
 template <typename Place, typename T, size_t D>
 void PadFunction(const framework::ExecutionContext& context) {
-  auto pads = context.GetAttr<std::vector<int>>("paddings");
+  auto pads = context.Attr<std::vector<int>>("paddings");
   Eigen::array<std::pair<int, int>, D> paddings;
   for (int i = 0; i < paddings.size(); ++i) {
     paddings[i].first = pads[i * 2];
     paddings[i].second = pads[i * 2 + 1];
   }
-  T pad_value = context.GetAttr<T>("pad_value");
+  T pad_value = context.Attr<T>("pad_value");
 
   auto* x = context.Input<Tensor>("X");
   auto* out = context.Output<Tensor>("Out");
   out->mutable_data<T>(context.GetPlace());
-  auto dims = x->dims();
 
   auto x_tensor = EigenTensor<T, D>::From(*x);
   auto out_tensor = EigenTensor<T, D>::From(*out);
@@ -51,8 +50,8 @@ template <typename Place, typename T>
 class PadKernel : public framework::OpKernel {
  public:
   void Compute(const framework::ExecutionContext& context) const override {
-    int dim = context.Input<Tensor>("X")->dims().size();
-    switch (dim) {
+    int rank = context.Input<Tensor>("X")->dims().size();
+    switch (rank) {
       case 1:
         PadFunction<Place, T, 1>(context);
         break;
@@ -72,14 +71,15 @@ class PadKernel : public framework::OpKernel {
         PadFunction<Place, T, 6>(context);
         break;
       default:
-        PADDLE_THROW("Only ranks up to 6 supported.");
+        PADDLE_THROW(
+            "PadOp only support tensors with no more than 6 dimensions.");
     }
   }
 };
 
 template <typename Place, typename T, size_t D>
 void PadGradFunction(const framework::ExecutionContext& context) {
-  auto pads = context.GetAttr<std::vector<int>>("paddings");
+  auto pads = context.Attr<std::vector<int>>("paddings");
   Eigen::array<std::pair<int, int>, D> paddings;
   for (int i = 0; i < paddings.size(); ++i) {
     paddings[i].first = -pads[i * 2];
@@ -99,9 +99,9 @@ template <typename Place, typename T>
 class PadGradKernel : public framework::OpKernel {
  public:
   void Compute(const framework::ExecutionContext& context) const override {
-    size_t dim =
+    size_t rank =
         context.Input<Tensor>(framework::GradVarName("Out"))->dims().size();
-    switch (dim) {
+    switch (rank) {
       case 1:
         PadGradFunction<Place, T, 1>(context);
         break;
@@ -121,7 +121,8 @@ class PadGradKernel : public framework::OpKernel {
         PadGradFunction<Place, T, 6>(context);
         break;
       default:
-        PADDLE_THROW("Only ranks up to 6 supported.");
+        PADDLE_THROW(
+            "PadOp only support tensors with no more than 6 dimensions.");
     }
   }
 };
-- 
GitLab