diff --git a/paddle/operators/pad_op.cc b/paddle/operators/pad_op.cc
index dac1c56bdd43a303576e2ee87f9d71b60242fe2c..94a6d20583f9675fa2efdbcb593c064ea5796084 100644
--- a/paddle/operators/pad_op.cc
+++ b/paddle/operators/pad_op.cc
@@ -26,13 +26,13 @@ class PadOp : public framework::OperatorWithKernel {
  protected:
   void InferShape(const framework::InferShapeContext &ctx) const override {
     auto dim0 = ctx.Input<Tensor>("X")->dims();
-    auto paddings = GetAttr<std::vector<std::pair<int, int>>>("paddings");
+    auto paddings = GetAttr<std::vector<int>>("paddings");
     PADDLE_ENFORCE_EQ(
-        dim0.size(), paddings.size(),
+        dim0.size(), (int)(paddings.size() / 2),
         "Paddings size should be equal to dimension size of input tensor.");
     std::vector<int> dim1(dim0.size());
     for (int i = 0; i < dim0.size(); ++i) {
-      dim1[i] = dim0[i] + paddings[i].first + paddings[i].second;
+      dim1[i] = dim0[i] + paddings[i * 2] + paddings[i * 2 + 1];
     }
     ctx.Output<Tensor>("Out")->Resize(paddle::framework::make_ddim(dim1));
   }
@@ -42,14 +42,40 @@ class PadOpMaker : public framework::OpProtoAndCheckerMaker {
  public:
   PadOpMaker(framework::OpProto *proto, framework::OpAttrChecker *op_checker)
       : OpProtoAndCheckerMaker(proto, op_checker) {
-    AddInput("X", "The input of pad op");
-    AddOutput("Out", "The output of pad op");
+    AddInput("X", "The input of pad op.");
+    AddOutput("Out", "The output of pad op.");
     AddComment(R"DOC(
-Pad Operator.
+Pad input into output, as specified by paddings and pad_value. The input should be a k-D tensor (k > 0 and k < 7). As an example:
+
+Given:
+
+X = [[1, 2],
+     [3, 4]]
+
+and
+
+paddings = [0, 1, 1, 2]
+
+and
+
+pad_value = 0
+
+then we get
+
+Out = [[0, 1, 2, 0, 0],
+       [0, 3, 4, 0, 0],
+       [0, 0, 0, 0, 0]]
)DOC");
-    AddAttr<std::vector<std::pair<int, int>>>(
-        "paddings", "The padding rules for each dimension");
-    AddAttr<float>("pad_value", "The value to be padded into tensor")
+    AddAttr<std::vector<int>>(
+        "paddings",
+        "A list of integers describing the padding rules for each"
+        " dimension. For a 2-D image tensor, paddings=[0, 1, 2, 3] means"
+        " padding 0 rows to the top, 1 row to the bottom, 2 columns to the"
+        " left and 3 columns to the right. The size of paddings should be"
+        " equal to 2 * the dimension size of the input tensor.");
+    AddAttr<float>("pad_value",
+                   "(float, default 0) "
+                   "The value to be padded into the tensor.")
         .SetDefault(0.0f);
   }
 };
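For reference, the shape rule the rewritten InferShape enforces (each output dimension grows by the before/after entries of the flattened paddings attribute) can be sketched in a few lines of Python; padded_shape is an illustrative helper, not part of the patch:

    # Hypothetical helper mirroring PadOp::InferShape with the flattened
    # paddings layout [before_0, after_0, before_1, after_1, ...].
    def padded_shape(in_shape, paddings):
        assert len(paddings) == 2 * len(in_shape)
        return tuple(d + paddings[2 * i] + paddings[2 * i + 1]
                     for i, d in enumerate(in_shape))

    # Mirrors the DOC example: a 2x2 input with paddings [0, 1, 1, 2]
    # yields a 3x5 output.
    assert padded_shape((2, 2), [0, 1, 1, 2]) == (3, 5)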
") .SetDefault(0.0f); } }; diff --git a/paddle/operators/pad_op.h b/paddle/operators/pad_op.h index ed547d0a7f75cfd3a1306137ab7a089f4197f1e6..dcf957b47e9cbe2e484dca88199ef0e54a779914 100644 --- a/paddle/operators/pad_op.h +++ b/paddle/operators/pad_op.h @@ -28,23 +28,23 @@ using EigenTensor = framework::EigenTensor; template void PadFunction(const framework::ExecutionContext& context) { - auto pads = - context.op().GetAttr>>("paddings"); + auto pads = context.GetAttr>("paddings"); Eigen::array, D> paddings; - for (int i = 0; i < pads.size(); ++i) { - paddings[i] = pads[i]; + for (int i = 0; i < paddings.size(); ++i) { + paddings[i].first = pads[i * 2]; + paddings[i].second = pads[i * 2 + 1]; } - T pad_value = context.op().GetAttr("pad_value"); + T pad_value = context.GetAttr("pad_value"); - auto* X = context.Input("X"); - auto* Out = context.Output("Out"); - Out->mutable_data(context.GetPlace()); - auto dims = X->dims(); + auto* x = context.Input("X"); + auto* out = context.Output("Out"); + out->mutable_data(context.GetPlace()); + auto dims = x->dims(); - auto X_tensor = EigenTensor::From(*X); - auto Out_tensor = EigenTensor::From(*Out); + auto x_tensor = EigenTensor::From(*x); + auto out_tensor = EigenTensor::From(*out); auto place = context.GetEigenDevice(); - Out_tensor.device(place) = X_tensor.pad(paddings, pad_value); + out_tensor.device(place) = x_tensor.pad(paddings, pad_value); } template @@ -72,28 +72,27 @@ class PadKernel : public framework::OpKernel { PadFunction(context); break; default: - LOG(ERROR) << "Only ranks up to 6 supported."; + PADDLE_THROW("Only ranks up to 6 supported."); } } }; template void PadGradFunction(const framework::ExecutionContext& context) { - auto pads = - context.op().GetAttr>>("paddings"); + auto pads = context.GetAttr>("paddings"); Eigen::array, D> paddings; - for (int i = 0; i < pads.size(); ++i) { - paddings[i].first = -pads[i].first; - paddings[i].second = -pads[i].second; + for (int i = 0; i < paddings.size(); ++i) { + paddings[i].first = -pads[i * 2]; + paddings[i].second = -pads[i * 2 + 1]; } - auto* dOut = context.Input(framework::GradVarName("Out")); - auto* dX = context.Output(framework::GradVarName("X")); - dX->mutable_data(context.GetPlace()); + auto* d_out = context.Input(framework::GradVarName("Out")); + auto* d_x = context.Output(framework::GradVarName("X")); + d_x->mutable_data(context.GetPlace()); - auto dX_tensor = EigenTensor::From(*dX); - auto dOut_tensor = EigenTensor::From(*dOut); + auto d_x_tensor = EigenTensor::From(*d_x); + auto d_out_tensor = EigenTensor::From(*d_out); auto place = context.GetEigenDevice(); - dX_tensor.device(place) = dOut_tensor.pad(paddings, 0); + d_x_tensor.device(place) = d_out_tensor.pad(paddings, 0); } template @@ -122,7 +121,7 @@ class PadGradKernel : public framework::OpKernel { PadGradFunction(context); break; default: - LOG(ERROR) << "Only ranks up to 6 supported."; + PADDLE_THROW("Only ranks up to 6 supported."); } } }; diff --git a/python/paddle/v2/framework/tests/test_pad_op.py b/python/paddle/v2/framework/tests/test_pad_op.py index 10aeaa752fe73c9e9abdce3374d466e35a1bbc92..56b9c88f7dfbdf5d2936caf87664fe289777f10f 100644 --- a/python/paddle/v2/framework/tests/test_pad_op.py +++ b/python/paddle/v2/framework/tests/test_pad_op.py @@ -9,36 +9,89 @@ class TestPadOp(unittest.TestCase): __metaclass__ = OpTestMeta def setUp(self): + self.initTestCase() self.type = "pad" - self.inputs = {'X': np.random.random((16, 16)).astype("float32"), } + self.inputs = {'X': 
diff --git a/python/paddle/v2/framework/tests/test_pad_op.py b/python/paddle/v2/framework/tests/test_pad_op.py
index 10aeaa752fe73c9e9abdce3374d466e35a1bbc92..56b9c88f7dfbdf5d2936caf87664fe289777f10f 100644
--- a/python/paddle/v2/framework/tests/test_pad_op.py
+++ b/python/paddle/v2/framework/tests/test_pad_op.py
@@ -9,36 +9,89 @@ class TestPadOp(unittest.TestCase):
     __metaclass__ = OpTestMeta
 
     def setUp(self):
+        self.initTestCase()
         self.type = "pad"
-        self.inputs = {'X': np.random.random((16, 16)).astype("float32"), }
+        self.inputs = {'X': np.random.random(self.shape).astype("float32"), }
         self.attrs = {}
-        self.attrs['paddings'] = [(0, 1), (2, 3)]
-        self.attrs['pad_value'] = 0
+        self.attrs['paddings'] = np.array(self.paddings).flatten()
+        self.attrs['pad_value'] = self.pad_value
         self.outputs = {
             'Out': np.pad(self.inputs['X'],
-                          self.attrs['paddings'],
+                          self.paddings,
                           mode='constant',
-                          constant_values=0)
+                          constant_values=self.pad_value)
         }
 
+    def initTestCase(self):
+        self.shape = (16, 16)
+        self.paddings = [(0, 1), (2, 3)]
+        self.pad_value = 0
+
+
+class TestCase1(TestPadOp):
+    def initTestCase(self):
+        self.shape = (2, 3, 4, 4)
+        self.paddings = [(0, 1), (2, 3), (2, 1), (1, 1)]
+        self.pad_value = 0.5
+
+
+class TestCase2(TestPadOp):
+    def initTestCase(self):
+        self.shape = (2, 2, 2)
+        self.paddings = [(0, 0), (0, 0), (1, 2)]
+        self.pad_value = 1
+
+
+class TestCase3(TestPadOp):
+    def initTestCase(self):
+        self.shape = (8, )
+        self.paddings = [(0, 1)]
+        self.pad_value = 0.9
+
 
 class TestPadGradOp(GradientChecker):
     def setUp(self):
+        self.initTestCase()
         self.op = Operator(
             type="pad",
             X="X",
             Out="Out",
-            paddings=[(0, 1), (2, 3)],
-            pad_value=0)
-        self.inputs = {'X': np.random.random((16, 16)).astype("float32"), }
+            paddings=np.array(self.paddings).flatten(),
+            pad_value=self.pad_value)
+        self.inputs = {'X': np.random.random(self.shape).astype("float32"), }
+
+    def initTestCase(self):
+        self.shape = (16, 16)
+        self.paddings = [(0, 1), (2, 3)]
+        self.pad_value = 0
 
     def test_normal(self):
-        self.check_grad(
-            self.op, self.inputs, set(["X"]), "Out", max_relative_error=0.5)
+        self.check_grad(self.op, self.inputs, set(["X"]), "Out")
 
     def test_cpu_gpu_compare(self):
         self.compare_grad(self.op, self.inputs)
 
 
+class TestGradCase1(TestPadGradOp):
+    def initTestCase(self):
+        self.shape = (2, 3, 4, 4)
+        self.paddings = [(0, 1), (2, 3), (2, 1), (1, 1)]
+        self.pad_value = 0.5
+
+
+class TestGradCase2(TestPadGradOp):
+    def initTestCase(self):
+        self.shape = (2, 2, 2)
+        self.paddings = [(0, 0), (0, 0), (1, 2)]
+        self.pad_value = 1
+
+
+class TestGradCase3(TestPadGradOp):
+    def initTestCase(self):
+        self.shape = (8, )
+        self.paddings = [(0, 1)]
+        self.pad_value = 0.9
+
 
 if __name__ == '__main__':
     unittest.main()
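As a standalone check of what the test fixture computes, the np.pad reference output and the flattened attribute the operator receives can be reproduced directly (a sketch assuming only numpy; the names are illustrative):

    import numpy as np

    paddings = [(0, 1), (2, 3)]  # (before, after) per dimension
    x = np.random.random((16, 16)).astype("float32")

    # Reference output the tests compare against.
    out = np.pad(x, paddings, mode='constant', constant_values=0)
    assert out.shape == (16 + 0 + 1, 16 + 2 + 3)

    # The operator attribute receives the flattened form.
    assert list(np.array(paddings).flatten()) == [0, 1, 2, 3]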