From 9f8e4981384d247e461290d7ceb642486663390d Mon Sep 17 00:00:00 2001
From: wanghaoshuang
Date: Wed, 6 Sep 2017 11:59:10 +0800
Subject: [PATCH] Fix some issues.

---
 paddle/operators/pad_op.cc                      |  3 +++
 paddle/operators/pad_op.h                       | 10 +++++-----
 python/paddle/v2/framework/op.py                |  2 +-
 python/paddle/v2/framework/tests/test_pad_op.py | 15 ++++++++++-----
 4 files changed, 19 insertions(+), 11 deletions(-)

diff --git a/paddle/operators/pad_op.cc b/paddle/operators/pad_op.cc
index 5dee8d0f5ea..dac1c56bdd4 100644
--- a/paddle/operators/pad_op.cc
+++ b/paddle/operators/pad_op.cc
@@ -27,6 +27,9 @@ class PadOp : public framework::OperatorWithKernel {
   void InferShape(const framework::InferShapeContext &ctx) const override {
     auto dim0 = ctx.Input<Tensor>("X")->dims();
     auto paddings = GetAttr<std::vector<std::pair<int, int>>>("paddings");
+    PADDLE_ENFORCE_EQ(
+        dim0.size(), paddings.size(),
+        "Paddings size should be equal to dimension size of input tensor.");
     std::vector<int> dim1(dim0.size());
     for (int i = 0; i < dim0.size(); ++i) {
       dim1[i] = dim0[i] + paddings[i].first + paddings[i].second;
diff --git a/paddle/operators/pad_op.h b/paddle/operators/pad_op.h
index 9a0a064d752..234019394c8 100644
--- a/paddle/operators/pad_op.h
+++ b/paddle/operators/pad_op.h
@@ -14,8 +14,6 @@
 
 #pragma once
 
-#include "paddle/operators/math/math_function.h"
-
 #include "paddle/framework/eigen.h"
 #include "paddle/framework/op_registry.h"
 
@@ -30,12 +28,13 @@ using EigenTensor = framework::EigenTensor<T, D, MajorType, IndexType>;
 
 template <typename Place, typename T, size_t D>
 void PadFunction(const framework::ExecutionContext& context) {
-  auto pads = context.op_.GetAttr<std::vector<std::pair<int, int>>>("paddings");
+  auto pads =
+      context.op().GetAttr<std::vector<std::pair<int, int>>>("paddings");
   Eigen::array<std::pair<int, int>, D> paddings;
   for (int i = 0; i < pads.size(); ++i) {
     paddings[i] = pads[i];
   }
-  T pad_value = context.op_.GetAttr<T>("pad_value");
+  T pad_value = context.op().GetAttr<T>("pad_value");
 
   auto* X = context.Input<Tensor>("X");
   auto* Out = context.Output<Tensor>("Out");
@@ -80,7 +79,8 @@ class PadKernel : public framework::OpKernel {
 
 template <typename Place, typename T, size_t D>
 void PadGradFunction(const framework::ExecutionContext& context) {
-  auto pads = context.op_.GetAttr<std::vector<std::pair<int, int>>>("paddings");
+  auto pads =
+      context.op().GetAttr<std::vector<std::pair<int, int>>>("paddings");
   Eigen::array<std::pair<int, int>, D> paddings;
   for (int i = 0; i < pads.size(); ++i) {
     paddings[0].first = -paddings[0].first;
diff --git a/python/paddle/v2/framework/op.py b/python/paddle/v2/framework/op.py
index 0349407a851..359ccec814d 100644
--- a/python/paddle/v2/framework/op.py
+++ b/python/paddle/v2/framework/op.py
@@ -96,7 +96,7 @@ class OpDescCreationMethod(object):
                         new_attr.strings.extend(user_defined_attr)
                     elif attr.type == framework_pb2.INT_PAIRS:
                         for p in user_defined_attr:
-                            pair = new_attr.pairs.add()
+                            pair = new_attr.int_pairs.add()
                             pair.first = p[0]
                             pair.second = p[1]
                     else:
diff --git a/python/paddle/v2/framework/tests/test_pad_op.py b/python/paddle/v2/framework/tests/test_pad_op.py
index b862033d8ca..10aeaa752fe 100644
--- a/python/paddle/v2/framework/tests/test_pad_op.py
+++ b/python/paddle/v2/framework/tests/test_pad_op.py
@@ -22,17 +22,22 @@ class TestPadOp(unittest.TestCase):
         }
 
 
-class PadGradOpTest(GradientChecker):
-    def test_pad(self):
-        op = Operator(
+class TestPadGradOp(GradientChecker):
+    def setUp(self):
+        self.op = Operator(
             type="pad",
             X="X",
             Out="Out",
             paddings=[(0, 1), (2, 3)],
             pad_value=0)
-        inputs = {'X': np.random.random((16, 16)).astype("float32"), }
+        self.inputs = {'X': np.random.random((16, 16)).astype("float32"), }
+
+    def test_normal(self):
+        self.check_grad(
+            self.op, self.inputs, set(["X"]), "Out", max_relative_error=0.5)
 
-        self.check_grad(op, inputs, set(["X"]), "Out", max_relative_error=0.5)
+    def test_cpu_gpu_compare(self):
+        self.compare_grad(self.op, self.inputs)
 
 
 if __name__ == '__main__':
--
GitLab