Commit 9f8e4981 authored by wanghaoshuang

Fix some issues.

Parent 6684b55b
@@ -27,6 +27,9 @@ class PadOp : public framework::OperatorWithKernel {
   void InferShape(const framework::InferShapeContext &ctx) const override {
     auto dim0 = ctx.Input<Tensor>("X")->dims();
     auto paddings = GetAttr<std::vector<std::pair<int, int>>>("paddings");
+    PADDLE_ENFORCE_EQ(
+        dim0.size(), paddings.size(),
+        "Paddings size should be equal to dimension size of input tensor.");
     std::vector<int> dim1(dim0.size());
     for (int i = 0; i < dim0.size(); ++i) {
       dim1[i] = dim0[i] + paddings[i].first + paddings[i].second;
...
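The new PADDLE_ENFORCE_EQ guard rejects a paddings attribute whose length differs from the rank of the input, since the loop below indexes both in lockstep. The shape arithmetic itself is simple; here is a minimal sketch in plain Python (dim0/dim1 mirror the C++ names above, the values are illustrative):

    dim0 = (16, 16)                    # input tensor dimensions
    paddings = [(0, 1), (2, 3)]        # (before, after) padding per dimension
    assert len(dim0) == len(paddings)  # what the new PADDLE_ENFORCE_EQ enforces

    # Each output dimension grows by its before/after padding amounts.
    dim1 = [d + before + after for d, (before, after) in zip(dim0, paddings)]
    assert dim1 == [17, 21]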
@@ -14,8 +14,6 @@
 #pragma once
 
-#include "paddle/operators/math/math_function.h"
-
 #include "paddle/framework/eigen.h"
 #include "paddle/framework/op_registry.h"
@@ -30,12 +28,13 @@ using EigenTensor = framework::EigenTensor<T, D, MajorType, IndexType>;
 template <typename Place, typename T, size_t D>
 void PadFunction(const framework::ExecutionContext& context) {
-  auto pads = context.op_.GetAttr<std::vector<std::pair<int, int>>>("paddings");
+  auto pads =
+      context.op().GetAttr<std::vector<std::pair<int, int>>>("paddings");
   Eigen::array<std::pair<int, int>, D> paddings;
   for (int i = 0; i < pads.size(); ++i) {
     paddings[i] = pads[i];
   }
-  T pad_value = context.op_.GetAttr<T>("pad_value");
+  T pad_value = context.op().GetAttr<T>("pad_value");
   auto* X = context.Input<Tensor>("X");
   auto* Out = context.Output<Tensor>("Out");
@@ -80,7 +79,8 @@ class PadKernel : public framework::OpKernel {
 template <typename Place, typename T, size_t D>
 void PadGradFunction(const framework::ExecutionContext& context) {
-  auto pads = context.op_.GetAttr<std::vector<std::pair<int, int>>>("paddings");
+  auto pads =
+      context.op().GetAttr<std::vector<std::pair<int, int>>>("paddings");
   Eigen::array<std::pair<int, int>, D> paddings;
   for (int i = 0; i < pads.size(); ++i) {
     paddings[0].first = -paddings[0].first;
...
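PadFunction forwards the paddings attribute to Eigen's pad expression with pad_value as the fill constant, and PadGradFunction negates each pair so that the same expression crops the output gradient back to the input shape. A NumPy analogy of that forward/backward pair, offered as a sketch of the semantics rather than the actual kernel:

    import numpy as np

    x = np.random.random((16, 16)).astype("float32")
    paddings = [(0, 1), (2, 3)]  # (before, after) per dimension
    pad_value = 0.0

    # Forward: constant-pad each dimension, like Out = X.pad(paddings, pad_value).
    out = np.pad(x, paddings, mode="constant", constant_values=pad_value)
    assert out.shape == (17, 21)

    # Backward: the gradient of a constant pad is a crop back to the original
    # region, which is what padding dOut with the negated pairs achieves.
    d_out = np.ones_like(out)
    d_x = d_out[0:16, 2:18]
    assert d_x.shape == x.shape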
@@ -96,7 +96,7 @@ class OpDescCreationMethod(object):
             new_attr.strings.extend(user_defined_attr)
         elif attr.type == framework_pb2.INT_PAIRS:
             for p in user_defined_attr:
-                pair = new_attr.pairs.add()
+                pair = new_attr.int_pairs.add()
                 pair.first = p[0]
                 pair.second = p[1]
         else:
...
@@ -22,17 +22,22 @@ class TestPadOp(unittest.TestCase):
         }
 
-class PadGradOpTest(GradientChecker):
-    def test_pad(self):
-        op = Operator(
+class TestPadGradOp(GradientChecker):
+    def setUp(self):
+        self.op = Operator(
             type="pad",
             X="X",
             Out="Out",
             paddings=[(0, 1), (2, 3)],
             pad_value=0)
-        inputs = {'X': np.random.random((16, 16)).astype("float32"), }
+        self.inputs = {'X': np.random.random((16, 16)).astype("float32"), }
 
-        self.check_grad(op, inputs, set(["X"]), "Out", max_relative_error=0.5)
+    def test_normal(self):
+        self.check_grad(
+            self.op, self.inputs, set(["X"]), "Out", max_relative_error=0.5)
+
+    def test_cpu_gpu_compare(self):
+        self.compare_grad(self.op, self.inputs)
 
 if __name__ == '__main__':
...
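Moving the Operator construction into setUp lets the two new cases share it: test_normal runs GradientChecker.check_grad, which compares the operator's computed gradient against a numeric estimate (hence the loose max_relative_error), while test_cpu_gpu_compare runs compare_grad across devices. The numeric side of such a check is ordinary central differencing; a self-contained sketch of the idea (numeric_grad is a stand-in, not the GradientChecker API):

    import numpy as np

    def numeric_grad(f, x, eps=1e-3):
        # Central finite differences of a scalar-valued f, one element at a time.
        grad = np.zeros_like(x)
        for idx in np.ndindex(x.shape):
            orig = x[idx]
            x[idx] = orig + eps
            hi = f(x)
            x[idx] = orig - eps
            lo = f(x)
            x[idx] = orig
            grad[idx] = (hi - lo) / (2 * eps)
        return grad

    # For pad, the loss sum(pad(x)) has gradient ones(x.shape), since every
    # input element appears exactly once in the padded output.
    x = np.random.random((4, 4))
    f = lambda t: np.pad(t, [(0, 1), (2, 3)], mode="constant").sum()
    np.testing.assert_allclose(numeric_grad(f, x), np.ones_like(x), rtol=1e-6)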