Commit 3eadb42d authored by W wanghaoshuang

Fix eigen error.

Parent 2db7dede
......
@@ -26,18 +26,18 @@ class PadOp : public framework::OperatorWithKernel {
  protected:
   void InferShape(const framework::InferShapeContext &ctx) const override {
     auto dim0 = ctx.Input<Tensor>("X")->dims();
-    auto dim1 = ctx.Output<Tensor>("Out")->dims();
-    auto paddings = GetAttr<std::vector<std::pair<int32, int32>>>("paddings");
+    auto paddings = GetAttr<std::vector<std::pair<int, int>>>("paddings");
+    std::vector<int> dim1(dim0.size());
     for (int i = 0; i < dim0.size(); ++i) {
-      dim1[i] = dim0[i] + paddings[i][0] + paddings[i][1];
+      dim1[i] = dim0[i] + paddings[i].first + paddings[i].second;
     }
-    ctx.Output<Tensor>("Out")->Resize(dim1);
+    ctx.Output<Tensor>("Out")->Resize(paddle::framework::make_ddim(dim1));
   }
 };
 
-class MulOpMaker : public framework::OpProtoAndCheckerMaker {
+class PadOpMaker : public framework::OpProtoAndCheckerMaker {
  public:
-  MulOpMaker(framework::OpProto *proto, framework::OpAttrChecker *op_checker)
+  PadOpMaker(framework::OpProto *proto, framework::OpAttrChecker *op_checker)
       : OpProtoAndCheckerMaker(proto, op_checker) {
     AddInput("X", "The input of pad op");
     AddOutput("Out", "The output of pad op");
......
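Reviewer note: the corrected InferShape above computes each output extent as the input extent plus the before/after pad amounts, then wraps the result in a DDim via make_ddim. A minimal standalone sketch of just that arithmetic (plain C++, outside the Paddle framework; the 16x16 input and the paddings mirror the test at the bottom of this diff):

```cpp
#include <iostream>
#include <utility>
#include <vector>

int main() {
  // Input dims and the (before, after) pad pairs from the test below.
  std::vector<int> dim0 = {16, 16};
  std::vector<std::pair<int, int>> paddings = {{0, 1}, {2, 3}};

  // Each output dim = input dim + pad before + pad after,
  // exactly the loop in the corrected InferShape.
  std::vector<int> dim1(dim0.size());
  for (size_t i = 0; i < dim0.size(); ++i) {
    dim1[i] = dim0[i] + paddings[i].first + paddings[i].second;
  }

  for (int d : dim1) std::cout << d << ' ';  // prints: 17 21
  std::cout << '\n';
}
```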
......
@@ -28,52 +28,102 @@ template <typename T, size_t D, int MajorType = Eigen::RowMajor,
           typename IndexType = Eigen::DenseIndex>
 using EigenTensor = framework::EigenTensor<T, D, MajorType, IndexType>;
 
+template <typename Place, typename T, size_t D>
+void PadFunction(const framework::ExecutionContext& context) {
+  auto pads = context.op_.GetAttr<std::vector<std::pair<int, int>>>("paddings");
+  Eigen::array<std::pair<int, int>, D> paddings;
+  for (int i = 0; i < pads.size(); ++i) {
+    paddings[i] = pads[i];
+  }
+  T pad_value = context.op_.GetAttr<T>("pad_value");
+
+  auto* X = context.Input<Tensor>("X");
+  auto* Out = context.Output<Tensor>("Out");
+  Out->mutable_data<T>(context.GetPlace());
+  auto dims = X->dims();
+
+  auto X_tensor = EigenTensor<T, D>::From(*X);
+  auto Out_tensor = EigenTensor<T, D>::From(*Out);
+  auto place = context.GetEigenDevice<Place>();
+  Out_tensor.device(place) = X_tensor.pad(paddings, pad_value);
+}
+
 template <typename Place, typename T>
 class PadKernel : public framework::OpKernel {
  public:
   void Compute(const framework::ExecutionContext& context) const override {
-    auto paddings =
-        context.op_.GetAttr<std::vector<std::pair<int, int>>>("paddings");
-    T pad_value = context.op_.GetAttr<T>("pad_value");
-
-    auto* X = context.Input<Tensor>("X");
-    auto* Out = context.Output<Tensor>("Out");
-    Out->mutable_data<T>(context.GetPlace());
-    auto dims = X->dims();
-
-    // Eigen::TensorMap<Eigen::Tensor<const T, 2, Eigen::RowMajor,
-    // Eigen::DenseIndex>> X_tensor = EigenTensor<T, 2>::From(*X);
-    // Eigen::TensorMap<Eigen::Tensor<T, 2, Eigen::RowMajor, Eigen::DenseIndex>>
-    // Out_tensor = EigenTensor<T, 2>::From(*Out);
-    EigenTensor<T, dims.size()>::ConstType X_tensor =
-        EigenTensor<T, dims.size()>::From(*X);
-    EigenTensor<T, dims.size()>::Type Out_tensor =
-        EigenTensor<T, dims.size()>::From(*Out);
-    Out_tensor = X_tensor.pad(paddings, pad_value);
+    int dim = context.Input<Tensor>("X")->dims().size();
+    switch (dim) {
+      case 1:
+        PadFunction<Place, T, 1>(context);
+        break;
+      case 2:
+        PadFunction<Place, T, 2>(context);
+        break;
+      case 3:
+        PadFunction<Place, T, 3>(context);
+        break;
+      case 4:
+        PadFunction<Place, T, 4>(context);
+        break;
+      case 5:
+        PadFunction<Place, T, 5>(context);
+        break;
+      case 6:
+        PadFunction<Place, T, 6>(context);
+        break;
+      default:
+        LOG(ERROR) << "Only ranks up to 6 supported.";
+    }
   }
 };
 
+template <typename Place, typename T, size_t D>
+void PadGradFunction(const framework::ExecutionContext& context) {
+  auto pads = context.op_.GetAttr<std::vector<std::pair<int, int>>>("paddings");
+  Eigen::array<std::pair<int, int>, D> paddings;
+  for (int i = 0; i < pads.size(); ++i) {
+    paddings[i].first = -pads[i].first;
+    paddings[i].second = -pads[i].second;
+  }
+
+  auto* dOut = context.Input<Tensor>(framework::GradVarName("Out"));
+  auto* dX = context.Output<Tensor>(framework::GradVarName("X"));
+  dX->mutable_data<T>(context.GetPlace());
+
+  auto dX_tensor = EigenTensor<T, D>::From(*dX);
+  auto dOut_tensor = EigenTensor<T, D>::From(*dOut);
+  auto place = context.GetEigenDevice<Place>();
+  dX_tensor.device(place) = dOut_tensor.pad(paddings, 0);
+}
+
 template <typename Place, typename T>
 class PadGradKernel : public framework::OpKernel {
  public:
-  void Compute(const framework::ExecutionContext& ctx) const override {
-    std::vector<std::pair<int, int>> paddings =
-        context.op_.GetAttr<std::vector<std::pair<int, int>>>("paddings");
-    for (int i = 0; i < paddings.size(); ++i) {
-      paddings[0].first = -paddings[0].first;
-      paddings[1].second = -paddings[1].second;
-    }
-    auto* dOut = ctx.Input<Tensor>(framework::GradVarName("Out"));
-    auto dims = dOut->dims();
-    auto* dX = ctx.Output<Tensor>(framework::GradVarName("X"));
-    dX->mutable_data<T>(ctx.GetPlace());
-    EigenTensor<T, dims.size()>::Type dX_tensor =
-        EigenTensor<T, dims.size()>::From(*dX);
-    EigenTensor<T, dims.size()>::ConstType dOut_tensor =
-        EigenTensor<T, dims.size()>::From(*dOut);
-    dX_tensor = dOut_tensor.pad(paddings, 0);
+  void Compute(const framework::ExecutionContext& context) const override {
+    size_t dim =
+        context.Input<Tensor>(framework::GradVarName("Out"))->dims().size();
+    switch (dim) {
+      case 1:
+        PadGradFunction<Place, T, 1>(context);
+        break;
+      case 2:
+        PadGradFunction<Place, T, 2>(context);
+        break;
+      case 3:
+        PadGradFunction<Place, T, 3>(context);
+        break;
+      case 4:
+        PadGradFunction<Place, T, 4>(context);
+        break;
+      case 5:
+        PadGradFunction<Place, T, 5>(context);
+        break;
+      case 6:
+        PadGradFunction<Place, T, 6>(context);
+        break;
+      default:
+        LOG(ERROR) << "Only ranks up to 6 supported.";
+    }
   }
 };
......
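Reviewer note: the deleted EigenTensor<T, dims.size()> lines are the likely source of the "eigen error" this commit fixes: dims.size() is a runtime value, while Eigen's tensor rank must be a compile-time template argument. Hence the new pattern of instantiating PadFunction<Place, T, D> per rank and switching on the runtime rank. A self-contained sketch of the underlying Eigen pad() call for rank 2 (plain Eigen, not Paddle's wrappers; names are illustrative):

```cpp
#include <iostream>
#include <utility>
#include <unsupported/Eigen/CXX11/Tensor>

int main() {
  // A rank-2 input standing in for the op's "X".
  Eigen::Tensor<float, 2, Eigen::RowMajor> x(2, 3);
  x.setValues({{0, 1, 2}, {3, 4, 5}});

  // One (before, after) pair per dimension, like the "paddings" attribute.
  Eigen::array<std::pair<int, int>, 2> paddings;
  paddings[0] = std::make_pair(0, 1);  // rows: 0 before, 1 after
  paddings[1] = std::make_pair(2, 3);  // cols: 2 before, 3 after

  float pad_value = 0.0f;
  // The rank (here 2) is baked into the types at compile time -- this is
  // why the kernel above dispatches through a switch on the runtime rank.
  Eigen::Tensor<float, 2, Eigen::RowMajor> out = x.pad(paddings, pad_value);

  // Output shape: (2+0+1) x (3+2+3) = 3 x 8.
  std::cout << out << std::endl;
}
```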
 import unittest
 import numpy as np
 from paddle.v2.framework.op import Operator
 from gradient_checker import GradientChecker, create_op
 from op_test_util import OpTestMeta
......
@@ -10,19 +11,25 @@ class TestPadOp(unittest.TestCase):
     def setUp(self):
         self.type = "pad"
         self.inputs = {'X': np.random.random((16, 16)).astype("float32"), }
-        self.attrs['paddings'] = ((0, 1), (2, 3))
+        self.attrs = {}
+        self.attrs['paddings'] = [(0, 1), (2, 3)]
         self.attrs['pad_value'] = 0
         self.outputs = {
             'Out': np.pad(self.inputs['X'],
                           self.attrs['paddings'],
                           mode='constant',
-                          constant_value=0)
+                          constant_values=0)
         }
 
 
 class PadGradOpTest(GradientChecker):
     def test_pad(self):
-        op = Operator("pad", paddings=((0, 1), (2, 3)), pad_value=0)
+        op = Operator(
+            type="pad",
+            X="X",
+            Out="Out",
+            paddings=[(0, 1), (2, 3)],
+            pad_value=0)
         inputs = {'X': np.random.random((16, 16)).astype("float32"), }
         self.check_grad(op, inputs, set(["X"]), "Out", max_relative_error=0.5)
......
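Reviewer note: check_grad above exercises the PadGradKernel path, where PadGradFunction negates each (before, after) pair so that pad() trims instead of pads. A standalone sketch of that trick (plain Eigen; it assumes, as the diff does, that Eigen's pad() accepts negative amounts and then behaves like a slice):

```cpp
#include <iostream>
#include <utility>
#include <unsupported/Eigen/CXX11/Tensor>

int main() {
  // Gradient w.r.t. the padded output; a small 3x8 stand-in for brevity
  // (the test's real padded shape would be 17x21).
  Eigen::Tensor<float, 2, Eigen::RowMajor> d_out(3, 8);
  d_out.setConstant(1.0f);

  // Negate the forward paddings (0, 1) and (2, 3), as PadGradFunction does.
  // Assumption: negative padding trims; slice() would be the explicit
  // equivalent if that behavior ever changed.
  Eigen::array<std::pair<int, int>, 2> paddings;
  paddings[0] = std::make_pair(0, -1);
  paddings[1] = std::make_pair(-2, -3);

  Eigen::Tensor<float, 2, Eigen::RowMajor> d_x = d_out.pad(paddings, 0.0f);

  // Back to the original input shape: (3-0-1) x (8-2-3) = 2 x 3.
  std::cout << d_x.dimension(0) << " x " << d_x.dimension(1) << std::endl;
}
```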