Commit dd64349a authored by Yibing Liu

refine reshape operator

Parent 02da0d1b
@@ -29,14 +29,17 @@ class ReshapeOp : public framework::OperatorWithKernel {
   void InferShape(const framework::InferShapeContext &ctx) const override {
     auto *in = ctx.Input<framework::Tensor>("X");
     auto shape = ctx.Attr<std::vector<int>>("shape");
-    PADDLE_ENFORCE_EQ((unsigned)shape.size(), in->dims().size(),
-                      "The dimension of Input(X) mismatches with Attr(shape).");
-    size_t shape_size = 1;
+    int64_t capacity = -1;
     for (auto dim : shape) {
-      shape_size *= dim;
+      PADDLE_ENFORCE(dim > 0, "Each dimension of shape must be positive.");
+      if (capacity < 0) {
+        capacity = dim;
+      } else {
+        capacity *= dim;
+      }
     }
-    size_t in_size = framework::product(in->dims());
-    PADDLE_ENFORCE_EQ(shape_size, in_size,
+    int64_t in_size = framework::product(in->dims());
+    PADDLE_ENFORCE_EQ(capacity, in_size,
                       "The size of Input(X) mismatches with Attr(shape).");
     ctx.Output<framework::Tensor>("Out")->Resize(in->dims());
   }
...
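The refined InferShape above enforces two things: every entry of Attr(shape) must be positive, and the product of the entries (the "capacity") must equal the element count of Input(X). Below is a minimal Python sketch of the same rule using numpy; the helper name check_reshape is hypothetical, not PaddlePaddle API.

import numpy as np

def check_reshape(in_dims, shape):
    # Mirror the C++ loop: reject non-positive dims, then compare capacities.
    for dim in shape:
        assert dim > 0, "Each dimension of shape must be positive."
    capacity = int(np.prod(shape))
    in_size = int(np.prod(in_dims))
    assert capacity == in_size, "The size of Input(X) mismatches with Attr(shape)."

check_reshape((37, 51), [51 * 37])  # passes: both sides hold 1887 elements

One detail the sketch glosses over: the C++ code starts capacity at -1, so an empty shape attribute can never match a real input size.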
@@ -21,14 +21,12 @@
 namespace paddle {
 namespace operators {

-using Tensor = framework::Tensor;
-
 template <typename Place, typename T>
 class ReshapeKernel : public framework::OpKernel {
  public:
   void Compute(const framework::ExecutionContext& ctx) const {
-    auto* out = ctx.Output<Tensor>("Out");
-    auto* in = ctx.Input<Tensor>("X");
+    auto* out = ctx.Output<framework::Tensor>("Out");
+    auto* in = ctx.Input<framework::Tensor>("X");
     out->mutable_data<T>(ctx.GetPlace());

     auto shape = ctx.Attr<std::vector<int>>("shape");
@@ -46,8 +44,8 @@ template <typename Place, typename T>
 class ReshapeGradKernel : public framework::OpKernel {
  public:
   void Compute(const framework::ExecutionContext& ctx) const {
-    auto* d_out = ctx.Input<Tensor>(framework::GradVarName("Out"));
-    auto* d_x = ctx.Output<Tensor>(framework::GradVarName("X"));
+    auto* d_out = ctx.Input<framework::Tensor>(framework::GradVarName("Out"));
+    auto* d_x = ctx.Output<framework::Tensor>(framework::GradVarName("X"));
     d_x->mutable_data<T>(ctx.GetPlace());

     auto in_dims = d_x->dims();
...
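ReshapeGradKernel produces the gradient by reshaping d_out back to the input's dims: reshape is an element-wise bijection, so its backward pass is itself a reshape. A small numpy illustration of that identity (a sketch of the math, not the kernel):

import numpy as np

x = np.random.random((10, 20)).astype("float32")
out = x.reshape(5, 40)        # forward: 10 * 20 == 5 * 40 == 200 elements
d_out = np.ones_like(out)     # stand-in for the upstream gradient
d_x = d_out.reshape(x.shape)  # backward: restore in_dims, element order kept
assert d_x.shape == x.shape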
@@ -10,15 +10,27 @@ class TestReshapeOp(unittest.TestCase):
     def setUp(self):
         self.type = "reshape"
         self.inputs = {'X': np.random.random((37, 51)).astype("float32"), }
-        self.attrs = {'shape': [51, 37]}
+        self.attrs = {'shape': [51 * 37]}
         self.outputs = {'Out': self.inputs['X'].reshape(self.attrs['shape'])}


-class ReshapeGradOpTest(GradientChecker):
-    def test_normal(self):
-        op = Operator("reshape", X='X', Out='Out', shape=[5, 40])
-        inputs = {"X": np.random.random((10, 20)).astype("float32")}
-        self.check_grad(op, inputs, set("X"), "Out")
+class TestReshapeGradOp(GradientChecker):
+    """
+    def test_normal(self):
+        op = Operator("reshape", X='X', Out='Out', shape=[5, 40])
+        inputs = {"X": np.random.random((10, 20)).astype("float32")}
+        self.check_grad(op, inputs, set("X"), "Out")
+    """

+    def setUp(self):
+        self.op = Operator("reshape", X='X', Out='Out', shape=[5, 40])
+        self.inputs = {"X": np.random.random((10, 20)).astype("float32")}

+    def test_normal(self):
+        self.check_grad(self.op, self.inputs, ["X"], "Out")

+    def test_dev_compare(self):
+        self.compare_grad(self.op, self.inputs)

 if __name__ == '__main__':
...
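The updated forward test flattens the (37, 51) input into a single dimension of 51 * 37 = 1887 elements, and the renamed TestReshapeGradOp builds one Operator in setUp and reuses it for both the gradient check and the device comparison. A quick numpy check of the forward expectation, assuming row-major element order:

import numpy as np

x = np.random.random((37, 51)).astype("float32")
out = x.reshape([51 * 37])             # one dimension of 1887 elements
assert out.shape == (1887,)
assert np.array_equal(out, x.ravel())  # same elements in the same order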