diff --git a/paddle/fluid/operators/reshape_op.cc b/paddle/fluid/operators/reshape_op.cc
index b094e649c3dcdd9ac85979230570d46af308f7ca..c0d08cc6903b9781007f5e2d71c780f583a831c7 100644
--- a/paddle/fluid/operators/reshape_op.cc
+++ b/paddle/fluid/operators/reshape_op.cc
@@ -25,39 +25,28 @@ class ReshapeOp : public framework::OperatorWithKernel {
       : OperatorWithKernel(type, inputs, outputs, attrs) {}
 
   void InferShape(framework::InferShapeContext *ctx) const override {
-    // input check
     PADDLE_ENFORCE(ctx->HasInput("X"),
                    "Input(X) of ReshapeOp should not be null.");
     PADDLE_ENFORCE(ctx->HasOutput("Out"),
                    "Output(Out) of ReshapeOp should not be null.");
 
    const std::vector<int> &shape = ctx->Attrs().Get<std::vector<int>>("shape");
-    PADDLE_ENFORCE_EQ(shape.empty(), ctx->HasInput("Shape"),
-                      "The shape information can only be set by Attr(shape) or "
-                      "by Input(Shape). Attr(shape) and Input(Shape) cannot be "
-                      "set at the same time.");
+    PADDLE_ENFORCE(!shape.empty(),
+                   "The shape information must be set by Attr(shape).");
 
+    std::vector<int64_t> output_shape;
     auto x_dims = ctx->GetInputDim("X");
+    bool need_copy_dim = ValidateShape(shape, x_dims, output_shape);
 
-    if (ctx->HasInput("Shape")) {
-      // The shape information in given by Input(Shape).
-      auto shape_dims = ctx->GetInputDim("Shape");
-
-      PADDLE_ENFORCE(shape_dims.size() == 2UL && shape_dims[0] == 1UL,
-                     "The Input(Label) should be a 2-D tensor with the 1st "
-                     "dimensions fixed to 1 (a row vector).");
-
-      // The actual output shape will be set at runtime, here temporially set
-      // the shape of output the same as the shape of input.
+    if (need_copy_dim) {
+      // Some dimensions can only be determined during runtime. Here temporarily
+      // set output tensor's shape the same as that of the input tensor.
       ctx->SetOutputDim("Out", x_dims);
     } else {
-      // The shape information in given by Attr(shape).
-      std::vector<int64_t> output_shape;
-      ValidateShape(shape, framework::product(x_dims), output_shape);
-
-      auto out_dims = framework::make_ddim(output_shape);
-      ctx->SetOutputDim("Out", out_dims);
+      ctx->SetOutputDim("Out", framework::make_ddim(output_shape));
+      // FIXME(caoying): When shape of the output tensor is determined during
+      // runtime, LoD information of X will not be passed to the output.
 
       if (shape[0] == x_dims[0]) {
         // Only pass LoD when the first dimension of output and Input(X)
         // are the same.
@@ -67,41 +56,51 @@ class ReshapeOp : public framework::OperatorWithKernel {
   }
 
  private:
-  void ValidateShape(const std::vector<int> &shape, const int64_t in_size,
+  bool ValidateShape(const std::vector<int> &shape,
+                     const framework::DDim &input_dim,
                      std::vector<int64_t> &output_shape) const {
-    std::vector<size_t> neg_dims_idx;
-    const int unknown_index = -1;  // only one dimension canbe set to -1, whose
-                                   // size will be automatically infered.
+    // only one dimension can be set to -1, whose size will be automatically
+    // inferred.
+    const int64_t unknown_index = -1;
+    const auto in_size = framework::product(input_dim);
+    const auto x_rank = input_dim.size();
+
+    bool need_dim_copy = false;
+    std::vector<size_t> neg_dims_idx;
     for (size_t i = 0; i < shape.size(); ++i) {
-      PADDLE_ENFORCE(shape[i] > 1 || shape[i] == unknown_index,
+      PADDLE_ENFORCE(shape[i] >= 0 || shape[i] == unknown_index,
                      "Each input dimension of Attr(shape) must be positive, or "
                      "only one input dimension can be -1.");
-      if (shape[i] == unknown_index) neg_dims_idx.push_back(i);
+      if (shape[i] == unknown_index) {
+        neg_dims_idx.push_back(i);
+      } else if (shape[i] == 0) {
+        PADDLE_ENFORCE_LT(
+            i, x_rank,
+            "Only dimension less than rank of Input(X) can be set to 0.");
+        need_dim_copy = true;
+      }
     }
     PADDLE_ENFORCE_LE(
         neg_dims_idx.size(), 1,
         "Only one input dimension of Attr(shape) may be unknown.");
 
+    output_shape.resize(shape.size(), 0);
+    std::transform(shape.begin(), shape.end(), output_shape.begin(),
+                   [](int a) { return static_cast<int64_t>(a); });
+
+    // Some dimensions can only be determined during runtime.
+    if (need_dim_copy) return need_dim_copy;
+
     int64_t inferred_dim = 0;
     if (neg_dims_idx.size()) {
       int64_t capacity = std::accumulate(shape.begin(), shape.end(), 1,
                                          std::multiplies<int>());
       inferred_dim = in_size / (-capacity);
+      PADDLE_ENFORCE_EQ(inferred_dim * (-capacity), in_size,
+                        "Invalid shape is given.");
+      output_shape[neg_dims_idx[0]] = inferred_dim;
     }
-
-    output_shape.resize(shape.size(), 0);
-    std::transform(shape.begin(), shape.end(), output_shape.begin(),
-                   [](int a) { return static_cast<int64_t>(a); });
-    if (neg_dims_idx.size()) output_shape[neg_dims_idx[0]] = inferred_dim;
-  }
-
- protected:
-  framework::OpKernelType GetExpectedKernelType(
-      const framework::ExecutionContext &ctx) const override {
-    return framework::OpKernelType(
-        framework::ToDataType(ctx.Input<framework::LoDTensor>("X")->type()),
-        ctx.device_context());
+    return false;
   }
 };
 
@@ -110,14 +109,9 @@ class ReshapeOpMaker : public framework::OpProtoAndCheckerMaker {
   ReshapeOpMaker(OpProto *proto, OpAttrChecker *op_checker)
       : OpProtoAndCheckerMaker(proto, op_checker) {
     AddInput("X", "The input tensor of reshape operator.");
-    AddInput(
-        "Shape",
-        "Tensor<int64_t>, a 1-D tensor that provides the shape information.")
-        .AsDispensable();
     AddOutput("Out", "The output tensor of reshape operator.");
     AddAttr<std::vector<int>>(
-        "shape", "(std::vector<int>) Target shape of reshape operator.")
-        .SetDefault(std::vector<int>());
+        "shape", "(std::vector<int>) Target shape of reshape operator.");
     AddAttr<bool>("inplace",
                   "Change the source tensor's shape without copy memory.")
         .SetDefault(true);
@@ -153,14 +147,6 @@ class ReshapeGradOp : public framework::OperatorWithKernel {
                    "Input(Out@GRAD) shouldn't be null.");
     ctx->SetOutputDim(framework::GradVarName("X"), ctx->GetInputDim("X"));
   }
-
- protected:
-  framework::OpKernelType GetExpectedKernelType(
-      const framework::ExecutionContext &ctx) const override {
-    return framework::OpKernelType(
-        framework::ToDataType(ctx.Input<framework::LoDTensor>("X")->type()),
-        ctx.device_context());
-  }
 };
 
 }  // namespace operators
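
Note: the rewritten `ValidateShape` above implements a simple resolution rule: `0` copies the corresponding input dimension (and defers final resolution to run time), a single `-1` is inferred from the remaining capacity, and the return value signals whether a run-time dimension copy is needed. Below is a minimal Python sketch of that rule, not PaddlePaddle code; `resolve_shape` is a hypothetical name used only for illustration:

```python
def resolve_shape(shape, in_dims):
    """Resolve 0 (copy input dim) and a single -1 (inferred) in `shape`."""
    assert shape.count(-1) <= 1, "at most one dimension may be -1"
    total = 1
    for d in in_dims:
        total *= d  # total number of elements in the input
    out = []
    for i, s in enumerate(shape):
        if s == 0:
            # Mirrors PADDLE_ENFORCE_LT(i, x_rank, ...) in the C++ code.
            assert i < len(in_dims), "0 must index an existing input dim"
            out.append(in_dims[i])
        else:
            out.append(s)
    known = 1
    for s in out:
        if s != -1:
            known *= s
    if -1 in out:
        assert total % known == 0, "invalid shape is given"
        out[out.index(-1)] = total // known
    else:
        assert known == total, "invalid shape is given"
    return out

# e.g. resolve_shape([2, 0, 3, -1], [2, 2, 6]) == [2, 2, 3, 2]
```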
diff --git a/paddle/fluid/operators/reshape_op.h b/paddle/fluid/operators/reshape_op.h
index 23fbf1655c7566bb229a2be608ee91c8ef024af7..9dbc5cec6b2285ec091b19f6c8c05fe4e92c94ae 100644
--- a/paddle/fluid/operators/reshape_op.h
+++ b/paddle/fluid/operators/reshape_op.h
@@ -27,17 +27,8 @@ class ReshapeKernel : public framework::OpKernel<T> {
     auto* out = ctx.Output<framework::Tensor>("Out");
     auto* in = ctx.Input<framework::Tensor>("X");
-    auto* shape = ctx.Input<framework::Tensor>("Shape");
-    framework::DDim out_dims;
-    if (shape) {
-      std::vector<int64_t> output_shape;
-      ValidateShape(*shape,
-                    framework::product(in->dims()), output_shape);
-
-      out_dims = framework::make_ddim(output_shape);
-    } else {
-      out_dims = out->dims();
-    }
-
+    auto out_dims =
+        ValidateShape(ctx.Attr<std::vector<int>>("shape"), in->dims());
     bool inplace = ctx.Attr<bool>("inplace");
     if (!inplace) {
       out->mutable_data<T>(ctx.GetPlace());
@@ -50,35 +41,31 @@ class ReshapeKernel : public framework::OpKernel<T> {
   }
 
  private:
-  void ValidateShape(const framework::Tensor& shape, const int64_t in_size,
-                     std::vector<int64_t>& output_shape) const {
-    std::vector<size_t> neg_dims_idx;
-    const int unknown_index = -1;  // only one dimension canbe set to -1, whose
-                                   // size will be automatically infered.
-
-    const int64_t dimension = shape.dims()[1];
-    std::cout << "dimension =" << dimension << std::endl;
-    const T* shape_data = shape.data<T>();
-
-    for (int64_t i = 0; i < dimension; ++i) {
-      PADDLE_ENFORCE(shape_data[i] > 1 || shape_data[i] == unknown_index,
-                     "Each input dimension of Attr(shape) must be positive, or "
-                     "only one input dimension can be -1.");
-      if (shape_data[i] == unknown_index) neg_dims_idx.push_back(i);
-    }
-    PADDLE_ENFORCE_LE(
-        neg_dims_idx.size(), 1,
-        "Only one input dimension of Attr(shape) can be unknown.");
-
+  framework::DDim ValidateShape(const std::vector<int> shape_attr,
+                                const framework::DDim& in_dims) const {
+    const int64_t in_size = framework::product(in_dims);
+    // only one dimension can be set to -1, whose size will be automatically
+    // inferred.
+    const int64_t unknown_index = -1;
+
+    std::vector<int64_t> output_shape(shape_attr.size(), 0);
     int64_t capacity = 1;
-    output_shape.resize(dimension, 0);
-    for (int64_t i = 0; i < dimension; ++i) {
-      capacity *= shape_data[i];
-      output_shape[i] = static_cast<int64_t>(shape_data[i]);
+    int neg_dim_idx = -1;
+    for (size_t i = 0; i < shape_attr.size(); ++i) {
+      if (shape_attr[i] == unknown_index) neg_dim_idx = i;
+      capacity *= (shape_attr[i] ? shape_attr[i] : in_dims[i]);
+      output_shape[i] =
+          (shape_attr[i] ? static_cast<int64_t>(shape_attr[i]) : in_dims[i]);
     }
-    if (neg_dims_idx.size())
-      output_shape[neg_dims_idx[0]] = in_size / (-capacity);
+
+    if (neg_dim_idx != -1) {
+      output_shape[neg_dim_idx] = -in_size / capacity;
+      PADDLE_ENFORCE_EQ(output_shape[neg_dim_idx] * capacity, -in_size,
+                        "Invalid shape is given.");
+    } else {
+      PADDLE_ENFORCE_EQ(capacity, in_size, "Invalid shape is given.");
+    }
+    return framework::make_ddim(output_shape);
   }
 };
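
Note: the kernel now branches only on the `inplace` attribute: `ShareDataWith` when it is true, `mutable_data` plus `TensorCopy` otherwise. A rough numpy analogy of the two behaviors, assuming a contiguous buffer (views stand in for `ShareDataWith`; this is an illustration, not the operator itself):

```python
import numpy as np

x = np.arange(10, dtype=np.float32).reshape(2, 5)

shared = x.reshape(5, 2)         # like inplace=True: a view, no data copy
copied = x.reshape(5, 2).copy()  # like inplace=False: a fresh buffer

x[0, 0] = 42.0
assert shared[0, 0] == 42.0  # the view observes the write
assert copied[0, 0] == 0.0   # the copy does not
```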
diff --git a/python/paddle/fluid/layers/detection.py b/python/paddle/fluid/layers/detection.py
index 2bf7cf21ca94c742a15d980194b896d9ec8ad91b..d326c5651fa39a81bfc9148cfc005c559b10cf1f 100644
--- a/python/paddle/fluid/layers/detection.py
+++ b/python/paddle/fluid/layers/detection.py
@@ -19,7 +19,6 @@ from layer_function_generator import generate_layer_fn
 from layer_function_generator import autodoc
 from ..layer_helper import LayerHelper
 import tensor
-import ops
 import nn
 import math
 
@@ -58,7 +57,7 @@ def detection_output(loc,
 
     This operation is to get the detection results by performing following
     two steps:
-    
+
     1. Decode input bounding box predictions according to the prior boxes.
     2. Get the final detection results by applying multi-class non maximum
        suppression (NMS).
@@ -458,7 +457,7 @@ def ssd_loss(location,
     num, num_prior, num_class = confidence.shape
 
     def __reshape_to_2d(var):
-        return ops.reshape(x=var, shape=[-1, var.shape[-1]])
+        return nn.reshape(x=var, shape=[-1, var.shape[-1]])
 
     # 1. Find matched boundding box by prior box.
     #   1.1 Compute IOU similarity between ground-truth boxes and prior boxes.
@@ -469,7 +468,7 @@ def ssd_loss(location,
 
     # 2. Compute confidence for mining hard examples
     # 2.1. Get the target label based on matched indices
-    gt_label = ops.reshape(x=gt_label, shape=gt_label.shape + (1, ))
+    gt_label = nn.reshape(x=gt_label, shape=gt_label.shape + (1, ))
     target_label, _ = target_assign(
         gt_label, matched_indices, mismatch_value=background_label)
     # 2.2. Compute confidence loss.
@@ -480,7 +479,7 @@ def ssd_loss(location,
     conf_loss = nn.softmax_with_cross_entropy(confidence, target_label)
 
     # 3. Mining hard examples
-    conf_loss = ops.reshape(x=conf_loss, shape=(num, num_prior))
+    conf_loss = nn.reshape(x=conf_loss, shape=(num, num_prior))
     neg_indices = helper.create_tmp_variable(dtype='int32')
     dtype = matched_indices.dtype
     updated_matched_indices = helper.create_tmp_variable(dtype=dtype)
@@ -548,7 +547,7 @@ def ssd_loss(location,
     # 5.3 Compute overall weighted loss.
     loss = conf_loss_weight * conf_loss + loc_loss_weight * loc_loss
     # reshape to [N, Np], N is the batch size and Np is the prior box number.
-    loss = ops.reshape(x=loss, shape=[-1, num_prior])
+    loss = nn.reshape(x=loss, shape=[-1, num_prior])
     loss = nn.reduce_sum(loss, dim=1, keep_dim=True)
     if normalize:
         normalizer = nn.reduce_sum(target_loc_weight)
@@ -696,7 +695,7 @@ def multi_box_head(inputs,
         new_shape = [
             -1, reduce(lambda x, y: x * y, input.shape[axis:len(input.shape)])
         ]
-        out = ops.reshape(x=input, shape=new_shape)
+        out = nn.reshape(x=input, shape=new_shape)
         return out
 
     def _is_list_or_tuple_(data):
@@ -793,7 +792,7 @@ def multi_box_head(inputs,
             mbox_loc.shape[0], mbox_loc.shape[1] * mbox_loc.shape[2] *
             mbox_loc.shape[3] / 4, 4
         ]
-        mbox_loc_flatten = ops.reshape(mbox_loc, shape=new_shape)
+        mbox_loc_flatten = nn.reshape(mbox_loc, shape=new_shape)
         mbox_locs.append(mbox_loc_flatten)
 
         # get conf_loc
@@ -809,7 +808,7 @@ def multi_box_head(inputs,
             conf_loc.shape[0], conf_loc.shape[1] * conf_loc.shape[2] *
             conf_loc.shape[3] / num_classes, num_classes
         ]
-        conf_loc_flatten = ops.reshape(conf_loc, shape=new_shape)
+        conf_loc_flatten = nn.reshape(conf_loc, shape=new_shape)
         mbox_confs.append(conf_loc_flatten)
 
     if len(box_results) == 1:
diff --git a/python/paddle/fluid/layers/nn.py b/python/paddle/fluid/layers/nn.py
index 10b0405f47097fa3d83690e519ea878e082f68b9..67a6fd808480d2912b10869bef616b69cdd431fd 100644
--- a/python/paddle/fluid/layers/nn.py
+++ b/python/paddle/fluid/layers/nn.py
@@ -70,6 +70,7 @@ __all__ = [
     'smooth_l1',
     'one_hot',
     'autoincreased_step_counter',
+    'reshape',
 ]
 
 
@@ -3184,6 +3185,8 @@ def one_hot(input, depth):
         The one-hot tensor or LodTensor, same as input.
 
     Examples:
+        .. code-block:: python
+
         X is a LoDTensor:
           X.lod = [[0, 1, 4]]
           X.shape = [4, 1]
@@ -3236,3 +3239,56 @@ def autoincreased_step_counter(counter_name=None, begin=1, step=1):
     counter.stop_gradient = True
 
     return counter
+
+
+def reshape(x, shape, act=None, inplace=True, name=None):
+    """
+    Gives a new shape to the input Tensor without changing its data.
+    This layer takes the input tensor and a shape attribute that specifies the
+    new shape. The shape attribute must be set. At most one dimension of the
+    new shape can be -1; in that case, its value is inferred from the size of
+    the tensor and the remaining dimensions. A dimension can also be 0, in
+    which case its actual value is copied from the corresponding dimension of
+    the input tensor.
+
+    Args:
+        x(Variable): The input tensor.
+        shape(list): The new shape. At most one dimension of the new shape can
+                     be -1.
+        act(str): The non-linear activation to be applied to the output.
+        inplace(bool): If set true, the output shares data with the input
+                       without copying; otherwise, a new output tensor is
+                       created whose data is copied from input x.
+
+    Returns:
+        Variable: The output tensor.
+
+    Examples:
+        .. code-block:: python
+
+            Given a 2-D tensor X with shape [2 x 2], and the new shape [1, 4],
+            the reshape layer will change tensor X into a 2-D tensor with
+            shape [1 x 4] with its data unchanged.
+
+            Given a 3-D tensor x with shape [2, 3, 4] and the new shape
+            [3, -1], the reshape layer will change tensor X into a 2-D tensor
+            with shape [3 x 8] with its data unchanged.
+
+            Given a 3-D tensor x with shape [2, 3, 8] and the new shape
+            [-1, 0, 2, 2], the reshape layer will change tensor X into a 4-D
+            tensor with shape [4, 3, 2, 2] with its data unchanged.
+
+    """
+
+    if not (isinstance(shape, list) or isinstance(shape, tuple)):
+        raise ValueError("Input shape must be a python list or tuple.")
+
+    helper = LayerHelper("reshape", **locals())
+    reshaped = helper.create_tmp_variable(dtype=x.dtype)
+    helper.append_op(
+        type="reshape",
+        inputs={"X": x},
+        attrs={"shape": shape,
+               "inplace": inplace},
+        outputs={"Out": reshaped})
+
+    return helper.append_activation(reshaped)
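
Note: with `reshape` now exported from `fluid.layers.nn`, a call might look like the sketch below (variable names and shapes are illustrative, not taken from this patch):

```python
import paddle.fluid as fluid

# The batch dimension is implicit; each sample has shape [2, 4, 6].
data = fluid.layers.data(name="data", shape=[2, 4, 6], dtype="float32")
# 0 copies dim 1 of the input and the single -1 is inferred at run time.
reshaped = fluid.layers.reshape(
    x=data, shape=[-1, 0, 3, 2], act="tanh", inplace=True)
```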
diff --git a/python/paddle/fluid/layers/ops.py b/python/paddle/fluid/layers/ops.py
index 0b88b639629ac73b16ec36aa5930c3d6a9665943..20dd1b47525f0ec32dc53c574b04820cb7d5941c 100644
--- a/python/paddle/fluid/layers/ops.py
+++ b/python/paddle/fluid/layers/ops.py
@@ -47,7 +47,6 @@ __activations__ = [
 __all__ = [
     'mean',
     'mul',
-    'reshape',
     'scale',
     'sigmoid_cross_entropy_with_logits',
     'elementwise_add',
diff --git a/python/paddle/fluid/tests/unittests/test_reshape_op.py b/python/paddle/fluid/tests/unittests/test_reshape_op.py
index dc96aed8dbe55f2f40d9ffb21f569a2e00ac6425..1a54427ab5caa1ce2e4ea49ccf7b3140ed698752 100644
--- a/python/paddle/fluid/tests/unittests/test_reshape_op.py
+++ b/python/paddle/fluid/tests/unittests/test_reshape_op.py
@@ -14,53 +14,88 @@
 import unittest
 
 import numpy as np
-import pdb
 
 from op_test import OpTest
 
-# class TestReshapeOp1(OpTest):
-#     def setUp(self):
-#         ori_shape = (2, 25)
-#         new_shape = [5, 10]
-#
-#         self.op_type = "reshape"
-#         self.inputs = {"X": np.random.random(ori_shape).astype("float32")}
-#         self.attrs = {"shape": new_shape}
-#         self.outputs = {"Out": self.inputs["X"].reshape(new_shape)}
-#
-#     def test_check_output(self):
-#         self.check_output()
-#
-#     def test_check_grad(self):
-#         self.check_grad(["X"], "Out")
-#
-#
-# class TestReshapeOpDimInfer1(OpTest):
-#     def setUp(self):
-#         self.op_type = "reshape"
-#         self.inputs = {"X": np.random.random((5, 10)).astype("float32")}
-#         self.attrs = {"shape": [5, -1, 5]}
-#         self.outputs = {"Out": self.inputs["X"].reshape(self.attrs["shape"])}
-#
-#     def test_check_output(self):
-#         self.check_output()
-#
-#     def test_check_grad(self):
-#         self.check_grad(["X"], "Out")
-
-
-class TestReshapeOp2(OpTest):
+
+class TestReshapeOp(OpTest):
+    def setUp(self):
+        ori_shape = (2, 25)
+        new_shape = (5, 10)
+
+        self.op_type = "reshape"
+        self.inputs = {"X": np.random.random(ori_shape).astype("float32")}
+        self.attrs = {"shape": new_shape, "inplace": False}
+        self.outputs = {"Out": self.inputs["X"].reshape(new_shape)}
+
+    def test_check_output(self):
+        self.check_output()
+
+    def test_check_grad(self):
+        self.check_grad(["X"], "Out")
+
+
+class TestReshapeOpDimInfer1(OpTest):
+    def setUp(self):
+        ori_shape = (5, 10)
+        new_shape = (5, -1, 5)
+
+        self.op_type = "reshape"
+        self.inputs = {"X": np.random.random(ori_shape).astype("float32")}
+        self.attrs = {"shape": new_shape, "inplace": False}
+        self.outputs = {"Out": self.inputs["X"].reshape(self.attrs["shape"])}
+
+    def test_check_output(self):
+        self.check_output()
+
+    def test_check_grad(self):
+        self.check_grad(["X"], "Out")
+
+
+class TestReshapeOpDimInfer2(OpTest):
+    def setUp(self):
+        ori_shape = (2, 2, 6)
+        new_shape = (2, 0, 3, -1)
+        inferred_shape = (2, 2, 3, -1)
+
+        self.op_type = "reshape"
+        self.inputs = {"X": np.random.random(ori_shape).astype("float32")}
+        self.attrs = {"shape": new_shape, "inplace": False}
+        self.outputs = {"Out": self.inputs["X"].reshape(inferred_shape)}
+
+    def test_check_output(self):
+        self.check_output()
+
+    def test_check_grad(self):
+        self.check_grad(["X"], "Out")
+
+
+class TestReshapeOpInplace(OpTest):
     def setUp(self):
         ori_shape = (2, 25)
-        new_shape = ([5, 10], )
+        new_shape = (5, 10)
+
+        self.op_type = "reshape"
+        self.inputs = {"X": np.random.random(ori_shape).astype("float32")}
+        self.attrs = {"shape": new_shape}
+        self.outputs = {"Out": self.inputs["X"].reshape(new_shape)}
+
+    def test_check_output(self):
+        self.check_output()
+
+    def test_check_grad(self):
+        self.check_grad(["X"], "Out")
+
+
+class TestReshapeOpDimInferInplace1(OpTest):
+    def setUp(self):
+        ori_shape = (5, 10)
+        new_shape = (5, -1, 5)
 
         self.op_type = "reshape"
-        self.inputs = {
-            "X": np.random.random(ori_shape).astype("float32"),
-            "Shape": np.array(
-                new_shape, dtype="int64")
-        }
-        self.outputs = {"Out": self.inputs["X"].reshape(new_shape[0])}
+        self.inputs = {"X": np.random.random(ori_shape).astype("float32")}
+        self.attrs = {"shape": new_shape}
+        self.outputs = {"Out": self.inputs["X"].reshape(new_shape)}
 
     def test_check_output(self):
         self.check_output()
@@ -69,32 +104,23 @@ class TestReshapeOp2(OpTest):
         self.check_grad(["X"], "Out")
 
 
-# class TestReshapeOpInplace(OpTest):
-#     def setUp(self):
-#         self.op_type = "reshape"
-#         self.inputs = {'X': np.random.random((10, 20)).astype("float32")}
-#         self.attrs = {'shape': [10 * 20], 'inplace': True}
-#         self.outputs = {'Out': self.inputs['X'].reshape(self.attrs['shape'])}
-#
-#     def test_check_output(self):
-#         self.check_output()
-#
-#     def test_check_grad(self):
-#         self.check_grad(["X"], "Out")
-#
-#
-# class TestReshapeOpDimInferInplace(OpTest):
-#     def setUp(self):
-#         self.op_type = "reshape"
-#         self.inputs = {'X': np.random.random((10, 20)).astype("float32")}
-#         self.attrs = {'shape': [4, -1, 5], 'inplace': True}
-#         self.outputs = {'Out': self.inputs['X'].reshape(self.attrs['shape'])}
-#
-#     def test_check_output(self):
-#         self.check_output()
-#
-#     def test_check_grad(self):
-#         self.check_grad(["X"], "Out")
+class TestReshapeOpDimInferInplace2(OpTest):
+    def setUp(self):
+        ori_shape = (2, 2, 6)
+        new_shape = (2, 0, 3, -1)
+        inferred_shape = (2, 2, 3, -1)
+
+        self.op_type = "reshape"
+        self.inputs = {"X": np.random.random(ori_shape).astype("float32")}
+        self.attrs = {"shape": new_shape}
+        self.outputs = {"Out": self.inputs["X"].reshape(inferred_shape)}
+
+    def test_check_output(self):
+        self.check_output()
+
+    def test_check_grad(self):
+        self.check_grad(["X"], "Out")
+
 
 if __name__ == "__main__":
     unittest.main()
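
Note: a quick numpy check of the shape arithmetic exercised by `TestReshapeOpDimInfer2`, with the 0 entry already resolved to the copied input dimension (a sanity sketch outside the test suite):

```python
import numpy as np

x = np.random.random((2, 2, 6)).astype("float32")
# Attr(shape) == (2, 0, 3, -1): 0 resolves to x.shape[1] == 2, -1 is inferred.
out = x.reshape((2, 2, 3, -1))
assert out.shape == (2, 2, 3, 2)
assert np.allclose(out.ravel(), x.ravel())  # data order is unchanged
```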