From 7e5e9934fe9594bb6b2b5415c25ab5db8c8b9bb7 Mon Sep 17 00:00:00 2001
From: lilong12
Date: Fri, 27 Nov 2020 18:59:52 +0800
Subject: [PATCH] update expand as op to use the shape of the target tensor
 instead of the target tensor itself. (#29020)

* update, test=develop
---
 paddle/fluid/operators/expand_as_v2_op.cc     | 49 ++++++-------------
 paddle/fluid/operators/expand_as_v2_op.h      | 10 ++--
 .../tests/unittests/test_expand_as_v2_op.py   | 16 +++---
 python/paddle/tensor/manipulation.py          | 10 ++--
 4 files changed, 34 insertions(+), 51 deletions(-)

diff --git a/paddle/fluid/operators/expand_as_v2_op.cc b/paddle/fluid/operators/expand_as_v2_op.cc
index e8008056c48..70099afbd59 100644
--- a/paddle/fluid/operators/expand_as_v2_op.cc
+++ b/paddle/fluid/operators/expand_as_v2_op.cc
@@ -25,28 +25,22 @@ class ExpandAsV2Op : public framework::OperatorWithKernel {
  protected:
   void InferShape(framework::InferShapeContext* ctx) const override {
     OP_INOUT_CHECK(ctx->HasInput("X"), "Input", "X", "ExpandAsV2");
-    OP_INOUT_CHECK(ctx->HasInput("target_tensor"), "Input", "target_tensor",
-                   "ExpandAsV2");
     OP_INOUT_CHECK(ctx->HasOutput("Out"), "Output", "Out", "ExpandAsV2");
     auto x_dims = ctx->GetInputDim("X");
-    auto target_tensor_dims = ctx->GetInputDim("target_tensor");
+    auto target_shape = ctx->Attrs().Get<std::vector<int>>("target_shape");
     PADDLE_ENFORCE_GE(
-        target_tensor_dims.size(), static_cast<int>(x_dims.size()),
+        target_shape.size(), static_cast<size_t>(x_dims.size()),
         platform::errors::InvalidArgument(
-            "The rank of Input(target_tensor) must be greater than or equal "
+            "The rank of target_shape must be greater than or equal "
             "to the rank of Input(X). But received Input(X): input "
-            "rank %u, input shape [%s]; received Input(target_tensor): "
-            "input rank %u, input shape [%s].",
-            x_dims.size(), x_dims, target_tensor_dims.size(),
-            target_tensor_dims));
-    PADDLE_ENFORCE_LE(
-        target_tensor_dims.size(), MAX_RANK_SUPPORTED,
-        platform::errors::InvalidArgument(
-            "The rank of Input(target_tensor) must not be less than or equal "
-            "to %d. But received: input rank %u, input shape [%s].",
-            MAX_RANK_SUPPORTED, x_dims.size(), x_dims));
-    std::vector<int64_t> out_shape = framework::vectorize(target_tensor_dims);
-    ctx->SetOutputDim("Out", framework::make_ddim(out_shape));
+            "rank %u; received target_shape: rank %u.",
+            x_dims.size(), target_shape.size()));
+    PADDLE_ENFORCE_LE(target_shape.size(), MAX_RANK_SUPPORTED,
+                      platform::errors::InvalidArgument(
+                          "The rank of target_shape must be less than or equal "
+                          "to %d. But received: rank %u.",
+                          MAX_RANK_SUPPORTED, target_shape.size()));
+    ctx->SetOutputDim("Out", framework::make_ddim(target_shape));
   }
 };
@@ -62,23 +56,11 @@ class ExpandAsV2OpMaker : public framework::OpProtoAndCheckerMaker {
         "After expanding, size of each dimension of Output(Out) is equal "
         "to size of the corresponding dimension of Input(X) multiplying "
         "the corresponding value given by Attr(expand_times).");
-    AddInput("target_tensor", "Expand tensor's shape for each dimension.");
+    AddAttr<std::vector<int>>("target_shape",
+                              "Expand shape for each dimension.")
+        .SetDefault({});
     AddComment(R"DOC(
-Expand the input by given times number. You should set times
-number for each dimension by providing tensor 'expend_tensor'. The rank of X
-should be in [1, 6]. Please note that size of 'expend_tensor' must be the same
-with X's rank. Following is a using case:
-Input(X) is a 3-D tensor with shape [2, 3, 1]:
-        [
-           [[1], [2], [3]],
-           [[4], [5], [6]]
-        ]
-target_tensors'shape: [2, 6, 2]
-Output(Out) is a 3-D tensor with shape [2, 6, 2]:
-        [
-            [[1, 1], [2, 2], [3, 3], [1, 1], [2, 2], [3, 3]],
-            [[4, 4], [5, 5], [6, 6], [4, 4], [5, 5], [6, 6]]
-        ]
+Expand the input to the given shape.
 )DOC");
   }
 };
@@ -117,7 +99,6 @@ class ExpandAsV2GradOpMaker : public framework::SingleGradOpMaker<T> {
   void Apply(GradOpPtr<T> op) const override {
     op->SetType("expand_as_v2_grad");
     op->SetInput("X", this->Input("X"));
-    op->SetInput("target_tensor", this->Input("target_tensor"));
     op->SetInput(framework::GradVarName("Out"), this->OutputGrad("Out"));
     op->SetOutput(framework::GradVarName("X"), this->InputGrad("X"));
     op->SetAttrMap(this->Attrs());
diff --git a/paddle/fluid/operators/expand_as_v2_op.h b/paddle/fluid/operators/expand_as_v2_op.h
index a4c30dfe129..c36e461926f 100644
--- a/paddle/fluid/operators/expand_as_v2_op.h
+++ b/paddle/fluid/operators/expand_as_v2_op.h
@@ -59,8 +59,8 @@ class ExpandAsV2Kernel : public framework::OpKernel<T> {
  public:
   void Compute(const framework::ExecutionContext& context) const override {
     auto rank = context.Input<Tensor>("X")->dims().size();
-    auto* target_tensor = context.Input<Tensor>("target_tensor");
-    auto target_rank = target_tensor->dims().size();
+    auto target_shape = context.Attr<std::vector<int>>("target_shape");
+    auto target_rank = target_shape.size();
     PADDLE_ENFORCE_GE(target_rank, rank,
                       platform::errors::InvalidArgument(
                           "The rank (%d) of the input 'target_tensor' for "
@@ -85,9 +85,8 @@ class ExpandAsV2Kernel : public framework::OpKernel<T> {
   void ExpandAs(const framework::ExecutionContext& context) const {
     auto* in0 = context.Input<Tensor>("X");
     auto in_dims = in0->dims();
-    auto* target_tensor = context.Input<Tensor>("target_tensor");
+    auto target_shape = context.Attr<std::vector<int>>("target_shape");
     auto vec_in_dims = framework::vectorize<int>(in_dims);
-    auto target_shape = framework::vectorize<int>(target_tensor->dims());
     auto diff = target_shape.size() - vec_in_dims.size();
     vec_in_dims.insert(vec_in_dims.begin(), diff, 1);
     std::vector<int> repeat_times(vec_in_dims.size());
@@ -132,9 +131,8 @@ class ExpandAsV2GradKernel : public framework::OpKernel<T> {
  public:
   void Compute(const framework::ExecutionContext& context) const override {
     auto* in0 = context.Input<Tensor>("X");
-    auto* target_tensor = context.Input<Tensor>("target_tensor");
+    auto target_shape = context.Attr<std::vector<int>>("target_shape");
     auto x_dims = in0->dims();
-    auto target_shape = target_tensor->dims();
     auto vec_in_dims = framework::vectorize<int>(x_dims);
     auto diff = target_shape.size() - vec_in_dims.size();
     vec_in_dims.insert(vec_in_dims.begin(), diff, 1);
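The kernel change above reduces expand_as_v2 to a tile-based broadcast driven purely by the attribute: left-pad the input shape with 1s up to the target rank, then repeat each dimension target_shape[i] / in_dims[i] times. A minimal NumPy sketch of that rule follows; the helper name expand_by_target_shape is illustrative only and is not part of this patch:

    import numpy as np

    def expand_by_target_shape(x, target_shape):
        # Left-pad x's shape with 1s, mirroring
        # vec_in_dims.insert(vec_in_dims.begin(), diff, 1) in the kernel.
        diff = len(target_shape) - x.ndim
        padded = [1] * diff + list(x.shape)
        # repeat_times[i] = target_shape[i] / vec_in_dims[i], as in ExpandAs.
        repeat_times = [t // p for t, p in zip(target_shape, padded)]
        return np.tile(x.reshape(padded), repeat_times)

    # Shape [100] expanded to [2, 100], matching TestExpandAsOpRank1 below.
    assert expand_by_target_shape(np.ones(100), [2, 100]).shape == (2, 100)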
diff --git a/python/paddle/fluid/tests/unittests/test_expand_as_v2_op.py b/python/paddle/fluid/tests/unittests/test_expand_as_v2_op.py
index 4bc6bf3744f..62cd465a176 100755
--- a/python/paddle/fluid/tests/unittests/test_expand_as_v2_op.py
+++ b/python/paddle/fluid/tests/unittests/test_expand_as_v2_op.py
@@ -26,8 +26,8 @@ class TestExpandAsOpRank1(OpTest):
         self.op_type = "expand_as_v2"
         x = np.random.rand(100).astype("float64")
         target_tensor = np.random.rand(2, 100).astype("float64")
-        self.inputs = {'X': x, 'target_tensor': target_tensor}
-        self.attrs = {}
+        self.inputs = {'X': x}
+        self.attrs = {'target_shape': target_tensor.shape}
         bcast_dims = [2, 1]
         output = np.tile(self.inputs['X'], bcast_dims)
         self.outputs = {'Out': output}
@@ -44,8 +44,8 @@ class TestExpandAsOpRank2(OpTest):
         self.op_type = "expand_as_v2"
         x = np.random.rand(10, 12).astype("float64")
         target_tensor = np.random.rand(10, 12).astype("float64")
-        self.inputs = {'X': x, 'target_tensor': target_tensor}
-        self.attrs = {}
+        self.inputs = {'X': x}
+        self.attrs = {'target_shape': target_tensor.shape}
         bcast_dims = [1, 1]
         output = np.tile(self.inputs['X'], bcast_dims)
         self.outputs = {'Out': output}
@@ -62,8 +62,8 @@ class TestExpandAsOpRank3(OpTest):
         self.op_type = "expand_as_v2"
         x = np.random.rand(2, 3, 20).astype("float64")
         target_tensor = np.random.rand(2, 3, 20).astype("float64")
-        self.inputs = {'X': x, 'target_tensor': target_tensor}
-        self.attrs = {}
+        self.inputs = {'X': x}
+        self.attrs = {'target_shape': target_tensor.shape}
         bcast_dims = [1, 1, 1]
         output = np.tile(self.inputs['X'], bcast_dims)
         self.outputs = {'Out': output}
@@ -80,8 +80,8 @@ class TestExpandAsOpRank4(OpTest):
         self.op_type = "expand_as_v2"
         x = np.random.rand(1, 1, 7, 16).astype("float64")
         target_tensor = np.random.rand(4, 6, 7, 16).astype("float64")
-        self.inputs = {'X': x, 'target_tensor': target_tensor}
-        self.attrs = {}
+        self.inputs = {'X': x}
+        self.attrs = {'target_shape': target_tensor.shape}
         bcast_dims = [4, 6, 1, 1]
         output = np.tile(self.inputs['X'], bcast_dims)
         self.outputs = {'Out': output}
diff --git a/python/paddle/tensor/manipulation.py b/python/paddle/tensor/manipulation.py
index d303ce0e28a..15a009ad899 100644
--- a/python/paddle/tensor/manipulation.py
+++ b/python/paddle/tensor/manipulation.py
@@ -1183,7 +1183,7 @@ def expand_as(x, y, name=None):
             # [[1, 2, 3], [1, 2, 3]]
     """
     if in_dygraph_mode():
-        return core.ops.expand_as_v2(x, y)
+        return core.ops.expand_as_v2(x, 'target_shape', y.shape)

     check_variable_and_dtype(
         x, 'x', ['bool', 'float32', 'float64', 'int32', 'int64'], 'expand_as')
@@ -1195,12 +1195,16 @@ def expand_as(x, y, name=None):
             "you must set its stop_gradient to be False by "
             "some_var.stop_gradient = True, supporting "
             "some_var as the input 'x'.")
-    inputs = {"X": [x], "target_tensor": [y]}
+    inputs = {"X": [x]}

     helper = LayerHelper('expand_as', **locals())
     dtype = helper.input_dtype(input_param_name='x')
     out = helper.create_variable_for_type_inference(dtype)
-    helper.append_op(type='expand_as_v2', inputs=inputs, outputs={'Out': out})
+    helper.append_op(
+        type='expand_as_v2',
+        inputs=inputs,
+        attrs={'target_shape': y.shape},
+        outputs={'Out': out})
     return out
--
GitLab
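For reference, here is a usage sketch of the public API this patch rewires, consistent with the expand_as docstring excerpted above; it assumes a Paddle build that includes this change:

    import numpy as np
    import paddle

    paddle.disable_static()  # dygraph mode takes the core.ops branch above
    x = paddle.to_tensor(np.array([1, 2, 3], dtype="int32"))
    y = paddle.to_tensor(np.zeros((2, 3), dtype="int32"))
    out = paddle.expand_as(x, y)  # only y.shape is consumed after this patch
    print(out.numpy())
    # [[1 2 3]
    #  [1 2 3]]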