Unverified commit 7e5e9934, authored by lilong12, committed by GitHub

update expand as op to use the shape of the target tensor instead of the target tensor itself. (#29020)

* update, test=develop
Parent: f4c894a6
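For orientation, here is a minimal sketch of what the reworked op looks like from the Python side after this change, assuming Paddle 2.0's paddle.expand_as API; the values mirror the docstring example shown further down. Only y's shape is consumed; its data is never read:

    import numpy as np
    import paddle

    x = paddle.to_tensor(np.array([[1, 2, 3]], dtype="float64"))  # shape [1, 3]
    y = paddle.to_tensor(np.zeros((2, 3), dtype="float64"))       # shape [2, 3]
    out = paddle.expand_as(x, y)  # tiles x up to y's shape
    print(out.numpy())  # [[1. 2. 3.]
                        #  [1. 2. 3.]]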
@@ -25,28 +25,22 @@ class ExpandAsV2Op : public framework::OperatorWithKernel {
  protected:
   void InferShape(framework::InferShapeContext* ctx) const override {
     OP_INOUT_CHECK(ctx->HasInput("X"), "Input", "X", "ExpandAsV2");
-    OP_INOUT_CHECK(ctx->HasInput("target_tensor"), "Input", "target_tensor",
-                   "ExpandAsV2");
     OP_INOUT_CHECK(ctx->HasOutput("Out"), "Output", "Out", "ExpandAsV2");
     auto x_dims = ctx->GetInputDim("X");
-    auto target_tensor_dims = ctx->GetInputDim("target_tensor");
+    auto target_shape = ctx->Attrs().Get<std::vector<int>>("target_shape");
     PADDLE_ENFORCE_GE(
-        target_tensor_dims.size(), static_cast<size_t>(x_dims.size()),
+        target_shape.size(), static_cast<size_t>(x_dims.size()),
         platform::errors::InvalidArgument(
-            "The rank of Input(target_tensor) must be greater than or equal "
+            "The rank of target_shape must be greater than or equal "
             "to the rank of Input(X). But received Input(X): input "
-            "rank %u, input shape [%s]; received Input(target_tensor): "
-            "input rank %u, input shape [%s].",
-            x_dims.size(), x_dims, target_tensor_dims.size(),
-            target_tensor_dims));
-    PADDLE_ENFORCE_LE(
-        target_tensor_dims.size(), MAX_RANK_SUPPORTED,
-        platform::errors::InvalidArgument(
-            "The rank of Input(target_tensor) must not be less than or equal "
-            "to %d. But received: input rank %u, input shape [%s].",
-            MAX_RANK_SUPPORTED, x_dims.size(), x_dims));
-    std::vector<int> out_shape = framework::vectorize<int>(target_tensor_dims);
-    ctx->SetOutputDim("Out", framework::make_ddim(out_shape));
+            "rank %u; received target_shape: rank %u.",
+            x_dims.size(), target_shape.size()));
+    PADDLE_ENFORCE_LE(target_shape.size(), MAX_RANK_SUPPORTED,
+                      platform::errors::InvalidArgument(
+                          "The rank of target_shape must be less than or equal "
+                          "to %d. But received: rank %u.",
+                          MAX_RANK_SUPPORTED, target_shape.size()));
+    ctx->SetOutputDim("Out", framework::make_ddim(target_shape));
   }
 };
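The two checks above pin down the new contract: target_shape must have rank at least that of X and at most MAX_RANK_SUPPORTED, and the output dims are taken verbatim from the attribute. A pure-Python sketch of the same logic, assuming MAX_RANK_SUPPORTED is 6 as the op's documentation limits ranks to [1, 6]:

    MAX_RANK_SUPPORTED = 6  # assumption: mirrors the C++ constant

    def infer_expand_as_shape(x_shape, target_shape):
        # Output dims come straight from the attribute, after two rank checks.
        if len(target_shape) < len(x_shape):
            raise ValueError("rank of target_shape must be >= rank of X")
        if len(target_shape) > MAX_RANK_SUPPORTED:
            raise ValueError("rank of target_shape must be <= 6")
        return list(target_shape)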
@@ -62,23 +56,11 @@ class ExpandAsV2OpMaker : public framework::OpProtoAndCheckerMaker {
               "After expanding, size of each dimension of Output(Out) is equal "
               "to size of the corresponding dimension of Input(X) multiplying "
               "the corresponding value given by Attr(expand_times).");
-    AddInput("target_tensor", "Expand tensor's shape for each dimension.");
+    AddAttr<std::vector<int>>("target_shape",
+                              "Expand shape for each dimension.")
+        .SetDefault({});
     AddComment(R"DOC(
-Expand the input by given times number. You should set times
-number for each dimension by providing tensor 'expend_tensor'. The rank of X
-should be in [1, 6]. Please note that size of 'expend_tensor' must be the same
-with X's rank. Following is a using case:
-Input(X) is a 3-D tensor with shape [2, 3, 1]:
-        [
-           [[1], [2], [3]],
-           [[4], [5], [6]]
-        ]
-target_tensor's shape: [2, 6, 2]
-Output(Out) is a 3-D tensor with shape [2, 6, 2]:
-        [
-            [[1, 1], [2, 2], [3, 3], [1, 1], [2, 2], [3, 3]],
-            [[4, 4], [5, 5], [6, 6], [4, 4], [5, 5], [6, 6]]
-        ]
+Expand the input to the given shape.
 )DOC");
   }
 };
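The worked example removed from the DOC block still illustrates the semantics well; as a NumPy sketch, np.tile reproduces the same per-dimension repetition the kernel performs:

    import numpy as np

    x = np.array([[[1], [2], [3]],
                  [[4], [5], [6]]])  # shape [2, 3, 1]
    target_shape = (2, 6, 2)
    repeat_times = [t // s for t, s in zip(target_shape, x.shape)]  # [1, 2, 2]
    out = np.tile(x, repeat_times)   # shape [2, 6, 2]
    # out[0] is [[1, 1], [2, 2], [3, 3], [1, 1], [2, 2], [3, 3]]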
@@ -117,7 +99,6 @@ class ExpandAsV2GradOpMaker : public framework::SingleGradOpMaker<T> {
   void Apply(GradOpPtr<T> op) const override {
     op->SetType("expand_as_v2_grad");
     op->SetInput("X", this->Input("X"));
-    op->SetInput("target_tensor", this->Input("target_tensor"));
     op->SetInput(framework::GradVarName("Out"), this->OutputGrad("Out"));
     op->SetOutput(framework::GradVarName("X"), this->InputGrad("X"));
     op->SetAttrMap(this->Attrs());
@@ -59,8 +59,8 @@ class ExpandAsV2Kernel : public framework::OpKernel<T> {
  public:
   void Compute(const framework::ExecutionContext& context) const override {
     auto rank = context.Input<Tensor>("X")->dims().size();
-    auto* target_tensor = context.Input<Tensor>("target_tensor");
-    auto target_rank = target_tensor->dims().size();
+    auto target_shape = context.Attr<std::vector<int>>("target_shape");
+    auto target_rank = target_shape.size();
     PADDLE_ENFORCE_GE(target_rank, rank,
                       platform::errors::InvalidArgument(
                           "The rank (%d) of the input 'target_tensor' for "
@@ -85,9 +85,8 @@ class ExpandAsV2Kernel : public framework::OpKernel<T> {
   void ExpandAs(const framework::ExecutionContext& context) const {
     auto* in0 = context.Input<Tensor>("X");
     auto in_dims = in0->dims();
-    auto* target_tensor = context.Input<Tensor>("target_tensor");
+    auto target_shape = context.Attr<std::vector<int>>("target_shape");
     auto vec_in_dims = framework::vectorize<int>(in_dims);
-    auto target_shape = framework::vectorize<int>(target_tensor->dims());
     auto diff = target_shape.size() - vec_in_dims.size();
     vec_in_dims.insert(vec_in_dims.begin(), diff, 1);
     std::vector<int> repeat_times(vec_in_dims.size());
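The kernel left-pads the input dims with 1s until both ranks match, then derives one repeat count per dimension from target_shape. The same computation in Python for illustration (the function name is ours, not Paddle's):

    def compute_repeat_times(in_shape, target_shape):
        # Left-pad in_shape with 1s, as vec_in_dims.insert(begin, diff, 1) does.
        diff = len(target_shape) - len(in_shape)
        padded = [1] * diff + list(in_shape)
        # Each repeat count is the ratio of target extent to padded input extent.
        return [t // p for t, p in zip(target_shape, padded)]

    assert compute_repeat_times([3, 1], (2, 3, 2)) == [2, 1, 2]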
@@ -132,9 +131,8 @@ class ExpandAsV2GradKernel : public framework::OpKernel<T> {
  public:
   void Compute(const framework::ExecutionContext& context) const override {
     auto* in0 = context.Input<Tensor>("X");
-    auto* target_tensor = context.Input<Tensor>("target_tensor");
+    auto target_shape = context.Attr<std::vector<int>>("target_shape");
     auto x_dims = in0->dims();
-    auto target_shape = target_tensor->dims();
     auto vec_in_dims = framework::vectorize<int>(x_dims);
     auto diff = target_shape.size() - vec_in_dims.size();
     vec_in_dims.insert(vec_in_dims.begin(), diff, 1);
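The backward pass inverts the expansion: every repeated copy of an input element receives a share of the output gradient, so dX is dOut summed over the repeats. A NumPy sketch of that reduction under the same left-padding convention (the helper name is ours):

    import numpy as np

    def expand_as_grad(dout, in_shape):
        # dout has the target shape; split each axis into (repeats, original)
        # and sum over the repeat axes to recover a gradient of in_shape.
        diff = dout.ndim - len(in_shape)
        padded = [1] * diff + list(in_shape)
        split = []
        for t, p in zip(dout.shape, padded):
            split += [t // p, p]
        dx = dout.reshape(split).sum(axis=tuple(range(0, 2 * len(padded), 2)))
        return dx.reshape(in_shape)

    # Expanding [1, 3] -> [2, 3] makes two copies, so each gradient entry is 2.
    assert expand_as_grad(np.ones((2, 3)), [1, 3]).tolist() == [[2.0, 2.0, 2.0]]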
@@ -26,8 +26,8 @@ class TestExpandAsOpRank1(OpTest):
         self.op_type = "expand_as_v2"
         x = np.random.rand(100).astype("float64")
         target_tensor = np.random.rand(2, 100).astype("float64")
-        self.inputs = {'X': x, 'target_tensor': target_tensor}
-        self.attrs = {}
+        self.inputs = {'X': x}
+        self.attrs = {'target_shape': target_tensor.shape}
         bcast_dims = [2, 1]
         output = np.tile(self.inputs['X'], bcast_dims)
         self.outputs = {'Out': output}
@@ -44,8 +44,8 @@ class TestExpandAsOpRank2(OpTest):
         self.op_type = "expand_as_v2"
         x = np.random.rand(10, 12).astype("float64")
         target_tensor = np.random.rand(10, 12).astype("float64")
-        self.inputs = {'X': x, 'target_tensor': target_tensor}
-        self.attrs = {}
+        self.inputs = {'X': x}
+        self.attrs = {'target_shape': target_tensor.shape}
         bcast_dims = [1, 1]
         output = np.tile(self.inputs['X'], bcast_dims)
         self.outputs = {'Out': output}
@@ -62,8 +62,8 @@ class TestExpandAsOpRank3(OpTest):
         self.op_type = "expand_as_v2"
         x = np.random.rand(2, 3, 20).astype("float64")
         target_tensor = np.random.rand(2, 3, 20).astype("float64")
-        self.inputs = {'X': x, 'target_tensor': target_tensor}
-        self.attrs = {}
+        self.inputs = {'X': x}
+        self.attrs = {'target_shape': target_tensor.shape}
         bcast_dims = [1, 1, 1]
         output = np.tile(self.inputs['X'], bcast_dims)
         self.outputs = {'Out': output}
@@ -80,8 +80,8 @@ class TestExpandAsOpRank4(OpTest):
         self.op_type = "expand_as_v2"
         x = np.random.rand(1, 1, 7, 16).astype("float64")
         target_tensor = np.random.rand(4, 6, 7, 16).astype("float64")
-        self.inputs = {'X': x, 'target_tensor': target_tensor}
-        self.attrs = {}
+        self.inputs = {'X': x}
+        self.attrs = {'target_shape': target_tensor.shape}
         bcast_dims = [4, 6, 1, 1]
         output = np.tile(self.inputs['X'], bcast_dims)
         self.outputs = {'Out': output}
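All four cases share one template: the target tensor is now created only to borrow its shape for the target_shape attribute. A hypothetical fifth case in the same style (not part of this commit) would extend it to rank 5:

    class TestExpandAsOpRank5(OpTest):
        def setUp(self):
            self.op_type = "expand_as_v2"
            x = np.random.rand(1, 1, 3, 1, 16).astype("float64")
            target_tensor = np.random.rand(2, 5, 3, 4, 16).astype("float64")
            self.inputs = {'X': x}
            self.attrs = {'target_shape': target_tensor.shape}
            bcast_dims = [2, 5, 1, 4, 1]
            output = np.tile(self.inputs['X'], bcast_dims)
            self.outputs = {'Out': output}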
@@ -1183,7 +1183,7 @@ def expand_as(x, y, name=None):
            # [[1, 2, 3], [1, 2, 3]]
     """
     if in_dygraph_mode():
-        return core.ops.expand_as_v2(x, y)
+        return core.ops.expand_as_v2(x, 'target_shape', y.shape)
     check_variable_and_dtype(
         x, 'x', ['bool', 'float32', 'float64', 'int32', 'int64'], 'expand_as')
@@ -1195,12 +1195,16 @@ def expand_as(x, y, name=None):
                          "you must set its stop_gradient to be True by "
                          "some_var.stop_gradient = True, supporting "
                          "some_var as the input 'x'.")
-    inputs = {"X": [x], "target_tensor": [y]}
+    inputs = {"X": [x]}
     helper = LayerHelper('expand_as', **locals())
     dtype = helper.input_dtype(input_param_name='x')
     out = helper.create_variable_for_type_inference(dtype)
-    helper.append_op(type='expand_as_v2', inputs=inputs, outputs={'Out': out})
+    helper.append_op(
+        type='expand_as_v2',
+        inputs=inputs,
+        attrs={'target_shape': y.shape},
+        outputs={'Out': out})
     return out
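Because the attribute is captured from y.shape when the op is appended, the static-graph path needs y's shape to be known at network-build time. A sketch of static-graph usage, assuming Paddle 2.0's paddle.static API:

    import numpy as np
    import paddle

    paddle.enable_static()
    x = paddle.static.data(name="x", shape=[1, 3], dtype="float64")
    y = paddle.static.data(name="y", shape=[2, 3], dtype="float64")
    out = paddle.expand_as(x, y)  # records target_shape=[2, 3] as an attribute

    exe = paddle.static.Executor()
    res, = exe.run(feed={"x": np.ones((1, 3)), "y": np.zeros((2, 3))},
                   fetch_list=[out])
    print(res.shape)  # (2, 3)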