Unverified commit 6db0e2b1 authored by Aurelius84, committed by GitHub

[Dy2stat] Support len syntax (#24638)

* [dy2stat] Support len

* add convert_call func

* refine code test=develop
Parent 95089204
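In dygraph mode a plain Python `len(x)` returns a concrete int, but in static mode the length must come from a graph operation. A minimal sketch of the behavior this commit enables, mirroring the new unit tests and assuming the 1.8-era fluid API:

import numpy as np
import paddle.fluid as fluid
from paddle.fluid.dygraph import declarative

@declarative
def net(x):
    x = fluid.dygraph.to_variable(x)
    # After transformation this call becomes convert_call(len)(x),
    # which dispatches to convert_len and emits a shape op.
    return len(x)

with fluid.dygraph.guard():
    out = net(np.ones([10, 16], dtype='float32'))
    print(out.numpy())  # [10], the first dimension of x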
......@@ -20,15 +20,23 @@ namespace paddle {
namespace operators {

using Tensor = framework::Tensor;
using LoDTensor = framework::LoDTensor;
+using SelectedRows = framework::SelectedRows;

template <typename T>
class ShapeKernel : public framework::OpKernel<T> {
 public:
  void Compute(const framework::ExecutionContext& ctx) const override {
-    auto* in_t = ctx.Input<Tensor>("Input");
+    auto* in_var = ctx.InputVar("Input");
+    framework::DDim in_dims;
+    if (in_var->IsType<SelectedRows>()) {
+      in_dims = in_var->Get<SelectedRows>().value().dims();
+    } else {
+      in_dims = in_var->Get<LoDTensor>().dims();
+    }
    auto* out_t = ctx.Output<Tensor>("Out");
    out_t->Resize({in_dims.size()});
    auto out_data = out_t->mutable_data<int32_t>(platform::CPUPlace());
-    auto in_dims = in_t->dims();
    for (int i = 0; i < in_dims.size(); ++i) {
      out_data[i] = in_dims[i];
    }
......
......@@ -32,12 +32,23 @@ class CallTransformer(gast.NodeTransformer):
        self.wrapper_root = wrapper_root
        self.root = wrapper_root.node

-    def _is_builtin_call(self, node):
+    def _no_need_convert_call(self, node):
+        """
+        Determines whether a function call needs to be transformed by
+        `convert_call`. No transformation is needed when the callee
+        satisfies one of the following conditions:
+          1. It is a Paddle API.
+          2. It is a Python builtin function other than `len`.
+        """
        assert isinstance(node, gast.Call)
+        if is_paddle_api(node):
+            return True
+
        func_str = ast_to_source_code(node.func).strip()
        try:
-            from paddle.fluid.dygraph.dygraph_to_static.convert_call_func import is_builtin
-            return eval("is_builtin({})".format(func_str))
+            from paddle.fluid.dygraph.dygraph_to_static.convert_call_func import is_builtin_len, is_builtin
+            is_builtin = eval("is_builtin({})".format(func_str))
+            is_builtin_len = eval("is_builtin_len({})".format(func_str))
+            return is_builtin and not is_builtin_len
        except Exception:
            return False
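Why the `eval` round-trip? At transform time the transformer only has the source text of the callee, not the function object itself, so it rebuilds the object by evaluating `is_builtin(<func_str>)` in a scope where the helper is imported. A standalone sketch of that check, with simplified stand-ins for the real helpers:

import types

def is_builtin(func):
    # Simplified stand-in for convert_call_func.is_builtin.
    return isinstance(func, types.BuiltinFunctionType)

def is_builtin_len(func):
    return isinstance(func, types.BuiltinFunctionType) and func.__name__ == 'len'

func_str = "len"  # what ast_to_source_code(node.func).strip() might yield
print(eval("is_builtin({})".format(func_str)))      # True
print(eval("is_builtin_len({})".format(func_str)))  # True, so len IS converted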
......@@ -46,10 +57,8 @@ class CallTransformer(gast.NodeTransformer):
    def visit_Call(self, node):
        self.generic_visit(node)
-        if is_paddle_api(node):
-            return node

-        if self._is_builtin_call(node):
+        if self._no_need_convert_call(node):
            return node

        func_str = ast_to_source_code(node.func).strip()
......
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function

from paddle.fluid import framework
from paddle.fluid import core
from paddle.fluid.layers import nn
from paddle.fluid.layers import control_flow


def convert_len(var):
    """
    Returns the length of `var`, dispatching on its type. For a static-graph
    Variable the length comes from a shape op rather than a Python int.
    Note: In addition to the AST transformation, the `len` conversion also
    performs block-related operations, such as appending a `shape` op to
    var.block.
    """
if isinstance(var, framework.Variable):
if var.type in [
core.VarDesc.VarType.LOD_TENSOR,
core.VarDesc.VarType.SELECTED_ROWS
]:
            # Note: The length of var may be known ahead of time in dygraph,
            # but it usually represents the batch size, which can vary at
            # runtime, so we return a variable dynamically inferred from
            # var.shape.
return nn.shape(var)[0]
elif var.type == core.VarDesc.VarType.LOD_TENSOR_ARRAY:
return control_flow.array_length(var)
else:
raise TypeError(
'len(var) only supports LoDTensor/LoDTensorArray/SelectedRows, but received %s.'
% type(var))
else:
return len(var)
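A usage sketch of `convert_len` on a static-graph Variable (my example, assuming the fluid 1.8 API): the result is itself a Variable, so the length stays dynamic inside the compiled program.

import paddle.fluid as fluid
from paddle.fluid.dygraph.dygraph_to_static.convert_builtins_func import convert_len

x = fluid.data(name='x', shape=[10, 16], dtype='float32')
x_len = convert_len(x)  # appends a shape op to x.block; x_len is a Variable
assert convert_len([1, 2, 3]) == 3  # plain containers fall through to builtin len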
......@@ -29,6 +29,7 @@ import six
from paddle.fluid.dygraph.dygraph_to_static import ProgramTranslator
from paddle.fluid.dygraph.layers import Layer
+from paddle.fluid.dygraph.dygraph_to_static.convert_builtins_func import convert_len
DECORATOR_NAMES = ['declarative', 'dygraph_to_static_func']
program_translator = ProgramTranslator()
......@@ -49,6 +50,12 @@ def is_builtin(func):
return False
+
+def is_builtin_len(func):
+    if isinstance(func, types.BuiltinFunctionType) and func.__name__ == 'len':
+        return True
+    return False
+
def is_paddle_func(func):
m = inspect.getmodule(func)
return m is not None and m.__name__.startswith("paddle")
......@@ -91,10 +98,10 @@ def convert_call(func):
func_self = None
converted_call = None
-    if is_builtin(func):
-        return func
+    if is_builtin_len(func):
+        return convert_len

-    if is_paddle_func(func):
+    if is_builtin(func) or is_paddle_func(func):
        return func
if inspect.isfunction(func):
......
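The dispatch above can be checked directly; a small sketch using the public entry point that the new unit tests also import:

from paddle.fluid.dygraph.dygraph_to_static import convert_call
from paddle.fluid.dygraph.dygraph_to_static.convert_builtins_func import convert_len

assert convert_call(len) is convert_len  # len is special-cased
assert convert_call(abs) is abs          # other builtins pass through unchanged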
......@@ -11045,8 +11045,26 @@ def shape(input):
Get the shape of the input.
+    .. code-block:: text

+        Case1:
+            Given N-D Tensor:
+                input = [[1, 2, 3, 4], [5, 6, 7, 8]]
+            Then:
+                input.shape = [2, 4]

+        Case2:
+            Given SelectedRows:
+                input.rows = [0, 4, 19]
+                input.height = 20
+                input.value = [[1, 2], [3, 4], [5, 6]]  # inner tensor
+            Then:
+                input.shape = [3, 2]

    Args:
-        input (Variable): The input N-D Tensor. Datatype can be float32, float64, int32, int64.
+        input (Variable): The input can be an N-D Tensor or SelectedRows with data type float32, float64, int32 or int64.
+            If the input is a SelectedRows, this op returns the shape of its inner tensor.
Returns:
Variable (Tensor): The shape of the input variable.
......@@ -11057,7 +11075,7 @@ def shape(input):
import paddle.fluid as fluid
import numpy as np
-        inputs = fluid.layers.data(name="x", shape=[3, 100, 100], dtype="float32")
+        inputs = fluid.data(name="x", shape=[3, 100, 100], dtype="float32")
output = fluid.layers.shape(inputs)
exe = fluid.Executor(fluid.CPUPlace())
......
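The docstring example is truncated above; a hedged completion (my code, following the usual executor pattern) that feeds a numpy array and fetches the shape:

import numpy as np
import paddle.fluid as fluid

inputs = fluid.data(name="x", shape=[3, 100, 100], dtype="float32")
output = fluid.layers.shape(inputs)
exe = fluid.Executor(fluid.CPUPlace())
img = np.ones((3, 100, 100)).astype(np.float32)
res = exe.run(fluid.default_main_program(), feed={'x': img}, fetch_list=[output])
print(res)  # [array([  3, 100, 100], dtype=int32)]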
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import unittest
import numpy as np
import paddle.fluid as fluid
from paddle.fluid.dygraph import declarative
from paddle.fluid.dygraph.dygraph_to_static import convert_call
SEED = 2020
np.random.seed(SEED)
def len_with_tensor(x):
x = fluid.dygraph.to_variable(x)
x_len = len(x)
return x_len
def len_with_lod_tensor_array(x):
x = fluid.dygraph.to_variable(x)
i = fluid.layers.fill_constant(shape=[1], dtype='int64', value=0)
arr = fluid.layers.array_write(x, i=i)
arr_len = len(arr)
return arr_len
class TestLen(unittest.TestCase):
def setUp(self):
self.place = fluid.CUDAPlace(0) if fluid.is_compiled_with_cuda(
) else fluid.CPUPlace()
self.x_data = np.random.random([10, 16]).astype('float32')
self.init_func()
def init_func(self):
self.func = len_with_tensor
def _run(self, to_static):
with fluid.dygraph.guard(self.place):
if to_static:
out = declarative(self.func)(self.x_data)
else:
out = self.func(self.x_data)
if isinstance(out, fluid.core.VarBase):
out = out.numpy()
return out
def test_len(self):
dygraph_res = self._run(to_static=False)
static_res = self._run(to_static=True)
self.assertTrue(np.allclose(dygraph_res, static_res))
class TestLenWithTensorArray(TestLen):
def init_func(self):
self.func = len_with_lod_tensor_array
# Note: Variable(SelectedRows) is not exposed directly in dygraph, so this
# unittest exercises that path with hand-written (fake) transformed code
# for coverage.
def len_with_selected_rows(place):
block = fluid.default_main_program().global_block()
# create selected_rows variable
var = block.create_var(
name="X",
dtype="float32",
persistable=True,
type=fluid.core.VarDesc.VarType.SELECTED_ROWS)
# y is Variable(SelectedRows)
y = fluid.layers.merge_selected_rows(var)
y_len = convert_call(len)(y)
# z is inner tensor with shape [4, 2]
z = fluid.layers.get_tensor_from_selected_rows(y)
z_len = convert_call(len)(z)
# set data for selected_rows
x_rows = [0, 2, 2, 4, 19]
row_numel = 2
np_array = np.ones((len(x_rows), row_numel)).astype("float32")
x_var = fluid.global_scope().var("X").get_selected_rows()
x_var.set_rows(x_rows)
x_var.set_height(20)
x_tensor = x_var.get_tensor()
x_tensor.set(np_array, place)
exe = fluid.Executor(place=place)
result = exe.run(fluid.default_main_program(), fetch_list=[y_len, z_len])
return result
class TestLenWithSelectedRows(unittest.TestCase):
def setUp(self):
self.place = fluid.CUDAPlace(0) if fluid.is_compiled_with_cuda(
) else fluid.CPUPlace()
def test_len(self):
selected_rows_var_len, var_tensor_len = len_with_selected_rows(
self.place)
self.assertEqual(selected_rows_var_len, var_tensor_len)
if __name__ == '__main__':
unittest.main()
......@@ -17,6 +17,8 @@ from __future__ import print_function
import unittest
import numpy as np
from op_test import OpTest
+from paddle.fluid import core
+from paddle.fluid.op import Operator
class TestShapeOp(OpTest):
......@@ -45,5 +47,41 @@ class case2(TestShapeOp):
self.shape = [1, 2, 3]
+class TestShapeWithSelectedRows(unittest.TestCase):
+    def get_places(self):
+        places = [core.CPUPlace()]
+        if core.is_compiled_with_cuda():
+            places.append(core.CUDAPlace(0))
+        return places
+
+    def check_with_place(self, place):
+        scope = core.Scope()
+        x_rows = [0, 1, 5, 4, 19]
+        height = 20
+        row_numel = 2
+
+        np_array = np.ones((len(x_rows), row_numel)).astype("float32")
+
+        # initialize input variable X
+        x = scope.var('X').get_selected_rows()
+        x.set_rows(x_rows)
+        x.set_height(height)
+        x_tensor = x.get_tensor()
+        x_tensor.set(np_array, place)
+
+        # create the output variable Out
+        out_shape = scope.var("Out").get_tensor()
+        op = Operator("shape", Input="X", Out="Out")
+        op.run(scope, place)
+
+        out_shape = np.array(out_shape).tolist()
+        self.assertListEqual([5, 2], out_shape)
+
+    def test_check_output(self):
+        for place in self.get_places():
+            self.check_with_place(place)
if __name__ == '__main__':
unittest.main()