From 44b1343aab0478fc9740ab0dfda4f9d2e2e7a572 Mon Sep 17 00:00:00 2001
From: liym27 <33742067+liym27@users.noreply.github.com>
Date: Fri, 13 Mar 2020 11:13:33 +0800
Subject: [PATCH] Support LoDTensorArray in stack/concat api for transformation
 of list dygraph_to_static (#22987)

* Support a LoDTensorArray as the input(x) of the stack api. test=develop

* Support a LoDTensorArray as the input of the concat api. test=develop

* Add tests for stack/concat called in dygraph_to_static. test=develop
---
 python/paddle/fluid/layers/nn.py              | 24 ++++-
 python/paddle/fluid/layers/tensor.py          | 32 +++++--
 .../unittests/dygraph_to_static/test_list.py  | 92 ++++++++++---------
 .../fluid/tests/unittests/test_concat_op.py   | 37 ++++++++
 .../fluid/tests/unittests/test_stack_op.py    | 39 +++++++-
 5 files changed, 168 insertions(+), 56 deletions(-)

diff --git a/python/paddle/fluid/layers/nn.py b/python/paddle/fluid/layers/nn.py
index a526ad00cf..17abacc235 100644
--- a/python/paddle/fluid/layers/nn.py
+++ b/python/paddle/fluid/layers/nn.py
@@ -28,7 +28,7 @@ from ..framework import Variable, OpProtoHolder, in_dygraph_mode, dygraph_only,
 from .. import dygraph_utils
 from ..param_attr import ParamAttr
 from .layer_function_generator import autodoc, templatedoc, _generate_doc_string_
-from .tensor import concat, assign, fill_constant, zeros
+from .tensor import concat, assign, fill_constant, zeros, tensor_array_to_tensor
 from . import utils
 from .. import unique_name
 from functools import reduce
@@ -9137,11 +9137,25 @@ def stack(x, axis=0):
 
     if not isinstance(x, list) and not isinstance(x, tuple):
         x = [x]
-
     out = helper.create_variable_for_type_inference(x[0].dtype)
-    helper.append_op(
-        type='stack', inputs={'X': x}, outputs={'Y': out},
-        attrs={'axis': axis})
+    if not in_dygraph_mode() and \
+            x[0].desc.type() == core.VarDesc.VarType.LOD_TENSOR_ARRAY:
+        assert len(x) == 1, "If the elements of 'x' in stack are Variable(LoDTensorArray), " \
+            "number of the elements must be 1, but received %s." % len(x)
+        out_index = helper.create_variable_for_type_inference(dtype="int32")
+        helper.append_op(
+            type='tensor_array_to_tensor',
+            inputs={'X': x[0]},
+            outputs={'Out': [out],
+                     'OutIndex': [out_index]},
+            attrs={'axis': axis,
+                   'use_stack': True})
+    else:
+        helper.append_op(
+            type='stack',
+            inputs={'X': x},
+            outputs={'Y': out},
+            attrs={'axis': axis})
 
     return out
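Reviewer note (not part of the patch): a minimal sketch of the static-graph path this hunk adds, using only fluid 1.x calls that appear elsewhere in this patch; variable names are illustrative.

    import numpy as np
    import paddle.fluid as fluid

    x = fluid.layers.assign(np.ones([2, 3], dtype='float32'))
    arr = fluid.layers.create_array(dtype='float32')
    zero = fluid.layers.fill_constant(shape=[1], value=0, dtype='int64')
    for i in range(3):
        fluid.layers.array_write(x, zero + i, arr)
    # `arr` is a LOD_TENSOR_ARRAY Variable, so stack now lowers it to a single
    # tensor_array_to_tensor op with use_stack=True instead of a stack op.
    out = fluid.layers.stack(arr, axis=1)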
diff --git a/python/paddle/fluid/layers/tensor.py b/python/paddle/fluid/layers/tensor.py
index d065e527b3..dc4eb727c4 100644
--- a/python/paddle/fluid/layers/tensor.py
+++ b/python/paddle/fluid/layers/tensor.py
@@ -273,18 +273,32 @@ def concat(input, axis=0, name=None):
             x, 'input[' + str(id) + ']',
             ['float16', 'float32', 'float64', 'int32', 'int64'], 'concat')
     check_type(axis, 'axis', (int, Variable), 'concat')
-    inputs = {'X': input}
-    attrs = {}
-    if isinstance(axis, Variable):
-        axis.stop_gradient = True
-        inputs['AxisTensor'] = axis
-    else:
-        attrs['axis'] = axis
 
     helper = LayerHelper('concat', **locals())
     out = helper.create_variable_for_type_inference(dtype=helper.input_dtype())
-    helper.append_op(
-        type='concat', inputs=inputs, outputs={'Out': [out]}, attrs=attrs)
+
+    if input[0].desc.type() == core.VarDesc.VarType.LOD_TENSOR_ARRAY:
+        assert len(input) == 1, "If the elements of 'input' in concat are Variable(LoDTensorArray), " \
+            "number of the elements must be 1, but received %s." % len(input)
+        out_index = helper.create_variable_for_type_inference(dtype="int32")
+        helper.append_op(
+            type='tensor_array_to_tensor',
+            inputs={'X': input[0]},
+            outputs={'Out': [out],
+                     'OutIndex': [out_index]},
+            attrs={'axis': axis,
+                   'use_stack': False})
+    else:
+        inputs = {'X': input}
+        attrs = {}
+        if isinstance(axis, Variable):
+            axis.stop_gradient = True
+            inputs['AxisTensor'] = axis
+        else:
+            attrs['axis'] = axis
+
+        helper.append_op(
+            type='concat', inputs=inputs, outputs={'Out': [out]}, attrs=attrs)
 
     return out
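Reviewer note (not part of the patch): concat mirrors the stack path above but with use_stack=False, joining along `axis` instead of inserting a new dimension. Continuing the earlier sketch:

    # `arr` holds three [2, 3] tensors written in the loop above.
    out = fluid.layers.concat(arr, axis=0)
    # At run time `out` evaluates to a [6, 3] LoDTensor; at compile time the
    # concatenated dimension is -1 because the array length is dynamic.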
diff --git a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_list.py b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_list.py
index 27073e2d50..f6a4ec5df6 100644
--- a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_list.py
+++ b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_list.py
@@ -52,6 +52,16 @@ def test_list_in_for_loop(x, iter_num):
     return a
 
 
+def test_list_in_for_loop_with_concat(x, iter_num):
+    # Note: for loops can't be transformed before PR #22867 is merged.
+    x = fluid.dygraph.to_variable(x)
+    a = []
+    for i in range(iter_num):
+        a.append(x)
+    out = fluid.layers.concat(a, axis=0)
+    return out
+
+
 def test_list_in_while_loop(x, iter_num):
     x = fluid.dygraph.to_variable(x)
     iter_num = fluid.layers.fill_constant(
@@ -67,12 +77,28 @@ def test_list_in_while_loop(x, iter_num):
     return a
 
 
+def test_list_in_while_loop_with_stack(x, iter_num):
+    x = fluid.dygraph.to_variable(x)
+    iter_num = fluid.layers.fill_constant(
+        shape=[1], value=iter_num, dtype="int32")
+    a = []
+    i = 0
+    while i < iter_num.numpy()[0]:
+        a.append(x)
+        i += 1
+    out = fluid.layers.stack(a, axis=1)
+    return out
+
+
 class TestListWithoutControlFlow(unittest.TestCase):
     def setUp(self):
         self.input = np.random.random((3)).astype('int32')
-        self.dygraph_func = test_list_without_control_flow
         self.place = fluid.CUDAPlace(0) if fluid.is_compiled_with_cuda(
         ) else fluid.CPUPlace()
+        self.init_dygraph_func()
+
+    def init_dygraph_func(self):
+        self.dygraph_func = test_list_without_control_flow
 
     def run_dygraph_mode(self):
         with fluid.dygraph.guard():
@@ -100,11 +126,8 @@ class TestListWithoutControlFlow(unittest.TestCase):
 
 
 class TestListInIf(TestListWithoutControlFlow):
-    def setUp(self):
-        self.input = np.random.random((3)).astype('int32')
+    def init_dygraph_func(self):
         self.dygraph_func = test_list_in_if
-        self.place = fluid.CUDAPlace(0) if fluid.is_compiled_with_cuda(
-        ) else fluid.CPUPlace()
 
     def run_static_mode(self):
         main_program = fluid.Program()
@@ -120,13 +143,16 @@ class TestListInIf(TestListWithoutControlFlow):
         return numpy_res[0]
 
 
-class TestListInWhileLoop(unittest.TestCase):
+class TestListInWhileLoop(TestListWithoutControlFlow):
     def setUp(self):
         self.iter_num = 3
         self.input = np.random.random((3)).astype('int32')
-        self.dygraph_func = test_list_in_while_loop
         self.place = fluid.CUDAPlace(0) if fluid.is_compiled_with_cuda(
         ) else fluid.CPUPlace()
+        self.init_dygraph_func()
+
+    def init_dygraph_func(self):
+        self.dygraph_func = test_list_in_while_loop
 
     def run_dygraph_mode(self):
         with fluid.dygraph.guard():
@@ -146,56 +172,40 @@ class TestListInWhileLoop(unittest.TestCase):
                         tensor_array,
                         i=fluid.layers.fill_constant(
                             shape=[1], value=i, dtype='int64')))
+
             exe = fluid.Executor(self.place)
             numpy_res = exe.run(main_program, fetch_list=static_outs)
         return numpy_res
 
-    def test_transformed_static_result(self):
-        static_res = self.run_static_mode()
-        dygraph_res = self.run_dygraph_mode()
-        self.assertTrue(
-            np.array_equal(dygraph_res, static_res),
-            msg='dygraph res is {}\nstatic_res is {}'.format(dygraph_res,
-                                                             static_res))
 
-class TestListInForLoop(unittest.TestCase):
-    def setUp(self):
-        self.iter_num = 3
-        self.input = np.random.random((3)).astype('int32')
-        self.dygraph_func = test_list_in_for_loop
-        self.place = fluid.CUDAPlace(0) if fluid.is_compiled_with_cuda(
-        ) else fluid.CPUPlace()
+class TestListInWhileLoopWithStack(TestListInWhileLoop):
+    def init_dygraph_func(self):
+        self.dygraph_func = test_list_in_while_loop_with_stack
 
     def run_dygraph_mode(self):
         with fluid.dygraph.guard():
             var_res = self.dygraph_func(self.input, self.iter_num)
-            numpy_res = [ele.numpy() for ele in var_res]
+            numpy_res = var_res.numpy()
         return numpy_res
 
     def run_static_mode(self):
         main_program = fluid.Program()
         with fluid.program_guard(main_program):
-            tensor_array = dygraph_to_static_graph(self.dygraph_func)(
-                self.input, self.iter_num)
-            static_outs = []
-            for i in range(self.iter_num):
-                static_outs.append(
-                    fluid.layers.array_read(
-                        tensor_array,
-                        i=fluid.layers.fill_constant(
-                            shape=[1], value=i, dtype='int64')))
+            out_var = dygraph_to_static_graph(self.dygraph_func)(self.input,
+                                                                 self.iter_num)
             exe = fluid.Executor(self.place)
-            numpy_res = exe.run(main_program, fetch_list=static_outs)
-        return numpy_res
+            numpy_res = exe.run(main_program, fetch_list=out_var)
+        return numpy_res[0]
 
-    def test_transformed_static_result(self):
-        static_res = self.run_static_mode()
-        dygraph_res = self.run_dygraph_mode()
-        self.assertTrue(
-            np.array_equal(dygraph_res, static_res),
-            msg='dygraph res is {}\nstatic_res is {}'.format(dygraph_res,
-                                                             static_res))
+
+class TestListInForLoop(TestListInWhileLoop):
+    def init_dygraph_func(self):
+        self.dygraph_func = test_list_in_for_loop
+
+
+class TestListInForLoopWithConcat(TestListInWhileLoopWithStack):
+    def init_dygraph_func(self):
+        self.dygraph_func = test_list_in_for_loop_with_concat
 
 
 if __name__ == '__main__':
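Reviewer note (not part of the patch): the dygraph path is untouched, since the new branch in stack is guarded by `not in_dygraph_mode()`. A plain Python list of Variables still takes the original stack-op path, exactly as the tests above exercise it:

    with fluid.dygraph.guard():
        x = fluid.dygraph.to_variable(np.ones([2, 3], dtype='float32'))
        out = fluid.layers.stack([x, x], axis=1)  # unchanged list behavior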
+ """ + + def setUp(self): + self.axis = 1 + self.iter_num = 3 + self.input_shape = [2, 3] + self.x = np.random.random(self.input_shape).astype("float32") + self.place = fluid.CUDAPlace(0) \ + if fluid.is_compiled_with_cuda() else fluid.CPUPlace() + self.set_program() + + def set_program(self): + self.program = fluid.Program() + with fluid.program_guard(self.program): + input = fluid.layers.assign(self.x) + tensor_array = fluid.layers.create_array(dtype='float32') + zero = fluid.layers.fill_constant(shape=[1], value=0, dtype="int64") + + for i in range(self.iter_num): + fluid.layers.array_write(input, zero + i, tensor_array) + + self.out_var = fluid.layers.concat(tensor_array, axis=self.axis) + + def test_case(self): + self.assertTrue(self.out_var.shape[self.axis] == -1) + exe = fluid.Executor(self.place) + res = exe.run(self.program, fetch_list=self.out_var) + self.assertTrue( + np.array_equal( + res[0], + np.concatenate( + [self.x] * self.iter_num, axis=self.axis))) + + if __name__ == '__main__': unittest.main() diff --git a/python/paddle/fluid/tests/unittests/test_stack_op.py b/python/paddle/fluid/tests/unittests/test_stack_op.py index 428772b022..2c81e20675 100644 --- a/python/paddle/fluid/tests/unittests/test_stack_op.py +++ b/python/paddle/fluid/tests/unittests/test_stack_op.py @@ -12,9 +12,10 @@ # See the License for the specific language governing permissions and # limitations under the License. -from op_test import OpTest import numpy as np import unittest +import paddle.fluid as fluid +from op_test import OpTest class TestStackOpBase(OpTest): @@ -88,5 +89,41 @@ class TestStackOp6(TestStackOpBase): self.axis = 3 +class TestStackAPIWithLoDTensorArray(unittest.TestCase): + """ + Test stack api when the input(x) is a LoDTensorArray. + """ + + def setUp(self): + self.axis = 1 + self.iter_num = 3 + self.input_shape = [2, 3] + self.x = np.random.random(self.input_shape).astype("float32") + self.place = fluid.CUDAPlace(0) \ + if fluid.is_compiled_with_cuda() else fluid.CPUPlace() + self.set_program() + + def set_program(self): + self.program = fluid.Program() + with fluid.program_guard(self.program): + input = fluid.layers.assign(self.x) + tensor_array = fluid.layers.create_array(dtype='float32') + zero = fluid.layers.fill_constant(shape=[1], value=0, dtype="int64") + + for i in range(self.iter_num): + fluid.layers.array_write(input, zero + i, tensor_array) + + self.out_var = fluid.layers.stack(tensor_array, axis=self.axis) + + def test_case(self): + self.assertTrue(self.out_var.shape[self.axis] == -1) + exe = fluid.Executor(self.place) + res = exe.run(self.program, fetch_list=self.out_var) + self.assertTrue( + np.array_equal( + res[0], np.stack( + [self.x] * self.iter_num, axis=self.axis))) + + if __name__ == '__main__': unittest.main() -- GitLab