Unverified commit 44b1343a, authored by liym27, committed via GitHub

Support LoDTensorArray in stack/concat api for transformation of list dygraph_to_static (#22987)

* Support that the input(x) of stack api is a LoDTensorArray. test=develop

* Support that the input of concat api is a LoDTensorArray. test=develop

* Add tests to test stack/concat called in dygraph_to_static. test=develop
Parent commit: 63f3ada7
......@@ -28,7 +28,7 @@ from ..framework import Variable, OpProtoHolder, in_dygraph_mode, dygraph_only,
from .. import dygraph_utils
from ..param_attr import ParamAttr
from .layer_function_generator import autodoc, templatedoc, _generate_doc_string_
from .tensor import concat, assign, fill_constant, zeros
from .tensor import concat, assign, fill_constant, zeros, tensor_array_to_tensor
from . import utils
from .. import unique_name
from functools import reduce
......@@ -9137,11 +9137,25 @@ def stack(x, axis=0):
if not isinstance(x, list) and not isinstance(x, tuple):
x = [x]
out = helper.create_variable_for_type_inference(x[0].dtype)
helper.append_op(
type='stack', inputs={'X': x}, outputs={'Y': out},
attrs={'axis': axis})
if not in_dygraph_mode() and \
x[0].desc.type() == core.VarDesc.VarType.LOD_TENSOR_ARRAY:
assert len(x) == 1, "If the elements of 'x' in stack are Variable(LoDTensorArray), " \
"number of the elements must be 1, but received %s." % len(x)
out_index = helper.create_variable_for_type_inference(dtype="int32")
helper.append_op(
type='tensor_array_to_tensor',
inputs={'X': x[0]},
outputs={'Out': [out],
'OutIndex': [out_index]},
attrs={'axis': axis,
'use_stack': True})
else:
helper.append_op(
type='stack',
inputs={'X': x},
outputs={'Y': out},
attrs={'axis': axis})
return out
......
......@@ -273,18 +273,32 @@ def concat(input, axis=0, name=None):
x, 'input[' + str(id) + ']',
['float16', 'float32', 'float64', 'int32', 'int64'], 'concat')
check_type(axis, 'axis', (int, Variable), 'concat')
inputs = {'X': input}
attrs = {}
if isinstance(axis, Variable):
axis.stop_gradient = True
inputs['AxisTensor'] = axis
else:
attrs['axis'] = axis
helper = LayerHelper('concat', **locals())
out = helper.create_variable_for_type_inference(dtype=helper.input_dtype())
helper.append_op(
type='concat', inputs=inputs, outputs={'Out': [out]}, attrs=attrs)
if input[0].desc.type() == core.VarDesc.VarType.LOD_TENSOR_ARRAY:
assert len(input) == 1, "If the elements of 'input' in concat are Variable(LoDTensorArray), " \
"number of the elements must be 1, but received %s." % len(x)
out_index = helper.create_variable_for_type_inference(dtype="int32")
helper.append_op(
type='tensor_array_to_tensor',
inputs={'X': input[0]},
outputs={'Out': [out],
'OutIndex': [out_index]},
attrs={'axis': axis,
'use_stack': False})
else:
inputs = {'X': input}
attrs = {}
if isinstance(axis, Variable):
axis.stop_gradient = True
inputs['AxisTensor'] = axis
else:
attrs['axis'] = axis
helper.append_op(
type='concat', inputs=inputs, outputs={'Out': [out]}, attrs=attrs)
return out
......
......@@ -52,6 +52,16 @@ def test_list_in_for_loop(x, iter_num):
return a
def test_list_in_for_loop_with_concat(x, iter_num):
    # Note: for_loop can't be transformed before PR22867 merged.
    # Appends the same variable iter_num times, then concats the list
    # along axis 0 so the transformed program exercises the
    # LoDTensorArray path of concat.
    x = fluid.dygraph.to_variable(x)
    collected = []
    for _ in range(iter_num):
        collected.append(x)
    return fluid.layers.concat(collected, axis=0)
def test_list_in_while_loop(x, iter_num):
x = fluid.dygraph.to_variable(x)
iter_num = fluid.layers.fill_constant(
......@@ -67,12 +77,28 @@ def test_list_in_while_loop(x, iter_num):
return a
def test_list_in_while_loop_with_stack(x, iter_num):
    # Builds a list inside a while loop and stacks it along axis 1 so the
    # transformed program exercises the LoDTensorArray path of stack.
    x = fluid.dygraph.to_variable(x)
    iter_num = fluid.layers.fill_constant(
        shape=[1], value=iter_num, dtype="int32")
    collected = []
    step = 0
    while step < iter_num.numpy()[0]:
        collected.append(x)
        step += 1
    return fluid.layers.stack(collected, axis=1)
class TestListWithoutControlFlow(unittest.TestCase):
    """Base harness: runs a list-using function in dygraph mode and after
    dygraph_to_static transformation, then compares results.

    Subclasses override init_dygraph_func() to select the function under
    test, so setUp must not also assign self.dygraph_func directly — the
    duplicated direct assignment (a diff-capture leftover) was removed
    because init_dygraph_func() immediately overwrote it.
    """

    def setUp(self):
        # 3-element random vector cast to int32, as in the original tests.
        self.input = np.random.random((3)).astype('int32')
        self.place = fluid.CUDAPlace(0) if fluid.is_compiled_with_cuda(
        ) else fluid.CPUPlace()
        # Template-method hook: subclasses swap in their own function here.
        self.init_dygraph_func()

    def init_dygraph_func(self):
        self.dygraph_func = test_list_without_control_flow
def run_dygraph_mode(self):
with fluid.dygraph.guard():
......@@ -100,11 +126,8 @@ class TestListWithoutControlFlow(unittest.TestCase):
class TestListInIf(TestListWithoutControlFlow):
    """Tests list append inside an `if` branch under dygraph_to_static.

    NOTE(review): the captured source carried dangling pre-change lines
    (a partial setUp re-assigning self.input/self.place) that duplicate
    the inherited setUp; they were removed — confirm against the
    original commit.
    """

    def init_dygraph_func(self):
        # Only the function under test differs; setUp is inherited.
        self.dygraph_func = test_list_in_if
def run_static_mode(self):
main_program = fluid.Program()
......@@ -120,13 +143,16 @@ class TestListInIf(TestListWithoutControlFlow):
return numpy_res[0]
class TestListInWhileLoop(TestListWithoutControlFlow):
    """Tests list append inside a while loop under dygraph_to_static.

    The captured source declared this class twice in a row (old base
    `unittest.TestCase` vs. new base `TestListWithoutControlFlow` — a
    diff-capture artifact); the new base is kept. The redundant direct
    `self.dygraph_func` assignment in setUp, which init_dygraph_func()
    immediately overwrote, was also removed.
    """

    def setUp(self):
        self.iter_num = 3
        self.input = np.random.random((3)).astype('int32')
        self.place = fluid.CUDAPlace(0) if fluid.is_compiled_with_cuda(
        ) else fluid.CPUPlace()
        # Subclass hook selects the function under test.
        self.init_dygraph_func()

    def init_dygraph_func(self):
        self.dygraph_func = test_list_in_while_loop
def run_dygraph_mode(self):
with fluid.dygraph.guard():
......@@ -146,56 +172,40 @@ class TestListInWhileLoop(unittest.TestCase):
tensor_array,
i=fluid.layers.fill_constant(
shape=[1], value=i, dtype='int64')))
exe = fluid.Executor(self.place)
numpy_res = exe.run(main_program, fetch_list=static_outs)
return numpy_res
def test_transformed_static_result(self):
    # Runs the function under test both ways and asserts the transformed
    # static-graph result matches the dygraph result exactly.
    static_res = self.run_static_mode()
    dygraph_res = self.run_dygraph_mode()
    self.assertTrue(
        np.array_equal(dygraph_res, static_res),
        msg='dygraph res is {}\nstatic_res is {}'.format(dygraph_res,
                                                         static_res))
# NOTE(review): TestListInForLoop is re-defined later in this file (deriving
# from TestListInWhileLoop); in Python the later definition wins, so this
# version is dead code — it looks like pre-change lines left over from a
# diff capture. Confirm and remove.
class TestListInForLoop(unittest.TestCase):
    def setUp(self):
        self.iter_num = 3
        self.input = np.random.random((3)).astype('int32')
        self.dygraph_func = test_list_in_for_loop
        self.place = fluid.CUDAPlace(0) if fluid.is_compiled_with_cuda(
        ) else fluid.CPUPlace()
class TestListInWhileLoopWithStack(TestListInWhileLoop):
    """While-loop list test whose output is produced by fluid.layers.stack,
    so both execution modes yield a single tensor rather than a list or
    LoDTensorArray.

    NOTE(review): the captured source interleaved pre- and post-change
    lines (duplicate `numpy_res` assignment, statements after `return`,
    and a dead `static_outs`/array_read path); only the post-change logic
    is kept — confirm against the original commit.
    """

    def init_dygraph_func(self):
        self.dygraph_func = test_list_in_while_loop_with_stack

    def run_dygraph_mode(self):
        # stack() returns one Variable in dygraph mode, so a single
        # .numpy() call converts the whole result.
        with fluid.dygraph.guard():
            var_res = self.dygraph_func(self.input, self.iter_num)
            numpy_res = var_res.numpy()
        return numpy_res

    def run_static_mode(self):
        # The transformed program's output is already a dense tensor, so
        # it is fetched directly instead of element-by-element via
        # array_read.
        main_program = fluid.Program()
        with fluid.program_guard(main_program):
            out_var = dygraph_to_static_graph(self.dygraph_func)(self.input,
                                                                 self.iter_num)
        exe = fluid.Executor(self.place)
        numpy_res = exe.run(main_program, fetch_list=out_var)
        return numpy_res[0]

    def test_transformed_static_result(self):
        # NOTE(review): likely identical to the inherited test; kept to
        # match the captured source.
        static_res = self.run_static_mode()
        dygraph_res = self.run_dygraph_mode()
        self.assertTrue(
            np.array_equal(dygraph_res, static_res),
            msg='dygraph res is {}\nstatic_res is {}'.format(dygraph_res,
                                                             static_res))
class TestListInForLoop(TestListInWhileLoop):
    # Reuses the while-loop harness; only the function under test differs.
    def init_dygraph_func(self):
        self.dygraph_func = test_list_in_for_loop
class TestListInForLoopWithConcat(TestListInWhileLoopWithStack):
    # Same single-tensor harness as the stack/while case, but exercises
    # concat inside a for loop.
    def init_dygraph_func(self):
        self.dygraph_func = test_list_in_for_loop_with_concat
if __name__ == '__main__':
......
......@@ -211,5 +211,42 @@ class TestConcatAPI(unittest.TestCase):
assert np.array_equal(res_3, np.concatenate((input_2, input_3), axis=1))
class TestConcatAPIWithLoDTensorArray(unittest.TestCase):
    """Concat with a LoDTensorArray input: the array's entries are joined
    along ``self.axis`` and checked against ``np.concatenate``."""

    def setUp(self):
        self.iter_num = 3
        self.axis = 1
        self.input_shape = [2, 3]
        self.x = np.random.random(self.input_shape).astype("float32")
        if fluid.is_compiled_with_cuda():
            self.place = fluid.CUDAPlace(0)
        else:
            self.place = fluid.CPUPlace()
        self.set_program()

    def set_program(self):
        # Write the same tensor iter_num times into a tensor array, then
        # concat the whole array with a single concat() call.
        self.program = fluid.Program()
        with fluid.program_guard(self.program):
            src = fluid.layers.assign(self.x)
            arr = fluid.layers.create_array(dtype='float32')
            zero = fluid.layers.fill_constant(shape=[1], value=0, dtype="int64")
            for idx in range(self.iter_num):
                fluid.layers.array_write(src, zero + idx, arr)
            self.out_var = fluid.layers.concat(arr, axis=self.axis)

    def test_case(self):
        # The concatenated dimension is unknown at compile time (-1).
        self.assertTrue(self.out_var.shape[self.axis] == -1)
        exe = fluid.Executor(self.place)
        res = exe.run(self.program, fetch_list=self.out_var)
        expected = np.concatenate([self.x] * self.iter_num, axis=self.axis)
        self.assertTrue(np.array_equal(res[0], expected))
if __name__ == '__main__':
unittest.main()
......@@ -12,9 +12,10 @@
# See the License for the specific language governing permissions and
# limitations under the License.
from op_test import OpTest
import numpy as np
import unittest
import paddle.fluid as fluid
from op_test import OpTest
class TestStackOpBase(OpTest):
......@@ -88,5 +89,41 @@ class TestStackOp6(TestStackOpBase):
self.axis = 3
class TestStackAPIWithLoDTensorArray(unittest.TestCase):
    """Stack with a LoDTensorArray input: the array's entries are stacked
    along ``self.axis`` and checked against ``np.stack``."""

    def setUp(self):
        self.iter_num = 3
        self.axis = 1
        self.input_shape = [2, 3]
        self.x = np.random.random(self.input_shape).astype("float32")
        if fluid.is_compiled_with_cuda():
            self.place = fluid.CUDAPlace(0)
        else:
            self.place = fluid.CPUPlace()
        self.set_program()

    def set_program(self):
        # Write the same tensor iter_num times into a tensor array, then
        # stack the whole array with a single stack() call.
        self.program = fluid.Program()
        with fluid.program_guard(self.program):
            src = fluid.layers.assign(self.x)
            arr = fluid.layers.create_array(dtype='float32')
            zero = fluid.layers.fill_constant(shape=[1], value=0, dtype="int64")
            for idx in range(self.iter_num):
                fluid.layers.array_write(src, zero + idx, arr)
            self.out_var = fluid.layers.stack(arr, axis=self.axis)

    def test_case(self):
        # The stacked dimension is unknown at compile time (-1).
        self.assertTrue(self.out_var.shape[self.axis] == -1)
        exe = fluid.Executor(self.place)
        res = exe.run(self.program, fetch_list=self.out_var)
        expected = np.stack([self.x] * self.iter_num, axis=self.axis)
        self.assertTrue(np.array_equal(res[0], expected))
if __name__ == '__main__':
unittest.main()
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
To comment, please register.