Unverified commit faeee3c3 authored by liym27, committed by GitHub

[Cherry-pick 2.0] cherry pick 3 PRs about Dynamic-to-Static (#30100)

* [cherry-pick 2.0] Fix unittest test_slice (#29740)

Before this commit, test_slice used the old API `dygraph_to_static_func` for Dynamic-to-Static and used the Executor explicitly, which is not recommended for users.
After this fix, the recommended API `paddle.jit.to_static` replaces `dygraph_to_static_func`, which won't trigger the random exception on coverage CI.
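As a minimal, illustrative sketch (the function name below is made up, not taken from the test): with `paddle.jit.to_static` the function is simply decorated and called directly, with no explicit `Program`/`Executor` handling.

```python
import paddle

# A decorated function is transformed into a static-graph program on the
# first call; the caller never touches Program or Executor objects directly.
@paddle.jit.to_static
def add_one(x):
    return x + 1

x = paddle.to_tensor([1.0, 2.0])
print(add_one(x).numpy())  # [2. 3.]
```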

* [cherry-pick 2.0][Dy2Stat] Support grammar: for ele in var[idx] (#29541)

Support transforming `for ele in var` statements in which `var` is a slice of a Tensor.
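For illustration, a hedged sketch of the newly supported pattern (it mirrors the `for_iter_var_idx` test added below; the function name here is made up):

```python
import paddle

@paddle.jit.to_static
def sum_from_second(x):
    # Iterating over a slice of a Tensor (`x[1:]`) is the
    # `for ele in var[idx]` grammar this PR enables in Dy2Stat.
    z = paddle.zeros([1], dtype='int32')
    for ele in x[1:]:
        z = z + ele
    return z

x = paddle.to_tensor([1, 2, 3], dtype='int32')
print(sum_from_second(x).numpy())  # [5]
```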

* [cherry-pick 2.0][Dy2Stat] Fix bug for loop: a variable is used and created in loop, but used before created (#29769)
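The pattern behind this bug looks roughly like the sketch below (simplified from the `for_loop_dyfunc2` test in the diff; names are illustrative). Inside the loop body, `a` is read before the statement that assigns it, so the name visitor must scan every context of the variable for a `Store` before deciding whether it is created inside the loop.

```python
import paddle

@paddle.jit.to_static
def used_before_created(max_len):
    for i in range(max_len):
        if i > 1:
            s = a  # `a` is read here ...
        a = 1      # ... but only assigned (Stored) later in the loop body
    return paddle.full(shape=[1], fill_value=s, dtype='int32')

# After this fix the transformed program runs; `s` ends up as 1 when max_len > 2.
print(used_before_created(4).numpy())
```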
Parent 3fe71d0a
@@ -167,7 +167,13 @@ class NameVisitor(gast.NodeVisitor):
# var_a = func2(x)
#
if isinstance(var_name_to_ctxs[name][0], gast.Load):
is_created = False
for ctx in var_name_to_ctxs[name]:
if isinstance(ctx, gast.Store):
is_created = True
if isinstance(var_name_to_ctxs[name][0],
gast.Load) and is_created:
loop_var_names.add(name)
create_var_names.add(name)
......
@@ -883,6 +883,8 @@ class ForNodeVisitor(object):
self.node.iter.func,
gast.Attribute) and self.node.iter.func.attr == 'numpy':
return True
elif isinstance(self.node.iter, gast.Subscript):
return True
else:
return False
......
@@ -159,6 +159,7 @@ def for_enumerate_var_numpy_with_start_continue(x_array):
def for_iter_var(x_array):
z = fluid.layers.fill_constant([1], 'int32', 0)
x_array = fluid.dygraph.to_variable(x_array)
for x in x_array:
z = z + x
return z
@@ -221,6 +222,17 @@ def for_enumerate_var_with_nested_range(x_array):
return x
# 16. for iter var[idx]
@paddle.jit.to_static
def for_iter_var_idx(x_array):
z = fluid.layers.fill_constant([1], 'int32', 0)
x_array = fluid.dygraph.to_variable(x_array)
for x in x_array[0:]:
z = z + x
return z
class TestTransformBase(unittest.TestCase):
def setUp(self):
self.place = fluid.CUDAPlace(0) if fluid.is_compiled_with_cuda(
@@ -343,6 +355,11 @@ class TestForIterVar(TestForIterVarNumpy):
self.dygraph_func = for_iter_var
class TestForIterVarIdx(TestForIterVarNumpy):
def set_test_func(self):
self.dygraph_func = for_iter_var_idx
class TestForEnumerateVar(TestForIterVarNumpy):
def set_test_func(self):
self.dygraph_func = for_enumerate_var
......
@@ -86,11 +86,15 @@ def for_loop_dyfunc(max_len):
def for_loop_dyfunc2(max_len):
# Test case: a variable is used and created in loop, but used before created
x = fluid.layers.fill_constant(shape=[1, 2], dtype="int32", value=1)
for i in range(max_len):
if i > 1:
s = a
a = 1
ret = fluid.layers.fill_constant(shape=[1], dtype="int32", value=s)
q, _ = x.shape # test var x.shape only used but not created in loop
ret = fluid.layers.fill_constant(shape=[1], dtype="int32", value=s + q)
return ret
......
@@ -16,63 +16,63 @@ from __future__ import print_function
import unittest
import numpy as np
import paddle.fluid as fluid
from paddle.fluid.dygraph.jit import dygraph_to_static_func
import paddle
SEED = 2020
np.random.seed(SEED)
prog_trans = paddle.jit.ProgramTranslator()
@paddle.jit.to_static
def test_slice_without_control_flow(x):
# Python slice will not be transformed.
x = fluid.dygraph.to_variable(x)
x = paddle.to_tensor(x)
a = [x]
a[0] = fluid.layers.fill_constant(shape=[2], value=2, dtype="float32")
return a
a[0] = paddle.full(shape=[2], fill_value=2, dtype="float32")
return a[0]
@paddle.jit.to_static
def test_slice_in_if(x):
x = fluid.dygraph.to_variable(x)
x = paddle.to_tensor(x)
a = []
if x.numpy()[0] > 0:
a.append(x)
else:
a.append(
fluid.layers.fill_constant(
shape=[1, 2], value=9, dtype="int64"))
a.append(paddle.full(shape=[1, 2], fill_value=9, dtype="int64"))
if x.numpy()[0] > 0:
a[0] = x
out = a[0:]
out = a[0]
return out
def test_slice_in_while_loop(x, iter_num):
x = fluid.dygraph.to_variable(x)
iter_num_var = fluid.layers.fill_constant(
shape=[1], value=iter_num, dtype="int32")
@paddle.jit.to_static
def test_slice_in_while_loop(x, iter_num=3):
x = paddle.to_tensor(x)
iter_num_var = paddle.full(shape=[1], fill_value=iter_num, dtype="int32")
a = []
i = 0
# Note: `i < iter_num` can't be supported in dygraph mode now,
# but PR22892 is fixing it https://github.com/PaddlePaddle/Paddle/pull/22892.
# If PR22892 merged, change `i < iter_num.numpy()[0]` to `i < iter_num`.
while i < iter_num_var.numpy()[0]:
while i < iter_num_var:
a.append(x)
i += 1
i = 0
while i < iter_num_var.numpy()[0]:
a[i] = fluid.layers.fill_constant(shape=[2], value=2, dtype="float32")
a[i] = paddle.full(shape=[2], fill_value=2, dtype="float32")
i += 1
out = a[0:iter_num]
return out
return out[0]
def test_slice_in_for_loop(x, iter_num):
x = fluid.dygraph.to_variable(x)
@paddle.jit.to_static
def test_slice_in_for_loop(x, iter_num=3):
x = paddle.to_tensor(x)
a = []
# Use `fill_constant` so that static analysis can analyze the type of iter_num is Tensor
iter_num = fluid.layers.fill_constant(
shape=[1], value=iter_num, dtype="int32"
# Use `paddle.full` so that static analysis can analyze the type of iter_num is Tensor
iter_num = paddle.full(
shape=[1], fill_value=iter_num, dtype="int32"
) # TODO(liym27): Delete it if the type of parameter iter_num can be resolved
for i in range(iter_num):
@@ -87,35 +87,31 @@ def test_slice_in_for_loop(x, iter_num):
class TestSliceWithoutControlFlow(unittest.TestCase):
def setUp(self):
self.input = np.random.random((3)).astype('int32')
self.place = fluid.CUDAPlace(0) if fluid.is_compiled_with_cuda(
) else fluid.CPUPlace()
self.place = paddle.CUDAPlace(0) if paddle.is_compiled_with_cuda(
) else paddle.CPUPlace()
self.init_dygraph_func()
paddle.disable_static()
def init_dygraph_func(self):
self.dygraph_func = test_slice_without_control_flow
def run_dygraph_mode(self):
with fluid.dygraph.guard():
return self._run(to_static=False)
def _run(self, to_static):
prog_trans.enable(to_static)
res = self.dygraph_func(self.input)
if isinstance(res, (list, tuple)):
res = res[0]
return res.numpy()
def run_static_mode(self):
main_program = fluid.Program()
with fluid.program_guard(main_program):
tensor_list = dygraph_to_static_func(self.dygraph_func)(self.input)
exe = fluid.Executor(self.place)
static_res = exe.run(main_program, fetch_list=tensor_list[0])
return static_res[0]
return self._run(to_static=True)
def test_transformed_static_result(self):
static_res = self.run_static_mode()
dygraph_res = self.run_dygraph_mode()
self.assertTrue(
np.allclose(dygraph_res, static_res),
msg='dygraph res is {}\nstatic_res is {}'.format(dygraph_res,
msg='dygraph_res is {}\nstatic_res is {}'.format(dygraph_res,
static_res))
@@ -123,69 +119,16 @@ class TestSliceInIf(TestSliceWithoutControlFlow):
def init_dygraph_func(self):
self.dygraph_func = test_slice_in_if
def run_static_mode(self):
main_program = fluid.Program()
with fluid.program_guard(main_program):
tensor_array = dygraph_to_static_func(self.dygraph_func)(self.input)
static_out = fluid.layers.array_read(
tensor_array,
i=fluid.layers.fill_constant(
shape=[1], value=0, dtype='int64'))
exe = fluid.Executor(self.place)
numpy_res = exe.run(main_program, fetch_list=static_out)
return numpy_res[0]
class TestSliceInWhileLoop(TestSliceWithoutControlFlow):
def setUp(self):
self.iter_num = 3
self.input = np.random.random((3)).astype('int32')
self.place = fluid.CUDAPlace(0) if fluid.is_compiled_with_cuda(
) else fluid.CPUPlace()
self.init_dygraph_func()
def init_dygraph_func(self):
self.dygraph_func = test_slice_in_while_loop
def run_dygraph_mode(self):
with fluid.dygraph.guard():
var_res = self.dygraph_func(self.input, self.iter_num)
if not isinstance(var_res, list):
var_res = [var_res]
numpy_res = [ele.numpy() for ele in var_res]
return numpy_res
def run_static_mode(self):
main_program = fluid.Program()
with fluid.program_guard(main_program):
tensor_array = dygraph_to_static_func(self.dygraph_func)(
self.input, self.iter_num)
static_outs = []
for i in range(self.iter_num):
static_outs.append(
fluid.layers.array_read(
tensor_array,
i=fluid.layers.fill_constant(
shape=[1], value=i, dtype='int64')))
exe = fluid.Executor(self.place)
numpy_res = exe.run(main_program, fetch_list=static_outs)
return numpy_res
class TestSliceInForLoop(TestSliceInWhileLoop):
def init_dygraph_func(self):
self.dygraph_func = test_slice_in_for_loop
def run_static_mode(self):
main_program = fluid.Program()
with fluid.program_guard(main_program):
static_out = dygraph_to_static_func(self.dygraph_func)(
self.input, self.iter_num)
exe = fluid.Executor(self.place)
numpy_res = exe.run(main_program, fetch_list=static_out)
return numpy_res
if __name__ == '__main__':
unittest.main()