未验证 提交 096c048b 编写于 作者: L liym27 提交者: GitHub

Fix unittest test_slice (#29740)

Before this commit, test_slice used the old API `dygraph_to_static_func` for Dynamic-to-Static conversion and used the Executor explicitly, which is not recommended to users.
After the fix, it uses the recommended API `paddle.jit.to_static` to replace `dygraph_to_static_func`, which won't trigger the random exception on coverage CI.
上级 7c2affaa
...@@ -16,63 +16,63 @@ from __future__ import print_function ...@@ -16,63 +16,63 @@ from __future__ import print_function
import unittest import unittest
import numpy as np import numpy as np
import paddle.fluid as fluid
from paddle.fluid.dygraph.jit import dygraph_to_static_func import paddle
SEED = 2020 SEED = 2020
np.random.seed(SEED) np.random.seed(SEED)
prog_trans = paddle.jit.ProgramTranslator()
@paddle.jit.to_static
def test_slice_without_control_flow(x):
    """Slice a Python list of tensors with no control flow involved.

    Args:
        x: array-like input convertible by ``paddle.to_tensor``.

    Returns:
        Tensor: the first (and only) element of the list after it has been
        overwritten with a constant tensor of shape [2] filled with 2.0.
    """
    # Python slice will not be transformed.
    x = paddle.to_tensor(x)
    a = [x]
    a[0] = paddle.full(shape=[2], fill_value=2, dtype="float32")
    return a[0]
@paddle.jit.to_static
def test_slice_in_if(x):
    """Index a Python list whose contents depend on an ``if`` branch.

    Args:
        x: array-like input convertible by ``paddle.to_tensor``.

    Returns:
        Tensor: ``x`` itself when ``x[0] > 0``; otherwise a constant
        int64 tensor of shape [1, 2] filled with 9.
    """
    x = paddle.to_tensor(x)
    a = []
    # The branch taken depends on the runtime value of the input, which
    # is exactly what dynamic-to-static control-flow transformation must
    # handle for list append.
    if x.numpy()[0] > 0:
        a.append(x)
    else:
        a.append(paddle.full(shape=[1, 2], fill_value=9, dtype="int64"))
    # A second data-dependent branch that overwrites a list element.
    if x.numpy()[0] > 0:
        a[0] = x
    out = a[0]
    return out
@paddle.jit.to_static
def test_slice_in_while_loop(x, iter_num=3):
    """Append to and overwrite a Python list inside ``while`` loops.

    Args:
        x: array-like input convertible by ``paddle.to_tensor``.
        iter_num (int): number of loop iterations; also the length of the
            list slice taken at the end. Defaults to 3.

    Returns:
        Tensor: the first element of ``a[0:iter_num]`` — a constant
        float32 tensor of shape [2] filled with 2.0 after the second loop.
    """
    x = paddle.to_tensor(x)
    iter_num_var = paddle.full(shape=[1], fill_value=iter_num, dtype="int32")
    a = []
    i = 0

    # Loop condition compares a Python int against a Tensor; the
    # to_static transformation converts this into a static while_loop.
    while i < iter_num_var:
        a.append(x)
        i += 1

    i = 0
    # Second pass overwrites every element via list indexing inside a loop.
    while i < iter_num_var.numpy()[0]:
        a[i] = paddle.full(shape=[2], fill_value=2, dtype="float32")
        i += 1
    out = a[0:iter_num]
    return out[0]
def test_slice_in_for_loop(x, iter_num): @paddle.jit.to_static
x = fluid.dygraph.to_variable(x) def test_slice_in_for_loop(x, iter_num=3):
x = paddle.to_tensor(x)
a = [] a = []
# Use `fill_constant` so that static analysis can analyze the type of iter_num is Tensor # Use `paddle.full` so that static analysis can analyze the type of iter_num is Tensor
iter_num = fluid.layers.fill_constant( iter_num = paddle.full(
shape=[1], value=iter_num, dtype="int32" shape=[1], fill_value=iter_num, dtype="int32"
) # TODO(liym27): Delete it if the type of parameter iter_num can be resolved ) # TODO(liym27): Delete it if the type of parameter iter_num can be resolved
for i in range(iter_num): for i in range(iter_num):
...@@ -87,35 +87,31 @@ def test_slice_in_for_loop(x, iter_num): ...@@ -87,35 +87,31 @@ def test_slice_in_for_loop(x, iter_num):
class TestSliceWithoutControlFlow(unittest.TestCase):
    """Check dygraph and to_static results agree for list slicing.

    Subclasses override ``init_dygraph_func`` to test other slice
    scenarios (inside ``if``, ``while`` and ``for`` constructs).
    """

    def setUp(self):
        self.input = np.random.random((3)).astype('int32')
        self.place = paddle.CUDAPlace(0) if paddle.is_compiled_with_cuda(
        ) else paddle.CPUPlace()
        self.init_dygraph_func()
        # Tests run in dygraph mode; ProgramTranslator toggles the
        # static transformation per run.
        paddle.disable_static()

    def init_dygraph_func(self):
        # Hook point for subclasses: the function under test.
        self.dygraph_func = test_slice_without_control_flow

    def run_dygraph_mode(self):
        return self._run(to_static=False)

    def _run(self, to_static):
        # Enable/disable dynamic-to-static conversion, then execute the
        # decorated function once and return its numpy result.
        prog_trans.enable(to_static)
        res = self.dygraph_func(self.input)
        return res.numpy()

    def run_static_mode(self):
        return self._run(to_static=True)

    def test_transformed_static_result(self):
        static_res = self.run_static_mode()
        dygraph_res = self.run_dygraph_mode()
        self.assertTrue(
            np.allclose(dygraph_res, static_res),
            msg='dygraph_res is {}\nstatic_res is {}'.format(dygraph_res,
                                                             static_res))
class TestSliceInIf(TestSliceWithoutControlFlow):
    """Same dygraph-vs-static comparison, but for slicing inside ``if``."""

    def init_dygraph_func(self):
        self.dygraph_func = test_slice_in_if
class TestSliceInWhileLoop(TestSliceWithoutControlFlow):
    """Same comparison, for list slicing inside ``while`` loops."""

    def init_dygraph_func(self):
        self.dygraph_func = test_slice_in_while_loop
class TestSliceInForLoop(TestSliceInWhileLoop):
    """Same comparison, for list slicing inside a ``for`` loop."""

    def init_dygraph_func(self):
        self.dygraph_func = test_slice_in_for_loop
if __name__ == '__main__':
    unittest.main()
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册