From 26a7c1a39615263285eeec1f678d83f1b17a3cf5 Mon Sep 17 00:00:00 2001
From: wopeizl
Date: Fri, 14 Jun 2019 14:43:16 +0800
Subject: [PATCH] add unit test to cover all parameters for print op
 test=develop (#18089)

---
 .../fluid/tests/unittests/test_print_op.py   | 21 +++++++++++++++++++
 1 file changed, 21 insertions(+)

diff --git a/python/paddle/fluid/tests/unittests/test_print_op.py b/python/paddle/fluid/tests/unittests/test_print_op.py
index 8097b5f7343..8e89aa6b749 100644
--- a/python/paddle/fluid/tests/unittests/test_print_op.py
+++ b/python/paddle/fluid/tests/unittests/test_print_op.py
@@ -56,6 +56,27 @@ class TestPrintOpCPU(unittest.TestCase):
                        fetch_list=[loss],
                        return_numpy=False)
 
+    def test_all_parameters(self):
+        x = layers.data('x', shape=[3], dtype='float32', lod_level=1)
+        x.stop_gradient = False
+
+        for print_tensor_name in [True, False]:
+            for print_tensor_type in [True, False]:
+                for print_tensor_shape in [True, False]:
+                    for print_tensor_lod in [True, False]:
+                        layers.Print(
+                            input=x,
+                            print_tensor_name=print_tensor_name,
+                            print_tensor_type=print_tensor_type,
+                            print_tensor_shape=print_tensor_shape,
+                            print_tensor_lod=print_tensor_lod, )
+        loss = layers.mean(x)
+        append_backward(loss=loss)
+        exe = Executor(self.place)
+        outs = exe.run(feed={'x': self.x_tensor},
+                       fetch_list=[loss],
+                       return_numpy=False)
+
 
 @unittest.skipIf(not core.is_compiled_with_cuda(),
                  "core is not compiled with CUDA")
-- 
GitLab
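
Note on the change: the new test sweeps all 16 combinations (2^4) of the four
boolean print_tensor_* flags of fluid.layers.Print, attaching one Print op per
combination to the same input, then builds a mean loss, appends the backward
pass, and runs the program once so every Print op executes. Below is a minimal
standalone sketch of a single combination, assuming the PaddlePaddle 1.x fluid
API used in this file; the names place and x_tensor are local stand-ins that
mirror what the test class sets up in setUp (self.place, self.x_tensor), not
part of this patch.

    # Sketch only: one flag combination out of the 16 the test iterates.
    import numpy as np
    import paddle.fluid.layers as layers
    from paddle.fluid import core
    from paddle.fluid.backward import append_backward
    from paddle.fluid.executor import Executor

    # Build a small program with one Print op attached to the input.
    x = layers.data('x', shape=[3], dtype='float32', lod_level=1)
    x.stop_gradient = False
    layers.Print(
        input=x,
        print_tensor_name=True,
        print_tensor_type=False,
        print_tensor_shape=True,
        print_tensor_lod=False)
    loss = layers.mean(x)
    append_backward(loss=loss)

    # Feed a LoD tensor shaped like the one the test's setUp prepares.
    place = core.CPUPlace()
    x_tensor = core.LoDTensor()
    x_tensor.set(np.random.random(size=(2, 3)).astype('float32'), place)
    x_tensor.set_recursive_sequence_lengths([[1, 1]])
    exe = Executor(place)
    outs = exe.run(feed={'x': x_tensor},
                   fetch_list=[loss],
                   return_numpy=False)

Sweeping the Cartesian product in nested loops keeps the coverage in a single
program: one forward/backward run exercises every flag combination of the
print op at once, rather than one test case per flag.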