From 05d2b7a37f8157e66fc3a05c2c0268285f648e2e Mon Sep 17 00:00:00 2001
From: joejiong
Date: Wed, 3 Feb 2021 14:26:07 +0800
Subject: [PATCH] Update paddle.static.Print with paddle2.0 api (#30846)

As the title
---
 .../fluid/tests/unittests/test_print_op.py   | 71 ++++++++++---------
 1 file changed, 36 insertions(+), 35 deletions(-)
 mode change 100644 => 100755 python/paddle/fluid/tests/unittests/test_print_op.py

diff --git a/python/paddle/fluid/tests/unittests/test_print_op.py b/python/paddle/fluid/tests/unittests/test_print_op.py
old mode 100644
new mode 100755
index 5029822e855..cc06a3cf7fa
--- a/python/paddle/fluid/tests/unittests/test_print_op.py
+++ b/python/paddle/fluid/tests/unittests/test_print_op.py
@@ -13,25 +13,26 @@
 # limitations under the License.
 
 from __future__ import print_function
-
 import unittest
-import paddle.fluid.core as core
-from paddle.fluid.executor import Executor
+
+import numpy as np
+
+from op_test import OpTest
+import paddle
 import paddle.fluid as fluid
 import paddle.fluid.layers as layers
-from paddle.fluid.backward import append_backward
+from paddle.fluid import core
 from paddle.fluid.framework import switch_main_program
-from paddle.fluid.framework import Program
-import numpy as np
 from simple_nets import simple_fc_net, init_data
-from paddle.fluid import compiler, Program, program_guard
-from op_test import OpTest
+from paddle.static import Program, program_guard
+
+paddle.enable_static()
 
 
 class TestPrintOpCPU(unittest.TestCase):
     def setUp(self):
-        self.place = core.CPUPlace()
-        self.x_tensor = core.LoDTensor()
+        self.place = paddle.CPUPlace()
+        self.x_tensor = fluid.core.LoDTensor()
         tensor_np = np.random.random(size=(2, 3)).astype('float32')
         self.x_tensor.set(tensor_np, self.place)
         self.x_tensor.set_recursive_sequence_lengths([[1, 1]])
@@ -39,15 +40,15 @@ class TestPrintOpCPU(unittest.TestCase):
     def build_network(self, only_forward, **kargs):
         x = layers.data('x', shape=[3], dtype='float32', lod_level=1)
         x.stop_gradient = False
-        layers.Print(input=x, **kargs)
-        loss = layers.mean(x)
-        append_backward(loss=loss)
+        paddle.static.Print(input=x, **kargs)
+        loss = paddle.mean(x)
+        paddle.static.append_backward(loss=loss)
         return loss
 
     def test_forward(self):
         switch_main_program(Program())
         printed = self.build_network(True, print_phase='forward')
-        exe = Executor(self.place)
+        exe = paddle.static.Executor(self.place)
         outs = exe.run(feed={'x': self.x_tensor},
                        fetch_list=[printed],
                        return_numpy=False)
@@ -55,7 +56,7 @@ class TestPrintOpCPU(unittest.TestCase):
     def test_backward(self):
         switch_main_program(Program())
         loss = self.build_network(False, print_phase='backward')
-        exe = Executor(self.place)
+        exe = paddle.static.Executor(self.place)
         outs = exe.run(feed={'x': self.x_tensor},
                        fetch_list=[loss],
                        return_numpy=False)
@@ -68,15 +69,15 @@ class TestPrintOpCPU(unittest.TestCase):
             for print_tensor_type in [True, False]:
                 for print_tensor_shape in [True, False]:
                     for print_tensor_lod in [True, False]:
-                        layers.Print(
+                        paddle.static.Print(
                             input=x,
                             print_tensor_name=print_tensor_name,
                             print_tensor_type=print_tensor_type,
                             print_tensor_shape=print_tensor_shape,
                             print_tensor_lod=print_tensor_lod, )
-        loss = layers.mean(x)
-        append_backward(loss=loss)
-        exe = Executor(self.place)
+        loss = paddle.mean(x)
+        paddle.static.append_backward(loss=loss)
+        exe = paddle.static.Executor(self.place)
         outs = exe.run(feed={'x': self.x_tensor},
                        fetch_list=[loss],
                        return_numpy=False)
@@ -84,7 +85,7 @@ class TestPrintOpCPU(unittest.TestCase):
     def test_no_summarize(self):
         switch_main_program(Program())
         printed = self.build_network(True, summarize=-1, print_phase='forward')
-        exe = Executor(self.place)
+        exe = paddle.static.Executor(self.place)
         outs = exe.run(feed={'x': self.x_tensor},
                        fetch_list=[printed],
                        return_numpy=False)
@@ -95,19 +96,19 @@ class TestPrintOpError(unittest.TestCase):
         with program_guard(Program(), Program()):
             # The input type of Print_op must be Variable.
             x1 = fluid.create_lod_tensor(
-                np.array([[-1]]), [[1]], fluid.CPUPlace())
-            self.assertRaises(TypeError, fluid.layers.Print, x1)
+                np.array([[-1]]), [[1]], paddle.CPUPlace())
+            self.assertRaises(TypeError, paddle.static.Print, x1)
             # The input dtype of Print_op must be float32, float64, int32_t, int64_t or bool.
-            x2 = fluid.layers.data(name='x2', shape=[4], dtype="float16")
-            self.assertRaises(TypeError, fluid.layers.Print, x2)
+            x2 = paddle.static.data(name='x2', shape=[4], dtype="float16")
+            self.assertRaises(TypeError, paddle.static.Print, x2)
 
 
 @unittest.skipIf(not core.is_compiled_with_cuda(),
                  "core is not compiled with CUDA")
 class TestPrintOpGPU(TestPrintOpCPU):
     def setUp(self):
-        self.place = core.CUDAPlace(0)
-        self.x_tensor = core.LoDTensor()
+        self.place = paddle.CUDAPlace(0)
+        self.x_tensor = fluid.core.LoDTensor()
         tensor_np = np.random.random(size=(2, 3)).astype('float32')
         self.x_tensor.set(tensor_np, self.place)
         self.x_tensor.set_recursive_sequence_lengths([[1, 1]])
@@ -115,22 +116,22 @@ class TestPrintOpGPU(TestPrintOpCPU):
 
 class TestPrintOpBackward(unittest.TestCase):
     def check_backward(self, use_cuda):
-        main = fluid.Program()
-        startup = fluid.Program()
+        main = paddle.static.Program()
+        startup = paddle.static.Program()
 
-        with fluid.program_guard(main, startup):
+        with program_guard(main, startup):
             loss = simple_fc_net()
-            loss = fluid.layers.Print(loss)
-            fluid.optimizer.Adam().minimize(loss)
+            loss = paddle.static.Print(loss)
+            paddle.optimizer.Adam().minimize(loss)
 
         print_ops = [op for op in main.blocks[0].ops if op.type == u'print']
         assert len(print_ops) == 2, "The number of print op should be 2"
 
-        place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
-        exe = fluid.Executor(place)
+        place = paddle.CUDAPlace(0) if use_cuda else paddle.CPUPlace()
+        exe = paddle.static.Executor(place)
         exe.run(startup)
 
-        binary = fluid.compiler.CompiledProgram(main).with_data_parallel(
+        binary = paddle.static.CompiledProgram(main).with_data_parallel(
             loss_name=loss.name)
 
         img, label = init_data()
@@ -138,7 +139,7 @@ class TestPrintOpBackward(unittest.TestCase):
         exe.run(binary, feed_dict)
 
     def test_fw_bw(self):
-        if core.is_compiled_with_cuda():
+        if paddle.is_compiled_with_cuda():
             self.check_backward(use_cuda=True)
         self.check_backward(use_cuda=False)
 
-- 
GitLab
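
For reference, a minimal standalone sketch (not part of the patch) of the
paddle 2.0 static-graph usage that these tests exercise. It assumes
paddle >= 2.0 and the documented paddle.static.Print signature; the program
and variable names are illustrative only:

    import numpy as np
    import paddle

    # paddle.static APIs require static-graph mode, hence enable_static().
    paddle.enable_static()

    main_prog = paddle.static.Program()
    startup_prog = paddle.static.Program()
    with paddle.static.program_guard(main_prog, startup_prog):
        x = paddle.static.data(name='x', shape=[2, 3], dtype='float32')
        # Print inserts a print op into the program; the tensor's name,
        # dtype, shape, and values are dumped each time the op runs.
        x = paddle.static.Print(x, message='x after feed:')
        loss = paddle.mean(x)

    exe = paddle.static.Executor(paddle.CPUPlace())
    exe.run(startup_prog)
    out, = exe.run(main_prog,
                   feed={'x': np.random.random((2, 3)).astype('float32')},
                   fetch_list=[loss])

Because Print returns its input and the op is recorded in the program, the
tensor is printed during every run of main_prog, in the forward and/or
backward pass depending on print_phase.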