#   Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import print_function
import unittest

import numpy as np

from op_test import OpTest
import paddle
import paddle.fluid as fluid
import paddle.fluid.layers as layers
from paddle.fluid import core
from paddle.fluid.framework import switch_main_program
from simple_nets import simple_fc_net, init_data
from paddle.static import Program, program_guard

paddle.enable_static()


class TestPrintOpCPU(unittest.TestCase):
    def setUp(self):
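        # Shared input for all cases: a 2x3 float32 LoDTensor holding two
        # length-1 sequences (recursive sequence lengths [[1, 1]]).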
        self.place = paddle.CPUPlace()
        self.x_tensor = fluid.core.LoDTensor()
        tensor_np = np.random.random(size=(2, 3)).astype('float32')
        self.x_tensor.set(tensor_np, self.place)
        self.x_tensor.set_recursive_sequence_lengths([[1, 1]])

    def build_network(self, only_forward, **kargs):
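        # Build a minimal program: a lod_level-1 data input, a Print op
        # configured via **kargs, and a mean loss with backward ops appended.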
        x = layers.data('x', shape=[3], dtype='float32', lod_level=1)
        x.stop_gradient = False
        paddle.static.Print(input=x, **kargs)
        loss = paddle.mean(x)
        paddle.static.append_backward(loss=loss)
        return loss

    def test_forward(self):
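        # The Print op fires only during the forward pass.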
        switch_main_program(Program())
        printed = self.build_network(True, print_phase='forward')
        exe = paddle.static.Executor(self.place)
        outs = exe.run(feed={'x': self.x_tensor},
                       fetch_list=[printed],
                       return_numpy=False)

    def test_backward(self):
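        # The Print op fires during the backward pass instead of the forward pass.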
        switch_main_program(Program())
        loss = self.build_network(False, print_phase='backward')
        exe = paddle.static.Executor(self.place)
        outs = exe.run(feed={'x': self.x_tensor},
                       fetch_list=[loss],
                       return_numpy=False)

    def test_all_parameters(self):
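        # Add one Print op for each of the 16 combinations of the four
        # print_tensor_* switches, then run the program once.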
        x = layers.data('x', shape=[3], dtype='float32', lod_level=1)
        x.stop_gradient = False

        for print_tensor_name in [True, False]:
            for print_tensor_type in [True, False]:
                for print_tensor_shape in [True, False]:
                    for print_tensor_lod in [True, False]:
                        paddle.static.Print(
                            input=x,
                            print_tensor_name=print_tensor_name,
                            print_tensor_type=print_tensor_type,
                            print_tensor_shape=print_tensor_shape,
                            print_tensor_lod=print_tensor_lod)
        loss = paddle.mean(x)
        paddle.static.append_backward(loss=loss)
        exe = paddle.static.Executor(self.place)
        outs = exe.run(feed={'x': self.x_tensor},
                       fetch_list=[loss],
                       return_numpy=False)

    def test_no_summarize(self):
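        # summarize=-1 asks Print for every element rather than a truncated summary.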
        switch_main_program(Program())
        printed = self.build_network(True, summarize=-1, print_phase='forward')
        exe = paddle.static.Executor(self.place)
        outs = exe.run(feed={'x': self.x_tensor},
                       fetch_list=[printed],
                       return_numpy=False)


class TestPrintOpError(unittest.TestCase):
    def test_errors(self):
        with program_guard(Program(), Program()):
            # The input type of Print_op must be Variable.
            x1 = fluid.create_lod_tensor(
                np.array([[-1]]), [[1]], paddle.CPUPlace())
            self.assertRaises(TypeError, paddle.static.Print, x1)
            # The input dtype of Print_op must be float32, float64, int32, int64 or bool.
            x2 = paddle.static.data(name='x2', shape=[4], dtype="float16")
            self.assertRaises(TypeError, paddle.static.Print, x2)


@unittest.skipIf(not core.is_compiled_with_cuda(),
                 "core is not compiled with CUDA")
class TestPrintOpGPU(TestPrintOpCPU):
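    # Reuses every TestPrintOpCPU case, swapping in a CUDA place.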
    def setUp(self):
        self.place = paddle.CUDAPlace(0)
        self.x_tensor = fluid.core.LoDTensor()
        tensor_np = np.random.random(size=(2, 3)).astype('float32')
        self.x_tensor.set(tensor_np, self.place)
        self.x_tensor.set_recursive_sequence_lengths([[1, 1]])


class TestPrintOpBackward(unittest.TestCase):
    def check_backward(self, use_cuda):
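        # Print(loss) is applied before minimize(), so the block should end up
        # with two print ops: the forward one and the one added for its gradient.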
        main = paddle.static.Program()
        startup = paddle.static.Program()

        with program_guard(main, startup):
            loss = simple_fc_net()
            loss = paddle.static.Print(loss)
            paddle.optimizer.Adam().minimize(loss)

        print_ops = [op for op in main.blocks[0].ops if op.type == u'print']
        assert len(print_ops) == 2, "The number of print ops should be 2"

        place = paddle.CUDAPlace(0) if use_cuda else paddle.CPUPlace()
        exe = paddle.static.Executor(place)
        exe.run(startup)

        binary = paddle.static.CompiledProgram(main).with_data_parallel(
            loss_name=loss.name)

        img, label = init_data()
        feed_dict = {"image": img, "label": label}
        exe.run(binary, feed_dict)

    def test_fw_bw(self):
        if paddle.is_compiled_with_cuda():
            self.check_backward(use_cuda=True)
        self.check_backward(use_cuda=False)


if __name__ == '__main__':
    unittest.main()