#   Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import os
import unittest

import numpy as np

import paddle
import paddle.fluid as fluid

# Use two CPU devices for data-parallel execution when running on CPU.
os.environ["CPU_NUM"] = "2"


class TestFetchUnmerged(unittest.TestCase):
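    """Checks fetching from a data-parallel CompiledProgram with
    return_merged=False (per-device results) and return_merged=True
    (results merged across devices)."""
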
    def conv_net(self, img, label):
        conv_pool_1 = fluid.nets.simple_img_conv_pool(
            input=img,
            filter_size=5,
            num_filters=8,
            pool_size=2,
            pool_stride=2,
            pool_type='max',
            act="relu",
        )
        conv_pool_1 = paddle.static.nn.batch_norm(conv_pool_1)
        conv_pool_2 = fluid.nets.simple_img_conv_pool(
            input=conv_pool_1,
            filter_size=5,
            num_filters=16,
            pool_size=2,
            pool_stride=2,
            pool_type='avg',
            act="relu",
        )
        hidden = paddle.static.nn.fc(x=conv_pool_2, size=32, activation='relu')
        prediction = paddle.static.nn.fc(
            x=hidden, size=10, activation='softmax'
        )
        loss = paddle.nn.functional.cross_entropy(
            input=prediction, label=label, reduction='none', use_softmax=False
        )
        avg_loss = paddle.mean(loss)
        return avg_loss, prediction

    def build_program(self, main, startup, is_test):
        with fluid.unique_name.guard():
            with fluid.program_guard(main, startup):
                img = fluid.layers.data(
                    name='image', shape=[1, 28, 28], dtype='float32'
                )
                label = fluid.layers.data(
                    name='label', shape=[1], dtype='int64'
                )
                loss, prediction = self.conv_net(img, label)
                if not is_test:
                    opt = fluid.optimizer.Adam(learning_rate=0.001)
                    opt.minimize(loss)
        return [img, label], loss, prediction

    def fetch_unmerged(self, use_cuda=True):
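        """Train a small conv net for a few iterations and check the shapes
        of results fetched with return_merged=False and return_merged=True."""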
        main_program = fluid.Program()
        startup_program = fluid.Program()
        feeds, loss, prediction = self.build_program(
            main_program, startup_program, False
        )

        place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
        exe = fluid.Executor(place)
        exe.run(startup_program)

        build_strategy = fluid.BuildStrategy()
        binary = fluid.CompiledProgram(main_program).with_data_parallel(
            loss_name=loss.name, build_strategy=build_strategy
        )

        iters = 2
        batch_size = 16
        train_reader = paddle.batch(
            paddle.reader.shuffle(paddle.dataset.mnist.train(), buf_size=500),
            batch_size=batch_size,
        )
        feeder = fluid.DataFeeder(feed_list=feeds, place=place)

        # One result slice per device: all visible CUDA devices on GPU, CPU_NUM (2) on CPU.
        device_num = fluid.core.get_cuda_device_count() if use_cuda else 2
        for _ in range(iters):
            data = next(train_reader())
            loss_v, prediction_v = exe.run(
                binary,
                feed=feeder.feed(data),
                fetch_list=[loss, prediction],
                return_merged=False,
            )
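            # With return_merged=False, each fetched variable keeps a leading
            # device dimension instead of being concatenated across devices.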
            self.assertEqual(np.array(loss_v).shape, (device_num, 1))
            self.assertEqual(
                np.array(prediction_v).shape,
                (device_num, batch_size // device_num, 10),
            )

        for _ in range(iters):
            data = next(train_reader())
            loss_v, prediction_v = exe.run(
                binary,
                feed=feeder.feed(data),
                fetch_list=[loss, prediction],
                return_merged=True,
            )
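            # With return_merged=True, per-device results are merged along the
            # batch dimension, so the device dimension disappears.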
            self.assertEqual(np.array(loss_v).shape, (device_num,))
            self.assertEqual(np.array(prediction_v).shape, (batch_size, 10))

    def test_fetch_unmerged(self):
        if fluid.core.is_compiled_with_cuda():
            self.fetch_unmerged(use_cuda=True)
        self.fetch_unmerged(use_cuda=False)

    def test_fetch_unmerged_parallel_graph(self):
        fluid.core.globals()['FLAGS_enable_parallel_graph'] = True
        if fluid.core.is_compiled_with_cuda():
            self.fetch_unmerged(use_cuda=True)
        self.fetch_unmerged(use_cuda=False)
        fluid.core.globals()['FLAGS_enable_parallel_graph'] = False


if __name__ == '__main__':
    unittest.main()