#   Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import unittest

import paddle.fluid as fluid
import paddle.fluid.profiler as profiler
import numpy


class BaseParallelForTest(unittest.TestCase):
    def run_test(self, callback, feed, fetch):
        """
        Run the unittest for parallel.for.

        Args:
            callback(callable): A callable that returns a generator. The
                generator yields twice: the first yield returns the data
                layers, and the second yield returns the loss. The (possibly
                wrapped) data variables are sent back through the first yield.

            feed(dict): The executor feeding dictionary.
            fetch(list|str): The fetch name list.

        Returns:
            None

        Raises:
            AssertionError: If the results of CPU, parallel.for on CPU, GPU,
                and parallel.for on GPU differ from one another.

        """
        cpu = fluid.CPUPlace()
        result_cpu = self._run_test_impl_(
            callback=callback,
            feed=feed,
            fetch=fetch,
            place=cpu,
            use_parallel=False)
        result_cpu_parallel = self._run_test_impl_(
Y
Yang Yu 已提交
52 53 54 55 56
            callback=callback,
            feed=feed,
            fetch=fetch,
            place=cpu,
            use_parallel=True)
        if fluid.core.is_compiled_with_cuda():
            gpu = fluid.CUDAPlace(0)
            result_gpu = self._run_test_impl_(
                callback=callback,
                feed=feed,
                fetch=fetch,
                place=gpu,
                use_parallel=False,
                use_gpu=True)
            result_gpu_parallel = self._run_test_impl_(
                callback=callback,
                feed=feed,
                fetch=fetch,
                place=gpu,
                use_parallel=True,
                use_gpu=True)
            result_gpu_nccl = self._run_test_impl_(
                callback=callback,
                feed=feed,
                fetch=fetch,
                place=gpu,
                use_parallel=True,
                use_nccl=True,
                use_gpu=True)
            self._assert_same_(fetch, result_cpu, result_cpu_parallel,
                               result_gpu, result_gpu_parallel, result_gpu_nccl)
        else:
            self._assert_same_(fetch, result_cpu, result_cpu_parallel)

    def _run_test_impl_(self,
                        callback,
                        feed,
                        fetch,
                        place,
                        use_parallel=False,
                        use_nccl=False,
                        use_gpu=False):
        """
        Run a single test, returns the fetch values
        Args:
            place(Place): the computation place. 
            use_parallel(bool): Whether use parallel.for or not. 

        Returns:
            Fetched numpy arrays.

        """
        if isinstance(fetch, str):
            fetch = [fetch]
        main = fluid.Program()
        startup = fluid.Program()
        # Fix seed
        main.random_seed = 10
        startup.random_seed = 10

        with fluid.program_guard(main, startup):
            generator = callback()
            # Automatically insert parallel do if use_parallel = True
            if use_parallel:
                thread_num = (fluid.core.get_cuda_device_count()
                              if use_gpu else 8)
                places = fluid.layers.get_places(thread_num)
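                # ParallelDo splits each mini-batch across `places`, runs a
                # copy of the sub-block on every place, and merges whatever
                # the sub-block emits through write_output.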
                pd = fluid.layers.ParallelDo(places, use_nccl=use_nccl)
                data = next(generator)
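                # `data` now holds the data layers handed back by the
                # callback's first yield.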

                if isinstance(data, fluid.Variable):
                    data = [data]

                with pd.do():
                    ins = [pd.read_input(d) for d in data]
                    if len(ins) == 1:
                        ins = ins[0]
                    loss = generator.send(ins)  # patch input
                    pd.write_output(loss)

                loss = pd()
            else:
                data = next(generator)
                loss = generator.send(data)
            self.assertIsNotNone(loss)
            avg_loss = fluid.layers.mean(loss)
            fluid.backward.append_backward(loss=avg_loss)

        exe = fluid.Executor(place)
        exe.run(startup)
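        # Profile this run; 'total' sorts the printed report by the total
        # time spent in each operator.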
        if use_gpu:
            profile_type = 'GPU'
        else:
            profile_type = 'CPU'
        with profiler.profiler(profile_type, 'total', '/tmp/profiler'):
            return exe.run(main, feed=feed, fetch_list=fetch)

    def _assert_same_(self, fetch, *args):
        """
        Assert the return values of `run_test` are same.
        Args:
            fetch: Fetch list. Used for print error message
            *args: The fetch result lists of each situations.

        Returns:
            None
            
        Raises:
            AssertionError

        """

        def _impl_(a, b, fetch_id, item_id):
            item_str = [
                'CPU', 'ParallelCPU', 'GPU', 'ParallelGPU', 'ParallelGPUNCCL'
            ]
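            # Loose tolerances: parallel execution can change the reduction
            # order, so results are compared approximately, not bitwise.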
            flag = numpy.allclose(a, b, rtol=0.1, atol=1e-3)
            self.assertTrue(flag,
                            "The {0} results differ in {1}: {2} vs {3}".format(
                                fetch[fetch_id], item_str[item_id], a, b))

        for i, items in enumerate(zip(*args)):
            self.assertGreater(len(items), 0)
            for j in range(1, len(items)):
                _impl_(items[0], items[j], fetch_id=i, item_id=j)


class ParallelOpTest(BaseParallelForTest):
    @staticmethod
    def __network__():
        x = fluid.layers.data(shape=[784], dtype='float32', name='img')
        x = yield x
        hidden = fluid.layers.fc(input=x, size=200, param_attr='fc1.w')
        hidden = fluid.layers.batch_norm(input=hidden)
        loss = fluid.layers.mean(hidden)
        yield loss

    def test_simple_fc(self):
        self.run_test(
            callback=self.__network__,
            feed={
                'img': numpy.random.random(size=(51, 784)).astype('float32')
            },
            fetch=['fc1.w@GRAD'])

    def test_fc_with_tiny_data(self):
        self.run_test(
            callback=self.__network__,
            feed={'img': numpy.random.random(size=(1, 784)).astype('float32')},
            fetch=['fc1.w@GRAD'])


class ParallelOpTestMultipleInput(BaseParallelForTest):
    @staticmethod
    def __network__():
        x = fluid.layers.data(
            shape=[784], dtype='float32', name='img1', stop_gradient=False)
        y = fluid.layers.data(
            shape=[784], dtype='float32', name='img2', stop_gradient=False)
        yield [x, y]
        x = x + y
        hidden1 = fluid.layers.fc(input=x, size=200, param_attr='fc1.w')
        hidden2 = fluid.layers.fc(input=hidden1, size=200, param_attr='fc2.w')
        hidden3 = fluid.layers.fc(input=hidden2, size=200, param_attr='fc3.w')
        loss = fluid.layers.mean(hidden3)
        yield loss

    def test_simple_fc(self):
        self.run_test(
            callback=self.__network__,
            feed={
                'img1': numpy.random.random(size=(51, 784)).astype('float32'),
                'img2': numpy.random.random(size=(51, 784)).astype('float32')
            },
            fetch=['fc1.w@GRAD', 'fc2.w@GRAD', 'fc3.w@GRAD'])


if __name__ == '__main__':
    unittest.main()