# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import unittest

import numpy as np

import paddle
import paddle.fluid as fluid
import paddle.static as static

class BackwardNet:
    """
    Abstract base class.
    Every net that inherits from this class should implement two methods:
        build_model: build the net used to test the backward logic
        init_data: fake input data to feed all programs.
    """

    def __init__(self):
        self.stop_gradient_grad_vars = set()
        self.no_grad_vars = set()
        self.params_names = set()
        self.op_path = []

    def build_model(self):
        """
        Build net to test the logic of backward.
        :return: loss
        """
        raise NotImplementedError

    def init_data(self):
        """
        Fake input data to test all programs.
        :return: dict, {'var_name': var_data}
        """
        raise NotImplementedError


class TestBackward(unittest.TestCase):
    """
    All related test classes should inherit from this class
    and implement only the test_backward function.
    """

    def _check_all(self, net):
        place = (
            fluid.CUDAPlace(0)
            if fluid.core.is_compiled_with_cuda()
            else fluid.CPUPlace()
        )
        exe = fluid.Executor(place)

        main = fluid.Program()
        startup = fluid.Program()

        with fluid.program_guard(main, startup):
            loss = net.build_model()
            self._check_backward(loss, main)

            optimizer = fluid.optimizer.SGD(learning_rate=0.1)
            optimizer.minimize(loss)
            exe.run(startup)
            exe.run(feed=net.init_data())

    def _check_backward(self, loss, main_program):
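        """
        Check append_backward's result, then re-run its internal helpers
        (stop-gradient collection, op-path search, no-grad-var discovery)
        and compare each intermediate step with the net's expectations.
        """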
        global_block_idx = self.global_block_idx
        params_grads = self._check_params_grad(loss)
        # 1.1 get_stop_gradients
        no_grad_dict = self._check_stop_gradient(main_program)
        # 1.2 find_op_path
        op_path, block_no_grad_set = self._check_op_path(
            main_program.block(global_block_idx), [loss], [], no_grad_dict
        )
        # 1.3 _find_no_grad_vars
        no_grad_vars = self._check_find_no_grad_vars(
            main_program.block(global_block_idx),
            op_path,
            [loss],
            block_no_grad_set,
        )
        # update no_grad_dict
        block_no_grad_set.update(no_grad_vars)
        no_grad_dict[global_block_idx].update(
            list(map(fluid.backward._append_grad_suffix_, block_no_grad_set))
        )

    def _check_params_grad(self, loss, parameter_list=None, no_grad_set=None):
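        """
        Call append_backward and check that gradients are created for
        exactly the parameters the net expects.
        """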
        params_grads = fluid.backward.append_backward(
            loss, parameter_list, no_grad_set
        )
        params_names = {
            param_var.name for (param_var, grad_var) in params_grads
        }
        self.assertSetEqual(params_names, self.net.params_names)

        return params_grads

    def _check_stop_gradient(self, program):
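        """
        The stop-gradient dict maps block index to the @GRAD names of
        variables with stop_gradient=True; the global block's entry should
        match the net's expectation.
        """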
        no_grad_dict = fluid.backward._get_stop_gradients_(program)
        if no_grad_dict is not None and isinstance(no_grad_dict, dict):
            self.assertSetEqual(
                no_grad_dict[self.global_block_idx],
                self.net.stop_gradient_grad_vars,
            )

        return no_grad_dict

    def _check_op_path(self, root_block, outputs, inputs=[], no_grad_dict=None):
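        """
        _find_op_path_ returns the forward ops needed to compute `outputs`,
        skipping ops blocked by the no-grad set; their types should match
        the net's expected op path.
        """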
        if no_grad_dict is None or not isinstance(no_grad_dict, dict):
            block_no_grad_set = None
        else:
            block_no_grad_set = set(
                map(
                    fluid.backward._strip_grad_suffix_,
                    no_grad_dict[self.global_block_idx],
                )
            )
        op_path = fluid.backward._find_op_path_(
            root_block, outputs, inputs, block_no_grad_set
        )
        op_types = [op.type for op in op_path]
        self.assertListEqual(op_types, self.net.op_path)

        return op_path, block_no_grad_set

    def _check_find_no_grad_vars(
        self, root_block, op_path, targets, block_no_grad_set
    ):
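        """
        Check fluid.backward._find_no_grad_vars against the net's expected
        no_grad_vars.
        """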
        no_grad_vars = fluid.backward._find_no_grad_vars(
            root_block, op_path, targets, block_no_grad_set
        )
        self.assertSetEqual(no_grad_vars, self.net.no_grad_vars)

        return no_grad_vars

    def _check_error_param_list(self, net, parameter_list):
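        """
        Build and run the net, forwarding `parameter_list` to
        optimizer.minimize so that invalid types raise there.
        """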
        place = (
            fluid.CUDAPlace(0)
            if fluid.core.is_compiled_with_cuda()
            else fluid.CPUPlace()
        )
        exe = fluid.Executor(place)

        main = fluid.Program()
        startup = fluid.Program()

        with fluid.program_guard(main, startup):
            loss = net.build_model()
            optimizer = fluid.optimizer.SGD(learning_rate=0.1)
            optimizer.minimize(loss, parameter_list=parameter_list)
            exe.run(startup)
            exe.run(feed=net.init_data())

    def _check_error_no_grad_set(self, net, no_grad_set):
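        """
        Build and run the net, forwarding `no_grad_set` to
        optimizer.minimize so that invalid types raise there.
        """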
        place = (
            fluid.CUDAPlace(0)
            if fluid.core.is_compiled_with_cuda()
            else fluid.CPUPlace()
        )
        exe = fluid.Executor(place)

        main = fluid.Program()
        startup = fluid.Program()

        with fluid.program_guard(main, startup):
            loss = net.build_model()
            optimizer = fluid.optimizer.SGD(learning_rate=0.1)
            optimizer.minimize(loss, no_grad_set=no_grad_set)
            exe.run(startup)
            exe.run(feed=net.init_data())


class SimpleNet(BackwardNet):
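    """
    Three stop-gradient inputs share one embedding table ('w2v') and two
    merged branches share one FC weight ('fc_w'); only the first branch
    feeds the loss, so the other branch is pruned from the op path.
    """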
    def __init__(self):
        super().__init__()
        self.stop_gradient_grad_vars = {
            'x_no_grad@GRAD',
            'x2_no_grad@GRAD',
            'x3_no_grad@GRAD',
            'label_no_grad@GRAD',
        }
        self.no_grad_vars = set()
        self.params_names = {'w2v', 'fc_predict.b_0', 'fc_w'}
        self.op_path = [
            'lookup_table_v2',
            'lookup_table_v2',  # embedding
            'elementwise_add',  # merge
            'mul',
            'elementwise_add',
            'softmax',  # fc
            'elementwise_sub',
            'square',
            'reduce_mean',
        ]  # loss
        self.shape = [16, 50]

    def init_data(self):
        assert len(self.shape) == 2
        x = np.random.randint(0, 90, self.shape).astype('int64')
        x2 = np.random.randint(0, 90, self.shape).astype('int64')
        x3 = np.random.randint(0, 90, self.shape).astype('int64')
        label = np.random.random([self.shape[0], 1]).astype('float32')
        return {
            'x_no_grad': x,
            'x2_no_grad': x2,
            'x3_no_grad': x3,
            'label_no_grad': label,
        }

    def build_model(self):
        # stop_gradient = True in input
        x = fluid.data(name='x_no_grad', shape=self.shape, dtype='int64')
        x2 = fluid.data(name='x2_no_grad', shape=self.shape, dtype='int64')
        x3 = fluid.data(name='x3_no_grad', shape=self.shape, dtype='int64')
        label = fluid.data(
            name='label_no_grad', shape=[self.shape[0], 1], dtype='float32'
        )
        # Shared embedding layer: the gradients of 'w2v' will be summed and
        # renamed, which exercises _addup_repetitive_outputs_.
        x_emb = fluid.embedding(
            x, size=[100, 64], param_attr=fluid.ParamAttr(name='w2v')
        )
        x2_emb = fluid.embedding(
            x2, size=[100, 64], param_attr=fluid.ParamAttr(name='w2v')
        )
        x3_emb = fluid.embedding(
            x3, size=[100, 64], param_attr=fluid.ParamAttr(name='w2v')
        )
        # merge layers
        x_merge = fluid.layers.elementwise_add(x_emb, x2_emb, name='x_add_x2')
        x2_merge = fluid.layers.elementwise_add(
            x2_emb, x3_emb, name='x2_add_x3'
        )
        # shared fc_w
        predict = fluid.layers.fc(
            input=x_merge,
            size=1,
            act='softmax',
            param_attr=fluid.ParamAttr(name='fc_w'),
            name='fc_predict',
        )
        # Unused branch: fc_no_use does not contribute to the loss and should
        # be pruned from the op path.
        fc_no_use = fluid.layers.fc(
            input=x2_merge,
            size=1,
            act='sigmoid',
            param_attr=fluid.ParamAttr(name='fc_w'),
            name='fc_no_use',
        )
        # loss
        cost = fluid.layers.square_error_cost(input=predict, label=label)
        loss = paddle.mean(cost, name='mean_loss')

        return loss


class TestSimpleNet(TestBackward):
    def test_backward(self):
        """
        Instantiate each NetClass to test backward.
        """
        self.global_block_idx = 0
        self.net = SimpleNet()
        self._check_all(self.net)


class TestGradientsError(unittest.TestCase):
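    """
    fluid.gradients should raise TypeError when variable names (or other
    invalid types) are passed where Variables are expected.
    """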
    def test_error(self):
        x = fluid.data(name='x', shape=[None, 2, 8, 8], dtype='float32')
        x.stop_gradient = False
        conv = fluid.layers.conv2d(x, 4, 1, bias_attr=False)
        y = fluid.layers.relu(conv)

        with self.assertRaises(TypeError):
            x_grad = fluid.gradients(y.name, x)

        with self.assertRaises(TypeError):
            x_grad = fluid.gradients(y, x.name)

        with self.assertRaises(TypeError):
            x_grad = fluid.gradients([y], [x], target_gradients=x.name)

        with self.assertRaises(TypeError):
            x_grad = fluid.gradients([y], x, no_grad_set=conv)


class TestSimpleNetWithErrorParamList(TestBackward):
    def test_parameter_list_type_error(self):
        self.global_block_idx = 0
        self.net = SimpleNet()
        # The type of parameter_list argument must be list or tuple
        with self.assertRaises(TypeError):
            self._check_error_param_list(self.net, "test")
        # The type of parameter_list's member must be Variable or str
        test = fluid.data(name='test', shape=[None, 90], dtype='float32')
        with self.assertRaises(TypeError):
            self._check_error_param_list(self.net, [test, "test", 3])


class TestSimpleNetWithErrorNoGradSet(TestBackward):
    def test_no_grad_set_type_error(self):
        self.global_block_idx = 0
        self.net = SimpleNet()
        # The type of no_grad_set argument must be set or list or tuple
        with self.assertRaises(TypeError):
            self._check_error_no_grad_set(self.net, "test")
        # The type of no_grad_set's member must be Variable or str
        test = fluid.data(name='test', shape=[None, 90], dtype='float32')
        with self.assertRaises(TypeError):
            self._check_error_no_grad_set(self.net, [test, "test", 3])


class TestAppendBackwardWithError(unittest.TestCase):
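    """
    append_backward should raise TypeError for invalid loss, parameter_list,
    and callbacks arguments.
    """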
    def build_net(self):
        x = fluid.data(name='x', shape=[None, 13], dtype='int64')
        y = fluid.data(name='y', shape=[None, 1], dtype='float32')
        x_emb = fluid.embedding(x, size=[100, 256])
        y_predict = fluid.layers.fc(input=x_emb, size=1, name='my_fc')
        loss = fluid.layers.square_error_cost(input=y_predict, label=y)
        avg_loss = paddle.mean(loss)
        param_names = [
            param.name
            for param in fluid.default_main_program().block(0).all_parameters()
        ]

        return avg_loss, param_names

    def setUp(self):
        main_program = fluid.Program()
        with fluid.program_guard(main_program):
            self.avg_loss, self.param_names = self.build_net()

    def test_loss_type_error(self):
        with self.assertRaises(TypeError):
            fluid.backward.append_backward(loss=self.avg_loss.name)

    def test_parameter_list_type_error(self):
        with self.assertRaises(TypeError):
            self.param_names[0] = np.random.random([10])
            fluid.backward.append_backward(
                loss=self.avg_loss, parameter_list=self.param_names
            )

    def test_callback_type_error(self):
        with self.assertRaises(TypeError):

            def callback(block, context):
                return

            fluid.backward.append_backward(
                loss=self.avg_loss, callbacks=callback
            )


class TestGradientsWithOptimizer(unittest.TestCase):
    def _check_grad_op_name(self, forward_list, optimized_list):
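        """
        The appended backward ops should mirror the forward ops in reverse
        order, each with a `_grad` suffix.
        """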
        backward_list = [op + "_grad" for op in reversed(forward_list)]
        idx = optimized_list.index(backward_list[0], len(backward_list))

        self.assertListEqual(
            backward_list, optimized_list[idx : idx + len(backward_list)]
        )

    def test_gradient_with_optimizer(self):
        main = fluid.Program()
        startup = fluid.Program()

        with fluid.program_guard(main, startup):
            img = static.data(name='image', shape=[None, 784])
            pred = static.nn.fc(x=img, size=10, activation='relu')
            loss = paddle.mean(pred)
            opt = paddle.optimizer.Momentum(learning_rate=0.01, momentum=0.9)

            forward_list = [o.type for o in main.current_block().ops]
            (
                optimize_ops,
                param_grads,
            ) = paddle.autograd.backward_mode.gradients_with_optimizer(
                main, opt
            )

            optimized_list = [o.type for o in main.current_block().ops]

            self.assertGreater(len(optimized_list), len(forward_list))
            self.assertIn(opt.type, optimized_list)
            self._check_grad_op_name(forward_list, optimized_list)


# TODO(Aurelius84): add conditional network test
class ConditionalNet(BackwardNet):
    def __init__(self):
        super().__init__()


if __name__ == '__main__':
    paddle.enable_static()
    unittest.main()