#   Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import print_function

import numpy as np
import os
import unittest
import paddle
import paddle.fluid as fluid
import paddle.fluid.core as core
import paddle.fluid.layers as layers
import paddle.fluid.framework as framework
from paddle.fluid.backward import append_backward
from paddle.fluid.framework import Program, program_guard
from simple_nets import simple_fc_net_with_inputs, batchnorm_fc_with_inputs

np.random.seed(123)


class TestCondInputOutput(unittest.TestCase):

    def test_return_single_var(self):
        """
        pseudocode:

        if 0.23 < 0.1:
            return 2
        else:
            return -1
        """

        paddle.enable_static()

        def true_func():
            return layers.fill_constant(shape=[2, 3], dtype='int32', value=2)

        def false_func():
            return layers.fill_constant(shape=[3, 2], dtype='int32', value=-1)

        main_program = Program()
        startup_program = Program()
        with program_guard(main_program, startup_program):
            x = layers.fill_constant(shape=[1], dtype='float32', value=0.1)
            y = layers.fill_constant(shape=[1], dtype='float32', value=0.23)
            pred = layers.less_than(y, x)
            out = layers.cond(pred, true_func, false_func)
            # out is one tensor
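            # Both branch programs are built into main_program; because pred
            # evaluates to False here (0.23 < 0.1 does not hold), fetching out
            # yields the false branch's (3, 2) tensor of -1, as asserted below.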

        place = fluid.CUDAPlace(
            0) if core.is_compiled_with_cuda() else fluid.CPUPlace()
        exe = fluid.Executor(place)
        ret = exe.run(main_program, fetch_list=[out.name])
        self.assertTrue(
            np.allclose(np.asarray(ret), np.full((3, 2), -1, np.int32)))

    def test_return_var_tuple(self):
        """
        pseudocode:

        if True:
            return 1, True
        else:
            return 3, 2
        """

        paddle.enable_static()

        def true_func():
            return layers.fill_constant(shape=[1, 2], dtype='int32',
                                        value=1), layers.fill_constant(
                                            shape=[2, 3],
                                            dtype='bool',
                                            value=True)

        def false_func():
            return layers.fill_constant(shape=[3, 4], dtype='float32',
                                        value=3), layers.fill_constant(
                                            shape=[4, 5],
                                            dtype='int64',
                                            value=2)

        main_program = Program()
        startup_program = Program()
        with program_guard(main_program, startup_program):
            pred = layers.fill_constant(shape=[1], dtype='bool', value=True)
            out = layers.cond(pred, true_func, false_func)
            # out is a tuple containing 2 tensors
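            # With pred fixed to True, the true branch is selected at runtime,
            # so the fetched tuple holds a (1, 2) int32 tensor of ones and a
            # (2, 3) bool tensor of True, matching the assertions below.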

        place = fluid.CUDAPlace(
            0) if core.is_compiled_with_cuda() else fluid.CPUPlace()
        exe = fluid.Executor(place)
        ret = exe.run(main_program, fetch_list=out)
        self.assertTrue(
            np.allclose(np.asarray(ret[0]), np.full((1, 2), 1, np.int32)))
        self.assertTrue(
            np.allclose(np.asarray(ret[1]), np.full((2, 3), True, bool)))

    def test_pass_and_modify_var(self):
        """
        pseudocode:
        for i in range(5):
            a = 7
            if i % 2 == 0:
                a = a * (i + 1)
            else:
                a = a - (i - 1)
        """

        paddle.enable_static()

        def true_func(a, i):
            a = a * (i + 1)
            return a

        def false_func(a, i):
            a = a - (i - 1)
            return a

        main_program = Program()
        startup_program = Program()
        with program_guard(main_program, startup_program):
            a = layers.fill_constant(shape=[3, 2, 1], dtype='int32', value=7)
            i = fluid.data(name="i", shape=[1], dtype='int32')
            pred = ((i % 2) == 0)
            a = layers.cond(pred, lambda: true_func(a, i),
                            lambda: false_func(a, i))
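            # a and i are created outside the branches and captured by the
            # lambdas, so both branch programs read the same inputs; cond
            # returns the merged branch output, which rebinds a.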
        place = fluid.CUDAPlace(
            0) if core.is_compiled_with_cuda() else fluid.CPUPlace()
        exe = fluid.Executor(place)
        for feed_i in range(5):
            expected_a = 7 * (feed_i + 1) if feed_i % 2 == 0 else 8 - feed_i
            ret = exe.run(main_program,
                          feed={'i': np.full((1), feed_i, np.int32)},
                          fetch_list=[a])
            self.assertTrue(
                np.allclose(np.asarray(ret),
                            np.full((3, 2, 1), expected_a, np.int32)))

    def test_return_none(self):
        """
        pseudocode: test doing nothing in branches
        for i in range(5):
            if i % 2 == 0:
                pass
            else:
                pass
        """

        paddle.enable_static()

        def true_func():
            pass

        def false_func():
            return None

        main_program = Program()
        startup_program = Program()
        with program_guard(main_program, startup_program):
            i = fluid.data(name="i", shape=[1], dtype='int32')
            pred = ((i % 2) == 0)
            out1 = layers.cond(pred, true_func, false_func)
            out2 = layers.cond(pred, None, false_func)
            out3 = layers.cond(pred, true_func, None)
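            # A branch that is None or returns None produces no output, so
            # each of out1, out2 and out3 is expected to be None.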
        place = fluid.CUDAPlace(
            0) if core.is_compiled_with_cuda() else fluid.CPUPlace()
        exe = fluid.Executor(place)
        for feed_i in range(5):
            # Test that a program whose branch outputs are None is runnable
            exe.run(main_program, feed={'i': np.full((1), feed_i, np.int32)})
            self.assertIsNone(out1)
            self.assertIsNone(out2)
            self.assertIsNone(out3)

    def test_wrong_structure_exception(self):
        """
        test that returning different numbers of tensors from the two branches
        cannot be merged into the output
        """

        paddle.enable_static()

        def func_return_none():
            return None

        def func_return_one_tensor():
            return layers.fill_constant(shape=[2, 7], dtype='int32', value=3)

        def func_return_two_tensors():
            return layers.fill_constant(shape=[3, 1], dtype='int32',
                                        value=7), layers.fill_constant(
                                            shape=[3, 1],
                                            dtype='int32',
                                            value=8)

        main_program = Program()
        startup_program = Program()
        with program_guard(main_program, startup_program):
            i = fluid.data(name="i", shape=[1], dtype='int32')
            pred = ((i % 2) == 0)
            with self.assertRaises(TypeError):
                out = layers.cond(pred, i, func_return_one_tensor)

            with self.assertRaises(TypeError):
                out = layers.cond(pred, func_return_one_tensor, np.asarray([3]))

            with self.assertRaises(Exception) as e:
                out = layers.cond(pred, func_return_none,
                                  func_return_one_tensor)
            self.assertTrue(
                "Incompatible return values of true_fn and false_fn in cond" in
                str(e.exception))

            with self.assertRaises(Exception) as e:
                out = layers.cond(pred, func_return_two_tensors,
                                  func_return_none)
            self.assertTrue(
                "Incompatible return values of true_fn and false_fn in cond" in
                str(e.exception))

            with self.assertRaises(Exception) as e:
                out = layers.cond(pred, func_return_one_tensor,
                                  func_return_two_tensors)
            self.assertTrue(
                "true fn returns 1 vars, but false fn returns 2 vars, which is not equals"
                in str(e.exception))

    def test_extremely_simple_net_with_op_in_condition(self):
        paddle.enable_static()
        main_program = fluid.Program()
        startup_program = fluid.Program()
        with fluid.program_guard(main_program, startup_program):
            a = fluid.layers.fill_constant(shape=[1],
                                           dtype='float32',
                                           value=1.23)
            a.stop_gradient = False
            b = fluid.layers.fill_constant(shape=[1],
                                           dtype='float32',
                                           value=1.25)
            b.stop_gradient = False
            out = layers.cond(a - b < -1.0, lambda: a, lambda: b)
        append_backward(out)

        place = fluid.CUDAPlace(
            0) if core.is_compiled_with_cuda() else fluid.CPUPlace()
        exe = fluid.Executor(place)
        ret = exe.run(main_program,
                      fetch_list=[out, b, a.grad_name, b.grad_name])
        # Note: fill_constant can lose precision, so only assertEqual with
        # values that do not lose precision as floating-point numbers.
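        # Here a - b == -0.02 is not < -1.0, so the false branch runs and
        # out is b; hence d(out)/d(a) == 0.0 and d(out)/d(b) == 1.0.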
        self.assertEqual(ret[0][0], ret[1][0])
        self.assertEqual(ret[2][0], 0.0)
        self.assertEqual(ret[3][0], 1.0)


class TestCondNestedControlFlow(unittest.TestCase):

    def test_cond_inside_cond(self):
        """
        pseudocode:
        for i in range(1, 10):
            a = 2 * i
            if i < 5:
                if i >= 3:
                    return a + a 
                else:
                    return a - a
            else:
                if i < 8:
                    return a * a
                else:
                    return a / a
        """

        paddle.enable_static()

        def less_than_branch(i, a):
            return layers.cond(i >= 3.0, lambda: layers.elementwise_add(a, a),
                               lambda: layers.elementwise_sub(a, a))

        def greater_equal_branch(i, a):
            return layers.cond(i < 8.0, lambda: layers.elementwise_mul(a, a),
                               lambda: layers.elementwise_div(a, a))

        main_program = Program()
        startup_program = Program()
        with program_guard(main_program, startup_program):
            i = fluid.data(name="i", shape=[1], dtype='float32')
            a = 2.0 * i
            out = layers.cond(i < 5.0, lambda: less_than_branch(i, a),
                              lambda: greater_equal_branch(i, a))
            mean = paddle.mean(out)
            append_backward(mean)

        place = fluid.CUDAPlace(
            0) if core.is_compiled_with_cuda() else fluid.CPUPlace()
        exe = fluid.Executor(place)
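        # Expected values per branch (mean over one element is the identity):
        #   0 <= i < 3: out = a - a = 0,  d(out)/d(a) = 0
        #   3 <= i < 5: out = a + a,      d(out)/d(a) = 2
        #   5 <= i < 8: out = a * a,      d(out)/d(a) = 2 * a
        #   i >= 8:     out = a / a = 1,  d(out)/d(a) = 0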
        for feed_i in range(0, 10):
            expected_a = 2.0 * feed_i
            if feed_i < 5:
                expected_ret = expected_a + expected_a if feed_i >= 3 else 0.0
                expected_a_grad = 2.0 if feed_i >= 3 else 0.0
            else:
                expected_ret = expected_a * expected_a if feed_i < 8 else 1.0
                expected_a_grad = 2.0 * expected_a if feed_i < 8 else 0.0
            ret = exe.run(main_program,
                          feed={'i': np.full((1), feed_i, np.float32)},
                          fetch_list=[out.name, a.grad_name])
            self.assertEqual(ret[0][0], expected_ret)
            self.assertEqual(ret[1][0], expected_a_grad)

    def test_cond_op_in_condition(self):
        paddle.enable_static()
        main_program = fluid.Program()
        startup_program = fluid.Program()

        with fluid.program_guard(main_program, startup_program):
            a = fluid.layers.fill_constant(shape=[1],
                                           dtype='float32',
                                           value=1.23)
            a.stop_gradient = False
            b = fluid.layers.fill_constant(shape=[1],
                                           dtype='float32',
                                           value=1.24)
            b.stop_gradient = False
            out = fluid.layers.cond(
                a < b, lambda: fluid.layers.cond(
                    a - b < -1.0, lambda: fluid.layers.elementwise_add(a, b),
                    lambda: fluid.layers.elementwise_mul(a, b)), lambda:
                fluid.layers.cond(a == b, lambda: fluid.layers.elementwise_sub(
                    a, b), lambda: fluid.layers.elementwise_pow(a, b)))
            append_backward(out)

        place = fluid.CUDAPlace(
            0) if core.is_compiled_with_cuda() else fluid.CPUPlace()
        exe = fluid.Executor(place)
        ret = exe.run(main_program, fetch_list=[out, a.grad_name, b.grad_name])
        # Note: fill_constant can lose precision, so we use assertAlmostEqual.
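        # a < b holds and a - b == -0.01 is not < -1.0, so the selected branch
        # computes out = a * b = 1.23 * 1.24 = 1.5252, with gradients
        # d(out)/d(a) = b = 1.24 and d(out)/d(b) = a = 1.23.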
        self.assertAlmostEqual(ret[0][0], 1.5252)
        self.assertAlmostEqual(ret[1][0], 1.24)
        self.assertAlmostEqual(ret[2][0], 1.23)


class TestCondBackward(unittest.TestCase):

    def backward_value_helper(self, cond_func, use_cuda, use_parallel_exe):
        """
        Helper function that checks that the computed backward gradient is
        close to the numerical dy/dx
        """
        paddle.enable_static()
        main_program = Program()
        main_program.random_seed = 123
        startup_program = Program()
        startup_program.random_seed = 123
        with program_guard(main_program, startup_program):
            img = fluid.data(name='image', shape=[-1, 9], dtype='float32')
            img.stop_gradient = False
            label = fluid.data(name='label', shape=[-1, 1], dtype='int64')
            i = fluid.data(name="i", shape=[1], dtype='int32')
            loss = cond_func(i, img, label)
            append_backward(loss)
        place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
        exe = fluid.Executor(place)
        exe.run(startup_program)

        num_devices = 1
        if use_parallel_exe:
            os.environ['CPU_NUM'] = str(2)
            exe = fluid.ParallelExecutor(use_cuda=use_cuda,
                                         main_program=main_program,
                                         loss_name=loss.name)
            num_devices = exe.device_count

        delta = 0.005
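        # Numerical gradient by forward finite differences: perturb each of
        # the 9 input elements by delta and compare
        # (loss(x + delta) - loss(x)) / delta against the analytic gradient.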
        for feed_i in range(0, 10):
            feed_img = np.random.random(size=[1, 9]).astype(np.float32)
            feed_label = np.random.randint(low=0,
                                           high=10,
                                           size=[1, 1],
                                           dtype=np.int64)
            if use_parallel_exe:
                img_grad, loss_value = exe.run(
                    feed={
                        'i': np.full((num_devices), feed_i, np.int32),
                        'image': np.repeat(feed_img, num_devices, axis=0),
                        'label': np.repeat(feed_label, num_devices, axis=0)
                    },
                    fetch_list=[img.grad_name, loss.name])
            else:
                img_grad, loss_value = exe.run(
                    main_program,
                    feed={
                        'i': np.full((1), feed_i, np.int32),
                        'image': feed_img,
                        'label': feed_label
                    },
                    fetch_list=[img.grad_name, loss.name])

            numerical_grad = np.zeros(shape=[num_devices, 9], dtype=np.float32)
            feed_img_delta = np.copy(feed_img)
            for j in range(9):
                feed_img_delta[0][j] = feed_img[0][j] + delta
                if use_parallel_exe:
                    loss_delta = exe.run(feed={
                        'i':
                        np.full((num_devices), feed_i, np.int32),
                        'image':
                        np.repeat(feed_img_delta, num_devices, axis=0),
                        'label':
                        np.repeat(feed_label, num_devices, axis=0)
                    },
                                         fetch_list=[loss.name])
                    multi_device_grad = (loss_delta[0] -
                                         loss_value[0]) / delta / num_devices
                    for d in range(num_devices):
                        numerical_grad[d][j] = multi_device_grad[d]
                else:
                    loss_delta = exe.run(main_program,
                                         feed={
                                             'i': np.full((1), feed_i,
                                                          np.int32),
                                             'image': feed_img_delta,
                                             'label': feed_label
                                         },
                                         fetch_list=[loss.name])
                    numerical_grad[0][j] = (loss_delta[0] -
                                            loss_value[0]) / delta
                feed_img_delta[0][j] = feed_img[0][j]
            self.assertTrue(
                np.isclose(img_grad, numerical_grad, atol=0.05,
                           rtol=0.05).all())

    def add_optimizer_helper(self, cond_func, use_cuda, use_parallel_exe):
        """
        Test that the program is runnable when an optimizer is added
        """
        main_program = Program()
        startup_program = Program()
        with program_guard(main_program, startup_program):
            img = fluid.data(name='image', shape=[-1, 784], dtype='float32')
            label = fluid.data(name='label', shape=[-1, 1], dtype='int64')
            i = fluid.data(name="i", shape=[1], dtype='int32')
            loss = cond_func(i, img, label)
            optimizer = fluid.optimizer.SGD(learning_rate=0.1)
            optimizer.minimize(loss)

        place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
        exe = fluid.Executor(place)
        exe.run(startup_program)
        if use_parallel_exe:
            os.environ['CPU_NUM'] = str(2)
            exe = fluid.ParallelExecutor(use_cuda=use_cuda,
                                         main_program=main_program,
                                         loss_name=loss.name)
            num_devices = exe.device_count

        for feed_i in range(0, 10):
            feed_img = np.random.random(size=[16, 784]).astype(np.float32)
            feed_label = np.random.randint(low=0,
                                           high=10,
                                           size=[16, 1],
                                           dtype=np.int64)
            if use_parallel_exe:
                exe.run(feed={
                    'i': np.full((num_devices), feed_i, np.int32),
                    'image': np.repeat(feed_img, num_devices, axis=0),
                    'label': np.repeat(feed_label, num_devices, axis=0)
                },
                        fetch_list=[loss.name])
            else:
                exe.run(main_program,
                        feed={
                            'i': np.full((1), feed_i, np.int32),
                            'image': feed_img,
                            'label': feed_label
                        },
                        fetch_list=[loss])

    def test_cond_backward(self):
        paddle.enable_static()

        def cond_func(i, img, label):
            predicate = ((i % 2) == 0)
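            # Even i runs a plain FC net and odd i runs a batch-norm FC net,
            # so both branch types are exercised in the backward check.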
            return layers.cond(
                predicate,
                lambda: simple_fc_net_with_inputs(img, label, class_num=10),
                lambda: batchnorm_fc_with_inputs(img, label, class_num=10))

        for use_parallel_exe in [False, True]:
            if use_parallel_exe and os.name == "nt":
                print(
                    "Skip use_parallel_exe=True in Windows because of flaky test when using PE under old Windows machine"
                )
                continue

            self.backward_value_helper(cond_func, core.is_compiled_with_cuda(),
                                       use_parallel_exe)
            self.add_optimizer_helper(cond_func, core.is_compiled_with_cuda(),
                                      use_parallel_exe)

    def test_half_nested_cond_backward(self):
        paddle.enable_static()

        def branch(i, img, label):
            return layers.cond(
                (i % 2) == 0,
                lambda: simple_fc_net_with_inputs(img, label, class_num=10),
                lambda: batchnorm_fc_with_inputs(img, label, class_num=10))

        def cond_func_simple_net_at_true(i, img, label):
            return layers.cond(i < 5, lambda: branch(i, img, label),
                               lambda: paddle.mean(img))

        def cond_func_simple_net_at_false(i, img, label):
            return layers.cond(i < 5, lambda: paddle.mean(img),
                               lambda: branch(i, img, label))

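        # Each variant places the network in only one branch of the cond,
        # with a bare mean op in the other, exercising the half-nested
        # backward path from both sides.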
        for use_parallel_exe in [False, True]:
            if use_parallel_exe and os.name == "nt":
                print(
                    "Skip use_parallel_exe=True in Windows because of flaky test when using PE under old Windows machine"
                )
                continue

            self.backward_value_helper(cond_func_simple_net_at_true,
                                       core.is_compiled_with_cuda(),
                                       use_parallel_exe)
            self.add_optimizer_helper(cond_func_simple_net_at_true,
                                      core.is_compiled_with_cuda(),
                                      use_parallel_exe)
            self.backward_value_helper(cond_func_simple_net_at_false,
                                       core.is_compiled_with_cuda(),
                                       use_parallel_exe)
            self.add_optimizer_helper(cond_func_simple_net_at_false,
                                      core.is_compiled_with_cuda(),
                                      use_parallel_exe)

    def test_nested_cond_backward(self):
        paddle.enable_static()

        def branch(i, img, label, mod_two):
            if mod_two:
                predicate = ((i % 2) == 0)
            else:
                predicate = ((i % 2) != 0)
            return layers.cond(
                predicate,
                lambda: simple_fc_net_with_inputs(img, label, class_num=10),
                lambda: batchnorm_fc_with_inputs(img, label, class_num=10))

        def cond_func(i, img, label):
            return layers.cond(i < 5, lambda: branch(i, img, label, True),
                               lambda: branch(i, img, label, False))

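        # The outer cond selects on i < 5 and each inner cond selects on the
        # parity of i, so all four branch combinations are hit as feed_i
        # sweeps 0..9 in the helpers.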
        for use_parallel_exe in [False, True]:
            if use_parallel_exe and os.name == "nt":
                print(
                    "Skip use_parallel_exe=True in Windows because of flaky test when using PE under old Windows machine"
                )
                continue
            self.backward_value_helper(cond_func, core.is_compiled_with_cuda(),
                                       use_parallel_exe)
            self.add_optimizer_helper(cond_func, core.is_compiled_with_cuda(),
                                      use_parallel_exe)


class TestCondWithError(unittest.TestCase):

    def test_input_type_error(self):
        paddle.enable_static()
        main_program = framework.Program()
        startup_program = framework.Program()
        with framework.program_guard(main_program, startup_program):
            pred = fluid.data(name='y', shape=[1], dtype='bool')

            def func():
                return pred

            with self.assertRaises(TypeError):
                layers.cond(None, func, func)

            with self.assertRaises(TypeError):
                layers.cond(pred, func, set())

            with self.assertRaises(TypeError):
                layers.cond(pred, set(), func)

            with self.assertRaises(TypeError):
                layers.cond(pred, func, func, set())


if __name__ == '__main__':
    unittest.main()