#   Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import print_function

import unittest

import paddle
import paddle.fluid as fluid
import paddle.fluid.framework as framework
import paddle.compat as cpt
import numpy as np
import os
import contextlib


class TestPrune(unittest.TestCase):
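    """Tests for Program._prune and Program._prune_with_input."""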

    def net(self):
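        """Build a small fc+softmax classifier with a cross-entropy loss."""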
        x = fluid.layers.data(name='x', shape=[2], dtype='float32')
        label = fluid.layers.data(name="label", shape=[1], dtype="int64")
        y = fluid.layers.fc(input=[x], size=2, act="softmax")
        loss = fluid.layers.cross_entropy(input=y, label=label)
        loss = paddle.mean(x=loss)
        return x, y, label, loss

    def test_prune_with_input(self):
        program = framework.Program()
        startup_program = framework.Program()
        block = program.global_block()
        with fluid.program_guard(program, startup_program):
            (x, y, label, loss) = self.net()
        self.assertEqual(len(block.ops), 5)
        self.assertEqual([op.type for op in block.ops], [
            "mul", "elementwise_add", "softmax", "cross_entropy2", "reduce_mean"
        ])
        pruned_program = program._prune_with_input(
            feeded_var_names=[y.name, label.name], targets=[loss])
        self.assertEqual(len(pruned_program.global_block().ops), 2)
        self.assertEqual([op.type for op in pruned_program.global_block().ops],
                         ["cross_entropy2", "reduce_mean"])

    def test_prune(self):
        program = framework.Program()
        startup_program = framework.Program()
        block = program.global_block()
        with fluid.program_guard(program, startup_program):
            (x, y, label, loss) = self.net()
        self.assertEqual(len(block.ops), 5)
        self.assertEqual([op.type for op in block.ops], [
            "mul", "elementwise_add", "softmax", "cross_entropy2", "reduce_mean"
        ])
        pruned_program = program._prune(targets=[loss])
        self.assertEqual(len(pruned_program.global_block().ops), 5)
        self.assertEqual([op.type for op in pruned_program.global_block().ops],
                         [
                             "mul", "elementwise_add", "softmax",
                             "cross_entropy2", "reduce_mean"
                         ])

    def test_prune_target_not_list(self):
        program = framework.Program()
        startup_program = framework.Program()
        block = program.global_block()
        with fluid.program_guard(program, startup_program):
            (x, y, label, loss) = self.net()
        self.assertEqual(len(block.ops), 5)
        self.assertEqual([op.type for op in block.ops], [
            "mul", "elementwise_add", "softmax", "cross_entropy2", "reduce_mean"
        ])
        pruned_program = program._prune(targets=loss)
        self.assertEqual(len(pruned_program.global_block().ops), 5)
        self.assertEqual([op.type for op in pruned_program.global_block().ops],
                         [
                             "mul", "elementwise_add", "softmax",
                             "cross_entropy2", "reduce_mean"
                         ])

    def test_prune_target_none(self):
        program = framework.Program()
        startup_program = framework.Program()
        block = program.global_block()
        with fluid.program_guard(program, startup_program):
            (x, y, label, loss) = self.net()
        self.assertEqual(len(block.ops), 5)
        self.assertEqual([op.type for op in block.ops], [
            "mul", "elementwise_add", "softmax", "cross_entropy2", "reduce_mean"
        ])
        with self.assertRaises(ValueError) as cm:
            program._prune(targets=None)
        self.assertIn(
            "All targets of Program._prune_with_input() can only be Variable or Operator",
            cpt.get_exception_message(cm.exception))


def mock(self, program, feed, fetch, optimize_ops):
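    """Stand-in for Executor._prune_program: counts calls, returns the program unpruned."""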
    self.prune_called_times += 1
    return program


@contextlib.contextmanager
def _mock_guard(mock):
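    """Temporarily replace Executor._prune_program with the given mock."""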
    original = fluid.Executor._prune_program
    fluid.Executor._prune_program = mock
    try:
        yield
    finally:
        fluid.Executor._prune_program = original


class TestExecutorRunAutoPrune(unittest.TestCase):
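    """Tests for automatic pruning in Executor.run when use_prune=True."""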

    def net1(self):
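        """Build one fc+softmax branch with two cross-entropy losses on the same output."""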
        x = fluid.layers.data(name='x', shape=[2], dtype='float32')
        label = fluid.layers.data(name="label", shape=[1], dtype="int64")
        w_param_attrs = fluid.ParamAttr(
            name="fc_weight",
            learning_rate=0.5,
            initializer=fluid.initializer.Constant(1.0),
            trainable=True)
        y = fluid.layers.fc(input=[x],
                            size=2,
                            act="softmax",
                            param_attr=w_param_attrs)
        loss1 = fluid.layers.cross_entropy(input=y, label=label)
        loss1 = paddle.mean(x=loss1)
        loss2 = fluid.layers.cross_entropy(input=y, label=label)
        loss2 = paddle.mean(x=loss2)
        loss1.persistable = True
        loss2.persistable = True
        return x, y, label, loss1, loss2, w_param_attrs

    def net2(self):
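        """Build two independent fc+softmax branches, each with its own loss."""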
        x1 = fluid.layers.data(name='x1', shape=[2], dtype='float32')
        x2 = fluid.layers.data(name='x2', shape=[2], dtype='float32')
        label = fluid.layers.data(name="label", shape=[1], dtype="int64")
        w1_param_attrs = fluid.ParamAttr(
            name="fc_weight1",
            learning_rate=0.5,
            initializer=fluid.initializer.Constant(1.0),
            trainable=True)
        w2_param_attrs = fluid.ParamAttr(
            name="fc_weight2",
            learning_rate=0.5,
            initializer=fluid.initializer.Constant(1.0),
            trainable=True)
        y1 = fluid.layers.fc(input=[x1],
                             size=2,
                             act="softmax",
                             param_attr=w1_param_attrs)
        y2 = fluid.layers.fc(input=[x2],
                             size=2,
                             act="softmax",
                             param_attr=w2_param_attrs)
        loss1 = fluid.layers.cross_entropy(input=y1, label=label)
        loss1 = paddle.mean(x=loss1)
        loss2 = fluid.layers.cross_entropy(input=y2, label=label)
        loss2 = paddle.mean(x=loss2)
        return x1, x2, y1, y2, label, loss1, loss2, w1_param_attrs, w2_param_attrs

    def test_not_prune(self):
        """
        If use_prune is False, targets that are not fetched are still computed.
        """
        program = framework.Program()
        startup_program = framework.Program()
        scope = fluid.Scope()
        with fluid.scope_guard(scope):
            with fluid.program_guard(program, startup_program):
                (x, y, label, loss1, loss2, w_param_attrs) = self.net1()
                exe = fluid.Executor(fluid.CPUPlace())
                exe.run(startup_program)
                x_np = np.random.random(size=(10, 2)).astype('float32')
                label_np = np.random.randint(1, size=(10, 1)).astype('int64')
                res = exe.run(program,
                              feed={
                                  'x': x_np,
                                  'label': label_np
                              },
                              fetch_list=[loss1.name],
                              use_prune=False)
                self.assertIsNotNone(scope.find_var(loss1.name))
                self.assertIsNotNone(scope.find_var(loss2.name))

    def test_prune_fetches_without_optimizer(self):
        """
197
        Prune operators and variables which are not needed to generate 'fetches'.
198 199 200 201 202 203 204 205 206 207 208 209 210 211
        """
        program = framework.Program()
        startup_program = framework.Program()
        scope = fluid.Scope()
        with fluid.scope_guard(scope):
            with fluid.program_guard(program, startup_program):
                (x, y, label, loss1, loss2, w_param_attrs) = self.net1()
                exe = fluid.Executor(fluid.CPUPlace())
                exe.run(startup_program)
                weight_init = np.array(
                    scope.find_var(w_param_attrs.name).get_tensor())
                x_np = np.random.random(size=(10, 2)).astype('float32')
                label_np = np.random.randint(1, size=(10, 1)).astype('int64')
                res = exe.run(program,
                              feed={
                                  'x': x_np,
                                  'label': label_np
                              },
                              fetch_list=[loss1.name],
                              use_prune=True)
                self.assertIsNotNone(scope.find_var(loss1.name))
                self.assertIsNone(scope.find_var(loss2.name))  # loss2 is pruned
                weight = np.array(
                    scope.find_var(w_param_attrs.name).get_tensor())
                np.testing.assert_array_equal(weight_init,
                                              weight)  # weight not changed

    def test_prune_fetches_with_optimizer(self):
        """
227
        Prune operators and operators which are not needed to generate 'fetches'.
228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244
        In train mode, the operators and operators in backward and optimization should be kept.
        """
        program = framework.Program()
        startup_program = framework.Program()
        scope = fluid.Scope()
        with fluid.scope_guard(scope):
            with fluid.program_guard(program, startup_program):
                (x, y, label, loss1, loss2, w_param_attrs) = self.net1()
                sgd_optimizer = fluid.optimizer.SGD(learning_rate=0.5)
                sgd_optimizer.minimize(loss1)
                exe = fluid.Executor(fluid.CPUPlace())
                exe.run(startup_program)
                weight_init = np.array(
                    scope.find_var(w_param_attrs.name).get_tensor())
                x_np = np.random.random(size=(10, 2)).astype('float32')
                label_np = np.random.randint(1, size=(10, 1)).astype('int64')
                res = exe.run(program,
                              feed={
                                  'x': x_np,
                                  'label': label_np
                              },
                              fetch_list=[loss1.name],
                              use_prune=True)
                self.assertIsNotNone(scope.find_var(loss1.name))
                self.assertIsNone(scope.find_var(loss2.name))  # loss2 is pruned
                weight = np.array(
                    scope.find_var(w_param_attrs.name).get_tensor())
                self.assertFalse(np.array_equal(weight_init,
                                                weight))  # weight changed

    def test_prune_compiled_program(self):
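        """
        Pruning should also take effect when running a CompiledProgram.
        """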
        program = framework.Program()
        startup_program = framework.Program()
        scope = fluid.Scope()
        with fluid.scope_guard(scope):
            with fluid.program_guard(program, startup_program):
                (x, y, label, loss1, loss2, w_param_attrs) = self.net1()
                sgd_optimizer = fluid.optimizer.SGD(learning_rate=0.5)
                sgd_optimizer.minimize(loss1)
                exe = fluid.Executor(fluid.CPUPlace())
                exe.run(startup_program)
                compiled_prog = fluid.CompiledProgram(
                    program).with_data_parallel(loss_name=loss1.name,
                                                places=fluid.CPUPlace())
                weight_init = np.array(
                    scope.find_var(w_param_attrs.name).get_tensor())
                x_np = np.random.random(size=(10, 2)).astype('float32')
                label_np = np.random.randint(1, size=(10, 1)).astype('int64')
                res = exe.run(compiled_prog,
                              feed={
                                  'x': x_np,
                                  'label': label_np
                              },
                              fetch_list=[loss1.name],
                              use_prune=True)
                self.assertIsNotNone(scope.find_var(loss1.name))
                self.assertIsNone(scope.find_var(loss2.name))
                weight = np.array(
                    scope.find_var(w_param_attrs.name).get_tensor())
                self.assertFalse(np.array_equal(weight_init,
                                                weight))  # weight changed

    def test_prune_feed_without_optimizer(self):
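        """
        When an intermediate variable (y) is fed, the ops producing it are pruned,
        so the fc weight stays at its initial value.
        """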
        program = framework.Program()
        startup_program = framework.Program()
        scope = fluid.Scope()
        with fluid.scope_guard(scope):
            with fluid.program_guard(program, startup_program):
                (x, y, label, loss1, loss2, w_param_attrs) = self.net1()
                exe = fluid.Executor(fluid.CPUPlace())
                exe.run(startup_program)
                weight_init = np.array(
                    scope.find_var(w_param_attrs.name).get_tensor())
                x_np = np.random.random(size=(10, 2)).astype('float32')
                label_np = np.random.randint(1, size=(10, 1)).astype('int64')
                res = exe.run(program,
                              feed={
                                  y.name: x_np,
                                  'label': label_np
                              },
                              fetch_list=[loss1.name],
                              use_prune=True)
                self.assertIsNotNone(scope.find_var(loss1.name))
                self.assertIsNone(scope.find_var(loss2.name))
                weight = np.array(
                    scope.find_var(w_param_attrs.name).get_tensor())
                np.testing.assert_array_equal(weight_init,
                                              weight)  # weight unchanged

    def test_prune_feed_with_optimizer(self):
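        """
        Feeding an intermediate variable (y) prunes ops that the optimizer on loss1
        depends on, so Executor.run is expected to raise an exception.
        """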
        program = framework.Program()
        startup_program = framework.Program()
        scope = fluid.Scope()
        with fluid.scope_guard(scope):
            with fluid.program_guard(program, startup_program):
                (x, y, label, loss1, loss2, w_param_attrs) = self.net1()
                sgd_optimizer = fluid.optimizer.SGD(learning_rate=0.5)
                sgd_optimizer.minimize(loss1)
                exe = fluid.Executor(fluid.CPUPlace())
                exe.run(startup_program)
                x_np = np.random.random(size=(10, 2)).astype('float32')
                label_np = np.random.randint(1, size=(10, 1)).astype('int64')
                self.assertRaises(Exception,
                                  exe.run,
                                  program,
                                  feed={
                                      y.name: x_np,
                                      'label': label_np
                                  },
                                  fetch_list=[loss1.name],
                                  use_prune=True)
                self.assertIsNotNone(scope.find_var(loss1.name))
                self.assertIsNone(scope.find_var(loss2.name))

    def test_prune_with_cache_program(self):
        '''
        When use_prune=True, the Executor should cache the pruned program.
        If the program, feed, and fetch do not change in the next run, the Executor
        uses the cached pruned program and need not call _prune_program() again.
        In this test, we patch Executor._prune_program with a mock that does nothing
        but increment Executor.prune_called_times, and check that prune_called_times
        equals 1 even after calling exe.run() 10 times with the same arguments.
        '''
        with _mock_guard(mock):
            exe = fluid.Executor(fluid.CPUPlace())
            exe.prune_called_times = 0
            program = framework.Program()
            startup_program = framework.Program()
            scope = fluid.Scope()
            with fluid.scope_guard(scope):
                with fluid.program_guard(program, startup_program):
                    (x, y, label, loss1, loss2, w_param_attrs) = self.net1()
                    sgd_optimizer = fluid.optimizer.SGD(learning_rate=0.5)
                    sgd_optimizer.minimize(loss1)
                    exe.run(startup_program)
                    x_np = np.random.random(size=(10, 2)).astype('float32')
                    label_np = np.random.randint(1,
                                                 size=(10, 1)).astype('int64')
                    for i in range(10):
                        res = exe.run(program,
                                      feed={
                                          'x': x_np,
                                          'label': label_np
                                      },
                                      fetch_list=[loss1.name],
                                      use_prune=True)
                        # after the first run, the cached pruned program is reused
                        self.assertEqual(exe.prune_called_times, 1)

    def test_prune_with_cache_program2(self):
        '''
        When use_prune=True, the Executor should cache the pruned program.
        If the only difference between runs is the optimize_ops in fetch_list,
        the cache keys should differ and yield different pruned programs.
        '''
        with _mock_guard(mock):
            exe = fluid.Executor(fluid.CPUPlace())
            exe.prune_called_times = 0
            program = framework.Program()
            startup_program = framework.Program()
            scope = fluid.Scope()
            with fluid.scope_guard(scope):
                with fluid.program_guard(program, startup_program):
                    (x1, x2, y1, y2, label, loss1, loss2, w1_param_attrs,
                     w2_param_attrs) = self.net2()
                    adam_optimizer1 = fluid.optimizer.AdamOptimizer(
                        learning_rate=0.5)
                    train1 = adam_optimizer1.minimize(loss1)
                    adam_optimizer2 = fluid.optimizer.AdamOptimizer(
                        learning_rate=0.5)
                    train2 = adam_optimizer2.minimize(loss2)
                    exe.run(startup_program)
                    x_np = np.random.random(size=(10, 2)).astype('float32')
                    label_np = np.random.randint(1,
                                                 size=(10, 1)).astype('int64')

                    for i in range(10):
                        if i % 2:
                            res = exe.run(program,
                                          feed={
                                              'x1': x_np,
                                              'x2': x_np,
                                              'label': label_np
                                          },
                                          fetch_list=[loss1, loss2, train1],
                                          use_prune=True)
                        else:
                            res = exe.run(program,
                                          feed={
                                              'x1': x_np,
                                              'x2': x_np,
                                              'label': label_np
                                          },
                                          fetch_list=[loss1, loss2, train2],
                                          use_prune=True)
                        if i == 0:
                            self.assertEqual(exe.prune_called_times, 1)
                        else:
                            # both cache entries exist after the second run
                            self.assertEqual(exe.prune_called_times, 2)

    def test_prune_with_cache_compiled_program(self):
        '''
        When use_prune=True, the Executor should cache the pruned program.
        If the program, feed, and fetch do not change in the next run, the Executor
        uses the cached pruned program and need not call _prune_program() again.
        In this test, we patch Executor._prune_program with a mock that does nothing
        but increment Executor.prune_called_times, and check that prune_called_times
        equals 1 even after calling exe.run() 10 times with the same arguments.
        '''
        with _mock_guard(mock):
            exe = fluid.Executor(fluid.CPUPlace())
            exe.prune_called_times = 0
            program = framework.Program()
            startup_program = framework.Program()
            scope = fluid.Scope()
            with fluid.scope_guard(scope):
                with fluid.program_guard(program, startup_program):
                    (x, y, label, loss1, loss2, w_param_attrs) = self.net1()
                    sgd_optimizer = fluid.optimizer.SGD(learning_rate=0.5)
                    sgd_optimizer.minimize(loss1)
                    exe.run(startup_program)
                    x_np = np.random.random(size=(10, 2)).astype('float32')
                    label_np = np.random.randint(1,
                                                 size=(10, 1)).astype('int64')
                    compiled_prog = fluid.CompiledProgram(
                        program).with_data_parallel(loss_name=loss1.name,
                                                    places=fluid.CPUPlace())
                    for i in range(10):
                        res = exe.run(compiled_prog,
                                      feed={
                                          'x': x_np,
                                          'label': label_np
                                      },
                                      fetch_list=[loss1.name],
                                      use_prune=True)
                        # after the first run, the cached pruned program is reused
                        self.assertEqual(exe.prune_called_times, 1)

    def test_prune_with_multi_optimizers(self):
        '''
        If there are multiple optimizers in the program, we can run a specific one
        by passing the result of optimizer.minimize() in fetch_list.
        '''
        exe = fluid.Executor(fluid.CPUPlace())
        program = framework.Program()
        startup_program = framework.Program()
        scope = fluid.Scope()
        # do not use_prune
        with fluid.scope_guard(scope):
            with fluid.program_guard(program, startup_program):
                (x, y, label, loss1, loss2, w_param_attrs) = self.net1()
                sgd_optimizer = fluid.optimizer.SGD(learning_rate=0.5)
                train1, _ = sgd_optimizer.minimize(loss1)
                cloned_program = program.clone()
                train2, _ = sgd_optimizer.minimize(loss2)
                exe.run(startup_program)
                x_np = np.random.random(size=(10, 2)).astype('float32')
                label_np = np.random.randint(1, size=(10, 1)).astype('int64')
                res = exe.run(program,
                              feed={
                                  'x': x_np,
                                  'label': label_np
                              },
                              fetch_list=[loss1.name],
                              use_prune=False)
                weight_without_prune = np.array(
                    scope.find_var(w_param_attrs.name).get_tensor())

        scope = fluid.Scope()
        # use_prune
        with fluid.scope_guard(scope):
            exe.run(startup_program)
            res = exe.run(program,
                          feed={
                              'x': x_np,
                              'label': label_np
                          },
                          fetch_list=[loss1.name, train1],
                          use_prune=True)
            weight_with_prune = np.array(
                scope.find_var(w_param_attrs.name).get_tensor())

        # expected
        scope = fluid.Scope()
        with fluid.scope_guard(scope):
            exe.run(startup_program)
            exe.run(cloned_program,
                    feed={
                        'x': x_np,
                        'label': label_np
                    },
                    fetch_list=[loss1.name],
                    use_prune=False)
            weight_expected = np.array(
                scope.find_var(w_param_attrs.name).get_tensor())

        np.testing.assert_array_equal(weight_with_prune, weight_expected)
        self.assertFalse(np.array_equal(weight_without_prune, weight_expected))

    def test_prune_with_multi_devices(self):
        '''
        When training a model with multiple devices, the pruned CompiledProgram
        should share the same local scopes. This test checks the correctness.
        '''
        exe = fluid.Executor(fluid.CPUPlace())
        program = framework.Program()
        startup_program = framework.Program()
        scope = fluid.Scope()
        os.environ['CPU_NUM'] = str(2)
        # do not use_prune
        with fluid.scope_guard(scope):
            with fluid.program_guard(program, startup_program):
                (x1, x2, y1, y2, label, loss1, loss2, w1_param_attrs,
                 w2_param_attrs) = self.net2()
                adam_optimizer1 = fluid.optimizer.AdamOptimizer(
                    learning_rate=0.5)
                train1 = adam_optimizer1.minimize(loss1)
                cloned_program = program.clone()
                adam_optimizer2 = fluid.optimizer.AdamOptimizer(
                    learning_rate=0.5)
                train2 = adam_optimizer2.minimize(loss2)
                exe.run(startup_program)
                x_np = np.random.random(size=(10, 2)).astype('float32')
                label_np = np.random.randint(1, size=(10, 1)).astype('int64')
                compiled_prog1 = fluid.CompiledProgram(
                    program).with_data_parallel(loss_name=loss1.name,
                                                places=[fluid.CPUPlace()] * 2)
                compiled_prog2 = fluid.CompiledProgram(
                    program).with_data_parallel(loss_name=loss2.name,
                                                places=[fluid.CPUPlace()] * 2)
                for i in range(10):
                    if i % 2 == 1:
                        res = exe.run(compiled_prog1,
                                      feed=[{
                                          'x1': x_np[0:5, :],
                                          'label': label_np[0:5, :]
                                      }, {
                                          'x1': x_np[5:, :],
                                          'label': label_np[5:, :]
                                      }],
                                      fetch_list=[loss1.name, train1],
                                      use_prune=True)
                    else:
                        res = exe.run(compiled_prog2,
                                      feed={
                                          'x2': x_np,
                                          'label': label_np
                                      },
                                      fetch_list=[loss2.name, train2],
                                      use_prune=True)
                weight1 = np.array(
                    scope.find_var(w1_param_attrs.name).get_tensor())
        # expected
        scope = fluid.Scope()
        with fluid.scope_guard(scope):
            exe.run(startup_program)
            for i in range(10):
                if i % 2 == 1:
                    exe.run(cloned_program,
                            feed={
                                'x1': x_np,
                                'x2': x_np,
                                'label': label_np
                            },
                            fetch_list=[loss1.name],
                            use_prune=False)
            weight2 = np.array(scope.find_var(w1_param_attrs.name).get_tensor())
        np.testing.assert_allclose(weight1, weight2, rtol=1e-05)

    def test_prune_program_with_tuple_in_fetch_list(self):
        '''
        If there are multiple optimizers in the program, we can run a specific one
        by passing the tuple returned by optimizer.minimize() in fetch_list.
        '''
        exe = fluid.Executor(fluid.CPUPlace())
        program = framework.Program()
        startup_program = framework.Program()
        scope = fluid.Scope()
        # do not use_prune
        with fluid.scope_guard(scope):
            with fluid.program_guard(program, startup_program):
                (x, y, label, loss1, loss2, w_param_attrs) = self.net1()
                sgd_optimizer = fluid.optimizer.SGD(learning_rate=0.5)
                train1 = sgd_optimizer.minimize(loss1)
                cloned_program = program.clone()

                train2 = sgd_optimizer.minimize(loss2)
                exe.run(startup_program)
                x_np = np.random.random(size=(10, 2)).astype('float32')
                label_np = np.random.randint(1, size=(10, 1)).astype('int64')

                res = exe.run(program,
                              feed={
                                  'x': x_np,
                                  'label': label_np
                              },
                              fetch_list=[loss1.name],
                              use_prune=False)

                weight_without_prune = np.array(
                    scope.find_var(w_param_attrs.name).get_tensor())

        scope = fluid.Scope()
        # use_prune
        with fluid.scope_guard(scope):
            exe.run(startup_program)
            res = exe.run(program,
                          feed={
                              'x': x_np,
                              'label': label_np
                          },
                          fetch_list=[loss1.name, train1],
                          use_prune=True)
            weight_with_prune = np.array(
                scope.find_var(w_param_attrs.name).get_tensor())

        # expected
        scope = fluid.Scope()
        with fluid.scope_guard(scope):
            exe.run(startup_program)
            exe.run(cloned_program,
                    feed={
                        'x': x_np,
                        'label': label_np
                    },
                    fetch_list=[loss1.name],
                    use_prune=False)
            weight_expected = np.array(
                scope.find_var(w_param_attrs.name).get_tensor())

        np.testing.assert_array_equal(weight_with_prune, weight_expected)
        self.assertFalse(np.array_equal(weight_without_prune, weight_expected))

    def test_prune_program_partial_parameter_updated(self):
        """
        When running startup program, all parameters declared will be initialized.
        When running main program with prune=True, the pruned parameters will exist in scope and stay unchanged.
        """
        program = framework.Program()
        startup_program = framework.Program()
        scope = fluid.Scope()
        with fluid.scope_guard(scope):
            with fluid.program_guard(program, startup_program):
                (x1, x2, y1, y2, label, loss1, loss2, w1_param_attrs,
                 w2_param_attrs) = self.net2()
                loss1.persistable = True
                loss2.persistable = True
                sgd_optimizer = fluid.optimizer.SGD(learning_rate=0.5)
                train1 = sgd_optimizer.minimize(loss1)
                sgd_optimizer1 = fluid.optimizer.SGD(learning_rate=0.5)
                train2 = sgd_optimizer1.minimize(loss2)
                exe = fluid.Executor(fluid.CPUPlace())
                exe.run(startup_program)
                weight1_init = np.array(
                    scope.find_var(w1_param_attrs.name).get_tensor())
                weight2_init = np.array(
                    scope.find_var(w2_param_attrs.name).get_tensor())
                x_np = np.random.random(size=(10, 2)).astype('float32')
                label_np = np.random.randint(1, size=(10, 1)).astype('int64')

                res = exe.run(program,
                              feed={
                                  'x1': x_np,
                                  'label': label_np
                              },
                              fetch_list=[loss1.name, train1],
                              use_prune=True)
                self.assertIsNotNone(scope.find_var(w1_param_attrs.name))
                self.assertIsNotNone(scope.find_var(w2_param_attrs.name))
                self.assertIsNotNone(scope.find_var(loss1.name))
                self.assertIsNone(scope.find_var(loss2.name))
                weight1 = np.array(
                    scope.find_var(w1_param_attrs.name).get_tensor())
                weight2 = np.array(
                    scope.find_var(w2_param_attrs.name).get_tensor())
                self.assertFalse(np.array_equal(weight1_init,
                                                weight1))  # weight changed
                np.testing.assert_array_equal(weight2_init,
                                              weight2)  # weight2 unchanged

    def test_prune_override_use_prune(self):
        '''
        If optimize_ops are provided in fetch_list, the argument use_prune is always overridden to True.
        '''
        exe = fluid.Executor(fluid.CPUPlace())
        program = framework.Program()
        startup_program = framework.Program()
        scope = fluid.Scope()
        # do not use_prune
        with fluid.scope_guard(scope):
            with fluid.program_guard(program, startup_program):
                (x, y, label, loss1, loss2, w_param_attrs) = self.net1()
                sgd_optimizer = fluid.optimizer.SGD(learning_rate=0.5)
                train1, _ = sgd_optimizer.minimize(loss1)
                cloned_program = program.clone()
                train2, _ = sgd_optimizer.minimize(loss2)
                exe.run(startup_program)
                x_np = np.random.random(size=(10, 2)).astype('float32')
                label_np = np.random.randint(1, size=(10, 1)).astype('int64')
                res = exe.run(program,
                              feed={
                                  'x': x_np,
                                  'label': label_np
                              },
                              fetch_list=[loss1.name],
                              use_prune=False)

                weight_without_prune = np.array(
                    scope.find_var(w_param_attrs.name).get_tensor())

        scope = fluid.Scope()
        # use_prune
        with fluid.scope_guard(scope):
            exe.run(startup_program)
            res = exe.run(program,
                          feed={
                              'x': x_np,
                              'label': label_np
                          },
                          fetch_list=[loss1.name, train1])
            weight_with_prune = np.array(
                scope.find_var(w_param_attrs.name).get_tensor())

        # expected
        scope = fluid.Scope()
        with fluid.scope_guard(scope):
            exe.run(startup_program)
            exe.run(cloned_program,
                    feed={
                        'x': x_np,
                        'label': label_np
                    },
                    fetch_list=[loss1.name],
                    use_prune=False)
            weight_expected = np.array(
                scope.find_var(w_param_attrs.name).get_tensor())

        np.testing.assert_array_equal(weight_with_prune, weight_expected)
        self.assertFalse(np.array_equal(weight_without_prune, weight_expected))

    def test_prune_feed_var_in_fetchlist_1(self):
        # the variable to be fed is not a leaf
        program = framework.Program()
        startup_program = framework.Program()
        scope = fluid.Scope()
        with fluid.scope_guard(scope):
            with fluid.program_guard(program, startup_program):
                (x, y, label, loss1, loss2, w_param_attrs) = self.net1()
                exe = fluid.Executor(fluid.CPUPlace())
                exe.run(startup_program)
                weight_init = np.array(
                    scope.find_var(w_param_attrs.name).get_tensor())
                x_np = np.random.random(size=(10, 2)).astype('float32')
                label_np = np.random.randint(1, size=(10, 1)).astype('int64')
                res = exe.run(program,
                              feed={
                                  y.name: x_np,
                                  'label': label_np
                              },
                              fetch_list=[y.name, loss1.name],
                              use_prune=True)
                self.assertIsNotNone(scope.find_var(loss1.name))
                self.assertIsNone(scope.find_var(loss2.name))
                self.assertIsNone(scope.find_var(x.name))
                weight = np.array(
                    scope.find_var(w_param_attrs.name).get_tensor())
                np.testing.assert_array_equal(weight_init,
                                              weight)  # weight unchanged

    def test_prune_feed_var_in_fetchlist_2(self):
        # the variable to be fed is a leaf
        program = framework.Program()
        startup_program = framework.Program()
        scope = fluid.Scope()
        with fluid.scope_guard(scope):
            with fluid.program_guard(program, startup_program):
                (x, y, label, loss1, loss2, w_param_attrs) = self.net1()
                exe = fluid.Executor(fluid.CPUPlace())
                exe.run(startup_program)
                weight_init = np.array(
                    scope.find_var(w_param_attrs.name).get_tensor())
                x_np = np.random.random(size=(10, 2)).astype('float32')
                label_np = np.random.randint(1, size=(10, 1)).astype('int64')
                res = exe.run(program,
                              feed={
                                  x.name: x_np,
                                  'label': label_np
                              },
                              fetch_list=[x.name, loss1.name],
                              use_prune=True)
                self.assertIsNotNone(scope.find_var(loss1.name))
                self.assertIsNone(scope.find_var(loss2.name))
                weight = np.array(
                    scope.find_var(w_param_attrs.name).get_tensor())
                np.testing.assert_array_equal(weight_init,
                                              weight)  # weight unchanged


if __name__ == '__main__':
    unittest.main()