# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import unittest

import numpy as np

import paddle
import paddle.fluid as fluid
import paddle.fluid.core as core
import paddle.fluid.layers as layers
import paddle.nn.functional as F
from paddle.fluid.backward import append_backward
from paddle.fluid.framework import Program, program_guard

paddle.enable_static()


class TestApiWhileLoop(unittest.TestCase):
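    # while_loop with loop_vars passed as a tuple, a list, and nested
    # dict/list structures.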
    def test_var_tuple(self):
        def cond(i):
            return paddle.less_than(i, ten)

        def body(i):
            return paddle.add(x=i, y=one)

        main_program = Program()
        startup_program = Program()
        with program_guard(main_program, startup_program):
            i = layers.fill_constant(shape=[1], dtype='int64', value=0)
            one = layers.fill_constant(shape=[1], dtype='int64', value=1)
            ten = layers.fill_constant(shape=[1], dtype='int64', value=10)
            out = paddle.static.nn.while_loop(cond, body, (i,))

        place = (
            fluid.CUDAPlace(0)
            if core.is_compiled_with_cuda()
            else fluid.CPUPlace()
        )
        exe = fluid.Executor(place)
        res = exe.run(main_program, fetch_list=out)
        np.testing.assert_allclose(
            np.asarray(res[0]), np.full(1, 10, np.int64), rtol=1e-05
        )

    def test_var_list(self):
        def cond(i, mem):
            return paddle.less_than(i, ten)

        def body(i, mem):
            mem = paddle.add(x=mem, y=one)
            i = paddle.increment(i)
            return [i, mem]

        main_program = Program()
        startup_program = Program()
        with program_guard(main_program, startup_program):
            i = layers.zeros(shape=[1], dtype='int64')
            ten = layers.fill_constant(shape=[1], dtype='int64', value=10)
            mem = fluid.data(name='mem', shape=[10], dtype='float32')
            one = layers.fill_constant(shape=[10], dtype='float32', value=1)
            out = paddle.static.nn.while_loop(cond, body, [i, mem])

            data = np.random.rand(10).astype('float32')
            data_one = np.ones(10).astype('float32')

        place = (
            fluid.CUDAPlace(0)
            if core.is_compiled_with_cuda()
            else fluid.CPUPlace()
        )
        exe = fluid.Executor(place)
        res = exe.run(main_program, feed={'mem': data}, fetch_list=out)
        for i in range(10):
            data = np.add(data, data_one)
        np.testing.assert_allclose(np.asarray(res[1]), data, rtol=1e-05)

    def test_var_dict(self):
        def cond(i, ten, test_dict, test_list, test_list_dict):
            return paddle.less_than(i, ten)

        def body(i, ten, test_dict, test_list, test_list_dict):
            test_dict["test_key"] = i
            test_dict["test_key"] += 1

            test_list[0] = paddle.reshape(test_list[0], [2, -1]) + 1

            test_list_dict[0]["test_key"] += 1
            test_list_dict[0]["test_key"] = F.relu(
                test_list_dict[0]["test_key"]
            )

            i = paddle.increment(i)
            return [i, ten, test_dict, test_list, test_list_dict]

        main_program = Program()
        startup_program = Program()
        with program_guard(main_program, startup_program):
            i = layers.zeros(shape=[1], dtype='int64')
            ten = layers.fill_constant(shape=[1], dtype='int64', value=10)
            test_data = layers.fill_constant(shape=[1], dtype='int64', value=0)

            test_dict = {"test_key": test_data}
            test_list = [
                layers.fill_constant(shape=[1, 2], dtype='int64', value=0)
            ]
            test_list_dict = [
                {
                    "test_key": layers.fill_constant(
                        shape=[1], dtype='float32', value=0
                    )
                }
            ]

            (
                i,
                ten,
                test_dict,
                test_list,
                test_list_dict,
            ) = paddle.static.nn.while_loop(
                cond, body, [i, ten, test_dict, test_list, test_list_dict]
            )
        place = (
            fluid.CUDAPlace(0)
            if core.is_compiled_with_cuda()
            else fluid.CPUPlace()
        )
        exe = fluid.Executor(place)
        res = exe.run(
            main_program,
            fetch_list=[
                test_dict["test_key"],
                test_list[0],
                test_list_dict[0]["test_key"],
            ],
        )
        np.testing.assert_allclose(
            np.asarray(res[0]),
            np.full(shape=1, fill_value=10, dtype=np.int64),
            rtol=1e-05,
        )
        np.testing.assert_allclose(
            np.asarray(res[1]),
            np.full(shape=(2, 1), fill_value=10, dtype=np.int64),
            rtol=1e-05,
        )
        np.testing.assert_allclose(
            np.asarray(res[2]),
            np.full(shape=1, fill_value=10, dtype=np.float32),
            rtol=1e-05,
        )


class TestApiWhileLoop_Nested(unittest.TestCase):
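    # A while_loop nested inside the body of another while_loop.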
    def test_nested_net(self):
        def external_cond(i, j, init, sums):
            return paddle.less_than(i, loop_len1)

        def external_body(i, j, init, sums):
            def internal_cond(j, init, sums):
                return paddle.less_than(j, loop_len2)

            def internal_body(j, init, sums):
                init = paddle.add(x=init, y=ones)
                sums = paddle.add(x=init, y=sums)
                j = paddle.increment(j)
                return [j, init, sums]

            result = paddle.static.nn.while_loop(
                internal_cond, internal_body, [j, init, sums]
            )
            j = result[0]
            init = result[1]
            sums = result[2]
            sums = paddle.add(x=init, y=sums)
            i = paddle.increment(i)
            return [i, j, init, sums]

        main_program = Program()
        startup_program = Program()
        with program_guard(main_program, startup_program):
            i = layers.zeros(shape=[1], dtype='int64')
            j = layers.zeros(shape=[1], dtype='int64')
            init = fluid.data(name='init', shape=[3, 3], dtype='float32')
            sums = fluid.data(name='sums', shape=[3, 3], dtype='float32')
            loop_len1 = layers.fill_constant(shape=[1], dtype='int64', value=2)
            loop_len2 = layers.fill_constant(shape=[1], dtype='int64', value=3)
            ones = layers.fill_constant(shape=[3, 3], dtype='float32', value=1)

            out = paddle.static.nn.while_loop(
                external_cond, external_body, [i, j, init, sums]
            )

            data = np.random.rand(3, 3).astype('float32')
            data_sums = np.zeros([3, 3]).astype('float32')

        place = (
            fluid.CUDAPlace(0)
            if core.is_compiled_with_cuda()
            else fluid.CPUPlace()
        )
        exe = fluid.Executor(place)
        res = exe.run(
            main_program, feed={'init': data, 'sums': data_sums}, fetch_list=out
        )
        for i in range(3):
            data = np.add(data, 1)
            data_sums = np.add(data, data_sums)
        for j in range(2):
            data_sums = np.add(data, data_sums)
        np.testing.assert_allclose(np.asarray(res[3]), data_sums, rtol=1e-05)


class TestApiWhileLoop_Backward(unittest.TestCase):
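    # Gradients through while_loop, computed with append_backward.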
    def test_while_loop_backward(self):
        def cond(i, x):
            return paddle.less_than(i, eleven)

        def body(i, x):
            x = paddle.multiply(x=i, y=i)
            i = paddle.increment(i)
            return [i, x]

        main_program = Program()
        startup_program = Program()
        with fluid.program_guard(main_program, startup_program):
            i = fluid.data(name='i', shape=[1], dtype='float32')
            i.stop_gradient = False
            eleven = layers.fill_constant(shape=[1], dtype='float32', value=11)
            one = layers.fill_constant(shape=[1], dtype='float32', value=1)
            x = fluid.data(name='x', shape=[1], dtype='float32')
            x.stop_gradient = False

            out = paddle.static.nn.while_loop(cond, body, [i, x])
            mean = paddle.mean(out[1])
            append_backward(mean)

        place = (
            fluid.CUDAPlace(0)
            if core.is_compiled_with_cuda()
            else fluid.CPUPlace()
        )
        exe = fluid.Executor(place)

        feed_i = np.ones(1).astype('float32')
        feed_x = np.ones(1).astype('float32')
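        # i runs from 1 to 10, so x ends as 10 * 10 = 100; the expected gradient
        # w.r.t. i accumulates 2 * i over the iterations: 2 * (1 + ... + 10) = 110.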
        data = np.asarray([100]).astype('float32')
        i_grad = np.asarray([110]).astype('float32')

        res = exe.run(
            main_program,
            feed={'i': feed_i, 'x': feed_x},
            fetch_list=[mean.name, i.grad_name],
        )
        np.testing.assert_allclose(np.asarray(res[0]), data, rtol=1e-05)
        np.testing.assert_allclose(np.asarray(res[1]), i_grad, rtol=1e-05)

    def test_while_loop_backward2(self):
        def cond(i, x):
            return i < 3

        def body(i, x):
            x = x * i
            i = i + 1
            return [i, x]

        main_program = Program()
        startup_program = Program()
        with fluid.program_guard(main_program, startup_program):
            i = fluid.data(name='i', shape=[1], dtype='float32')
            i.stop_gradient = False
            x = fluid.data(name='x', shape=[1], dtype='float32')
            x.stop_gradient = False

            out = paddle.static.nn.while_loop(cond, body, [i, x])
            mean = paddle.mean(out[1])
            append_backward(mean)

        place = (
            fluid.CUDAPlace(0)
            if core.is_compiled_with_cuda()
            else fluid.CPUPlace()
        )
        exe = fluid.Executor(place)

        feed_i = np.ones(1).astype('float32')
        feed_x = np.ones(1).astype('float32')
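        # Two iterations give x = x0 * i0 * (i0 + 1) = 2, with gradients
        # d(x)/d(i0) = x0 * (2 * i0 + 1) = 3 and d(x)/d(x0) = i0 * (i0 + 1) = 2.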
        data = np.asarray([2]).astype('float32')
        i_grad = np.asarray([3]).astype('float32')
        x_grad = np.asarray([2]).astype('float32')

        res = exe.run(
            main_program,
            feed={'i': feed_i, 'x': feed_x},
            fetch_list=[mean.name, i.grad_name, x.grad_name],
        )
        np.testing.assert_allclose(np.asarray(res[0]), data, rtol=1e-05)
        np.testing.assert_allclose(np.asarray(res[1]), i_grad, rtol=1e-05)
        np.testing.assert_allclose(np.asarray(res[2]), x_grad, rtol=1e-05)


class TestApiWhileLoop_NestedWithBackwardAndLoDTensorArray(unittest.TestCase):
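    # A nested while_loop that reads and writes a LoDTensorArray, with backward.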
    def test_nested_net_with_backward_and_lodtensor(self):
        def external_cond(i, j, x, mem_array):
            return paddle.less_than(i, array_len)

        def external_body(i, j, x, mem_array):
            def internal_cond(j, x, mem_array):
                return paddle.less_than(j, array_len2)

            def internal_body(j, x, mem_array):
                inner_data = paddle.tensor.array_read(array=data_array, i=j)
                inner_prev = paddle.tensor.array_read(array=mem_array, i=j)
                inner_sum_0 = paddle.add(x=inner_data, y=inner_prev)
                inner_sum_1 = paddle.add(x=x, y=inner_sum_0)
                j = paddle.increment(x=j)
                paddle.tensor.array_write(inner_sum_1, i=j, array=mem_array)
                return [j, x, mem_array]

            outer_data = paddle.tensor.array_read(array=data_array, i=i)
            outer_prev = paddle.tensor.array_read(array=mem_array, i=i)
            outer_sum_0 = paddle.add(x=outer_data, y=outer_prev)
            outer_sum_1 = paddle.add(x=x, y=outer_sum_0)
            i = paddle.increment(x=i)
            paddle.tensor.array_write(outer_sum_1, i=i, array=mem_array)
            j, x, mem_array = paddle.static.nn.while_loop(
                internal_cond, internal_body, [j, x, mem_array]
            )
            return [i, j, x, mem_array]

        main_program = Program()
        startup_program = Program()
        with fluid.program_guard(main_program, startup_program):
            d0 = fluid.data(name='d0', shape=[10], dtype='float32')
            d1 = fluid.data(name='d1', shape=[10], dtype='float32')
            d2 = fluid.data(name='d2', shape=[10], dtype='float32')
            x = fluid.data(name='x', shape=[10], dtype='float32')
            x.stop_gradient = False
            i = layers.zeros(shape=[1], dtype='int64')
            i.stop_gradient = True
            init = layers.zeros(shape=[10], dtype='float32')
            mem_array = paddle.tensor.array_write(x=init, i=i)
            data_array = paddle.tensor.array_write(x=d0, i=i)
            mem_array.stop_gradient = False
            i = paddle.increment(i)
            paddle.tensor.array_write(d1, i, array=data_array)
            i = paddle.increment(i)
            paddle.tensor.array_write(d2, i, array=data_array)
            i = layers.zeros(shape=[1], dtype='int64')
            i.stop_gradient = True
            array_len = layers.fill_constant(shape=[1], dtype='int64', value=1)
            j = layers.fill_constant(shape=[1], dtype='int64', value=1)
            j.stop_gradient = True
            array_len2 = layers.fill_constant(shape=[1], dtype='int64', value=3)

            out = paddle.static.nn.while_loop(
                external_cond, external_body, [i, j, x, mem_array]
            )

            sum_result = paddle.tensor.array_read(array=mem_array, i=j)
            mean = paddle.mean(sum_result)
            append_backward(mean)

            place = (
                fluid.CUDAPlace(0)
                if core.is_compiled_with_cuda()
                else fluid.CPUPlace()
            )
            exe = fluid.Executor(place)

            d = []
            for i in range(3):
                d.append(np.random.random(size=[10]).astype('float32'))
            feed_x = np.ones(10).astype('float32')
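            # sum_result accumulates d0 + d1 + d2 plus three additions of x, so
            # each element of x receives a gradient of 3 / 10 = 0.3 from the mean.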
            data_sum = d[0] + d[1] + d[2] + 3 * feed_x
            x_grad = [0.3] * 10
            res = exe.run(
                main_program,
                feed={'d0': d[0], 'd1': d[1], 'd2': d[2], 'x': feed_x},
                fetch_list=[sum_result.name, x.grad_name],
            )
            np.testing.assert_allclose(res[0], data_sum, rtol=1e-05)
            np.testing.assert_allclose(res[1], x_grad, rtol=1e-05)


class TestApiWhileLoopWithSwitchCase(unittest.TestCase):
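    # A while_loop whose body dispatches through paddle.static.nn.switch_case.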
    def test_with_switch_case(self):
        def cond(i):
            return paddle.less_than(i, ten)

        def body(i):
            def fn_add_three():
                data_add_three = paddle.add(x=i, y=three)
                return data_add_three

            def fn_square():
                data_mul_data = paddle.multiply(x=i, y=i)
                return data_mul_data

            def fn_add_one():
                data_add_one = paddle.add(x=i, y=one)
                return data_add_one

            return paddle.static.nn.switch_case(
                branch_index=i,
                branch_fns={2: fn_add_three, 5: fn_square},
                default=fn_add_one,
            )

        main_program = Program()
        startup_program = Program()
        with fluid.program_guard(main_program, startup_program):
            i = layers.fill_constant(shape=[1], dtype='int64', value=1)
            ten = layers.fill_constant(shape=[1], dtype='int64', value=10)
            three = layers.fill_constant(shape=[1], dtype='int64', value=3)
            one = layers.fill_constant(shape=[1], dtype='int64', value=1)
            out = paddle.static.nn.while_loop(cond, body, [i])

        place = (
            fluid.CUDAPlace(0)
            if core.is_compiled_with_cuda()
            else fluid.CPUPlace()
        )
        exe = fluid.Executor(place)
        res = exe.run(main_program, fetch_list=out)
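        # i goes 1 -> 2 (default branch, +1) -> 5 (branch 2, +3) -> 25
        # (branch 5, squared); the condition i < 10 then fails.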

        data = np.asarray([25]).astype('int64')
        np.testing.assert_allclose(np.asarray(res[0]), data, rtol=1e-05)


class TestApiWhileLoop_Error(unittest.TestCase):
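    # Invalid cond/body/loop_vars arguments should raise TypeError or ValueError.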
    def test_error(self):
        def cond_returns_constant(i):
            return 1

        def cond_returns_not_bool_tensor(i):
            return paddle.increment(i)

        def cond_returns_bool_tensor(i):
            return paddle.less_than(i, ten)

        def cond_returns_2d_tensor(i):
            return paddle.less_than(i, ten_2d)

        def cond_receives_two_args(i, ten):
            return paddle.less_than(i, ten)

        def body(i):
            return paddle.increment(i)

        def body_returns_error_length(i):
            i = paddle.increment(i)
            return [i, i]

        def body_returns_error_type(i, ten):
            return paddle.increment(i)

        def cond_returns_with_mutable_dict(i, test_dict):
            return i > 0

        def body_returns_with_mutable_dict(i, test_dict):
            test_dict['new_key'] = layers.fill_constant(
                shape=[1], dtype='int64', value=1
            )
            return paddle.increment(i), test_dict

        def cond_returns_with_mutable_list(i, test_list):
            return i > 0

        def body_returns_with_mutable_list(i, test_list):
            test_list.append(
                layers.fill_constant(shape=[1], dtype='int64', value=1)
            )
            return paddle.increment(i), test_list

        main_program = Program()
        startup_program = Program()
        with program_guard(main_program, startup_program):
            data = layers.fill_constant(shape=[1], dtype='int64', value=1)
            data_1d = layers.fill_constant(shape=[1], dtype='int64', value=1)
            data_2d = layers.fill_constant(shape=[2, 2], dtype='int64', value=1)
            ten = layers.fill_constant(shape=[1], dtype='int64', value=10)
            ten_2d = layers.fill_constant(shape=[2, 2], dtype='int64', value=10)

            # The type of `cond` in Op(while_loop) must be callable
            def type_error_cond():
                out = paddle.static.nn.while_loop(data, body, [data_1d])

            self.assertRaises(TypeError, type_error_cond)

            # The type of `body` in Op(while_loop) must be callable
            def type_error_body():
                out = paddle.static.nn.while_loop(
                    cond_returns_bool_tensor, data, [data_1d]
                )

            self.assertRaises(TypeError, type_error_body)

            # The type of `loop_vars` in Op(while_loop) must be list or tuple
            def type_error_loop_vars():
                out = paddle.static.nn.while_loop(
                    cond_returns_bool_tensor, body, data_1d
                )

            self.assertRaises(TypeError, type_error_loop_vars)

            # The value of `loop_vars` in Op(while_loop) must not be empty
            def value_error_loop_vars():
                out = paddle.static.nn.while_loop(
                    cond_returns_bool_tensor, body, []
                )

            self.assertRaises(ValueError, value_error_loop_vars)

            # The return value of `cond` in Op(while_loop) must be a Variable
            def type_error_cond_returns_not_variable():
                out = paddle.static.nn.while_loop(
                    cond_returns_constant, body, [data_1d]
                )

            self.assertRaises(TypeError, type_error_cond_returns_not_variable)

            # The return value of `cond` in Op(while_loop) must be a boolean Variable
            def type_error_cond_returns_not_boolean():
                out = paddle.static.nn.while_loop(
                    cond_returns_not_bool_tensor, body, [data_1d]
                )

            self.assertRaises(TypeError, type_error_cond_returns_not_boolean)

            # The shape of the value returned by `cond` in Op(while_loop) must be [1]
            def type_error_shape_cond_returns_2d():
                out = paddle.static.nn.while_loop(
                    cond_returns_2d_tensor, body, [data_2d]
                )

            self.assertRaises(TypeError, type_error_shape_cond_returns_2d)

            # The number of values returned by `body` in Op(while_loop) must match `loop_vars`
            def value_error_body_returns_error_length():
                out = paddle.static.nn.while_loop(
                    cond_returns_bool_tensor, body_returns_error_length, [data]
                )

            self.assertRaises(ValueError, value_error_body_returns_error_length)

            # The types of the values returned by `body` in Op(while_loop) must match `loop_vars`
            def value_error_body_returns_error_type():
                out = paddle.static.nn.while_loop(
                    cond_receives_two_args, body_returns_error_type, [data, ten]
                )

            self.assertRaises(ValueError, value_error_body_returns_error_type)

            # The length of `output_vars` with mutable values must stay the same as `loop_vars`
            def value_error_body_returns_with_mutable_dict():
                test_dict = {
                    "int_constant": layers.fill_constant(
                        shape=[2, 2], dtype='int64', value=1
                    )
                }
                out = paddle.static.nn.while_loop(
                    cond_returns_with_mutable_dict,
                    body_returns_with_mutable_dict,
                    [data, test_dict],
                )

            self.assertRaises(
                ValueError, value_error_body_returns_with_mutable_dict
            )

            def value_error_body_returns_with_mutable_list():
                test_list = [
                    layers.fill_constant(shape=[2, 2], dtype='int64', value=1)
                ]
                out = paddle.static.nn.while_loop(
                    cond_returns_with_mutable_list,
                    body_returns_with_mutable_list,
                    [data, test_list],
                )

            self.assertRaises(
                ValueError, value_error_body_returns_with_mutable_list
            )


class TestApiWhileLoopSliceInBody(unittest.TestCase):
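    # Indexing into a Tensor inside the loop body.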
    def test_var_slice(self):
        def cond(z, i):
            return i + 1 <= x_shape[0]

        def body(z, i):
            z = z + x[i]
            i += 1
            return z, i

        main_program = Program()
        startup_program = Program()
        with program_guard(main_program, startup_program):
            x = paddle.static.data(name='x', shape=[-1, 5], dtype='int32')
            z = fluid.layers.fill_constant([1], 'int32', 0)
            x_shape = paddle.shape(x)
            i = fluid.layers.fill_constant([1], 'int32', 0)
            z, _ = paddle.static.nn.while_loop(cond, body, [z, i])

        place = (
            fluid.CUDAPlace(0)
            if core.is_compiled_with_cuda()
            else fluid.CPUPlace()
        )
        exe = fluid.Executor(place)

        np_x = np.array([1, 2, 3, 4, 5], dtype='int32')
        res = exe.run(main_program, feed={'x': np_x}, fetch_list=[z])
        np.testing.assert_array_equal(res[0], [np.sum(np_x)])


if __name__ == '__main__':
    unittest.main()