# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import unittest

import numpy as np

import paddle
from paddle import fluid

# Seed NumPy so the randomly generated test inputs are reproducible.
SEED = 2020
np.random.seed(SEED)

# Situation 1: Test list append
# NOTE: the functions in this file are fed to paddle.jit.to_static, so their
# exact syntactic shape (loops, ifs, appends) is itself the thing under test.
def test_list_append_without_control_flow(x):
    # Python list will not be transformed.
    x = fluid.dygraph.to_variable(x)
    a = []
    # It's a plain python control flow which won't be transformed
    if 2 > 1:
        a.append(x)
    return a


def test_list_append_in_if(x):
    # `x.numpy()[0] > 0` makes the condition Tensor-dependent, so to_static
    # converts this if/else (and the appends inside it) into static control flow.
    x = fluid.dygraph.to_variable(x)
    a = []
    if x.numpy()[0] > 0:
        a.append(x)
    else:
        a.append(
            paddle.tensor.fill_constant(shape=[1, 2], value=9, dtype="int64")
        )
    # TODO(Aurelius84): Currently, run_program_op doesn't support output LoDTensorArray.
    return a[0]


def test_list_append_in_for_loop(x, iter_num):
    # Append inside a for loop whose trip count is a Tensor.
    x = fluid.dygraph.to_variable(x)
    # Use `fill_constant` so that static analysis can analyze the type of iter_num is Tensor
    iter_num = paddle.tensor.fill_constant(
        shape=[1], value=iter_num, dtype="int32"
    )  # TODO(liym27): Delete it if the type of parameter iter_num can be resolved
    a = []
    for i in range(iter_num):
        a.append(x)
    return a[0]


def test_list_append_in_for_subscript(x):
    # Trip count comes from the runtime shape of x, then the collected
    # list is concatenated and subscripted.
    x = fluid.dygraph.to_variable(x)
    iter_num = paddle.shape(x)[0]
    a = []
    for i in range(iter_num):
        x = x + 1
        a.append(x)
    out = paddle.concat(a)
    return out[0]


def test_list_append_in_while_loop_subscript(x):
    # Same as the for-loop variant above, but with a while loop whose
    # condition compares against a Tensor-valued shape.
    x = fluid.dygraph.to_variable(x)
    iter_num = paddle.shape(x)[0]
    a = []
    i = 0
    while i < iter_num:
        x = x + 1
        a.append(x)
        i += 1
    out = paddle.concat(a)
    return out[0]


def test_list_append_in_for_loop_with_concat(x, iter_num):
    # Append in a Tensor-driven for loop, then concat the list along axis 0.
    x = fluid.dygraph.to_variable(x)
    a = []
    # Use `fill_constant` so that static analysis can analyze the type of iter_num is Tensor
    iter_num = paddle.tensor.fill_constant(
        shape=[1], value=iter_num, dtype="int32"
    )  # TODO(liym27): Delete it if the type of parameter iter_num can be resolved
    for i in range(iter_num):
        a.append(x)
    a = paddle.concat(a, axis=0)
    return a


def test_list_append_in_while_loop(x, iter_num):
    # Append inside a while loop whose bound is a Tensor.
    x = fluid.dygraph.to_variable(x)
    iter_num = paddle.tensor.fill_constant(
        shape=[1], value=iter_num, dtype="int32"
    )
    a = []
    i = 0
    while i < iter_num:
        a.append(x)
        i += 1
    return a[0]


def test_list_append_in_while_loop_with_stack(x, iter_num):
    # NOTE: `iter_num.numpy()[0]` makes the loop bound a plain Python int,
    # so this while loop runs in Python; only the stack result is a Tensor.
    x = fluid.dygraph.to_variable(x)
    iter_num = paddle.tensor.fill_constant(
        shape=[1], value=iter_num, dtype="int32"
    )
    a = []
    i = 0
    while i < iter_num.numpy()[0]:
        a.append(x)
        i += 1
    out = paddle.stack(a, axis=1)
    return out


def test_tensor_array_slice(x, iter_num):
    # NOTE(review): x and iter_num are unused; the signature presumably matches
    # the (input, iter_num) calling convention of the test harness — confirm.
    a = []
    for i in range(paddle.to_tensor(3)):
        a.append(paddle.to_tensor(i))
    # The slice result is deliberately unused: it exercises slicing of the
    # converted tensor array in static mode.
    t = a[1:3]
    return a[2]


# Situation 2: Test list pop
def test_list_pop_without_control_flow_1(x):
    # Pop with no argument outside any Tensor-dependent control flow.
    x = fluid.dygraph.to_variable(x)
    a = []
    if 2 > 1:
        a.append(x)
    a.pop()
    return a


def test_list_pop_without_control_flow_2(x):
    # Pop with an explicit index outside any Tensor-dependent control flow.
    x = fluid.dygraph.to_variable(x)
    a = []
    if 2 > 1:
        a.append(x)
        a.append(x + 1)
    last_item = a.pop(1)
    return last_item
def test_list_pop_in_if(x):
    # Pop after appending inside a Tensor-dependent if/else; `b` checks that
    # a second, independent list is also handled correctly.
    x = fluid.dygraph.to_variable(x)
    a = []
    b = [x * 2 + (x + 1)]
    if x.numpy()[0] > 0:
        a.append(x)
        b.append(x + 1)
        a.append(paddle.tensor.fill_constant(shape=[1], value=1, dtype="int64"))
    else:
        a.append(x + 1)
        b.append(x - 1)
        a.append(paddle.tensor.fill_constant(shape=[2], value=2, dtype="int64"))
    item1 = a.pop(1)
    return item1, b[-1]
def test_list_pop_in_for_loop(x, iter_num):
    # Append in one Tensor-driven loop, then pop in a second loop whose
    # bound comes from a Tensor converted back to a Python int.
    x = fluid.dygraph.to_variable(x)
    # Use `fill_constant` so that static analysis can analyze the type of iter_num is Tensor
    iter_num = paddle.tensor.fill_constant(
        shape=[1], value=iter_num, dtype="int32"
    )  # TODO(liym27): Delete it if the type of parameter iter_num can be resolved

    a = []
    b = [x - 1, x + 1]
    for i in range(iter_num):
        a.append(x + i)
        b.append(x * 2)

    one = paddle.ones(shape=[1], dtype="int32")
    for i in range(one.numpy()[0]):
        item = a.pop()
    return a[0], item, b[1]
def test_list_pop_in_while_loop(x, iter_num):
    # Mix append and conditional pop inside a Tensor-bounded while loop.
    x = fluid.dygraph.to_variable(x)
    iter_num = paddle.tensor.fill_constant(
        shape=[1], value=iter_num, dtype="int32"
    )
    a = []
    # append + pop before the loop exercises pop outside control flow too.
    b = [x]
    b.append(x)
    b.pop()
    i = 0

    while i < iter_num:
        a.append(x + i)
        b.append(x - i)
        i += 1
        if i % 2 == 1:
            a.pop()
    return a[0], b[2]
class TestListWithoutControlFlow(unittest.TestCase):
    """Check that to_static and dygraph produce identical results for the
    list scenarios without Tensor-dependent control flow."""

    def setUp(self):
        # Prefer the GPU when this Paddle build was compiled with CUDA.
        if fluid.is_compiled_with_cuda():
            self.place = fluid.CUDAPlace(0)
        else:
            self.place = fluid.CPUPlace()
        self.init_data()
        self.init_dygraph_func()

    def init_data(self):
        # Deterministic thanks to the module-level seed.
        self.input = np.random.random(3).astype('int32')

    def init_dygraph_func(self):
        # Subclasses override this to select which scenarios run.
        self.all_dygraph_funcs = [
            test_list_append_without_control_flow,
            test_list_pop_without_control_flow_1,
            test_list_pop_without_control_flow_2,
        ]

    def varbase_to_numpy(self, res):
        """Normalize a Tensor or a nested structure of Tensors to numpy."""
        if not isinstance(res, (list, tuple)):
            return [res.numpy()]
        return paddle.utils.map_structure(lambda x: x.numpy(), res)

    def run_static_mode(self):
        return self.train(to_static=True)

    def run_dygraph_mode(self):
        return self.train(to_static=False)

    def train(self, to_static=False):
        """Run the current dygraph function, optionally through to_static."""
        with fluid.dygraph.guard():
            func = (
                paddle.jit.to_static(self.dygraph_func)
                if to_static
                else self.dygraph_func
            )
            res = func(self.input)
            return self.varbase_to_numpy(res)

    def test_transformed_static_result(self):
        for dyfunc in self.all_dygraph_funcs:
            self.dygraph_func = dyfunc
            static_res_list = self.run_static_mode()
            dygraph_res_list = self.run_dygraph_mode()

            self.assertEqual(len(static_res_list), len(dygraph_res_list))
            for stat_res, dy_res in zip(static_res_list, dygraph_res_list):
                np.testing.assert_allclose(
                    stat_res,
                    dy_res,
                    rtol=1e-05,
                    err_msg='dygraph_res is {}\nstatic_res is {}'.format(
                        dy_res, stat_res
                    ),
                )


class TestListInIf(TestListWithoutControlFlow):
    """Run only the list-append-in-if scenario through the shared harness."""

    def init_dygraph_func(self):
        self.all_dygraph_funcs = [
            test_list_append_in_if,
        ]


class TestListInWhileLoop(TestListWithoutControlFlow):
    """List append/pop inside Tensor-bounded while loops.

    These scenarios take (input, iter_num), so `train` is overridden.
    """

    def init_data(self):
        self.input = np.random.random(3).astype('int32')
        self.iter_num = 3

    def init_dygraph_func(self):
        self.all_dygraph_funcs = [
            test_list_append_in_while_loop,
            test_list_pop_in_while_loop,
        ]

    def train(self, to_static=False):
        with fluid.dygraph.guard():
            if not to_static:
                res = self.dygraph_func(self.input, self.iter_num)
            else:
                # Printing .code also exercises the generated-code accessor.
                print(paddle.jit.to_static(self.dygraph_func).code)
                res = paddle.jit.to_static(self.dygraph_func)(
                    self.input, self.iter_num
                )
            return self.varbase_to_numpy(res)

class TestListInWhileLoopWithStack(TestListInWhileLoop):
    """Stack the accumulated list into a single Tensor after the loop."""

    def init_dygraph_func(self):
        self.all_dygraph_funcs = [
            test_list_append_in_while_loop_with_stack,
        ]
class TestTensorArraySlice(TestListInWhileLoop):
    """Slice the converted tensor array and index into it."""

    def init_dygraph_func(self):
        self.all_dygraph_funcs = [
            test_tensor_array_slice,
        ]


class TestListInForLoop(TestListInWhileLoop):
    """List append/pop inside Tensor-bounded for loops."""

    def init_dygraph_func(self):
        self.all_dygraph_funcs = [
            test_list_append_in_for_loop,
            test_list_pop_in_for_loop,
        ]


class TestListInForLoopWithConcat(TestListInWhileLoopWithStack):
    """Concat the accumulated list along axis 0 after the for loop."""

    def init_dygraph_func(self):
        self.all_dygraph_funcs = [
            test_list_append_in_for_loop_with_concat,
        ]


class TestListInForLoopWithSubscript(TestListWithoutControlFlow):
    """Subscript lists whose length is driven by a Tensor shape."""

    def init_dygraph_func(self):
        self.all_dygraph_funcs = [
            test_list_append_in_for_subscript,
            test_list_append_in_while_loop_subscript,
        ]

    def init_data(self):
        # These scenarios read paddle.shape(x)[0] and do float arithmetic,
        # hence a 2-D float32 input instead of the base class's 1-D int32.
        self.input = np.random.random((3, 4)).astype('float32')


class ListWithCondNet(paddle.nn.Layer):
    # A small network whose forward mixes a Python list with a
    # Tensor-dependent if/else; used to check grad var-type inference.
    def __init__(self):
        super().__init__()

    # Add *args to test function.__self__ in FunctionSpec.
    # DO NOT remove *args.
    @paddle.jit.to_static
    def forward(self, x, index, *args):
        y = paddle.nn.functional.relu(x)
        a = []

        # Collect the rows of y into a Python list.
        for i in y:
            a.append(i)

        if index > 0:
            res = a[0] * a[0]
            y = y + 1
        else:
            res = a[-1] * a[-1]
            y = y - 1

        z = a[-1] * res * y[0]
        return z


class TestListWithCondGradInferVarType(unittest.TestCase):
    def test_to_static(self):
        # With x = [2, 3, 4] and index = 1, relu leaves x unchanged, the
        # if-branch runs: res = a[0]^2 = 4, y = x + 1 = [3, 4, 5], and
        # z = a[-1] * res * y[0] = 4 * 4 * 3 = 48.
        net = ListWithCondNet()
        x = paddle.to_tensor([2, 3, 4], dtype='float32')
        index = paddle.to_tensor([1])
        res = net(x, index)
        self.assertEqual(res, 48.0)
# Run all dygraph-to-static list tests when executed directly.
if __name__ == '__main__':
    unittest.main()