test_layers.py 113.0 KB
Newer Older
1
#   Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
D
dzhwinter 已提交
2
#
D
dzhwinter 已提交
3 4 5
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
D
dzhwinter 已提交
6
#
D
dzhwinter 已提交
7
#     http://www.apache.org/licenses/LICENSE-2.0
D
dzhwinter 已提交
8
#
D
dzhwinter 已提交
9 10 11 12 13 14
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

15 16
import contextlib
import inspect
Q
Qiao Longfei 已提交
17 18
import unittest

19
import numpy as np
20
from decorator_helper import prog_scope
21
from test_imperative_base import new_program_scope
22 23 24

import paddle
import paddle.fluid as fluid
25
import paddle.fluid.layers as layers
26
import paddle.fluid.nets as nets
27
import paddle.nn.functional as F
28
from paddle.fluid import core
29
from paddle.fluid.dygraph import base, to_variable
30 31 32 33 34 35
from paddle.fluid.framework import (
    Program,
    _test_eager_guard,
    default_main_program,
    program_guard,
)
36
from paddle.tensor import random
37 38 39 40 41 42 43 44 45 46 47


class LayerTest(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        cls.seed = 111

    @classmethod
    def tearDownClass(cls):
        pass

48 49 50 51 52 53 54 55
    def _get_place(self, force_to_use_cpu=False):
        # this option for ops that only have cpu kernel
        if force_to_use_cpu:
            return core.CPUPlace()
        else:
            if core.is_compiled_with_cuda():
                return core.CUDAPlace(0)
            return core.CPUPlace()
56 57 58 59

    @contextlib.contextmanager
    def static_graph(self):
        with new_program_scope():
C
cnn 已提交
60
            paddle.seed(self.seed)
L
Leo Chen 已提交
61
            paddle.framework.random._manual_program_seed(self.seed)
62 63
            yield

64 65 66
    def get_static_graph_result(
        self, feed, fetch_list, with_lod=False, force_to_use_cpu=False
    ):
67
        exe = fluid.Executor(self._get_place(force_to_use_cpu))
68
        exe.run(fluid.default_startup_program())
69 70 71 72 73 74
        return exe.run(
            fluid.default_main_program(),
            feed=feed,
            fetch_list=fetch_list,
            return_numpy=(not with_lod),
        )
75 76

    @contextlib.contextmanager
77
    def dynamic_graph(self, force_to_use_cpu=False):
L
lujun 已提交
78
        with fluid.dygraph.guard(
79 80
            self._get_place(force_to_use_cpu=force_to_use_cpu)
        ):
C
cnn 已提交
81
            paddle.seed(self.seed)
L
Leo Chen 已提交
82
            paddle.framework.random._manual_program_seed(self.seed)
83 84 85 86
            yield


class TestLayer(LayerTest):
87 88
    def test_custom_layer_with_kwargs(self):
        class CustomLayer(fluid.Layer):
89
            def __init__(self, input_size, linear1_size=4):
90
                super().__init__()
91
                self.linear1 = paddle.nn.Linear(
92 93
                    input_size, linear1_size, bias_attr=False
                )
94 95 96
                self.linear2 = paddle.nn.Linear(
                    linear1_size, 1, bias_attr=False
                )
97 98 99 100 101

            def forward(self, x, do_linear2=False):
                ret = self.linear1(x)
                if do_linear2:
                    ret = self.linear2(ret)
102 103 104
                return ret

        with self.dynamic_graph():
105 106 107 108 109
            with _test_eager_guard():
                inp = np.ones([3, 3], dtype='float32')
                x = base.to_variable(inp)
                custom = CustomLayer(input_size=3, linear1_size=2)
                ret = custom(x, do_linear2=False)
110
                np.testing.assert_array_equal(ret.numpy().shape, [3, 2])
111
                ret = custom(x, do_linear2=True)
112
                np.testing.assert_array_equal(ret.numpy().shape, [3, 1])
113 114
            inp = np.ones([3, 3], dtype='float32')
            x = base.to_variable(inp)
115 116
            custom = CustomLayer(input_size=3, linear1_size=2)
            ret = custom(x, do_linear2=False)
117
            np.testing.assert_array_equal(ret.numpy().shape, [3, 2])
118
            ret = custom(x, do_linear2=True)
119
            np.testing.assert_array_equal(ret.numpy().shape, [3, 1])
120

S
songyouwei 已提交
121 122 123
    def test_linear(self):
        inp = np.ones([3, 32, 32], dtype='float32')
        with self.static_graph():
124 125 126 127 128 129
            t = layers.data(
                name='data',
                shape=[3, 32, 32],
                dtype='float32',
                append_batch_size=False,
            )
130
            linear = paddle.nn.Linear(
131 132
                32, 4, bias_attr=fluid.initializer.ConstantInitializer(value=1)
            )
S
songyouwei 已提交
133
            ret = linear(t)
134 135 136
            static_ret = self.get_static_graph_result(
                feed={'data': inp}, fetch_list=[ret]
            )[0]
S
songyouwei 已提交
137
        with self.dynamic_graph():
138 139
            with _test_eager_guard():
                t = base.to_variable(inp)
140
                linear = paddle.nn.Linear(
141 142
                    32,
                    4,
143 144
                    bias_attr=fluid.initializer.ConstantInitializer(value=1),
                )
145 146 147
                dy_eager_ret = linear(t)
                dy_eager_ret_value = dy_eager_ret.numpy()

S
songyouwei 已提交
148
            t = base.to_variable(inp)
149
            linear = paddle.nn.Linear(
150 151
                32, 4, bias_attr=fluid.initializer.ConstantInitializer(value=1)
            )
S
songyouwei 已提交
152 153 154
            dy_ret = linear(t)
            dy_ret_value = dy_ret.numpy()

155 156
        np.testing.assert_array_equal(static_ret, dy_eager_ret_value)
        np.testing.assert_array_equal(static_ret, dy_ret_value)
S
songyouwei 已提交
157

158 159 160 161 162
        with self.static_graph():

            # the input of Linear must be Variable.
            def test_Variable():
                inp = np.ones([3, 32, 32], dtype='float32')
163
                linear = paddle.nn.Linear(
164 165
                    32,
                    4,
166 167
                    bias_attr=fluid.initializer.ConstantInitializer(value=1),
                )
168 169 170 171 172 173 174 175
                linear_ret1 = linear(inp)

            self.assertRaises(TypeError, test_Variable)

            # the input dtype of Linear must be float16 or float32 or float64
            # float16 only can be set on GPU place
            def test_type():
                inp = np.ones([3, 32, 32], dtype='int32')
176
                linear = paddle.nn.Linear(
177 178
                    32,
                    4,
179 180
                    bias_attr=fluid.initializer.ConstantInitializer(value=1),
                )
181 182 183 184 185 186 187
                linear_ret2 = linear(inp)

            self.assertRaises(TypeError, test_type)

    def test_Flatten(self):
        inp = np.ones([3, 4, 4, 5], dtype='float32')
        with self.static_graph():
188 189 190 191 192 193
            t = layers.data(
                name='data',
                shape=[3, 4, 4, 5],
                dtype='float32',
                append_batch_size=False,
            )
194
            flatten = paddle.nn.Flatten()
195
            ret = flatten(t)
196 197 198
            static_ret = self.get_static_graph_result(
                feed={'data': inp}, fetch_list=[ret]
            )[0]
199
        with self.dynamic_graph():
200 201
            with _test_eager_guard():
                t = base.to_variable(inp)
202
                flatten = paddle.nn.Flatten()
203 204 205
                dy_eager_ret = flatten(t)
                dy_eager_ret_value = dy_eager_ret.numpy()

206
            t = base.to_variable(inp)
207
            flatten = paddle.nn.Flatten()
208 209 210
            dy_ret = flatten(t)
            dy_ret_value = dy_ret.numpy()

211 212
        np.testing.assert_array_equal(static_ret, dy_eager_ret_value)
        np.testing.assert_array_equal(static_ret, dy_ret_value)
213 214 215 216 217 218

        with self.static_graph():

            # the input of Linear must be Variable.
            def test_Variable():
                inp = np.ones([3, 32, 32], dtype='float32')
219
                linear = paddle.nn.Linear(
220 221
                    32,
                    4,
222 223
                    bias_attr=fluid.initializer.ConstantInitializer(value=1),
                )
224 225 226 227 228 229 230 231
                linear_ret1 = linear(inp)

            self.assertRaises(TypeError, test_Variable)

            # the input dtype of Linear must be float16 or float32 or float64
            # float16 only can be set on GPU place
            def test_type():
                inp = np.ones([3, 32, 32], dtype='int32')
232
                linear = paddle.nn.Linear(
233 234
                    32,
                    4,
235 236
                    bias_attr=fluid.initializer.ConstantInitializer(value=1),
                )
237 238 239 240
                linear_ret2 = linear(inp)

            self.assertRaises(TypeError, test_type)

C
ceci3 已提交
241 242 243 244
    def test_SyncBatchNorm(self):
        if core.is_compiled_with_cuda():
            with self.static_graph():
                t = layers.data(name='t', shape=[-1, 3, 5, 5], dtype='float32')
C
ceci3 已提交
245
                my_sync_bn = paddle.nn.SyncBatchNorm(3)
C
ceci3 已提交
246 247
                ret = my_sync_bn(t)
                static_ret = self.get_static_graph_result(
248
                    feed={'t': np.ones([3, 3, 5, 5], dtype='float32')},
249 250
                    fetch_list=[ret],
                )[0]
C
ceci3 已提交
251 252

            with self.dynamic_graph():
253 254 255 256 257 258
                with _test_eager_guard():
                    t = np.ones([3, 3, 5, 5], dtype='float32')
                    my_syncbn = paddle.nn.SyncBatchNorm(3)
                    dy_eager_ret = my_syncbn(base.to_variable(t))
                    dy_eager_ret_value = dy_eager_ret.numpy()

C
ceci3 已提交
259 260 261 262
                t = np.ones([3, 3, 5, 5], dtype='float32')
                my_syncbn = paddle.nn.SyncBatchNorm(3)
                dy_ret = my_syncbn(base.to_variable(t))
                dy_ret_value = dy_ret.numpy()
263 264
            np.testing.assert_array_equal(static_ret, dy_ret_value)
            np.testing.assert_array_equal(static_ret, dy_eager_ret_value)
C
ceci3 已提交
265

266 267 268 269 270
    def test_relu(self):
        with self.static_graph():
            t = layers.data(name='t', shape=[3, 3], dtype='float32')
            ret = layers.relu(t)
            static_ret = self.get_static_graph_result(
271 272
                feed={'t': np.ones([3, 3], dtype='float32')}, fetch_list=[ret]
            )[0]
273 274

        with self.dynamic_graph():
275 276 277 278 279
            with _test_eager_guard():
                t = np.ones([3, 3], dtype='float32')
                dy_eager_ret = layers.relu(base.to_variable(t))
                dy_eager_ret_value = dy_eager_ret.numpy()

280 281
            t = np.ones([3, 3], dtype='float32')
            dy_ret = layers.relu(base.to_variable(t))
282
            dy_ret_value = dy_ret.numpy()
283

284 285
        np.testing.assert_allclose(static_ret, dy_ret_value, rtol=1e-05)
        np.testing.assert_allclose(static_ret, dy_eager_ret_value, rtol=1e-05)
C
ceci3 已提交
286

287 288 289 290
    def test_matmul(self):
        with self.static_graph():
            t = layers.data(name='t', shape=[3, 3], dtype='float32')
            t2 = layers.data(name='t2', shape=[3, 3], dtype='float32')
K
kangguangli 已提交
291
            ret = paddle.matmul(t, t2)
292 293 294 295 296 297 298
            static_ret = self.get_static_graph_result(
                feed={
                    't': np.ones([3, 3], dtype='float32'),
                    't2': np.ones([3, 3], dtype='float32'),
                },
                fetch_list=[ret],
            )[0]
299 300

        with self.dynamic_graph():
301 302 303
            with _test_eager_guard():
                t = np.ones([3, 3], dtype='float32')
                t2 = np.ones([3, 3], dtype='float32')
K
kangguangli 已提交
304
                dy_eager_ret = paddle.matmul(
305 306
                    base.to_variable(t), base.to_variable(t2)
                )
307 308
                dy_eager_ret_value = dy_eager_ret.numpy()

309 310
            t = np.ones([3, 3], dtype='float32')
            t2 = np.ones([3, 3], dtype='float32')
K
kangguangli 已提交
311
            dy_ret = paddle.matmul(base.to_variable(t), base.to_variable(t2))
312
            dy_ret_value = dy_ret.numpy()
313

314 315
        np.testing.assert_allclose(static_ret, dy_ret_value, rtol=1e-05)
        np.testing.assert_allclose(static_ret, dy_eager_ret_value, rtol=1e-05)
316

X
Xin Pan 已提交
317 318 319 320 321 322 323 324 325 326 327 328 329 330 331 332
    def test_elementwise_math(self):
        n = np.ones([3, 3], dtype='float32')
        n2 = np.ones([3, 3], dtype='float32') * 1.1
        n3 = np.ones([3, 3], dtype='float32') * 2
        n4 = np.ones([3, 3], dtype='float32') * 3
        n5 = np.ones([3, 3], dtype='float32') * 4
        n6 = np.ones([3, 3], dtype='float32') * 5

        with self.static_graph():
            t = layers.data(name='t', shape=[3, 3], dtype='float32')
            t2 = layers.data(name='t2', shape=[3, 3], dtype='float32')
            t3 = layers.data(name='t3', shape=[3, 3], dtype='float32')
            t4 = layers.data(name='t4', shape=[3, 3], dtype='float32')
            t5 = layers.data(name='t5', shape=[3, 3], dtype='float32')
            t6 = layers.data(name='t6', shape=[3, 3], dtype='float32')

333
            ret = paddle.add(t, t2)
334
            ret = paddle.pow(ret, t3)
335 336 337
            ret = paddle.divide(ret, t4)
            ret = paddle.subtract(ret, t5)
            ret = paddle.multiply(ret, t6)
X
Xin Pan 已提交
338

339 340 341 342
            static_ret = self.get_static_graph_result(
                feed={'t': n, 't2': n2, 't3': n3, 't4': n4, 't5': n5, 't6': n6},
                fetch_list=[ret],
            )[0]
X
Xin Pan 已提交
343 344

        with self.dynamic_graph():
345
            with _test_eager_guard():
346
                ret = paddle.add(to_variable(n), to_variable(n2))
347
                ret = paddle.pow(ret, to_variable(n3))
348 349 350
                ret = paddle.divide(ret, to_variable(n4))
                ret = paddle.subtract(ret, to_variable(n5))
                dy_eager_ret = paddle.multiply(ret, to_variable(n6))
351 352
                dy_eager_ret_value = dy_eager_ret.numpy()

353
            ret = paddle.add(to_variable(n), to_variable(n2))
354
            ret = paddle.pow(ret, to_variable(n3))
355 356 357
            ret = paddle.divide(ret, to_variable(n4))
            ret = paddle.subtract(ret, to_variable(n5))
            dy_ret = paddle.multiply(ret, to_variable(n6))
358
            dy_ret_value = dy_ret.numpy()
359

360 361
        np.testing.assert_allclose(static_ret, dy_ret_value, rtol=1e-05)
        np.testing.assert_allclose(static_ret, dy_eager_ret_value, rtol=1e-05)
X
Xin Pan 已提交
362 363 364 365 366 367

    def test_elementwise_minmax(self):
        n = np.ones([3, 3], dtype='float32')
        n2 = np.ones([3, 3], dtype='float32') * 2

        with self.dynamic_graph():
368
            with _test_eager_guard():
369
                min_eager_ret = paddle.minimum(to_variable(n), to_variable(n2))
H
HongyuJia 已提交
370
                max_eager_ret = paddle.maximum(to_variable(n), to_variable(n2))
371 372 373
                min_eager_ret_value = min_eager_ret.numpy()
                max_eager_ret_value = max_eager_ret.numpy()

374
            min_ret = paddle.minimum(to_variable(n), to_variable(n2))
H
HongyuJia 已提交
375
            max_ret = paddle.maximum(to_variable(n), to_variable(n2))
376 377
            min_ret_value = min_ret.numpy()
            max_ret_value = max_ret.numpy()
X
Xin Pan 已提交
378

379 380 381 382
        np.testing.assert_allclose(n, min_ret_value, rtol=1e-05)
        np.testing.assert_allclose(n2, max_ret_value, rtol=1e-05)
        np.testing.assert_allclose(n, min_eager_ret_value, rtol=1e-05)
        np.testing.assert_allclose(n2, max_eager_ret_value, rtol=1e-05)
X
Xin Pan 已提交
383

384 385 386 387
    def test_conv2d_transpose(self):
        inp_np = np.arange(0, 24).reshape([2, 3, 2, 2]).astype('float32')
        with self.static_graph():
            img = layers.data(name='pixel', shape=[3, 2, 2], dtype='float32')
388
            out = paddle.static.nn.conv2d_transpose(
389 390
                input=img,
                num_filters=10,
391
                filter_size=27,
392
                act='sigmoid',
393 394 395 396 397
                bias_attr=fluid.initializer.ConstantInitializer(value=1),
            )
            static_rlt = self.get_static_graph_result(
                feed={'pixel': inp_np}, fetch_list=[out]
            )[0]
398 399
        with self.static_graph():
            img = layers.data(name='pixel', shape=[3, 2, 2], dtype='float32')
400 401 402 403
            conv2d_transpose = paddle.nn.Conv2DTranspose(
                3,
                10,
                27,
404 405
                bias_attr=fluid.initializer.ConstantInitializer(value=1),
            )
406
            out = conv2d_transpose(img)
407
            out = paddle.nn.functional.sigmoid(out)
408 409 410
            static_rlt2 = self.get_static_graph_result(
                feed={'pixel': inp_np}, fetch_list=[out]
            )[0]
411
        with self.dynamic_graph():
412
            with _test_eager_guard():
413 414 415 416
                conv2d_transpose = paddle.nn.Conv2DTranspose(
                    3,
                    10,
                    27,
417 418
                    bias_attr=fluid.initializer.ConstantInitializer(value=1),
                )
419
                dy_eager_rlt = conv2d_transpose(base.to_variable(inp_np))
420
                dy_eager_rlt = paddle.nn.functional.sigmoid(dy_eager_rlt)
421 422
                dy_eager_rlt_value = dy_eager_rlt.numpy()

423 424 425 426
            conv2d_transpose = paddle.nn.Conv2DTranspose(
                3,
                10,
                27,
427 428
                bias_attr=fluid.initializer.ConstantInitializer(value=1),
            )
429
            dy_rlt = conv2d_transpose(base.to_variable(inp_np))
430
            dy_rlt = paddle.nn.functional.sigmoid(dy_rlt)
431
            dy_rlt_value = dy_rlt.numpy()
432 433 434
        np.testing.assert_allclose(static_rlt2, static_rlt, rtol=1e-05)
        np.testing.assert_allclose(dy_rlt_value, static_rlt2, rtol=1e-05)
        np.testing.assert_allclose(dy_eager_rlt_value, static_rlt2, rtol=1e-05)
435

436
        with self.dynamic_graph():
437 438 439 440 441
            with _test_eager_guard():
                images = np.ones([2, 3, 5, 5], dtype='float32')
                custom_weight = np.random.randn(3, 3, 2, 2).astype("float32")
                weight_attr = fluid.ParamAttr(
                    initializer=fluid.initializer.NumpyArrayInitializer(
442 443 444
                        custom_weight
                    )
                )
445 446 447 448 449 450
                conv2d1 = paddle.nn.Conv2DTranspose(3, 3, [2, 2])
                conv2d2 = paddle.nn.Conv2DTranspose(
                    3,
                    3,
                    [2, 2],
                    weight_attr=weight_attr,
451
                )
452 453 454
                dy_ret1 = conv2d1(base.to_variable(images))
                dy_ret2 = conv2d2(base.to_variable(images))
                self.assertFalse(
455 456
                    np.array_equal(dy_ret1.numpy(), dy_ret2.numpy())
                )
457 458 459 460

                conv2d1_weight_np = conv2d1.weight.numpy()
                conv2d1_bias = conv2d1.bias
                self.assertFalse(
461 462
                    np.array_equal(conv2d1_weight_np, conv2d2.weight.numpy())
                )
463
                conv2d2.weight.set_value(conv2d1_weight_np)
464 465 466
                np.testing.assert_array_equal(
                    conv2d1_weight_np, conv2d2.weight.numpy()
                )
467 468 469
                conv2d2.bias.set_value(conv2d1_bias)
                dy_ret1 = conv2d1(base.to_variable(images))
                dy_ret2 = conv2d2(base.to_variable(images))
470
                np.testing.assert_array_equal(dy_ret1.numpy(), dy_ret2.numpy())
471 472 473

                conv2d2.weight = conv2d1.weight
                conv2d2.bias = conv2d1.bias
474 475 476 477 478 479
                np.testing.assert_array_equal(
                    conv2d1.weight.numpy(), conv2d2.weight.numpy()
                )
                np.testing.assert_array_equal(
                    conv2d1.bias.numpy(), conv2d2.bias.numpy()
                )
480

481 482
            images = np.ones([2, 3, 5, 5], dtype='float32')
            custom_weight = np.random.randn(3, 3, 2, 2).astype("float32")
483 484 485 486 487
            weight_attr = fluid.ParamAttr(
                initializer=fluid.initializer.NumpyArrayInitializer(
                    custom_weight
                )
            )
488 489 490 491 492 493
            conv2d1 = paddle.nn.Conv2DTranspose(3, 3, [2, 2])
            conv2d2 = paddle.nn.Conv2DTranspose(
                3,
                3,
                [2, 2],
                weight_attr=weight_attr,
494
            )
495 496 497 498 499 500 501
            dy_ret1 = conv2d1(base.to_variable(images))
            dy_ret2 = conv2d2(base.to_variable(images))
            self.assertFalse(np.array_equal(dy_ret1.numpy(), dy_ret2.numpy()))

            conv2d1_weight_np = conv2d1.weight.numpy()
            conv2d1_bias = conv2d1.bias
            self.assertFalse(
502 503
                np.array_equal(conv2d1_weight_np, conv2d2.weight.numpy())
            )
504
            conv2d2.weight.set_value(conv2d1_weight_np)
505 506 507
            np.testing.assert_array_equal(
                conv2d1_weight_np, conv2d2.weight.numpy()
            )
508 509 510
            conv2d2.bias.set_value(conv2d1_bias)
            dy_ret1 = conv2d1(base.to_variable(images))
            dy_ret2 = conv2d2(base.to_variable(images))
511
            np.testing.assert_array_equal(dy_ret1.numpy(), dy_ret2.numpy())
512 513 514

            conv2d2.weight = conv2d1.weight
            conv2d2.bias = conv2d1.bias
515 516 517 518 519 520
            np.testing.assert_array_equal(
                conv2d1.weight.numpy(), conv2d2.weight.numpy()
            )
            np.testing.assert_array_equal(
                conv2d1.bias.numpy(), conv2d2.bias.numpy()
            )
521

522 523 524 525 526
        with self.static_graph():

            # the input of Conv2DTranspose must be Variable.
            def test_Variable():
                images = np.ones([2, 3, 5, 5], dtype='float32')
527
                conv2d = paddle.nn.Conv2DTranspose(3, 3, [2, 2])
528 529 530 531 532 533 534
                conv2d_ret1 = conv2d(images)

            self.assertRaises(TypeError, test_Variable)

            # the input dtype of Conv2DTranspose must be float16 or float32 or float64
            # float16 only can be set on GPU place
            def test_type():
535 536 537
                images = layers.data(
                    name='pixel', shape=[3, 5, 5], dtype='int32'
                )
538
                conv2d = paddle.nn.Conv2DTranspose(3, 3, [2, 2])
539 540 541 542
                conv2d_ret2 = conv2d(images)

            self.assertRaises(TypeError, test_type)

543 544 545 546 547
    def test_bilinear_tensor_product(self):
        inp_np_x = np.array([[1, 2, 3]]).astype('float32')
        inp_np_y = np.array([[4, 5, 6]]).astype('float32')

        with self.static_graph():
548 549 550 551 552 553
            data_x = layers.data(
                name='x', shape=[1, 3], dtype="float32", append_batch_size=False
            )
            data_y = layers.data(
                name='y', shape=[1, 3], dtype="float32", append_batch_size=False
            )
554
            out = paddle.static.nn.common.bilinear_tensor_product(
555 556 557 558
                data_x,
                data_y,
                6,
                bias_attr=fluid.initializer.ConstantInitializer(value=1),
559 560
                act='sigmoid',
            )
561

562 563 564
            static_rlt = self.get_static_graph_result(
                feed={'x': inp_np_x, 'y': inp_np_y}, fetch_list=[out]
            )[0]
565

566
        with self.static_graph():
567 568 569 570 571 572
            data_x = layers.data(
                name='x', shape=[1, 3], dtype="float32", append_batch_size=False
            )
            data_y = layers.data(
                name='y', shape=[1, 3], dtype="float32", append_batch_size=False
            )
573
            btp = paddle.nn.Bilinear(
574 575
                3,
                3,
576 577
                6,
                bias_attr=fluid.initializer.ConstantInitializer(value=1),
578
            )
579
            out = btp(data_x, data_y)
580
            out = paddle.nn.functional.sigmoid(out)
581 582 583
            static_rlt2 = self.get_static_graph_result(
                feed={'x': inp_np_x, 'y': inp_np_y}, fetch_list=[out]
            )[0]
584
        with self.dynamic_graph():
585
            with _test_eager_guard():
586
                btp = paddle.nn.Bilinear(
587 588 589 590
                    3,
                    3,
                    6,
                    bias_attr=fluid.initializer.ConstantInitializer(value=1),
591 592 593 594
                )
                dy_eager_rlt = btp(
                    base.to_variable(inp_np_x), base.to_variable(inp_np_y)
                )
595
                dy_eager_rlt = paddle.nn.functional.sigmoid(dy_eager_rlt)
596 597
                dy_eager_rlt_value = dy_eager_rlt.numpy()

598
            btp = paddle.nn.Bilinear(
599 600
                3,
                3,
601 602
                6,
                bias_attr=fluid.initializer.ConstantInitializer(value=1),
603
            )
604
            dy_rlt = btp(base.to_variable(inp_np_x), base.to_variable(inp_np_y))
605
            dy_rlt = paddle.nn.functional.sigmoid(dy_rlt)
606
            dy_rlt_value = dy_rlt.numpy()
607

608
        with self.dynamic_graph():
609
            with _test_eager_guard():
610
                btp2 = paddle.nn.Bilinear(3, 3, 6)
611 612 613
                dy_eager_rlt2 = btp2(
                    base.to_variable(inp_np_x), base.to_variable(inp_np_y)
                )
614
                dy_eager_rlt2 = paddle.nn.functional.sigmoid(dy_eager_rlt2)
615 616
                dy_eager_rlt2_value = dy_eager_rlt2.numpy()

617
            btp2 = paddle.nn.Bilinear(3, 3, 6)
618 619 620
            dy_rlt2 = btp2(
                base.to_variable(inp_np_x), base.to_variable(inp_np_y)
            )
621
            dy_rlt2 = paddle.nn.functional.sigmoid(dy_rlt2)
622
            dy_rlt2_value = dy_rlt2.numpy()
623

624
        with self.static_graph():
625 626 627 628 629 630
            data_x2 = layers.data(
                name='x', shape=[1, 3], dtype="float32", append_batch_size=False
            )
            data_y2 = layers.data(
                name='y', shape=[1, 3], dtype="float32", append_batch_size=False
            )
631
            out2 = paddle.static.nn.common.bilinear_tensor_product(
632 633 634 635 636 637
                data_x2, data_y2, 6, act='sigmoid'
            )

            static_rlt3 = self.get_static_graph_result(
                feed={'x': inp_np_x, 'y': inp_np_y}, fetch_list=[out2]
            )[0]
638

639 640 641 642 643
        np.testing.assert_array_equal(dy_rlt2_value, static_rlt3)
        np.testing.assert_array_equal(dy_eager_rlt2_value, static_rlt3)
        np.testing.assert_array_equal(static_rlt2, static_rlt)
        np.testing.assert_array_equal(dy_rlt_value, static_rlt)
        np.testing.assert_array_equal(dy_eager_rlt_value, static_rlt)
644

645
        with self.dynamic_graph():
646 647 648 649
            with _test_eager_guard():
                custom_weight = np.random.randn(6, 3, 3).astype("float32")
                weight_attr = fluid.ParamAttr(
                    initializer=fluid.initializer.NumpyArrayInitializer(
650 651 652
                        custom_weight
                    )
                )
653 654
                btp1 = paddle.nn.Bilinear(3, 3, 6)
                btp2 = paddle.nn.Bilinear(3, 3, 6, weight_attr=weight_attr)
655 656 657
                dy_rlt1 = btp1(
                    base.to_variable(inp_np_x), base.to_variable(inp_np_y)
                )
658
                dy_rlt1 = paddle.nn.functional.sigmoid(dy_rlt1)
659 660 661
                dy_rlt2 = btp2(
                    base.to_variable(inp_np_x), base.to_variable(inp_np_y)
                )
662
                dy_rlt2 = paddle.nn.functional.sigmoid(dy_rlt2)
663
                self.assertFalse(
664 665
                    np.array_equal(dy_rlt1.numpy(), dy_rlt2.numpy())
                )
666 667
                btp2.weight.set_value(btp1.weight.numpy())
                btp2.bias.set_value(btp1.bias)
668 669 670 671 672 673
                dy_rlt1 = btp1(
                    base.to_variable(inp_np_x), base.to_variable(inp_np_y)
                )
                dy_rlt2 = btp2(
                    base.to_variable(inp_np_x), base.to_variable(inp_np_y)
                )
674
                np.testing.assert_array_equal(dy_rlt1.numpy(), dy_rlt2.numpy())
675 676 677

                btp2.weight = btp1.weight
                btp2.bias = btp1.bias
678 679 680 681 682 683
                np.testing.assert_array_equal(
                    btp1.weight.numpy(), btp2.weight.numpy()
                )
                np.testing.assert_array_equal(
                    btp1.bias.numpy(), btp2.bias.numpy()
                )
684

685
            custom_weight = np.random.randn(6, 3, 3).astype("float32")
686 687 688 689 690
            weight_attr = fluid.ParamAttr(
                initializer=fluid.initializer.NumpyArrayInitializer(
                    custom_weight
                )
            )
691 692
            btp1 = paddle.nn.Bilinear(3, 3, 6)
            btp2 = paddle.nn.Bilinear(3, 3, 6, weight_attr=weight_attr)
693 694 695
            dy_rlt1 = btp1(
                base.to_variable(inp_np_x), base.to_variable(inp_np_y)
            )
696
            dy_rlt1 = paddle.nn.functional.sigmoid(dy_rlt1)
697 698 699
            dy_rlt2 = btp2(
                base.to_variable(inp_np_x), base.to_variable(inp_np_y)
            )
700
            dy_rlt2 = paddle.nn.functional.sigmoid(dy_rlt2)
701 702 703
            self.assertFalse(np.array_equal(dy_rlt1.numpy(), dy_rlt2.numpy()))
            btp2.weight.set_value(btp1.weight.numpy())
            btp2.bias.set_value(btp1.bias)
704 705 706 707 708 709
            dy_rlt1 = btp1(
                base.to_variable(inp_np_x), base.to_variable(inp_np_y)
            )
            dy_rlt2 = btp2(
                base.to_variable(inp_np_x), base.to_variable(inp_np_y)
            )
710
            np.testing.assert_array_equal(dy_rlt1.numpy(), dy_rlt2.numpy())
711 712 713

            btp2.weight = btp1.weight
            btp2.bias = btp1.bias
714 715 716
            np.testing.assert_array_equal(
                btp1.weight.numpy(), btp2.weight.numpy()
            )
717
            np.testing.assert_array_equal(btp1.bias.numpy(), btp2.bias.numpy())
718

719 720 721 722 723
    def test_embeding(self):
        inp_word = np.array([[[1]]]).astype('int64')
        dict_size = 20
        with self.static_graph():
            data_t = layers.data(name='word', shape=[1], dtype='int64')
724 725 726 727 728 729 730 731 732
            emb = layers.embedding(
                input=data_t,
                size=[dict_size, 32],
                param_attr='emb.w',
                is_sparse=False,
            )
            static_rlt = self.get_static_graph_result(
                feed={'word': inp_word}, fetch_list=[emb]
            )[0]
733 734
        with self.static_graph():
            data_t = layers.data(name='word', shape=[1], dtype='int64')
735 736
            emb2 = paddle.nn.Embedding(
                dict_size, 32, weight_attr='emb.w', sparse=False
737
            )
738
            emb_rlt = emb2(data_t)
739 740 741
            static_rlt2 = self.get_static_graph_result(
                feed={'word': inp_word}, fetch_list=[emb_rlt]
            )[0]
742
        with self.dynamic_graph():
743
            with _test_eager_guard():
744 745 746 747 748
                emb2 = paddle.nn.Embedding(
                    dict_size,
                    32,
                    weight_attr='eager_emb.w',
                    sparse=False,
749
                )
750 751 752
                dy_eager_rlt = emb2(base.to_variable(inp_word))
                dy_eager_rlt_value = dy_eager_rlt.numpy()

753 754
            emb2 = paddle.nn.Embedding(
                dict_size, 32, weight_attr='emb.w', sparse=False
755
            )
756 757
            dy_rlt = emb2(base.to_variable(inp_word))
            dy_rlt_value = dy_rlt.numpy()
758 759

        self.assertTrue(np.allclose(static_rlt2, static_rlt))
760
        self.assertTrue(np.allclose(dy_rlt_value, static_rlt))
761
        self.assertTrue(np.allclose(dy_eager_rlt_value, static_rlt))
762

763
        with self.dynamic_graph():
764 765 766 767
            with _test_eager_guard():
                custom_weight = np.random.randn(dict_size, 32).astype("float32")
                weight_attr = fluid.ParamAttr(
                    initializer=fluid.initializer.NumpyArrayInitializer(
768 769 770
                        custom_weight
                    )
                )
771 772 773 774 775 776
                emb1 = paddle.nn.Embedding(dict_size, 32, sparse=False)
                emb2 = paddle.nn.Embedding(
                    dict_size,
                    32,
                    weight_attr=weight_attr,
                    sparse=False,
777
                )
778 779 780
                rep1 = emb1(base.to_variable(inp_word))
                rep2 = emb2(base.to_variable(inp_word))
                self.assertFalse(
781 782 783 784 785
                    np.array_equal(emb1.weight.numpy(), custom_weight)
                )
                np.testing.assert_array_equal(
                    emb2.weight.numpy(), custom_weight
                )
786 787 788
                self.assertFalse(np.array_equal(rep1.numpy(), rep2.numpy()))
                emb2.weight.set_value(emb1.weight.numpy())
                rep2 = emb2(base.to_variable(inp_word))
789
                np.testing.assert_array_equal(rep1.numpy(), rep2.numpy())
790 791

                emb2.weight = emb1.weight
792 793 794
                np.testing.assert_array_equal(
                    emb1.weight.numpy(), emb2.weight.numpy()
                )
795

796
            custom_weight = np.random.randn(dict_size, 32).astype("float32")
797 798 799 800 801
            weight_attr = fluid.ParamAttr(
                initializer=fluid.initializer.NumpyArrayInitializer(
                    custom_weight
                )
            )
802 803 804
            emb1 = paddle.nn.Embedding(dict_size, 32, sparse=False)
            emb2 = paddle.nn.Embedding(
                dict_size, 32, weight_attr=weight_attr, sparse=False
805
            )
806 807 808
            rep1 = emb1(base.to_variable(inp_word))
            rep2 = emb2(base.to_variable(inp_word))
            self.assertFalse(np.array_equal(emb1.weight.numpy(), custom_weight))
809
            np.testing.assert_array_equal(emb2.weight.numpy(), custom_weight)
810 811 812
            self.assertFalse(np.array_equal(rep1.numpy(), rep2.numpy()))
            emb2.weight.set_value(emb1.weight.numpy())
            rep2 = emb2(base.to_variable(inp_word))
813
            np.testing.assert_array_equal(rep1.numpy(), rep2.numpy())
814 815

            emb2.weight = emb1.weight
816 817 818
            np.testing.assert_array_equal(
                emb1.weight.numpy(), emb2.weight.numpy()
            )
819

S
songyouwei 已提交
820 821
    def test_one_hot(self):
        with self.dynamic_graph():
822
            with _test_eager_guard():
823 824 825
                label = fluid.dygraph.to_variable(
                    np.array([[1], [1], [3], [0]])
                )
826 827
                one_hot_label1 = fluid.layers.one_hot(input=label, depth=4)
                one_hot_label2 = fluid.layers.one_hot(
828 829 830 831 832
                    input=label, depth=fluid.dygraph.to_variable(np.array([4]))
                )
                np.testing.assert_array_equal(
                    one_hot_label1.numpy(), one_hot_label2.numpy()
                )
833

S
songyouwei 已提交
834 835 836
            label = fluid.dygraph.to_variable(np.array([[1], [1], [3], [0]]))
            one_hot_label1 = fluid.layers.one_hot(input=label, depth=4)
            one_hot_label2 = fluid.layers.one_hot(
837 838 839 840 841
                input=label, depth=fluid.dygraph.to_variable(np.array([4]))
            )
            np.testing.assert_array_equal(
                one_hot_label1.numpy(), one_hot_label2.numpy()
            )
S
songyouwei 已提交
842 843 844

    def test_split(self):
        with self.dynamic_graph():
845 846 847
            with _test_eager_guard():
                input = fluid.dygraph.to_variable(np.random.random((3, 8, 5)))
                x0, x1 = fluid.layers.split(input, num_or_sections=2, dim=1)
848 849 850 851 852
                x00, x11 = fluid.layers.split(
                    input,
                    num_or_sections=2,
                    dim=fluid.dygraph.to_variable(np.array([1])),
                )
853 854
                np.testing.assert_array_equal(x0.numpy(), x00.numpy())
                np.testing.assert_array_equal(x1.numpy(), x11.numpy())
855

S
songyouwei 已提交
856 857
            input = fluid.dygraph.to_variable(np.random.random((3, 8, 5)))
            x0, x1 = fluid.layers.split(input, num_or_sections=2, dim=1)
858 859 860 861 862
            x00, x11 = fluid.layers.split(
                input,
                num_or_sections=2,
                dim=fluid.dygraph.to_variable(np.array([1])),
            )
863 864
            np.testing.assert_array_equal(x0.numpy(), x00.numpy())
            np.testing.assert_array_equal(x1.numpy(), x11.numpy())
S
songyouwei 已提交
865 866 867

    def test_topk(self):
        with self.dynamic_graph():
868 869
            with _test_eager_guard():
                input = fluid.dygraph.to_variable(np.random.random((13, 11)))
870 871
                top5_values1, top5_indices1 = paddle.topk(input, k=5)
                top5_values2, top5_indices2 = paddle.topk(
872 873 874 875 876 877 878 879
                    input, k=fluid.dygraph.to_variable(np.array([5]))
                )
                np.testing.assert_array_equal(
                    top5_values1.numpy(), top5_values2.numpy()
                )
                np.testing.assert_array_equal(
                    top5_indices1.numpy(), top5_indices2.numpy()
                )
880

S
songyouwei 已提交
881
            input = fluid.dygraph.to_variable(np.random.random((13, 11)))
882 883
            top5_values1, top5_indices1 = paddle.topk(input, k=5)
            top5_values2, top5_indices2 = paddle.topk(
884 885 886 887 888 889 890 891
                input, k=fluid.dygraph.to_variable(np.array([5]))
            )
            np.testing.assert_array_equal(
                top5_values1.numpy(), top5_values2.numpy()
            )
            np.testing.assert_array_equal(
                top5_indices1.numpy(), top5_indices2.numpy()
            )
S
songyouwei 已提交
892

L
lujun 已提交
893 894
    def test_conv3d(self):
        with self.static_graph():
895 896 897
            images = layers.data(
                name='pixel', shape=[3, 6, 6, 6], dtype='float32'
            )
898 899 900
            ret = paddle.static.nn.conv3d(
                input=images, num_filters=3, filter_size=2
            )
L
lujun 已提交
901
            static_ret = self.get_static_graph_result(
902
                feed={'pixel': np.ones([2, 3, 6, 6, 6], dtype='float32')},
903 904
                fetch_list=[ret],
            )[0]
L
lujun 已提交
905 906

        with self.static_graph():
907 908 909
            images = layers.data(
                name='pixel', shape=[3, 6, 6, 6], dtype='float32'
            )
910 911 912
            conv3d = paddle.nn.Conv3D(
                in_channels=3, out_channels=3, kernel_size=2
            )
L
lujun 已提交
913 914
            ret = conv3d(images)
            static_ret2 = self.get_static_graph_result(
915
                feed={'pixel': np.ones([2, 3, 6, 6, 6], dtype='float32')},
916 917
                fetch_list=[ret],
            )[0]
L
lujun 已提交
918 919

        with self.dynamic_graph():
920 921
            with _test_eager_guard():
                images = np.ones([2, 3, 6, 6, 6], dtype='float32')
922 923 924
                conv3d = paddle.nn.Conv3D(
                    in_channels=3, out_channels=3, kernel_size=2
                )
925 926 927
                dy_eager_ret = conv3d(base.to_variable(images))
                dy_eager_rlt_value = dy_eager_ret.numpy()

L
lujun 已提交
928
            images = np.ones([2, 3, 6, 6, 6], dtype='float32')
929 930 931
            conv3d = paddle.nn.Conv3D(
                in_channels=3, out_channels=3, kernel_size=2
            )
L
lujun 已提交
932
            dy_ret = conv3d(base.to_variable(images))
933
            dy_rlt_value = dy_ret.numpy()
L
lujun 已提交
934

935 936 937
        np.testing.assert_allclose(static_ret, dy_rlt_value, rtol=1e-05)
        np.testing.assert_allclose(static_ret, dy_eager_rlt_value, rtol=1e-05)
        np.testing.assert_allclose(static_ret, static_ret2, rtol=1e-05)
L
lujun 已提交
938

939
        with self.dynamic_graph():
940 941 942 943 944
            with _test_eager_guard():
                images = np.ones([2, 3, 6, 6, 6], dtype='float32')
                custom_weight = np.random.randn(3, 3, 2, 2, 2).astype("float32")
                weight_attr = fluid.ParamAttr(
                    initializer=fluid.initializer.NumpyArrayInitializer(
945 946 947
                        custom_weight
                    )
                )
948 949
                conv3d1 = paddle.nn.Conv3D(
                    in_channels=3, out_channels=3, kernel_size=2
950
                )
951 952 953 954 955
                conv3d2 = paddle.nn.Conv3D(
                    in_channels=3,
                    out_channels=3,
                    kernel_size=2,
                    weight_attr=weight_attr,
956
                )
957 958 959
                dy_ret1 = conv3d1(base.to_variable(images))
                dy_ret2 = conv3d2(base.to_variable(images))
                self.assertFalse(
960 961
                    np.array_equal(dy_ret1.numpy(), dy_ret2.numpy())
                )
962 963 964 965

                conv3d1_weight_np = conv3d1.weight.numpy()
                conv3d1_bias = conv3d1.bias
                self.assertFalse(
966 967
                    np.array_equal(conv3d1_weight_np, conv3d2.weight.numpy())
                )
968
                conv3d2.weight.set_value(conv3d1_weight_np)
969 970 971
                np.testing.assert_array_equal(
                    conv3d1_weight_np, conv3d2.weight.numpy()
                )
972 973 974
                conv3d1.bias.set_value(conv3d1_bias)
                dy_ret1 = conv3d1(base.to_variable(images))
                dy_ret2 = conv3d2(base.to_variable(images))
975
                np.testing.assert_array_equal(dy_ret1.numpy(), dy_ret2.numpy())
976 977 978

                conv3d2.weight = conv3d1.weight
                conv3d2.bias = conv3d1.bias
979 980 981 982 983 984
                np.testing.assert_array_equal(
                    conv3d1.weight.numpy(), conv3d2.weight.numpy()
                )
                np.testing.assert_array_equal(
                    conv3d1.bias.numpy(), conv3d2.bias.numpy()
                )
985

986 987
            images = np.ones([2, 3, 6, 6, 6], dtype='float32')
            custom_weight = np.random.randn(3, 3, 2, 2, 2).astype("float32")
988 989 990 991 992
            weight_attr = fluid.ParamAttr(
                initializer=fluid.initializer.NumpyArrayInitializer(
                    custom_weight
                )
            )
993 994 995 996 997 998 999 1000
            conv3d1 = paddle.nn.Conv3D(
                in_channels=3, out_channels=3, kernel_size=2
            )
            conv3d2 = paddle.nn.Conv3D(
                in_channels=3,
                out_channels=3,
                kernel_size=2,
                weight_attr=weight_attr,
1001
            )
1002 1003 1004 1005 1006 1007 1008
            dy_ret1 = conv3d1(base.to_variable(images))
            dy_ret2 = conv3d2(base.to_variable(images))
            self.assertFalse(np.array_equal(dy_ret1.numpy(), dy_ret2.numpy()))

            conv3d1_weight_np = conv3d1.weight.numpy()
            conv3d1_bias = conv3d1.bias
            self.assertFalse(
1009 1010
                np.array_equal(conv3d1_weight_np, conv3d2.weight.numpy())
            )
1011
            conv3d2.weight.set_value(conv3d1_weight_np)
1012 1013 1014
            np.testing.assert_array_equal(
                conv3d1_weight_np, conv3d2.weight.numpy()
            )
1015 1016 1017
            conv3d1.bias.set_value(conv3d1_bias)
            dy_ret1 = conv3d1(base.to_variable(images))
            dy_ret2 = conv3d2(base.to_variable(images))
1018
            np.testing.assert_array_equal(dy_ret1.numpy(), dy_ret2.numpy())
1019 1020 1021

            conv3d2.weight = conv3d1.weight
            conv3d2.bias = conv3d1.bias
1022 1023 1024 1025 1026 1027
            np.testing.assert_array_equal(
                conv3d1.weight.numpy(), conv3d2.weight.numpy()
            )
            np.testing.assert_array_equal(
                conv3d1.bias.numpy(), conv3d2.bias.numpy()
            )
1028

1029
    def func_group_norm(self):
L
lujun 已提交
1030 1031 1032 1033 1034 1035 1036 1037 1038 1039
        if core.is_compiled_with_cuda():
            place = core.CUDAPlace(0)
        else:
            place = core.CPUPlace()

        shape = (2, 4, 3, 3)

        input = np.random.random(shape).astype('float32')

        with self.static_graph():
1040 1041 1042 1043 1044 1045 1046
            X = fluid.layers.data(
                name='X',
                shape=shape,
                dtype='float32',
                lod_level=1,
                append_batch_size=False,
            )
1047
            ret = paddle.static.nn.group_norm(
1048 1049
                input=X,
                groups=2,
1050
                param_attr=fluid.initializer.Uniform(low=-0.5, high=0.5),
1051 1052 1053 1054 1055 1056 1057 1058 1059 1060 1061
                bias_attr=fluid.initializer.ConstantInitializer(value=1),
            )
            static_ret = self.get_static_graph_result(
                feed={
                    'X': fluid.create_lod_tensor(
                        data=input, recursive_seq_lens=[[1, 1]], place=place
                    )
                },
                fetch_list=[ret],
                with_lod=True,
            )[0]
L
lujun 已提交
1062 1063

        with self.static_graph():
1064 1065 1066 1067 1068 1069 1070
            X = fluid.layers.data(
                name='X',
                shape=shape,
                dtype='float32',
                lod_level=1,
                append_batch_size=False,
            )
1071 1072 1073 1074
            groupNorm = paddle.nn.GroupNorm(
                num_channels=shape[1],
                num_groups=2,
                weight_attr=fluid.initializer.Uniform(low=-0.5, high=0.5),
1075 1076
                bias_attr=fluid.initializer.ConstantInitializer(value=1),
            )
L
lujun 已提交
1077
            ret = groupNorm(X)
1078 1079 1080 1081 1082 1083 1084 1085 1086
            static_ret2 = self.get_static_graph_result(
                feed={
                    'X': fluid.create_lod_tensor(
                        data=input, recursive_seq_lens=[[1, 1]], place=place
                    )
                },
                fetch_list=[ret],
                with_lod=True,
            )[0]
L
lujun 已提交
1087 1088

        with self.dynamic_graph():
1089 1090 1091 1092
            groupNorm = paddle.nn.GroupNorm(
                num_channels=shape[1],
                num_groups=2,
                weight_attr=fluid.initializer.Uniform(low=-0.5, high=0.5),
1093 1094
                bias_attr=fluid.initializer.ConstantInitializer(value=1),
            )
L
lujun 已提交
1095
            dy_ret = groupNorm(base.to_variable(input))
1096
            dy_rlt_value = dy_ret.numpy()
L
lujun 已提交
1097

1098 1099
        np.testing.assert_allclose(static_ret, dy_rlt_value, rtol=1e-05)
        np.testing.assert_allclose(static_ret, static_ret2, rtol=1e-05)
L
lujun 已提交
1100

1101 1102 1103 1104 1105
    def test_group_norm(self):
        with _test_eager_guard():
            self.func_group_norm()
        self.func_group_norm()

1106 1107 1108 1109 1110 1111 1112 1113 1114 1115 1116
    def test_instance_norm(self):
        if core.is_compiled_with_cuda():
            place = core.CUDAPlace(0)
        else:
            place = core.CPUPlace()

        shape = (2, 4, 3, 3)

        input = np.random.random(shape).astype('float32')

        with self.static_graph():
1117 1118 1119
            X = fluid.layers.data(
                name='X', shape=shape, dtype='float32', append_batch_size=False
            )
1120
            ret = paddle.static.nn.instance_norm(input=X)
1121 1122 1123
            static_ret = self.get_static_graph_result(
                feed={'X': input}, fetch_list=[ret]
            )[0]
1124 1125

        with self.static_graph():
1126 1127 1128
            X = fluid.layers.data(
                name='X', shape=shape, dtype='float32', append_batch_size=False
            )
1129
            instanceNorm = paddle.nn.InstanceNorm2D(num_features=shape[1])
1130
            ret = instanceNorm(X)
1131 1132 1133
            static_ret2 = self.get_static_graph_result(
                feed={'X': input}, fetch_list=[ret]
            )[0]
1134 1135

        with self.dynamic_graph():
1136
            with _test_eager_guard():
1137
                instanceNorm = paddle.nn.InstanceNorm2D(num_features=shape[1])
1138 1139 1140
                dy_eager_ret = instanceNorm(base.to_variable(input))
                dy_eager_rlt_value = dy_eager_ret.numpy()

1141
            instanceNorm = paddle.nn.InstanceNorm2D(num_features=shape[1])
1142 1143 1144 1145
            dy_ret = instanceNorm(base.to_variable(input))
            dy_rlt_value = dy_ret.numpy()

        with self.dynamic_graph():
1146
            with _test_eager_guard():
1147
                instanceNorm = paddle.nn.InstanceNorm2D(num_features=shape[1])
1148 1149 1150
                dy_eager_ret = instanceNorm(base.to_variable(input))
                dy_eager_rlt_value2 = dy_eager_ret.numpy()

1151
            instanceNorm = paddle.nn.InstanceNorm2D(num_features=shape[1])
1152 1153 1154
            dy_ret = instanceNorm(base.to_variable(input))
            dy_rlt_value2 = dy_ret.numpy()

1155 1156 1157 1158 1159
        np.testing.assert_allclose(static_ret, dy_rlt_value, rtol=1e-05)
        np.testing.assert_allclose(static_ret, dy_rlt_value2, rtol=1e-05)
        np.testing.assert_allclose(static_ret, dy_eager_rlt_value, rtol=1e-05)
        np.testing.assert_allclose(static_ret, dy_eager_rlt_value2, rtol=1e-05)
        np.testing.assert_allclose(static_ret, static_ret2, rtol=1e-05)
1160 1161 1162 1163

        with self.static_graph():
            # the input of InstanceNorm must be Variable.
            def test_Variable():
1164
                instanceNorm = paddle.nn.InstanceNorm2D(num_features=shape[1])
1165 1166 1167 1168 1169 1170 1171
                ret1 = instanceNorm(input)

            self.assertRaises(TypeError, test_Variable)

            # the input dtype of InstanceNorm must be float32 or float64
            def test_type():
                input = np.random.random(shape).astype('int32')
1172
                instanceNorm = paddle.nn.InstanceNorm2D(num_features=shape[1])
1173 1174 1175 1176
                ret2 = instanceNorm(input)

            self.assertRaises(TypeError, test_type)

L
lujun 已提交
1177 1178 1179 1180 1181 1182 1183 1184 1185 1186 1187
    def test_spectral_norm(self):
        if core.is_compiled_with_cuda():
            place = core.CUDAPlace(0)
        else:
            place = core.CPUPlace()

        shape = (2, 4, 3, 3)

        input = np.random.random(shape).astype('float32')

        with self.static_graph():
1188 1189 1190 1191 1192 1193 1194
            Weight = fluid.layers.data(
                name='Weight',
                shape=shape,
                dtype='float32',
                lod_level=1,
                append_batch_size=False,
            )
L
lujun 已提交
1195
            ret = layers.spectral_norm(weight=Weight, dim=1, power_iters=2)
1196 1197 1198 1199 1200 1201 1202 1203 1204
            static_ret = self.get_static_graph_result(
                feed={
                    'Weight': fluid.create_lod_tensor(
                        data=input, recursive_seq_lens=[[1, 1]], place=place
                    ),
                },
                fetch_list=[ret],
                with_lod=True,
            )[0]
L
lujun 已提交
1205 1206

        with self.static_graph():
1207 1208 1209 1210 1211 1212 1213
            Weight = fluid.layers.data(
                name='Weight',
                shape=shape,
                dtype='float32',
                lod_level=1,
                append_batch_size=False,
            )
1214
            spectralNorm = paddle.nn.SpectralNorm(shape, axis=1, power_iters=2)
L
lujun 已提交
1215
            ret = spectralNorm(Weight)
1216 1217 1218 1219 1220 1221 1222 1223 1224
            static_ret2 = self.get_static_graph_result(
                feed={
                    'Weight': fluid.create_lod_tensor(
                        data=input, recursive_seq_lens=[[1, 1]], place=place
                    )
                },
                fetch_list=[ret],
                with_lod=True,
            )[0]
L
lujun 已提交
1225 1226

        with self.dynamic_graph():
1227
            with _test_eager_guard():
1228 1229 1230
                spectralNorm = paddle.nn.SpectralNorm(
                    shape, axis=1, power_iters=2
                )
1231 1232 1233
                dy_eager_ret = spectralNorm(base.to_variable(input))
                dy_eager_rlt_value = dy_eager_ret.numpy()

1234
            spectralNorm = paddle.nn.SpectralNorm(shape, axis=1, power_iters=2)
L
lujun 已提交
1235
            dy_ret = spectralNorm(base.to_variable(input))
1236
            dy_rlt_value = dy_ret.numpy()
L
lujun 已提交
1237

1238 1239 1240
        np.testing.assert_allclose(static_ret, dy_rlt_value, rtol=1e-05)
        np.testing.assert_allclose(static_ret, dy_eager_rlt_value, rtol=1e-05)
        np.testing.assert_allclose(static_ret, static_ret2, rtol=1e-05)
L
lujun 已提交
1241 1242

    def test_conv3d_transpose(self):
1243 1244 1245
        input_array = (
            np.arange(0, 48).reshape([2, 3, 2, 2, 2]).astype('float32')
        )
L
lujun 已提交
1246 1247 1248

        with self.static_graph():
            img = layers.data(name='pixel', shape=[3, 2, 2, 2], dtype='float32')
1249
            out = paddle.static.nn.conv3d_transpose(
1250
                input=img, num_filters=12, filter_size=12, use_cudnn=True
1251
            )
L
lujun 已提交
1252
            static_rlt = self.get_static_graph_result(
1253 1254
                feed={'pixel': input_array}, fetch_list=[out]
            )[0]
L
lujun 已提交
1255 1256
        with self.static_graph():
            img = layers.data(name='pixel', shape=[3, 2, 2, 2], dtype='float32')
1257 1258
            conv3d_transpose = paddle.nn.Conv3DTranspose(
                in_channels=3, out_channels=12, kernel_size=12
1259
            )
L
lujun 已提交
1260 1261
            out = conv3d_transpose(img)
            static_rlt2 = self.get_static_graph_result(
1262 1263
                feed={'pixel': input_array}, fetch_list=[out]
            )[0]
L
lujun 已提交
1264
        with self.dynamic_graph():
1265
            with _test_eager_guard():
1266 1267 1268 1269
                conv3d_transpose = paddle.nn.Conv3DTranspose(
                    in_channels=3,
                    out_channels=12,
                    kernel_size=12,
1270
                )
1271 1272 1273
                dy_eager_rlt = conv3d_transpose(base.to_variable(input_array))
                dy_eager_rlt_value = dy_eager_rlt.numpy()

1274 1275
            conv3d_transpose = paddle.nn.Conv3DTranspose(
                in_channels=3, out_channels=12, kernel_size=12
1276
            )
L
lujun 已提交
1277
            dy_rlt = conv3d_transpose(base.to_variable(input_array))
1278
            dy_rlt_value = dy_rlt.numpy()
1279 1280 1281
        np.testing.assert_allclose(static_rlt2, static_rlt, rtol=1e-05)
        np.testing.assert_allclose(dy_rlt_value, static_rlt, rtol=1e-05)
        np.testing.assert_allclose(dy_eager_rlt_value, static_rlt, rtol=1e-05)
L
lujun 已提交
1282

1283
        with self.dynamic_graph():
1284 1285 1286 1287 1288
            with _test_eager_guard():
                images = np.ones([2, 3, 6, 6, 6], dtype='float32')
                custom_weight = np.random.randn(3, 3, 2, 2, 2).astype("float32")
                weight_attr = fluid.ParamAttr(
                    initializer=fluid.initializer.NumpyArrayInitializer(
1289 1290 1291
                        custom_weight
                    )
                )
1292 1293 1294 1295
                conv3d1 = paddle.nn.Conv3DTranspose(
                    in_channels=3,
                    out_channels=3,
                    kernel_size=2,
1296 1297
                    bias_attr='eager_conv3d1_b',
                )
1298 1299 1300 1301 1302
                conv3d2 = paddle.nn.Conv3DTranspose(
                    in_channels=3,
                    out_channels=3,
                    kernel_size=2,
                    weight_attr=weight_attr,
1303 1304
                    bias_attr='eager_conv3d2_b',
                )
1305 1306 1307
                dy_ret1 = conv3d1(base.to_variable(images))
                dy_ret2 = conv3d2(base.to_variable(images))
                self.assertFalse(
1308 1309
                    np.array_equal(dy_ret1.numpy(), dy_ret2.numpy())
                )
1310 1311 1312 1313

                conv3d1_weight_np = conv3d1.weight.numpy()
                conv3d1_bias = conv3d1.bias
                self.assertFalse(
1314 1315
                    np.array_equal(conv3d1_weight_np, conv3d2.weight.numpy())
                )
1316
                conv3d2.weight.set_value(conv3d1_weight_np)
1317 1318 1319
                np.testing.assert_array_equal(
                    conv3d1_weight_np, conv3d2.weight.numpy()
                )
1320 1321 1322
                conv3d1.bias.set_value(conv3d1_bias)
                dy_ret1 = conv3d1(base.to_variable(images))
                dy_ret2 = conv3d2(base.to_variable(images))
1323
                np.testing.assert_array_equal(dy_ret1.numpy(), dy_ret2.numpy())
1324 1325 1326

                conv3d2.weight = conv3d1.weight
                conv3d2.bias = conv3d1.bias
1327 1328 1329 1330 1331 1332
                np.testing.assert_array_equal(
                    conv3d1.weight.numpy(), conv3d2.weight.numpy()
                )
                np.testing.assert_array_equal(
                    conv3d1.bias.numpy(), conv3d2.bias.numpy()
                )
1333

1334 1335
            images = np.ones([2, 3, 6, 6, 6], dtype='float32')
            custom_weight = np.random.randn(3, 3, 2, 2, 2).astype("float32")
1336 1337 1338 1339 1340
            weight_attr = fluid.ParamAttr(
                initializer=fluid.initializer.NumpyArrayInitializer(
                    custom_weight
                )
            )
1341 1342 1343 1344
            conv3d1 = paddle.nn.Conv3DTranspose(
                in_channels=3,
                out_channels=3,
                kernel_size=2,
1345 1346
                bias_attr='conv3d1_b',
            )
1347 1348 1349 1350 1351
            conv3d2 = paddle.nn.Conv3DTranspose(
                in_channels=3,
                out_channels=3,
                kernel_size=2,
                weight_attr=weight_attr,
1352 1353
                bias_attr='conv3d2_b',
            )
1354 1355 1356 1357 1358 1359 1360
            dy_ret1 = conv3d1(base.to_variable(images))
            dy_ret2 = conv3d2(base.to_variable(images))
            self.assertFalse(np.array_equal(dy_ret1.numpy(), dy_ret2.numpy()))

            conv3d1_weight_np = conv3d1.weight.numpy()
            conv3d1_bias = conv3d1.bias
            self.assertFalse(
1361 1362
                np.array_equal(conv3d1_weight_np, conv3d2.weight.numpy())
            )
1363
            conv3d2.weight.set_value(conv3d1_weight_np)
1364 1365 1366
            np.testing.assert_array_equal(
                conv3d1_weight_np, conv3d2.weight.numpy()
            )
1367 1368 1369
            conv3d1.bias.set_value(conv3d1_bias)
            dy_ret1 = conv3d1(base.to_variable(images))
            dy_ret2 = conv3d2(base.to_variable(images))
1370
            np.testing.assert_array_equal(dy_ret1.numpy(), dy_ret2.numpy())
1371 1372 1373

            conv3d2.weight = conv3d1.weight
            conv3d2.bias = conv3d1.bias
1374 1375 1376 1377 1378 1379
            np.testing.assert_array_equal(
                conv3d1.weight.numpy(), conv3d2.weight.numpy()
            )
            np.testing.assert_array_equal(
                conv3d1.bias.numpy(), conv3d2.bias.numpy()
            )
1380

1381
    def func_while_loop(self):
1382 1383 1384 1385 1386
        with self.static_graph():
            i = layers.fill_constant(shape=[1], dtype='int64', value=0)
            ten = layers.fill_constant(shape=[1], dtype='int64', value=10)

            def cond(i):
L
LiYuRio 已提交
1387
                return paddle.less_than(i, ten)
1388 1389 1390 1391

            def body(i):
                return i + 1

1392
            out = paddle.static.nn.while_loop(cond, body, [i])
1393 1394 1395 1396 1397 1398
            static_ret = self.get_static_graph_result(feed={}, fetch_list=out)

        with self.dynamic_graph():
            i = layers.fill_constant(shape=[1], dtype='int64', value=0)
            ten = layers.fill_constant(shape=[1], dtype='int64', value=10)

1399
            def cond1(i):
L
LiYuRio 已提交
1400
                return paddle.less_than(i, ten)
1401

1402
            def body1(i):
1403 1404
                return i + 1

1405
            dy_ret = paddle.static.nn.while_loop(cond1, body1, [i])
1406 1407 1408 1409 1410 1411
            with self.assertRaises(ValueError):
                j = layers.fill_constant(shape=[1], dtype='int64', value=0)

                def body2(i):
                    return i + 1, i + 2

1412
                paddle.static.nn.while_loop(cond1, body2, [j])
1413

1414
        np.testing.assert_array_equal(static_ret[0], dy_ret[0].numpy())
1415

1416 1417 1418 1419 1420
    def test_while_loop(self):
        with _test_eager_guard():
            self.func_while_loop()
        self.func_while_loop()

1421 1422 1423 1424 1425 1426 1427
    def test_compare(self):
        value_a = np.arange(3)
        value_b = np.arange(3)
        # less than
        with self.static_graph():
            a = layers.data(name='a', shape=[1], dtype='int64')
            b = layers.data(name='b', shape=[1], dtype='int64')
L
LiYuRio 已提交
1428
            cond = paddle.less_than(x=a, y=b)
1429 1430 1431
            static_ret = self.get_static_graph_result(
                feed={"a": value_a, "b": value_b}, fetch_list=[cond]
            )[0]
1432
        with self.dynamic_graph():
1433 1434 1435
            with _test_eager_guard():
                da = base.to_variable(value_a)
                db = base.to_variable(value_b)
L
LiYuRio 已提交
1436
                dcond = paddle.less_than(x=da, y=db)
1437 1438 1439 1440

                for i in range(len(static_ret)):
                    self.assertTrue(dcond.numpy()[i] == static_ret[i])

1441 1442
            da = base.to_variable(value_a)
            db = base.to_variable(value_b)
L
LiYuRio 已提交
1443
            dcond = paddle.less_than(x=da, y=db)
1444

1445 1446
            for i in range(len(static_ret)):
                self.assertTrue(dcond.numpy()[i] == static_ret[i])
1447 1448 1449 1450 1451

        # less equal
        with self.static_graph():
            a1 = layers.data(name='a1', shape=[1], dtype='int64')
            b1 = layers.data(name='b1', shape=[1], dtype='int64')
1452
            cond1 = paddle.less_equal(x=a1, y=b1)
1453 1454 1455
            static_ret1 = self.get_static_graph_result(
                feed={"a1": value_a, "b1": value_b}, fetch_list=[cond1]
            )[0]
1456
        with self.dynamic_graph():
1457 1458 1459
            with _test_eager_guard():
                da1 = base.to_variable(value_a)
                db1 = base.to_variable(value_b)
1460
                dcond1 = paddle.less_equal(x=da1, y=db1)
1461 1462 1463 1464

                for i in range(len(static_ret1)):
                    self.assertTrue(dcond1.numpy()[i] == static_ret1[i])

1465 1466
            da1 = base.to_variable(value_a)
            db1 = base.to_variable(value_b)
1467
            dcond1 = paddle.less_equal(x=da1, y=db1)
1468 1469 1470 1471

            for i in range(len(static_ret1)):
                self.assertTrue(dcond1.numpy()[i] == static_ret1[i])

1472
        # greater than
1473 1474 1475
        with self.static_graph():
            a2 = layers.data(name='a2', shape=[1], dtype='int64')
            b2 = layers.data(name='b2', shape=[1], dtype='int64')
1476
            cond2 = paddle.greater_than(x=a2, y=b2)
1477 1478 1479
            static_ret2 = self.get_static_graph_result(
                feed={"a2": value_a, "b2": value_b}, fetch_list=[cond2]
            )[0]
1480
        with self.dynamic_graph():
1481 1482 1483
            with _test_eager_guard():
                da2 = base.to_variable(value_a)
                db2 = base.to_variable(value_b)
1484
                dcond2 = paddle.greater_than(x=da2, y=db2)
1485 1486 1487 1488

                for i in range(len(static_ret2)):
                    self.assertTrue(dcond2.numpy()[i] == static_ret2[i])

1489 1490
            da2 = base.to_variable(value_a)
            db2 = base.to_variable(value_b)
1491
            dcond2 = paddle.greater_than(x=da2, y=db2)
1492 1493 1494 1495

            for i in range(len(static_ret2)):
                self.assertTrue(dcond2.numpy()[i] == static_ret2[i])

1496
        # greater equal
1497 1498 1499
        with self.static_graph():
            a3 = layers.data(name='a3', shape=[1], dtype='int64')
            b3 = layers.data(name='b3', shape=[1], dtype='int64')
1500
            cond3 = paddle.greater_equal(x=a3, y=b3)
1501 1502 1503
            static_ret3 = self.get_static_graph_result(
                feed={"a3": value_a, "b3": value_b}, fetch_list=[cond3]
            )[0]
1504
        with self.dynamic_graph():
1505 1506 1507
            with _test_eager_guard():
                da3 = base.to_variable(value_a)
                db3 = base.to_variable(value_b)
1508
                dcond3 = paddle.greater_equal(x=da3, y=db3)
1509 1510 1511 1512

                for i in range(len(static_ret3)):
                    self.assertTrue(dcond3.numpy()[i] == static_ret3[i])

1513 1514
            da3 = base.to_variable(value_a)
            db3 = base.to_variable(value_b)
1515
            dcond3 = paddle.greater_equal(x=da3, y=db3)
1516 1517 1518 1519 1520 1521 1522 1523

            for i in range(len(static_ret3)):
                self.assertTrue(dcond3.numpy()[i] == static_ret3[i])

        # equal
        with self.static_graph():
            a4 = layers.data(name='a4', shape=[1], dtype='int64')
            b4 = layers.data(name='b4', shape=[1], dtype='int64')
1524
            cond4 = paddle.equal(x=a4, y=b4)
1525 1526 1527
            static_ret4 = self.get_static_graph_result(
                feed={"a4": value_a, "b4": value_b}, fetch_list=[cond4]
            )[0]
1528
        with self.dynamic_graph():
1529 1530 1531
            with _test_eager_guard():
                da4 = base.to_variable(value_a)
                db4 = base.to_variable(value_b)
1532
                dcond4 = paddle.equal(x=da4, y=db4)
1533 1534 1535 1536

                for i in range(len(static_ret4)):
                    self.assertTrue(dcond4.numpy()[i] == static_ret4[i])

1537 1538
            da4 = base.to_variable(value_a)
            db4 = base.to_variable(value_b)
1539
            dcond4 = paddle.equal(x=da4, y=db4)
1540 1541 1542 1543 1544 1545 1546 1547

            for i in range(len(static_ret4)):
                self.assertTrue(dcond4.numpy()[i] == static_ret4[i])

        # not equal
        with self.static_graph():
            a5 = layers.data(name='a5', shape=[1], dtype='int64')
            b5 = layers.data(name='b5', shape=[1], dtype='int64')
1548
            cond5 = paddle.equal(x=a5, y=b5)
1549 1550 1551
            static_ret5 = self.get_static_graph_result(
                feed={"a5": value_a, "b5": value_b}, fetch_list=[cond5]
            )[0]
1552
        with self.dynamic_graph():
1553 1554 1555
            with _test_eager_guard():
                da5 = base.to_variable(value_a)
                db5 = base.to_variable(value_b)
1556
                dcond5 = paddle.equal(x=da5, y=db5)
1557 1558 1559 1560

                for i in range(len(static_ret5)):
                    self.assertTrue(dcond5.numpy()[i] == static_ret5[i])

1561 1562
            da5 = base.to_variable(value_a)
            db5 = base.to_variable(value_b)
1563
            dcond5 = paddle.equal(x=da5, y=db5)
1564 1565 1566 1567

            for i in range(len(static_ret5)):
                self.assertTrue(dcond5.numpy()[i] == static_ret5[i])

1568 1569
    def test_cond(self):
        def less_than_branch(a, b):
1570
            return paddle.add(a, b)
1571 1572

        def greater_equal_branch(a, b):
1573
            return paddle.subtract(a, b)
1574 1575

        with self.static_graph():
1576 1577 1578 1579 1580 1581
            a = fluid.layers.fill_constant(
                shape=[1], dtype='float32', value=0.1
            )
            b = fluid.layers.fill_constant(
                shape=[1], dtype='float32', value=0.23
            )
1582
            out = paddle.static.nn.cond(
1583 1584 1585 1586 1587 1588 1589 1590 1591
                a >= b,
                lambda: greater_equal_branch(a, b),
                lambda: less_than_branch(a, b),
            )
            place = (
                fluid.CUDAPlace(0)
                if core.is_compiled_with_cuda()
                else fluid.CPUPlace()
            )
1592 1593 1594 1595 1596
            exe = fluid.Executor(place)
            ret = exe.run(fetch_list=[out])
            static_res = ret[0]

        with self.dynamic_graph():
1597 1598 1599
            with _test_eager_guard():
                a = fluid.dygraph.to_variable(np.array([0.1]).astype('float32'))
                b = fluid.dygraph.to_variable(
1600 1601
                    np.array([0.23]).astype('float32')
                )
1602
                out = paddle.static.nn.cond(
1603 1604 1605 1606
                    a < b,
                    lambda: less_than_branch(a, b),
                    lambda: greater_equal_branch(a, b),
                )
1607
                out2 = paddle.static.nn.cond(
1608 1609 1610 1611
                    a >= b,
                    lambda: greater_equal_branch(a, b),
                    lambda: less_than_branch(a, b),
                )
1612 1613
                eager_dynamic_res = out.numpy()
                eager_dynamic_res2 = out2.numpy()
1614 1615 1616
                np.testing.assert_array_equal(
                    eager_dynamic_res, eager_dynamic_res2
                )
1617
                with self.assertRaises(TypeError):
1618
                    paddle.static.nn.cond(a < b, 'str', 'str')
1619
                with self.assertRaises(TypeError):
1620
                    paddle.static.nn.cond(a >= b, 'str', 'str')
1621

1622 1623
            a = fluid.dygraph.to_variable(np.array([0.1]).astype('float32'))
            b = fluid.dygraph.to_variable(np.array([0.23]).astype('float32'))
1624
            out = paddle.static.nn.cond(
1625 1626 1627 1628
                a < b,
                lambda: less_than_branch(a, b),
                lambda: greater_equal_branch(a, b),
            )
1629
            out2 = paddle.static.nn.cond(
1630 1631 1632 1633
                a >= b,
                lambda: greater_equal_branch(a, b),
                lambda: less_than_branch(a, b),
            )
1634 1635
            dynamic_res = out.numpy()
            dynamic_res2 = out2.numpy()
1636
            np.testing.assert_array_equal(dynamic_res, dynamic_res2)
1637
            with self.assertRaises(TypeError):
1638
                paddle.static.nn.cond(a < b, 'str', 'str')
1639
            with self.assertRaises(TypeError):
1640
                paddle.static.nn.cond(a >= b, 'str', 'str')
1641

1642 1643
        np.testing.assert_array_equal(static_res, dynamic_res)
        np.testing.assert_array_equal(static_res, eager_dynamic_res)
1644

1645 1646 1647 1648 1649 1650 1651 1652 1653 1654 1655 1656 1657 1658 1659
    def test_case(self):
        def fn_1():
            return layers.fill_constant(shape=[1, 2], dtype='float32', value=1)

        def fn_2():
            return layers.fill_constant(shape=[2, 2], dtype='int32', value=2)

        def fn_3():
            return layers.fill_constant(shape=[3], dtype='int32', value=3)

        with self.static_graph():
            x = layers.fill_constant(shape=[1], dtype='float32', value=0.3)
            y = layers.fill_constant(shape=[1], dtype='float32', value=0.1)
            z = layers.fill_constant(shape=[1], dtype='float32', value=0.2)

L
LiYuRio 已提交
1660 1661
            pred_1 = paddle.less_than(z, x)  # true: 0.2 < 0.3
            pred_2 = paddle.less_than(x, y)  # false: 0.3 < 0.1
1662
            pred_3 = paddle.equal(x, y)  # false: 0.3 == 0.1
1663

1664
            out_1 = paddle.static.nn.case(
1665 1666
                pred_fn_pairs=[(pred_1, fn_1), (pred_2, fn_2)], default=fn_3
            )
1667 1668 1669
            out_2 = paddle.static.nn.case(
                pred_fn_pairs=[(pred_2, fn_2), (pred_3, fn_3)]
            )
1670

1671 1672 1673 1674 1675
            place = (
                fluid.CUDAPlace(0)
                if core.is_compiled_with_cuda()
                else fluid.CPUPlace()
            )
1676 1677 1678 1679
            exe = fluid.Executor(place)
            static_res1, static_res2 = exe.run(fetch_list=[out_1, out_2])

        with self.dynamic_graph():
1680 1681 1682 1683 1684
            with _test_eager_guard():
                x = layers.fill_constant(shape=[1], dtype='float32', value=0.3)
                y = layers.fill_constant(shape=[1], dtype='float32', value=0.1)
                z = layers.fill_constant(shape=[1], dtype='float32', value=0.2)

L
LiYuRio 已提交
1685 1686
                pred_1 = paddle.less_than(z, x)  # true: 0.2 < 0.3
                pred_2 = paddle.less_than(x, y)  # false: 0.3 < 0.1
1687
                pred_3 = paddle.equal(x, y)  # false: 0.3 == 0.1
1688

1689
                out_1 = paddle.static.nn.case(
1690 1691
                    pred_fn_pairs=[(pred_1, fn_1), (pred_2, fn_2)], default=fn_3
                )
1692
                out_2 = paddle.static.nn.case(
1693 1694
                    pred_fn_pairs=[(pred_2, fn_2), (pred_3, fn_3)]
                )
1695 1696 1697
                eager_dynamic_res1 = out_1.numpy()
                eager_dynamic_res2 = out_2.numpy()

1698 1699 1700 1701
            x = layers.fill_constant(shape=[1], dtype='float32', value=0.3)
            y = layers.fill_constant(shape=[1], dtype='float32', value=0.1)
            z = layers.fill_constant(shape=[1], dtype='float32', value=0.2)

L
LiYuRio 已提交
1702 1703
            pred_1 = paddle.less_than(z, x)  # true: 0.2 < 0.3
            pred_2 = paddle.less_than(x, y)  # false: 0.3 < 0.1
1704
            pred_3 = paddle.equal(x, y)  # false: 0.3 == 0.1
1705

1706
            out_1 = paddle.static.nn.case(
1707 1708
                pred_fn_pairs=[(pred_1, fn_1), (pred_2, fn_2)], default=fn_3
            )
1709 1710 1711
            out_2 = paddle.static.nn.case(
                pred_fn_pairs=[(pred_2, fn_2), (pred_3, fn_3)]
            )
1712 1713 1714
            dynamic_res1 = out_1.numpy()
            dynamic_res2 = out_2.numpy()

1715 1716 1717 1718
        np.testing.assert_array_equal(static_res1, dynamic_res1)
        np.testing.assert_array_equal(static_res2, dynamic_res2)
        np.testing.assert_array_equal(static_res1, eager_dynamic_res1)
        np.testing.assert_array_equal(static_res2, eager_dynamic_res2)
1719 1720 1721 1722 1723 1724 1725 1726 1727 1728 1729 1730 1731 1732 1733

    def test_switch_case(self):
        def fn_1():
            return layers.fill_constant(shape=[1, 2], dtype='float32', value=1)

        def fn_2():
            return layers.fill_constant(shape=[2, 2], dtype='int32', value=2)

        def fn_3():
            return layers.fill_constant(shape=[3], dtype='int32', value=3)

        with self.static_graph():
            index_1 = layers.fill_constant(shape=[1], dtype='int32', value=1)
            index_2 = layers.fill_constant(shape=[1], dtype='int32', value=2)

1734
            out_1 = paddle.static.nn.switch_case(
1735 1736 1737 1738
                branch_index=index_1,
                branch_fns={1: fn_1, 2: fn_2},
                default=fn_3,
            )
1739
            out_2 = paddle.static.nn.switch_case(
1740 1741 1742 1743
                branch_index=index_2,
                branch_fns=[(1, fn_1), (2, fn_2)],
                default=fn_3,
            )
1744
            out_3 = paddle.static.nn.switch_case(
1745 1746 1747 1748 1749 1750 1751 1752 1753
                branch_index=index_2,
                branch_fns=[(0, fn_1), (4, fn_2), (7, fn_3)],
            )

            place = (
                fluid.CUDAPlace(0)
                if core.is_compiled_with_cuda()
                else fluid.CPUPlace()
            )
1754 1755
            exe = fluid.Executor(place)
            static_res1, static_res2, static_res3 = exe.run(
1756 1757
                fetch_list=[out_1, out_2, out_3]
            )
1758 1759

        with self.dynamic_graph():
1760
            with _test_eager_guard():
1761 1762 1763 1764 1765 1766 1767
                index_1 = layers.fill_constant(
                    shape=[1], dtype='int32', value=1
                )
                index_2 = layers.fill_constant(
                    shape=[1], dtype='int32', value=2
                )

1768
                out_1 = paddle.static.nn.switch_case(
1769 1770 1771 1772
                    branch_index=index_1,
                    branch_fns={1: fn_1, 2: fn_2},
                    default=fn_3,
                )
1773
                out_2 = paddle.static.nn.switch_case(
1774 1775 1776 1777
                    branch_index=index_2,
                    branch_fns=[(1, fn_1), (2, fn_2)],
                    default=fn_3,
                )
1778
                out_3 = paddle.static.nn.switch_case(
1779 1780 1781
                    branch_index=index_2,
                    branch_fns=[(0, fn_1), (4, fn_2), (7, fn_3)],
                )
1782 1783 1784 1785 1786

                eager_dynamic_res1 = out_1.numpy()
                eager_dynamic_res2 = out_2.numpy()
                eager_dynamic_res3 = out_3.numpy()

1787 1788 1789
            index_1 = layers.fill_constant(shape=[1], dtype='int32', value=1)
            index_2 = layers.fill_constant(shape=[1], dtype='int32', value=2)

1790
            out_1 = paddle.static.nn.switch_case(
1791 1792 1793 1794
                branch_index=index_1,
                branch_fns={1: fn_1, 2: fn_2},
                default=fn_3,
            )
1795
            out_2 = paddle.static.nn.switch_case(
1796 1797 1798 1799
                branch_index=index_2,
                branch_fns=[(1, fn_1), (2, fn_2)],
                default=fn_3,
            )
1800
            out_3 = paddle.static.nn.switch_case(
1801 1802 1803
                branch_index=index_2,
                branch_fns=[(0, fn_1), (4, fn_2), (7, fn_3)],
            )
1804 1805 1806 1807 1808

            dynamic_res1 = out_1.numpy()
            dynamic_res2 = out_2.numpy()
            dynamic_res3 = out_3.numpy()

1809 1810 1811 1812 1813 1814
        np.testing.assert_array_equal(static_res1, dynamic_res1)
        np.testing.assert_array_equal(static_res2, dynamic_res2)
        np.testing.assert_array_equal(static_res3, dynamic_res3)
        np.testing.assert_array_equal(static_res1, eager_dynamic_res1)
        np.testing.assert_array_equal(static_res2, eager_dynamic_res2)
        np.testing.assert_array_equal(static_res3, eager_dynamic_res3)
1815

1816 1817 1818 1819
    def test_crop_tensor(self):
        with self.static_graph():
            x = fluid.layers.data(name="x1", shape=[6, 5, 8])

1820 1821 1822 1823 1824 1825
            dim1 = fluid.layers.data(
                name="dim1", shape=[1], append_batch_size=False
            )
            dim2 = fluid.layers.data(
                name="dim2", shape=[1], append_batch_size=False
            )
1826
            crop_shape1 = (1, 2, 4, 4)
1827 1828 1829
            crop_shape2 = fluid.layers.data(
                name="crop_shape", shape=[4], append_batch_size=False
            )
1830 1831
            crop_shape3 = [-1, dim1, dim2, 4]
            crop_offsets1 = [0, 0, 1, 0]
1832 1833 1834
            crop_offsets2 = fluid.layers.data(
                name="crop_offset", shape=[4], append_batch_size=False
            )
1835 1836
            crop_offsets3 = [0, dim1, dim2, 0]

1837 1838 1839
            out1 = paddle.crop(x, shape=crop_shape1, offsets=crop_offsets1)
            out2 = paddle.crop(x, shape=crop_shape2, offsets=crop_offsets2)
            out3 = paddle.crop(x, shape=crop_shape3, offsets=crop_offsets3)
1840 1841 1842 1843 1844

            self.assertIsNotNone(out1)
            self.assertIsNotNone(out2)
            self.assertIsNotNone(out3)

1845 1846 1847
    def test_shard_index(self):
        with self.static_graph():
            x = fluid.layers.data(name="label", shape=[4, 1], dtype='int64')
1848
            shard_label = paddle.shard_index(
1849 1850
                input=x, index_num=20, nshards=2, shard_id=0
            )
1851 1852 1853

        self.assertIsNotNone(shard_label)

1854 1855 1856 1857 1858 1859 1860
    def test_accuracy(self):
        x = np.random.rand(3, 32, 32).astype("float32")
        y = np.array([[1], [0], [1]])
        with self.static_graph():
            data = fluid.data(name="input", shape=[-1, 32, 32], dtype="float32")
            label = fluid.data(name="label", shape=[-1, 1], dtype="int")
            fc_out = fluid.layers.fc(input=data, size=10)
1861
            predict = paddle.nn.functional.softmax(fc_out)
1862
            result = paddle.static.accuracy(input=predict, label=label, k=5)
1863 1864 1865 1866
            place = fluid.CPUPlace()
            exe = fluid.Executor(place)

            exe.run(fluid.default_startup_program())
L
Leo Chen 已提交
1867 1868
            # x = np.random.rand(3, 32, 32).astype("float32")
            # y = np.array([[1], [0], [1]])
1869 1870 1871
            static_out = exe.run(
                feed={"input": x, "label": y}, fetch_list=result[0]
            )
1872

L
Leo Chen 已提交
1873
        with self.dynamic_graph(force_to_use_cpu=True):
1874 1875 1876
            data = base.to_variable(x)
            label = base.to_variable(y)
            fc_out = fluid.layers.fc(data, size=10)
1877
            predict = paddle.nn.functional.softmax(fc_out)
1878 1879 1880
            dynamic_out = paddle.static.accuracy(
                input=predict, label=label, k=5
            )
1881

1882
        np.testing.assert_array_equal(static_out[0], dynamic_out.numpy())
1883

Y
Yu Yang 已提交
1884

1885
class TestBook(LayerTest):
H
hong 已提交
1886 1887
    def setUp(self):
        self.only_static_set = set({"make_word_embedding"})
1888 1889 1890 1891 1892 1893 1894
        self.not_compare_static_dygraph_set = set(
            {
                "make_gaussian_random",
                "make_kldiv_loss",
                "make_uniform_random_batch_size_like",
            }
        )
1895
        self.all_close_compare = set({"make_spectral_norm"})
H
hong 已提交
1896

1897
    def func_all_layers(self):
1898 1899 1900 1901 1902
        attrs = (getattr(self, name) for name in dir(self))
        methods = filter(inspect.ismethod, attrs)
        for method in methods:
            if not method.__name__.startswith('make_'):
                continue
M
minqiyang 已提交
1903 1904 1905
            self._low_data_bound = 0
            self._high_data_bound = 2
            self._batch_size = 2
1906 1907 1908 1909 1910 1911 1912 1913 1914 1915 1916 1917
            self._feed_dict = {}
            self._force_to_use_cpu = False
            with self.static_graph():
                static_var = method()
                if isinstance(static_var, tuple):
                    static_var = static_var[0]

                if static_var is not None:
                    fetch_list = [static_var.name]
                    static_result = self.get_static_graph_result(
                        feed=self._feed_dict,
                        fetch_list=fetch_list,
1918 1919
                        force_to_use_cpu=self._force_to_use_cpu,
                    )
H
hong 已提交
1920

1921 1922
                else:
                    continue
H
hong 已提交
1923 1924
            if method.__name__ in self.only_static_set:
                continue
1925 1926 1927 1928 1929

            with self.dynamic_graph(self._force_to_use_cpu):
                dy_result = method()
                if isinstance(dy_result, tuple):
                    dy_result = dy_result[0]
1930
                dy_result_value = dy_result.numpy()
1931

1932
            if method.__name__ in self.all_close_compare:
1933 1934 1935 1936 1937 1938
                np.testing.assert_allclose(
                    static_result[0],
                    dy_result_value,
                    rtol=1e-05,
                    atol=0,
                    err_msg='Result of function [{}] compare failed'.format(
1939 1940 1941
                        method.__name__
                    ),
                )
1942 1943
                continue

H
hong 已提交
1944
            if method.__name__ not in self.not_compare_static_dygraph_set:
1945 1946 1947 1948
                np.testing.assert_array_equal(
                    static_result[0],
                    dy_result_value,
                    err_msg='Result of function [{}] not equal'.format(
1949 1950 1951
                        method.__name__
                    ),
                )
1952

1953 1954 1955 1956 1957
    def test_all_layers(self):
        with _test_eager_guard():
            self.func_all_layers()
        self.func_all_layers()

1958 1959 1960
    def _get_np_data(self, shape, dtype, append_batch_size=True):
        np.random.seed(self.seed)
        if append_batch_size:
M
minqiyang 已提交
1961
            shape = [self._batch_size] + shape
1962 1963 1964 1965 1966
        if dtype == 'float32':
            return np.random.random(shape).astype(dtype)
        elif dtype == 'float64':
            return np.random.random(shape).astype(dtype)
        elif dtype == 'int32':
1967 1968 1969
            return np.random.randint(
                self._low_data_bound, self._high_data_bound, shape
            ).astype(dtype)
1970
        elif dtype == 'int64':
1971 1972 1973 1974 1975 1976 1977
            return np.random.randint(
                self._low_data_bound, self._high_data_bound, shape
            ).astype(dtype)

    def _get_data(
        self, name, shape, dtype, set_feed_dict=True, append_batch_size=True
    ):
1978
        if base.enabled():
1979 1980 1981 1982 1983
            return base.to_variable(
                value=self._get_np_data(shape, dtype, append_batch_size),
                name=name,
                zero_copy=False,
            )
1984 1985
        else:
            if set_feed_dict:
1986
                self._feed_dict[name] = self._get_np_data(
1987 1988 1989 1990 1991 1992 1993 1994
                    shape, dtype, append_batch_size
                )
            return layers.data(
                name=name,
                shape=shape,
                dtype=dtype,
                append_batch_size=append_batch_size,
            )
1995 1996

    def make_fit_a_line(self):
1997 1998 1999 2000
        with program_guard(
            fluid.default_main_program(),
            startup_program=fluid.default_startup_program(),
        ):
2001
            x = self._get_data(name='x', shape=[13], dtype='float32')
Y
Yu Yang 已提交
2002
            y_predict = layers.fc(input=x, size=1, act=None)
2003
            y = self._get_data(name='y', shape=[1], dtype='float32')
2004 2005 2006
            cost = paddle.nn.functional.square_error_cost(
                input=y_predict, label=y
            )
2007
            avg_cost = paddle.mean(cost)
2008
            return avg_cost
Y
Yu Yang 已提交
2009

2010
    def make_recognize_digits_mlp(self):
2011 2012 2013
        with program_guard(
            fluid.default_main_program(), fluid.default_startup_program()
        ):
Y
Yu Yang 已提交
2014
            # Change g_program, so the rest layers use `g_program`
2015 2016
            images = self._get_data(name='pixel', shape=[784], dtype='float32')
            label = self._get_data(name='label', shape=[1], dtype='int64')
Y
Yu Yang 已提交
2017 2018
            hidden1 = layers.fc(input=images, size=128, act='relu')
            hidden2 = layers.fc(input=hidden1, size=64, act='relu')
2019 2020 2021 2022 2023 2024
            predict = layers.fc(
                input=[hidden2, hidden1],
                size=10,
                act='softmax',
                param_attr=["sftmax.w1", "sftmax.w2"],
            )
Y
Yu Yang 已提交
2025
            cost = layers.cross_entropy(input=predict, label=label)
2026
            avg_cost = paddle.mean(cost)
2027
            return avg_cost
Y
Yu Yang 已提交
2028

2029
    def make_conv2d_transpose(self):
2030 2031 2032
        with program_guard(
            fluid.default_main_program(), fluid.default_startup_program()
        ):
2033
            img = self._get_data(name='pixel', shape=[3, 2, 2], dtype='float32')
2034
            return paddle.static.nn.conv2d_transpose(
2035 2036
                input=img, num_filters=10, output_size=28
            )
2037

2038
    def make_recognize_digits_conv(self):
2039 2040 2041 2042 2043 2044
        with program_guard(
            fluid.default_main_program(), fluid.default_startup_program()
        ):
            images = self._get_data(
                name='pixel', shape=[1, 28, 28], dtype='float32'
            )
2045
            label = self._get_data(name='label', shape=[1], dtype='int64')
2046 2047 2048 2049 2050 2051 2052 2053 2054 2055 2056 2057 2058 2059 2060 2061
            conv_pool_1 = nets.simple_img_conv_pool(
                input=images,
                filter_size=5,
                num_filters=2,
                pool_size=2,
                pool_stride=2,
                act="relu",
            )
            conv_pool_2 = nets.simple_img_conv_pool(
                input=conv_pool_1,
                filter_size=5,
                num_filters=4,
                pool_size=2,
                pool_stride=2,
                act="relu",
            )
Y
Yu Yang 已提交
2062 2063 2064

            predict = layers.fc(input=conv_pool_2, size=10, act="softmax")
            cost = layers.cross_entropy(input=predict, label=label)
2065
            avg_cost = paddle.mean(cost)
2066
            return avg_cost
Y
Yu Yang 已提交
2067

2068
    def make_word_embedding(self):
2069 2070 2071
        with program_guard(
            fluid.default_main_program(), fluid.default_startup_program()
        ):
Y
Yu Yang 已提交
2072 2073
            dict_size = 10000
            embed_size = 32
2074
            first_word = self._get_data(name='firstw', shape=[1], dtype='int64')
2075 2076 2077
            second_word = self._get_data(
                name='secondw', shape=[1], dtype='int64'
            )
2078 2079 2080
            third_word = self._get_data(name='thirdw', shape=[1], dtype='int64')
            forth_word = self._get_data(name='forthw', shape=[1], dtype='int64')
            next_word = self._get_data(name='nextw', shape=[1], dtype='int64')
Y
Yu Yang 已提交
2081

2082 2083 2084 2085 2086 2087 2088 2089 2090 2091 2092 2093 2094 2095 2096 2097 2098 2099 2100 2101 2102 2103 2104 2105 2106
            embed_first = layers.embedding(
                input=first_word,
                size=[dict_size, embed_size],
                dtype='float32',
                param_attr='shared_w',
            )
            embed_second = layers.embedding(
                input=second_word,
                size=[dict_size, embed_size],
                dtype='float32',
                param_attr='shared_w',
            )

            embed_third = layers.embedding(
                input=third_word,
                size=[dict_size, embed_size],
                dtype='float32',
                param_attr='shared_w',
            )
            embed_forth = layers.embedding(
                input=forth_word,
                size=[dict_size, embed_size],
                dtype='float32',
                param_attr='shared_w',
            )
Y
Yu Yang 已提交
2107 2108 2109

            concat_embed = layers.concat(
                input=[embed_first, embed_second, embed_third, embed_forth],
2110 2111
                axis=1,
            )
Y
Yu Yang 已提交
2112 2113

            hidden1 = layers.fc(input=concat_embed, size=256, act='sigmoid')
2114 2115 2116
            predict_word = layers.fc(
                input=hidden1, size=dict_size, act='softmax'
            )
Y
Yu Yang 已提交
2117
            cost = layers.cross_entropy(input=predict_word, label=next_word)
2118
            avg_cost = paddle.mean(cost)
2119
            return avg_cost
Y
Yu Yang 已提交
2120

2121
    def make_pool2d(self):
2122 2123 2124
        with program_guard(
            fluid.default_main_program(), fluid.default_startup_program()
        ):
2125
            x = self._get_data(name='x', shape=[3, 224, 224], dtype='float32')
C
ccrrong 已提交
2126 2127
            return paddle.nn.functional.max_pool2d(
                x, kernel_size=[5, 3], stride=[1, 2], padding=(2, 1)
2128
            )
2129

K
Kaipeng Deng 已提交
2130
    def make_pool2d_infershape(self):
2131 2132 2133
        with program_guard(
            fluid.default_main_program(), fluid.default_startup_program()
        ):
K
Kaipeng Deng 已提交
2134
            theta = self._get_data("theta", shape=[2, 3], dtype='float32')
2135 2136 2137
            x = paddle.nn.functional.affine_grid(
                theta, out_shape=[2, 3, 244, 244]
            )
C
ccrrong 已提交
2138 2139
            return paddle.nn.functional.max_pool2d(
                x, kernel_size=[5, 3], stride=[1, 2], padding=(2, 1)
2140
            )
K
Kaipeng Deng 已提交
2141

2142
    def make_lstm_unit(self):
2143 2144 2145 2146 2147 2148
        with program_guard(
            fluid.default_main_program(), fluid.default_startup_program()
        ):
            x_t_data = self._get_data(
                name='x_t_data', shape=[10, 10], dtype='float32'
            )
Y
yangyaming 已提交
2149
            x_t = layers.fc(input=x_t_data, size=10)
2150 2151 2152
            prev_hidden_data = self._get_data(
                name='prev_hidden_data', shape=[10, 30], dtype='float32'
            )
Y
yangyaming 已提交
2153
            prev_hidden = layers.fc(input=prev_hidden_data, size=30)
2154 2155 2156
            prev_cell_data = self._get_data(
                name='prev_cell', shape=[10, 30], dtype='float32'
            )
Y
yangyaming 已提交
2157
            prev_cell = layers.fc(input=prev_cell_data, size=30)
2158 2159 2160
            return layers.lstm_unit(
                x_t=x_t, hidden_t_prev=prev_hidden, cell_t_prev=prev_cell
            )
2161

2162
    def make_softmax(self):
2163 2164 2165
        with program_guard(
            fluid.default_main_program(), fluid.default_startup_program()
        ):
2166
            data = self._get_data(name='data', shape=[10], dtype='float32')
D
dangqingqing 已提交
2167
            hid = layers.fc(input=data, size=20)
2168
            return paddle.nn.functional.softmax(hid, axis=1)
D
dangqingqing 已提交
2169

2170
    @prog_scope()
2171
    def make_nce(self):
Y
Yang Yu 已提交
2172 2173
        window_size = 5
        words = []
2174
        for i in range(window_size):
Y
Yang Yu 已提交
2175
            words.append(
2176 2177 2178 2179
                self._get_data(
                    name='word_{0}'.format(i), shape=[1], dtype='int64'
                )
            )
Y
Yang Yu 已提交
2180 2181

        dict_size = 10000
M
minqiyang 已提交
2182
        label_word = int(window_size // 2) + 1
Y
Yang Yu 已提交
2183 2184

        embs = []
2185
        for i in range(window_size):
Y
Yang Yu 已提交
2186 2187 2188
            if i == label_word:
                continue

2189 2190 2191 2192 2193 2194
            emb = layers.embedding(
                input=words[i],
                size=[dict_size, 32],
                param_attr='emb.w',
                is_sparse=True,
            )
Y
Yang Yu 已提交
2195 2196 2197 2198

            embs.append(emb)

        embs = layers.concat(input=embs, axis=1)
2199
        loss = paddle.static.nn.nce(
2200 2201 2202 2203 2204 2205
            input=embs,
            label=words[label_word],
            num_total_classes=dict_size,
            param_attr='nce.w',
            bias_attr='nce.b',
        )
2206
        avg_loss = paddle.mean(loss)
2207
        return avg_loss
Y
Yang Yu 已提交
2208

2209
    def make_multiplex(self):
2210 2211 2212
        with program_guard(
            fluid.default_main_program(), fluid.default_startup_program()
        ):
2213 2214 2215
            x1 = self._get_data(name='x1', shape=[4], dtype='float32')
            x2 = self._get_data(name='x2', shape=[4], dtype='float32')
            index = self._get_data(name='index', shape=[1], dtype='int32')
2216
            out = paddle.multiplex(inputs=[x1, x2], index=index)
2217
            return out
2218 2219

    def make_softmax_with_cross_entropy(self):
2220 2221 2222
        with program_guard(
            fluid.default_main_program(), fluid.default_startup_program()
        ):
2223 2224
            x = self._get_data(name='x', shape=[16], dtype='float32')
            y = self._get_data(name='label', shape=[1], dtype='int64')
2225
            loss, softmax = paddle.nn.functional.softmax_with_cross_entropy(
2226 2227
                x, y, return_softmax=True
            )
2228 2229 2230
            self.assertIsNotNone(loss)
            self.assertIsNotNone(softmax)

2231
            loss = paddle.nn.functional.softmax_with_cross_entropy(x, y)
2232 2233 2234 2235 2236 2237
            self.assertIsNotNone(loss)

            x1 = self._get_data(name='x1', shape=[16, 32, 64], dtype='float32')
            y1 = self._get_data(name='label1', shape=[1, 32, 64], dtype='int64')
            y2 = self._get_data(name='label2', shape=[16, 1, 64], dtype='int64')
            y3 = self._get_data(name='label3', shape=[16, 32, 1], dtype='int64')
2238 2239 2240 2241 2242 2243 2244 2245 2246 2247 2248 2249
            loss1 = paddle.nn.functional.softmax_with_cross_entropy(
                x1, y1, axis=1
            )
            loss2 = paddle.nn.functional.softmax_with_cross_entropy(
                x1, y2, axis=2
            )
            loss3 = paddle.nn.functional.softmax_with_cross_entropy(
                x1, y3, axis=3
            )
            loss4 = paddle.nn.functional.softmax_with_cross_entropy(
                x1, y3, axis=-1
            )
2250 2251 2252 2253
            self.assertIsNotNone(loss1)
            self.assertIsNotNone(loss2)
            self.assertIsNotNone(loss3)
            self.assertIsNotNone(loss4)
2254
            return loss4
2255 2256

    def make_scatter(self):
2257 2258 2259 2260 2261 2262 2263 2264 2265 2266 2267 2268 2269 2270 2271
        with program_guard(
            fluid.default_main_program(), fluid.default_startup_program()
        ):
            x = self._get_data(
                name='x', shape=[3, 3], append_batch_size=False, dtype='float32'
            )
            idx = self._get_data(
                name='idx', shape=[2], append_batch_size=False, dtype='int32'
            )
            updates = self._get_data(
                name='updates',
                shape=[2, 3],
                append_batch_size=False,
                dtype='float32',
            )
2272
            out = paddle.scatter(x, index=idx, updates=updates)
2273
            return out
Y
yangyaming 已提交
2274

2275 2276 2277 2278
    def make_one_hot(self):
        with fluid.framework._dygraph_place_guard(place=fluid.CPUPlace()):
            label = self._get_data(name="label", shape=[1], dtype="int32")
            one_hot_label = layers.one_hot(input=label, depth=10)
2279
            return one_hot_label
2280

2281 2282 2283 2284 2285
    def make_label_smooth(self):
        # TODO(minqiyang): support gpu ut
        self._force_to_use_cpu = True
        with fluid.framework._dygraph_place_guard(place=fluid.CPUPlace()):
            label = self._get_data(name="label", shape=[1], dtype="int32")
2286
            one_hot_label = layers.one_hot(input=label, depth=10)
2287
            smooth_label = F.label_smooth(label=one_hot_label, epsilon=0.1)
2288
            return smooth_label
2289

2290
    def make_topk(self):
2291 2292 2293
        with program_guard(
            fluid.default_main_program(), fluid.default_startup_program()
        ):
2294
            data = self._get_data(name="label", shape=[200], dtype="float32")
2295
            values, indices = paddle.topk(data, k=5)
2296 2297
            return values
            return indices
J
jerrywgz 已提交
2298

2299
    def make_l2_normalize(self):
2300 2301 2302
        with program_guard(
            fluid.default_main_program(), fluid.default_startup_program()
        ):
2303
            x = self._get_data(name='x', shape=[8, 7, 10], dtype="float32")
2304
            output = layers.l2_normalize(x, axis=1)
2305
            return output
2306

2307
    def make_shape(self):
2308 2309 2310 2311 2312 2313
        with program_guard(
            fluid.default_main_program(), fluid.default_startup_program()
        ):
            input = self._get_data(
                name="input", shape=[3, 100, 100], dtype="float32"
            )
2
201716010711 已提交
2314
            out = paddle.shape(input)
2315
            return out
B
Bai Yifan 已提交
2316

2317
    def make_pad2d(self):
2318 2319 2320 2321 2322 2323
        with program_guard(
            fluid.default_main_program(), fluid.default_startup_program()
        ):
            input = self._get_data(
                name="input", shape=[3, 100, 100], dtype="float32"
            )
傅剑寒 已提交
2324 2325 2326

            tmp_pad = paddle.nn.Pad2D(
                padding=[1, 2, 3, 4],
2327 2328 2329 2330
                mode='reflect',
                data_format='NCHW',
                name="shape",
            )
傅剑寒 已提交
2331
            out = tmp_pad(input)
2332
            return out
W
whs 已提交
2333

K
Kaipeng Deng 已提交
2334
    def make_mish(self):
2335 2336 2337
        with program_guard(
            fluid.default_main_program(), fluid.default_startup_program()
        ):
K
Kaipeng Deng 已提交
2338
            input = self._get_data(name="input", shape=[16], dtype="float32")
2339
            out = paddle.nn.functional.mish(input, name='mish')
2340
            return out
K
Kaipeng Deng 已提交
2341

2342
    def make_cross_entropy(self):
2343 2344 2345
        with program_guard(
            fluid.default_main_program(), fluid.default_startup_program()
        ):
2346 2347
            x = self._get_data(name="x", shape=[30, 10], dtype="float32")
            label = self._get_data(name="label", shape=[30, 1], dtype="int64")
2348 2349
            mode = 'channel'
            out = layers.cross_entropy(x, label, False, 4)
2350
            return out
2351

2352
    def make_uniform_random_batch_size_like(self):
2353 2354 2355 2356 2357 2358
        with program_guard(
            fluid.default_main_program(), fluid.default_startup_program()
        ):
            input = self._get_data(
                name="input", shape=[13, 11], dtype='float32'
            )
2359
            out = random.uniform_random_batch_size_like(input, [-1, 11])
2360
            return out
G
fix  
gongweibao 已提交
2361

2362
    def make_gaussian_random(self):
2363 2364 2365
        with program_guard(
            fluid.default_main_program(), fluid.default_startup_program()
        ):
2366
            out = random.gaussian(shape=[20, 30])
2367
            return out
G
fix  
gongweibao 已提交
2368

2369
    def make_sum(self):
2370 2371 2372 2373 2374 2375
        with program_guard(
            fluid.default_main_program(), fluid.default_startup_program()
        ):
            input = self._get_data(
                name="input", shape=[13, 11], dtype='float32'
            )
G
fix  
gongweibao 已提交
2376

2377
            out = paddle.add_n(input)
2378
            return out
G
fix  
gongweibao 已提交
2379

2380
    def make_slice(self):
G
fix  
gongweibao 已提交
2381 2382 2383 2384
        starts = [1, 0, 2]
        ends = [3, 3, 4]
        axes = [0, 1, 2]

2385 2386 2387 2388 2389 2390
        with program_guard(
            fluid.default_main_program(), fluid.default_startup_program()
        ):
            input = self._get_data(
                name="input", shape=[3, 4, 5, 6], dtype='float32'
            )
G
fix  
gongweibao 已提交
2391

2
201716010711 已提交
2392
            out = paddle.slice(input, axes=axes, starts=starts, ends=ends)
2393
            return out
G
merge  
gongweibao 已提交
2394

2395
    def make_scale_variable(self):
2396 2397 2398 2399 2400 2401 2402 2403 2404 2405 2406 2407
        with program_guard(
            fluid.default_main_program(), fluid.default_startup_program()
        ):
            input = self._get_data(
                name="input", shape=[3, 4, 5, 6], dtype='float32'
            )
            scale_var = self._get_data(
                name="scale",
                shape=[1],
                dtype='float32',
                append_batch_size=False,
            )
2
201716010711 已提交
2408
            out = paddle.scale(input, scale=scale_var)
2409 2410
            return out

2411
    def make_bilinear_tensor_product_layer(self):
2412 2413 2414
        with program_guard(
            fluid.default_main_program(), fluid.default_startup_program()
        ):
2415 2416 2417
            data = self._get_data(name='data', shape=[4], dtype="float32")

            theta = self._get_data(name="theta", shape=[5], dtype="float32")
2418 2419 2420
            out = paddle.static.nn.common.bilinear_tensor_product(
                data, theta, 6
            )
2421
            return out
2422 2423

    def make_batch_norm(self):
2424 2425 2426 2427 2428 2429
        with program_guard(
            fluid.default_main_program(), fluid.default_startup_program()
        ):
            data = self._get_data(
                name='data', shape=[32, 128, 128], dtype="float32"
            )
2430
            out = paddle.static.nn.batch_norm(data)
2431
            return out
2432

2433
    def make_batch_norm_momentum_variable(self):
2434 2435 2436 2437 2438 2439 2440 2441 2442 2443 2444 2445
        with program_guard(
            fluid.default_main_program(), fluid.default_startup_program()
        ):
            data = self._get_data(
                name='data', shape=[32, 128, 128], dtype="float32"
            )
            momentum = self._get_data(
                name='momentum',
                shape=[1],
                dtype='float32',
                append_batch_size=False,
            )
2446
            out = paddle.static.nn.batch_norm(data, momentum=momentum)
2447
            return out
2448

2449
    def make_range(self):
2450 2451 2452
        with program_guard(
            fluid.default_main_program(), fluid.default_startup_program()
        ):
C
ccrrong 已提交
2453 2454 2455
            paddle.arange(0, 10, 2, 'int32')
            paddle.arange(0.1, 10.0, 0.2, 'float32')
            paddle.arange(0.1, 10.0, 0.2, 'float64')
2456 2457 2458
            start = layers.fill_constant(shape=[1], value=0.1, dtype="float32")
            end = layers.fill_constant(shape=[1], value=10.0, dtype="float32")
            step = layers.fill_constant(shape=[1], value=0.2, dtype="float32")
C
ccrrong 已提交
2459
            y = paddle.arange(start, end, step, 'float64')
2460 2461 2462
            return y

    def make_spectral_norm(self):
2463 2464 2465 2466 2467 2468 2469 2470 2471
        with program_guard(
            fluid.default_main_program(), fluid.default_startup_program()
        ):
            weight = self._get_data(
                name='weight',
                shape=[2, 3, 32, 32],
                dtype="float32",
                append_batch_size=False,
            )
2472
            out = layers.spectral_norm(weight, dim=1, power_iters=1)
2473
            return out
2474 2475

    def make_kldiv_loss(self):
2476 2477 2478 2479 2480 2481 2482 2483 2484 2485 2486 2487 2488 2489 2490
        with program_guard(
            fluid.default_main_program(), fluid.default_startup_program()
        ):
            x = self._get_data(
                name='x',
                shape=[32, 128, 128],
                dtype="float32",
                append_batch_size=False,
            )
            target = self._get_data(
                name='target',
                shape=[32, 128, 128],
                dtype="float32",
                append_batch_size=False,
            )
2491 2492 2493
            loss = paddle.nn.functional.kl_div(
                input=x, label=target, reduction='batchmean'
            )
2494
            return loss
2495

M
minqiyang 已提交
2496
    def make_pixel_shuffle(self):
2497 2498 2499
        with program_guard(
            fluid.default_main_program(), fluid.default_startup_program()
        ):
M
minqiyang 已提交
2500
            x = self._get_data(name="X", shape=[9, 4, 4], dtype="float32")
2501
            out = paddle.nn.functional.pixel_shuffle(x, upscale_factor=3)
2502
            return out
M
minqiyang 已提交
2503

R
ruri 已提交
2504
    def make_mse_loss(self):
2505 2506 2507
        with program_guard(
            fluid.default_main_program(), fluid.default_startup_program()
        ):
R
ruri 已提交
2508 2509
            x = self._get_data(name="X", shape=[1], dtype="float32")
            y = self._get_data(name="Y", shape=[1], dtype="float32")
2510
            out = paddle.nn.functional.mse_loss(input=x, label=y)
2511
            return out
R
ruri 已提交
2512

2513
    def make_square_error_cost(self):
2514 2515 2516
        with program_guard(
            fluid.default_main_program(), fluid.default_startup_program()
        ):
2517 2518
            x = self._get_data(name="X", shape=[1], dtype="float32")
            y = self._get_data(name="Y", shape=[1], dtype="float32")
2519
            out = paddle.nn.functional.square_error_cost(input=x, label=y)
2520
            return out
2521

2522 2523 2524 2525
    def test_dynamic_lstmp(self):
        # TODO(minqiyang): dygraph do not support lod now
        with self.static_graph():
            hidden_dim, proj_dim = 16, 8
2526 2527 2528
            seq_data = layers.data(
                name='seq_data', shape=[10, 10], dtype='float32', lod_level=1
            )
2529 2530
            fc_out = layers.fc(input=seq_data, size=4 * hidden_dim)
            self.assertIsNotNone(
2531 2532 2533 2534
                layers.dynamic_lstmp(
                    input=fc_out, size=4 * hidden_dim, proj_size=proj_dim
                )
            )
2535 2536 2537 2538

    def test_lod_reset(self):
        # TODO(minqiyang): dygraph do not support lod now
        with self.static_graph():
2539
            # case 1
2540
            x = layers.data(name='x', shape=[10], dtype='float32')
2541 2542 2543
            y = layers.data(
                name='y', shape=[10, 20], dtype='float32', lod_level=2
            )
2544 2545 2546
            z = layers.lod_reset(x=x, y=y)
            self.assertTrue(z.lod_level == 2)
            # case 2
2547
            lod_tensor_in = layers.data(name='lod_in', shape=[1], dtype='int32')
2548 2549 2550 2551 2552 2553
            z = layers.lod_reset(x=x, y=lod_tensor_in)
            self.assertTrue(z.lod_level == 1)
            # case 3
            z = layers.lod_reset(x=x, target_lod=[1, 2, 3])
            self.assertTrue(z.lod_level == 1)
            return z
2554

W
whs 已提交
2555
    def test_affine_grid(self):
2556
        with self.static_graph():
W
whs 已提交
2557
            data = layers.data(name='data', shape=[2, 3, 3], dtype="float32")
2558
            out = paddle.argsort(x=data, axis=1)
W
whs 已提交
2559 2560

            theta = layers.data(name="theta", shape=[2, 3], dtype="float32")
2561
            out_shape = layers.data(name="out_shape", shape=[-1], dtype="int32")
2562 2563
            data_0 = paddle.nn.functional.affine_grid(theta, out_shape)
            data_1 = paddle.nn.functional.affine_grid(theta, [5, 3, 28, 28])
W
whs 已提交
2564 2565 2566

            self.assertIsNotNone(data_0)
            self.assertIsNotNone(data_1)
D
dengkaipeng 已提交
2567

W
wangchaochaohu 已提交
2568 2569 2570 2571 2572 2573 2574
    def test_stridedslice(self):
        axes = [0, 1, 2]
        starts = [1, 0, 2]
        ends = [3, 3, 4]
        strides = [1, 1, 1]
        with self.static_graph():
            x = layers.data(name="x", shape=[245, 30, 30], dtype="float32")
2
201716010711 已提交
2575
            out = paddle.strided_slice(
2576 2577
                x, axes=axes, starts=starts, ends=ends, strides=strides
            )
W
wangchaochaohu 已提交
2578 2579
            return out

2580 2581
    def test_fill_constant_batch_size_like(self):
        with self.static_graph():
2582 2583 2584 2585 2586 2587
            like = fluid.layers.fill_constant(
                shape=[1, 200], value=10, dtype='int64'
            )
            out = layers.fill_constant_batch_size_like(
                input=like, shape=[2, 3300], value=1315454564656, dtype='int64'
            )
2588 2589
            return out

2590 2591 2592 2593
    def test_sequence_expand(self):
        # TODO(minqiyang): dygraph do not support lod now
        with self.static_graph():
            x = layers.data(name='x', shape=[10], dtype='float32')
2594 2595 2596 2597
            y = layers.data(
                name='y', shape=[10, 20], dtype='float32', lod_level=2
            )
            return layers.sequence_expand(x=x, y=y, ref_level=1)
2598

2599 2600 2601 2602 2603
    def test_sequence_reshape(self):
        # TODO(minqiyang): dygraph do not support lod now
        with self.static_graph():
            x = layers.data(name='x', shape=[8], dtype='float32', lod_level=1)
            out = layers.sequence_reshape(input=x, new_dim=16)
2604
            return out
2605

2606 2607 2608 2609
    def test_sequence_unpad(self):
        # TODO(minqiyang): dygraph do not support lod now
        with self.static_graph():
            x = layers.data(name='x', shape=[10, 5], dtype='float32')
2610
            length = layers.data(name='length', shape=[], dtype='int64')
2611
            return layers.sequence_unpad(x=x, length=length)
2612

2613 2614 2615
    def test_sequence_softmax(self):
        # TODO(minqiyang): dygraph do not support lod now
        with self.static_graph():
2616 2617 2618
            seq_data = layers.data(
                name='seq_data', shape=[10, 10], dtype='float32', lod_level=1
            )
2619
            seq = layers.fc(input=seq_data, size=20)
2620
            return layers.sequence_softmax(seq)
2621

2622 2623 2624 2625 2626
    def test_sequence_unsqueeze(self):
        # TODO(minqiyang): dygraph do not support lod now
        with self.static_graph():
            x = layers.data(name='x', shape=[8, 2], dtype='float32')
            out = layers.unsqueeze(input=x, axes=[1])
2627
            return out
2628

2629 2630 2631
    def test_sequence_scatter(self):
        # TODO(minqiyang): dygraph do not support lod now
        with self.static_graph():
2632 2633 2634 2635 2636 2637 2638 2639 2640 2641 2642 2643 2644 2645 2646 2647 2648
            x = layers.data(
                name='x', shape=[3, 6], append_batch_size=False, dtype='float32'
            )
            idx = layers.data(
                name='idx',
                shape=[12, 1],
                append_batch_size=False,
                dtype='int32',
                lod_level=1,
            )
            updates = layers.data(
                name='updates',
                shape=[12, 1],
                append_batch_size=False,
                dtype='float32',
                lod_level=1,
            )
2649
            out = layers.sequence_scatter(input=x, index=idx, updates=updates)
2650
            return out
W
whs 已提交
2651

2652 2653 2654 2655
    def test_sequence_slice(self):
        # TODO(minqiyang): dygraph do not support lod now
        with self.static_graph():
            import numpy as np
2656 2657 2658 2659

            seqs = layers.data(
                name='x', shape=[10, 5], dtype='float32', lod_level=1
            )
2660 2661
            offset = layers.assign(input=np.array([[0, 1]]).astype('int32'))
            length = layers.assign(input=np.array([[2, 1]]).astype('int32'))
2662 2663 2664 2665
            out = layers.sequence_slice(
                input=seqs, offset=offset, length=length
            )
            return out
W
whs 已提交
2666

Z
zhoushiyu 已提交
2667 2668 2669
    def test_shuffle_batch(self):
        # TODO(minqiyang): dygraph do not support lod now
        with self.static_graph():
2670 2671 2672
            x = layers.data(
                name='X', shape=[4, 50], dtype='float32', lod_level=0
            )
Z
zhoushiyu 已提交
2673 2674 2675 2676 2677
            out1 = fluid.contrib.layers.shuffle_batch(x)
            default_main_program().random_seed = 1000
            out2 = fluid.contrib.layers.shuffle_batch(x)
            self.assertIsNotNone(out1)
            self.assertIsNotNone(out2)
2678
            return out1
Z
zhoushiyu 已提交
2679

2680 2681 2682 2683
    def test_partial_sum(self):
        with self.static_graph():
            x = fluid.data(name="x", shape=[None, 3], dtype="float32")
            y = fluid.data(name="y", shape=[None, 3], dtype="float32")
2684 2685 2686 2687
            sum = fluid.contrib.layers.partial_sum(
                [x, y], start_index=0, length=2
            )
            return sum
2688

S
ShenLiang 已提交
2689 2690 2691 2692 2693 2694 2695 2696 2697
    def test_batch_fc(self):
        with self.static_graph():
            input = fluid.data(name="input", shape=[16, 2, 3], dtype="float32")
            out = fluid.contrib.layers.batch_fc(
                input=input,
                param_size=[16, 3, 10],
                param_attr=fluid.ParamAttr(
                    learning_rate=1.0,
                    name="w_0",
2698 2699
                    initializer=fluid.initializer.Xavier(uniform=False),
                ),
S
ShenLiang 已提交
2700 2701 2702 2703
                bias_size=[16, 10],
                bias_attr=fluid.ParamAttr(
                    learning_rate=1.0,
                    name="b_0",
2704 2705 2706 2707 2708
                    initializer=fluid.initializer.Xavier(uniform=False),
                ),
                act="relu",
            )
        return out
S
ShenLiang 已提交
2709

S
ShenLiang 已提交
2710 2711 2712
    def test_rank_attention(self):
        with self.static_graph():
            input = fluid.data(name="input", shape=[None, 2], dtype="float32")
2713 2714 2715
            rank_offset = fluid.data(
                name="rank_offset", shape=[None, 7], dtype="int32"
            )
S
ShenLiang 已提交
2716 2717 2718 2719 2720 2721 2722
            out = fluid.contrib.layers.rank_attention(
                input=input,
                rank_offset=rank_offset,
                rank_param_shape=[18, 3],
                rank_param_attr=fluid.ParamAttr(
                    learning_rate=1.0,
                    name="ubm_rank_param.w_0",
2723 2724 2725 2726 2727
                    initializer=fluid.initializer.Xavier(uniform=False),
                ),
                max_rank=3,
            )
            return out
S
ShenLiang 已提交
2728

2729 2730 2731 2732 2733 2734 2735 2736 2737 2738
    def test_sequence_enumerate(self):
        # TODO(minqiyang): dygraph do not support lod now
        with self.static_graph():
            x = layers.data(name="input", shape=[1], dtype='int32', lod_level=1)
            out = layers.sequence_enumerate(input=x, win_size=2, pad_value=0)

    def test_roi_perspective_transform(self):
        # TODO(minqiyang): dygraph do not support lod now
        with self.static_graph():
            x = layers.data(name="x", shape=[256, 30, 30], dtype="float32")
2739 2740 2741
            rois = layers.data(
                name="rois", shape=[8], dtype="float32", lod_level=1
            )
2742
            output = layers.roi_perspective_transform(x, rois, 7, 7, 0.6)
2743
            return output
2744 2745 2746 2747 2748 2749

    def test_row_conv(self):
        # TODO(minqiyang): dygraph do not support lod now
        with self.static_graph():
            x = layers.data(name='x', shape=[16], dtype='float32', lod_level=1)
            out = layers.row_conv(input=x, future_context_size=2)
2750
            return out
2751 2752 2753 2754

    def test_simple_conv2d(self):
        # TODO(minqiyang): dygraph do not support layers with param now
        with self.static_graph():
2755 2756 2757 2758 2759 2760
            images = layers.data(
                name='pixel', shape=[3, 48, 48], dtype='float32'
            )
            return layers.conv2d(
                input=images, num_filters=3, filter_size=[4, 4]
            )
2761 2762 2763 2764 2765

    def test_squeeze(self):
        # TODO(minqiyang): dygraph do not support layers with param now
        with self.static_graph():
            x = layers.data(name='x', shape=[1, 1, 4], dtype='float32')
2766
            out = paddle.squeeze(x, axis=[2])
2767
            return out
2768 2769 2770 2771

    def test_flatten(self):
        # TODO(minqiyang): dygraph do not support op without kernel now
        with self.static_graph():
2772 2773 2774 2775 2776 2777
            x = layers.data(
                name='x',
                append_batch_size=False,
                shape=[4, 4, 3],
                dtype="float32",
            )
2778
            out = paddle.flatten(x, 1, -1, name="flatten")
2779
            return out
2780

Z
zhoukunsheng 已提交
2781 2782 2783
    def test_linspace(self):
        program = Program()
        with program_guard(program):
2784
            out = paddle.linspace(20, 10, 5, 'float64')
Z
zhoukunsheng 已提交
2785 2786 2787
            self.assertIsNotNone(out)
        print(str(program))

2788 2789 2790
    def test_unfold(self):
        with self.static_graph():
            x = layers.data(name='x', shape=[3, 20, 20], dtype='float32')
2791
            out = paddle.nn.functional.unfold(x, [3, 3], 1, 1, 1)
2792
            return out
2793

2794 2795 2796 2797
    def test_partial_concat(self):
        with self.static_graph():
            x = fluid.data(name="x", shape=[None, 3], dtype="float32")
            y = fluid.data(name="y", shape=[None, 3], dtype="float32")
2798 2799 2800 2801 2802 2803
            concat1 = fluid.contrib.layers.partial_concat(
                [x, y], start_index=0, length=2
            )
            concat2 = fluid.contrib.layers.partial_concat(
                x, start_index=0, length=-1
            )
2804 2805
            return concat1, concat2

2806
    def test_addmm(self):
2807 2808 2809 2810 2811 2812 2813 2814 2815 2816 2817 2818 2819 2820 2821
        with program_guard(
            fluid.default_main_program(), fluid.default_startup_program()
        ):
            input = layers.data(
                name='input_data',
                shape=[3, 3],
                append_batch_size=False,
                dtype='float32',
            )
            x = layers.data(
                name='x', shape=[3, 2], append_batch_size=False, dtype='float32'
            )
            y = layers.data(
                name='y', shape=[2, 3], append_batch_size=False, dtype='float32'
            )
2822 2823

            out = paddle.addmm(input=input, x=x, y=y)
2824
            return out
2825

2826
    def test_retinanet_detection_output(self):
2827 2828 2829 2830 2831 2832 2833 2834 2835 2836 2837 2838 2839 2840 2841 2842 2843 2844 2845 2846 2847 2848 2849 2850 2851 2852 2853
        with program_guard(
            fluid.default_main_program(), fluid.default_startup_program()
        ):
            bboxes = layers.data(
                name='bboxes',
                shape=[1, 21, 4],
                append_batch_size=False,
                dtype='float32',
            )
            scores = layers.data(
                name='scores',
                shape=[1, 21, 10],
                append_batch_size=False,
                dtype='float32',
            )
            anchors = layers.data(
                name='anchors',
                shape=[21, 4],
                append_batch_size=False,
                dtype='float32',
            )
            im_info = layers.data(
                name="im_info",
                shape=[1, 3],
                append_batch_size=False,
                dtype='float32',
            )
2854 2855 2856 2857 2858 2859 2860 2861 2862
            nmsed_outs = layers.retinanet_detection_output(
                bboxes=[bboxes, bboxes],
                scores=[scores, scores],
                anchors=[anchors, anchors],
                im_info=im_info,
                score_threshold=0.05,
                nms_top_k=1000,
                keep_top_k=100,
                nms_threshold=0.3,
2863 2864 2865
                nms_eta=1.0,
            )
            return nmsed_outs
2866

2867 2868 2869
    def test_warpctc_with_padding(self):
        # TODO(minqiyang): dygraph do not support lod now
        with self.static_graph():
2870
            input_length = paddle.static.data(
2871 2872
                name='logits_length', shape=[11], dtype='int64'
            )
2873
            label_length = paddle.static.data(
2874 2875
                name='labels_length', shape=[12], dtype='int64'
            )
2876 2877 2878 2879
            label = paddle.static.data(
                name='label', shape=[12, 1], dtype='int32'
            )
            predict = paddle.static.data(
2880 2881
                name='predict', shape=[4, 4, 8], dtype='float32'
            )
2882 2883 2884 2885 2886 2887
            output = paddle.nn.functional.ctc_loss(
                log_probs=predict,
                labels=label,
                input_lengths=input_length,
                label_lengths=label_length,
                reduction='none',
2888 2889
            )
            return output
2890

2891 2892 2893 2894
    def test_basic_gru(self):
        input_size = 128
        hidden_size = 256
        with self.static_graph():
2895 2896 2897 2898 2899 2900 2901 2902 2903
            input = fluid.data(
                name="input", shape=[None, None, input_size], dtype='float32'
            )
            pre_hidden = fluid.data(
                name="pre_hidden", shape=[None, hidden_size], dtype='float32'
            )
            sequence_length = fluid.data(
                name="sequence_length", shape=[None], dtype='int32'
            )
2904 2905 2906 2907 2908 2909 2910 2911 2912 2913 2914

            for bidirectional in [True, False]:
                for batch_first in [True, False]:
                    rnn_out, last_hidden = fluid.contrib.layers.basic_gru(
                        input,
                        pre_hidden,
                        hidden_size=256,
                        num_layers=2,
                        sequence_length=sequence_length,
                        dropout_prob=0.5,
                        bidirectional=bidirectional,
2915 2916
                        batch_first=batch_first,
                    )
2917

Y
Yu Yang 已提交
2918

2919 2920
class ExampleNet(paddle.nn.Layer):
    def __init__(self):
2921
        super().__init__()
2922
        self.weight = self.create_parameter(
2923 2924
            shape=[1, 1], attr=paddle.ParamAttr(trainable=False)
        )
2925 2926 2927 2928 2929 2930 2931 2932 2933 2934 2935 2936 2937

    def forward(self):
        # only for test parameter trainable attr
        pass


class TestLayerParameterTrainableSet(unittest.TestCase):
    def test_layer_parameter_set(self):
        with fluid.dygraph.guard():
            net = ExampleNet()
            self.assertFalse(net.weight.trainable)


2938 2939 2940 2941 2942 2943 2944 2945 2946 2947 2948 2949 2950 2951 2952 2953 2954
class TestLayerTrainingAttribute(unittest.TestCase):
    def test_set_train_eval_in_dynamic_mode(self):
        with fluid.dygraph.guard():
            net = paddle.nn.Dropout()
            net.train()
            self.assertTrue(net.training)
            net.eval()
            self.assertFalse(net.training)

    def test_set_train_eval_in_static_mode(self):
        net = paddle.nn.Dropout()
        net.train()
        self.assertTrue(net.training)
        net.eval()
        self.assertFalse(net.training)


J
Jiabin Yang 已提交
2955 2956
class MyLayer(paddle.nn.Layer):
    def __init__(self):
2957
        super().__init__()
J
Jiabin Yang 已提交
2958 2959 2960 2961 2962 2963 2964 2965 2966 2967 2968
        self._linear = paddle.nn.Linear(1, 1)
        self._dropout = paddle.nn.Dropout(p=0.5)

    def forward(self, input):
        temp = self._linear(input)
        temp = self._dropout(temp)
        return temp


class MySuperLayer(paddle.nn.Layer):
    def __init__(self):
2969
        super().__init__()
J
Jiabin Yang 已提交
2970 2971 2972 2973 2974 2975 2976 2977 2978 2979 2980 2981 2982 2983 2984
        self._mylayer = MyLayer()

    def forward(self, input):
        temp = self._mylayer(input)
        return temp


class TestSubLayerCount(unittest.TestCase):
    def test_sublayer(self):
        with fluid.dygraph.guard():
            mySuperlayer = MySuperLayer()
            self.assertTrue(len(mySuperlayer.sublayers()) == 3)
            self.assertTrue(len(mySuperlayer.sublayers(include_self=True)) == 4)


Y
Yu Yang 已提交
2985
if __name__ == '__main__':
2986
    paddle.enable_static()
Y
Yu Yang 已提交
2987
    unittest.main()