#   Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import contextlib
import inspect
import unittest

import numpy as np
from decorator_helper import prog_scope
from test_imperative_base import new_program_scope

import paddle
import paddle.nn.functional as F
from paddle import fluid
from paddle.fluid import core, layers, nets
from paddle.fluid.dygraph import base, to_variable
from paddle.fluid.framework import Program, default_main_program, program_guard
from paddle.incubate.layers.nn import (
    batch_fc,
    partial_concat,
    partial_sum,
    rank_attention,
    shuffle_batch,
)
from paddle.tensor import random


class LayerTest(unittest.TestCase):
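    """Shared helpers for running the same op in static-graph and dygraph mode.

    ``static_graph()`` and ``dynamic_graph()`` seed both modes identically so
    that the results of the two executions can be compared numerically.
    """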
    @classmethod
    def setUpClass(cls):
        cls.seed = 111

    @classmethod
    def tearDownClass(cls):
        pass

    def _get_place(self, force_to_use_cpu=False):
        # force_to_use_cpu is for ops that only have a CPU kernel
        if force_to_use_cpu:
            return core.CPUPlace()
        else:
            if core.is_compiled_with_cuda():
                return core.CUDAPlace(0)
            return core.CPUPlace()

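    # Context manager: build ops in a fresh startup/main Program pair with a
    # fixed random seed.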
    @contextlib.contextmanager
    def static_graph(self):
        with new_program_scope():
            paddle.seed(self.seed)
            paddle.framework.random._manual_program_seed(self.seed)
            yield

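    # Run the default main program on the selected place and return the
    # fetched results.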
    def get_static_graph_result(
        self, feed, fetch_list, with_lod=False, force_to_use_cpu=False
    ):
        exe = fluid.Executor(self._get_place(force_to_use_cpu))
        exe.run(fluid.default_startup_program())
        return exe.run(
            fluid.default_main_program(),
            feed=feed,
            fetch_list=fetch_list,
            return_numpy=(not with_lod),
        )

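    # Context manager: enter dygraph (imperative) mode on the selected place
    # with the same fixed seed.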
    @contextlib.contextmanager
    def dynamic_graph(self, force_to_use_cpu=False):
        with fluid.dygraph.guard(
            self._get_place(force_to_use_cpu=force_to_use_cpu)
        ):
            paddle.seed(self.seed)
            paddle.framework.random._manual_program_seed(self.seed)
            yield


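# Most tests below build the same layer in both static-graph and dygraph mode
# and check that the two results agree; a few only exercise the dygraph path.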
class TestLayer(LayerTest):
    def test_custom_layer_with_kwargs(self):
        class CustomLayer(paddle.nn.Layer):
            def __init__(self, input_size, linear1_size=4):
                super().__init__()
                self.linear1 = paddle.nn.Linear(
                    input_size, linear1_size, bias_attr=False
                )
                self.linear2 = paddle.nn.Linear(
                    linear1_size, 1, bias_attr=False
                )

            def forward(self, x, do_linear2=False):
                ret = self.linear1(x)
                if do_linear2:
                    ret = self.linear2(ret)
                return ret

        with self.dynamic_graph():
            inp = np.ones([3, 3], dtype='float32')
            x = base.to_variable(inp)
            custom = CustomLayer(input_size=3, linear1_size=2)
            ret = custom(x, do_linear2=False)
            np.testing.assert_array_equal(ret.numpy().shape, [3, 2])
            ret = custom(x, do_linear2=True)
            np.testing.assert_array_equal(ret.numpy().shape, [3, 1])

    def test_dropout(self):
        inp = np.ones([3, 32, 32], dtype='float32')
        with self.static_graph():
            t = paddle.static.data(
                name='data',
                shape=[3, 32, 32],
                dtype='float32',
            )
            dropout = paddle.nn.Dropout(p=0.35)
            ret = dropout(t)
            ret2 = paddle.nn.functional.dropout(t, p=0.35)
            static_ret, static_ret2 = self.get_static_graph_result(
                feed={'data': inp}, fetch_list=[ret, ret2]
            )
        with self.dynamic_graph():
            t = base.to_variable(inp)
            dropout = paddle.nn.Dropout(p=0.35)
            dy_ret = dropout(t)
            dy_ret2 = paddle.nn.functional.dropout(t, p=0.35)
            dy_ret_value = dy_ret.numpy()
            dy_ret2_value = dy_ret2.numpy()

        np.testing.assert_array_equal(static_ret, static_ret2)
        np.testing.assert_array_equal(dy_ret_value, dy_ret2_value)
        np.testing.assert_array_equal(static_ret, dy_ret_value)

    def test_linear(self):
        inp = np.ones([3, 32, 32], dtype='float32')
        with self.static_graph():
            t = paddle.static.data(
                name='data', shape=[3, 32, 32], dtype='float32'
            )
            linear = paddle.nn.Linear(
                32,
                4,
                bias_attr=paddle.nn.initializer.Constant(value=1),
            )
            ret = linear(t)
            static_ret = self.get_static_graph_result(
                feed={'data': inp}, fetch_list=[ret]
            )[0]
        with self.dynamic_graph():
            t = base.to_variable(inp)
            linear = paddle.nn.Linear(
                32,
                4,
                bias_attr=paddle.nn.initializer.Constant(value=1),
            )
            dy_ret = linear(t)
            dy_ret_value = dy_ret.numpy()

        np.testing.assert_array_equal(static_ret, dy_ret_value)

        with self.static_graph():

            # the input of Linear must be Variable.
            def test_Variable():
                inp = np.ones([3, 32, 32], dtype='float32')
                linear = paddle.nn.Linear(
                    32,
                    4,
                    bias_attr=paddle.nn.initializer.Constant(value=1),
                )
                linear_ret1 = linear(inp)

            self.assertRaises(TypeError, test_Variable)

            # the input dtype of Linear must be float16 or float32 or float64
            # float16 can only be used on a GPU place
            def test_type():
                inp = np.ones([3, 32, 32], dtype='int32')
                linear = paddle.nn.Linear(
                    32,
                    4,
                    bias_attr=paddle.nn.initializer.Constant(value=1),
                )
                linear_ret2 = linear(inp)

            self.assertRaises(TypeError, test_type)

    def test_cvm(self):
        inp = np.ones([10, 10], dtype='float32')
        arr = [[0.6931472, -1.904654e-09, 1, 1, 1, 1, 1, 1, 1, 1]] * 10
        cvm1 = np.array(arr, dtype='float32')
        cvm2 = np.ones([10, 8], dtype='float32')
        show_clk = np.ones([10, 2], dtype='float32')
        with self.static_graph():
            x = paddle.static.data(
                name='data',
                shape=[10, 10],
                dtype='float32',
            )
            u = paddle.static.data(
                name='show_click',
                shape=[10, 2],
                dtype='float32',
            )
            no_cvm = paddle.static.nn.continuous_value_model(x, u, True)
            static_ret1 = self.get_static_graph_result(
                feed={'data': inp, 'show_click': show_clk},
                fetch_list=[no_cvm],
            )[0]
        with self.static_graph():
            x = paddle.static.data(
                name='data',
                shape=[10, 10],
                dtype='float32',
            )
            u = paddle.static.data(
                name='show_click',
                shape=[10, 2],
                dtype='float32',
            )
            cvm = paddle.static.nn.continuous_value_model(x, u, False)
            static_ret2 = self.get_static_graph_result(
                feed={'data': inp, 'show_click': show_clk}, fetch_list=[cvm]
            )[0]
        np.testing.assert_allclose(static_ret1, cvm1, rtol=1e-5, atol=1e-06)
        np.testing.assert_allclose(static_ret2, cvm2, rtol=1e-5, atol=1e-06)

    def test_Flatten(self):
        inp = np.ones([3, 4, 4, 5], dtype='float32')
        with self.static_graph():
            t = paddle.static.data(
                name='data', shape=[3, 4, 4, 5], dtype='float32'
            )
            flatten = paddle.nn.Flatten()
            ret = flatten(t)
            static_ret = self.get_static_graph_result(
                feed={'data': inp}, fetch_list=[ret]
            )[0]
        with self.dynamic_graph():
            t = base.to_variable(inp)
            flatten = paddle.nn.Flatten()
            dy_ret = flatten(t)
            dy_ret_value = dy_ret.numpy()

        np.testing.assert_array_equal(static_ret, dy_ret_value)

        with self.static_graph():

            # the input of Linear must be Variable.
            def test_Variable():
                inp = np.ones([3, 32, 32], dtype='float32')
                linear = paddle.nn.Linear(
                    32,
                    4,
                    bias_attr=paddle.nn.initializer.Constant(value=1),
                )
                linear_ret1 = linear(inp)

            self.assertRaises(TypeError, test_Variable)

            # the input dtype of Linear must be float16 or float32 or float64
            # float16 can only be used on a GPU place
            def test_type():
                inp = np.ones([3, 32, 32], dtype='int32')
270
                linear = paddle.nn.Linear(
                    32,
                    4,
                    bias_attr=paddle.nn.initializer.Constant(value=1),
                )
                linear_ret2 = linear(inp)

            self.assertRaises(TypeError, test_type)

    def test_SyncBatchNorm(self):
        if core.is_compiled_with_cuda():
            with self.static_graph():
                t = paddle.static.data(
                    name='t', shape=[-1, 3, 5, 5], dtype='float32'
                )
                my_sync_bn = paddle.nn.SyncBatchNorm(3)
                ret = my_sync_bn(t)
                static_ret = self.get_static_graph_result(
                    feed={'t': np.ones([3, 3, 5, 5], dtype='float32')},
                    fetch_list=[ret],
                )[0]

            with self.dynamic_graph():
                t = np.ones([3, 3, 5, 5], dtype='float32')
                my_syncbn = paddle.nn.SyncBatchNorm(3)
                dy_ret = my_syncbn(base.to_variable(t))
                dy_ret_value = dy_ret.numpy()
            np.testing.assert_array_equal(static_ret, dy_ret_value)

    def test_relu(self):
        with self.static_graph():
            t = paddle.static.data(name='t', shape=[-1, 3, 3], dtype='float32')
            ret = F.relu(t)
            static_ret = self.get_static_graph_result(
                feed={'t': np.ones([3, 3], dtype='float32')}, fetch_list=[ret]
            )[0]

        with self.dynamic_graph():
            t = np.ones([3, 3], dtype='float32')
            dy_ret = F.relu(base.to_variable(t))
            dy_ret_value = dy_ret.numpy()

        np.testing.assert_allclose(static_ret, dy_ret_value, rtol=1e-05)

    def test_matmul(self):
        with self.static_graph():
            t = paddle.static.data(name='t', shape=[-1, 3, 3], dtype='float32')
            t2 = paddle.static.data(
                name='t2', shape=[-1, 3, 3], dtype='float32'
            )
            ret = paddle.matmul(t, t2)
            static_ret = self.get_static_graph_result(
                feed={
                    't': np.ones([3, 3], dtype='float32'),
                    't2': np.ones([3, 3], dtype='float32'),
                },
                fetch_list=[ret],
            )[0]

        with self.dynamic_graph():
            t = np.ones([3, 3], dtype='float32')
            t2 = np.ones([3, 3], dtype='float32')
            dy_ret = paddle.matmul(base.to_variable(t), base.to_variable(t2))
            dy_ret_value = dy_ret.numpy()

        np.testing.assert_allclose(static_ret, dy_ret_value, rtol=1e-05)

    def test_elementwise_math(self):
        n = np.ones([3, 3], dtype='float32')
        n2 = np.ones([3, 3], dtype='float32') * 1.1
        n3 = np.ones([3, 3], dtype='float32') * 2
        n4 = np.ones([3, 3], dtype='float32') * 3
        n5 = np.ones([3, 3], dtype='float32') * 4
        n6 = np.ones([3, 3], dtype='float32') * 5

        with self.static_graph():
            t = paddle.static.data(name='t', shape=[-1, 3, 3], dtype='float32')
            t2 = paddle.static.data(
                name='t2', shape=[-1, 3, 3], dtype='float32'
            )
            t3 = paddle.static.data(
                name='t3', shape=[-1, 3, 3], dtype='float32'
            )
            t4 = paddle.static.data(
                name='t4', shape=[-1, 3, 3], dtype='float32'
            )
            t5 = paddle.static.data(
                name='t5', shape=[-1, 3, 3], dtype='float32'
            )
            t6 = paddle.static.data(
                name='t6', shape=[-1, 3, 3], dtype='float32'
            )

            ret = paddle.add(t, t2)
            ret = paddle.pow(ret, t3)
            ret = paddle.divide(ret, t4)
            ret = paddle.subtract(ret, t5)
            ret = paddle.multiply(ret, t6)

            static_ret = self.get_static_graph_result(
                feed={'t': n, 't2': n2, 't3': n3, 't4': n4, 't5': n5, 't6': n6},
                fetch_list=[ret],
            )[0]

        with self.dynamic_graph():
            ret = paddle.add(to_variable(n), to_variable(n2))
            ret = paddle.pow(ret, to_variable(n3))
            ret = paddle.divide(ret, to_variable(n4))
            ret = paddle.subtract(ret, to_variable(n5))
            dy_ret = paddle.multiply(ret, to_variable(n6))
            dy_ret_value = dy_ret.numpy()

        np.testing.assert_allclose(static_ret, dy_ret_value, rtol=1e-05)

    def test_elementwise_minmax(self):
        n = np.ones([3, 3], dtype='float32')
        n2 = np.ones([3, 3], dtype='float32') * 2

        with self.dynamic_graph():
            min_ret = paddle.minimum(to_variable(n), to_variable(n2))
            max_ret = paddle.maximum(to_variable(n), to_variable(n2))
            min_ret_value = min_ret.numpy()
            max_ret_value = max_ret.numpy()

        np.testing.assert_allclose(n, min_ret_value, rtol=1e-05)
        np.testing.assert_allclose(n2, max_ret_value, rtol=1e-05)

    def test_conv2d_transpose(self):
        inp_np = np.arange(0, 24).reshape([2, 3, 2, 2]).astype('float32')
        with self.static_graph():
            img = paddle.static.data(
                name='pixel', shape=[-1, 3, 2, 2], dtype='float32'
            )
            out = paddle.static.nn.conv2d_transpose(
                input=img,
                num_filters=10,
                filter_size=27,
                act='sigmoid',
                bias_attr=paddle.nn.initializer.Constant(value=1),
            )
            static_rlt = self.get_static_graph_result(
                feed={'pixel': inp_np}, fetch_list=[out]
            )[0]
        with self.static_graph():
            img = paddle.static.data(
                name='pixel', shape=[-1, 3, 2, 2], dtype='float32'
            )
            conv2d_transpose = paddle.nn.Conv2DTranspose(
                3,
                10,
                27,
                bias_attr=paddle.nn.initializer.Constant(value=1),
            )
            out = conv2d_transpose(img)
            out = paddle.nn.functional.sigmoid(out)
            static_rlt2 = self.get_static_graph_result(
                feed={'pixel': inp_np}, fetch_list=[out]
            )[0]
        with self.dynamic_graph():
            conv2d_transpose = paddle.nn.Conv2DTranspose(
                3,
                10,
                27,
                bias_attr=paddle.nn.initializer.Constant(value=1),
            )
            dy_rlt = conv2d_transpose(base.to_variable(inp_np))
            dy_rlt = paddle.nn.functional.sigmoid(dy_rlt)
            dy_rlt_value = dy_rlt.numpy()
        np.testing.assert_allclose(static_rlt2, static_rlt, rtol=1e-05)
        np.testing.assert_allclose(dy_rlt_value, static_rlt2, rtol=1e-05)

        with self.dynamic_graph():
            images = np.ones([2, 3, 5, 5], dtype='float32')
            custom_weight = np.random.randn(3, 3, 2, 2).astype("float32")
            weight_attr = fluid.ParamAttr(
                initializer=paddle.nn.initializer.Assign(custom_weight)
            )
            conv2d1 = paddle.nn.Conv2DTranspose(3, 3, [2, 2])
            conv2d2 = paddle.nn.Conv2DTranspose(
                3,
                3,
                [2, 2],
                weight_attr=weight_attr,
            )
            dy_ret1 = conv2d1(base.to_variable(images))
            dy_ret2 = conv2d2(base.to_variable(images))
            self.assertFalse(np.array_equal(dy_ret1.numpy(), dy_ret2.numpy()))

            conv2d1_weight_np = conv2d1.weight.numpy()
            conv2d1_bias = conv2d1.bias
            self.assertFalse(
                np.array_equal(conv2d1_weight_np, conv2d2.weight.numpy())
            )
            conv2d2.weight.set_value(conv2d1_weight_np)
            np.testing.assert_array_equal(
                conv2d1_weight_np, conv2d2.weight.numpy()
            )
            conv2d2.bias.set_value(conv2d1_bias)
            dy_ret1 = conv2d1(base.to_variable(images))
            dy_ret2 = conv2d2(base.to_variable(images))
            np.testing.assert_array_equal(dy_ret1.numpy(), dy_ret2.numpy())

            conv2d2.weight = conv2d1.weight
            conv2d2.bias = conv2d1.bias
            np.testing.assert_array_equal(
                conv2d1.weight.numpy(), conv2d2.weight.numpy()
            )
            np.testing.assert_array_equal(
                conv2d1.bias.numpy(), conv2d2.bias.numpy()
            )

        with self.static_graph():

            # the input of Conv2DTranspose must be Variable.
            def test_Variable():
                images = np.ones([2, 3, 5, 5], dtype='float32')
                conv2d = paddle.nn.Conv2DTranspose(3, 3, [2, 2])
                conv2d_ret1 = conv2d(images)

            self.assertRaises(TypeError, test_Variable)

            # the input dtype of Conv2DTranspose must be float16 or float32 or float64
            # float16 can only be used on a GPU place
            def test_type():
                images = paddle.static.data(
                    name='pixel', shape=[-1, 3, 5, 5], dtype='int32'
                )
                conv2d = paddle.nn.Conv2DTranspose(3, 3, [2, 2])
                conv2d_ret2 = conv2d(images)

            self.assertRaises(TypeError, test_type)

    def test_bilinear_tensor_product(self):
        inp_np_x = np.array([[1, 2, 3]]).astype('float32')
        inp_np_y = np.array([[4, 5, 6]]).astype('float32')

        with self.static_graph():
            data_x = paddle.static.data(name='x', shape=[1, 3], dtype="float32")
            data_y = paddle.static.data(name='y', shape=[1, 3], dtype="float32")
            out = paddle.static.nn.common.bilinear_tensor_product(
                data_x,
                data_y,
                6,
                bias_attr=paddle.nn.initializer.Constant(value=1),
                act='sigmoid',
            )

            static_rlt = self.get_static_graph_result(
                feed={'x': inp_np_x, 'y': inp_np_y}, fetch_list=[out]
            )[0]

        with self.static_graph():
            data_x = paddle.static.data(name='x', shape=[1, 3], dtype="float32")
            data_y = paddle.static.data(name='y', shape=[1, 3], dtype="float32")
            btp = paddle.nn.Bilinear(
                3,
                3,
                6,
                bias_attr=paddle.nn.initializer.Constant(value=1),
            )
            out = btp(data_x, data_y)
            out = paddle.nn.functional.sigmoid(out)
            static_rlt2 = self.get_static_graph_result(
                feed={'x': inp_np_x, 'y': inp_np_y}, fetch_list=[out]
            )[0]
        with self.dynamic_graph():
            btp = paddle.nn.Bilinear(
                3,
                3,
                6,
                bias_attr=paddle.nn.initializer.Constant(value=1),
            )
            dy_rlt = btp(base.to_variable(inp_np_x), base.to_variable(inp_np_y))
            dy_rlt = paddle.nn.functional.sigmoid(dy_rlt)
            dy_rlt_value = dy_rlt.numpy()

        with self.dynamic_graph():
            btp2 = paddle.nn.Bilinear(3, 3, 6)
            dy_rlt2 = btp2(
                base.to_variable(inp_np_x), base.to_variable(inp_np_y)
            )
            dy_rlt2 = paddle.nn.functional.sigmoid(dy_rlt2)
            dy_rlt2_value = dy_rlt2.numpy()

        with self.static_graph():
            data_x2 = paddle.static.data(
                name='x', shape=[1, 3], dtype="float32"
            )
            data_y2 = paddle.static.data(
                name='y', shape=[1, 3], dtype="float32"
            )
            out2 = paddle.static.nn.common.bilinear_tensor_product(
                data_x2, data_y2, 6, act='sigmoid'
            )

            static_rlt3 = self.get_static_graph_result(
                feed={'x': inp_np_x, 'y': inp_np_y}, fetch_list=[out2]
            )[0]

        np.testing.assert_array_equal(dy_rlt2_value, static_rlt3)
        np.testing.assert_array_equal(static_rlt2, static_rlt)
        np.testing.assert_array_equal(dy_rlt_value, static_rlt)

        with self.dynamic_graph():
            custom_weight = np.random.randn(6, 3, 3).astype("float32")
            weight_attr = fluid.ParamAttr(
                initializer=paddle.nn.initializer.Assign(custom_weight)
            )
            btp1 = paddle.nn.Bilinear(3, 3, 6)
            btp2 = paddle.nn.Bilinear(3, 3, 6, weight_attr=weight_attr)
            dy_rlt1 = btp1(
                base.to_variable(inp_np_x), base.to_variable(inp_np_y)
            )
            dy_rlt1 = paddle.nn.functional.sigmoid(dy_rlt1)
            dy_rlt2 = btp2(
                base.to_variable(inp_np_x), base.to_variable(inp_np_y)
            )
            dy_rlt2 = paddle.nn.functional.sigmoid(dy_rlt2)
            self.assertFalse(np.array_equal(dy_rlt1.numpy(), dy_rlt2.numpy()))
            btp2.weight.set_value(btp1.weight.numpy())
            btp2.bias.set_value(btp1.bias)
            dy_rlt1 = btp1(
                base.to_variable(inp_np_x), base.to_variable(inp_np_y)
            )
            dy_rlt2 = btp2(
                base.to_variable(inp_np_x), base.to_variable(inp_np_y)
            )
            np.testing.assert_array_equal(dy_rlt1.numpy(), dy_rlt2.numpy())

            btp2.weight = btp1.weight
            btp2.bias = btp1.bias
            np.testing.assert_array_equal(
                btp1.weight.numpy(), btp2.weight.numpy()
            )
            np.testing.assert_array_equal(btp1.bias.numpy(), btp2.bias.numpy())

    def test_embedding(self):
        inp_word = np.array([[[1]]]).astype('int64')
        dict_size = 20
        with self.static_graph():
            data_t = paddle.static.data(
                name='word', shape=[-1, 1], dtype='int64'
            )
            data_t.desc.set_need_check_feed(False)
            emb = layers.embedding(
                input=data_t,
                size=[dict_size, 32],
                param_attr='emb.w',
                is_sparse=False,
            )
            static_rlt = self.get_static_graph_result(
                feed={'word': inp_word}, fetch_list=[emb]
            )[0]
        with self.static_graph():
            data_t = paddle.static.data(
                name='word', shape=[-1, 1], dtype='int64'
            )
            data_t.desc.set_need_check_feed(False)
            emb2 = paddle.nn.Embedding(
                dict_size, 32, weight_attr='emb.w', sparse=False
            )
            emb_rlt = emb2(data_t)
            static_rlt2 = self.get_static_graph_result(
                feed={'word': inp_word}, fetch_list=[emb_rlt]
            )[0]
        with self.dynamic_graph():

            emb2 = paddle.nn.Embedding(
                dict_size, 32, weight_attr='emb.w', sparse=False
            )
            dy_rlt = emb2(base.to_variable(inp_word))
            dy_rlt_value = dy_rlt.numpy()

        self.assertTrue(np.allclose(static_rlt2, static_rlt))
        self.assertTrue(np.allclose(dy_rlt_value, static_rlt))

        with self.dynamic_graph():
            custom_weight = np.random.randn(dict_size, 32).astype("float32")
            weight_attr = fluid.ParamAttr(
                initializer=paddle.nn.initializer.Assign(custom_weight)
            )
            emb1 = paddle.nn.Embedding(dict_size, 32, sparse=False)
            emb2 = paddle.nn.Embedding(
                dict_size, 32, weight_attr=weight_attr, sparse=False
            )
            rep1 = emb1(base.to_variable(inp_word))
            rep2 = emb2(base.to_variable(inp_word))
            self.assertFalse(np.array_equal(emb1.weight.numpy(), custom_weight))
            np.testing.assert_array_equal(emb2.weight.numpy(), custom_weight)
            self.assertFalse(np.array_equal(rep1.numpy(), rep2.numpy()))
            emb2.weight.set_value(emb1.weight.numpy())
            rep2 = emb2(base.to_variable(inp_word))
            np.testing.assert_array_equal(rep1.numpy(), rep2.numpy())

            emb2.weight = emb1.weight
            np.testing.assert_array_equal(
                emb1.weight.numpy(), emb2.weight.numpy()
            )

    def test_one_hot(self):
        with self.dynamic_graph():
            label = fluid.dygraph.to_variable(np.array([[1], [1], [3], [0]]))
            one_hot_label1 = paddle.nn.functional.one_hot(label, 4)
            one_hot_label2 = paddle.nn.functional.one_hot(
                label, fluid.dygraph.to_variable(np.array([4]))
            )
            np.testing.assert_array_equal(
                one_hot_label1.numpy(), one_hot_label2.numpy()
            )

    def test_split(self):
        with self.dynamic_graph():
            input = fluid.dygraph.to_variable(np.random.random((3, 8, 5)))
            x0, x1 = paddle.split(input, num_or_sections=2, axis=1)
            x00, x11 = paddle.split(
                input,
                num_or_sections=2,
                axis=fluid.dygraph.to_variable(np.array([1])),
            )
            np.testing.assert_array_equal(x0.numpy(), x00.numpy())
            np.testing.assert_array_equal(x1.numpy(), x11.numpy())

    def test_topk(self):
        with self.dynamic_graph():
            input = fluid.dygraph.to_variable(np.random.random((13, 11)))
            top5_values1, top5_indices1 = paddle.topk(input, k=5)
            top5_values2, top5_indices2 = paddle.topk(
                input, k=fluid.dygraph.to_variable(np.array([5]))
            )
            np.testing.assert_array_equal(
                top5_values1.numpy(), top5_values2.numpy()
            )
            np.testing.assert_array_equal(
                top5_indices1.numpy(), top5_indices2.numpy()
            )

    def test_conv3d(self):
        with self.static_graph():
            images = paddle.static.data(
                name='pixel', shape=[-1, 3, 6, 6, 6], dtype='float32'
            )
            ret = paddle.static.nn.conv3d(
                input=images, num_filters=3, filter_size=2
            )
            static_ret = self.get_static_graph_result(
                feed={'pixel': np.ones([2, 3, 6, 6, 6], dtype='float32')},
                fetch_list=[ret],
            )[0]

        with self.static_graph():
            images = paddle.static.data(
                name='pixel', shape=[-1, 3, 6, 6, 6], dtype='float32'
            )
            conv3d = paddle.nn.Conv3D(
                in_channels=3, out_channels=3, kernel_size=2
            )
            ret = conv3d(images)
            static_ret2 = self.get_static_graph_result(
                feed={'pixel': np.ones([2, 3, 6, 6, 6], dtype='float32')},
                fetch_list=[ret],
            )[0]

        with self.dynamic_graph():
            images = np.ones([2, 3, 6, 6, 6], dtype='float32')
            conv3d = paddle.nn.Conv3D(
                in_channels=3, out_channels=3, kernel_size=2
            )
            dy_ret = conv3d(base.to_variable(images))
            dy_rlt_value = dy_ret.numpy()

        np.testing.assert_allclose(static_ret, dy_rlt_value, rtol=1e-05)
        np.testing.assert_allclose(static_ret, static_ret2, rtol=1e-05)

        with self.dynamic_graph():
            images = np.ones([2, 3, 6, 6, 6], dtype='float32')
            custom_weight = np.random.randn(3, 3, 2, 2, 2).astype("float32")
            weight_attr = fluid.ParamAttr(
                initializer=paddle.nn.initializer.Assign(custom_weight)
            )
            conv3d1 = paddle.nn.Conv3D(
                in_channels=3, out_channels=3, kernel_size=2
            )
            conv3d2 = paddle.nn.Conv3D(
                in_channels=3,
                out_channels=3,
                kernel_size=2,
                weight_attr=weight_attr,
            )
            dy_ret1 = conv3d1(base.to_variable(images))
            dy_ret2 = conv3d2(base.to_variable(images))
            self.assertFalse(np.array_equal(dy_ret1.numpy(), dy_ret2.numpy()))

            conv3d1_weight_np = conv3d1.weight.numpy()
            conv3d1_bias = conv3d1.bias
            self.assertFalse(
                np.array_equal(conv3d1_weight_np, conv3d2.weight.numpy())
            )
            conv3d2.weight.set_value(conv3d1_weight_np)
            np.testing.assert_array_equal(
                conv3d1_weight_np, conv3d2.weight.numpy()
            )
            conv3d1.bias.set_value(conv3d1_bias)
            dy_ret1 = conv3d1(base.to_variable(images))
            dy_ret2 = conv3d2(base.to_variable(images))
            np.testing.assert_array_equal(dy_ret1.numpy(), dy_ret2.numpy())

            conv3d2.weight = conv3d1.weight
            conv3d2.bias = conv3d1.bias
            np.testing.assert_array_equal(
                conv3d1.weight.numpy(), conv3d2.weight.numpy()
            )
            np.testing.assert_array_equal(
                conv3d1.bias.numpy(), conv3d2.bias.numpy()
            )

    def test_group_norm(self):
        if core.is_compiled_with_cuda():
            place = core.CUDAPlace(0)
        else:
            place = core.CPUPlace()

        shape = (2, 4, 3, 3)

        input = np.random.random(shape).astype('float32')

        with self.static_graph():
            X = paddle.static.data(
                name='X', shape=shape, dtype='float32', lod_level=1
            )
            ret = paddle.static.nn.group_norm(
                input=X,
                groups=2,
                param_attr=paddle.nn.initializer.Uniform(low=-0.5, high=0.5),
                bias_attr=paddle.nn.initializer.Constant(value=1),
            )
            static_ret = self.get_static_graph_result(
                feed={
                    'X': fluid.create_lod_tensor(
                        data=input, recursive_seq_lens=[[1, 1]], place=place
                    )
                },
                fetch_list=[ret],
                with_lod=True,
            )[0]

        with self.static_graph():
            X = paddle.static.data(
                name='X', shape=shape, dtype='float32', lod_level=1
            )
            groupNorm = paddle.nn.GroupNorm(
                num_channels=shape[1],
                num_groups=2,
                weight_attr=paddle.nn.initializer.Uniform(low=-0.5, high=0.5),
                bias_attr=paddle.nn.initializer.Constant(value=1),
            )
            ret = groupNorm(X)
            static_ret2 = self.get_static_graph_result(
                feed={
                    'X': fluid.create_lod_tensor(
                        data=input, recursive_seq_lens=[[1, 1]], place=place
                    )
                },
                fetch_list=[ret],
                with_lod=True,
            )[0]

        with self.dynamic_graph():
            groupNorm = paddle.nn.GroupNorm(
                num_channels=shape[1],
                num_groups=2,
                weight_attr=paddle.nn.initializer.Uniform(low=-0.5, high=0.5),
                bias_attr=paddle.nn.initializer.Constant(value=1),
            )
            dy_ret = groupNorm(base.to_variable(input))
            dy_rlt_value = dy_ret.numpy()

        np.testing.assert_allclose(static_ret, dy_rlt_value, rtol=1e-05)
        np.testing.assert_allclose(static_ret, static_ret2, rtol=1e-05)

    def test_instance_norm(self):
        if core.is_compiled_with_cuda():
            place = core.CUDAPlace(0)
        else:
            place = core.CPUPlace()

        shape = (2, 4, 3, 3)

        input = np.random.random(shape).astype('float32')

        with self.static_graph():
            X = paddle.static.data(name='X', shape=shape, dtype='float32')
            ret = paddle.static.nn.instance_norm(input=X)
            static_ret = self.get_static_graph_result(
                feed={'X': input}, fetch_list=[ret]
            )[0]

        with self.static_graph():
            X = paddle.static.data(name='X', shape=shape, dtype='float32')
            instanceNorm = paddle.nn.InstanceNorm2D(num_features=shape[1])
            ret = instanceNorm(X)
            static_ret2 = self.get_static_graph_result(
                feed={'X': input}, fetch_list=[ret]
            )[0]

        with self.dynamic_graph():
            instanceNorm = paddle.nn.InstanceNorm2D(num_features=shape[1])
            dy_ret = instanceNorm(base.to_variable(input))
            dy_rlt_value = dy_ret.numpy()

        with self.dynamic_graph():
            instanceNorm = paddle.nn.InstanceNorm2D(num_features=shape[1])
            dy_ret = instanceNorm(base.to_variable(input))
            dy_rlt_value2 = dy_ret.numpy()

        np.testing.assert_allclose(static_ret, dy_rlt_value, rtol=1e-05)
        np.testing.assert_allclose(static_ret, dy_rlt_value2, rtol=1e-05)
        np.testing.assert_allclose(static_ret, static_ret2, rtol=1e-05)

        with self.static_graph():
            # the input of InstanceNorm must be Variable.
            def test_Variable():
                instanceNorm = paddle.nn.InstanceNorm2D(num_features=shape[1])
                ret1 = instanceNorm(input)

            self.assertRaises(TypeError, test_Variable)

            # the input dtype of InstanceNorm must be float32 or float64
            def test_type():
                input = np.random.random(shape).astype('int32')
                instanceNorm = paddle.nn.InstanceNorm2D(num_features=shape[1])
                ret2 = instanceNorm(input)

            self.assertRaises(TypeError, test_type)

    def test_spectral_norm(self):
        if core.is_compiled_with_cuda():
            place = core.CUDAPlace(0)
        else:
            place = core.CPUPlace()

        shape = (2, 4, 3, 3)

        input = np.random.random(shape).astype('float32')

        with self.static_graph():
            Weight = paddle.static.data(
                name='Weight', shape=shape, dtype='float32', lod_level=1
            )
            ret = paddle.static.nn.spectral_norm(
                weight=Weight, dim=1, power_iters=2
            )
            static_ret = self.get_static_graph_result(
                feed={
                    'Weight': fluid.create_lod_tensor(
                        data=input, recursive_seq_lens=[[1, 1]], place=place
                    ),
                },
                fetch_list=[ret],
                with_lod=True,
            )[0]

        with self.static_graph():
            Weight = paddle.static.data(
                name='Weight', shape=shape, dtype='float32', lod_level=1
            )
            spectralNorm = paddle.nn.SpectralNorm(shape, dim=1, power_iters=2)
            ret = spectralNorm(Weight)
            static_ret2 = self.get_static_graph_result(
                feed={
                    'Weight': fluid.create_lod_tensor(
                        data=input, recursive_seq_lens=[[1, 1]], place=place
                    )
                },
                fetch_list=[ret],
                with_lod=True,
            )[0]

        with self.dynamic_graph():
            spectralNorm = paddle.nn.SpectralNorm(shape, dim=1, power_iters=2)
            dy_ret = spectralNorm(base.to_variable(input))
            dy_rlt_value = dy_ret.numpy()

        np.testing.assert_allclose(static_ret, dy_rlt_value, rtol=1e-05)
        np.testing.assert_allclose(static_ret, static_ret2, rtol=1e-05)

    def test_conv3d_transpose(self):
        input_array = (
            np.arange(0, 48).reshape([2, 3, 2, 2, 2]).astype('float32')
        )

        with self.static_graph():
            img = paddle.static.data(
                name='pixel', shape=[-1, 3, 2, 2, 2], dtype='float32'
            )
            out = paddle.static.nn.conv3d_transpose(
                input=img, num_filters=12, filter_size=12, use_cudnn=True
            )
            static_rlt = self.get_static_graph_result(
                feed={'pixel': input_array}, fetch_list=[out]
            )[0]
        with self.static_graph():
            img = paddle.static.data(
                name='pixel', shape=[-1, 3, 2, 2, 2], dtype='float32'
            )
            conv3d_transpose = paddle.nn.Conv3DTranspose(
                in_channels=3, out_channels=12, kernel_size=12
            )
            out = conv3d_transpose(img)
            static_rlt2 = self.get_static_graph_result(
                feed={'pixel': input_array}, fetch_list=[out]
            )[0]
        with self.dynamic_graph():
            conv3d_transpose = paddle.nn.Conv3DTranspose(
                in_channels=3, out_channels=12, kernel_size=12
            )
            dy_rlt = conv3d_transpose(base.to_variable(input_array))
            dy_rlt_value = dy_rlt.numpy()
        np.testing.assert_allclose(static_rlt2, static_rlt, rtol=1e-05)
        np.testing.assert_allclose(dy_rlt_value, static_rlt, rtol=1e-05)

        with self.dynamic_graph():
            images = np.ones([2, 3, 6, 6, 6], dtype='float32')
            custom_weight = np.random.randn(3, 3, 2, 2, 2).astype("float32")
            weight_attr = fluid.ParamAttr(
                initializer=paddle.nn.initializer.Assign(custom_weight)
            )
            conv3d1 = paddle.nn.Conv3DTranspose(
                in_channels=3,
                out_channels=3,
                kernel_size=2,
                bias_attr='conv3d1_b',
            )
            conv3d2 = paddle.nn.Conv3DTranspose(
                in_channels=3,
                out_channels=3,
                kernel_size=2,
                weight_attr=weight_attr,
                bias_attr='conv3d2_b',
            )
            dy_ret1 = conv3d1(base.to_variable(images))
            dy_ret2 = conv3d2(base.to_variable(images))
            self.assertFalse(np.array_equal(dy_ret1.numpy(), dy_ret2.numpy()))

            conv3d1_weight_np = conv3d1.weight.numpy()
            conv3d1_bias = conv3d1.bias
            self.assertFalse(
                np.array_equal(conv3d1_weight_np, conv3d2.weight.numpy())
            )
            conv3d2.weight.set_value(conv3d1_weight_np)
            np.testing.assert_array_equal(
                conv3d1_weight_np, conv3d2.weight.numpy()
            )
            conv3d1.bias.set_value(conv3d1_bias)
            dy_ret1 = conv3d1(base.to_variable(images))
            dy_ret2 = conv3d2(base.to_variable(images))
            np.testing.assert_array_equal(dy_ret1.numpy(), dy_ret2.numpy())

            conv3d2.weight = conv3d1.weight
            conv3d2.bias = conv3d1.bias
            np.testing.assert_array_equal(
                conv3d1.weight.numpy(), conv3d2.weight.numpy()
            )
            np.testing.assert_array_equal(
                conv3d1.bias.numpy(), conv3d2.bias.numpy()
            )

    def test_while_loop(self):
        with self.static_graph():
            i = paddle.tensor.fill_constant(shape=[1], dtype='int64', value=0)
            ten = paddle.tensor.fill_constant(
                shape=[1], dtype='int64', value=10
            )

            def cond(i):
                return paddle.less_than(i, ten)

            def body(i):
                return i + 1

            out = paddle.static.nn.while_loop(cond, body, [i])
            static_ret = self.get_static_graph_result(feed={}, fetch_list=out)

        with self.dynamic_graph():
            i = paddle.tensor.fill_constant(shape=[1], dtype='int64', value=0)
            ten = paddle.tensor.fill_constant(
                shape=[1], dtype='int64', value=10
            )

            def cond1(i):
                return paddle.less_than(i, ten)

            def body1(i):
                return i + 1

            dy_ret = paddle.static.nn.while_loop(cond1, body1, [i])
            with self.assertRaises(ValueError):
                j = paddle.tensor.fill_constant(
                    shape=[1], dtype='int64', value=0
                )

                def body2(i):
                    return i + 1, i + 2

                paddle.static.nn.while_loop(cond1, body2, [j])

        np.testing.assert_array_equal(static_ret[0], dy_ret[0].numpy())

    def test_compare(self):
        value_a = np.arange(3)
        value_b = np.arange(3)
        # less than
        with self.static_graph():
            a = paddle.static.data(name='a', shape=[-1, 1], dtype='int64')
            b = paddle.static.data(name='b', shape=[-1, 1], dtype='int64')
            cond = paddle.less_than(x=a, y=b)
            static_ret = self.get_static_graph_result(
                feed={"a": value_a, "b": value_b}, fetch_list=[cond]
            )[0]
        with self.dynamic_graph():
            da = base.to_variable(value_a)
            db = base.to_variable(value_b)
            dcond = paddle.less_than(x=da, y=db)

            for i in range(len(static_ret)):
                self.assertTrue(dcond.numpy()[i] == static_ret[i])

        # less equal
        with self.static_graph():
            a1 = paddle.static.data(name='a1', shape=[-1, 1], dtype='int64')
            b1 = paddle.static.data(name='b1', shape=[-1, 1], dtype='int64')
            cond1 = paddle.less_equal(x=a1, y=b1)
            static_ret1 = self.get_static_graph_result(
                feed={"a1": value_a, "b1": value_b}, fetch_list=[cond1]
            )[0]
        with self.dynamic_graph():
            da1 = base.to_variable(value_a)
            db1 = base.to_variable(value_b)
            dcond1 = paddle.less_equal(x=da1, y=db1)

            for i in range(len(static_ret1)):
                self.assertTrue(dcond1.numpy()[i] == static_ret1[i])

        # greater than
        with self.static_graph():
            a2 = paddle.static.data(name='a2', shape=[-1, 1], dtype='int64')
            b2 = paddle.static.data(name='b2', shape=[-1, 1], dtype='int64')
            cond2 = paddle.greater_than(x=a2, y=b2)
            static_ret2 = self.get_static_graph_result(
                feed={"a2": value_a, "b2": value_b}, fetch_list=[cond2]
            )[0]
        with self.dynamic_graph():
            da2 = base.to_variable(value_a)
            db2 = base.to_variable(value_b)
            dcond2 = paddle.greater_than(x=da2, y=db2)

            for i in range(len(static_ret2)):
                self.assertTrue(dcond2.numpy()[i] == static_ret2[i])

        # greater equal
        with self.static_graph():
            a3 = paddle.static.data(name='a3', shape=[-1, 1], dtype='int64')
            b3 = paddle.static.data(name='b3', shape=[-1, 1], dtype='int64')
            cond3 = paddle.greater_equal(x=a3, y=b3)
            static_ret3 = self.get_static_graph_result(
                feed={"a3": value_a, "b3": value_b}, fetch_list=[cond3]
            )[0]
        with self.dynamic_graph():
            da3 = base.to_variable(value_a)
            db3 = base.to_variable(value_b)
            dcond3 = paddle.greater_equal(x=da3, y=db3)

            for i in range(len(static_ret3)):
                self.assertTrue(dcond3.numpy()[i] == static_ret3[i])

        # equal
        with self.static_graph():
            a4 = paddle.static.data(name='a4', shape=[-1, 1], dtype='int64')
            b4 = paddle.static.data(name='b4', shape=[-1, 1], dtype='int64')
            cond4 = paddle.equal(x=a4, y=b4)
            static_ret4 = self.get_static_graph_result(
                feed={"a4": value_a, "b4": value_b}, fetch_list=[cond4]
            )[0]
        with self.dynamic_graph():
            da4 = base.to_variable(value_a)
            db4 = base.to_variable(value_b)
            dcond4 = paddle.equal(x=da4, y=db4)

            for i in range(len(static_ret4)):
                self.assertTrue(dcond4.numpy()[i] == static_ret4[i])

        # not equal
        with self.static_graph():
            a5 = paddle.static.data(name='a5', shape=[-1, 1], dtype='int64')
            b5 = paddle.static.data(name='b5', shape=[-1, 1], dtype='int64')
            cond5 = paddle.equal(x=a5, y=b5)
            static_ret5 = self.get_static_graph_result(
                feed={"a5": value_a, "b5": value_b}, fetch_list=[cond5]
            )[0]
        with self.dynamic_graph():
            da5 = base.to_variable(value_a)
            db5 = base.to_variable(value_b)
            dcond5 = paddle.equal(x=da5, y=db5)

            for i in range(len(static_ret5)):
                self.assertTrue(dcond5.numpy()[i] == static_ret5[i])

    def test_cond(self):
        def less_than_branch(a, b):
            return paddle.add(a, b)

        def greater_equal_branch(a, b):
            return paddle.subtract(a, b)

        with self.static_graph():
            a = paddle.tensor.fill_constant(
                shape=[1], dtype='float32', value=0.1
            )
            b = paddle.tensor.fill_constant(
                shape=[1], dtype='float32', value=0.23
            )
            out = paddle.static.nn.cond(
                a >= b,
                lambda: greater_equal_branch(a, b),
                lambda: less_than_branch(a, b),
            )
            place = (
                fluid.CUDAPlace(0)
                if core.is_compiled_with_cuda()
                else fluid.CPUPlace()
            )
            exe = fluid.Executor(place)
            ret = exe.run(fetch_list=[out])
            static_res = ret[0]

        with self.dynamic_graph():
            a = fluid.dygraph.to_variable(np.array([0.1]).astype('float32'))
            b = fluid.dygraph.to_variable(np.array([0.23]).astype('float32'))
            out = paddle.static.nn.cond(
                a < b,
                lambda: less_than_branch(a, b),
                lambda: greater_equal_branch(a, b),
            )
            out2 = paddle.static.nn.cond(
                a >= b,
                lambda: greater_equal_branch(a, b),
                lambda: less_than_branch(a, b),
            )
            dynamic_res = out.numpy()
            dynamic_res2 = out2.numpy()
            np.testing.assert_array_equal(dynamic_res, dynamic_res2)
            with self.assertRaises(TypeError):
                paddle.static.nn.cond(a < b, 'str', 'str')
            with self.assertRaises(TypeError):
                paddle.static.nn.cond(a >= b, 'str', 'str')

        np.testing.assert_array_equal(static_res, dynamic_res)

    def test_case(self):
        def fn_1():
            return paddle.tensor.fill_constant(
                shape=[1, 2], dtype='float32', value=1
            )

        def fn_2():
            return paddle.tensor.fill_constant(
                shape=[2, 2], dtype='int32', value=2
            )

        def fn_3():
            return paddle.tensor.fill_constant(
                shape=[3], dtype='int32', value=3
            )

        with self.static_graph():
            x = paddle.tensor.fill_constant(
                shape=[1], dtype='float32', value=0.3
            )
            y = paddle.tensor.fill_constant(
                shape=[1], dtype='float32', value=0.1
            )
            z = paddle.tensor.fill_constant(
                shape=[1], dtype='float32', value=0.2
            )

            pred_1 = paddle.less_than(z, x)  # true: 0.2 < 0.3
            pred_2 = paddle.less_than(x, y)  # false: 0.3 < 0.1
            pred_3 = paddle.equal(x, y)  # false: 0.3 == 0.1

            out_1 = paddle.static.nn.case(
                pred_fn_pairs=[(pred_1, fn_1), (pred_2, fn_2)], default=fn_3
            )
            out_2 = paddle.static.nn.case(
                pred_fn_pairs=[(pred_2, fn_2), (pred_3, fn_3)]
            )

            place = (
                fluid.CUDAPlace(0)
                if core.is_compiled_with_cuda()
                else fluid.CPUPlace()
            )
            exe = fluid.Executor(place)
            static_res1, static_res2 = exe.run(fetch_list=[out_1, out_2])

        with self.dynamic_graph():
            x = paddle.tensor.fill_constant(
                shape=[1], dtype='float32', value=0.3
            )
            y = paddle.tensor.fill_constant(
                shape=[1], dtype='float32', value=0.1
            )
            z = paddle.tensor.fill_constant(
                shape=[1], dtype='float32', value=0.2
            )

            pred_1 = paddle.less_than(z, x)  # true: 0.2 < 0.3
            pred_2 = paddle.less_than(x, y)  # false: 0.3 < 0.1
            pred_3 = paddle.equal(x, y)  # false: 0.3 == 0.1

            out_1 = paddle.static.nn.case(
                pred_fn_pairs=[(pred_1, fn_1), (pred_2, fn_2)], default=fn_3
            )
            out_2 = paddle.static.nn.case(
                pred_fn_pairs=[(pred_2, fn_2), (pred_3, fn_3)]
            )
            dynamic_res1 = out_1.numpy()
            dynamic_res2 = out_2.numpy()

        np.testing.assert_array_equal(static_res1, dynamic_res1)
        np.testing.assert_array_equal(static_res2, dynamic_res2)

    def test_switch_case(self):
        def fn_1():
            return paddle.tensor.fill_constant(
                shape=[1, 2], dtype='float32', value=1
            )

        def fn_2():
            return paddle.tensor.fill_constant(
                shape=[2, 2], dtype='int32', value=2
            )

        def fn_3():
            return paddle.tensor.fill_constant(
                shape=[3], dtype='int32', value=3
            )

        with self.static_graph():
            index_1 = paddle.tensor.fill_constant(
                shape=[1], dtype='int32', value=1
            )
            index_2 = paddle.tensor.fill_constant(
                shape=[1], dtype='int32', value=2
            )

            out_1 = paddle.static.nn.switch_case(
                branch_index=index_1,
                branch_fns={1: fn_1, 2: fn_2},
                default=fn_3,
            )
            out_2 = paddle.static.nn.switch_case(
                branch_index=index_2,
                branch_fns=[(1, fn_1), (2, fn_2)],
                default=fn_3,
            )
            out_3 = paddle.static.nn.switch_case(
                branch_index=index_2,
                branch_fns=[(0, fn_1), (4, fn_2), (7, fn_3)],
            )

            place = (
                fluid.CUDAPlace(0)
                if core.is_compiled_with_cuda()
                else fluid.CPUPlace()
            )
            exe = fluid.Executor(place)
            static_res1, static_res2, static_res3 = exe.run(
                fetch_list=[out_1, out_2, out_3]
            )

        with self.dynamic_graph():
            index_1 = paddle.tensor.fill_constant(
                shape=[1], dtype='int32', value=1
            )
            index_2 = paddle.tensor.fill_constant(
                shape=[1], dtype='int32', value=2
            )

            out_1 = paddle.static.nn.switch_case(
                branch_index=index_1,
                branch_fns={1: fn_1, 2: fn_2},
                default=fn_3,
            )
            out_2 = paddle.static.nn.switch_case(
                branch_index=index_2,
                branch_fns=[(1, fn_1), (2, fn_2)],
                default=fn_3,
            )
            out_3 = paddle.static.nn.switch_case(
                branch_index=index_2,
                branch_fns=[(0, fn_1), (4, fn_2), (7, fn_3)],
            )

            dynamic_res1 = out_1.numpy()
            dynamic_res2 = out_2.numpy()
            dynamic_res3 = out_3.numpy()

        np.testing.assert_array_equal(static_res1, dynamic_res1)
        np.testing.assert_array_equal(static_res2, dynamic_res2)
        np.testing.assert_array_equal(static_res3, dynamic_res3)

    def test_crop_tensor(self):
        with self.static_graph():
            x = paddle.static.data(
                name="x1", shape=[-1, 6, 5, 8], dtype="float32"
            )

            dim1 = paddle.static.data(name="dim1", shape=[1], dtype="float32")
            dim2 = paddle.static.data(name="dim2", shape=[1], dtype="float32")
            crop_shape1 = (1, 2, 4, 4)
            crop_shape2 = paddle.static.data(
                name="crop_shape", shape=[4], dtype="float32"
            )
            crop_shape3 = [-1, dim1, dim2, 4]
            crop_offsets1 = [0, 0, 1, 0]
            crop_offsets2 = paddle.static.data(
                name="crop_offset", shape=[4], dtype="float32"
            )
            crop_offsets3 = [0, dim1, dim2, 0]

            out1 = paddle.crop(x, shape=crop_shape1, offsets=crop_offsets1)
            out2 = paddle.crop(x, shape=crop_shape2, offsets=crop_offsets2)
            out3 = paddle.crop(x, shape=crop_shape3, offsets=crop_offsets3)

            self.assertIsNotNone(out1)
            self.assertIsNotNone(out2)
            self.assertIsNotNone(out3)

    def test_shard_index(self):
        with self.static_graph():
            x = paddle.static.data(
                name="label", shape=[-1, 4, 1], dtype='int64'
            )
            shard_label = paddle.shard_index(
                input=x, index_num=20, nshards=2, shard_id=0
            )

        self.assertIsNotNone(shard_label)

    def test_accuracy(self):
        x = np.random.rand(3, 32, 32).astype("float32")
        y = np.array([[1], [0], [1]])
        with self.static_graph():
            data = paddle.static.data(
                name="input", shape=[-1, 32, 32], dtype="float32"
            )
            label = paddle.static.data(name="label", shape=[-1, 1], dtype="int")
            data_new = paddle.reshape(data, [3, 32 * 32])
            fc_out = paddle.nn.Linear(32 * 32, 10)(data_new)
            predict = paddle.nn.functional.softmax(fc_out)
            result = paddle.static.accuracy(input=predict, label=label, k=5)
            place = fluid.CPUPlace()
            exe = fluid.Executor(place)

            exe.run(fluid.default_startup_program())
            # x = np.random.rand(3, 32, 32).astype("float32")
            # y = np.array([[1], [0], [1]])

            static_out = exe.run(
                feed={"input": x, "label": y}, fetch_list=result
            )

        with self.dynamic_graph(force_to_use_cpu=True):
            data = base.to_variable(x)
            label = base.to_variable(y)
            data_new = paddle.reshape(data, [3, 32 * 32])
            fc_out = paddle.nn.Linear(32 * 32, 10)(data_new)
            predict = paddle.nn.functional.softmax(fc_out)
            dynamic_out = paddle.static.accuracy(
                input=predict, label=label, k=5
            )

        np.testing.assert_array_equal(static_out[0], dynamic_out.numpy())


class TestBook(LayerTest):
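    # Each make_* method below builds a single layer/op in isolation;
    # test_all_layers discovers them by name prefix and checks that the
    # static-graph and dygraph outputs of each one agree.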
    def setUp(self):
        self.only_static_set = set({"make_word_embedding"})
        self.not_compare_static_dygraph_set = set(
            {
                "make_gaussian_random",
                "make_kldiv_loss",
                "make_uniform_random_batch_size_like",
            }
        )
        self.all_close_compare = set({"make_spectral_norm"})

    def test_all_layers(self):
        attrs = (getattr(self, name) for name in dir(self))
        methods = filter(inspect.ismethod, attrs)
        for method in methods:
            if not method.__name__.startswith('make_'):
                continue
            self._low_data_bound = 0
            self._high_data_bound = 2
            self._batch_size = 2
            self._feed_dict = {}
            self._force_to_use_cpu = False
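            # First pass: build the layer in a static program and fetch its result.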
            with self.static_graph():
                static_var = method()
                if isinstance(static_var, tuple):
                    static_var = static_var[0]

                if static_var is not None:
                    fetch_list = [static_var.name]
                    static_result = self.get_static_graph_result(
                        feed=self._feed_dict,
                        fetch_list=fetch_list,
                        force_to_use_cpu=self._force_to_use_cpu,
                    )

                else:
                    continue
            if method.__name__ in self.only_static_set:
                continue

            with self.dynamic_graph(self._force_to_use_cpu):
                dy_result = method()
                if isinstance(dy_result, tuple):
                    dy_result = dy_result[0]
                dy_result_value = dy_result.numpy()

            if method.__name__ in self.all_close_compare:
                np.testing.assert_allclose(
                    static_result[0],
                    dy_result_value,
                    rtol=1e-05,
                    atol=0,
                    err_msg='Result of function [{}] compare failed'.format(
                        method.__name__
                    ),
                )
                continue

            if method.__name__ not in self.not_compare_static_dygraph_set:
                np.testing.assert_array_equal(
                    static_result[0],
                    dy_result_value,
                    err_msg='Result of function [{}] not equal'.format(
                        method.__name__
                    ),
                )

    def _get_np_data(self, shape, dtype, append_batch_size=True):
        np.random.seed(self.seed)
        if append_batch_size:
            shape = [self._batch_size] + shape
        if dtype == 'float32':
            return np.random.random(shape).astype(dtype)
        elif dtype == 'float64':
            return np.random.random(shape).astype(dtype)
        elif dtype == 'int32':
            return np.random.randint(
                self._low_data_bound, self._high_data_bound, shape
            ).astype(dtype)
        elif dtype == 'int64':
            return np.random.randint(
                self._low_data_bound, self._high_data_bound, shape
            ).astype(dtype)

    def _get_data(
        self, name, shape, dtype, set_feed_dict=True, append_batch_size=True
    ):
        if base.enabled():
            return base.to_variable(
                value=self._get_np_data(shape, dtype, append_batch_size),
                name=name,
                zero_copy=False,
            )
        else:
            if set_feed_dict:
                self._feed_dict[name] = self._get_np_data(
                    shape, dtype, append_batch_size
                )
            if append_batch_size:
                shape = [-1] + shape
            data = paddle.static.data(
                name=name,
                shape=shape,
                dtype=dtype,
            )
            data.desc.set_need_check_feed(False)
            return data

    def make_fit_a_line(self):
        with program_guard(
            fluid.default_main_program(),
            startup_program=fluid.default_startup_program(),
        ):
            x = self._get_data(name='x', shape=[13], dtype='float32')
            y_predict = paddle.nn.Linear(13, 1)(x)
            y = self._get_data(name='y', shape=[1], dtype='float32')
            cost = paddle.nn.functional.square_error_cost(
                input=y_predict, label=y
            )
            avg_cost = paddle.mean(cost)
            return avg_cost

    def make_recognize_digits_mlp(self):
        with program_guard(
            fluid.default_main_program(), fluid.default_startup_program()
        ):
            # Change g_program, so the rest layers use `g_program`
            images = self._get_data(name='pixel', shape=[784], dtype='float32')
            label = self._get_data(name='label', shape=[1], dtype='int64')
            hidden1 = paddle.nn.Linear(784, 128)(images)
            hidden1 = paddle.nn.functional.relu(hidden1)
            hidden2 = paddle.nn.Linear(128, 64)(hidden1)
            hidden2 = paddle.nn.functional.relu(hidden2)
            hidden1 = paddle.nn.Linear(128, 10, "sftmax.w1")(hidden1)
            hidden2 = paddle.nn.Linear(64, 10, "sftmax.w2")(hidden2)
            hidden = hidden1 + hidden2
            predict = paddle.nn.functional.softmax(hidden)
            cost = paddle.nn.functional.cross_entropy(
                input=predict, label=label, reduction='none', use_softmax=False
            )
            avg_cost = paddle.mean(cost)
            return avg_cost

    def make_conv2d_transpose(self):
        with program_guard(
            fluid.default_main_program(), fluid.default_startup_program()
        ):
            img = self._get_data(name='pixel', shape=[3, 2, 2], dtype='float32')
            return paddle.static.nn.conv2d_transpose(
                input=img, num_filters=10, output_size=28
            )

    def make_recognize_digits_conv(self):
        with program_guard(
            fluid.default_main_program(), fluid.default_startup_program()
        ):
            images = self._get_data(
                name='pixel', shape=[1, 28, 28], dtype='float32'
            )
            label = self._get_data(name='label', shape=[1], dtype='int64')
            conv_pool_1 = nets.simple_img_conv_pool(
                input=images,
                filter_size=5,
                num_filters=2,
                pool_size=2,
                pool_stride=2,
                act="relu",
            )
            conv_pool_2 = nets.simple_img_conv_pool(
                input=conv_pool_1,
                filter_size=5,
                num_filters=4,
                pool_size=2,
                pool_stride=2,
                act="relu",
            )
            conv_pool_2_new = paddle.reshape(
                conv_pool_2,
                [
                    conv_pool_2.shape[0],
                    conv_pool_2.shape[1]
                    * conv_pool_2.shape[2]
                    * conv_pool_2.shape[3],
                ],
            )
            predict = paddle.nn.Linear(
                conv_pool_2.shape[1]
                * conv_pool_2.shape[2]
                * conv_pool_2.shape[3],
                10,
            )(conv_pool_2_new)
            predict = paddle.nn.functional.softmax(predict)
            cost = paddle.nn.functional.cross_entropy(
                input=predict, label=label, reduction='none', use_softmax=False
            )
            avg_cost = paddle.mean(cost)
            return avg_cost

    def make_word_embedding(self):
        with program_guard(
            fluid.default_main_program(), fluid.default_startup_program()
        ):
            dict_size = 10000
            embed_size = 32
            first_word = self._get_data(name='firstw', shape=[1], dtype='int64')
            second_word = self._get_data(
                name='secondw', shape=[1], dtype='int64'
            )
            third_word = self._get_data(name='thirdw', shape=[1], dtype='int64')
            forth_word = self._get_data(name='forthw', shape=[1], dtype='int64')
            next_word = self._get_data(name='nextw', shape=[1], dtype='int64')

            embed_first = layers.embedding(
                input=first_word,
                size=[dict_size, embed_size],
                dtype='float32',
                param_attr='shared_w',
            )
            embed_second = layers.embedding(
                input=second_word,
                size=[dict_size, embed_size],
                dtype='float32',
                param_attr='shared_w',
            )

            embed_third = layers.embedding(
                input=third_word,
                size=[dict_size, embed_size],
                dtype='float32',
                param_attr='shared_w',
            )
            embed_forth = layers.embedding(
                input=forth_word,
                size=[dict_size, embed_size],
                dtype='float32',
                param_attr='shared_w',
            )

            concat_embed = paddle.concat(
                [embed_first, embed_second, embed_third, embed_forth],
                axis=1,
            )

            hidden1 = paddle.static.nn.fc(
                x=concat_embed, size=256, activation='sigmoid'
            )
            predict_word = paddle.static.nn.fc(
                x=hidden1, size=dict_size, activation='softmax'
            )
            cost = paddle.nn.functional.cross_entropy(
                input=predict_word,
                label=next_word,
                reduction='none',
                use_softmax=False,
            )
            avg_cost = paddle.mean(cost)
            return avg_cost

    def make_pool2d(self):
        with program_guard(
            fluid.default_main_program(), fluid.default_startup_program()
        ):
            x = self._get_data(name='x', shape=[3, 224, 224], dtype='float32')
            return paddle.nn.functional.max_pool2d(
                x, kernel_size=[5, 3], stride=[1, 2], padding=(2, 1)
            )

    def make_pool2d_infershape(self):
        with program_guard(
            fluid.default_main_program(), fluid.default_startup_program()
        ):
            theta = self._get_data("theta", shape=[2, 3], dtype='float32')
            x = paddle.nn.functional.affine_grid(
                theta, out_shape=[2, 3, 244, 244]
            )
            return paddle.nn.functional.max_pool2d(
                x, kernel_size=[5, 3], stride=[1, 2], padding=(2, 1)
            )

    def make_softmax(self):
        with program_guard(
            fluid.default_main_program(), fluid.default_startup_program()
        ):
            data = self._get_data(name='data', shape=[10], dtype='float32')
            hid = paddle.nn.Linear(10, 20)(data)
            return paddle.nn.functional.softmax(hid, axis=1)

    @prog_scope()
    def make_nce(self):
        window_size = 5
        words = []
        for i in range(window_size):
            words.append(
                self._get_data(name=f'word_{i}', shape=[1], dtype='int64')
            )

        dict_size = 10000
        label_word = int(window_size // 2) + 1

        embs = []
        for i in range(window_size):
            if i == label_word:
                continue

            emb = layers.embedding(
                input=words[i],
                size=[dict_size, 32],
                param_attr='emb.w',
                is_sparse=True,
            )

            embs.append(emb)

        embs = paddle.concat(embs, axis=1)
        loss = paddle.static.nn.nce(
            input=embs,
            label=words[label_word],
            num_total_classes=dict_size,
            param_attr='nce.w',
            bias_attr='nce.b',
        )
        avg_loss = paddle.mean(loss)
        return avg_loss

    def make_multiplex(self):
        with program_guard(
            fluid.default_main_program(), fluid.default_startup_program()
        ):
            x1 = self._get_data(name='x1', shape=[4], dtype='float32')
            x2 = self._get_data(name='x2', shape=[4], dtype='float32')
            index = self._get_data(name='index', shape=[1], dtype='int32')
            out = paddle.multiplex(inputs=[x1, x2], index=index)
            return out

    def make_softmax_with_cross_entropy(self):
        with program_guard(
            fluid.default_main_program(), fluid.default_startup_program()
        ):
            x = self._get_data(name='x', shape=[16], dtype='float32')
            y = self._get_data(name='label', shape=[1], dtype='int64')
            loss, softmax = paddle.nn.functional.softmax_with_cross_entropy(
                x, y, return_softmax=True
            )
            self.assertIsNotNone(loss)
            self.assertIsNotNone(softmax)

            loss = paddle.nn.functional.softmax_with_cross_entropy(x, y)
            self.assertIsNotNone(loss)

            x1 = self._get_data(name='x1', shape=[16, 32, 64], dtype='float32')
            y1 = self._get_data(name='label1', shape=[1, 32, 64], dtype='int64')
            y2 = self._get_data(name='label2', shape=[16, 1, 64], dtype='int64')
            y3 = self._get_data(name='label3', shape=[16, 32, 1], dtype='int64')
            loss1 = paddle.nn.functional.softmax_with_cross_entropy(
                x1, y1, axis=1
            )
            loss2 = paddle.nn.functional.softmax_with_cross_entropy(
                x1, y2, axis=2
            )
            loss3 = paddle.nn.functional.softmax_with_cross_entropy(
                x1, y3, axis=3
            )
            loss4 = paddle.nn.functional.softmax_with_cross_entropy(
                x1, y3, axis=-1
            )
            self.assertIsNotNone(loss1)
            self.assertIsNotNone(loss2)
            self.assertIsNotNone(loss3)
            self.assertIsNotNone(loss4)
            return loss4

    def make_scatter(self):
        with program_guard(
            fluid.default_main_program(), fluid.default_startup_program()
        ):
            x = self._get_data(
                name='x', shape=[3, 3], append_batch_size=False, dtype='float32'
            )
            idx = self._get_data(
                name='idx', shape=[2], append_batch_size=False, dtype='int32'
            )
            updates = self._get_data(
                name='updates',
                shape=[2, 3],
                dtype='float32',
                append_batch_size=False,
            )
            out = paddle.scatter(x, index=idx, updates=updates)
            return out

    def make_one_hot(self):
        with fluid.framework._dygraph_place_guard(place=fluid.CPUPlace()):
            label = self._get_data(name="label", shape=[1], dtype="int32")
            one_hot_label = paddle.nn.functional.one_hot(label, 10)
            return one_hot_label

    def make_label_smooth(self):
        # TODO(minqiyang): support gpu ut
        self._force_to_use_cpu = True
        with fluid.framework._dygraph_place_guard(place=fluid.CPUPlace()):
            label = self._get_data(name="label", shape=[1], dtype="int32")
            one_hot_label = paddle.nn.functional.one_hot(label, 10)
            smooth_label = F.label_smooth(label=one_hot_label, epsilon=0.1)
            return smooth_label

    def make_topk(self):
        with program_guard(
            fluid.default_main_program(), fluid.default_startup_program()
        ):
            data = self._get_data(name="label", shape=[200], dtype="float32")
            values, indices = paddle.topk(data, k=5)
            # return both outputs; the second, unreachable `return` was dead code
            return values, indices

    def make_l2_normalize(self):
        with program_guard(
            fluid.default_main_program(), fluid.default_startup_program()
        ):
            x = self._get_data(name='x', shape=[8, 7, 10], dtype="float32")
            output = paddle.nn.functional.normalize(x, axis=1)
            return output

    def make_shape(self):
        with program_guard(
            fluid.default_main_program(), fluid.default_startup_program()
        ):
            input = self._get_data(
                name="input", shape=[3, 100, 100], dtype="float32"
            )
            out = paddle.shape(input)
            return out

    def make_pad2d(self):
        with program_guard(
            fluid.default_main_program(), fluid.default_startup_program()
        ):
            input = self._get_data(
                name="input", shape=[3, 100, 100], dtype="float32"
            )

            tmp_pad = paddle.nn.Pad2D(
                padding=[1, 2, 3, 4],
                mode='reflect',
                data_format='NCHW',
                name="shape",
            )
            out = tmp_pad(input)
            return out

    def make_mish(self):
        with program_guard(
            fluid.default_main_program(), fluid.default_startup_program()
        ):
            input = self._get_data(name="input", shape=[16], dtype="float32")
            out = paddle.nn.functional.mish(input, name='mish')
            return out

    def make_cross_entropy(self):
        with program_guard(
            fluid.default_main_program(), fluid.default_startup_program()
        ):
            x = self._get_data(name="x", shape=[30, 10], dtype="float32")
            label = self._get_data(name="label", shape=[30, 1], dtype="int64")
            out = paddle.nn.functional.cross_entropy(
                x,
                label,
                soft_label=False,
                ignore_index=4,
                reduction='none',
                use_softmax=False,
            )
            return out

    def make_uniform_random_batch_size_like(self):
        with program_guard(
            fluid.default_main_program(), fluid.default_startup_program()
        ):
            input = self._get_data(
                name="input", shape=[13, 11], dtype='float32'
            )
            out = random.uniform_random_batch_size_like(input, [-1, 11])
            return out

    def make_gaussian_random(self):
        with program_guard(
            fluid.default_main_program(), fluid.default_startup_program()
        ):
            out = random.gaussian(shape=[20, 30])
            return out

    def make_sum(self):
        with program_guard(
            fluid.default_main_program(), fluid.default_startup_program()
        ):
            input = self._get_data(
                name="input", shape=[13, 11], dtype='float32'
            )

            out = paddle.add_n(input)
            return out

    def make_slice(self):
        starts = [1, 0, 2]
        ends = [3, 3, 4]
        axes = [0, 1, 2]

        with program_guard(
            fluid.default_main_program(), fluid.default_startup_program()
        ):
            input = self._get_data(
                name="input", shape=[3, 4, 5, 6], dtype='float32'
            )

            out = paddle.slice(input, axes=axes, starts=starts, ends=ends)
            return out

    def make_scale_variable(self):
        with program_guard(
            fluid.default_main_program(), fluid.default_startup_program()
        ):
            input = self._get_data(
                name="input", shape=[3, 4, 5, 6], dtype='float32'
            )
            scale_var = self._get_data(
                name="scale",
                shape=[1],
                dtype='float32',
                append_batch_size=False,
            )
            out = paddle.scale(input, scale=scale_var)
            return out

    def make_bilinear_tensor_product_layer(self):
        with program_guard(
            fluid.default_main_program(), fluid.default_startup_program()
        ):
            data = self._get_data(name='data', shape=[4], dtype="float32")

            theta = self._get_data(name="theta", shape=[5], dtype="float32")
            out = paddle.static.nn.common.bilinear_tensor_product(
                data, theta, 6
            )
            return out

    def make_batch_norm(self):
        with program_guard(
            fluid.default_main_program(), fluid.default_startup_program()
        ):
            data = self._get_data(
                name='data', shape=[32, 128, 128], dtype="float32"
            )
            out = paddle.static.nn.batch_norm(data)
            return out

    def make_batch_norm_momentum_variable(self):
        with program_guard(
            fluid.default_main_program(), fluid.default_startup_program()
        ):
            data = self._get_data(
                name='data', shape=[32, 128, 128], dtype="float32"
            )
            momentum = self._get_data(
                name='momentum',
                shape=[1],
                dtype='float32',
                append_batch_size=False,
            )
            out = paddle.static.nn.batch_norm(data, momentum=momentum)
            return out

    def make_range(self):
        with program_guard(
            fluid.default_main_program(), fluid.default_startup_program()
        ):
            paddle.arange(0, 10, 2, 'int32')
            paddle.arange(0.1, 10.0, 0.2, 'float32')
            paddle.arange(0.1, 10.0, 0.2, 'float64')
            start = paddle.tensor.fill_constant(
                shape=[1], value=0.1, dtype="float32"
            )
            end = paddle.tensor.fill_constant(
                shape=[1], value=10.0, dtype="float32"
            )
            step = paddle.tensor.fill_constant(
                shape=[1], value=0.2, dtype="float32"
            )
            y = paddle.arange(start, end, step, 'float64')
            return y

    def make_spectral_norm(self):
        with program_guard(
            fluid.default_main_program(), fluid.default_startup_program()
        ):
            weight = self._get_data(
                name='weight',
                shape=[2, 3, 32, 32],
                dtype="float32",
                append_batch_size=False,
            )
            out = paddle.static.nn.spectral_norm(weight, dim=1, power_iters=1)
            return out

    def make_kldiv_loss(self):
        with program_guard(
            fluid.default_main_program(), fluid.default_startup_program()
        ):
            x = self._get_data(
                name='x',
                shape=[32, 128, 128],
                dtype="float32",
                append_batch_size=False,
            )
            target = self._get_data(
                name='target',
                shape=[32, 128, 128],
                dtype="float32",
                append_batch_size=False,
            )
            loss = paddle.nn.functional.kl_div(
                input=x, label=target, reduction='batchmean'
            )
            return loss

    def make_pixel_shuffle(self):
        with program_guard(
            fluid.default_main_program(), fluid.default_startup_program()
        ):
            x = self._get_data(name="X", shape=[9, 4, 4], dtype="float32")
            out = paddle.nn.functional.pixel_shuffle(x, upscale_factor=3)
            return out

    def make_mse_loss(self):
        with program_guard(
            fluid.default_main_program(), fluid.default_startup_program()
        ):
            x = self._get_data(name="X", shape=[1], dtype="float32")
            y = self._get_data(name="Y", shape=[1], dtype="float32")
            out = paddle.nn.functional.mse_loss(input=x, label=y)
            return out

    def make_square_error_cost(self):
        with program_guard(
            fluid.default_main_program(), fluid.default_startup_program()
        ):
            x = self._get_data(name="X", shape=[1], dtype="float32")
            y = self._get_data(name="Y", shape=[1], dtype="float32")
            out = paddle.nn.functional.square_error_cost(input=x, label=y)
            return out

    def test_affine_grid(self):
        with self.static_graph():
            data = paddle.static.data(
                name='data', shape=[-1, 2, 3, 3], dtype="float32"
            )
            out = paddle.argsort(x=data, axis=1)

            theta = paddle.static.data(
                name="theta", shape=[-1, 2, 3], dtype="float32"
            )
            out_shape = paddle.static.data(
                name="out_shape", shape=[-1], dtype="int32"
            )
            data_0 = paddle.nn.functional.affine_grid(theta, out_shape)
            data_1 = paddle.nn.functional.affine_grid(theta, [5, 3, 28, 28])

            self.assertIsNotNone(data_0)
            self.assertIsNotNone(data_1)

    def test_stridedslice(self):
        axes = [0, 1, 2]
        starts = [1, 0, 2]
        ends = [3, 3, 4]
        strides = [1, 1, 1]
        with self.static_graph():
            x = paddle.static.data(
                name="x", shape=[-1, 245, 30, 30], dtype="float32"
            )
            out = paddle.strided_slice(
                x, axes=axes, starts=starts, ends=ends, strides=strides
            )
            return out

    def test_fill_constant_batch_size_like(self):
        with self.static_graph():
            like = paddle.tensor.fill_constant(
                shape=[1, 200], value=10, dtype='int64'
            )
            out = layers.fill_constant_batch_size_like(
                input=like, shape=[2, 3300], value=1315454564656, dtype='int64'
            )
            return out

    def test_shuffle_batch(self):
        # TODO(minqiyang): dygraph do not support lod now
        with self.static_graph():
            x = paddle.static.data(
                name='X', shape=[-1, 4, 50], dtype='float32', lod_level=0
            )
            out1 = shuffle_batch(x)
            # change the program-level seed before the second shuffle
            default_main_program().random_seed = 1000
            out2 = shuffle_batch(x)
            self.assertIsNotNone(out1)
            self.assertIsNotNone(out2)
            return out1

    def test_partial_sum(self):
        with self.static_graph():
            x = paddle.static.data(name="x", shape=[None, 3], dtype="float32")
            y = paddle.static.data(name="y", shape=[None, 3], dtype="float32")
            # avoid shadowing the builtin `sum`
            out = partial_sum([x, y], start_index=0, length=2)
            return out

    def test_batch_fc(self):
        with self.static_graph():
            input = paddle.static.data(
                name="input", shape=[16, 2, 3], dtype="float32"
            )
            out = batch_fc(
                input=input,
                param_size=[16, 3, 10],
                param_attr=fluid.ParamAttr(
                    learning_rate=1.0,
                    name="w_0",
                    initializer=paddle.nn.initializer.XavierNormal(),
                ),
                bias_size=[16, 10],
                bias_attr=fluid.ParamAttr(
                    learning_rate=1.0,
                    name="b_0",
                    initializer=paddle.nn.initializer.XavierNormal(),
                ),
                act="relu",
            )
        return out

    def test_rank_attention(self):
        with self.static_graph():
            input = paddle.static.data(
                name="input", shape=[None, 2], dtype="float32"
            )
            rank_offset = paddle.static.data(
                name="rank_offset", shape=[None, 7], dtype="int32"
            )
            out = rank_attention(
                input=input,
                rank_offset=rank_offset,
                rank_param_shape=[18, 3],
                rank_param_attr=fluid.ParamAttr(
                    learning_rate=1.0,
                    name="ubm_rank_param.w_0",
                    initializer=paddle.nn.initializer.XavierNormal(),
                ),
                max_rank=3,
            )
            return out

    def test_row_conv(self):
        # TODO(minqiyang): dygraph do not support lod now
        with self.static_graph():
            x = paddle.static.data(
                name='x', shape=[-1, 16], dtype='float32', lod_level=1
            )
            out = paddle.static.nn.row_conv(input=x, future_context_size=2)
            return out

    def test_simple_conv2d(self):
        # TODO(minqiyang): dygraph do not support layers with param now
        with self.static_graph():
            images = paddle.static.data(
                name='pixel', shape=[-1, 3, 48, 48], dtype='float32'
            )
            return paddle.static.nn.conv2d(
                input=images, num_filters=3, filter_size=[4, 4]
            )

    def test_squeeze(self):
        # TODO(minqiyang): dygraph do not support layers with param now
        with self.static_graph():
            x = paddle.static.data(
                name='x', shape=[-1, 1, 1, 4], dtype='float32'
            )
            out = paddle.squeeze(x, axis=[2])
            return out

    def test_flatten(self):
        # TODO(minqiyang): dygraph do not support op without kernel now
        with self.static_graph():
            x = paddle.static.data(
                name='x',
                shape=[4, 4, 3],
                dtype="float32",
            )
            out = paddle.flatten(x, 1, -1, name="flatten")
            return out

    def test_linspace(self):
        program = Program()
        with program_guard(program):
            out = paddle.linspace(20, 10, 5, 'float64')
            self.assertIsNotNone(out)
        print(str(program))

    def test_unfold(self):
        with self.static_graph():
            x = paddle.static.data(
                name='x', shape=[-1, 3, 20, 20], dtype='float32'
            )
            out = paddle.nn.functional.unfold(x, [3, 3], 1, 1, 1)
            return out

    def test_partial_concat(self):
        with self.static_graph():
            x = paddle.static.data(name="x", shape=[None, 3], dtype="float32")
            y = paddle.static.data(name="y", shape=[None, 3], dtype="float32")
            concat1 = partial_concat([x, y], start_index=0, length=2)
            concat2 = partial_concat(x, start_index=0, length=-1)
            return concat1, concat2

    def test_addmm(self):
        with program_guard(
            fluid.default_main_program(), fluid.default_startup_program()
        ):
            input = paddle.static.data(
                name='input_data',
                shape=[3, 3],
                dtype='float32',
            )
            x = paddle.static.data(name='x', shape=[3, 2], dtype='float32')
            y = paddle.static.data(name='y', shape=[2, 3], dtype='float32')

            out = paddle.addmm(input=input, x=x, y=y)
            return out

    def test_warpctc_with_padding(self):
        # TODO(minqiyang): dygraph do not support lod now
        with self.static_graph():
            input_length = paddle.static.data(
                name='logits_length', shape=[11], dtype='int64'
            )
            label_length = paddle.static.data(
                name='labels_length', shape=[12], dtype='int64'
            )
            label = paddle.static.data(
                name='label', shape=[12, 1], dtype='int32'
            )
            predict = paddle.static.data(
                name='predict', shape=[4, 4, 8], dtype='float32'
            )
            output = paddle.nn.functional.ctc_loss(
                log_probs=predict,
                labels=label,
                input_lengths=input_length,
                label_lengths=label_length,
                reduction='none',
            )
            return output


class ExampleNet(paddle.nn.Layer):
    def __init__(self):
        super().__init__()
        self.weight = self.create_parameter(
            shape=[1, 1], attr=paddle.ParamAttr(trainable=False)
        )

    def forward(self):
        # only for test parameter trainable attr
        pass


class TestLayerParameterTrainableSet(unittest.TestCase):
    def test_layer_parameter_set(self):
        with fluid.dygraph.guard():
            net = ExampleNet()
            self.assertFalse(net.weight.trainable)


class TestLayerTrainingAttribute(unittest.TestCase):
    def test_set_train_eval_in_dynamic_mode(self):
        with fluid.dygraph.guard():
            net = paddle.nn.Dropout()
            net.train()
            self.assertTrue(net.training)
            net.eval()
            self.assertFalse(net.training)

    def test_set_train_eval_in_static_mode(self):
        net = paddle.nn.Dropout()
        net.train()
        self.assertTrue(net.training)
        net.eval()
        self.assertFalse(net.training)


class MyLayer(paddle.nn.Layer):
    def __init__(self):
        super().__init__()
        self._linear = paddle.nn.Linear(1, 1)
        self._dropout = paddle.nn.Dropout(p=0.5)

    def forward(self, input):
        temp = self._linear(input)
        temp = self._dropout(temp)
        return temp


class MySuperLayer(paddle.nn.Layer):
    def __init__(self):
        super().__init__()
        self._mylayer = MyLayer()

    def forward(self, input):
        temp = self._mylayer(input)
        return temp


class TestSubLayerCount(unittest.TestCase):
    def test_sublayer(self):
        with fluid.dygraph.guard():
            mySuperlayer = MySuperLayer()
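            # sublayers() returns MyLayer plus its Linear and Dropout children;
            # include_self=True adds MySuperLayer itself.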
            self.assertTrue(len(mySuperlayer.sublayers()) == 3)
            self.assertTrue(len(mySuperlayer.sublayers(include_self=True)) == 4)


if __name__ == '__main__':
    paddle.enable_static()
    unittest.main()