#   Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import contextlib
import inspect
import unittest

import nets
import numpy as np
from decorator_helper import prog_scope
from test_imperative_base import new_program_scope

import paddle
import paddle.nn.functional as F
from paddle import fluid
from paddle.fluid import core, layers
from paddle.fluid.dygraph import base, to_variable
from paddle.fluid.framework import Program, default_main_program, program_guard
from paddle.incubate.layers.nn import (
    batch_fc,
    partial_concat,
    partial_sum,
    rank_attention,
    shuffle_batch,
)
from paddle.tensor import random


class LayerTest(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        cls.seed = 111

    @classmethod
    def tearDownClass(cls):
        pass

    def _get_place(self, force_to_use_cpu=False):
        # this option is for ops that only have a cpu kernel
        if force_to_use_cpu:
            return core.CPUPlace()
        else:
            if core.is_compiled_with_cuda():
                return core.CUDAPlace(0)
            return core.CPUPlace()

    @contextlib.contextmanager
    def static_graph(self):
        with new_program_scope():
            paddle.seed(self.seed)
            paddle.framework.random._manual_program_seed(self.seed)
            yield

    def get_static_graph_result(
        self, feed, fetch_list, with_lod=False, force_to_use_cpu=False
    ):
        exe = fluid.Executor(self._get_place(force_to_use_cpu))
        exe.run(fluid.default_startup_program())
        return exe.run(
            fluid.default_main_program(),
            feed=feed,
            fetch_list=fetch_list,
            return_numpy=(not with_lod),
        )

    @contextlib.contextmanager
    def dynamic_graph(self, force_to_use_cpu=False):
        with fluid.dygraph.guard(
            self._get_place(force_to_use_cpu=force_to_use_cpu)
        ):
            paddle.seed(self.seed)
            paddle.framework.random._manual_program_seed(self.seed)
            yield


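# Most TestLayer cases below build the same layer twice -- once under
# self.static_graph() (run through get_static_graph_result) and once under
# self.dynamic_graph() -- and then compare the numpy outputs. A minimal
# sketch of that pattern (some_layer and inp are placeholders, not part of
# the real tests):
#
#     with self.static_graph():
#         t = paddle.static.data(name='data', shape=[3, 32], dtype='float32')
#         ret = some_layer(t)
#         static_ret = self.get_static_graph_result(
#             feed={'data': inp}, fetch_list=[ret]
#         )[0]
#     with self.dynamic_graph():
#         dy_ret = some_layer(base.to_variable(inp))
#     np.testing.assert_allclose(static_ret, dy_ret.numpy(), rtol=1e-05)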
class TestLayer(LayerTest):
    def test_custom_layer_with_kwargs(self):
        class CustomLayer(paddle.nn.Layer):
            def __init__(self, input_size, linear1_size=4):
                super().__init__()
                self.linear1 = paddle.nn.Linear(
                    input_size, linear1_size, bias_attr=False
                )
                self.linear2 = paddle.nn.Linear(
                    linear1_size, 1, bias_attr=False
                )

            def forward(self, x, do_linear2=False):
                ret = self.linear1(x)
                if do_linear2:
                    ret = self.linear2(ret)
                return ret

        with self.dynamic_graph():
            inp = np.ones([3, 3], dtype='float32')
            x = base.to_variable(inp)
            custom = CustomLayer(input_size=3, linear1_size=2)
            ret = custom(x, do_linear2=False)
            np.testing.assert_array_equal(ret.numpy().shape, [3, 2])
            ret = custom(x, do_linear2=True)
            np.testing.assert_array_equal(ret.numpy().shape, [3, 1])

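    # Dropout: the layer API (paddle.nn.Dropout) and the functional API
    # (paddle.nn.functional.dropout) should match, in both static and dynamic
    # graph modes.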
    def test_dropout(self):
        inp = np.ones([3, 32, 32], dtype='float32')
        with self.static_graph():
            t = paddle.static.data(
                name='data',
                shape=[3, 32, 32],
                dtype='float32',
            )
            dropout = paddle.nn.Dropout(p=0.35)
            ret = dropout(t)
            ret2 = paddle.nn.functional.dropout(t, p=0.35)
            static_ret, static_ret2 = self.get_static_graph_result(
                feed={'data': inp}, fetch_list=[ret, ret2]
            )
        with self.dynamic_graph():
            t = base.to_variable(inp)
            dropout = paddle.nn.Dropout(p=0.35)
            dy_ret = dropout(t)
            dy_ret2 = paddle.nn.functional.dropout(t, p=0.35)
            dy_ret_value = dy_ret.numpy()
            dy_ret2_value = dy_ret2.numpy()

        np.testing.assert_array_equal(static_ret, static_ret2)
        np.testing.assert_array_equal(dy_ret_value, dy_ret2_value)
        np.testing.assert_array_equal(static_ret, dy_ret_value)

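    # Linear: static vs. dynamic parity, plus TypeError checks for
    # non-Variable and int32 inputs.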
    def test_linear(self):
        inp = np.ones([3, 32, 32], dtype='float32')
        with self.static_graph():
            t = paddle.static.data(
                name='data', shape=[3, 32, 32], dtype='float32'
            )
            linear = paddle.nn.Linear(
                32,
                4,
                bias_attr=paddle.nn.initializer.Constant(value=1),
            )
            ret = linear(t)
            static_ret = self.get_static_graph_result(
                feed={'data': inp}, fetch_list=[ret]
            )[0]
        with self.dynamic_graph():
            t = base.to_variable(inp)
            linear = paddle.nn.Linear(
                32,
                4,
                bias_attr=paddle.nn.initializer.Constant(value=1),
            )
            dy_ret = linear(t)
            dy_ret_value = dy_ret.numpy()

        np.testing.assert_array_equal(static_ret, dy_ret_value)

        with self.static_graph():
            # the input of Linear must be Variable.
            def test_Variable():
                inp = np.ones([3, 32, 32], dtype='float32')
                linear = paddle.nn.Linear(
                    32,
                    4,
                    bias_attr=paddle.nn.initializer.Constant(value=1),
                )
                linear_ret1 = linear(inp)

            self.assertRaises(TypeError, test_Variable)

            # the input dtype of Linear must be float16 or float32 or float64
            # float16 can only be used on a GPU place
            def test_type():
                inp = np.ones([3, 32, 32], dtype='int32')
                linear = paddle.nn.Linear(
                    32,
                    4,
                    bias_attr=paddle.nn.initializer.Constant(value=1),
                )
                linear_ret2 = linear(inp)

            self.assertRaises(TypeError, test_type)

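    # continuous_value_model (CVM): compares static-graph outputs, with and
    # without use_cvm, against precomputed expected arrays.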
    def test_cvm(self):
        inp = np.ones([10, 10], dtype='float32')
        arr = [[0.6931472, -1.904654e-09, 1, 1, 1, 1, 1, 1, 1, 1]] * 10
        cvm1 = np.array(arr, dtype='float32')
        cvm2 = np.ones([10, 8], dtype='float32')
        show_clk = np.ones([10, 2], dtype='float32')
        with self.static_graph():
            x = paddle.static.data(
                name='data',
                shape=[10, 10],
                dtype='float32',
            )
            u = paddle.static.data(
                name='show_click',
                shape=[10, 2],
                dtype='float32',
            )
            no_cvm = paddle.static.nn.continuous_value_model(x, u, True)
            static_ret1 = self.get_static_graph_result(
                feed={'data': inp, 'show_click': show_clk},
                fetch_list=[no_cvm],
            )[0]
        with self.static_graph():
            x = paddle.static.data(
                name='data',
                shape=[10, 10],
                dtype='float32',
            )
            u = paddle.static.data(
                name='show_click',
                shape=[10, 2],
                dtype='float32',
            )
            cvm = paddle.static.nn.continuous_value_model(x, u, False)
            static_ret2 = self.get_static_graph_result(
                feed={'data': inp, 'show_click': show_clk}, fetch_list=[cvm]
            )[0]
        np.testing.assert_allclose(static_ret1, cvm1, rtol=1e-5, atol=1e-06)
        np.testing.assert_allclose(static_ret2, cvm2, rtol=1e-5, atol=1e-06)

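    # Flatten: static vs. dynamic parity (the error checks below reuse the
    # Linear TypeError cases).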
    def test_Flatten(self):
        inp = np.ones([3, 4, 4, 5], dtype='float32')
        with self.static_graph():
            t = paddle.static.data(
                name='data', shape=[3, 4, 4, 5], dtype='float32'
            )
            flatten = paddle.nn.Flatten()
            ret = flatten(t)
            static_ret = self.get_static_graph_result(
                feed={'data': inp}, fetch_list=[ret]
            )[0]
        with self.dynamic_graph():
            t = base.to_variable(inp)
            flatten = paddle.nn.Flatten()
            dy_ret = flatten(t)
            dy_ret_value = dy_ret.numpy()

        np.testing.assert_array_equal(static_ret, dy_ret_value)

        with self.static_graph():
            # the input of Linear must be Variable.
            def test_Variable():
                inp = np.ones([3, 32, 32], dtype='float32')
                linear = paddle.nn.Linear(
                    32,
                    4,
                    bias_attr=paddle.nn.initializer.Constant(value=1),
                )
                linear_ret1 = linear(inp)

            self.assertRaises(TypeError, test_Variable)

            # the input dtype of Linear must be float16 or float32 or float64
            # float16 can only be used on a GPU place
            def test_type():
                inp = np.ones([3, 32, 32], dtype='int32')
                linear = paddle.nn.Linear(
                    32,
                    4,
                    bias_attr=paddle.nn.initializer.Constant(value=1),
                )
                linear_ret2 = linear(inp)

            self.assertRaises(TypeError, test_type)

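    # SyncBatchNorm: only exercised when CUDA is available; checks static vs.
    # dynamic parity.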
    def test_SyncBatchNorm(self):
        if core.is_compiled_with_cuda():
            with self.static_graph():
                t = paddle.static.data(
                    name='t', shape=[-1, 3, 5, 5], dtype='float32'
                )
                my_sync_bn = paddle.nn.SyncBatchNorm(3)
                ret = my_sync_bn(t)
                static_ret = self.get_static_graph_result(
                    feed={'t': np.ones([3, 3, 5, 5], dtype='float32')},
                    fetch_list=[ret],
                )[0]

            with self.dynamic_graph():
                t = np.ones([3, 3, 5, 5], dtype='float32')
                my_syncbn = paddle.nn.SyncBatchNorm(3)
                dy_ret = my_syncbn(base.to_variable(t))
                dy_ret_value = dy_ret.numpy()
            np.testing.assert_array_equal(static_ret, dy_ret_value)

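    # relu: functional API parity between static and dynamic modes.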
    def test_relu(self):
        with self.static_graph():
            t = paddle.static.data(name='t', shape=[-1, 3, 3], dtype='float32')
            ret = F.relu(t)
            static_ret = self.get_static_graph_result(
                feed={'t': np.ones([3, 3], dtype='float32')}, fetch_list=[ret]
            )[0]

        with self.dynamic_graph():
            t = np.ones([3, 3], dtype='float32')
            dy_ret = F.relu(base.to_variable(t))
            dy_ret_value = dy_ret.numpy()

        np.testing.assert_allclose(static_ret, dy_ret_value, rtol=1e-05)

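    # matmul: static vs. dynamic parity on 3x3 inputs.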
    def test_matmul(self):
        with self.static_graph():
            t = paddle.static.data(name='t', shape=[-1, 3, 3], dtype='float32')
            t2 = paddle.static.data(
                name='t2', shape=[-1, 3, 3], dtype='float32'
            )
            ret = paddle.matmul(t, t2)
            static_ret = self.get_static_graph_result(
                feed={
                    't': np.ones([3, 3], dtype='float32'),
                    't2': np.ones([3, 3], dtype='float32'),
                },
                fetch_list=[ret],
            )[0]

        with self.dynamic_graph():
            t = np.ones([3, 3], dtype='float32')
            t2 = np.ones([3, 3], dtype='float32')
            dy_ret = paddle.matmul(base.to_variable(t), base.to_variable(t2))
            dy_ret_value = dy_ret.numpy()

        np.testing.assert_allclose(static_ret, dy_ret_value, rtol=1e-05)

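    # Element-wise ops (add, pow, divide, subtract, multiply) chained
    # together: static vs. dynamic parity.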
    def test_elementwise_math(self):
        n = np.ones([3, 3], dtype='float32')
        n2 = np.ones([3, 3], dtype='float32') * 1.1
        n3 = np.ones([3, 3], dtype='float32') * 2
        n4 = np.ones([3, 3], dtype='float32') * 3
        n5 = np.ones([3, 3], dtype='float32') * 4
        n6 = np.ones([3, 3], dtype='float32') * 5

        with self.static_graph():
            t = paddle.static.data(name='t', shape=[-1, 3, 3], dtype='float32')
            t2 = paddle.static.data(
                name='t2', shape=[-1, 3, 3], dtype='float32'
            )
            t3 = paddle.static.data(
                name='t3', shape=[-1, 3, 3], dtype='float32'
            )
            t4 = paddle.static.data(
                name='t4', shape=[-1, 3, 3], dtype='float32'
            )
            t5 = paddle.static.data(
                name='t5', shape=[-1, 3, 3], dtype='float32'
            )
            t6 = paddle.static.data(
                name='t6', shape=[-1, 3, 3], dtype='float32'
            )

            ret = paddle.add(t, t2)
            ret = paddle.pow(ret, t3)
            ret = paddle.divide(ret, t4)
            ret = paddle.subtract(ret, t5)
            ret = paddle.multiply(ret, t6)

            static_ret = self.get_static_graph_result(
                feed={'t': n, 't2': n2, 't3': n3, 't4': n4, 't5': n5, 't6': n6},
                fetch_list=[ret],
            )[0]

        with self.dynamic_graph():
            ret = paddle.add(to_variable(n), to_variable(n2))
            ret = paddle.pow(ret, to_variable(n3))
            ret = paddle.divide(ret, to_variable(n4))
            ret = paddle.subtract(ret, to_variable(n5))
            dy_ret = paddle.multiply(ret, to_variable(n6))
            dy_ret_value = dy_ret.numpy()

        np.testing.assert_allclose(static_ret, dy_ret_value, rtol=1e-05)

    def test_elementwise_minmax(self):
        n = np.ones([3, 3], dtype='float32')
        n2 = np.ones([3, 3], dtype='float32') * 2

        with self.dynamic_graph():
            min_ret = paddle.minimum(to_variable(n), to_variable(n2))
            max_ret = paddle.maximum(to_variable(n), to_variable(n2))
            min_ret_value = min_ret.numpy()
            max_ret_value = max_ret.numpy()

        np.testing.assert_allclose(n, min_ret_value, rtol=1e-05)
        np.testing.assert_allclose(n2, max_ret_value, rtol=1e-05)

    def test_conv2d_transpose(self):
        inp_np = np.arange(0, 24).reshape([2, 3, 2, 2]).astype('float32')
        with self.static_graph():
            img = paddle.static.data(
                name='pixel', shape=[-1, 3, 2, 2], dtype='float32'
            )
            out = paddle.static.nn.conv2d_transpose(
                input=img,
                num_filters=10,
                filter_size=27,
                act='sigmoid',
                bias_attr=paddle.nn.initializer.Constant(value=1),
            )
            static_rlt = self.get_static_graph_result(
                feed={'pixel': inp_np}, fetch_list=[out]
            )[0]
        with self.static_graph():
            img = paddle.static.data(
                name='pixel', shape=[-1, 3, 2, 2], dtype='float32'
            )
            conv2d_transpose = paddle.nn.Conv2DTranspose(
                3,
                10,
                27,
                bias_attr=paddle.nn.initializer.Constant(value=1),
            )
            out = conv2d_transpose(img)
            out = paddle.nn.functional.sigmoid(out)
            static_rlt2 = self.get_static_graph_result(
                feed={'pixel': inp_np}, fetch_list=[out]
            )[0]
        with self.dynamic_graph():
            conv2d_transpose = paddle.nn.Conv2DTranspose(
                3,
                10,
                27,
                bias_attr=paddle.nn.initializer.Constant(value=1),
            )
            dy_rlt = conv2d_transpose(base.to_variable(inp_np))
            dy_rlt = paddle.nn.functional.sigmoid(dy_rlt)
            dy_rlt_value = dy_rlt.numpy()
        np.testing.assert_allclose(static_rlt2, static_rlt, rtol=1e-05)
        np.testing.assert_allclose(dy_rlt_value, static_rlt2, rtol=1e-05)

        with self.dynamic_graph():
            images = np.ones([2, 3, 5, 5], dtype='float32')
            custom_weight = np.random.randn(3, 3, 2, 2).astype("float32")
            weight_attr = fluid.ParamAttr(
                initializer=paddle.nn.initializer.Assign(custom_weight)
            )
            conv2d1 = paddle.nn.Conv2DTranspose(3, 3, [2, 2])
            conv2d2 = paddle.nn.Conv2DTranspose(
                3,
                3,
                [2, 2],
                weight_attr=weight_attr,
            )
            dy_ret1 = conv2d1(base.to_variable(images))
            dy_ret2 = conv2d2(base.to_variable(images))
            self.assertFalse(np.array_equal(dy_ret1.numpy(), dy_ret2.numpy()))

            conv2d1_weight_np = conv2d1.weight.numpy()
            conv2d1_bias = conv2d1.bias
            self.assertFalse(
                np.array_equal(conv2d1_weight_np, conv2d2.weight.numpy())
            )
            conv2d2.weight.set_value(conv2d1_weight_np)
            np.testing.assert_array_equal(
                conv2d1_weight_np, conv2d2.weight.numpy()
            )
            conv2d2.bias.set_value(conv2d1_bias)
            dy_ret1 = conv2d1(base.to_variable(images))
            dy_ret2 = conv2d2(base.to_variable(images))
            np.testing.assert_array_equal(dy_ret1.numpy(), dy_ret2.numpy())

            conv2d2.weight = conv2d1.weight
            conv2d2.bias = conv2d1.bias
            np.testing.assert_array_equal(
                conv2d1.weight.numpy(), conv2d2.weight.numpy()
            )
            np.testing.assert_array_equal(
                conv2d1.bias.numpy(), conv2d2.bias.numpy()
            )

        with self.static_graph():
            # the input of Conv2DTranspose must be Variable.
            def test_Variable():
                images = np.ones([2, 3, 5, 5], dtype='float32')
                conv2d = paddle.nn.Conv2DTranspose(3, 3, [2, 2])
                conv2d_ret1 = conv2d(images)

            self.assertRaises(TypeError, test_Variable)

            # the input dtype of Conv2DTranspose must be float16 or float32 or float64
            # float16 can only be used on a GPU place
            def test_type():
                images = paddle.static.data(
                    name='pixel', shape=[-1, 3, 5, 5], dtype='int32'
                )
                conv2d = paddle.nn.Conv2DTranspose(3, 3, [2, 2])
                conv2d_ret2 = conv2d(images)

            self.assertRaises(TypeError, test_type)

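    # Bilinear: compares the static-graph bilinear_tensor_product op with the
    # paddle.nn.Bilinear layer (static and dynamic), and checks weight/bias
    # sharing via set_value and direct parameter assignment.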
    def test_bilinear_tensor_product(self):
        inp_np_x = np.array([[1, 2, 3]]).astype('float32')
        inp_np_y = np.array([[4, 5, 6]]).astype('float32')

        with self.static_graph():
            data_x = paddle.static.data(name='x', shape=[1, 3], dtype="float32")
            data_y = paddle.static.data(name='y', shape=[1, 3], dtype="float32")
            out = paddle.static.nn.common.bilinear_tensor_product(
                data_x,
                data_y,
                6,
                bias_attr=paddle.nn.initializer.Constant(value=1),
                act='sigmoid',
            )

            static_rlt = self.get_static_graph_result(
                feed={'x': inp_np_x, 'y': inp_np_y}, fetch_list=[out]
            )[0]

        with self.static_graph():
            data_x = paddle.static.data(name='x', shape=[1, 3], dtype="float32")
            data_y = paddle.static.data(name='y', shape=[1, 3], dtype="float32")
            btp = paddle.nn.Bilinear(
                3,
                3,
                6,
                bias_attr=paddle.nn.initializer.Constant(value=1),
            )
            out = btp(data_x, data_y)
            out = paddle.nn.functional.sigmoid(out)
            static_rlt2 = self.get_static_graph_result(
                feed={'x': inp_np_x, 'y': inp_np_y}, fetch_list=[out]
            )[0]
        with self.dynamic_graph():
            btp = paddle.nn.Bilinear(
                3,
                3,
                6,
                bias_attr=paddle.nn.initializer.Constant(value=1),
            )
            dy_rlt = btp(base.to_variable(inp_np_x), base.to_variable(inp_np_y))
            dy_rlt = paddle.nn.functional.sigmoid(dy_rlt)
            dy_rlt_value = dy_rlt.numpy()

        with self.dynamic_graph():
            btp2 = paddle.nn.Bilinear(3, 3, 6)
            dy_rlt2 = btp2(
                base.to_variable(inp_np_x), base.to_variable(inp_np_y)
            )
            dy_rlt2 = paddle.nn.functional.sigmoid(dy_rlt2)
            dy_rlt2_value = dy_rlt2.numpy()

        with self.static_graph():
            data_x2 = paddle.static.data(
                name='x', shape=[1, 3], dtype="float32"
            )
            data_y2 = paddle.static.data(
                name='y', shape=[1, 3], dtype="float32"
            )
            out2 = paddle.static.nn.common.bilinear_tensor_product(
                data_x2, data_y2, 6, act='sigmoid'
            )

            static_rlt3 = self.get_static_graph_result(
                feed={'x': inp_np_x, 'y': inp_np_y}, fetch_list=[out2]
            )[0]

        np.testing.assert_array_equal(dy_rlt2_value, static_rlt3)
        np.testing.assert_array_equal(static_rlt2, static_rlt)
        np.testing.assert_array_equal(dy_rlt_value, static_rlt)

        with self.dynamic_graph():
            custom_weight = np.random.randn(6, 3, 3).astype("float32")
            weight_attr = fluid.ParamAttr(
                initializer=paddle.nn.initializer.Assign(custom_weight)
            )
            btp1 = paddle.nn.Bilinear(3, 3, 6)
            btp2 = paddle.nn.Bilinear(3, 3, 6, weight_attr=weight_attr)
            dy_rlt1 = btp1(
                base.to_variable(inp_np_x), base.to_variable(inp_np_y)
            )
            dy_rlt1 = paddle.nn.functional.sigmoid(dy_rlt1)
            dy_rlt2 = btp2(
                base.to_variable(inp_np_x), base.to_variable(inp_np_y)
            )
            dy_rlt2 = paddle.nn.functional.sigmoid(dy_rlt2)
            self.assertFalse(np.array_equal(dy_rlt1.numpy(), dy_rlt2.numpy()))
            btp2.weight.set_value(btp1.weight.numpy())
            btp2.bias.set_value(btp1.bias)
            dy_rlt1 = btp1(
                base.to_variable(inp_np_x), base.to_variable(inp_np_y)
            )
            dy_rlt2 = btp2(
                base.to_variable(inp_np_x), base.to_variable(inp_np_y)
            )
            np.testing.assert_array_equal(dy_rlt1.numpy(), dy_rlt2.numpy())

            btp2.weight = btp1.weight
            btp2.bias = btp1.bias
            np.testing.assert_array_equal(
                btp1.weight.numpy(), btp2.weight.numpy()
            )
            np.testing.assert_array_equal(btp1.bias.numpy(), btp2.bias.numpy())

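    # Embedding: legacy layers.embedding vs. paddle.nn.Embedding, plus
    # weight sharing between two Embedding instances.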
    def test_embeding(self):
        inp_word = np.array([[[1]]]).astype('int64')
        dict_size = 20
        with self.static_graph():
            data_t = paddle.static.data(
                name='word', shape=[-1, 1], dtype='int64'
            )
            data_t.desc.set_need_check_feed(False)
            emb = layers.embedding(
                input=data_t,
                size=[dict_size, 32],
                param_attr='emb.w',
                is_sparse=False,
            )
            static_rlt = self.get_static_graph_result(
                feed={'word': inp_word}, fetch_list=[emb]
            )[0]
        with self.static_graph():
            data_t = paddle.static.data(
                name='word', shape=[-1, 1], dtype='int64'
            )
            data_t.desc.set_need_check_feed(False)
            emb2 = paddle.nn.Embedding(
                dict_size, 32, weight_attr='emb.w', sparse=False
            )
            emb_rlt = emb2(data_t)
            static_rlt2 = self.get_static_graph_result(
                feed={'word': inp_word}, fetch_list=[emb_rlt]
            )[0]
        with self.dynamic_graph():
            emb2 = paddle.nn.Embedding(
                dict_size, 32, weight_attr='emb.w', sparse=False
            )
            dy_rlt = emb2(base.to_variable(inp_word))
            dy_rlt_value = dy_rlt.numpy()

        self.assertTrue(np.allclose(static_rlt2, static_rlt))
        self.assertTrue(np.allclose(dy_rlt_value, static_rlt))

        with self.dynamic_graph():
            custom_weight = np.random.randn(dict_size, 32).astype("float32")
            weight_attr = fluid.ParamAttr(
                initializer=paddle.nn.initializer.Assign(custom_weight)
            )
            emb1 = paddle.nn.Embedding(dict_size, 32, sparse=False)
            emb2 = paddle.nn.Embedding(
                dict_size, 32, weight_attr=weight_attr, sparse=False
            )
            rep1 = emb1(base.to_variable(inp_word))
            rep2 = emb2(base.to_variable(inp_word))
            self.assertFalse(np.array_equal(emb1.weight.numpy(), custom_weight))
            np.testing.assert_array_equal(emb2.weight.numpy(), custom_weight)
            self.assertFalse(np.array_equal(rep1.numpy(), rep2.numpy()))
            emb2.weight.set_value(emb1.weight.numpy())
            rep2 = emb2(base.to_variable(inp_word))
            np.testing.assert_array_equal(rep1.numpy(), rep2.numpy())

            emb2.weight = emb1.weight
            np.testing.assert_array_equal(
                emb1.weight.numpy(), emb2.weight.numpy()
            )

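    # one_hot: the depth can be given as a Python int or as a Tensor.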
    def test_one_hot(self):
        with self.dynamic_graph():
            label = fluid.dygraph.to_variable(np.array([[1], [1], [3], [0]]))
            one_hot_label1 = paddle.nn.functional.one_hot(label, 4)
            one_hot_label2 = paddle.nn.functional.one_hot(
                label, fluid.dygraph.to_variable(np.array([4]))
            )
            np.testing.assert_array_equal(
                one_hot_label1.numpy(), one_hot_label2.numpy()
            )

    def test_split(self):
        with self.dynamic_graph():
            input = fluid.dygraph.to_variable(np.random.random((3, 8, 5)))
            x0, x1 = paddle.split(input, num_or_sections=2, axis=1)
            x00, x11 = paddle.split(
                input,
                num_or_sections=2,
                axis=fluid.dygraph.to_variable(np.array([1])),
            )
            np.testing.assert_array_equal(x0.numpy(), x00.numpy())
            np.testing.assert_array_equal(x1.numpy(), x11.numpy())

    def test_topk(self):
        with self.dynamic_graph():
            input = fluid.dygraph.to_variable(np.random.random((13, 11)))
            top5_values1, top5_indices1 = paddle.topk(input, k=5)
            top5_values2, top5_indices2 = paddle.topk(
                input, k=fluid.dygraph.to_variable(np.array([5]))
            )
            np.testing.assert_array_equal(
                top5_values1.numpy(), top5_values2.numpy()
            )
            np.testing.assert_array_equal(
                top5_indices1.numpy(), top5_indices2.numpy()
            )

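    # Conv3D: static op vs. layer API vs. dynamic mode, plus weight/bias
    # sharing between two Conv3D instances.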
    def test_conv3d(self):
        with self.static_graph():
            images = paddle.static.data(
                name='pixel', shape=[-1, 3, 6, 6, 6], dtype='float32'
            )
            ret = paddle.static.nn.conv3d(
                input=images, num_filters=3, filter_size=2
            )
            static_ret = self.get_static_graph_result(
                feed={'pixel': np.ones([2, 3, 6, 6, 6], dtype='float32')},
                fetch_list=[ret],
            )[0]

        with self.static_graph():
            images = paddle.static.data(
                name='pixel', shape=[-1, 3, 6, 6, 6], dtype='float32'
            )
            conv3d = paddle.nn.Conv3D(
                in_channels=3, out_channels=3, kernel_size=2
            )
            ret = conv3d(images)
            static_ret2 = self.get_static_graph_result(
                feed={'pixel': np.ones([2, 3, 6, 6, 6], dtype='float32')},
                fetch_list=[ret],
            )[0]

        with self.dynamic_graph():
            images = np.ones([2, 3, 6, 6, 6], dtype='float32')
            conv3d = paddle.nn.Conv3D(
                in_channels=3, out_channels=3, kernel_size=2
            )
            dy_ret = conv3d(base.to_variable(images))
            dy_rlt_value = dy_ret.numpy()

        np.testing.assert_allclose(static_ret, dy_rlt_value, rtol=1e-05)
        np.testing.assert_allclose(static_ret, static_ret2, rtol=1e-05)

        with self.dynamic_graph():
            images = np.ones([2, 3, 6, 6, 6], dtype='float32')
            custom_weight = np.random.randn(3, 3, 2, 2, 2).astype("float32")
            weight_attr = fluid.ParamAttr(
                initializer=paddle.nn.initializer.Assign(custom_weight)
            )
            conv3d1 = paddle.nn.Conv3D(
                in_channels=3, out_channels=3, kernel_size=2
            )
            conv3d2 = paddle.nn.Conv3D(
                in_channels=3,
                out_channels=3,
                kernel_size=2,
                weight_attr=weight_attr,
            )
            dy_ret1 = conv3d1(base.to_variable(images))
            dy_ret2 = conv3d2(base.to_variable(images))
            self.assertFalse(np.array_equal(dy_ret1.numpy(), dy_ret2.numpy()))

            conv3d1_weight_np = conv3d1.weight.numpy()
            conv3d1_bias = conv3d1.bias
            self.assertFalse(
                np.array_equal(conv3d1_weight_np, conv3d2.weight.numpy())
            )
            conv3d2.weight.set_value(conv3d1_weight_np)
            np.testing.assert_array_equal(
                conv3d1_weight_np, conv3d2.weight.numpy()
            )
            conv3d2.bias.set_value(conv3d1_bias)
            dy_ret1 = conv3d1(base.to_variable(images))
            dy_ret2 = conv3d2(base.to_variable(images))
            np.testing.assert_array_equal(dy_ret1.numpy(), dy_ret2.numpy())

            conv3d2.weight = conv3d1.weight
            conv3d2.bias = conv3d1.bias
            np.testing.assert_array_equal(
                conv3d1.weight.numpy(), conv3d2.weight.numpy()
            )
            np.testing.assert_array_equal(
                conv3d1.bias.numpy(), conv3d2.bias.numpy()
            )

    def test_group_norm(self):
        if core.is_compiled_with_cuda():
            place = core.CUDAPlace(0)
        else:
            place = core.CPUPlace()

        shape = (2, 4, 3, 3)

        input = np.random.random(shape).astype('float32')

        with self.static_graph():
            X = paddle.static.data(
                name='X', shape=shape, dtype='float32', lod_level=1
            )
            ret = paddle.static.nn.group_norm(
                input=X,
                groups=2,
                param_attr=paddle.nn.initializer.Uniform(low=-0.5, high=0.5),
                bias_attr=paddle.nn.initializer.Constant(value=1),
            )
            static_ret = self.get_static_graph_result(
                feed={
                    'X': fluid.create_lod_tensor(
                        data=input, recursive_seq_lens=[[1, 1]], place=place
                    )
                },
                fetch_list=[ret],
                with_lod=True,
            )[0]

        with self.static_graph():
            X = paddle.static.data(
                name='X', shape=shape, dtype='float32', lod_level=1
            )
            groupNorm = paddle.nn.GroupNorm(
                num_channels=shape[1],
                num_groups=2,
                weight_attr=paddle.nn.initializer.Uniform(low=-0.5, high=0.5),
                bias_attr=paddle.nn.initializer.Constant(value=1),
            )
            ret = groupNorm(X)
            static_ret2 = self.get_static_graph_result(
                feed={
                    'X': fluid.create_lod_tensor(
                        data=input, recursive_seq_lens=[[1, 1]], place=place
                    )
                },
                fetch_list=[ret],
                with_lod=True,
            )[0]

        with self.dynamic_graph():
            groupNorm = paddle.nn.GroupNorm(
                num_channels=shape[1],
                num_groups=2,
                weight_attr=paddle.nn.initializer.Uniform(low=-0.5, high=0.5),
                bias_attr=paddle.nn.initializer.Constant(value=1),
            )
            dy_ret = groupNorm(base.to_variable(input))
            dy_rlt_value = dy_ret.numpy()

        np.testing.assert_allclose(static_ret, dy_rlt_value, rtol=1e-05)
        np.testing.assert_allclose(static_ret, static_ret2, rtol=1e-05)

    def test_instance_norm(self):
        if core.is_compiled_with_cuda():
            place = core.CUDAPlace(0)
        else:
            place = core.CPUPlace()

        shape = (2, 4, 3, 3)

        input = np.random.random(shape).astype('float32')

        with self.static_graph():
            X = paddle.static.data(name='X', shape=shape, dtype='float32')
            ret = paddle.static.nn.instance_norm(input=X)
            static_ret = self.get_static_graph_result(
                feed={'X': input}, fetch_list=[ret]
            )[0]

        with self.static_graph():
            X = paddle.static.data(name='X', shape=shape, dtype='float32')
            instanceNorm = paddle.nn.InstanceNorm2D(num_features=shape[1])
            ret = instanceNorm(X)
            static_ret2 = self.get_static_graph_result(
                feed={'X': input}, fetch_list=[ret]
            )[0]

        with self.dynamic_graph():
            instanceNorm = paddle.nn.InstanceNorm2D(num_features=shape[1])
            dy_ret = instanceNorm(base.to_variable(input))
            dy_rlt_value = dy_ret.numpy()

        with self.dynamic_graph():
            instanceNorm = paddle.nn.InstanceNorm2D(num_features=shape[1])
            dy_ret = instanceNorm(base.to_variable(input))
            dy_rlt_value2 = dy_ret.numpy()

        np.testing.assert_allclose(static_ret, dy_rlt_value, rtol=1e-05)
        np.testing.assert_allclose(static_ret, dy_rlt_value2, rtol=1e-05)
        np.testing.assert_allclose(static_ret, static_ret2, rtol=1e-05)

        with self.static_graph():
            # the input of InstanceNorm must be Variable.
            def test_Variable():
                instanceNorm = paddle.nn.InstanceNorm2D(num_features=shape[1])
                ret1 = instanceNorm(input)

            self.assertRaises(TypeError, test_Variable)

            # the input dtype of InstanceNorm must be float32 or float64
            def test_type():
                input = np.random.random(shape).astype('int32')
                instanceNorm = paddle.nn.InstanceNorm2D(num_features=shape[1])
                ret2 = instanceNorm(input)

            self.assertRaises(TypeError, test_type)

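    # SpectralNorm: static op vs. layer API vs. dynamic mode (power_iters=2).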
    def test_spectral_norm(self):
        if core.is_compiled_with_cuda():
            place = core.CUDAPlace(0)
        else:
            place = core.CPUPlace()

        shape = (2, 4, 3, 3)

        input = np.random.random(shape).astype('float32')

        with self.static_graph():
            Weight = paddle.static.data(
                name='Weight', shape=shape, dtype='float32', lod_level=1
            )
            ret = paddle.static.nn.spectral_norm(
                weight=Weight, dim=1, power_iters=2
            )
            static_ret = self.get_static_graph_result(
                feed={
                    'Weight': fluid.create_lod_tensor(
                        data=input, recursive_seq_lens=[[1, 1]], place=place
                    ),
                },
                fetch_list=[ret],
                with_lod=True,
            )[0]

        with self.static_graph():
            Weight = paddle.static.data(
                name='Weight', shape=shape, dtype='float32', lod_level=1
            )
            spectralNorm = paddle.nn.SpectralNorm(shape, dim=1, power_iters=2)
            ret = spectralNorm(Weight)
            static_ret2 = self.get_static_graph_result(
                feed={
                    'Weight': fluid.create_lod_tensor(
                        data=input, recursive_seq_lens=[[1, 1]], place=place
                    )
                },
                fetch_list=[ret],
                with_lod=True,
            )[0]

        with self.dynamic_graph():
            spectralNorm = paddle.nn.SpectralNorm(shape, dim=1, power_iters=2)
            dy_ret = spectralNorm(base.to_variable(input))
            dy_rlt_value = dy_ret.numpy()

        np.testing.assert_allclose(static_ret, dy_rlt_value, rtol=1e-05)
        np.testing.assert_allclose(static_ret, static_ret2, rtol=1e-05)

    def test_conv3d_transpose(self):
        input_array = (
            np.arange(0, 48).reshape([2, 3, 2, 2, 2]).astype('float32')
        )

        with self.static_graph():
            img = paddle.static.data(
                name='pixel', shape=[-1, 3, 2, 2, 2], dtype='float32'
            )
            out = paddle.static.nn.conv3d_transpose(
                input=img, num_filters=12, filter_size=12, use_cudnn=True
            )
            static_rlt = self.get_static_graph_result(
                feed={'pixel': input_array}, fetch_list=[out]
            )[0]
        with self.static_graph():
            img = paddle.static.data(
                name='pixel', shape=[-1, 3, 2, 2, 2], dtype='float32'
            )
            conv3d_transpose = paddle.nn.Conv3DTranspose(
                in_channels=3, out_channels=12, kernel_size=12
            )
            out = conv3d_transpose(img)
            static_rlt2 = self.get_static_graph_result(
                feed={'pixel': input_array}, fetch_list=[out]
            )[0]
        with self.dynamic_graph():
            conv3d_transpose = paddle.nn.Conv3DTranspose(
                in_channels=3, out_channels=12, kernel_size=12
            )
            dy_rlt = conv3d_transpose(base.to_variable(input_array))
            dy_rlt_value = dy_rlt.numpy()
        np.testing.assert_allclose(static_rlt2, static_rlt, rtol=1e-05)
        np.testing.assert_allclose(dy_rlt_value, static_rlt, rtol=1e-05)

        with self.dynamic_graph():
            images = np.ones([2, 3, 6, 6, 6], dtype='float32')
            custom_weight = np.random.randn(3, 3, 2, 2, 2).astype("float32")
            weight_attr = fluid.ParamAttr(
                initializer=paddle.nn.initializer.Assign(custom_weight)
            )
            conv3d1 = paddle.nn.Conv3DTranspose(
                in_channels=3,
                out_channels=3,
                kernel_size=2,
                bias_attr='conv3d1_b',
            )
            conv3d2 = paddle.nn.Conv3DTranspose(
                in_channels=3,
                out_channels=3,
                kernel_size=2,
                weight_attr=weight_attr,
                bias_attr='conv3d2_b',
            )
            dy_ret1 = conv3d1(base.to_variable(images))
            dy_ret2 = conv3d2(base.to_variable(images))
            self.assertFalse(np.array_equal(dy_ret1.numpy(), dy_ret2.numpy()))

            conv3d1_weight_np = conv3d1.weight.numpy()
            conv3d1_bias = conv3d1.bias
            self.assertFalse(
                np.array_equal(conv3d1_weight_np, conv3d2.weight.numpy())
            )
            conv3d2.weight.set_value(conv3d1_weight_np)
            np.testing.assert_array_equal(
                conv3d1_weight_np, conv3d2.weight.numpy()
            )
            conv3d2.bias.set_value(conv3d1_bias)
            dy_ret1 = conv3d1(base.to_variable(images))
            dy_ret2 = conv3d2(base.to_variable(images))
            np.testing.assert_array_equal(dy_ret1.numpy(), dy_ret2.numpy())

            conv3d2.weight = conv3d1.weight
            conv3d2.bias = conv3d1.bias
            np.testing.assert_array_equal(
                conv3d1.weight.numpy(), conv3d2.weight.numpy()
            )
            np.testing.assert_array_equal(
                conv3d1.bias.numpy(), conv3d2.bias.numpy()
            )

    def test_while_loop(self):
        with self.static_graph():
            i = paddle.tensor.fill_constant(shape=[1], dtype='int64', value=0)
            ten = paddle.tensor.fill_constant(
                shape=[1], dtype='int64', value=10
            )

            def cond(i):
                return paddle.less_than(i, ten)

            def body(i):
                return i + 1

            out = paddle.static.nn.while_loop(cond, body, [i])
            static_ret = self.get_static_graph_result(feed={}, fetch_list=out)

        with self.dynamic_graph():
            i = paddle.tensor.fill_constant(shape=[1], dtype='int64', value=0)
            ten = paddle.tensor.fill_constant(
                shape=[1], dtype='int64', value=10
            )

            def cond1(i):
                return paddle.less_than(i, ten)

            def body1(i):
                return i + 1

            dy_ret = paddle.static.nn.while_loop(cond1, body1, [i])
            with self.assertRaises(ValueError):
                j = paddle.tensor.fill_constant(
                    shape=[1], dtype='int64', value=0
                )

                def body2(i):
                    return i + 1, i + 2

                paddle.static.nn.while_loop(cond1, body2, [j])

        np.testing.assert_array_equal(static_ret[0], dy_ret[0].numpy())

    def test_compare(self):
        value_a = np.arange(3)
        value_b = np.arange(3)
        # less than
        with self.static_graph():
            a = paddle.static.data(name='a', shape=[-1, 1], dtype='int64')
            b = paddle.static.data(name='b', shape=[-1, 1], dtype='int64')
            cond = paddle.less_than(x=a, y=b)
            static_ret = self.get_static_graph_result(
                feed={"a": value_a, "b": value_b}, fetch_list=[cond]
            )[0]
        with self.dynamic_graph():
            da = base.to_variable(value_a)
            db = base.to_variable(value_b)
            dcond = paddle.less_than(x=da, y=db)

            for i in range(len(static_ret)):
                self.assertTrue(dcond.numpy()[i] == static_ret[i])

        # less equal
        with self.static_graph():
            a1 = paddle.static.data(name='a1', shape=[-1, 1], dtype='int64')
            b1 = paddle.static.data(name='b1', shape=[-1, 1], dtype='int64')
            cond1 = paddle.less_equal(x=a1, y=b1)
            static_ret1 = self.get_static_graph_result(
                feed={"a1": value_a, "b1": value_b}, fetch_list=[cond1]
            )[0]
        with self.dynamic_graph():
            da1 = base.to_variable(value_a)
            db1 = base.to_variable(value_b)
            dcond1 = paddle.less_equal(x=da1, y=db1)

            for i in range(len(static_ret1)):
                self.assertTrue(dcond1.numpy()[i] == static_ret1[i])

        # greater than
        with self.static_graph():
            a2 = paddle.static.data(name='a2', shape=[-1, 1], dtype='int64')
            b2 = paddle.static.data(name='b2', shape=[-1, 1], dtype='int64')
            cond2 = paddle.greater_than(x=a2, y=b2)
            static_ret2 = self.get_static_graph_result(
                feed={"a2": value_a, "b2": value_b}, fetch_list=[cond2]
            )[0]
        with self.dynamic_graph():
            da2 = base.to_variable(value_a)
            db2 = base.to_variable(value_b)
            dcond2 = paddle.greater_than(x=da2, y=db2)

            for i in range(len(static_ret2)):
                self.assertTrue(dcond2.numpy()[i] == static_ret2[i])

        # greater equal
        with self.static_graph():
            a3 = paddle.static.data(name='a3', shape=[-1, 1], dtype='int64')
            b3 = paddle.static.data(name='b3', shape=[-1, 1], dtype='int64')
            cond3 = paddle.greater_equal(x=a3, y=b3)
            static_ret3 = self.get_static_graph_result(
                feed={"a3": value_a, "b3": value_b}, fetch_list=[cond3]
            )[0]
        with self.dynamic_graph():
            da3 = base.to_variable(value_a)
            db3 = base.to_variable(value_b)
            dcond3 = paddle.greater_equal(x=da3, y=db3)

            for i in range(len(static_ret3)):
                self.assertTrue(dcond3.numpy()[i] == static_ret3[i])

        # equal
        with self.static_graph():
            a4 = paddle.static.data(name='a4', shape=[-1, 1], dtype='int64')
            b4 = paddle.static.data(name='b4', shape=[-1, 1], dtype='int64')
            cond4 = paddle.equal(x=a4, y=b4)
            static_ret4 = self.get_static_graph_result(
                feed={"a4": value_a, "b4": value_b}, fetch_list=[cond4]
            )[0]
        with self.dynamic_graph():
            da4 = base.to_variable(value_a)
            db4 = base.to_variable(value_b)
            dcond4 = paddle.equal(x=da4, y=db4)

            for i in range(len(static_ret4)):
                self.assertTrue(dcond4.numpy()[i] == static_ret4[i])

        # not equal
        with self.static_graph():
            a5 = paddle.static.data(name='a5', shape=[-1, 1], dtype='int64')
            b5 = paddle.static.data(name='b5', shape=[-1, 1], dtype='int64')
            cond5 = paddle.not_equal(x=a5, y=b5)
            static_ret5 = self.get_static_graph_result(
                feed={"a5": value_a, "b5": value_b}, fetch_list=[cond5]
            )[0]
        with self.dynamic_graph():
            da5 = base.to_variable(value_a)
            db5 = base.to_variable(value_b)
            dcond5 = paddle.not_equal(x=da5, y=db5)

            for i in range(len(static_ret5)):
                self.assertTrue(dcond5.numpy()[i] == static_ret5[i])

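    # cond: both branch orders should give the same result, and non-callable
    # branches raise TypeError.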
    def test_cond(self):
        def less_than_branch(a, b):
            return paddle.add(a, b)

        def greater_equal_branch(a, b):
            return paddle.subtract(a, b)

        with self.static_graph():
            a = paddle.tensor.fill_constant(
                shape=[1], dtype='float32', value=0.1
            )
            b = paddle.tensor.fill_constant(
                shape=[1], dtype='float32', value=0.23
            )
            out = paddle.static.nn.cond(
                a >= b,
                lambda: greater_equal_branch(a, b),
                lambda: less_than_branch(a, b),
            )
            place = (
                fluid.CUDAPlace(0)
                if core.is_compiled_with_cuda()
                else fluid.CPUPlace()
            )
            exe = fluid.Executor(place)
            ret = exe.run(fetch_list=[out])
            static_res = ret[0]

        with self.dynamic_graph():
            a = fluid.dygraph.to_variable(np.array([0.1]).astype('float32'))
            b = fluid.dygraph.to_variable(np.array([0.23]).astype('float32'))
            out = paddle.static.nn.cond(
                a < b,
                lambda: less_than_branch(a, b),
                lambda: greater_equal_branch(a, b),
            )
            out2 = paddle.static.nn.cond(
                a >= b,
                lambda: greater_equal_branch(a, b),
                lambda: less_than_branch(a, b),
            )
            dynamic_res = out.numpy()
            dynamic_res2 = out2.numpy()
            np.testing.assert_array_equal(dynamic_res, dynamic_res2)
            with self.assertRaises(TypeError):
                paddle.static.nn.cond(a < b, 'str', 'str')
            with self.assertRaises(TypeError):
                paddle.static.nn.cond(a >= b, 'str', 'str')

        np.testing.assert_array_equal(static_res, dynamic_res)

    def test_case(self):
        def fn_1():
            return paddle.tensor.fill_constant(
                shape=[1, 2], dtype='float32', value=1
            )

        def fn_2():
            return paddle.tensor.fill_constant(
                shape=[2, 2], dtype='int32', value=2
            )

        def fn_3():
            return paddle.tensor.fill_constant(
                shape=[3], dtype='int32', value=3
            )

        with self.static_graph():
            x = paddle.tensor.fill_constant(
                shape=[1], dtype='float32', value=0.3
            )
            y = paddle.tensor.fill_constant(
                shape=[1], dtype='float32', value=0.1
            )
            z = paddle.tensor.fill_constant(
                shape=[1], dtype='float32', value=0.2
            )

            pred_1 = paddle.less_than(z, x)  # true: 0.2 < 0.3
            pred_2 = paddle.less_than(x, y)  # false: 0.3 < 0.1
            pred_3 = paddle.equal(x, y)  # false: 0.3 == 0.1

            out_1 = paddle.static.nn.case(
                pred_fn_pairs=[(pred_1, fn_1), (pred_2, fn_2)], default=fn_3
            )
            out_2 = paddle.static.nn.case(
                pred_fn_pairs=[(pred_2, fn_2), (pred_3, fn_3)]
            )

            place = (
                fluid.CUDAPlace(0)
                if core.is_compiled_with_cuda()
                else fluid.CPUPlace()
            )
            exe = fluid.Executor(place)
            static_res1, static_res2 = exe.run(fetch_list=[out_1, out_2])

        with self.dynamic_graph():
            x = paddle.tensor.fill_constant(
                shape=[1], dtype='float32', value=0.3
            )
            y = paddle.tensor.fill_constant(
                shape=[1], dtype='float32', value=0.1
            )
            z = paddle.tensor.fill_constant(
                shape=[1], dtype='float32', value=0.2
            )

            pred_1 = paddle.less_than(z, x)  # true: 0.2 < 0.3
            pred_2 = paddle.less_than(x, y)  # false: 0.3 < 0.1
            pred_3 = paddle.equal(x, y)  # false: 0.3 == 0.1

            out_1 = paddle.static.nn.case(
                pred_fn_pairs=[(pred_1, fn_1), (pred_2, fn_2)], default=fn_3
            )
            out_2 = paddle.static.nn.case(
                pred_fn_pairs=[(pred_2, fn_2), (pred_3, fn_3)]
            )
            dynamic_res1 = out_1.numpy()
            dynamic_res2 = out_2.numpy()

        np.testing.assert_array_equal(static_res1, dynamic_res1)
        np.testing.assert_array_equal(static_res2, dynamic_res2)

    def test_switch_case(self):
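        # paddle.static.nn.switch_case dispatches on an integer branch_index: the fn whose
        # key equals the index runs, and `default` covers indices with no matching key.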
        def fn_1():
            return paddle.tensor.fill_constant(
                shape=[1, 2], dtype='float32', value=1
            )

        def fn_2():
            return paddle.tensor.fill_constant(
                shape=[2, 2], dtype='int32', value=2
            )

        def fn_3():
            return paddle.tensor.fill_constant(
                shape=[3], dtype='int32', value=3
            )

        with self.static_graph():
            index_1 = paddle.tensor.fill_constant(
                shape=[1], dtype='int32', value=1
            )
            index_2 = paddle.tensor.fill_constant(
                shape=[1], dtype='int32', value=2
            )

            out_1 = paddle.static.nn.switch_case(
                branch_index=index_1,
                branch_fns={1: fn_1, 2: fn_2},
                default=fn_3,
            )
            out_2 = paddle.static.nn.switch_case(
                branch_index=index_2,
                branch_fns=[(1, fn_1), (2, fn_2)],
                default=fn_3,
            )
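            # index_2 (=2) matches none of the keys below; with no default given,
            # switch_case falls back to the branch with the largest key (fn_3).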
            out_3 = paddle.static.nn.switch_case(
                branch_index=index_2,
                branch_fns=[(0, fn_1), (4, fn_2), (7, fn_3)],
            )

            place = (
                fluid.CUDAPlace(0)
                if core.is_compiled_with_cuda()
                else fluid.CPUPlace()
            )
            exe = fluid.Executor(place)
            static_res1, static_res2, static_res3 = exe.run(
                fetch_list=[out_1, out_2, out_3]
            )

        with self.dynamic_graph():
            index_1 = paddle.tensor.fill_constant(
                shape=[1], dtype='int32', value=1
            )
            index_2 = paddle.tensor.fill_constant(
                shape=[1], dtype='int32', value=2
            )

            out_1 = paddle.static.nn.switch_case(
                branch_index=index_1,
                branch_fns={1: fn_1, 2: fn_2},
                default=fn_3,
            )
            out_2 = paddle.static.nn.switch_case(
                branch_index=index_2,
                branch_fns=[(1, fn_1), (2, fn_2)],
                default=fn_3,
            )
            out_3 = paddle.static.nn.switch_case(
                branch_index=index_2,
                branch_fns=[(0, fn_1), (4, fn_2), (7, fn_3)],
            )

            dynamic_res1 = out_1.numpy()
            dynamic_res2 = out_2.numpy()
            dynamic_res3 = out_3.numpy()

        np.testing.assert_array_equal(static_res1, dynamic_res1)
        np.testing.assert_array_equal(static_res2, dynamic_res2)
        np.testing.assert_array_equal(static_res3, dynamic_res3)

    def test_crop_tensor(self):
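        # paddle.crop accepts shape/offsets as plain lists, as whole Tensors, or as lists
        # mixing Python ints with 1-D Tensors; all three variants are built below.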
        with self.static_graph():
            x = paddle.static.data(
                name="x1", shape=[-1, 6, 5, 8], dtype="float32"
            )

            dim1 = paddle.static.data(name="dim1", shape=[1], dtype="float32")
            dim2 = paddle.static.data(name="dim2", shape=[1], dtype="float32")
            crop_shape1 = (1, 2, 4, 4)
            crop_shape2 = paddle.static.data(
                name="crop_shape", shape=[4], dtype="float32"
            )
            crop_shape3 = [-1, dim1, dim2, 4]
            crop_offsets1 = [0, 0, 1, 0]
            crop_offsets2 = paddle.static.data(
                name="crop_offset", shape=[4], dtype="float32"
            )
            crop_offsets3 = [0, dim1, dim2, 0]

            out1 = paddle.crop(x, shape=crop_shape1, offsets=crop_offsets1)
            out2 = paddle.crop(x, shape=crop_shape2, offsets=crop_offsets2)
            out3 = paddle.crop(x, shape=crop_shape3, offsets=crop_offsets3)

            self.assertIsNotNone(out1)
            self.assertIsNotNone(out2)
            self.assertIsNotNone(out3)

    def test_shard_index(self):
        with self.static_graph():
            x = paddle.static.data(
                name="label", shape=[-1, 4, 1], dtype='int64'
            )
            shard_label = paddle.shard_index(
                input=x, index_num=20, nshards=2, shard_id=0
            )

        self.assertIsNotNone(shard_label)

    def test_accuracy(self):
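        # paddle.static.accuracy reports the top-k accuracy of the softmax predictions
        # against the integer labels; static and dygraph results should match exactly.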
        x = np.random.rand(3, 32, 32).astype("float32")
        y = np.array([[1], [0], [1]])
        with self.static_graph():
            data = paddle.static.data(
                name="input", shape=[-1, 32, 32], dtype="float32"
            )
            label = paddle.static.data(name="label", shape=[-1, 1], dtype="int")
            data_new = paddle.reshape(data, [3, 32 * 32])
            fc_out = paddle.nn.Linear(32 * 32, 10)(data_new)
            predict = paddle.nn.functional.softmax(fc_out)
            result = paddle.static.accuracy(input=predict, label=label, k=5)
            place = fluid.CPUPlace()
            exe = fluid.Executor(place)

            exe.run(fluid.default_startup_program())
            # x = np.random.rand(3, 32, 32).astype("float32")
            # y = np.array([[1], [0], [1]])

            static_out = exe.run(
                feed={"input": x, "label": y}, fetch_list=result
            )

        with self.dynamic_graph(force_to_use_cpu=True):
            data = base.to_variable(x)
            label = base.to_variable(y)
            data_new = paddle.reshape(data, [3, 32 * 32])
            fc_out = paddle.nn.Linear(32 * 32, 10)(data_new)
            predict = paddle.nn.functional.softmax(fc_out)
            dynamic_out = paddle.static.accuracy(
                input=predict, label=label, k=5
            )

        np.testing.assert_array_equal(static_out[0], dynamic_out.numpy())


class TestBook(LayerTest):
    def setUp(self):
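        # make_* methods listed here are built only in static mode, excluded from the
        # exact static-vs-dygraph comparison, or compared with a tolerance, respectively.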
        self.only_static_set = set({"make_word_embedding"})
        self.not_compare_static_dygraph_set = set(
            {
                "make_gaussian_random",
                "make_kldiv_loss",
                "make_uniform_random_batch_size_like",
            }
        )
        self.all_close_compare = set({"make_spectral_norm"})

    def test_all_layers(self):
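        # Runs every make_* method twice, once under a static graph and once under
        # dygraph with the same seed, and checks that both modes produce the same result.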
        attrs = (getattr(self, name) for name in dir(self))
        methods = filter(inspect.ismethod, attrs)
        for method in methods:
            if not method.__name__.startswith('make_'):
                continue
            self._low_data_bound = 0
            self._high_data_bound = 2
            self._batch_size = 2
            self._feed_dict = {}
            self._force_to_use_cpu = False
            with self.static_graph():
                static_var = method()
                if isinstance(static_var, tuple):
                    static_var = static_var[0]

                if static_var is not None:
                    fetch_list = [static_var.name]
                    static_result = self.get_static_graph_result(
                        feed=self._feed_dict,
                        fetch_list=fetch_list,
                        force_to_use_cpu=self._force_to_use_cpu,
                    )

                else:
                    continue
            if method.__name__ in self.only_static_set:
                continue

            with self.dynamic_graph(self._force_to_use_cpu):
                dy_result = method()
                if isinstance(dy_result, tuple):
                    dy_result = dy_result[0]
                dy_result_value = dy_result.numpy()

            if method.__name__ in self.all_close_compare:
                np.testing.assert_allclose(
                    static_result[0],
                    dy_result_value,
                    rtol=1e-05,
                    atol=0,
                    err_msg='Result of function [{}] compare failed'.format(
                        method.__name__
                    ),
                )
                continue

            if method.__name__ not in self.not_compare_static_dygraph_set:
                np.testing.assert_array_equal(
                    static_result[0],
                    dy_result_value,
                    err_msg='Result of function [{}] not equal'.format(
                        method.__name__
                    ),
                )

    def _get_np_data(self, shape, dtype, append_batch_size=True):
        np.random.seed(self.seed)
        if append_batch_size:
            shape = [self._batch_size] + shape
        if dtype == 'float32':
            return np.random.random(shape).astype(dtype)
        elif dtype == 'float64':
            return np.random.random(shape).astype(dtype)
        elif dtype == 'int32':
            return np.random.randint(
                self._low_data_bound, self._high_data_bound, shape
            ).astype(dtype)
        elif dtype == 'int64':
            return np.random.randint(
                self._low_data_bound, self._high_data_bound, shape
            ).astype(dtype)

    def _get_data(
        self, name, shape, dtype, set_feed_dict=True, append_batch_size=True
    ):
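        # In dygraph mode this returns a Tensor filled with deterministic random data;
        # in static mode it declares a data placeholder and records the ndarray in
        # self._feed_dict so get_static_graph_result can feed it.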
        if base.enabled():
            return base.to_variable(
                value=self._get_np_data(shape, dtype, append_batch_size),
                name=name,
                zero_copy=False,
            )
        else:
            if set_feed_dict:
                self._feed_dict[name] = self._get_np_data(
                    shape, dtype, append_batch_size
                )
            if append_batch_size:
                shape = [-1] + shape
            data = paddle.static.data(
                name=name,
                shape=shape,
                dtype=dtype,
            )
            data.desc.set_need_check_feed(False)
            return data

    def make_fit_a_line(self):
        with program_guard(
            fluid.default_main_program(),
            startup_program=fluid.default_startup_program(),
        ):
            x = self._get_data(name='x', shape=[13], dtype='float32')
            y_predict = paddle.nn.Linear(13, 1)(x)
            y = self._get_data(name='y', shape=[1], dtype='float32')
            cost = paddle.nn.functional.square_error_cost(
                input=y_predict, label=y
            )
            avg_cost = paddle.mean(cost)
            return avg_cost

    def make_recognize_digits_mlp(self):
        with program_guard(
            fluid.default_main_program(), fluid.default_startup_program()
        ):
            # Change g_program, so the rest layers use `g_program`
            images = self._get_data(name='pixel', shape=[784], dtype='float32')
            label = self._get_data(name='label', shape=[1], dtype='int64')
            hidden1 = paddle.nn.Linear(784, 128)(images)
            hidden1 = paddle.nn.functional.relu(hidden1)
            hidden2 = paddle.nn.Linear(128, 64)(hidden1)
            hidden2 = paddle.nn.functional.relu(hidden2)
            hidden1 = paddle.nn.Linear(128, 10, "sftmax.w1")(hidden1)
            hidden2 = paddle.nn.Linear(64, 10, "sftmax.w2")(hidden2)
            hidden = hidden1 + hidden2
            predict = paddle.nn.functional.softmax(hidden)
            cost = paddle.nn.functional.cross_entropy(
                input=predict, label=label, reduction='none', use_softmax=False
            )
            avg_cost = paddle.mean(cost)
            return avg_cost

    def make_conv2d_transpose(self):
        with program_guard(
            fluid.default_main_program(), fluid.default_startup_program()
        ):
            img = self._get_data(name='pixel', shape=[3, 2, 2], dtype='float32')
            return paddle.static.nn.conv2d_transpose(
                input=img, num_filters=10, output_size=28
            )

    def make_recognize_digits_conv(self):
        with program_guard(
            fluid.default_main_program(), fluid.default_startup_program()
        ):
            images = self._get_data(
                name='pixel', shape=[1, 28, 28], dtype='float32'
            )
            label = self._get_data(name='label', shape=[1], dtype='int64')
            conv_pool_1 = nets.simple_img_conv_pool(
                input=images,
                filter_size=5,
                num_filters=2,
                pool_size=2,
                pool_stride=2,
                act="relu",
            )
            conv_pool_2 = nets.simple_img_conv_pool(
                input=conv_pool_1,
                filter_size=5,
                num_filters=4,
                pool_size=2,
                pool_stride=2,
                act="relu",
            )

            conv_pool_2_new = paddle.reshape(
                conv_pool_2,
                [
                    conv_pool_2.shape[0],
                    conv_pool_2.shape[1]
                    * conv_pool_2.shape[2]
                    * conv_pool_2.shape[3],
                ],
            )
            predict = paddle.nn.Linear(
                conv_pool_2.shape[1]
                * conv_pool_2.shape[2]
                * conv_pool_2.shape[3],
                10,
            )(conv_pool_2_new)
            predict = paddle.nn.functional.softmax(predict)
            cost = paddle.nn.functional.cross_entropy(
                input=predict, label=label, reduction='none', use_softmax=False
            )
            avg_cost = paddle.mean(cost)
            return avg_cost

    def make_word_embedding(self):
        with program_guard(
            fluid.default_main_program(), fluid.default_startup_program()
        ):
            dict_size = 10000
            embed_size = 32
            first_word = self._get_data(name='firstw', shape=[1], dtype='int64')
            second_word = self._get_data(
                name='secondw', shape=[1], dtype='int64'
            )
            third_word = self._get_data(name='thirdw', shape=[1], dtype='int64')
            forth_word = self._get_data(name='forthw', shape=[1], dtype='int64')
            next_word = self._get_data(name='nextw', shape=[1], dtype='int64')

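            # the four context-word embeddings share a single table via param_attr='shared_w'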
            embed_first = layers.embedding(
                input=first_word,
                size=[dict_size, embed_size],
                dtype='float32',
                param_attr='shared_w',
            )
            embed_second = layers.embedding(
                input=second_word,
                size=[dict_size, embed_size],
                dtype='float32',
                param_attr='shared_w',
            )

            embed_third = layers.embedding(
                input=third_word,
                size=[dict_size, embed_size],
                dtype='float32',
                param_attr='shared_w',
            )
            embed_forth = layers.embedding(
                input=forth_word,
                size=[dict_size, embed_size],
                dtype='float32',
                param_attr='shared_w',
            )

            concat_embed = paddle.concat(
                [embed_first, embed_second, embed_third, embed_forth],
                axis=1,
            )

            hidden1 = paddle.static.nn.fc(
                x=concat_embed, size=256, activation='sigmoid'
            )
            predict_word = paddle.static.nn.fc(
                x=hidden1, size=dict_size, activation='softmax'
            )
            cost = paddle.nn.functional.cross_entropy(
                input=predict_word,
                label=next_word,
                reduction='none',
                use_softmax=False,
            )
            avg_cost = paddle.mean(cost)
            return avg_cost

    def make_pool2d(self):
        with program_guard(
            fluid.default_main_program(), fluid.default_startup_program()
        ):
            x = self._get_data(name='x', shape=[3, 224, 224], dtype='float32')
            return paddle.nn.functional.max_pool2d(
                x, kernel_size=[5, 3], stride=[1, 2], padding=(2, 1)
            )

    def make_pool2d_infershape(self):
        with program_guard(
            fluid.default_main_program(), fluid.default_startup_program()
        ):
            theta = self._get_data("theta", shape=[2, 3], dtype='float32')
            x = paddle.nn.functional.affine_grid(
                theta, out_shape=[2, 3, 244, 244]
            )
            return paddle.nn.functional.max_pool2d(
                x, kernel_size=[5, 3], stride=[1, 2], padding=(2, 1)
            )

    def make_softmax(self):
        with program_guard(
            fluid.default_main_program(), fluid.default_startup_program()
        ):
            data = self._get_data(name='data', shape=[10], dtype='float32')
            hid = paddle.nn.Linear(10, 20)(data)
            return paddle.nn.functional.softmax(hid, axis=1)

    @prog_scope()
    def make_nce(self):
        window_size = 5
        words = []
        for i in range(window_size):
            words.append(
                self._get_data(name=f'word_{i}', shape=[1], dtype='int64')
            )

        dict_size = 10000
        label_word = int(window_size // 2) + 1

        embs = []
        for i in range(window_size):
            if i == label_word:
                continue

            emb = layers.embedding(
                input=words[i],
                size=[dict_size, 32],
                param_attr='emb.w',
                is_sparse=True,
            )

            embs.append(emb)

        embs = paddle.concat(embs, axis=1)
        loss = paddle.static.nn.nce(
            input=embs,
            label=words[label_word],
            num_total_classes=dict_size,
            param_attr='nce.w',
            bias_attr='nce.b',
        )
        avg_loss = paddle.mean(loss)
        return avg_loss

    def make_multiplex(self):
        with program_guard(
            fluid.default_main_program(), fluid.default_startup_program()
        ):
            x1 = self._get_data(name='x1', shape=[4], dtype='float32')
            x2 = self._get_data(name='x2', shape=[4], dtype='float32')
            index = self._get_data(name='index', shape=[1], dtype='int32')
            out = paddle.multiplex(inputs=[x1, x2], index=index)
            return out

    def make_softmax_with_cross_entropy(self):
        with program_guard(
            fluid.default_main_program(), fluid.default_startup_program()
        ):
            x = self._get_data(name='x', shape=[16], dtype='float32')
            y = self._get_data(name='label', shape=[1], dtype='int64')
            loss, softmax = paddle.nn.functional.softmax_with_cross_entropy(
                x, y, return_softmax=True
            )
            self.assertIsNotNone(loss)
            self.assertIsNotNone(softmax)

            loss = paddle.nn.functional.softmax_with_cross_entropy(x, y)
            self.assertIsNotNone(loss)

            x1 = self._get_data(name='x1', shape=[16, 32, 64], dtype='float32')
            y1 = self._get_data(name='label1', shape=[1, 32, 64], dtype='int64')
            y2 = self._get_data(name='label2', shape=[16, 1, 64], dtype='int64')
            y3 = self._get_data(name='label3', shape=[16, 32, 1], dtype='int64')
            loss1 = paddle.nn.functional.softmax_with_cross_entropy(
                x1, y1, axis=1
            )
            loss2 = paddle.nn.functional.softmax_with_cross_entropy(
                x1, y2, axis=2
            )
            loss3 = paddle.nn.functional.softmax_with_cross_entropy(
                x1, y3, axis=3
            )
            loss4 = paddle.nn.functional.softmax_with_cross_entropy(
                x1, y3, axis=-1
            )
            self.assertIsNotNone(loss1)
            self.assertIsNotNone(loss2)
            self.assertIsNotNone(loss3)
            self.assertIsNotNone(loss4)
            return loss4

    def make_scatter(self):
        with program_guard(
            fluid.default_main_program(), fluid.default_startup_program()
        ):
            x = self._get_data(
                name='x', shape=[3, 3], append_batch_size=False, dtype='float32'
            )
            idx = self._get_data(
                name='idx', shape=[2], append_batch_size=False, dtype='int32'
            )
            updates = self._get_data(
                name='updates',
                shape=[2, 3],
                dtype='float32',
                append_batch_size=False,
            )
            out = paddle.scatter(x, index=idx, updates=updates)
            return out

    def make_one_hot(self):
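        # one_hot expands integer class ids into one-hot rows with 10 columns here.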
        with fluid.framework._dygraph_place_guard(place=fluid.CPUPlace()):
            label = self._get_data(name="label", shape=[1], dtype="int32")
            one_hot_label = paddle.nn.functional.one_hot(label, 10)
            return one_hot_label

    def make_label_smooth(self):
        # TODO(minqiyang): support gpu ut
        self._force_to_use_cpu = True
        with fluid.framework._dygraph_place_guard(place=fluid.CPUPlace()):
            label = self._get_data(name="label", shape=[1], dtype="int32")
            one_hot_label = paddle.nn.functional.one_hot(label, 10)
            smooth_label = F.label_smooth(label=one_hot_label, epsilon=0.1)
            return smooth_label

    def make_topk(self):
        with program_guard(
            fluid.default_main_program(), fluid.default_startup_program()
        ):
            data = self._get_data(name="label", shape=[200], dtype="float32")
            values, indices = paddle.topk(data, k=5)
            return values, indices

    def make_l2_normalize(self):
        with program_guard(
            fluid.default_main_program(), fluid.default_startup_program()
        ):
            x = self._get_data(name='x', shape=[8, 7, 10], dtype="float32")
            output = paddle.nn.functional.normalize(x, axis=1)
            return output

    def make_shape(self):
        with program_guard(
            fluid.default_main_program(), fluid.default_startup_program()
        ):
            input = self._get_data(
                name="input", shape=[3, 100, 100], dtype="float32"
            )
            out = paddle.shape(input)
            return out

    def make_pad2d(self):
        with program_guard(
            fluid.default_main_program(), fluid.default_startup_program()
        ):
            input = self._get_data(
                name="input", shape=[3, 100, 100], dtype="float32"
            )

            tmp_pad = paddle.nn.Pad2D(
                padding=[1, 2, 3, 4],
                mode='reflect',
                data_format='NCHW',
                name="shape",
            )
            out = tmp_pad(input)
            return out

    def make_mish(self):
        with program_guard(
            fluid.default_main_program(), fluid.default_startup_program()
        ):
            input = self._get_data(name="input", shape=[16], dtype="float32")
            out = paddle.nn.functional.mish(input, name='mish')
            return out

    def make_cross_entropy(self):
        with program_guard(
            fluid.default_main_program(), fluid.default_startup_program()
        ):
            x = self._get_data(name="x", shape=[30, 10], dtype="float32")
            label = self._get_data(name="label", shape=[30, 1], dtype="int64")
            out = paddle.nn.functional.cross_entropy(
                x,
                label,
                soft_label=False,
                ignore_index=4,
                reduction='none',
                use_softmax=False,
            )
            return out

    def make_uniform_random_batch_size_like(self):
        with program_guard(
            fluid.default_main_program(), fluid.default_startup_program()
        ):
            input = self._get_data(
                name="input", shape=[13, 11], dtype='float32'
            )
            out = random.uniform_random_batch_size_like(input, [-1, 11])
            return out

    def make_gaussian_random(self):
        with program_guard(
            fluid.default_main_program(), fluid.default_startup_program()
        ):
            out = random.gaussian(shape=[20, 30])
            return out

    def make_sum(self):
        with program_guard(
            fluid.default_main_program(), fluid.default_startup_program()
        ):
            input = self._get_data(
                name="input", shape=[13, 11], dtype='float32'
            )

            out = paddle.add_n(input)
            return out

    def make_slice(self):
        starts = [1, 0, 2]
        ends = [3, 3, 4]
        axes = [0, 1, 2]

        with program_guard(
            fluid.default_main_program(), fluid.default_startup_program()
        ):
            input = self._get_data(
                name="input", shape=[3, 4, 5, 6], dtype='float32'
            )

            out = paddle.slice(input, axes=axes, starts=starts, ends=ends)
            return out

    def make_scale_variable(self):
        with program_guard(
            fluid.default_main_program(), fluid.default_startup_program()
        ):
            input = self._get_data(
                name="input", shape=[3, 4, 5, 6], dtype='float32'
            )
            scale_var = self._get_data(
                name="scale",
                shape=[1],
                dtype='float32',
                append_batch_size=False,
            )
            out = paddle.scale(input, scale=scale_var)
            return out

    def make_bilinear_tensor_product_layer(self):
        with program_guard(
            fluid.default_main_program(), fluid.default_startup_program()
        ):
            data = self._get_data(name='data', shape=[4], dtype="float32")

            theta = self._get_data(name="theta", shape=[5], dtype="float32")
            out = paddle.static.nn.common.bilinear_tensor_product(
                data, theta, 6
            )
            return out

    def make_batch_norm(self):
        with program_guard(
            fluid.default_main_program(), fluid.default_startup_program()
        ):
            data = self._get_data(
                name='data', shape=[32, 128, 128], dtype="float32"
            )
            out = paddle.static.nn.batch_norm(data)
            return out

    def make_batch_norm_momentum_variable(self):
        with program_guard(
            fluid.default_main_program(), fluid.default_startup_program()
        ):
            data = self._get_data(
                name='data', shape=[32, 128, 128], dtype="float32"
            )
            momentum = self._get_data(
                name='momentum',
                shape=[1],
                dtype='float32',
                append_batch_size=False,
            )
            out = paddle.static.nn.batch_norm(data, momentum=momentum)
            return out

    def make_range(self):
        with program_guard(
            fluid.default_main_program(), fluid.default_startup_program()
        ):
            paddle.arange(0, 10, 2, 'int32')
            paddle.arange(0.1, 10.0, 0.2, 'float32')
            paddle.arange(0.1, 10.0, 0.2, 'float64')
            start = paddle.tensor.fill_constant(
                shape=[1], value=0.1, dtype="float32"
            )
            end = paddle.tensor.fill_constant(
                shape=[1], value=10.0, dtype="float32"
            )
            step = paddle.tensor.fill_constant(
                shape=[1], value=0.2, dtype="float32"
            )
            y = paddle.arange(start, end, step, 'float64')
            return y

    def make_spectral_norm(self):
        with program_guard(
            fluid.default_main_program(), fluid.default_startup_program()
        ):
            weight = self._get_data(
                name='weight',
                shape=[2, 3, 32, 32],
                dtype="float32",
                append_batch_size=False,
            )
            out = paddle.static.nn.spectral_norm(weight, dim=1, power_iters=1)
            return out

    def make_kldiv_loss(self):
        with program_guard(
            fluid.default_main_program(), fluid.default_startup_program()
        ):
            x = self._get_data(
                name='x',
                shape=[32, 128, 128],
                dtype="float32",
                append_batch_size=False,
            )
            target = self._get_data(
                name='target',
                shape=[32, 128, 128],
                dtype="float32",
                append_batch_size=False,
            )
            loss = paddle.nn.functional.kl_div(
                input=x, label=target, reduction='batchmean'
            )
            return loss

    def make_pixel_shuffle(self):
        with program_guard(
            fluid.default_main_program(), fluid.default_startup_program()
        ):
            x = self._get_data(name="X", shape=[9, 4, 4], dtype="float32")
            out = paddle.nn.functional.pixel_shuffle(x, upscale_factor=3)
            return out

    def make_mse_loss(self):
        with program_guard(
            fluid.default_main_program(), fluid.default_startup_program()
        ):
            x = self._get_data(name="X", shape=[1], dtype="float32")
            y = self._get_data(name="Y", shape=[1], dtype="float32")
            out = paddle.nn.functional.mse_loss(input=x, label=y)
            return out

    def make_square_error_cost(self):
        with program_guard(
            fluid.default_main_program(), fluid.default_startup_program()
        ):
            x = self._get_data(name="X", shape=[1], dtype="float32")
            y = self._get_data(name="Y", shape=[1], dtype="float32")
            out = paddle.nn.functional.square_error_cost(input=x, label=y)
            return out

    def test_affine_grid(self):
        with self.static_graph():
            data = paddle.static.data(
                name='data', shape=[-1, 2, 3, 3], dtype="float32"
            )
            out = paddle.argsort(x=data, axis=1)

            theta = paddle.static.data(
                name="theta", shape=[-1, 2, 3], dtype="float32"
            )
            out_shape = paddle.static.data(
                name="out_shape", shape=[-1], dtype="int32"
            )
            data_0 = paddle.nn.functional.affine_grid(theta, out_shape)
            data_1 = paddle.nn.functional.affine_grid(theta, [5, 3, 28, 28])

            self.assertIsNotNone(data_0)
            self.assertIsNotNone(data_1)

    def test_stridedslice(self):
        axes = [0, 1, 2]
        starts = [1, 0, 2]
        ends = [3, 3, 4]
        strides = [1, 1, 1]
        with self.static_graph():
            x = paddle.static.data(
                name="x", shape=[-1, 245, 30, 30], dtype="float32"
            )
            out = paddle.strided_slice(
                x, axes=axes, starts=starts, ends=ends, strides=strides
            )
            return out

    def test_fill_constant_batch_size_like(self):
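        # fill_constant_batch_size_like copies the batch dimension of `input` into the
        # requested shape before filling the result with `value`.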
        with self.static_graph():
            like = paddle.tensor.fill_constant(
                shape=[1, 200], value=10, dtype='int64'
            )
            out = layers.fill_constant_batch_size_like(
                input=like, shape=[2, 3300], value=1315454564656, dtype='int64'
            )
            return out

    def test_shuffle_batch(self):
        # TODO(minqiyang): dygraph do not support lod now
        with self.static_graph():
            x = paddle.static.data(
                name='X', shape=[-1, 4, 50], dtype='float32', lod_level=0
            )
            out1 = shuffle_batch(x)
            default_main_program().random_seed = 1000
            out2 = shuffle_batch(x)
            self.assertIsNotNone(out1)
            self.assertIsNotNone(out2)
            return out1

    def test_partial_sum(self):
        with self.static_graph():
            x = paddle.static.data(name="x", shape=[None, 3], dtype="float32")
            y = paddle.static.data(name="y", shape=[None, 3], dtype="float32")
            sum = partial_sum([x, y], start_index=0, length=2)
            return sum

    def test_batch_fc(self):
        with self.static_graph():
            input = paddle.static.data(
                name="input", shape=[16, 2, 3], dtype="float32"
            )
            out = batch_fc(
                input=input,
                param_size=[16, 3, 10],
                param_attr=fluid.ParamAttr(
                    learning_rate=1.0,
                    name="w_0",
                    initializer=paddle.nn.initializer.XavierNormal(),
                ),
                bias_size=[16, 10],
                bias_attr=fluid.ParamAttr(
                    learning_rate=1.0,
                    name="b_0",
                    initializer=paddle.nn.initializer.XavierNormal(),
                ),
                act="relu",
            )
        return out

    def test_rank_attention(self):
        with self.static_graph():
            input = paddle.static.data(
                name="input", shape=[None, 2], dtype="float32"
            )
            rank_offset = paddle.static.data(
                name="rank_offset", shape=[None, 7], dtype="int32"
            )
            out = rank_attention(
                input=input,
                rank_offset=rank_offset,
                rank_param_shape=[18, 3],
                rank_param_attr=fluid.ParamAttr(
                    learning_rate=1.0,
                    name="ubm_rank_param.w_0",
                    initializer=paddle.nn.initializer.XavierNormal(),
                ),
                max_rank=3,
            )
            return out

    def test_row_conv(self):
        # TODO(minqiyang): dygraph do not support lod now
        with self.static_graph():
            x = paddle.static.data(
                name='x', shape=[-1, 16], dtype='float32', lod_level=1
            )
            out = paddle.static.nn.row_conv(input=x, future_context_size=2)
            return out

    def test_simple_conv2d(self):
        # TODO(minqiyang): dygraph do not support layers with param now
        with self.static_graph():
            images = paddle.static.data(
                name='pixel', shape=[-1, 3, 48, 48], dtype='float32'
            )
            return paddle.static.nn.conv2d(
                input=images, num_filters=3, filter_size=[4, 4]
            )

    def test_squeeze(self):
        # TODO(minqiyang): dygraph do not support layers with param now
        with self.static_graph():
            x = paddle.static.data(
                name='x', shape=[-1, 1, 1, 4], dtype='float32'
            )
            out = paddle.squeeze(x, axis=[2])
            return out

    def test_flatten(self):
        # TODO(minqiyang): dygraph do not support op without kernel now
        with self.static_graph():
            x = paddle.static.data(
                name='x',
                shape=[4, 4, 3],
                dtype="float32",
            )
            out = paddle.flatten(x, 1, -1, name="flatten")
            return out

    def test_linspace(self):
        program = Program()
        with program_guard(program):
            out = paddle.linspace(20, 10, 5, 'float64')
            self.assertIsNotNone(out)
        print(str(program))

    def test_unfold(self):
        with self.static_graph():
            x = paddle.static.data(
                name='x', shape=[-1, 3, 20, 20], dtype='float32'
            )
            out = paddle.nn.functional.unfold(x, [3, 3], 1, 1, 1)
            return out

    def test_partial_concat(self):
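        # partial_concat concatenates, per row, the slice [start_index, start_index + length)
        # of each input; length=-1 keeps everything from start_index onward.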
        with self.static_graph():
            x = paddle.static.data(name="x", shape=[None, 3], dtype="float32")
            y = paddle.static.data(name="y", shape=[None, 3], dtype="float32")
            concat1 = partial_concat([x, y], start_index=0, length=2)
            concat2 = partial_concat(x, start_index=0, length=-1)
            return concat1, concat2

    def test_addmm(self):
        with program_guard(
            fluid.default_main_program(), fluid.default_startup_program()
        ):
            input = paddle.static.data(
                name='input_data',
                shape=[3, 3],
                dtype='float32',
            )
            x = paddle.static.data(name='x', shape=[3, 2], dtype='float32')
            y = paddle.static.data(name='y', shape=[2, 3], dtype='float32')

            out = paddle.addmm(input=input, x=x, y=y)
            return out

    def test_warpctc_with_padding(self):
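        # ctc_loss takes log-probabilities shaped [max_logit_length, batch, num_classes + 1]
        # plus explicit per-sample logit and label lengths, so padded batches are handled.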
        # TODO(minqiyang): dygraph do not support lod now
        with self.static_graph():
            input_length = paddle.static.data(
                name='logits_length', shape=[11], dtype='int64'
            )
            label_length = paddle.static.data(
                name='labels_length', shape=[12], dtype='int64'
            )
            label = paddle.static.data(
                name='label', shape=[12, 1], dtype='int32'
            )
            predict = paddle.static.data(
                name='predict', shape=[4, 4, 8], dtype='float32'
            )
            output = paddle.nn.functional.ctc_loss(
                log_probs=predict,
                labels=label,
                input_lengths=input_length,
                label_lengths=label_length,
                reduction='none',
            )
            return output


class ExampleNet(paddle.nn.Layer):
    def __init__(self):
        super().__init__()
        self.weight = self.create_parameter(
            shape=[1, 1], attr=paddle.ParamAttr(trainable=False)
        )

    def forward(self):
        # only for test parameter trainable attr
        pass


class TestLayerParameterTrainableSet(unittest.TestCase):
    def test_layer_parameter_set(self):
        with fluid.dygraph.guard():
            net = ExampleNet()
            self.assertFalse(net.weight.trainable)


class TestLayerTrainingAttribute(unittest.TestCase):
    def test_set_train_eval_in_dynamic_mode(self):
        with fluid.dygraph.guard():
            net = paddle.nn.Dropout()
            net.train()
            self.assertTrue(net.training)
            net.eval()
            self.assertFalse(net.training)

    def test_set_train_eval_in_static_mode(self):
        net = paddle.nn.Dropout()
        net.train()
        self.assertTrue(net.training)
        net.eval()
        self.assertFalse(net.training)


class MyLayer(paddle.nn.Layer):
    def __init__(self):
        super().__init__()
        self._linear = paddle.nn.Linear(1, 1)
        self._dropout = paddle.nn.Dropout(p=0.5)

    def forward(self, input):
        temp = self._linear(input)
        temp = self._dropout(temp)
        return temp


class MySuperLayer(paddle.nn.Layer):
    def __init__(self):
        super().__init__()
        self._mylayer = MyLayer()

    def forward(self, input):
        temp = self._mylayer(input)
        return temp


class TestSubLayerCount(unittest.TestCase):
    def test_sublayer(self):
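        # MyLayer holds a Linear and a Dropout, so MySuperLayer's sublayers() reports
        # three layers, or four when include_self=True.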
        with fluid.dygraph.guard():
            mySuperlayer = MySuperLayer()
            self.assertTrue(len(mySuperlayer.sublayers()) == 3)
            self.assertTrue(len(mySuperlayer.sublayers(include_self=True)) == 4)


if __name__ == '__main__':
    paddle.enable_static()
    unittest.main()