#   Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import contextlib
import inspect
import unittest

import numpy as np
from decorator_helper import prog_scope
from test_imperative_base import new_program_scope

import paddle
import paddle.nn.functional as F
from paddle import fluid
from paddle.fluid import core, layers, nets
from paddle.fluid.dygraph import base, to_variable
from paddle.fluid.framework import Program, default_main_program, program_guard
from paddle.incubate.layers.nn import (
    batch_fc,
    partial_concat,
    partial_sum,
    rank_attention,
    shuffle_batch,
)
from paddle.tensor import random


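# Shared test harness: seeds both execution modes and provides context managers
# for building and running the same network under static graph and dygraph.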
class LayerTest(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        cls.seed = 111

    @classmethod
    def tearDownClass(cls):
        pass

    def _get_place(self, force_to_use_cpu=False):
        # This option is for ops that only have a CPU kernel.
        if force_to_use_cpu:
            return core.CPUPlace()
        else:
            if core.is_compiled_with_cuda():
                return core.CUDAPlace(0)
            return core.CPUPlace()

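    # Build and run ops inside a fresh static Program, with the RNG seeded so
    # results are comparable with the dygraph path.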
    @contextlib.contextmanager
    def static_graph(self):
        with new_program_scope():
            paddle.seed(self.seed)
            paddle.framework.random._manual_program_seed(self.seed)
            yield

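    # Run the startup program, then execute the current main program with the
    # given feed and return the fetched results.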
    def get_static_graph_result(
        self, feed, fetch_list, with_lod=False, force_to_use_cpu=False
    ):
        exe = fluid.Executor(self._get_place(force_to_use_cpu))
        exe.run(fluid.default_startup_program())
        return exe.run(
            fluid.default_main_program(),
            feed=feed,
            fetch_list=fetch_list,
            return_numpy=(not with_lod),
        )

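    # Enter dygraph mode on the selected place with the same fixed seed.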
    @contextlib.contextmanager
    def dynamic_graph(self, force_to_use_cpu=False):
        with fluid.dygraph.guard(
            self._get_place(force_to_use_cpu=force_to_use_cpu)
        ):
            paddle.seed(self.seed)
            paddle.framework.random._manual_program_seed(self.seed)
            yield


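# Most tests below build the same layer under static graph and dygraph mode and
# assert that the two execution paths produce matching results.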
class TestLayer(LayerTest):
    def test_custom_layer_with_kwargs(self):
        class CustomLayer(paddle.nn.Layer):
            def __init__(self, input_size, linear1_size=4):
                super().__init__()
                self.linear1 = paddle.nn.Linear(
                    input_size, linear1_size, bias_attr=False
                )
                self.linear2 = paddle.nn.Linear(
                    linear1_size, 1, bias_attr=False
                )

            def forward(self, x, do_linear2=False):
                ret = self.linear1(x)
                if do_linear2:
                    ret = self.linear2(ret)
                return ret

        with self.dynamic_graph():
            inp = np.ones([3, 3], dtype='float32')
            x = base.to_variable(inp)
            custom = CustomLayer(input_size=3, linear1_size=2)
            ret = custom(x, do_linear2=False)
            np.testing.assert_array_equal(ret.numpy().shape, [3, 2])
            ret = custom(x, do_linear2=True)
            np.testing.assert_array_equal(ret.numpy().shape, [3, 1])

    def test_dropout(self):
        inp = np.ones([3, 32, 32], dtype='float32')
        with self.static_graph():
            t = paddle.static.data(
                name='data',
                shape=[3, 32, 32],
                dtype='float32',
            )
            dropout = paddle.nn.Dropout(p=0.35)
            ret = dropout(t)
            ret2 = paddle.nn.functional.dropout(t, p=0.35)
            static_ret, static_ret2 = self.get_static_graph_result(
                feed={'data': inp}, fetch_list=[ret, ret2]
            )
        with self.dynamic_graph():
            t = base.to_variable(inp)
            dropout = paddle.nn.Dropout(p=0.35)
            dy_ret = dropout(t)
            dy_ret2 = paddle.nn.functional.dropout(t, p=0.35)
            dy_ret_value = dy_ret.numpy()
            dy_ret2_value = dy_ret2.numpy()

        np.testing.assert_array_equal(static_ret, static_ret2)
        np.testing.assert_array_equal(dy_ret_value, dy_ret2_value)
        np.testing.assert_array_equal(static_ret, dy_ret_value)

    def test_linear(self):
        inp = np.ones([3, 32, 32], dtype='float32')
        with self.static_graph():
            t = paddle.static.data(
                name='data', shape=[3, 32, 32], dtype='float32'
            )
            linear = paddle.nn.Linear(
                32,
                4,
                bias_attr=paddle.nn.initializer.Constant(value=1),
            )
            ret = linear(t)
            static_ret = self.get_static_graph_result(
                feed={'data': inp}, fetch_list=[ret]
            )[0]
        with self.dynamic_graph():
            t = base.to_variable(inp)
            linear = paddle.nn.Linear(
                32,
                4,
                bias_attr=paddle.nn.initializer.Constant(value=1),
            )
            dy_ret = linear(t)
            dy_ret_value = dy_ret.numpy()

        np.testing.assert_array_equal(static_ret, dy_ret_value)

        with self.static_graph():
            # the input of Linear must be Variable.
            def test_Variable():
                inp = np.ones([3, 32, 32], dtype='float32')
                linear = paddle.nn.Linear(
                    32,
                    4,
                    bias_attr=paddle.nn.initializer.Constant(value=1),
                )
                linear_ret1 = linear(inp)

            self.assertRaises(TypeError, test_Variable)

            # the input dtype of Linear must be float16, float32 or float64;
            # float16 can only be used on a GPU place
            def test_type():
                inp = np.ones([3, 32, 32], dtype='int32')
                linear = paddle.nn.Linear(
                    32,
                    4,
                    bias_attr=paddle.nn.initializer.Constant(value=1),
                )
                linear_ret2 = linear(inp)

            self.assertRaises(TypeError, test_type)

    def test_cvm(self):
        inp = np.ones([10, 10], dtype='float32')
        arr = [[0.6931472, -1.904654e-09, 1, 1, 1, 1, 1, 1, 1, 1]] * 10
        cvm1 = np.array(arr, dtype='float32')
        cvm2 = np.ones([10, 8], dtype='float32')
        show_clk = np.ones([10, 2], dtype='float32')
        with self.static_graph():
            x = paddle.static.data(
                name='data',
                shape=[10, 10],
                dtype='float32',
            )
            u = paddle.static.data(
                name='show_click',
                shape=[10, 2],
                dtype='float32',
            )
            no_cvm = paddle.static.nn.continuous_value_model(x, u, True)
            static_ret1 = self.get_static_graph_result(
                feed={'data': inp, 'show_click': show_clk},
                fetch_list=[no_cvm],
            )[0]
        with self.static_graph():
            x = paddle.static.data(
                name='data',
                shape=[10, 10],
                dtype='float32',
            )
            u = paddle.static.data(
                name='show_click',
                shape=[10, 2],
                dtype='float32',
            )
            cvm = paddle.static.nn.continuous_value_model(x, u, False)
            static_ret2 = self.get_static_graph_result(
                feed={'data': inp, 'show_click': show_clk}, fetch_list=[cvm]
            )[0]
        np.testing.assert_allclose(static_ret1, cvm1, rtol=1e-5, atol=1e-06)
        np.testing.assert_allclose(static_ret2, cvm2, rtol=1e-5, atol=1e-06)

    def test_Flatten(self):
        inp = np.ones([3, 4, 4, 5], dtype='float32')
        with self.static_graph():
            t = paddle.static.data(
                name='data', shape=[3, 4, 4, 5], dtype='float32'
            )
            flatten = paddle.nn.Flatten()
            ret = flatten(t)
            static_ret = self.get_static_graph_result(
                feed={'data': inp}, fetch_list=[ret]
            )[0]
        with self.dynamic_graph():
            t = base.to_variable(inp)
            flatten = paddle.nn.Flatten()
            dy_ret = flatten(t)
            dy_ret_value = dy_ret.numpy()

        np.testing.assert_array_equal(static_ret, dy_ret_value)

        with self.static_graph():
            # the input of Linear must be Variable.
            def test_Variable():
                inp = np.ones([3, 32, 32], dtype='float32')
                linear = paddle.nn.Linear(
                    32,
                    4,
                    bias_attr=paddle.nn.initializer.Constant(value=1),
                )
                linear_ret1 = linear(inp)

            self.assertRaises(TypeError, test_Variable)

            # the input dtype of Linear must be float16, float32 or float64;
            # float16 can only be used on a GPU place
            def test_type():
                inp = np.ones([3, 32, 32], dtype='int32')
                linear = paddle.nn.Linear(
                    32,
                    4,
                    bias_attr=paddle.nn.initializer.Constant(value=1),
                )
                linear_ret2 = linear(inp)

            self.assertRaises(TypeError, test_type)

    def test_SyncBatchNorm(self):
        if core.is_compiled_with_cuda():
            with self.static_graph():
                t = paddle.static.data(
                    name='t', shape=[-1, 3, 5, 5], dtype='float32'
                )
                my_sync_bn = paddle.nn.SyncBatchNorm(3)
                ret = my_sync_bn(t)
                static_ret = self.get_static_graph_result(
                    feed={'t': np.ones([3, 3, 5, 5], dtype='float32')},
                    fetch_list=[ret],
                )[0]

            with self.dynamic_graph():
                t = np.ones([3, 3, 5, 5], dtype='float32')
                my_syncbn = paddle.nn.SyncBatchNorm(3)
                dy_ret = my_syncbn(base.to_variable(t))
                dy_ret_value = dy_ret.numpy()
            np.testing.assert_array_equal(static_ret, dy_ret_value)

    def test_relu(self):
        with self.static_graph():
            t = paddle.static.data(name='t', shape=[-1, 3, 3], dtype='float32')
            ret = F.relu(t)
            static_ret = self.get_static_graph_result(
                feed={'t': np.ones([3, 3], dtype='float32')}, fetch_list=[ret]
            )[0]

        with self.dynamic_graph():
            t = np.ones([3, 3], dtype='float32')
            dy_ret = F.relu(base.to_variable(t))
            dy_ret_value = dy_ret.numpy()

        np.testing.assert_allclose(static_ret, dy_ret_value, rtol=1e-05)

    def test_matmul(self):
        with self.static_graph():
            t = paddle.static.data(name='t', shape=[-1, 3, 3], dtype='float32')
            t2 = paddle.static.data(
                name='t2', shape=[-1, 3, 3], dtype='float32'
            )
            ret = paddle.matmul(t, t2)
            static_ret = self.get_static_graph_result(
                feed={
                    't': np.ones([3, 3], dtype='float32'),
                    't2': np.ones([3, 3], dtype='float32'),
                },
                fetch_list=[ret],
            )[0]

        with self.dynamic_graph():
            t = np.ones([3, 3], dtype='float32')
            t2 = np.ones([3, 3], dtype='float32')
            dy_ret = paddle.matmul(base.to_variable(t), base.to_variable(t2))
            dy_ret_value = dy_ret.numpy()

        np.testing.assert_allclose(static_ret, dy_ret_value, rtol=1e-05)

    def test_elementwise_math(self):
        n = np.ones([3, 3], dtype='float32')
        n2 = np.ones([3, 3], dtype='float32') * 1.1
        n3 = np.ones([3, 3], dtype='float32') * 2
        n4 = np.ones([3, 3], dtype='float32') * 3
        n5 = np.ones([3, 3], dtype='float32') * 4
        n6 = np.ones([3, 3], dtype='float32') * 5

        with self.static_graph():
            t = paddle.static.data(name='t', shape=[-1, 3, 3], dtype='float32')
            t2 = paddle.static.data(
                name='t2', shape=[-1, 3, 3], dtype='float32'
            )
            t3 = paddle.static.data(
                name='t3', shape=[-1, 3, 3], dtype='float32'
            )
            t4 = paddle.static.data(
                name='t4', shape=[-1, 3, 3], dtype='float32'
            )
            t5 = paddle.static.data(
                name='t5', shape=[-1, 3, 3], dtype='float32'
            )
            t6 = paddle.static.data(
                name='t6', shape=[-1, 3, 3], dtype='float32'
            )

            ret = paddle.add(t, t2)
            ret = paddle.pow(ret, t3)
            ret = paddle.divide(ret, t4)
            ret = paddle.subtract(ret, t5)
            ret = paddle.multiply(ret, t6)

            static_ret = self.get_static_graph_result(
                feed={'t': n, 't2': n2, 't3': n3, 't4': n4, 't5': n5, 't6': n6},
                fetch_list=[ret],
            )[0]

        with self.dynamic_graph():
            ret = paddle.add(to_variable(n), to_variable(n2))
            ret = paddle.pow(ret, to_variable(n3))
            ret = paddle.divide(ret, to_variable(n4))
            ret = paddle.subtract(ret, to_variable(n5))
            dy_ret = paddle.multiply(ret, to_variable(n6))
            dy_ret_value = dy_ret.numpy()

        np.testing.assert_allclose(static_ret, dy_ret_value, rtol=1e-05)

    def test_elementwise_minmax(self):
        n = np.ones([3, 3], dtype='float32')
        n2 = np.ones([3, 3], dtype='float32') * 2

        with self.dynamic_graph():
            min_ret = paddle.minimum(to_variable(n), to_variable(n2))
            max_ret = paddle.maximum(to_variable(n), to_variable(n2))
            min_ret_value = min_ret.numpy()
            max_ret_value = max_ret.numpy()

        np.testing.assert_allclose(n, min_ret_value, rtol=1e-05)
        np.testing.assert_allclose(n2, max_ret_value, rtol=1e-05)

    def test_conv2d_transpose(self):
        inp_np = np.arange(0, 24).reshape([2, 3, 2, 2]).astype('float32')
        with self.static_graph():
            img = paddle.static.data(
                name='pixel', shape=[-1, 3, 2, 2], dtype='float32'
            )
            out = paddle.static.nn.conv2d_transpose(
                input=img,
                num_filters=10,
                filter_size=27,
                act='sigmoid',
                bias_attr=paddle.nn.initializer.Constant(value=1),
            )
            static_rlt = self.get_static_graph_result(
                feed={'pixel': inp_np}, fetch_list=[out]
            )[0]
        with self.static_graph():
            img = paddle.static.data(
                name='pixel', shape=[-1, 3, 2, 2], dtype='float32'
            )
            conv2d_transpose = paddle.nn.Conv2DTranspose(
                3,
                10,
                27,
                bias_attr=paddle.nn.initializer.Constant(value=1),
            )
            out = conv2d_transpose(img)
            out = paddle.nn.functional.sigmoid(out)
            static_rlt2 = self.get_static_graph_result(
                feed={'pixel': inp_np}, fetch_list=[out]
            )[0]
        with self.dynamic_graph():
            conv2d_transpose = paddle.nn.Conv2DTranspose(
                3,
                10,
                27,
                bias_attr=paddle.nn.initializer.Constant(value=1),
            )
            dy_rlt = conv2d_transpose(base.to_variable(inp_np))
            dy_rlt = paddle.nn.functional.sigmoid(dy_rlt)
            dy_rlt_value = dy_rlt.numpy()
        np.testing.assert_allclose(static_rlt2, static_rlt, rtol=1e-05)
        np.testing.assert_allclose(dy_rlt_value, static_rlt2, rtol=1e-05)

        with self.dynamic_graph():
            images = np.ones([2, 3, 5, 5], dtype='float32')
            custom_weight = np.random.randn(3, 3, 2, 2).astype("float32")
            weight_attr = fluid.ParamAttr(
                initializer=paddle.nn.initializer.Assign(custom_weight)
            )
            conv2d1 = paddle.nn.Conv2DTranspose(3, 3, [2, 2])
            conv2d2 = paddle.nn.Conv2DTranspose(
                3,
                3,
                [2, 2],
                weight_attr=weight_attr,
451
            )
452 453 454 455 456 457 458
            dy_ret1 = conv2d1(base.to_variable(images))
            dy_ret2 = conv2d2(base.to_variable(images))
            self.assertFalse(np.array_equal(dy_ret1.numpy(), dy_ret2.numpy()))

            conv2d1_weight_np = conv2d1.weight.numpy()
            conv2d1_bias = conv2d1.bias
            self.assertFalse(
                np.array_equal(conv2d1_weight_np, conv2d2.weight.numpy())
            )
            conv2d2.weight.set_value(conv2d1_weight_np)
            np.testing.assert_array_equal(
                conv2d1_weight_np, conv2d2.weight.numpy()
            )
            conv2d2.bias.set_value(conv2d1_bias)
            dy_ret1 = conv2d1(base.to_variable(images))
            dy_ret2 = conv2d2(base.to_variable(images))
            np.testing.assert_array_equal(dy_ret1.numpy(), dy_ret2.numpy())

            conv2d2.weight = conv2d1.weight
            conv2d2.bias = conv2d1.bias
            np.testing.assert_array_equal(
                conv2d1.weight.numpy(), conv2d2.weight.numpy()
            )
            np.testing.assert_array_equal(
                conv2d1.bias.numpy(), conv2d2.bias.numpy()
            )

        with self.static_graph():
            # the input of Conv2DTranspose must be Variable.
            def test_Variable():
                images = np.ones([2, 3, 5, 5], dtype='float32')
                conv2d = paddle.nn.Conv2DTranspose(3, 3, [2, 2])
                conv2d_ret1 = conv2d(images)

            self.assertRaises(TypeError, test_Variable)

            # the input dtype of Conv2DTranspose must be float16, float32 or float64;
            # float16 can only be used on a GPU place
            def test_type():
                images = paddle.static.data(
                    name='pixel', shape=[-1, 3, 5, 5], dtype='int32'
                )
                conv2d = paddle.nn.Conv2DTranspose(3, 3, [2, 2])
                conv2d_ret2 = conv2d(images)

            self.assertRaises(TypeError, test_type)

    def test_bilinear_tensor_product(self):
        inp_np_x = np.array([[1, 2, 3]]).astype('float32')
        inp_np_y = np.array([[4, 5, 6]]).astype('float32')

        with self.static_graph():
            data_x = paddle.static.data(name='x', shape=[1, 3], dtype="float32")
            data_y = paddle.static.data(name='y', shape=[1, 3], dtype="float32")
            out = paddle.static.nn.common.bilinear_tensor_product(
                data_x,
                data_y,
                6,
                bias_attr=paddle.nn.initializer.Constant(value=1),
                act='sigmoid',
            )

            static_rlt = self.get_static_graph_result(
                feed={'x': inp_np_x, 'y': inp_np_y}, fetch_list=[out]
            )[0]

        with self.static_graph():
            data_x = paddle.static.data(name='x', shape=[1, 3], dtype="float32")
            data_y = paddle.static.data(name='y', shape=[1, 3], dtype="float32")
            btp = paddle.nn.Bilinear(
                3,
                3,
                6,
                bias_attr=paddle.nn.initializer.Constant(value=1),
            )
            out = btp(data_x, data_y)
            out = paddle.nn.functional.sigmoid(out)
            static_rlt2 = self.get_static_graph_result(
                feed={'x': inp_np_x, 'y': inp_np_y}, fetch_list=[out]
            )[0]
        with self.dynamic_graph():
            btp = paddle.nn.Bilinear(
                3,
                3,
                6,
                bias_attr=paddle.nn.initializer.Constant(value=1),
            )
            dy_rlt = btp(base.to_variable(inp_np_x), base.to_variable(inp_np_y))
            dy_rlt = paddle.nn.functional.sigmoid(dy_rlt)
            dy_rlt_value = dy_rlt.numpy()

        with self.dynamic_graph():
            btp2 = paddle.nn.Bilinear(3, 3, 6)
            dy_rlt2 = btp2(
                base.to_variable(inp_np_x), base.to_variable(inp_np_y)
            )
            dy_rlt2 = paddle.nn.functional.sigmoid(dy_rlt2)
            dy_rlt2_value = dy_rlt2.numpy()

        with self.static_graph():
            data_x2 = paddle.static.data(
                name='x', shape=[1, 3], dtype="float32"
            )
            data_y2 = paddle.static.data(
                name='y', shape=[1, 3], dtype="float32"
            )
            out2 = paddle.static.nn.common.bilinear_tensor_product(
                data_x2, data_y2, 6, act='sigmoid'
            )

            static_rlt3 = self.get_static_graph_result(
                feed={'x': inp_np_x, 'y': inp_np_y}, fetch_list=[out2]
            )[0]

        np.testing.assert_array_equal(dy_rlt2_value, static_rlt3)
        np.testing.assert_array_equal(static_rlt2, static_rlt)
        np.testing.assert_array_equal(dy_rlt_value, static_rlt)

        with self.dynamic_graph():
            custom_weight = np.random.randn(6, 3, 3).astype("float32")
            weight_attr = fluid.ParamAttr(
                initializer=paddle.nn.initializer.Assign(custom_weight)
            )
            btp1 = paddle.nn.Bilinear(3, 3, 6)
            btp2 = paddle.nn.Bilinear(3, 3, 6, weight_attr=weight_attr)
            dy_rlt1 = btp1(
                base.to_variable(inp_np_x), base.to_variable(inp_np_y)
            )
            dy_rlt1 = paddle.nn.functional.sigmoid(dy_rlt1)
            dy_rlt2 = btp2(
                base.to_variable(inp_np_x), base.to_variable(inp_np_y)
            )
            dy_rlt2 = paddle.nn.functional.sigmoid(dy_rlt2)
            self.assertFalse(np.array_equal(dy_rlt1.numpy(), dy_rlt2.numpy()))
            btp2.weight.set_value(btp1.weight.numpy())
            btp2.bias.set_value(btp1.bias)
            dy_rlt1 = btp1(
                base.to_variable(inp_np_x), base.to_variable(inp_np_y)
            )
            dy_rlt2 = btp2(
                base.to_variable(inp_np_x), base.to_variable(inp_np_y)
            )
            np.testing.assert_array_equal(dy_rlt1.numpy(), dy_rlt2.numpy())

            btp2.weight = btp1.weight
            btp2.bias = btp1.bias
            np.testing.assert_array_equal(
                btp1.weight.numpy(), btp2.weight.numpy()
            )
            np.testing.assert_array_equal(btp1.bias.numpy(), btp2.bias.numpy())

    def test_embeding(self):
        inp_word = np.array([[[1]]]).astype('int64')
        dict_size = 20
        with self.static_graph():
            data_t = paddle.static.data(
                name='word', shape=[-1, 1], dtype='int64'
            )
            data_t.desc.set_need_check_feed(False)
            emb = layers.embedding(
                input=data_t,
                size=[dict_size, 32],
                param_attr='emb.w',
                is_sparse=False,
            )
            static_rlt = self.get_static_graph_result(
                feed={'word': inp_word}, fetch_list=[emb]
            )[0]
        with self.static_graph():
            data_t = paddle.static.data(
                name='word', shape=[-1, 1], dtype='int64'
            )
            data_t.desc.set_need_check_feed(False)
            emb2 = paddle.nn.Embedding(
                dict_size, 32, weight_attr='emb.w', sparse=False
            )
            emb_rlt = emb2(data_t)
            static_rlt2 = self.get_static_graph_result(
                feed={'word': inp_word}, fetch_list=[emb_rlt]
            )[0]
        with self.dynamic_graph():
            emb2 = paddle.nn.Embedding(
                dict_size, 32, weight_attr='emb.w', sparse=False
            )
            dy_rlt = emb2(base.to_variable(inp_word))
            dy_rlt_value = dy_rlt.numpy()

        self.assertTrue(np.allclose(static_rlt2, static_rlt))
        self.assertTrue(np.allclose(dy_rlt_value, static_rlt))

        with self.dynamic_graph():
            custom_weight = np.random.randn(dict_size, 32).astype("float32")
            weight_attr = fluid.ParamAttr(
                initializer=paddle.nn.initializer.Assign(custom_weight)
            )
            emb1 = paddle.nn.Embedding(dict_size, 32, sparse=False)
            emb2 = paddle.nn.Embedding(
                dict_size, 32, weight_attr=weight_attr, sparse=False
            )
            rep1 = emb1(base.to_variable(inp_word))
            rep2 = emb2(base.to_variable(inp_word))
            self.assertFalse(np.array_equal(emb1.weight.numpy(), custom_weight))
            np.testing.assert_array_equal(emb2.weight.numpy(), custom_weight)
            self.assertFalse(np.array_equal(rep1.numpy(), rep2.numpy()))
            emb2.weight.set_value(emb1.weight.numpy())
            rep2 = emb2(base.to_variable(inp_word))
            np.testing.assert_array_equal(rep1.numpy(), rep2.numpy())

            emb2.weight = emb1.weight
            np.testing.assert_array_equal(
                emb1.weight.numpy(), emb2.weight.numpy()
            )

    def test_one_hot(self):
        with self.dynamic_graph():
            label = fluid.dygraph.to_variable(np.array([[1], [1], [3], [0]]))
            one_hot_label1 = paddle.nn.functional.one_hot(label, 4)
            one_hot_label2 = paddle.nn.functional.one_hot(
                label, fluid.dygraph.to_variable(np.array([4]))
            )
            np.testing.assert_array_equal(
                one_hot_label1.numpy(), one_hot_label2.numpy()
            )

    def test_split(self):
        with self.dynamic_graph():
            input = fluid.dygraph.to_variable(np.random.random((3, 8, 5)))
            x0, x1 = paddle.split(input, num_or_sections=2, axis=1)
            x00, x11 = paddle.split(
                input,
                num_or_sections=2,
                axis=fluid.dygraph.to_variable(np.array([1])),
            )
            np.testing.assert_array_equal(x0.numpy(), x00.numpy())
            np.testing.assert_array_equal(x1.numpy(), x11.numpy())

    def test_topk(self):
        with self.dynamic_graph():
            input = fluid.dygraph.to_variable(np.random.random((13, 11)))
            top5_values1, top5_indices1 = paddle.topk(input, k=5)
            top5_values2, top5_indices2 = paddle.topk(
                input, k=fluid.dygraph.to_variable(np.array([5]))
            )
            np.testing.assert_array_equal(
                top5_values1.numpy(), top5_values2.numpy()
            )
            np.testing.assert_array_equal(
                top5_indices1.numpy(), top5_indices2.numpy()
            )

    def test_conv3d(self):
        with self.static_graph():
            images = paddle.static.data(
                name='pixel', shape=[-1, 3, 6, 6, 6], dtype='float32'
            )
            ret = paddle.static.nn.conv3d(
                input=images, num_filters=3, filter_size=2
            )
            static_ret = self.get_static_graph_result(
                feed={'pixel': np.ones([2, 3, 6, 6, 6], dtype='float32')},
                fetch_list=[ret],
            )[0]

        with self.static_graph():
            images = paddle.static.data(
                name='pixel', shape=[-1, 3, 6, 6, 6], dtype='float32'
            )
            conv3d = paddle.nn.Conv3D(
                in_channels=3, out_channels=3, kernel_size=2
            )
            ret = conv3d(images)
            static_ret2 = self.get_static_graph_result(
                feed={'pixel': np.ones([2, 3, 6, 6, 6], dtype='float32')},
                fetch_list=[ret],
            )[0]

        with self.dynamic_graph():
            images = np.ones([2, 3, 6, 6, 6], dtype='float32')
            conv3d = paddle.nn.Conv3D(
                in_channels=3, out_channels=3, kernel_size=2
            )
            dy_ret = conv3d(base.to_variable(images))
            dy_rlt_value = dy_ret.numpy()

        np.testing.assert_allclose(static_ret, dy_rlt_value, rtol=1e-05)
        np.testing.assert_allclose(static_ret, static_ret2, rtol=1e-05)

        with self.dynamic_graph():
            images = np.ones([2, 3, 6, 6, 6], dtype='float32')
            custom_weight = np.random.randn(3, 3, 2, 2, 2).astype("float32")
            weight_attr = fluid.ParamAttr(
                initializer=paddle.nn.initializer.Assign(custom_weight)
            )
            conv3d1 = paddle.nn.Conv3D(
                in_channels=3, out_channels=3, kernel_size=2
            )
            conv3d2 = paddle.nn.Conv3D(
                in_channels=3,
                out_channels=3,
                kernel_size=2,
                weight_attr=weight_attr,
            )
            dy_ret1 = conv3d1(base.to_variable(images))
            dy_ret2 = conv3d2(base.to_variable(images))
            self.assertFalse(np.array_equal(dy_ret1.numpy(), dy_ret2.numpy()))

            conv3d1_weight_np = conv3d1.weight.numpy()
            conv3d1_bias = conv3d1.bias
            self.assertFalse(
                np.array_equal(conv3d1_weight_np, conv3d2.weight.numpy())
            )
            conv3d2.weight.set_value(conv3d1_weight_np)
            np.testing.assert_array_equal(
                conv3d1_weight_np, conv3d2.weight.numpy()
            )
            conv3d1.bias.set_value(conv3d1_bias)
            dy_ret1 = conv3d1(base.to_variable(images))
            dy_ret2 = conv3d2(base.to_variable(images))
            np.testing.assert_array_equal(dy_ret1.numpy(), dy_ret2.numpy())

            conv3d2.weight = conv3d1.weight
            conv3d2.bias = conv3d1.bias
            np.testing.assert_array_equal(
                conv3d1.weight.numpy(), conv3d2.weight.numpy()
            )
            np.testing.assert_array_equal(
                conv3d1.bias.numpy(), conv3d2.bias.numpy()
            )

    def test_group_norm(self):
        if core.is_compiled_with_cuda():
            place = core.CUDAPlace(0)
        else:
            place = core.CPUPlace()

        shape = (2, 4, 3, 3)

        input = np.random.random(shape).astype('float32')

        with self.static_graph():
            X = paddle.static.data(
                name='X', shape=shape, dtype='float32', lod_level=1
            )
            ret = paddle.static.nn.group_norm(
                input=X,
                groups=2,
                param_attr=paddle.nn.initializer.Uniform(low=-0.5, high=0.5),
                bias_attr=paddle.nn.initializer.Constant(value=1),
            )
            static_ret = self.get_static_graph_result(
                feed={
                    'X': fluid.create_lod_tensor(
                        data=input, recursive_seq_lens=[[1, 1]], place=place
                    )
                },
                fetch_list=[ret],
                with_lod=True,
            )[0]

        with self.static_graph():
            X = paddle.static.data(
                name='X', shape=shape, dtype='float32', lod_level=1
            )
            groupNorm = paddle.nn.GroupNorm(
                num_channels=shape[1],
                num_groups=2,
                weight_attr=paddle.nn.initializer.Uniform(low=-0.5, high=0.5),
                bias_attr=paddle.nn.initializer.Constant(value=1),
            )
            ret = groupNorm(X)
            static_ret2 = self.get_static_graph_result(
                feed={
                    'X': fluid.create_lod_tensor(
                        data=input, recursive_seq_lens=[[1, 1]], place=place
                    )
                },
                fetch_list=[ret],
                with_lod=True,
            )[0]

        with self.dynamic_graph():
            groupNorm = paddle.nn.GroupNorm(
                num_channels=shape[1],
                num_groups=2,
                weight_attr=paddle.nn.initializer.Uniform(low=-0.5, high=0.5),
                bias_attr=paddle.nn.initializer.Constant(value=1),
            )
            dy_ret = groupNorm(base.to_variable(input))
            dy_rlt_value = dy_ret.numpy()

        np.testing.assert_allclose(static_ret, dy_rlt_value, rtol=1e-05)
        np.testing.assert_allclose(static_ret, static_ret2, rtol=1e-05)

    def test_instance_norm(self):
        if core.is_compiled_with_cuda():
            place = core.CUDAPlace(0)
        else:
            place = core.CPUPlace()

        shape = (2, 4, 3, 3)

        input = np.random.random(shape).astype('float32')

        with self.static_graph():
            X = paddle.static.data(name='X', shape=shape, dtype='float32')
            ret = paddle.static.nn.instance_norm(input=X)
            static_ret = self.get_static_graph_result(
                feed={'X': input}, fetch_list=[ret]
            )[0]

        with self.static_graph():
            X = paddle.static.data(name='X', shape=shape, dtype='float32')
            instanceNorm = paddle.nn.InstanceNorm2D(num_features=shape[1])
            ret = instanceNorm(X)
            static_ret2 = self.get_static_graph_result(
                feed={'X': input}, fetch_list=[ret]
            )[0]

        with self.dynamic_graph():
            instanceNorm = paddle.nn.InstanceNorm2D(num_features=shape[1])
            dy_ret = instanceNorm(base.to_variable(input))
            dy_rlt_value = dy_ret.numpy()

        with self.dynamic_graph():
            instanceNorm = paddle.nn.InstanceNorm2D(num_features=shape[1])
            dy_ret = instanceNorm(base.to_variable(input))
            dy_rlt_value2 = dy_ret.numpy()

        np.testing.assert_allclose(static_ret, dy_rlt_value, rtol=1e-05)
        np.testing.assert_allclose(static_ret, dy_rlt_value2, rtol=1e-05)
        np.testing.assert_allclose(static_ret, static_ret2, rtol=1e-05)

        with self.static_graph():
            # the input of InstanceNorm must be Variable.
            def test_Variable():
                instanceNorm = paddle.nn.InstanceNorm2D(num_features=shape[1])
                ret1 = instanceNorm(input)

            self.assertRaises(TypeError, test_Variable)

            # the input dtype of InstanceNorm must be float32 or float64
            def test_type():
                input = np.random.random(shape).astype('int32')
                instanceNorm = paddle.nn.InstanceNorm2D(num_features=shape[1])
                ret2 = instanceNorm(input)

            self.assertRaises(TypeError, test_type)

    def test_spectral_norm(self):
        if core.is_compiled_with_cuda():
            place = core.CUDAPlace(0)
        else:
            place = core.CPUPlace()

        shape = (2, 4, 3, 3)

        input = np.random.random(shape).astype('float32')

        with self.static_graph():
            Weight = paddle.static.data(
                name='Weight', shape=shape, dtype='float32', lod_level=1
            )
            ret = paddle.static.nn.spectral_norm(
                weight=Weight, dim=1, power_iters=2
            )
            static_ret = self.get_static_graph_result(
                feed={
                    'Weight': fluid.create_lod_tensor(
                        data=input, recursive_seq_lens=[[1, 1]], place=place
                    ),
                },
                fetch_list=[ret],
                with_lod=True,
            )[0]

        with self.static_graph():
            Weight = paddle.static.data(
                name='Weight', shape=shape, dtype='float32', lod_level=1
            )
            spectralNorm = paddle.nn.SpectralNorm(shape, dim=1, power_iters=2)
            ret = spectralNorm(Weight)
            static_ret2 = self.get_static_graph_result(
                feed={
                    'Weight': fluid.create_lod_tensor(
                        data=input, recursive_seq_lens=[[1, 1]], place=place
                    )
                },
                fetch_list=[ret],
                with_lod=True,
            )[0]

        with self.dynamic_graph():
            spectralNorm = paddle.nn.SpectralNorm(shape, dim=1, power_iters=2)
            dy_ret = spectralNorm(base.to_variable(input))
            dy_rlt_value = dy_ret.numpy()

        np.testing.assert_allclose(static_ret, dy_rlt_value, rtol=1e-05)
        np.testing.assert_allclose(static_ret, static_ret2, rtol=1e-05)

    def test_conv3d_transpose(self):
        input_array = (
            np.arange(0, 48).reshape([2, 3, 2, 2, 2]).astype('float32')
        )

        with self.static_graph():
            img = paddle.static.data(
                name='pixel', shape=[-1, 3, 2, 2, 2], dtype='float32'
            )
            out = paddle.static.nn.conv3d_transpose(
                input=img, num_filters=12, filter_size=12, use_cudnn=True
            )
            static_rlt = self.get_static_graph_result(
                feed={'pixel': input_array}, fetch_list=[out]
            )[0]
        with self.static_graph():
            img = paddle.static.data(
                name='pixel', shape=[-1, 3, 2, 2, 2], dtype='float32'
            )
            conv3d_transpose = paddle.nn.Conv3DTranspose(
                in_channels=3, out_channels=12, kernel_size=12
            )
            out = conv3d_transpose(img)
            static_rlt2 = self.get_static_graph_result(
                feed={'pixel': input_array}, fetch_list=[out]
            )[0]
        with self.dynamic_graph():
            conv3d_transpose = paddle.nn.Conv3DTranspose(
                in_channels=3, out_channels=12, kernel_size=12
            )
            dy_rlt = conv3d_transpose(base.to_variable(input_array))
            dy_rlt_value = dy_rlt.numpy()
        np.testing.assert_allclose(static_rlt2, static_rlt, rtol=1e-05)
        np.testing.assert_allclose(dy_rlt_value, static_rlt, rtol=1e-05)

        with self.dynamic_graph():
            images = np.ones([2, 3, 6, 6, 6], dtype='float32')
            custom_weight = np.random.randn(3, 3, 2, 2, 2).astype("float32")
            weight_attr = fluid.ParamAttr(
                initializer=paddle.nn.initializer.Assign(custom_weight)
            )
            conv3d1 = paddle.nn.Conv3DTranspose(
                in_channels=3,
                out_channels=3,
                kernel_size=2,
                bias_attr='conv3d1_b',
            )
            conv3d2 = paddle.nn.Conv3DTranspose(
                in_channels=3,
                out_channels=3,
                kernel_size=2,
                weight_attr=weight_attr,
                bias_attr='conv3d2_b',
            )
            dy_ret1 = conv3d1(base.to_variable(images))
            dy_ret2 = conv3d2(base.to_variable(images))
            self.assertFalse(np.array_equal(dy_ret1.numpy(), dy_ret2.numpy()))

            conv3d1_weight_np = conv3d1.weight.numpy()
            conv3d1_bias = conv3d1.bias
            self.assertFalse(
                np.array_equal(conv3d1_weight_np, conv3d2.weight.numpy())
            )
            conv3d2.weight.set_value(conv3d1_weight_np)
            np.testing.assert_array_equal(
                conv3d1_weight_np, conv3d2.weight.numpy()
            )
            conv3d1.bias.set_value(conv3d1_bias)
            dy_ret1 = conv3d1(base.to_variable(images))
            dy_ret2 = conv3d2(base.to_variable(images))
            np.testing.assert_array_equal(dy_ret1.numpy(), dy_ret2.numpy())

            conv3d2.weight = conv3d1.weight
            conv3d2.bias = conv3d1.bias
            np.testing.assert_array_equal(
                conv3d1.weight.numpy(), conv3d2.weight.numpy()
            )
            np.testing.assert_array_equal(
                conv3d1.bias.numpy(), conv3d2.bias.numpy()
            )

    def test_while_loop(self):
        with self.static_graph():
            i = paddle.tensor.fill_constant(shape=[1], dtype='int64', value=0)
            ten = paddle.tensor.fill_constant(
                shape=[1], dtype='int64', value=10
            )

            def cond(i):
                return paddle.less_than(i, ten)

            def body(i):
                return i + 1

            out = paddle.static.nn.while_loop(cond, body, [i])
            static_ret = self.get_static_graph_result(feed={}, fetch_list=out)

        with self.dynamic_graph():
            i = paddle.tensor.fill_constant(shape=[1], dtype='int64', value=0)
            ten = paddle.tensor.fill_constant(
                shape=[1], dtype='int64', value=10
            )

            def cond1(i):
                return paddle.less_than(i, ten)

            def body1(i):
                return i + 1

            dy_ret = paddle.static.nn.while_loop(cond1, body1, [i])
            with self.assertRaises(ValueError):
                j = paddle.tensor.fill_constant(
                    shape=[1], dtype='int64', value=0
                )

                def body2(i):
                    return i + 1, i + 2

                paddle.static.nn.while_loop(cond1, body2, [j])

        np.testing.assert_array_equal(static_ret[0], dy_ret[0].numpy())

    def test_compare(self):
        value_a = np.arange(3)
        value_b = np.arange(3)
        # less than
        with self.static_graph():
            a = paddle.static.data(name='a', shape=[-1, 1], dtype='int64')
            b = paddle.static.data(name='b', shape=[-1, 1], dtype='int64')
            cond = paddle.less_than(x=a, y=b)
            static_ret = self.get_static_graph_result(
                feed={"a": value_a, "b": value_b}, fetch_list=[cond]
            )[0]
        with self.dynamic_graph():
            da = base.to_variable(value_a)
            db = base.to_variable(value_b)
            dcond = paddle.less_than(x=da, y=db)

            for i in range(len(static_ret)):
                self.assertTrue(dcond.numpy()[i] == static_ret[i])

        # less equal
        with self.static_graph():
            a1 = paddle.static.data(name='a1', shape=[-1, 1], dtype='int64')
            b1 = paddle.static.data(name='b1', shape=[-1, 1], dtype='int64')
            cond1 = paddle.less_equal(x=a1, y=b1)
            static_ret1 = self.get_static_graph_result(
                feed={"a1": value_a, "b1": value_b}, fetch_list=[cond1]
            )[0]
        with self.dynamic_graph():
            da1 = base.to_variable(value_a)
            db1 = base.to_variable(value_b)
            dcond1 = paddle.less_equal(x=da1, y=db1)

            for i in range(len(static_ret1)):
                self.assertTrue(dcond1.numpy()[i] == static_ret1[i])

        # greater than
        with self.static_graph():
            a2 = paddle.static.data(name='a2', shape=[-1, 1], dtype='int64')
            b2 = paddle.static.data(name='b2', shape=[-1, 1], dtype='int64')
            cond2 = paddle.greater_than(x=a2, y=b2)
            static_ret2 = self.get_static_graph_result(
                feed={"a2": value_a, "b2": value_b}, fetch_list=[cond2]
            )[0]
        with self.dynamic_graph():
            da2 = base.to_variable(value_a)
            db2 = base.to_variable(value_b)
            dcond2 = paddle.greater_than(x=da2, y=db2)

            for i in range(len(static_ret2)):
                self.assertTrue(dcond2.numpy()[i] == static_ret2[i])

        # greater equal
        with self.static_graph():
            a3 = paddle.static.data(name='a3', shape=[-1, 1], dtype='int64')
            b3 = paddle.static.data(name='b3', shape=[-1, 1], dtype='int64')
            cond3 = paddle.greater_equal(x=a3, y=b3)
            static_ret3 = self.get_static_graph_result(
                feed={"a3": value_a, "b3": value_b}, fetch_list=[cond3]
            )[0]
        with self.dynamic_graph():
            da3 = base.to_variable(value_a)
            db3 = base.to_variable(value_b)
            dcond3 = paddle.greater_equal(x=da3, y=db3)

            for i in range(len(static_ret3)):
                self.assertTrue(dcond3.numpy()[i] == static_ret3[i])

        # equal
        with self.static_graph():
            a4 = paddle.static.data(name='a4', shape=[-1, 1], dtype='int64')
            b4 = paddle.static.data(name='b4', shape=[-1, 1], dtype='int64')
            cond4 = paddle.equal(x=a4, y=b4)
            static_ret4 = self.get_static_graph_result(
                feed={"a4": value_a, "b4": value_b}, fetch_list=[cond4]
            )[0]
        with self.dynamic_graph():
            da4 = base.to_variable(value_a)
            db4 = base.to_variable(value_b)
            dcond4 = paddle.equal(x=da4, y=db4)

            for i in range(len(static_ret4)):
                self.assertTrue(dcond4.numpy()[i] == static_ret4[i])

        # not equal
        with self.static_graph():
            a5 = paddle.static.data(name='a5', shape=[-1, 1], dtype='int64')
            b5 = paddle.static.data(name='b5', shape=[-1, 1], dtype='int64')
            cond5 = paddle.equal(x=a5, y=b5)
            static_ret5 = self.get_static_graph_result(
                feed={"a5": value_a, "b5": value_b}, fetch_list=[cond5]
            )[0]
        with self.dynamic_graph():
            da5 = base.to_variable(value_a)
            db5 = base.to_variable(value_b)
            dcond5 = paddle.equal(x=da5, y=db5)

            for i in range(len(static_ret5)):
                self.assertTrue(dcond5.numpy()[i] == static_ret5[i])

    def test_cond(self):
        def less_than_branch(a, b):
            return paddle.add(a, b)

        def greater_equal_branch(a, b):
            return paddle.subtract(a, b)

        with self.static_graph():
            a = paddle.tensor.fill_constant(
                shape=[1], dtype='float32', value=0.1
            )
            b = paddle.tensor.fill_constant(
                shape=[1], dtype='float32', value=0.23
            )
            out = paddle.static.nn.cond(
                a >= b,
                lambda: greater_equal_branch(a, b),
                lambda: less_than_branch(a, b),
            )
            place = (
                fluid.CUDAPlace(0)
                if core.is_compiled_with_cuda()
                else fluid.CPUPlace()
            )
            exe = fluid.Executor(place)
            ret = exe.run(fetch_list=[out])
            static_res = ret[0]

        with self.dynamic_graph():
            a = fluid.dygraph.to_variable(np.array([0.1]).astype('float32'))
            b = fluid.dygraph.to_variable(np.array([0.23]).astype('float32'))
            out = paddle.static.nn.cond(
                a < b,
                lambda: less_than_branch(a, b),
                lambda: greater_equal_branch(a, b),
            )
            out2 = paddle.static.nn.cond(
                a >= b,
                lambda: greater_equal_branch(a, b),
                lambda: less_than_branch(a, b),
            )
            dynamic_res = out.numpy()
            dynamic_res2 = out2.numpy()
            np.testing.assert_array_equal(dynamic_res, dynamic_res2)
            with self.assertRaises(TypeError):
                paddle.static.nn.cond(a < b, 'str', 'str')
            with self.assertRaises(TypeError):
                paddle.static.nn.cond(a >= b, 'str', 'str')

        np.testing.assert_array_equal(static_res, dynamic_res)

    def test_case(self):
        def fn_1():
            return paddle.tensor.fill_constant(
                shape=[1, 2], dtype='float32', value=1
            )

        def fn_2():
            return paddle.tensor.fill_constant(
                shape=[2, 2], dtype='int32', value=2
            )

        def fn_3():
            return paddle.tensor.fill_constant(
                shape=[3], dtype='int32', value=3
            )

        with self.static_graph():
            x = paddle.tensor.fill_constant(
                shape=[1], dtype='float32', value=0.3
            )
            y = paddle.tensor.fill_constant(
                shape=[1], dtype='float32', value=0.1
            )
            z = paddle.tensor.fill_constant(
                shape=[1], dtype='float32', value=0.2
            )

            pred_1 = paddle.less_than(z, x)  # true: 0.2 < 0.3
            pred_2 = paddle.less_than(x, y)  # false: 0.3 < 0.1
            pred_3 = paddle.equal(x, y)  # false: 0.3 == 0.1

            out_1 = paddle.static.nn.case(
                pred_fn_pairs=[(pred_1, fn_1), (pred_2, fn_2)], default=fn_3
            )
            out_2 = paddle.static.nn.case(
                pred_fn_pairs=[(pred_2, fn_2), (pred_3, fn_3)]
            )

            place = (
                fluid.CUDAPlace(0)
                if core.is_compiled_with_cuda()
                else fluid.CPUPlace()
            )
            exe = fluid.Executor(place)
            static_res1, static_res2 = exe.run(fetch_list=[out_1, out_2])

        with self.dynamic_graph():
            x = paddle.tensor.fill_constant(
                shape=[1], dtype='float32', value=0.3
            )
            y = paddle.tensor.fill_constant(
                shape=[1], dtype='float32', value=0.1
            )
            z = paddle.tensor.fill_constant(
                shape=[1], dtype='float32', value=0.2
            )

            pred_1 = paddle.less_than(z, x)  # true: 0.2 < 0.3
            pred_2 = paddle.less_than(x, y)  # false: 0.3 < 0.1
            pred_3 = paddle.equal(x, y)  # false: 0.3 == 0.1

            out_1 = paddle.static.nn.case(
                pred_fn_pairs=[(pred_1, fn_1), (pred_2, fn_2)], default=fn_3
            )
            out_2 = paddle.static.nn.case(
                pred_fn_pairs=[(pred_2, fn_2), (pred_3, fn_3)]
            )
            dynamic_res1 = out_1.numpy()
            dynamic_res2 = out_2.numpy()

        np.testing.assert_array_equal(static_res1, dynamic_res1)
        np.testing.assert_array_equal(static_res2, dynamic_res2)

    def test_switch_case(self):
        def fn_1():
            return paddle.tensor.fill_constant(
                shape=[1, 2], dtype='float32', value=1
            )

        def fn_2():
            return paddle.tensor.fill_constant(
                shape=[2, 2], dtype='int32', value=2
            )

        def fn_3():
            return paddle.tensor.fill_constant(
                shape=[3], dtype='int32', value=3
            )

        with self.static_graph():
            index_1 = paddle.tensor.fill_constant(
                shape=[1], dtype='int32', value=1
            )
            index_2 = paddle.tensor.fill_constant(
                shape=[1], dtype='int32', value=2
            )

            out_1 = paddle.static.nn.switch_case(
                branch_index=index_1,
                branch_fns={1: fn_1, 2: fn_2},
                default=fn_3,
            )
            out_2 = paddle.static.nn.switch_case(
                branch_index=index_2,
                branch_fns=[(1, fn_1), (2, fn_2)],
                default=fn_3,
            )
            out_3 = paddle.static.nn.switch_case(
                branch_index=index_2,
                branch_fns=[(0, fn_1), (4, fn_2), (7, fn_3)],
            )

            place = (
                fluid.CUDAPlace(0)
                if core.is_compiled_with_cuda()
                else fluid.CPUPlace()
            )
            exe = fluid.Executor(place)
            static_res1, static_res2, static_res3 = exe.run(
                fetch_list=[out_1, out_2, out_3]
            )

        with self.dynamic_graph():
            index_1 = paddle.tensor.fill_constant(
                shape=[1], dtype='int32', value=1
            )
            index_2 = paddle.tensor.fill_constant(
                shape=[1], dtype='int32', value=2
            )

            out_1 = paddle.static.nn.switch_case(
                branch_index=index_1,
                branch_fns={1: fn_1, 2: fn_2},
                default=fn_3,
            )
            out_2 = paddle.static.nn.switch_case(
                branch_index=index_2,
                branch_fns=[(1, fn_1), (2, fn_2)],
                default=fn_3,
            )
            out_3 = paddle.static.nn.switch_case(
                branch_index=index_2,
                branch_fns=[(0, fn_1), (4, fn_2), (7, fn_3)],
            )

            dynamic_res1 = out_1.numpy()
            dynamic_res2 = out_2.numpy()
            dynamic_res3 = out_3.numpy()

        np.testing.assert_array_equal(static_res1, dynamic_res1)
        np.testing.assert_array_equal(static_res2, dynamic_res2)
        np.testing.assert_array_equal(static_res3, dynamic_res3)

    def test_crop_tensor(self):
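        # Crop shapes/offsets are given as Python constants, as 1-D tensors,
        # and as lists mixing ints and variables; only checks op creation.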
        with self.static_graph():
            x = paddle.static.data(
                name="x1", shape=[-1, 6, 5, 8], dtype="float32"
            )

            dim1 = paddle.static.data(name="dim1", shape=[1], dtype="float32")
            dim2 = paddle.static.data(name="dim2", shape=[1], dtype="float32")
            crop_shape1 = (1, 2, 4, 4)
            crop_shape2 = paddle.static.data(
                name="crop_shape", shape=[4], dtype="float32"
            )
            crop_shape3 = [-1, dim1, dim2, 4]
            crop_offsets1 = [0, 0, 1, 0]
            crop_offsets2 = paddle.static.data(
                name="crop_offset", shape=[4], dtype="float32"
            )
            crop_offsets3 = [0, dim1, dim2, 0]

            out1 = paddle.crop(x, shape=crop_shape1, offsets=crop_offsets1)
            out2 = paddle.crop(x, shape=crop_shape2, offsets=crop_offsets2)
            out3 = paddle.crop(x, shape=crop_shape3, offsets=crop_offsets3)

            self.assertIsNotNone(out1)
            self.assertIsNotNone(out2)
            self.assertIsNotNone(out3)

    def test_shard_index(self):
        with self.static_graph():
            x = paddle.static.data(
                name="label", shape=[-1, 4, 1], dtype='int64'
            )
            shard_label = paddle.shard_index(
                input=x, index_num=20, nshards=2, shard_id=0
            )

        self.assertIsNotNone(shard_label)

    def test_accuracy(self):
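        # A small linear + softmax classifier; top-5 accuracy from
        # paddle.static.accuracy must match between static and eager (CPU) mode.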
        x = np.random.rand(3, 32, 32).astype("float32")
        y = np.array([[1], [0], [1]])
        with self.static_graph():
            data = paddle.static.data(
                name="input", shape=[-1, 32, 32], dtype="float32"
            )
            label = paddle.static.data(name="label", shape=[-1, 1], dtype="int")
            data_new = paddle.reshape(data, [3, 32 * 32])
            fc_out = paddle.nn.Linear(32 * 32, 10)(data_new)
            predict = paddle.nn.functional.softmax(fc_out)
            result = paddle.static.accuracy(input=predict, label=label, k=5)
            place = fluid.CPUPlace()
            exe = fluid.Executor(place)

            exe.run(fluid.default_startup_program())
            # x = np.random.rand(3, 32, 32).astype("float32")
            # y = np.array([[1], [0], [1]])

            static_out = exe.run(
                feed={"input": x, "label": y}, fetch_list=result
            )

        with self.dynamic_graph(force_to_use_cpu=True):
            data = base.to_variable(x)
            label = base.to_variable(y)
            data_new = paddle.reshape(data, [3, 32 * 32])
            fc_out = paddle.nn.Linear(32 * 32, 10)(data_new)
            predict = paddle.nn.functional.softmax(fc_out)
            dynamic_out = paddle.static.accuracy(
                input=predict, label=label, k=5
            )

        np.testing.assert_array_equal(static_out[0], dynamic_out.numpy())


class TestBook(LayerTest):
    def setUp(self):
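        # Layer builders named in these sets get special handling in
        # test_all_layers: static-only, no comparison, or allclose comparison.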
        self.only_static_set = set({"make_word_embedding"})
        self.not_compare_static_dygraph_set = set(
            {
                "make_gaussian_random",
                "make_kldiv_loss",
                "make_uniform_random_batch_size_like",
            }
        )
        self.all_close_compare = set({"make_spectral_norm"})

    def test_all_layers(self):
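        # Collect every make_* builder, run it once in static mode and once
        # in dygraph mode, and compare the two results according to the sets
        # configured in setUp.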
        attrs = (getattr(self, name) for name in dir(self))
        methods = filter(inspect.ismethod, attrs)
        for method in methods:
            if not method.__name__.startswith('make_'):
                continue
            self._low_data_bound = 0
            self._high_data_bound = 2
            self._batch_size = 2
            self._feed_dict = {}
            self._force_to_use_cpu = False
            with self.static_graph():
                static_var = method()
                if isinstance(static_var, tuple):
                    static_var = static_var[0]

                if static_var is not None:
                    fetch_list = [static_var.name]
                    static_result = self.get_static_graph_result(
                        feed=self._feed_dict,
                        fetch_list=fetch_list,
                        force_to_use_cpu=self._force_to_use_cpu,
                    )

                else:
                    continue
            if method.__name__ in self.only_static_set:
                continue

            with self.dynamic_graph(self._force_to_use_cpu):
                dy_result = method()
                if isinstance(dy_result, tuple):
                    dy_result = dy_result[0]
                dy_result_value = dy_result.numpy()

            if method.__name__ in self.all_close_compare:
                np.testing.assert_allclose(
                    static_result[0],
                    dy_result_value,
                    rtol=1e-05,
                    atol=0,
                    err_msg='Result of function [{}] compare failed'.format(
                        method.__name__
                    ),
                )
                continue

            if method.__name__ not in self.not_compare_static_dygraph_set:
                np.testing.assert_array_equal(
                    static_result[0],
                    dy_result_value,
                    err_msg='Result of function [{}] not equal'.format(
                        method.__name__
                    ),
                )

    def _get_np_data(self, shape, dtype, append_batch_size=True):
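        # Deterministic random feed data; integer dtypes are drawn from
        # [self._low_data_bound, self._high_data_bound).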
        np.random.seed(self.seed)
        if append_batch_size:
            shape = [self._batch_size] + shape
        if dtype == 'float32':
            return np.random.random(shape).astype(dtype)
        elif dtype == 'float64':
            return np.random.random(shape).astype(dtype)
        elif dtype == 'int32':
            return np.random.randint(
                self._low_data_bound, self._high_data_bound, shape
            ).astype(dtype)
        elif dtype == 'int64':
            return np.random.randint(
                self._low_data_bound, self._high_data_bound, shape
            ).astype(dtype)

    def _get_data(
        self, name, shape, dtype, set_feed_dict=True, append_batch_size=True
    ):
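        # In dygraph mode return an eager variable with deterministic random
        # data; in static mode declare a data placeholder and record the
        # matching numpy feed in self._feed_dict.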
        if base.enabled():
            return base.to_variable(
                value=self._get_np_data(shape, dtype, append_batch_size),
                name=name,
                zero_copy=False,
            )
        else:
            if set_feed_dict:
                self._feed_dict[name] = self._get_np_data(
                    shape, dtype, append_batch_size
                )
            if append_batch_size:
                shape = [-1] + shape
            data = paddle.static.data(
                name=name,
                shape=shape,
                dtype=dtype,
            )
            data.desc.set_need_check_feed(False)
            return data

    def make_fit_a_line(self):
        with program_guard(
            fluid.default_main_program(),
            startup_program=fluid.default_startup_program(),
        ):
            x = self._get_data(name='x', shape=[13], dtype='float32')
            y_predict = paddle.nn.Linear(13, 1)(x)
            y = self._get_data(name='y', shape=[1], dtype='float32')
            cost = paddle.nn.functional.square_error_cost(
                input=y_predict, label=y
            )
            avg_cost = paddle.mean(cost)
            return avg_cost

    def make_recognize_digits_mlp(self):
        with program_guard(
            fluid.default_main_program(), fluid.default_startup_program()
        ):
            # Change g_program, so the rest layers use `g_program`
            images = self._get_data(name='pixel', shape=[784], dtype='float32')
            label = self._get_data(name='label', shape=[1], dtype='int64')
            hidden1 = paddle.nn.Linear(784, 128)(images)
            hidden1 = paddle.nn.functional.relu(hidden1)
            hidden2 = paddle.nn.Linear(128, 64)(hidden1)
            hidden2 = paddle.nn.functional.relu(hidden2)
            hidden1 = paddle.nn.Linear(128, 10, "sftmax.w1")(hidden1)
            hidden2 = paddle.nn.Linear(64, 10, "sftmax.w2")(hidden2)
            hidden = hidden1 + hidden2
            predict = paddle.nn.functional.softmax(hidden)
            cost = paddle.nn.functional.cross_entropy(
                input=predict, label=label, reduction='none', use_softmax=False
            )
            avg_cost = paddle.mean(cost)
            return avg_cost

    def make_conv2d_transpose(self):
        with program_guard(
            fluid.default_main_program(), fluid.default_startup_program()
        ):
            img = self._get_data(name='pixel', shape=[3, 2, 2], dtype='float32')
            return paddle.static.nn.conv2d_transpose(
                input=img, num_filters=10, output_size=28
            )

    def make_recognize_digits_conv(self):
        with program_guard(
            fluid.default_main_program(), fluid.default_startup_program()
        ):
            images = self._get_data(
                name='pixel', shape=[1, 28, 28], dtype='float32'
            )
            label = self._get_data(name='label', shape=[1], dtype='int64')
            conv_pool_1 = nets.simple_img_conv_pool(
                input=images,
                filter_size=5,
                num_filters=2,
                pool_size=2,
                pool_stride=2,
                act="relu",
            )
            conv_pool_2 = nets.simple_img_conv_pool(
                input=conv_pool_1,
                filter_size=5,
                num_filters=4,
                pool_size=2,
                pool_stride=2,
                act="relu",
            )

            conv_pool_2_new = paddle.reshape(
                conv_pool_2,
                [
                    conv_pool_2.shape[0],
                    conv_pool_2.shape[1]
                    * conv_pool_2.shape[2]
                    * conv_pool_2.shape[3],
                ],
            )
            predict = paddle.nn.Linear(
                conv_pool_2.shape[1]
                * conv_pool_2.shape[2]
                * conv_pool_2.shape[3],
                10,
            )(conv_pool_2_new)
            predict = paddle.nn.functional.softmax(predict)
            cost = paddle.nn.functional.cross_entropy(
                input=predict, label=label, reduction='none', use_softmax=False
            )
            avg_cost = paddle.mean(cost)
            return avg_cost

    def make_word_embedding(self):
        with program_guard(
            fluid.default_main_program(), fluid.default_startup_program()
        ):
            dict_size = 10000
            embed_size = 32
            first_word = self._get_data(name='firstw', shape=[1], dtype='int64')
            second_word = self._get_data(
                name='secondw', shape=[1], dtype='int64'
            )
            third_word = self._get_data(name='thirdw', shape=[1], dtype='int64')
            forth_word = self._get_data(name='forthw', shape=[1], dtype='int64')
            next_word = self._get_data(name='nextw', shape=[1], dtype='int64')

            embed_first = layers.embedding(
                input=first_word,
                size=[dict_size, embed_size],
                dtype='float32',
                param_attr='shared_w',
            )
            embed_second = layers.embedding(
                input=second_word,
                size=[dict_size, embed_size],
                dtype='float32',
                param_attr='shared_w',
            )

            embed_third = layers.embedding(
                input=third_word,
                size=[dict_size, embed_size],
                dtype='float32',
                param_attr='shared_w',
            )
            embed_forth = layers.embedding(
                input=forth_word,
                size=[dict_size, embed_size],
                dtype='float32',
                param_attr='shared_w',
            )

            concat_embed = paddle.concat(
                [embed_first, embed_second, embed_third, embed_forth],
                axis=1,
            )

            hidden1 = paddle.static.nn.fc(
                x=concat_embed, size=256, activation='sigmoid'
            )
            predict_word = paddle.static.nn.fc(
                x=hidden1, size=dict_size, activation='softmax'
            )
            cost = paddle.nn.functional.cross_entropy(
                input=predict_word,
                label=next_word,
                reduction='none',
                use_softmax=False,
            )
            avg_cost = paddle.mean(cost)
            return avg_cost

    def make_pool2d(self):
        with program_guard(
            fluid.default_main_program(), fluid.default_startup_program()
        ):
            x = self._get_data(name='x', shape=[3, 224, 224], dtype='float32')
            return paddle.nn.functional.max_pool2d(
                x, kernel_size=[5, 3], stride=[1, 2], padding=(2, 1)
            )

    def make_pool2d_infershape(self):
        with program_guard(
            fluid.default_main_program(), fluid.default_startup_program()
        ):
            theta = self._get_data("theta", shape=[2, 3], dtype='float32')
            x = paddle.nn.functional.affine_grid(
                theta, out_shape=[2, 3, 244, 244]
            )
            return paddle.nn.functional.max_pool2d(
                x, kernel_size=[5, 3], stride=[1, 2], padding=(2, 1)
            )

    def make_softmax(self):
        with program_guard(
            fluid.default_main_program(), fluid.default_startup_program()
        ):
            data = self._get_data(name='data', shape=[10], dtype='float32')
            hid = paddle.nn.Linear(10, 20)(data)
            return paddle.nn.functional.softmax(hid, axis=1)

    @prog_scope()
    def make_nce(self):
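        # Embed the context words of a fixed window (sharing 'emb.w'),
        # concatenate the embeddings and compute an NCE loss for the label word.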
        window_size = 5
        words = []
        for i in range(window_size):
            words.append(
                self._get_data(name=f'word_{i}', shape=[1], dtype='int64')
            )

        dict_size = 10000
        label_word = int(window_size // 2) + 1

        embs = []
        for i in range(window_size):
            if i == label_word:
                continue

            emb = layers.embedding(
                input=words[i],
                size=[dict_size, 32],
                param_attr='emb.w',
                is_sparse=True,
            )

            embs.append(emb)

        embs = paddle.concat(embs, axis=1)
        loss = paddle.static.nn.nce(
            input=embs,
            label=words[label_word],
            num_total_classes=dict_size,
            param_attr='nce.w',
            bias_attr='nce.b',
        )
        avg_loss = paddle.mean(loss)
        return avg_loss

    def make_multiplex(self):
        with program_guard(
            fluid.default_main_program(), fluid.default_startup_program()
        ):
            x1 = self._get_data(name='x1', shape=[4], dtype='float32')
            x2 = self._get_data(name='x2', shape=[4], dtype='float32')
            index = self._get_data(name='index', shape=[1], dtype='int32')
            out = paddle.multiplex(inputs=[x1, x2], index=index)
            return out

    def make_softmax_with_cross_entropy(self):
        with program_guard(
            fluid.default_main_program(), fluid.default_startup_program()
        ):
            x = self._get_data(name='x', shape=[16], dtype='float32')
            y = self._get_data(name='label', shape=[1], dtype='int64')
            loss, softmax = paddle.nn.functional.softmax_with_cross_entropy(
                x, y, return_softmax=True
            )
            self.assertIsNotNone(loss)
            self.assertIsNotNone(softmax)

            loss = paddle.nn.functional.softmax_with_cross_entropy(x, y)
            self.assertIsNotNone(loss)

            x1 = self._get_data(name='x1', shape=[16, 32, 64], dtype='float32')
            y1 = self._get_data(name='label1', shape=[1, 32, 64], dtype='int64')
            y2 = self._get_data(name='label2', shape=[16, 1, 64], dtype='int64')
            y3 = self._get_data(name='label3', shape=[16, 32, 1], dtype='int64')
            loss1 = paddle.nn.functional.softmax_with_cross_entropy(
                x1, y1, axis=1
            )
            loss2 = paddle.nn.functional.softmax_with_cross_entropy(
                x1, y2, axis=2
            )
            loss3 = paddle.nn.functional.softmax_with_cross_entropy(
                x1, y3, axis=3
            )
            loss4 = paddle.nn.functional.softmax_with_cross_entropy(
                x1, y3, axis=-1
            )
            self.assertIsNotNone(loss1)
            self.assertIsNotNone(loss2)
            self.assertIsNotNone(loss3)
            self.assertIsNotNone(loss4)
            return loss4

    def make_scatter(self):
        with program_guard(
            fluid.default_main_program(), fluid.default_startup_program()
        ):
            x = self._get_data(
                name='x', shape=[3, 3], append_batch_size=False, dtype='float32'
            )
            idx = self._get_data(
                name='idx', shape=[2], append_batch_size=False, dtype='int32'
            )
            updates = self._get_data(
                name='updates',
                shape=[2, 3],
                dtype='float32',
                append_batch_size=False,
            )
            out = paddle.scatter(x, index=idx, updates=updates)
            return out

    def make_one_hot(self):
        with fluid.framework._dygraph_place_guard(place=fluid.CPUPlace()):
            label = self._get_data(name="label", shape=[1], dtype="int32")
            one_hot_label = paddle.nn.functional.one_hot(label, 10)
            return one_hot_label

    def make_label_smooth(self):
        # TODO(minqiyang): support gpu ut
        self._force_to_use_cpu = True
        with fluid.framework._dygraph_place_guard(place=fluid.CPUPlace()):
            label = self._get_data(name="label", shape=[1], dtype="int32")
            one_hot_label = paddle.nn.functional.one_hot(label, 10)
            smooth_label = F.label_smooth(label=one_hot_label, epsilon=0.1)
            return smooth_label

    def make_topk(self):
        with program_guard(
            fluid.default_main_program(), fluid.default_startup_program()
        ):
            data = self._get_data(name="label", shape=[200], dtype="float32")
            values, indices = paddle.topk(data, k=5)
            return values, indices

    def make_l2_normalize(self):
        with program_guard(
            fluid.default_main_program(), fluid.default_startup_program()
        ):
            x = self._get_data(name='x', shape=[8, 7, 10], dtype="float32")
            output = paddle.nn.functional.normalize(x, axis=1)
            return output

    def make_shape(self):
        with program_guard(
            fluid.default_main_program(), fluid.default_startup_program()
        ):
            input = self._get_data(
                name="input", shape=[3, 100, 100], dtype="float32"
            )
            out = paddle.shape(input)
            return out

    def make_pad2d(self):
        with program_guard(
            fluid.default_main_program(), fluid.default_startup_program()
        ):
            input = self._get_data(
                name="input", shape=[3, 100, 100], dtype="float32"
            )

            tmp_pad = paddle.nn.Pad2D(
                padding=[1, 2, 3, 4],
                mode='reflect',
                data_format='NCHW',
                name="shape",
            )
            out = tmp_pad(input)
1899
            return out

    def make_mish(self):
        with program_guard(
            fluid.default_main_program(), fluid.default_startup_program()
        ):
            input = self._get_data(name="input", shape=[16], dtype="float32")
            out = paddle.nn.functional.mish(input, name='mish')
            return out

    def make_cross_entropy(self):
        with program_guard(
            fluid.default_main_program(), fluid.default_startup_program()
        ):
            x = self._get_data(name="x", shape=[30, 10], dtype="float32")
            label = self._get_data(name="label", shape=[30, 1], dtype="int64")
            mode = 'channel'
            out = paddle.nn.functional.cross_entropy(
                x,
                label,
                soft_label=False,
                ignore_index=4,
                reduction='none',
                use_softmax=False,
            )
            return out

    def make_uniform_random_batch_size_like(self):
        with program_guard(
            fluid.default_main_program(), fluid.default_startup_program()
        ):
            input = self._get_data(
                name="input", shape=[13, 11], dtype='float32'
            )
            out = random.uniform_random_batch_size_like(input, [-1, 11])
            return out

    def make_gaussian_random(self):
        with program_guard(
            fluid.default_main_program(), fluid.default_startup_program()
        ):
            out = random.gaussian(shape=[20, 30])
            return out

    def make_sum(self):
        with program_guard(
            fluid.default_main_program(), fluid.default_startup_program()
        ):
            input = self._get_data(
                name="input", shape=[13, 11], dtype='float32'
            )

            out = paddle.add_n(input)
            return out

    def make_slice(self):
        starts = [1, 0, 2]
        ends = [3, 3, 4]
        axes = [0, 1, 2]

        with program_guard(
            fluid.default_main_program(), fluid.default_startup_program()
        ):
            input = self._get_data(
                name="input", shape=[3, 4, 5, 6], dtype='float32'
            )

            out = paddle.slice(input, axes=axes, starts=starts, ends=ends)
            return out

    def make_scale_variable(self):
        with program_guard(
            fluid.default_main_program(), fluid.default_startup_program()
        ):
            input = self._get_data(
                name="input", shape=[3, 4, 5, 6], dtype='float32'
            )
            scale_var = self._get_data(
                name="scale",
                shape=[1],
                dtype='float32',
                append_batch_size=False,
            )
            out = paddle.scale(input, scale=scale_var)
            return out

    def make_bilinear_tensor_product_layer(self):
        with program_guard(
            fluid.default_main_program(), fluid.default_startup_program()
        ):
            data = self._get_data(name='data', shape=[4], dtype="float32")

            theta = self._get_data(name="theta", shape=[5], dtype="float32")
            out = paddle.static.nn.common.bilinear_tensor_product(
                data, theta, 6
            )
            return out

    def make_batch_norm(self):
        with program_guard(
            fluid.default_main_program(), fluid.default_startup_program()
        ):
            data = self._get_data(
                name='data', shape=[32, 128, 128], dtype="float32"
            )
            out = paddle.static.nn.batch_norm(data)
            return out

    def make_batch_norm_momentum_variable(self):
        with program_guard(
            fluid.default_main_program(), fluid.default_startup_program()
        ):
            data = self._get_data(
                name='data', shape=[32, 128, 128], dtype="float32"
            )
            momentum = self._get_data(
                name='momentum',
                shape=[1],
                dtype='float32',
                append_batch_size=False,
            )
            out = paddle.static.nn.batch_norm(data, momentum=momentum)
            return out

    def make_range(self):
        with program_guard(
            fluid.default_main_program(), fluid.default_startup_program()
        ):
            paddle.arange(0, 10, 2, 'int32')
            paddle.arange(0.1, 10.0, 0.2, 'float32')
            paddle.arange(0.1, 10.0, 0.2, 'float64')
            start = paddle.tensor.fill_constant(
                shape=[1], value=0.1, dtype="float32"
            )
            end = paddle.tensor.fill_constant(
                shape=[1], value=10.0, dtype="float32"
            )
            step = paddle.tensor.fill_constant(
                shape=[1], value=0.2, dtype="float32"
            )
            y = paddle.arange(start, end, step, 'float64')
            return y

    def make_spectral_norm(self):
        with program_guard(
            fluid.default_main_program(), fluid.default_startup_program()
        ):
            weight = self._get_data(
                name='weight',
                shape=[2, 3, 32, 32],
                dtype="float32",
                append_batch_size=False,
            )
            out = paddle.static.nn.spectral_norm(weight, dim=1, power_iters=1)
            return out

    def make_kldiv_loss(self):
        with program_guard(
            fluid.default_main_program(), fluid.default_startup_program()
        ):
            x = self._get_data(
                name='x',
                shape=[32, 128, 128],
                dtype="float32",
                append_batch_size=False,
            )
            target = self._get_data(
                name='target',
                shape=[32, 128, 128],
                dtype="float32",
                append_batch_size=False,
            )
            loss = paddle.nn.functional.kl_div(
                input=x, label=target, reduction='batchmean'
            )
            return loss

    def make_pixel_shuffle(self):
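        # pixel_shuffle with upscale_factor=3 rearranges the [9, 4, 4] input
        # (per sample) into [1, 12, 12].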
        with program_guard(
            fluid.default_main_program(), fluid.default_startup_program()
        ):
            x = self._get_data(name="X", shape=[9, 4, 4], dtype="float32")
            out = paddle.nn.functional.pixel_shuffle(x, upscale_factor=3)
            return out

    def make_mse_loss(self):
        with program_guard(
            fluid.default_main_program(), fluid.default_startup_program()
        ):
            x = self._get_data(name="X", shape=[1], dtype="float32")
            y = self._get_data(name="Y", shape=[1], dtype="float32")
            out = paddle.nn.functional.mse_loss(input=x, label=y)
            return out

    def make_square_error_cost(self):
        with program_guard(
            fluid.default_main_program(), fluid.default_startup_program()
        ):
            x = self._get_data(name="X", shape=[1], dtype="float32")
            y = self._get_data(name="Y", shape=[1], dtype="float32")
            out = paddle.nn.functional.square_error_cost(input=x, label=y)
            return out

    def test_affine_grid(self):
        with self.static_graph():
            data = paddle.static.data(
                name='data', shape=[-1, 2, 3, 3], dtype="float32"
            )
            out = paddle.argsort(x=data, axis=1)

            theta = paddle.static.data(
                name="theta", shape=[-1, 2, 3], dtype="float32"
            )
            out_shape = paddle.static.data(
                name="out_shape", shape=[-1], dtype="int32"
            )
            data_0 = paddle.nn.functional.affine_grid(theta, out_shape)
            data_1 = paddle.nn.functional.affine_grid(theta, [5, 3, 28, 28])

            self.assertIsNotNone(data_0)
            self.assertIsNotNone(data_1)

    def test_stridedslice(self):
        axes = [0, 1, 2]
        starts = [1, 0, 2]
        ends = [3, 3, 4]
        strides = [1, 1, 1]
        with self.static_graph():
            x = paddle.static.data(
                name="x", shape=[-1, 245, 30, 30], dtype="float32"
            )
            out = paddle.strided_slice(
                x, axes=axes, starts=starts, ends=ends, strides=strides
            )
            return out

    def test_fill_constant_batch_size_like(self):
        with self.static_graph():
            like = paddle.tensor.fill_constant(
                shape=[1, 200], value=10, dtype='int64'
            )
            out = layers.fill_constant_batch_size_like(
                input=like, shape=[2, 3300], value=1315454564656, dtype='int64'
            )
            return out

    def test_shuffle_batch(self):
        # TODO(minqiyang): dygraph do not support lod now
        with self.static_graph():
            x = paddle.static.data(
                name='X', shape=[-1, 4, 50], dtype='float32', lod_level=0
            )
            out1 = shuffle_batch(x)
            default_main_program().random_seed = 1000
            out2 = shuffle_batch(x)
            self.assertIsNotNone(out1)
            self.assertIsNotNone(out2)
            return out1

    def test_partial_sum(self):
        with self.static_graph():
            x = paddle.static.data(name="x", shape=[None, 3], dtype="float32")
            y = paddle.static.data(name="y", shape=[None, 3], dtype="float32")
            sum = partial_sum([x, y], start_index=0, length=2)
            return sum

    def test_batch_fc(self):
        with self.static_graph():
            input = paddle.static.data(
                name="input", shape=[16, 2, 3], dtype="float32"
            )
            out = batch_fc(
                input=input,
                param_size=[16, 3, 10],
                param_attr=fluid.ParamAttr(
                    learning_rate=1.0,
                    name="w_0",
                    initializer=paddle.nn.initializer.XavierNormal(),
                ),
                bias_size=[16, 10],
                bias_attr=fluid.ParamAttr(
                    learning_rate=1.0,
                    name="b_0",
                    initializer=paddle.nn.initializer.XavierNormal(),
                ),
                act="relu",
            )
        return out

    def test_rank_attention(self):
        with self.static_graph():
            input = paddle.static.data(
                name="input", shape=[None, 2], dtype="float32"
            )
            rank_offset = paddle.static.data(
                name="rank_offset", shape=[None, 7], dtype="int32"
            )
            out = rank_attention(
                input=input,
                rank_offset=rank_offset,
                rank_param_shape=[18, 3],
                rank_param_attr=fluid.ParamAttr(
                    learning_rate=1.0,
                    name="ubm_rank_param.w_0",
                    initializer=paddle.nn.initializer.XavierNormal(),
                ),
                max_rank=3,
            )
            return out

    def test_row_conv(self):
        # TODO(minqiyang): dygraph do not support lod now
        with self.static_graph():
            x = paddle.static.data(
                name='x', shape=[-1, 16], dtype='float32', lod_level=1
            )
            out = paddle.static.nn.row_conv(input=x, future_context_size=2)
            return out

    def test_simple_conv2d(self):
        # TODO(minqiyang): dygraph do not support layers with param now
        with self.static_graph():
            images = paddle.static.data(
                name='pixel', shape=[-1, 3, 48, 48], dtype='float32'
            )
            return paddle.static.nn.conv2d(
                input=images, num_filters=3, filter_size=[4, 4]
            )

    def test_squeeze(self):
        # TODO(minqiyang): dygraph do not support layers with param now
        with self.static_graph():
            x = paddle.static.data(
                name='x', shape=[-1, 1, 1, 4], dtype='float32'
            )
            out = paddle.squeeze(x, axis=[2])
            return out

    def test_flatten(self):
        # TODO(minqiyang): dygraph do not support op without kernel now
        with self.static_graph():
            x = paddle.static.data(
                name='x',
                shape=[4, 4, 3],
                dtype="float32",
            )
            out = paddle.flatten(x, 1, -1, name="flatten")
            return out

    def test_linspace(self):
        program = Program()
        with program_guard(program):
            out = paddle.linspace(20, 10, 5, 'float64')
            self.assertIsNotNone(out)
        print(str(program))

    def test_unfold(self):
        with self.static_graph():
            x = paddle.static.data(
                name='x', shape=[-1, 3, 20, 20], dtype='float32'
            )
            out = paddle.nn.functional.unfold(x, [3, 3], 1, 1, 1)
            return out

    def test_partial_concat(self):
        with self.static_graph():
            x = paddle.static.data(name="x", shape=[None, 3], dtype="float32")
            y = paddle.static.data(name="y", shape=[None, 3], dtype="float32")
            concat1 = partial_concat([x, y], start_index=0, length=2)
            concat2 = partial_concat(x, start_index=0, length=-1)
            return concat1, concat2

    def test_addmm(self):
        with program_guard(
            fluid.default_main_program(), fluid.default_startup_program()
        ):
            input = paddle.static.data(
                name='input_data',
                shape=[3, 3],
                dtype='float32',
            )
            x = paddle.static.data(name='x', shape=[3, 2], dtype='float32')
            y = paddle.static.data(name='y', shape=[2, 3], dtype='float32')

            out = paddle.addmm(input=input, x=x, y=y)
            return out

    def test_warpctc_with_padding(self):
        # TODO(minqiyang): dygraph do not support lod now
        with self.static_graph():
            input_length = paddle.static.data(
                name='logits_length', shape=[11], dtype='int64'
            )
            label_length = paddle.static.data(
                name='labels_length', shape=[12], dtype='int64'
            )
            label = paddle.static.data(
                name='label', shape=[12, 1], dtype='int32'
            )
            predict = paddle.static.data(
                name='predict', shape=[4, 4, 8], dtype='float32'
            )
            output = paddle.nn.functional.ctc_loss(
                log_probs=predict,
                labels=label,
                input_lengths=input_length,
                label_lengths=label_length,
                reduction='none',
            )
            return output


class ExampleNet(paddle.nn.Layer):
    def __init__(self):
        super().__init__()
        self.weight = self.create_parameter(
            shape=[1, 1], attr=paddle.ParamAttr(trainable=False)
        )

    def forward(self):
        # only for test parameter trainable attr
        pass


class TestLayerParameterTrainableSet(unittest.TestCase):
    def test_layer_parameter_set(self):
        with fluid.dygraph.guard():
            net = ExampleNet()
            self.assertFalse(net.weight.trainable)


class TestLayerTrainingAttribute(unittest.TestCase):
    def test_set_train_eval_in_dynamic_mode(self):
        with fluid.dygraph.guard():
            net = paddle.nn.Dropout()
            net.train()
            self.assertTrue(net.training)
            net.eval()
            self.assertFalse(net.training)

    def test_set_train_eval_in_static_mode(self):
        net = paddle.nn.Dropout()
        net.train()
        self.assertTrue(net.training)
        net.eval()
        self.assertFalse(net.training)


class MyLayer(paddle.nn.Layer):
    def __init__(self):
        super().__init__()
        self._linear = paddle.nn.Linear(1, 1)
        self._dropout = paddle.nn.Dropout(p=0.5)

    def forward(self, input):
        temp = self._linear(input)
        temp = self._dropout(temp)
        return temp


class MySuperLayer(paddle.nn.Layer):
    def __init__(self):
        super().__init__()
        self._mylayer = MyLayer()

    def forward(self, input):
        temp = self._mylayer(input)
        return temp


class TestSubLayerCount(unittest.TestCase):
    def test_sublayer(self):
        with fluid.dygraph.guard():
            mySuperlayer = MySuperLayer()
            self.assertTrue(len(mySuperlayer.sublayers()) == 3)
            self.assertTrue(len(mySuperlayer.sublayers(include_self=True)) == 4)


if __name__ == '__main__':
    paddle.enable_static()
    unittest.main()