#   Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import contextlib
import inspect
import unittest

import numpy as np
from decorator_helper import prog_scope
from test_imperative_base import new_program_scope

import paddle
import paddle.fluid as fluid
import paddle.fluid.layers as layers
import paddle.fluid.nets as nets
import paddle.nn.functional as F
from paddle.fluid import core
from paddle.fluid.dygraph import base, nn, to_variable
from paddle.fluid.framework import (
    Program,
    _test_eager_guard,
    default_main_program,
    program_guard,
)
from paddle.fluid.initializer import Constant
from paddle.fluid.param_attr import ParamAttr
from paddle.tensor import random


class LayerTest(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        cls.seed = 111

    @classmethod
    def tearDownClass(cls):
        pass

    def _get_place(self, force_to_use_cpu=False):
        # Option used by ops that only have a CPU kernel.
        if force_to_use_cpu:
            return core.CPUPlace()
        else:
            if core.is_compiled_with_cuda():
                return core.CUDAPlace(0)
            return core.CPUPlace()

    @contextlib.contextmanager
    def static_graph(self):
        with new_program_scope():
            paddle.seed(self.seed)
            paddle.framework.random._manual_program_seed(self.seed)
            yield

    def get_static_graph_result(
        self, feed, fetch_list, with_lod=False, force_to_use_cpu=False
    ):
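        # Run the startup program, then execute the default main program with the
        # given feed and return the fetched results (numpy arrays unless with_lod).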
        exe = fluid.Executor(self._get_place(force_to_use_cpu))
        exe.run(fluid.default_startup_program())
        return exe.run(
            fluid.default_main_program(),
            feed=feed,
            fetch_list=fetch_list,
            return_numpy=(not with_lod),
        )

    @contextlib.contextmanager
    def dynamic_graph(self, force_to_use_cpu=False):
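        # Enter a dygraph guard on the selected place, seeded the same way as static_graph().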
        with fluid.dygraph.guard(
            self._get_place(force_to_use_cpu=force_to_use_cpu)
        ):
            paddle.seed(self.seed)
            paddle.framework.random._manual_program_seed(self.seed)
            yield


class TestLayer(LayerTest):
    def test_custom_layer_with_kwargs(self):
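        # A user-defined dygraph Layer whose forward() takes a keyword argument.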
        class CustomLayer(fluid.Layer):
            def __init__(self, input_size, linear1_size=4):
                super().__init__()
                self.linear1 = paddle.nn.Linear(
                    input_size, linear1_size, bias_attr=False
                )
                self.linear2 = paddle.nn.Linear(
                    linear1_size, 1, bias_attr=False
                )

            def forward(self, x, do_linear2=False):
                ret = self.linear1(x)
                if do_linear2:
                    ret = self.linear2(ret)
                return ret

        with self.dynamic_graph():
            with _test_eager_guard():
                inp = np.ones([3, 3], dtype='float32')
                x = base.to_variable(inp)
                custom = CustomLayer(input_size=3, linear1_size=2)
                ret = custom(x, do_linear2=False)
                np.testing.assert_array_equal(ret.numpy().shape, [3, 2])
                ret = custom(x, do_linear2=True)
                np.testing.assert_array_equal(ret.numpy().shape, [3, 1])
            inp = np.ones([3, 3], dtype='float32')
            x = base.to_variable(inp)
            custom = CustomLayer(input_size=3, linear1_size=2)
            ret = custom(x, do_linear2=False)
            np.testing.assert_array_equal(ret.numpy().shape, [3, 2])
            ret = custom(x, do_linear2=True)
            np.testing.assert_array_equal(ret.numpy().shape, [3, 1])

    def test_linear(self):
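        # Linear in static graph vs. dygraph (eager and legacy); outputs must match.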
        inp = np.ones([3, 32, 32], dtype='float32')
        with self.static_graph():
            t = layers.data(
                name='data',
                shape=[3, 32, 32],
                dtype='float32',
                append_batch_size=False,
            )
            linear = paddle.nn.Linear(
                32, 4, bias_attr=fluid.initializer.ConstantInitializer(value=1)
            )
            ret = linear(t)
            static_ret = self.get_static_graph_result(
                feed={'data': inp}, fetch_list=[ret]
            )[0]
        with self.dynamic_graph():
            with _test_eager_guard():
                t = base.to_variable(inp)
                linear = paddle.nn.Linear(
                    32,
                    4,
                    bias_attr=fluid.initializer.ConstantInitializer(value=1),
                )
                dy_eager_ret = linear(t)
                dy_eager_ret_value = dy_eager_ret.numpy()

            t = base.to_variable(inp)
            linear = paddle.nn.Linear(
                32, 4, bias_attr=fluid.initializer.ConstantInitializer(value=1)
            )
            dy_ret = linear(t)
            dy_ret_value = dy_ret.numpy()

        np.testing.assert_array_equal(static_ret, dy_eager_ret_value)
        np.testing.assert_array_equal(static_ret, dy_ret_value)

        with self.static_graph():

            # the input of Linear must be a Variable.
            def test_Variable():
                inp = np.ones([3, 32, 32], dtype='float32')
                linear = paddle.nn.Linear(
                    32,
                    4,
                    bias_attr=fluid.initializer.ConstantInitializer(value=1),
                )
                linear_ret1 = linear(inp)

            self.assertRaises(TypeError, test_Variable)

            # the input dtype of Linear must be float16, float32 or float64;
            # float16 can only be used on a GPU place.
            def test_type():
                inp = np.ones([3, 32, 32], dtype='int32')
                linear = paddle.nn.Linear(
                    32,
                    4,
                    bias_attr=fluid.initializer.ConstantInitializer(value=1),
                )
                linear_ret2 = linear(inp)

            self.assertRaises(TypeError, test_type)

    def test_Flatten(self):
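        # Flatten a [3, 4, 4, 5] input in static graph and dygraph and compare results.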
        inp = np.ones([3, 4, 4, 5], dtype='float32')
        with self.static_graph():
            t = layers.data(
                name='data',
                shape=[3, 4, 4, 5],
                dtype='float32',
                append_batch_size=False,
            )
            flatten = nn.Flatten()
            ret = flatten(t)
            static_ret = self.get_static_graph_result(
                feed={'data': inp}, fetch_list=[ret]
            )[0]
        with self.dynamic_graph():
            with _test_eager_guard():
                t = base.to_variable(inp)
                flatten = nn.Flatten()
                dy_eager_ret = flatten(t)
                dy_eager_ret_value = dy_eager_ret.numpy()

            t = base.to_variable(inp)
            flatten = nn.Flatten()
            dy_ret = flatten(t)
            dy_ret_value = dy_ret.numpy()

        np.testing.assert_array_equal(static_ret, dy_eager_ret_value)
        np.testing.assert_array_equal(static_ret, dy_ret_value)

        with self.static_graph():

            # the input of Linear must be a Variable.
            def test_Variable():
                inp = np.ones([3, 32, 32], dtype='float32')
                linear = paddle.nn.Linear(
                    32,
                    4,
                    bias_attr=fluid.initializer.ConstantInitializer(value=1),
                )
                linear_ret1 = linear(inp)

            self.assertRaises(TypeError, test_Variable)

            # the input dtype of Linear must be float16, float32 or float64;
            # float16 can only be used on a GPU place.
            def test_type():
                inp = np.ones([3, 32, 32], dtype='int32')
                linear = paddle.nn.Linear(
                    32,
                    4,
                    bias_attr=fluid.initializer.ConstantInitializer(value=1),
                )
                linear_ret2 = linear(inp)

            self.assertRaises(TypeError, test_type)

    def test_SyncBatchNorm(self):
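        # SyncBatchNorm is only exercised on CUDA builds.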
        if core.is_compiled_with_cuda():
            with self.static_graph():
                t = layers.data(name='t', shape=[-1, 3, 5, 5], dtype='float32')
                my_sync_bn = paddle.nn.SyncBatchNorm(3)
                ret = my_sync_bn(t)
                static_ret = self.get_static_graph_result(
                    feed={'t': np.ones([3, 3, 5, 5], dtype='float32')},
                    fetch_list=[ret],
                )[0]

            with self.dynamic_graph():
                with _test_eager_guard():
                    t = np.ones([3, 3, 5, 5], dtype='float32')
                    my_syncbn = paddle.nn.SyncBatchNorm(3)
                    dy_eager_ret = my_syncbn(base.to_variable(t))
                    dy_eager_ret_value = dy_eager_ret.numpy()

                t = np.ones([3, 3, 5, 5], dtype='float32')
                my_syncbn = paddle.nn.SyncBatchNorm(3)
                dy_ret = my_syncbn(base.to_variable(t))
                dy_ret_value = dy_ret.numpy()
            np.testing.assert_array_equal(static_ret, dy_ret_value)
            np.testing.assert_array_equal(static_ret, dy_eager_ret_value)

    def test_relu(self):
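        # relu on an all-ones input; static graph and dygraph results must agree.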
        with self.static_graph():
            t = layers.data(name='t', shape=[3, 3], dtype='float32')
            ret = layers.relu(t)
            static_ret = self.get_static_graph_result(
                feed={'t': np.ones([3, 3], dtype='float32')}, fetch_list=[ret]
            )[0]

        with self.dynamic_graph():
            with _test_eager_guard():
                t = np.ones([3, 3], dtype='float32')
                dy_eager_ret = layers.relu(base.to_variable(t))
                dy_eager_ret_value = dy_eager_ret.numpy()

            t = np.ones([3, 3], dtype='float32')
            dy_ret = layers.relu(base.to_variable(t))
            dy_ret_value = dy_ret.numpy()

        np.testing.assert_allclose(static_ret, dy_ret_value, rtol=1e-05)
        np.testing.assert_allclose(static_ret, dy_eager_ret_value, rtol=1e-05)

    def test_matmul(self):
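        # 3x3 matmul of ones; static graph and dygraph results must agree.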
        with self.static_graph():
            t = layers.data(name='t', shape=[3, 3], dtype='float32')
            t2 = layers.data(name='t2', shape=[3, 3], dtype='float32')
            ret = layers.matmul(t, t2)
            static_ret = self.get_static_graph_result(
                feed={
                    't': np.ones([3, 3], dtype='float32'),
                    't2': np.ones([3, 3], dtype='float32'),
                },
                fetch_list=[ret],
            )[0]

        with self.dynamic_graph():
            with _test_eager_guard():
                t = np.ones([3, 3], dtype='float32')
                t2 = np.ones([3, 3], dtype='float32')
                dy_eager_ret = layers.matmul(
                    base.to_variable(t), base.to_variable(t2)
                )
                dy_eager_ret_value = dy_eager_ret.numpy()

            t = np.ones([3, 3], dtype='float32')
            t2 = np.ones([3, 3], dtype='float32')
            dy_ret = layers.matmul(base.to_variable(t), base.to_variable(t2))
            dy_ret_value = dy_ret.numpy()

        np.testing.assert_allclose(static_ret, dy_ret_value, rtol=1e-05)
        np.testing.assert_allclose(static_ret, dy_eager_ret_value, rtol=1e-05)

    def test_gru_unit(self):
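        # Compare the functional gru_unit op with the GRUUnit layer in static and dynamic modes.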
        lod = [[2, 4, 3]]
        D = 5
        T = sum(lod[0])
        N = len(lod[0])

        input = np.random.rand(T, 3 * D).astype('float32')
        hidden_input = np.random.rand(T, D).astype('float32')

        with self.static_graph():
            x = layers.data(name='x', shape=[-1, D * 3], dtype='float32')
            hidden = layers.data(name='hidden', shape=[-1, D], dtype='float32')
            updated_hidden, reset_hidden_pre, gate = layers.gru_unit(
                input=x, hidden=hidden, size=D * 3
            )
            static_ret = self.get_static_graph_result(
                feed={'x': input, 'hidden': hidden_input},
                fetch_list=[updated_hidden, reset_hidden_pre, gate],
            )

        with self.static_graph():
            x = layers.data(name='x', shape=[-1, D * 3], dtype='float32')
            hidden = layers.data(name='hidden', shape=[-1, D], dtype='float32')
            updated_hidden, reset_hidden_pre, gate = layers.gru_unit(
                input=x, hidden=hidden, size=D * 3
            )
            gru = nn.GRUUnit(size=D * 3)
            updated_hidden, reset_hidden_pre, gate = gru(x, hidden)

            static_ret2 = self.get_static_graph_result(
                feed={'x': input, 'hidden': hidden_input},
                fetch_list=[updated_hidden, reset_hidden_pre, gate],
            )

        with self.dynamic_graph():
            with _test_eager_guard():
                gru = nn.GRUUnit(size=D * 3)
                dy_eager_ret = gru(
                    base.to_variable(input), base.to_variable(hidden_input)
                )
                dy_eager_ret_value = []
                for i in range(len(static_ret)):
                    dy_eager_ret_value.append(dy_eager_ret[i].numpy())

            gru = nn.GRUUnit(size=D * 3)
            dy_ret = gru(
                base.to_variable(input), base.to_variable(hidden_input)
            )
            dy_ret_value = []
            for i in range(len(static_ret)):
                dy_ret_value.append(dy_ret[i].numpy())

        for i in range(len(static_ret)):
            np.testing.assert_allclose(
                static_ret[i], static_ret2[i], rtol=1e-05
            )
            np.testing.assert_allclose(
                static_ret[i], dy_ret_value[i], rtol=1e-05
            )
            np.testing.assert_allclose(
                static_ret[i], dy_eager_ret_value[i], rtol=1e-05
            )

        with self.dynamic_graph():
            with _test_eager_guard():
                custom_weight = np.random.randn(D, D * 3).astype("float32")
                weight_attr = fluid.ParamAttr(
                    initializer=fluid.initializer.NumpyArrayInitializer(
387 388 389
                        custom_weight
                    )
                )
390 391
                gru1 = nn.GRUUnit(size=D * 3)
                gru2 = nn.GRUUnit(size=D * 3, param_attr=weight_attr)
392 393 394 395 396 397
                dy_ret1 = gru1(
                    base.to_variable(input), base.to_variable(hidden_input)
                )
                dy_ret2 = gru2(
                    base.to_variable(input), base.to_variable(hidden_input)
                )
398
                self.assertFalse(
399 400
                    np.array_equal(gru1.weight.numpy(), gru2.weight.numpy())
                )
401 402 403 404
                for o1, o2 in zip(dy_ret1, dy_ret2):
                    self.assertFalse(np.array_equal(o1.numpy(), o2.numpy()))
                gru2.weight.set_value(gru1.weight.numpy())
                gru2.bias.set_value(gru1.bias)
405 406 407 408 409 410
                dy_ret1 = gru1(
                    base.to_variable(input), base.to_variable(hidden_input)
                )
                dy_ret2 = gru2(
                    base.to_variable(input), base.to_variable(hidden_input)
                )
411
                for o1, o2 in zip(dy_ret1, dy_ret2):
412
                    np.testing.assert_array_equal(o1.numpy(), o2.numpy())
413 414 415

                gru2.weight = gru1.weight
                gru2.bias = gru1.bias
416 417 418 419 420 421
                np.testing.assert_array_equal(
                    gru1.weight.numpy(), gru2.weight.numpy()
                )
                np.testing.assert_array_equal(
                    gru1.bias.numpy(), gru2.bias.numpy()
                )
422

423
            custom_weight = np.random.randn(D, D * 3).astype("float32")
424 425 426 427 428
            weight_attr = fluid.ParamAttr(
                initializer=fluid.initializer.NumpyArrayInitializer(
                    custom_weight
                )
            )
429 430
            gru1 = nn.GRUUnit(size=D * 3)
            gru2 = nn.GRUUnit(size=D * 3, param_attr=weight_attr)
431 432 433 434 435 436
            dy_ret1 = gru1(
                base.to_variable(input), base.to_variable(hidden_input)
            )
            dy_ret2 = gru2(
                base.to_variable(input), base.to_variable(hidden_input)
            )
437
            self.assertFalse(
438 439
                np.array_equal(gru1.weight.numpy(), gru2.weight.numpy())
            )
440 441 442 443
            for o1, o2 in zip(dy_ret1, dy_ret2):
                self.assertFalse(np.array_equal(o1.numpy(), o2.numpy()))
            gru2.weight.set_value(gru1.weight.numpy())
            gru2.bias.set_value(gru1.bias)
444 445 446 447 448 449
            dy_ret1 = gru1(
                base.to_variable(input), base.to_variable(hidden_input)
            )
            dy_ret2 = gru2(
                base.to_variable(input), base.to_variable(hidden_input)
            )
450
            for o1, o2 in zip(dy_ret1, dy_ret2):
451
                np.testing.assert_array_equal(o1.numpy(), o2.numpy())
452 453 454

            gru2.weight = gru1.weight
            gru2.bias = gru1.bias
455 456 457
            np.testing.assert_array_equal(
                gru1.weight.numpy(), gru2.weight.numpy()
            )
458
            np.testing.assert_array_equal(gru1.bias.numpy(), gru2.bias.numpy())
459

X
        n = np.ones([3, 3], dtype='float32')
        n2 = np.ones([3, 3], dtype='float32') * 1.1
        n3 = np.ones([3, 3], dtype='float32') * 2
        n4 = np.ones([3, 3], dtype='float32') * 3
        n5 = np.ones([3, 3], dtype='float32') * 4
        n6 = np.ones([3, 3], dtype='float32') * 5

        with self.static_graph():
            t = layers.data(name='t', shape=[3, 3], dtype='float32')
            t2 = layers.data(name='t2', shape=[3, 3], dtype='float32')
            t3 = layers.data(name='t3', shape=[3, 3], dtype='float32')
            t4 = layers.data(name='t4', shape=[3, 3], dtype='float32')
            t5 = layers.data(name='t5', shape=[3, 3], dtype='float32')
            t6 = layers.data(name='t6', shape=[3, 3], dtype='float32')

476
            ret = paddle.add(t, t2)
477
            ret = paddle.pow(ret, t3)
478 479 480
            ret = paddle.divide(ret, t4)
            ret = paddle.subtract(ret, t5)
            ret = paddle.multiply(ret, t6)
X
482 483 484 485
            static_ret = self.get_static_graph_result(
                feed={'t': n, 't2': n2, 't3': n3, 't4': n4, 't5': n5, 't6': n6},
                fetch_list=[ret],
            )[0]
X
        with self.dynamic_graph():
488
            with _test_eager_guard():
489
                ret = paddle.add(to_variable(n), to_variable(n2))
490
                ret = paddle.pow(ret, to_variable(n3))
491 492 493
                ret = paddle.divide(ret, to_variable(n4))
                ret = paddle.subtract(ret, to_variable(n5))
                dy_eager_ret = paddle.multiply(ret, to_variable(n6))
494 495
                dy_eager_ret_value = dy_eager_ret.numpy()

496
            ret = paddle.add(to_variable(n), to_variable(n2))
497
            ret = paddle.pow(ret, to_variable(n3))
498 499 500
            ret = paddle.divide(ret, to_variable(n4))
            ret = paddle.subtract(ret, to_variable(n5))
            dy_ret = paddle.multiply(ret, to_variable(n6))
501
            dy_ret_value = dy_ret.numpy()
502

503 504
        np.testing.assert_allclose(static_ret, dy_ret_value, rtol=1e-05)
        np.testing.assert_allclose(static_ret, dy_eager_ret_value, rtol=1e-05)
X
    def test_elementwise_minmax(self):
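        # Elementwise minimum/maximum in dygraph (eager and legacy modes).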
        n = np.ones([3, 3], dtype='float32')
        n2 = np.ones([3, 3], dtype='float32') * 2

        with self.dynamic_graph():
511
            with _test_eager_guard():
512
                min_eager_ret = paddle.minimum(to_variable(n), to_variable(n2))
H
514 515 516
                min_eager_ret_value = min_eager_ret.numpy()
                max_eager_ret_value = max_eager_ret.numpy()

517
            min_ret = paddle.minimum(to_variable(n), to_variable(n2))
H
519 520
            min_ret_value = min_ret.numpy()
            max_ret_value = max_ret.numpy()
X
522 523 524 525
        np.testing.assert_allclose(n, min_ret_value, rtol=1e-05)
        np.testing.assert_allclose(n2, max_ret_value, rtol=1e-05)
        np.testing.assert_allclose(n, min_eager_ret_value, rtol=1e-05)
        np.testing.assert_allclose(n2, max_eager_ret_value, rtol=1e-05)
X
527 528 529 530 531 532 533
    def test_sequence_conv(self):
        inp_np = np.arange(12).reshape([3, 4]).astype('float32')
        if core.is_compiled_with_cuda():
            place = core.CUDAPlace(0)
        else:
            place = core.CPUPlace()
        with self.static_graph():
534 535 536 537 538 539 540
            seq = layers.data(
                name='seq_in',
                shape=[3, 4],
                dtype='float32',
                lod_level=1,
                append_batch_size=False,
            )
541
            out = layers.sequence_conv(seq, 2, act='sigmoid')
542 543 544 545 546 547 548 549 550
            static_rlt = self.get_static_graph_result(
                feed={
                    "seq_in": fluid.create_lod_tensor(
                        data=inp_np, recursive_seq_lens=[[1, 1, 1]], place=place
                    )
                },
                fetch_list=[out],
                with_lod=True,
            )[0]
551 552

        with self.static_graph():
553 554 555 556 557 558 559
            seq = layers.data(
                name='seq_in',
                shape=[3, 4],
                dtype='float32',
                lod_level=1,
                append_batch_size=False,
            )
560
            seq_conv = nn.SequenceConv('seq_conv', num_filters=2, act='sigmoid')
561
            out = seq_conv(seq)
562 563 564 565 566 567 568 569 570 571 572 573
            static_rlt2 = self.get_static_graph_result(
                feed={
                    "seq_in": fluid.create_lod_tensor(
                        data=inp_np, recursive_seq_lens=[[1, 1, 1]], place=place
                    )
                },
                fetch_list=[out],
                with_lod=True,
            )[0]
        np.testing.assert_array_equal(
            np.array(static_rlt), np.array(static_rlt2)
        )
574 575 576 577 578

    def test_conv2d_transpose(self):
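        # conv2d_transpose op vs. the Conv2DTranspose layer, plus parameter-sharing checks in dygraph.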
        inp_np = np.arange(0, 24).reshape([2, 3, 2, 2]).astype('float32')
        with self.static_graph():
            img = layers.data(name='pixel', shape=[3, 2, 2], dtype='float32')
579
            out = paddle.static.nn.conv2d_transpose(
580 581
                input=img,
                num_filters=10,
582
                filter_size=27,
583
                act='sigmoid',
584 585 586 587 588
                bias_attr=fluid.initializer.ConstantInitializer(value=1),
            )
            static_rlt = self.get_static_graph_result(
                feed={'pixel': inp_np}, fetch_list=[out]
            )[0]
589 590 591
        with self.static_graph():
            img = layers.data(name='pixel', shape=[3, 2, 2], dtype='float32')
            conv2d_transpose = nn.Conv2DTranspose(
592
                num_channels=3,
593
                num_filters=10,
594
                filter_size=27,
595
                act='sigmoid',
596 597
                bias_attr=fluid.initializer.ConstantInitializer(value=1),
            )
598
            out = conv2d_transpose(img)
599 600 601
            static_rlt2 = self.get_static_graph_result(
                feed={'pixel': inp_np}, fetch_list=[out]
            )[0]
602
        with self.dynamic_graph():
603 604 605 606 607 608
            with _test_eager_guard():
                conv2d_transpose = nn.Conv2DTranspose(
                    num_channels=3,
                    num_filters=10,
                    filter_size=27,
                    act='sigmoid',
609 610
                    bias_attr=fluid.initializer.ConstantInitializer(value=1),
                )
611 612 613
                dy_eager_rlt = conv2d_transpose(base.to_variable(inp_np))
                dy_eager_rlt_value = dy_eager_rlt.numpy()

614
            conv2d_transpose = nn.Conv2DTranspose(
615
                num_channels=3,
616
                num_filters=10,
617
                filter_size=27,
618
                act='sigmoid',
619 620
                bias_attr=fluid.initializer.ConstantInitializer(value=1),
            )
621
            dy_rlt = conv2d_transpose(base.to_variable(inp_np))
622
            dy_rlt_value = dy_rlt.numpy()
623 624 625
        np.testing.assert_allclose(static_rlt2, static_rlt, rtol=1e-05)
        np.testing.assert_allclose(dy_rlt_value, static_rlt2, rtol=1e-05)
        np.testing.assert_allclose(dy_eager_rlt_value, static_rlt2, rtol=1e-05)
626

627
        with self.dynamic_graph():
628 629 630 631 632
            with _test_eager_guard():
                images = np.ones([2, 3, 5, 5], dtype='float32')
                custom_weight = np.random.randn(3, 3, 2, 2).astype("float32")
                weight_attr = fluid.ParamAttr(
                    initializer=fluid.initializer.NumpyArrayInitializer(
633 634 635 636 637 638 639 640 641 642 643 644
                        custom_weight
                    )
                )
                conv2d1 = nn.Conv2DTranspose(
                    num_channels=3, num_filters=3, filter_size=[2, 2]
                )
                conv2d2 = nn.Conv2DTranspose(
                    num_channels=3,
                    num_filters=3,
                    filter_size=[2, 2],
                    param_attr=weight_attr,
                )
645 646 647
                dy_ret1 = conv2d1(base.to_variable(images))
                dy_ret2 = conv2d2(base.to_variable(images))
                self.assertFalse(
648 649
                    np.array_equal(dy_ret1.numpy(), dy_ret2.numpy())
                )
650 651 652 653

                conv2d1_weight_np = conv2d1.weight.numpy()
                conv2d1_bias = conv2d1.bias
                self.assertFalse(
654 655
                    np.array_equal(conv2d1_weight_np, conv2d2.weight.numpy())
                )
656
                conv2d2.weight.set_value(conv2d1_weight_np)
657 658 659
                np.testing.assert_array_equal(
                    conv2d1_weight_np, conv2d2.weight.numpy()
                )
660 661 662
                conv2d2.bias.set_value(conv2d1_bias)
                dy_ret1 = conv2d1(base.to_variable(images))
                dy_ret2 = conv2d2(base.to_variable(images))
663
                np.testing.assert_array_equal(dy_ret1.numpy(), dy_ret2.numpy())
664 665 666

                conv2d2.weight = conv2d1.weight
                conv2d2.bias = conv2d1.bias
667 668 669 670 671 672
                np.testing.assert_array_equal(
                    conv2d1.weight.numpy(), conv2d2.weight.numpy()
                )
                np.testing.assert_array_equal(
                    conv2d1.bias.numpy(), conv2d2.bias.numpy()
                )
673

674 675
            images = np.ones([2, 3, 5, 5], dtype='float32')
            custom_weight = np.random.randn(3, 3, 2, 2).astype("float32")
676 677 678 679 680 681 682 683 684 685 686 687 688 689
            weight_attr = fluid.ParamAttr(
                initializer=fluid.initializer.NumpyArrayInitializer(
                    custom_weight
                )
            )
            conv2d1 = nn.Conv2DTranspose(
                num_channels=3, num_filters=3, filter_size=[2, 2]
            )
            conv2d2 = nn.Conv2DTranspose(
                num_channels=3,
                num_filters=3,
                filter_size=[2, 2],
                param_attr=weight_attr,
            )
690 691 692 693 694 695 696
            dy_ret1 = conv2d1(base.to_variable(images))
            dy_ret2 = conv2d2(base.to_variable(images))
            self.assertFalse(np.array_equal(dy_ret1.numpy(), dy_ret2.numpy()))

            conv2d1_weight_np = conv2d1.weight.numpy()
            conv2d1_bias = conv2d1.bias
            self.assertFalse(
697 698
                np.array_equal(conv2d1_weight_np, conv2d2.weight.numpy())
            )
699
            conv2d2.weight.set_value(conv2d1_weight_np)
700 701 702
            np.testing.assert_array_equal(
                conv2d1_weight_np, conv2d2.weight.numpy()
            )
703 704 705
            conv2d2.bias.set_value(conv2d1_bias)
            dy_ret1 = conv2d1(base.to_variable(images))
            dy_ret2 = conv2d2(base.to_variable(images))
706
            np.testing.assert_array_equal(dy_ret1.numpy(), dy_ret2.numpy())
707 708 709

            conv2d2.weight = conv2d1.weight
            conv2d2.bias = conv2d1.bias
710 711 712 713 714 715
            np.testing.assert_array_equal(
                conv2d1.weight.numpy(), conv2d2.weight.numpy()
            )
            np.testing.assert_array_equal(
                conv2d1.bias.numpy(), conv2d2.bias.numpy()
            )
716

717 718 719 720 721
        with self.static_graph():

            # the input of Conv2DTranspose must be a Variable.
            def test_Variable():
                images = np.ones([2, 3, 5, 5], dtype='float32')
722 723 724
                conv2d = nn.Conv2DTranspose(
                    num_channels=3, num_filters=3, filter_size=[2, 2]
                )
725 726 727 728 729 730 731
                conv2d_ret1 = conv2d(images)

            self.assertRaises(TypeError, test_Variable)

            # the input dtype of Conv2DTranspose must be float16, float32 or float64;
            # float16 can only be used on a GPU place.
            def test_type():
732 733 734 735 736 737
                images = layers.data(
                    name='pixel', shape=[3, 5, 5], dtype='int32'
                )
                conv2d = nn.Conv2DTranspose(
                    num_channels=3, num_filters=3, filter_size=[2, 2]
                )
738 739 740 741
                conv2d_ret2 = conv2d(images)

            self.assertRaises(TypeError, test_type)

742 743 744 745 746
    def test_bilinear_tensor_product(self):
        inp_np_x = np.array([[1, 2, 3]]).astype('float32')
        inp_np_y = np.array([[4, 5, 6]]).astype('float32')

        with self.static_graph():
747 748 749 750 751 752
            data_x = layers.data(
                name='x', shape=[1, 3], dtype="float32", append_batch_size=False
            )
            data_y = layers.data(
                name='y', shape=[1, 3], dtype="float32", append_batch_size=False
            )
753 754 755 756 757
            out = layers.bilinear_tensor_product(
                data_x,
                data_y,
                6,
                bias_attr=fluid.initializer.ConstantInitializer(value=1),
758 759
                act='sigmoid',
            )
760

761 762 763
            static_rlt = self.get_static_graph_result(
                feed={'x': inp_np_x, 'y': inp_np_y}, fetch_list=[out]
            )[0]
764

765
        with self.static_graph():
766 767 768 769 770 771
            data_x = layers.data(
                name='x', shape=[1, 3], dtype="float32", append_batch_size=False
            )
            data_y = layers.data(
                name='y', shape=[1, 3], dtype="float32", append_batch_size=False
            )
772
            btp = nn.BilinearTensorProduct(
773 774
                3,
                3,
775 776
                6,
                bias_attr=fluid.initializer.ConstantInitializer(value=1),
777 778
                act='sigmoid',
            )
779
            out = btp(data_x, data_y)
780 781 782
            static_rlt2 = self.get_static_graph_result(
                feed={'x': inp_np_x, 'y': inp_np_y}, fetch_list=[out]
            )[0]
783
        with self.dynamic_graph():
784 785 786 787 788 789
            with _test_eager_guard():
                btp = nn.BilinearTensorProduct(
                    3,
                    3,
                    6,
                    bias_attr=fluid.initializer.ConstantInitializer(value=1),
790 791 792 793 794
                    act='sigmoid',
                )
                dy_eager_rlt = btp(
                    base.to_variable(inp_np_x), base.to_variable(inp_np_y)
                )
795 796
                dy_eager_rlt_value = dy_eager_rlt.numpy()

797
            btp = nn.BilinearTensorProduct(
798 799
                3,
                3,
800 801
                6,
                bias_attr=fluid.initializer.ConstantInitializer(value=1),
802 803
                act='sigmoid',
            )
804
            dy_rlt = btp(base.to_variable(inp_np_x), base.to_variable(inp_np_y))
805
            dy_rlt_value = dy_rlt.numpy()
806

807
        with self.dynamic_graph():
808 809
            with _test_eager_guard():
                btp2 = nn.BilinearTensorProduct(3, 3, 6, act='sigmoid')
810 811 812
                dy_eager_rlt2 = btp2(
                    base.to_variable(inp_np_x), base.to_variable(inp_np_y)
                )
813 814
                dy_eager_rlt2_value = dy_eager_rlt2.numpy()

815
            btp2 = nn.BilinearTensorProduct(3, 3, 6, act='sigmoid')
816 817 818
            dy_rlt2 = btp2(
                base.to_variable(inp_np_x), base.to_variable(inp_np_y)
            )
819
            dy_rlt2_value = dy_rlt2.numpy()
820

821
        with self.static_graph():
822 823 824 825 826 827 828 829 830 831 832 833 834
            data_x2 = layers.data(
                name='x', shape=[1, 3], dtype="float32", append_batch_size=False
            )
            data_y2 = layers.data(
                name='y', shape=[1, 3], dtype="float32", append_batch_size=False
            )
            out2 = layers.bilinear_tensor_product(
                data_x2, data_y2, 6, act='sigmoid'
            )

            static_rlt3 = self.get_static_graph_result(
                feed={'x': inp_np_x, 'y': inp_np_y}, fetch_list=[out2]
            )[0]
835

836 837 838 839 840
        np.testing.assert_array_equal(dy_rlt2_value, static_rlt3)
        np.testing.assert_array_equal(dy_eager_rlt2_value, static_rlt3)
        np.testing.assert_array_equal(static_rlt2, static_rlt)
        np.testing.assert_array_equal(dy_rlt_value, static_rlt)
        np.testing.assert_array_equal(dy_eager_rlt_value, static_rlt)
841

842
        with self.dynamic_graph():
843 844 845 846
            with _test_eager_guard():
                custom_weight = np.random.randn(6, 3, 3).astype("float32")
                weight_attr = fluid.ParamAttr(
                    initializer=fluid.initializer.NumpyArrayInitializer(
847 848 849
                        custom_weight
                    )
                )
850
                btp1 = nn.BilinearTensorProduct(3, 3, 6, act='sigmoid')
851 852 853 854 855 856 857 858 859
                btp2 = nn.BilinearTensorProduct(
                    3, 3, 6, act='sigmoid', param_attr=weight_attr
                )
                dy_rlt1 = btp1(
                    base.to_variable(inp_np_x), base.to_variable(inp_np_y)
                )
                dy_rlt2 = btp2(
                    base.to_variable(inp_np_x), base.to_variable(inp_np_y)
                )
860
                self.assertFalse(
861 862
                    np.array_equal(dy_rlt1.numpy(), dy_rlt2.numpy())
                )
863 864
                btp2.weight.set_value(btp1.weight.numpy())
                btp2.bias.set_value(btp1.bias)
865 866 867 868 869 870
                dy_rlt1 = btp1(
                    base.to_variable(inp_np_x), base.to_variable(inp_np_y)
                )
                dy_rlt2 = btp2(
                    base.to_variable(inp_np_x), base.to_variable(inp_np_y)
                )
871
                np.testing.assert_array_equal(dy_rlt1.numpy(), dy_rlt2.numpy())
872 873 874

                btp2.weight = btp1.weight
                btp2.bias = btp1.bias
875 876 877 878 879 880
                np.testing.assert_array_equal(
                    btp1.weight.numpy(), btp2.weight.numpy()
                )
                np.testing.assert_array_equal(
                    btp1.bias.numpy(), btp2.bias.numpy()
                )
881

882
            custom_weight = np.random.randn(6, 3, 3).astype("float32")
883 884 885 886 887
            weight_attr = fluid.ParamAttr(
                initializer=fluid.initializer.NumpyArrayInitializer(
                    custom_weight
                )
            )
888
            btp1 = nn.BilinearTensorProduct(3, 3, 6, act='sigmoid')
889 890 891 892 893 894 895 896 897
            btp2 = nn.BilinearTensorProduct(
                3, 3, 6, act='sigmoid', param_attr=weight_attr
            )
            dy_rlt1 = btp1(
                base.to_variable(inp_np_x), base.to_variable(inp_np_y)
            )
            dy_rlt2 = btp2(
                base.to_variable(inp_np_x), base.to_variable(inp_np_y)
            )
898 899 900
            self.assertFalse(np.array_equal(dy_rlt1.numpy(), dy_rlt2.numpy()))
            btp2.weight.set_value(btp1.weight.numpy())
            btp2.bias.set_value(btp1.bias)
901 902 903 904 905 906
            dy_rlt1 = btp1(
                base.to_variable(inp_np_x), base.to_variable(inp_np_y)
            )
            dy_rlt2 = btp2(
                base.to_variable(inp_np_x), base.to_variable(inp_np_y)
            )
907
            np.testing.assert_array_equal(dy_rlt1.numpy(), dy_rlt2.numpy())
908 909 910

            btp2.weight = btp1.weight
            btp2.bias = btp1.bias
911 912 913
            np.testing.assert_array_equal(
                btp1.weight.numpy(), btp2.weight.numpy()
            )
914
            np.testing.assert_array_equal(btp1.bias.numpy(), btp2.bias.numpy())
915

916
    def prelu_test(self, mode):
917 918
        inp_np = np.ones([5, 200, 100, 100]).astype('float32')
        with self.static_graph():
919 920 921 922 923 924
            data_t = layers.data(
                name="input",
                shape=[5, 200, 100, 100],
                dtype="float32",
                append_batch_size=False,
            )
925
            out = paddle.static.nn.prelu(
926 927 928 929 930
                data_t, mode, param_attr=ParamAttr(initializer=Constant(1.0))
            )
            static_rlt = self.get_static_graph_result(
                feed={"input": inp_np}, fetch_list=[out]
            )[0]
931 932

        with self.static_graph():
933 934 935 936 937 938 939 940 941 942 943 944
            data_t = layers.data(
                name="input",
                shape=[5, 200, 100, 100],
                dtype="float32",
                append_batch_size=False,
            )
            prelu = nn.PRelu(
                mode=mode,
                channel=inp_np.shape[1],
                input_shape=data_t.shape,
                param_attr=ParamAttr(initializer=Constant(1.0)),
            )
945
            out = prelu(data_t)
946 947 948
            static_rlt2 = self.get_static_graph_result(
                feed={"input": inp_np}, fetch_list=[out]
            )[0]
949 950

        with self.dynamic_graph():
951 952 953 954 955
            with _test_eager_guard():
                prelu = nn.PRelu(
                    mode=mode,
                    channel=inp_np.shape[1],
                    input_shape=inp_np.shape,
956 957
                    param_attr=ParamAttr(initializer=Constant(1.0)),
                )
958 959 960
                dy_eager_rlt = prelu(base.to_variable(inp_np))
                dy_eager_rlt_value = dy_eager_rlt.numpy()

961 962 963 964 965 966
            prelu = nn.PRelu(
                mode=mode,
                channel=inp_np.shape[1],
                input_shape=inp_np.shape,
                param_attr=ParamAttr(initializer=Constant(1.0)),
            )
967
            dy_rlt = prelu(base.to_variable(inp_np))
968
            dy_rlt_value = dy_rlt.numpy()
969

970 971 972
        np.testing.assert_allclose(static_rlt2, static_rlt, rtol=1e-05)
        np.testing.assert_allclose(dy_rlt_value, static_rlt, rtol=1e-05)
        np.testing.assert_allclose(dy_eager_rlt_value, static_rlt, rtol=1e-05)
973

974
        with self.dynamic_graph():
975 976 977 978 979 980 981
            with _test_eager_guard():
                inp_np = np.random.randn(5, 200, 100, 100).astype("float32")
                inp = base.to_variable(inp_np)
                prelu1 = nn.PRelu(
                    mode=mode,
                    channel=inp_np.shape[1],
                    input_shape=inp_np.shape,
982 983
                    param_attr=ParamAttr(initializer=Constant(2.0)),
                )
984 985 986 987
                prelu2 = nn.PRelu(
                    mode=mode,
                    channel=inp_np.shape[1],
                    input_shape=inp_np.shape,
988 989
                    param_attr=ParamAttr(initializer=Constant(1.0)),
                )
990 991 992
                dy_rlt1 = prelu1(inp)
                dy_rlt2 = prelu2(inp)
                self.assertFalse(
993 994
                    np.array_equal(prelu1.weight.numpy(), prelu2.weight.numpy())
                )
995
                self.assertFalse(
996 997
                    np.array_equal(dy_rlt1.numpy(), dy_rlt2.numpy())
                )
998 999 1000
                prelu2.weight.set_value(prelu1.weight.numpy())
                dy_rlt1 = prelu1(inp)
                dy_rlt2 = prelu2(inp)
1001
                np.testing.assert_array_equal(dy_rlt1.numpy(), dy_rlt2.numpy())
1002 1003

                prelu2.weight = prelu1.weight
1004 1005 1006
                np.testing.assert_array_equal(
                    prelu1.weight.numpy(), prelu2.weight.numpy()
                )
1007

1008 1009
            inp_np = np.random.randn(5, 200, 100, 100).astype("float32")
            inp = base.to_variable(inp_np)
1010 1011 1012 1013 1014 1015 1016 1017 1018 1019 1020 1021
            prelu1 = nn.PRelu(
                mode=mode,
                channel=inp_np.shape[1],
                input_shape=inp_np.shape,
                param_attr=ParamAttr(initializer=Constant(2.0)),
            )
            prelu2 = nn.PRelu(
                mode=mode,
                channel=inp_np.shape[1],
                input_shape=inp_np.shape,
                param_attr=ParamAttr(initializer=Constant(1.0)),
            )
1022 1023 1024
            dy_rlt1 = prelu1(inp)
            dy_rlt2 = prelu2(inp)
            self.assertFalse(
1025 1026
                np.array_equal(prelu1.weight.numpy(), prelu2.weight.numpy())
            )
1027 1028 1029 1030
            self.assertFalse(np.array_equal(dy_rlt1.numpy(), dy_rlt2.numpy()))
            prelu2.weight.set_value(prelu1.weight.numpy())
            dy_rlt1 = prelu1(inp)
            dy_rlt2 = prelu2(inp)
1031
            np.testing.assert_array_equal(dy_rlt1.numpy(), dy_rlt2.numpy())
1032 1033

            prelu2.weight = prelu1.weight
1034 1035 1036
            np.testing.assert_array_equal(
                prelu1.weight.numpy(), prelu2.weight.numpy()
            )
1037

1038 1039 1040 1041 1042
    def test_prelu(self):
        self.prelu_test("channel")
        self.prelu_test("element")
        self.prelu_test("all")

1043 1044 1045 1046 1047
    def test_embeding(self):
        inp_word = np.array([[[1]]]).astype('int64')
        dict_size = 20
        with self.static_graph():
            data_t = layers.data(name='word', shape=[1], dtype='int64')
1048 1049 1050 1051 1052 1053 1054 1055 1056
            emb = layers.embedding(
                input=data_t,
                size=[dict_size, 32],
                param_attr='emb.w',
                is_sparse=False,
            )
            static_rlt = self.get_static_graph_result(
                feed={'word': inp_word}, fetch_list=[emb]
            )[0]
1057 1058
        with self.static_graph():
            data_t = layers.data(name='word', shape=[1], dtype='int64')
1059 1060 1061
            emb2 = nn.Embedding(
                size=[dict_size, 32], param_attr='emb.w', is_sparse=False
            )
1062
            emb_rlt = emb2(data_t)
1063 1064 1065
            static_rlt2 = self.get_static_graph_result(
                feed={'word': inp_word}, fetch_list=[emb_rlt]
            )[0]
1066
        with self.dynamic_graph():
1067
            with _test_eager_guard():
1068 1069 1070 1071 1072
                emb2 = nn.Embedding(
                    size=[dict_size, 32],
                    param_attr='eager_emb.w',
                    is_sparse=False,
                )
1073 1074 1075
                dy_eager_rlt = emb2(base.to_variable(inp_word))
                dy_eager_rlt_value = dy_eager_rlt.numpy()

1076 1077 1078
            emb2 = nn.Embedding(
                size=[dict_size, 32], param_attr='emb.w', is_sparse=False
            )
1079 1080
            dy_rlt = emb2(base.to_variable(inp_word))
            dy_rlt_value = dy_rlt.numpy()
1081 1082

        self.assertTrue(np.allclose(static_rlt2, static_rlt))
1083
        self.assertTrue(np.allclose(dy_rlt_value, static_rlt))
1084
        self.assertTrue(np.allclose(dy_eager_rlt_value, static_rlt))
1085

1086
        with self.dynamic_graph():
1087 1088 1089 1090
            with _test_eager_guard():
                custom_weight = np.random.randn(dict_size, 32).astype("float32")
                weight_attr = fluid.ParamAttr(
                    initializer=fluid.initializer.NumpyArrayInitializer(
1091 1092 1093
                        custom_weight
                    )
                )
1094
                emb1 = nn.Embedding(size=[dict_size, 32], is_sparse=False)
1095 1096 1097 1098 1099
                emb2 = nn.Embedding(
                    size=[dict_size, 32],
                    param_attr=weight_attr,
                    is_sparse=False,
                )
1100 1101 1102
                rep1 = emb1(base.to_variable(inp_word))
                rep2 = emb2(base.to_variable(inp_word))
                self.assertFalse(
1103 1104 1105 1106 1107
                    np.array_equal(emb1.weight.numpy(), custom_weight)
                )
                np.testing.assert_array_equal(
                    emb2.weight.numpy(), custom_weight
                )
1108 1109 1110
                self.assertFalse(np.array_equal(rep1.numpy(), rep2.numpy()))
                emb2.weight.set_value(emb1.weight.numpy())
                rep2 = emb2(base.to_variable(inp_word))
1111
                np.testing.assert_array_equal(rep1.numpy(), rep2.numpy())
1112 1113

                emb2.weight = emb1.weight
1114 1115 1116
                np.testing.assert_array_equal(
                    emb1.weight.numpy(), emb2.weight.numpy()
                )
1117

1118
            custom_weight = np.random.randn(dict_size, 32).astype("float32")
1119 1120 1121 1122 1123
            weight_attr = fluid.ParamAttr(
                initializer=fluid.initializer.NumpyArrayInitializer(
                    custom_weight
                )
            )
1124
            emb1 = nn.Embedding(size=[dict_size, 32], is_sparse=False)
1125 1126 1127
            emb2 = nn.Embedding(
                size=[dict_size, 32], param_attr=weight_attr, is_sparse=False
            )
1128 1129 1130
            rep1 = emb1(base.to_variable(inp_word))
            rep2 = emb2(base.to_variable(inp_word))
            self.assertFalse(np.array_equal(emb1.weight.numpy(), custom_weight))
1131
            np.testing.assert_array_equal(emb2.weight.numpy(), custom_weight)
1132 1133 1134
            self.assertFalse(np.array_equal(rep1.numpy(), rep2.numpy()))
            emb2.weight.set_value(emb1.weight.numpy())
            rep2 = emb2(base.to_variable(inp_word))
1135
            np.testing.assert_array_equal(rep1.numpy(), rep2.numpy())
1136 1137

            emb2.weight = emb1.weight
1138 1139 1140
            np.testing.assert_array_equal(
                emb1.weight.numpy(), emb2.weight.numpy()
            )
1141

1142 1143 1144 1145
    def test_nce(self):
        window_size = 5
        dict_size = 20
        label_word = int(window_size // 2) + 1
1146
        inp_word = np.array([[1], [2], [3], [4], [5]]).astype('int64')
1147 1148 1149 1150 1151 1152
        nid_freq_arr = np.random.dirichlet(np.ones(20) * 1000).astype('float32')
        seed = 1
        with self.static_graph():
            words = []
            for i in range(window_size):
                words.append(
1153 1154 1155 1156 1157 1158 1159
                    layers.data(
                        name='word_{0}'.format(i), shape=[None], dtype='int64'
                    )
                )
            sample_weights = layers.fill_constant(
                shape=[5, 1], dtype='float32', value=1
            )
1160 1161 1162 1163 1164
            embs = []
            for i in range(window_size):
                if i == label_word:
                    continue

1165 1166 1167 1168 1169 1170
                emb = fluid.embedding(
                    input=words[i],
                    size=[dict_size, 32],
                    param_attr='emb.w',
                    is_sparse=False,
                )
1171 1172 1173
                embs.append(emb)

            embs = layers.concat(input=embs, axis=1)
1174
            wl = fluid.layers.unsqueeze(words[label_word], axes=[0])
1175
            nce_loss = paddle.static.nn.nce(
1176 1177 1178 1179 1180 1181 1182 1183 1184 1185 1186
                input=embs,
                label=wl,
                num_total_classes=dict_size,
                num_neg_samples=2,
                sampler="custom_dist",
                custom_dist=nid_freq_arr.tolist(),
                seed=seed,
                param_attr='nce.w',
                bias_attr='nce.b',
                sample_weight=sample_weights,
            )
1187 1188 1189
            feed_dict = dict()
            for i in range(window_size):
                feed_dict['word_{0}'.format(i)] = inp_word[i]
1190 1191 1192
            static_rlt = self.get_static_graph_result(
                feed=feed_dict, fetch_list=[nce_loss]
            )[0]
W
1194 1195 1196 1197
        with self.static_graph():
            words = []
            for i in range(window_size):
                words.append(
1198 1199 1200 1201 1202 1203 1204 1205 1206 1207
                    layers.data(
                        name='word_{0}'.format(i), shape=[None], dtype='int64'
                    )
                )
            sample_weights = layers.fill_constant(
                shape=[5, 1], dtype='float32', value=1
            )
            emb = nn.Embedding(
                size=[dict_size, 32], param_attr='emb.w', is_sparse=False
            )
1208 1209 1210 1211 1212 1213 1214 1215 1216 1217

            embs2 = []
            for i in range(window_size):
                if i == label_word:
                    continue

                emb_rlt = emb(words[i])
                embs2.append(emb_rlt)

            embs2 = layers.concat(input=embs2, axis=1)
1218 1219 1220 1221 1222 1223 1224 1225 1226 1227 1228
            nce = nn.NCE(
                num_total_classes=dict_size,
                dim=embs2.shape[1],
                num_neg_samples=2,
                sampler="custom_dist",
                custom_dist=nid_freq_arr.tolist(),
                seed=seed,
                param_attr='nce.w',
                bias_attr='nce.b',
                sample_weight=sample_weights,
            )
1229

1230 1231
            wl = fluid.layers.unsqueeze(words[label_word], axes=[0])
            nce_loss2 = nce(embs2, wl)
1232 1233 1234 1235
            feed_dict = dict()
            for i in range(len(words)):
                feed_dict['word_{0}'.format(i)] = inp_word[i]

1236 1237 1238
            static_rlt2 = self.get_static_graph_result(
                feed=feed_dict, fetch_list=[nce_loss2]
            )[0]
1239

L
W
                words = []
                for i in range(window_size):
                    words.append(base.to_variable(inp_word[i]))
1245 1246 1247 1248 1249 1250 1251 1252
                sample_weights = layers.fill_constant(
                    shape=[5, 1], dtype='float32', value=1
                )
                emb = nn.Embedding(
                    size=[dict_size, 32],
                    param_attr='eager_emb.w',
                    is_sparse=False,
                )
W
                embs3 = []
                for i in range(window_size):
                    if i == label_word:
                        continue

                    emb_rlt = emb(words[i])
                    embs3.append(emb_rlt)

1262 1263 1264 1265 1266 1267 1268 1269 1270 1271 1272 1273 1274 1275
                embs3 = layers.concat(
                    input=embs3, axis=fluid.dygraph.to_variable(np.array([1]))
                )
                nce = nn.NCE(
                    num_total_classes=dict_size,
                    dim=embs3.shape[1],
                    num_neg_samples=2,
                    sampler="custom_dist",
                    custom_dist=nid_freq_arr.tolist(),
                    seed=seed,
                    param_attr='eager_nce.w',
                    bias_attr='eager_nce.b',
                    sample_weight=sample_weights,
                )
W
                wl = fluid.layers.unsqueeze(words[label_word], axes=[0])
                dy_eager_rlt = nce(embs3, wl)
                dy_eager_rlt_value = dy_eager_rlt.numpy()

1281 1282 1283
            words = []
            for i in range(window_size):
                words.append(base.to_variable(inp_word[i]))
1284 1285 1286 1287 1288 1289
            sample_weights = layers.fill_constant(
                shape=[5, 1], dtype='float32', value=1
            )
            emb = nn.Embedding(
                size=[dict_size, 32], param_attr='emb.w', is_sparse=False
            )
1290 1291 1292 1293 1294 1295 1296 1297 1298

            embs3 = []
            for i in range(window_size):
                if i == label_word:
                    continue

                emb_rlt = emb(words[i])
                embs3.append(emb_rlt)

            embs3 = layers.concat(
                input=embs3, axis=fluid.dygraph.to_variable(np.array([1]))
            )
            nce = nn.NCE(
                num_total_classes=dict_size,
                dim=embs3.shape[1],
                num_neg_samples=2,
                sampler="custom_dist",
                custom_dist=nid_freq_arr.tolist(),
                seed=seed,
                param_attr='nce.w',
                bias_attr='nce.b',
                sample_weight=sample_weights,
            )

            wl = fluid.layers.unsqueeze(words[label_word], axes=[0])
            dy_rlt = nce(embs3, wl)
            dy_rlt_value = dy_rlt.numpy()

        np.testing.assert_allclose(static_rlt2, static_rlt, rtol=1e-05)
        np.testing.assert_allclose(dy_rlt_value, static_rlt, rtol=1e-05)
        np.testing.assert_allclose(dy_eager_rlt_value, static_rlt, rtol=1e-05)

        with self.dynamic_graph():
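            # The two NCE layers below start from different parameters; copying
            # nce1's weight and bias into nce2 should make their losses match.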
            with _test_eager_guard():
                custom_weight = np.random.randn(dict_size, 128).astype(
                    "float32"
                )
                weight_attr = fluid.ParamAttr(
                    initializer=fluid.initializer.NumpyArrayInitializer(
                        custom_weight
                    )
                )
                words = []
                for i in range(window_size):
                    words.append(base.to_variable(inp_word[i]))
                sample_weights = layers.fill_constant(
                    shape=fluid.dygraph.to_variable(np.array([5, 1])),
                    dtype='float32',
                    value=1,
                )
                emb = nn.Embedding(
                    size=[dict_size, 32],
                    param_attr='eager_emb.w',
                    is_sparse=False,
                )

                embs3 = []
                for i in range(window_size):
                    if i == label_word:
                        continue

                    emb_rlt = emb(words[i])
                    embs3.append(emb_rlt)

                embs3 = layers.concat(input=embs3, axis=1)
                nce1 = nn.NCE(
                    num_total_classes=dict_size,
                    dim=embs3.shape[1],
                    num_neg_samples=2,
                    sampler="custom_dist",
                    custom_dist=nid_freq_arr.tolist(),
                    seed=seed,
                    param_attr='eager_nce1.w',
                    bias_attr='eager_nce1.b',
                    sample_weight=sample_weights,
                )

                nce2 = nn.NCE(
                    num_total_classes=dict_size,
                    dim=embs3.shape[1],
                    num_neg_samples=2,
                    sampler="custom_dist",
                    custom_dist=nid_freq_arr.tolist(),
                    seed=seed,
                    param_attr=weight_attr,
                    bias_attr='eager_nce2.b',
                    sample_weight=sample_weights,
                )

                wl = fluid.layers.unsqueeze(words[label_word], axes=[0])
                nce1_loss = nce1(embs3, wl)
                nce2_loss = nce2(embs3, wl)
                self.assertFalse(
                    np.array_equal(nce1_loss.numpy(), nce2_loss.numpy())
                )
                nce2.weight.set_value(nce1.weight.numpy())
                nce2.bias.set_value(nce1.bias)
                nce1_loss = nce1(embs3, wl)
                nce2_loss = nce2(embs3, wl)
                np.testing.assert_array_equal(
                    nce1_loss.numpy(), nce2_loss.numpy()
                )

                nce2.weight = nce1.weight
                nce2.bias = nce1.bias
                np.testing.assert_array_equal(
                    nce1.weight.numpy(), nce2.weight.numpy()
                )
                np.testing.assert_array_equal(
                    nce1.bias.numpy(), nce2.bias.numpy()
                )

            custom_weight = np.random.randn(dict_size, 128).astype("float32")
            weight_attr = fluid.ParamAttr(
                initializer=fluid.initializer.NumpyArrayInitializer(
                    custom_weight
                )
            )
            words = []
            for i in range(window_size):
                words.append(base.to_variable(inp_word[i]))
            sample_weights = layers.fill_constant(
                shape=fluid.dygraph.to_variable(np.array([5, 1])),
                dtype='float32',
                value=1,
            )
            emb = nn.Embedding(
                size=[dict_size, 32], param_attr='emb.w', is_sparse=False
            )

            embs3 = []
            for i in range(window_size):
                if i == label_word:
                    continue

                emb_rlt = emb(words[i])
                embs3.append(emb_rlt)

            embs3 = layers.concat(input=embs3, axis=1)
            nce1 = nn.NCE(
                num_total_classes=dict_size,
                dim=embs3.shape[1],
                num_neg_samples=2,
                sampler="custom_dist",
                custom_dist=nid_freq_arr.tolist(),
                seed=seed,
                param_attr='nce1.w',
                bias_attr='nce1.b',
                sample_weight=sample_weights,
            )

            nce2 = nn.NCE(
                num_total_classes=dict_size,
                dim=embs3.shape[1],
                num_neg_samples=2,
                sampler="custom_dist",
                custom_dist=nid_freq_arr.tolist(),
                seed=seed,
                param_attr=weight_attr,
                bias_attr='nce2.b',
                sample_weight=sample_weights,
            )

            wl = fluid.layers.unsqueeze(words[label_word], axes=[0])
            nce1_loss = nce1(embs3, wl)
            nce2_loss = nce2(embs3, wl)
            self.assertFalse(
                np.array_equal(nce1_loss.numpy(), nce2_loss.numpy())
            )
            nce2.weight.set_value(nce1.weight.numpy())
            nce2.bias.set_value(nce1.bias)
            nce1_loss = nce1(embs3, wl)
            nce2_loss = nce2(embs3, wl)
            np.testing.assert_array_equal(nce1_loss.numpy(), nce2_loss.numpy())

            nce2.weight = nce1.weight
            nce2.bias = nce1.bias
            np.testing.assert_array_equal(
                nce1.weight.numpy(), nce2.weight.numpy()
            )
            np.testing.assert_array_equal(nce1.bias.numpy(), nce2.bias.numpy())

    def test_one_hot(self):
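        # one_hot should give the same result whether depth is a Python int or a Tensor.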
        with self.dynamic_graph():
            with _test_eager_guard():
                label = fluid.dygraph.to_variable(
                    np.array([[1], [1], [3], [0]])
                )
                one_hot_label1 = fluid.layers.one_hot(input=label, depth=4)
                one_hot_label2 = fluid.layers.one_hot(
                    input=label, depth=fluid.dygraph.to_variable(np.array([4]))
                )
                np.testing.assert_array_equal(
                    one_hot_label1.numpy(), one_hot_label2.numpy()
                )

            label = fluid.dygraph.to_variable(np.array([[1], [1], [3], [0]]))
            one_hot_label1 = fluid.layers.one_hot(input=label, depth=4)
            one_hot_label2 = fluid.layers.one_hot(
                input=label, depth=fluid.dygraph.to_variable(np.array([4]))
            )
            np.testing.assert_array_equal(
                one_hot_label1.numpy(), one_hot_label2.numpy()
            )

    def test_split(self):
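        # split should give the same result whether dim is a Python int or a Tensor.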
        with self.dynamic_graph():
            with _test_eager_guard():
                input = fluid.dygraph.to_variable(np.random.random((3, 8, 5)))
                x0, x1 = fluid.layers.split(input, num_or_sections=2, dim=1)
                x00, x11 = fluid.layers.split(
                    input,
                    num_or_sections=2,
                    dim=fluid.dygraph.to_variable(np.array([1])),
                )
                np.testing.assert_array_equal(x0.numpy(), x00.numpy())
                np.testing.assert_array_equal(x1.numpy(), x11.numpy())

            input = fluid.dygraph.to_variable(np.random.random((3, 8, 5)))
            x0, x1 = fluid.layers.split(input, num_or_sections=2, dim=1)
            x00, x11 = fluid.layers.split(
                input,
                num_or_sections=2,
                dim=fluid.dygraph.to_variable(np.array([1])),
            )
            np.testing.assert_array_equal(x0.numpy(), x00.numpy())
            np.testing.assert_array_equal(x1.numpy(), x11.numpy())

    def test_topk(self):
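        # topk should return the same values and indices whether k is a Python int or a Tensor.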
        with self.dynamic_graph():
            with _test_eager_guard():
                input = fluid.dygraph.to_variable(np.random.random((13, 11)))
                top5_values1, top5_indices1 = layers.topk(input, k=5)
                top5_values2, top5_indices2 = layers.topk(
                    input, k=fluid.dygraph.to_variable(np.array([5]))
                )
                np.testing.assert_array_equal(
                    top5_values1.numpy(), top5_values2.numpy()
                )
                np.testing.assert_array_equal(
                    top5_indices1.numpy(), top5_indices2.numpy()
                )

            input = fluid.dygraph.to_variable(np.random.random((13, 11)))
            top5_values1, top5_indices1 = layers.topk(input, k=5)
            top5_values2, top5_indices2 = layers.topk(
                input, k=fluid.dygraph.to_variable(np.array([5]))
            )
            np.testing.assert_array_equal(
                top5_values1.numpy(), top5_values2.numpy()
            )
            np.testing.assert_array_equal(
                top5_indices1.numpy(), top5_indices2.numpy()
            )

    def test_conv3d(self):
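        # The functional conv3d and the Conv3D layer should agree in static mode,
        # and both should match the dynamic-graph Conv3D result.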
        with self.static_graph():
            images = layers.data(
                name='pixel', shape=[3, 6, 6, 6], dtype='float32'
            )
            ret = paddle.static.nn.conv3d(
                input=images, num_filters=3, filter_size=2
            )
            static_ret = self.get_static_graph_result(
                feed={'pixel': np.ones([2, 3, 6, 6, 6], dtype='float32')},
                fetch_list=[ret],
            )[0]

        with self.static_graph():
            images = layers.data(
                name='pixel', shape=[3, 6, 6, 6], dtype='float32'
            )
            conv3d = paddle.nn.Conv3D(
                in_channels=3, out_channels=3, kernel_size=2
            )
            ret = conv3d(images)
            static_ret2 = self.get_static_graph_result(
                feed={'pixel': np.ones([2, 3, 6, 6, 6], dtype='float32')},
                fetch_list=[ret],
            )[0]

        with self.dynamic_graph():
            with _test_eager_guard():
                images = np.ones([2, 3, 6, 6, 6], dtype='float32')
                conv3d = paddle.nn.Conv3D(
                    in_channels=3, out_channels=3, kernel_size=2
                )
                dy_eager_ret = conv3d(base.to_variable(images))
                dy_eager_rlt_value = dy_eager_ret.numpy()

            images = np.ones([2, 3, 6, 6, 6], dtype='float32')
            conv3d = paddle.nn.Conv3D(
                in_channels=3, out_channels=3, kernel_size=2
            )
            dy_ret = conv3d(base.to_variable(images))
            dy_rlt_value = dy_ret.numpy()

        np.testing.assert_allclose(static_ret, dy_rlt_value, rtol=1e-05)
        np.testing.assert_allclose(static_ret, dy_eager_rlt_value, rtol=1e-05)
        np.testing.assert_allclose(static_ret, static_ret2, rtol=1e-05)

        with self.dynamic_graph():
            with _test_eager_guard():
                images = np.ones([2, 3, 6, 6, 6], dtype='float32')
                custom_weight = np.random.randn(3, 3, 2, 2, 2).astype("float32")
                weight_attr = fluid.ParamAttr(
                    initializer=fluid.initializer.NumpyArrayInitializer(
                        custom_weight
                    )
                )
                conv3d1 = paddle.nn.Conv3D(
                    in_channels=3, out_channels=3, kernel_size=2
                )
                conv3d2 = paddle.nn.Conv3D(
                    in_channels=3,
                    out_channels=3,
                    kernel_size=2,
                    weight_attr=weight_attr,
                )
                dy_ret1 = conv3d1(base.to_variable(images))
                dy_ret2 = conv3d2(base.to_variable(images))
                self.assertFalse(
                    np.array_equal(dy_ret1.numpy(), dy_ret2.numpy())
                )

                conv3d1_weight_np = conv3d1.weight.numpy()
                conv3d1_bias = conv3d1.bias
                self.assertFalse(
                    np.array_equal(conv3d1_weight_np, conv3d2.weight.numpy())
                )
                conv3d2.weight.set_value(conv3d1_weight_np)
                np.testing.assert_array_equal(
                    conv3d1_weight_np, conv3d2.weight.numpy()
                )
                conv3d1.bias.set_value(conv3d1_bias)
                dy_ret1 = conv3d1(base.to_variable(images))
                dy_ret2 = conv3d2(base.to_variable(images))
                np.testing.assert_array_equal(dy_ret1.numpy(), dy_ret2.numpy())

                conv3d2.weight = conv3d1.weight
                conv3d2.bias = conv3d1.bias
                np.testing.assert_array_equal(
                    conv3d1.weight.numpy(), conv3d2.weight.numpy()
                )
                np.testing.assert_array_equal(
                    conv3d1.bias.numpy(), conv3d2.bias.numpy()
                )

            images = np.ones([2, 3, 6, 6, 6], dtype='float32')
            custom_weight = np.random.randn(3, 3, 2, 2, 2).astype("float32")
            weight_attr = fluid.ParamAttr(
                initializer=fluid.initializer.NumpyArrayInitializer(
                    custom_weight
                )
            )
            conv3d1 = paddle.nn.Conv3D(
                in_channels=3, out_channels=3, kernel_size=2
            )
            conv3d2 = paddle.nn.Conv3D(
                in_channels=3,
                out_channels=3,
                kernel_size=2,
                weight_attr=weight_attr,
            )
            dy_ret1 = conv3d1(base.to_variable(images))
            dy_ret2 = conv3d2(base.to_variable(images))
            self.assertFalse(np.array_equal(dy_ret1.numpy(), dy_ret2.numpy()))

            conv3d1_weight_np = conv3d1.weight.numpy()
            conv3d1_bias = conv3d1.bias
            self.assertFalse(
                np.array_equal(conv3d1_weight_np, conv3d2.weight.numpy())
            )
            conv3d2.weight.set_value(conv3d1_weight_np)
            np.testing.assert_array_equal(
                conv3d1_weight_np, conv3d2.weight.numpy()
            )
            conv3d1.bias.set_value(conv3d1_bias)
            dy_ret1 = conv3d1(base.to_variable(images))
            dy_ret2 = conv3d2(base.to_variable(images))
            np.testing.assert_array_equal(dy_ret1.numpy(), dy_ret2.numpy())

            conv3d2.weight = conv3d1.weight
            conv3d2.bias = conv3d1.bias
            np.testing.assert_array_equal(
                conv3d1.weight.numpy(), conv3d2.weight.numpy()
            )
            np.testing.assert_array_equal(
                conv3d1.bias.numpy(), conv3d2.bias.numpy()
            )

    def test_row_conv(self):
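        # Compare the functional row_conv with the RowConv layer in static mode only;
        # the dygraph check is skipped because LoDTensor input is not supported there.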
        input = np.arange(15).reshape([3, 5]).astype('float32')
        if core.is_compiled_with_cuda():
            place = core.CUDAPlace(0)
        else:
            place = core.CPUPlace()

        with self.static_graph():
            x = layers.data(
                name='X',
                shape=[3, 5],
                dtype='float32',
                lod_level=1,
                append_batch_size=False,
            )
            ret = layers.row_conv(input=x, future_context_size=2)
            static_ret = self.get_static_graph_result(
                feed={
                    'X': fluid.create_lod_tensor(
                        data=input, recursive_seq_lens=[[1, 1, 1]], place=place
                    )
                },
                fetch_list=[ret],
                with_lod=True,
            )[0]

        with self.static_graph():
            x = layers.data(
                name='X',
                shape=[3, 5],
                dtype='float32',
                lod_level=1,
                append_batch_size=False,
            )
            rowConv = nn.RowConv('RowConv', future_context_size=2)
            ret = rowConv(x)
            static_ret2 = self.get_static_graph_result(
                feed={
                    'X': fluid.create_lod_tensor(
                        data=input, recursive_seq_lens=[[1, 1, 1]], place=place
                    )
                },
                fetch_list=[ret],
                with_lod=True,
            )[0]

        # TODO: dygraph can't support LODTensor

        np.testing.assert_allclose(static_ret, static_ret2, rtol=1e-05)

    def func_group_norm(self):
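        # The functional group_norm and the GroupNorm layer should produce the same
        # output in static mode, and the dynamic-graph result should match as well.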
        if core.is_compiled_with_cuda():
            place = core.CUDAPlace(0)
        else:
            place = core.CPUPlace()

        shape = (2, 4, 3, 3)

        input = np.random.random(shape).astype('float32')

        with self.static_graph():
            X = fluid.layers.data(
                name='X',
                shape=shape,
                dtype='float32',
                lod_level=1,
                append_batch_size=False,
            )
            ret = paddle.static.nn.group_norm(
                input=X,
                groups=2,
                param_attr=fluid.initializer.Uniform(low=-0.5, high=0.5),
                bias_attr=fluid.initializer.ConstantInitializer(value=1),
            )
            static_ret = self.get_static_graph_result(
                feed={
                    'X': fluid.create_lod_tensor(
                        data=input, recursive_seq_lens=[[1, 1]], place=place
                    )
                },
                fetch_list=[ret],
                with_lod=True,
            )[0]

        with self.static_graph():
            X = fluid.layers.data(
                name='X',
                shape=shape,
                dtype='float32',
                lod_level=1,
                append_batch_size=False,
            )
            groupNorm = nn.GroupNorm(
                channels=shape[1],
                groups=2,
                param_attr=fluid.initializer.Uniform(low=-0.5, high=0.5),
                bias_attr=fluid.initializer.ConstantInitializer(value=1),
            )
            ret = groupNorm(X)
            static_ret2 = self.get_static_graph_result(
                feed={
                    'X': fluid.create_lod_tensor(
                        data=input, recursive_seq_lens=[[1, 1]], place=place
                    )
                },
                fetch_list=[ret],
                with_lod=True,
            )[0]

        with self.dynamic_graph():
            groupNorm = nn.GroupNorm(
                channels=shape[1],
                groups=2,
                param_attr=fluid.initializer.Uniform(low=-0.5, high=0.5),
                bias_attr=fluid.initializer.ConstantInitializer(value=1),
            )
            dy_ret = groupNorm(base.to_variable(input))
            dy_rlt_value = dy_ret.numpy()

        np.testing.assert_allclose(static_ret, dy_rlt_value, rtol=1e-05)
        np.testing.assert_allclose(static_ret, static_ret2, rtol=1e-05)

    def test_group_norm(self):
        with _test_eager_guard():
            self.func_group_norm()
        self.func_group_norm()

    def test_instance_norm(self):
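        # InstanceNorm2D should agree across static, dynamic, and eager modes,
        # and should reject non-Variable and integer inputs with TypeError.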
        if core.is_compiled_with_cuda():
            place = core.CUDAPlace(0)
        else:
            place = core.CPUPlace()

        shape = (2, 4, 3, 3)

        input = np.random.random(shape).astype('float32')

        with self.static_graph():
            X = fluid.layers.data(
                name='X', shape=shape, dtype='float32', append_batch_size=False
            )
            ret = paddle.static.nn.instance_norm(input=X)
            static_ret = self.get_static_graph_result(
                feed={'X': input}, fetch_list=[ret]
            )[0]

        with self.static_graph():
            X = fluid.layers.data(
                name='X', shape=shape, dtype='float32', append_batch_size=False
            )
            instanceNorm = paddle.nn.InstanceNorm2D(num_features=shape[1])
            ret = instanceNorm(X)
            static_ret2 = self.get_static_graph_result(
                feed={'X': input}, fetch_list=[ret]
            )[0]

        with self.dynamic_graph():
            with _test_eager_guard():
                instanceNorm = paddle.nn.InstanceNorm2D(num_features=shape[1])
                dy_eager_ret = instanceNorm(base.to_variable(input))
                dy_eager_rlt_value = dy_eager_ret.numpy()

            instanceNorm = paddle.nn.InstanceNorm2D(num_features=shape[1])
            dy_ret = instanceNorm(base.to_variable(input))
            dy_rlt_value = dy_ret.numpy()

        with self.dynamic_graph():
            with _test_eager_guard():
                instanceNorm = paddle.nn.InstanceNorm2D(num_features=shape[1])
                dy_eager_ret = instanceNorm(base.to_variable(input))
                dy_eager_rlt_value2 = dy_eager_ret.numpy()

            instanceNorm = paddle.nn.InstanceNorm2D(num_features=shape[1])
            dy_ret = instanceNorm(base.to_variable(input))
            dy_rlt_value2 = dy_ret.numpy()

        np.testing.assert_allclose(static_ret, dy_rlt_value, rtol=1e-05)
        np.testing.assert_allclose(static_ret, dy_rlt_value2, rtol=1e-05)
        np.testing.assert_allclose(static_ret, dy_eager_rlt_value, rtol=1e-05)
        np.testing.assert_allclose(static_ret, dy_eager_rlt_value2, rtol=1e-05)
        np.testing.assert_allclose(static_ret, static_ret2, rtol=1e-05)

        with self.static_graph():
            # the input of InstanceNorm must be Variable.
            def test_Variable():
                instanceNorm = paddle.nn.InstanceNorm2D(num_features=shape[1])
                ret1 = instanceNorm(input)

            self.assertRaises(TypeError, test_Variable)

            # the input dtype of InstanceNorm must be float32 or float64
            def test_type():
                input = np.random.random(shape).astype('int32')
                instanceNorm = paddle.nn.InstanceNorm2D(num_features=shape[1])
                ret2 = instanceNorm(input)

            self.assertRaises(TypeError, test_type)

    def test_spectral_norm(self):
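        # The functional spectral_norm and the SpectralNorm layer should agree
        # across static and dynamic modes.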
        if core.is_compiled_with_cuda():
            place = core.CUDAPlace(0)
        else:
            place = core.CPUPlace()

        shape = (2, 4, 3, 3)

        input = np.random.random(shape).astype('float32')

        with self.static_graph():
            Weight = fluid.layers.data(
                name='Weight',
                shape=shape,
                dtype='float32',
                lod_level=1,
                append_batch_size=False,
            )
            ret = layers.spectral_norm(weight=Weight, dim=1, power_iters=2)
            static_ret = self.get_static_graph_result(
                feed={
                    'Weight': fluid.create_lod_tensor(
                        data=input, recursive_seq_lens=[[1, 1]], place=place
                    ),
                },
                fetch_list=[ret],
                with_lod=True,
            )[0]

        with self.static_graph():
            Weight = fluid.layers.data(
                name='Weight',
                shape=shape,
                dtype='float32',
                lod_level=1,
                append_batch_size=False,
            )
            spectralNorm = nn.SpectralNorm(shape, dim=1, power_iters=2)
            ret = spectralNorm(Weight)
            static_ret2 = self.get_static_graph_result(
                feed={
                    'Weight': fluid.create_lod_tensor(
                        data=input, recursive_seq_lens=[[1, 1]], place=place
                    )
                },
                fetch_list=[ret],
                with_lod=True,
            )[0]

        with self.dynamic_graph():
            with _test_eager_guard():
                spectralNorm = nn.SpectralNorm(shape, dim=1, power_iters=2)
                dy_eager_ret = spectralNorm(base.to_variable(input))
                dy_eager_rlt_value = dy_eager_ret.numpy()

            spectralNorm = nn.SpectralNorm(shape, dim=1, power_iters=2)
            dy_ret = spectralNorm(base.to_variable(input))
            dy_rlt_value = dy_ret.numpy()

        np.testing.assert_allclose(static_ret, dy_rlt_value, rtol=1e-05)
        np.testing.assert_allclose(static_ret, dy_eager_rlt_value, rtol=1e-05)
        np.testing.assert_allclose(static_ret, static_ret2, rtol=1e-05)

    def test_tree_conv(self):
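        # The functional tree_conv and the TreeConv layer should agree, and copying
        # parameters between two TreeConv layers should make their outputs match.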
        if core.is_compiled_with_cuda():
            place = core.CUDAPlace(0)
        else:
            place = core.CPUPlace()
        adj_array = [1, 2, 1, 3, 1, 4, 1, 5, 2, 6, 2, 7, 2, 8, 4, 9, 4, 10]
        adj = np.array(adj_array).reshape((1, 9, 2)).astype('int32')
        adj = np.tile(adj, (1, 1, 1))
        vectors = np.random.random((1, 10, 5)).astype('float32')
        with self.static_graph():
            NodesVector = fluid.layers.data(
                name='NodesVector',
                shape=(1, 10, 5),
                dtype='float32',
                lod_level=1,
                append_batch_size=False,
            )
            EdgeSet = fluid.layers.data(
                name='EdgeSet',
                shape=(1, 9, 2),
                dtype='int32',
                lod_level=1,
                append_batch_size=False,
            )
            ret = fluid.contrib.layers.tree_conv(
                nodes_vector=NodesVector,
                edge_set=EdgeSet,
                output_size=6,
                num_filters=1,
                max_depth=2,
            )
            static_ret = self.get_static_graph_result(
                feed={
                    'NodesVector': fluid.create_lod_tensor(
                        data=vectors, recursive_seq_lens=[[1]], place=place
                    ),
                    'EdgeSet': fluid.create_lod_tensor(
                        data=adj, recursive_seq_lens=[[1]], place=place
                    ),
                },
                fetch_list=[ret],
                with_lod=False,
            )[0]

        with self.static_graph():
            NodesVector = fluid.layers.data(
                name='NodesVector',
                shape=(1, 10, 5),
                dtype='float32',
                lod_level=1,
                append_batch_size=False,
            )
            EdgeSet = fluid.layers.data(
                name='EdgeSet',
                shape=(1, 9, 2),
                dtype='int32',
                lod_level=1,
                append_batch_size=False,
            )
            treeConv = nn.TreeConv(
                feature_size=5, output_size=6, num_filters=1, max_depth=2
            )
            ret = treeConv(NodesVector, EdgeSet)
            static_ret2 = self.get_static_graph_result(
                feed={
                    'NodesVector': fluid.create_lod_tensor(
                        data=vectors, recursive_seq_lens=[[1]], place=place
                    ),
                    'EdgeSet': fluid.create_lod_tensor(
                        data=adj, recursive_seq_lens=[[1]], place=place
                    ),
                },
                fetch_list=[ret],
                with_lod=False,
            )[0]

        with self.dynamic_graph():
            with _test_eager_guard():
                treeConv = nn.TreeConv(
                    feature_size=5, output_size=6, num_filters=1, max_depth=2
                )
                dy_eager_ret = treeConv(
                    base.to_variable(vectors), base.to_variable(adj)
                )
                dy_eager_rlt_value = dy_eager_ret.numpy()

            treeConv = nn.TreeConv(
                feature_size=5, output_size=6, num_filters=1, max_depth=2
            )
            dy_ret = treeConv(base.to_variable(vectors), base.to_variable(adj))
            dy_rlt_value = dy_ret.numpy()

        np.testing.assert_allclose(static_ret, static_ret2, rtol=1e-05)
        np.testing.assert_allclose(static_ret, dy_rlt_value, rtol=1e-05)
        np.testing.assert_allclose(static_ret, dy_eager_rlt_value, rtol=1e-05)

        with self.dynamic_graph():
            with _test_eager_guard():
                custom_weight = np.random.randn(5, 3, 6, 1).astype("float32")
                weight_attr = fluid.ParamAttr(
                    initializer=fluid.initializer.NumpyArrayInitializer(
                        custom_weight
                    )
                )
                treeConv1 = nn.TreeConv(
                    feature_size=5,
                    output_size=6,
                    num_filters=1,
                    max_depth=2,
                    bias_attr='eager_tc1_b',
                )
                treeConv2 = nn.TreeConv(
                    feature_size=5,
                    output_size=6,
                    num_filters=1,
                    max_depth=2,
                    param_attr=weight_attr,
                    bias_attr='eager_tc2_b',
                )
                dy_ret1 = treeConv1(
                    base.to_variable(vectors), base.to_variable(adj)
                )
                dy_ret2 = treeConv2(
                    base.to_variable(vectors), base.to_variable(adj)
                )
                self.assertFalse(
                    np.array_equal(dy_ret1.numpy(), dy_ret2.numpy())
                )
                treeConv2.weight.set_value(treeConv1.weight.numpy())
                treeConv2.bias.set_value(treeConv1.bias)
                dy_ret1 = treeConv1(
                    base.to_variable(vectors), base.to_variable(adj)
                )
                dy_ret2 = treeConv2(
                    base.to_variable(vectors), base.to_variable(adj)
                )
                np.testing.assert_array_equal(dy_ret1.numpy(), dy_ret2.numpy())

                treeConv2.weight = treeConv1.weight
                treeConv2.bias = treeConv1.bias
                np.testing.assert_array_equal(
                    treeConv1.weight.numpy(), treeConv2.weight.numpy()
                )
                np.testing.assert_array_equal(
                    treeConv1.bias.numpy(), treeConv2.bias.numpy()
                )

            custom_weight = np.random.randn(5, 3, 6, 1).astype("float32")
            weight_attr = fluid.ParamAttr(
                initializer=fluid.initializer.NumpyArrayInitializer(
                    custom_weight
                )
            )
            treeConv1 = nn.TreeConv(
                feature_size=5,
                output_size=6,
                num_filters=1,
                max_depth=2,
                bias_attr='tc1_b',
            )
            treeConv2 = nn.TreeConv(
                feature_size=5,
                output_size=6,
                num_filters=1,
                max_depth=2,
                param_attr=weight_attr,
                bias_attr='tc2_b',
            )
            dy_ret1 = treeConv1(
                base.to_variable(vectors), base.to_variable(adj)
            )
            dy_ret2 = treeConv2(
                base.to_variable(vectors), base.to_variable(adj)
            )
            self.assertFalse(np.array_equal(dy_ret1.numpy(), dy_ret2.numpy()))
            treeConv2.weight.set_value(treeConv1.weight.numpy())
            treeConv2.bias.set_value(treeConv1.bias)
            dy_ret1 = treeConv1(
                base.to_variable(vectors), base.to_variable(adj)
            )
            dy_ret2 = treeConv2(
                base.to_variable(vectors), base.to_variable(adj)
            )
            np.testing.assert_array_equal(dy_ret1.numpy(), dy_ret2.numpy())

            treeConv2.weight = treeConv1.weight
            treeConv2.bias = treeConv1.bias
            np.testing.assert_array_equal(
                treeConv1.weight.numpy(), treeConv2.weight.numpy()
            )
            np.testing.assert_array_equal(
                treeConv1.bias.numpy(), treeConv2.bias.numpy()
            )

    def test_conv3d_transpose(self):
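        # The functional conv3d_transpose and the Conv3DTranspose layer should agree
        # across static and dynamic modes.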
        input_array = (
            np.arange(0, 48).reshape([2, 3, 2, 2, 2]).astype('float32')
        )

        with self.static_graph():
            img = layers.data(name='pixel', shape=[3, 2, 2, 2], dtype='float32')
            out = paddle.static.nn.conv3d_transpose(
                input=img, num_filters=12, filter_size=12, use_cudnn=True
            )
            static_rlt = self.get_static_graph_result(
                feed={'pixel': input_array}, fetch_list=[out]
            )[0]
        with self.static_graph():
            img = layers.data(name='pixel', shape=[3, 2, 2, 2], dtype='float32')
            conv3d_transpose = paddle.nn.Conv3DTranspose(
                in_channels=3, out_channels=12, kernel_size=12
            )
            out = conv3d_transpose(img)
            static_rlt2 = self.get_static_graph_result(
                feed={'pixel': input_array}, fetch_list=[out]
            )[0]
        with self.dynamic_graph():
            with _test_eager_guard():
                conv3d_transpose = paddle.nn.Conv3DTranspose(
                    in_channels=3,
                    out_channels=12,
                    kernel_size=12,
                )
                dy_eager_rlt = conv3d_transpose(base.to_variable(input_array))
                dy_eager_rlt_value = dy_eager_rlt.numpy()

            conv3d_transpose = paddle.nn.Conv3DTranspose(
                in_channels=3, out_channels=12, kernel_size=12
            )
            dy_rlt = conv3d_transpose(base.to_variable(input_array))
            dy_rlt_value = dy_rlt.numpy()
        np.testing.assert_allclose(static_rlt2, static_rlt, rtol=1e-05)
        np.testing.assert_allclose(dy_rlt_value, static_rlt, rtol=1e-05)
        np.testing.assert_allclose(dy_eager_rlt_value, static_rlt, rtol=1e-05)

        with self.dynamic_graph():
            with _test_eager_guard():
                images = np.ones([2, 3, 6, 6, 6], dtype='float32')
                custom_weight = np.random.randn(3, 3, 2, 2, 2).astype("float32")
                weight_attr = fluid.ParamAttr(
                    initializer=fluid.initializer.NumpyArrayInitializer(
                        custom_weight
                    )
                )
                conv3d1 = paddle.nn.Conv3DTranspose(
                    in_channels=3,
                    out_channels=3,
                    kernel_size=2,
                    bias_attr='eager_conv3d1_b',
                )
                conv3d2 = paddle.nn.Conv3DTranspose(
                    in_channels=3,
                    out_channels=3,
                    kernel_size=2,
                    weight_attr=weight_attr,
                    bias_attr='eager_conv3d2_b',
                )
                dy_ret1 = conv3d1(base.to_variable(images))
                dy_ret2 = conv3d2(base.to_variable(images))
                self.assertFalse(
                    np.array_equal(dy_ret1.numpy(), dy_ret2.numpy())
                )

                conv3d1_weight_np = conv3d1.weight.numpy()
                conv3d1_bias = conv3d1.bias
                self.assertFalse(
                    np.array_equal(conv3d1_weight_np, conv3d2.weight.numpy())
                )
                conv3d2.weight.set_value(conv3d1_weight_np)
                np.testing.assert_array_equal(
                    conv3d1_weight_np, conv3d2.weight.numpy()
                )
                conv3d1.bias.set_value(conv3d1_bias)
                dy_ret1 = conv3d1(base.to_variable(images))
                dy_ret2 = conv3d2(base.to_variable(images))
                np.testing.assert_array_equal(dy_ret1.numpy(), dy_ret2.numpy())

                conv3d2.weight = conv3d1.weight
                conv3d2.bias = conv3d1.bias
                np.testing.assert_array_equal(
                    conv3d1.weight.numpy(), conv3d2.weight.numpy()
                )
                np.testing.assert_array_equal(
                    conv3d1.bias.numpy(), conv3d2.bias.numpy()
                )

            images = np.ones([2, 3, 6, 6, 6], dtype='float32')
            custom_weight = np.random.randn(3, 3, 2, 2, 2).astype("float32")
            weight_attr = fluid.ParamAttr(
                initializer=fluid.initializer.NumpyArrayInitializer(
                    custom_weight
                )
            )
            conv3d1 = paddle.nn.Conv3DTranspose(
                in_channels=3,
                out_channels=3,
                kernel_size=2,
                bias_attr='conv3d1_b',
            )
            conv3d2 = paddle.nn.Conv3DTranspose(
                in_channels=3,
                out_channels=3,
                kernel_size=2,
                weight_attr=weight_attr,
                bias_attr='conv3d2_b',
            )
            dy_ret1 = conv3d1(base.to_variable(images))
            dy_ret2 = conv3d2(base.to_variable(images))
            self.assertFalse(np.array_equal(dy_ret1.numpy(), dy_ret2.numpy()))

            conv3d1_weight_np = conv3d1.weight.numpy()
            conv3d1_bias = conv3d1.bias
            self.assertFalse(
                np.array_equal(conv3d1_weight_np, conv3d2.weight.numpy())
            )
            conv3d2.weight.set_value(conv3d1_weight_np)
            np.testing.assert_array_equal(
                conv3d1_weight_np, conv3d2.weight.numpy()
            )
            conv3d1.bias.set_value(conv3d1_bias)
            dy_ret1 = conv3d1(base.to_variable(images))
            dy_ret2 = conv3d2(base.to_variable(images))
            np.testing.assert_array_equal(dy_ret1.numpy(), dy_ret2.numpy())

            conv3d2.weight = conv3d1.weight
            conv3d2.bias = conv3d1.bias
            np.testing.assert_array_equal(
                conv3d1.weight.numpy(), conv3d2.weight.numpy()
            )
            np.testing.assert_array_equal(
                conv3d1.bias.numpy(), conv3d2.bias.numpy()
            )

    def func_while_loop(self):
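        # while_loop should give the same result in static and dynamic modes; a body
        # whose return structure differs from the loop variables must raise ValueError.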
        with self.static_graph():
            i = layers.fill_constant(shape=[1], dtype='int64', value=0)
            ten = layers.fill_constant(shape=[1], dtype='int64', value=10)

            def cond(i):
                return paddle.less_than(i, ten)

            def body(i):
                return i + 1

            out = layers.while_loop(cond, body, [i])
            static_ret = self.get_static_graph_result(feed={}, fetch_list=out)

        with self.dynamic_graph():
            i = layers.fill_constant(shape=[1], dtype='int64', value=0)
            ten = layers.fill_constant(shape=[1], dtype='int64', value=10)

            def cond1(i):
                return paddle.less_than(i, ten)

            def body1(i):
                return i + 1

            dy_ret = layers.while_loop(cond1, body1, [i])
            with self.assertRaises(ValueError):
                j = layers.fill_constant(shape=[1], dtype='int64', value=0)

                def body2(i):
                    return i + 1, i + 2

                layers.while_loop(cond1, body2, [j])

        np.testing.assert_array_equal(static_ret[0], dy_ret[0].numpy())

    def test_while_loop(self):
        with _test_eager_guard():
            self.func_while_loop()
        self.func_while_loop()

    def test_compare(self):
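        # Every comparison op should give the same result in static graph,
        # dynamic graph, and eager mode.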
        value_a = np.arange(3)
        value_b = np.arange(3)
        # less than
        with self.static_graph():
            a = layers.data(name='a', shape=[1], dtype='int64')
            b = layers.data(name='b', shape=[1], dtype='int64')
            cond = paddle.less_than(x=a, y=b)
            static_ret = self.get_static_graph_result(
                feed={"a": value_a, "b": value_b}, fetch_list=[cond]
            )[0]
        with self.dynamic_graph():
            with _test_eager_guard():
                da = base.to_variable(value_a)
                db = base.to_variable(value_b)
                dcond = paddle.less_than(x=da, y=db)

                for i in range(len(static_ret)):
                    self.assertTrue(dcond.numpy()[i] == static_ret[i])

            da = base.to_variable(value_a)
            db = base.to_variable(value_b)
            dcond = paddle.less_than(x=da, y=db)

            for i in range(len(static_ret)):
                self.assertTrue(dcond.numpy()[i] == static_ret[i])

        # less equal
        with self.static_graph():
            a1 = layers.data(name='a1', shape=[1], dtype='int64')
            b1 = layers.data(name='b1', shape=[1], dtype='int64')
            cond1 = paddle.less_equal(x=a1, y=b1)
            static_ret1 = self.get_static_graph_result(
                feed={"a1": value_a, "b1": value_b}, fetch_list=[cond1]
            )[0]
        with self.dynamic_graph():
            with _test_eager_guard():
                da1 = base.to_variable(value_a)
                db1 = base.to_variable(value_b)
                dcond1 = paddle.less_equal(x=da1, y=db1)

                for i in range(len(static_ret1)):
                    self.assertTrue(dcond1.numpy()[i] == static_ret1[i])

            da1 = base.to_variable(value_a)
            db1 = base.to_variable(value_b)
            dcond1 = paddle.less_equal(x=da1, y=db1)

            for i in range(len(static_ret1)):
                self.assertTrue(dcond1.numpy()[i] == static_ret1[i])

        # greater than
        with self.static_graph():
            a2 = layers.data(name='a2', shape=[1], dtype='int64')
            b2 = layers.data(name='b2', shape=[1], dtype='int64')
            cond2 = paddle.greater_than(x=a2, y=b2)
            static_ret2 = self.get_static_graph_result(
                feed={"a2": value_a, "b2": value_b}, fetch_list=[cond2]
            )[0]
        with self.dynamic_graph():
            with _test_eager_guard():
                da2 = base.to_variable(value_a)
                db2 = base.to_variable(value_b)
                dcond2 = paddle.greater_than(x=da2, y=db2)

                for i in range(len(static_ret2)):
                    self.assertTrue(dcond2.numpy()[i] == static_ret2[i])

            da2 = base.to_variable(value_a)
            db2 = base.to_variable(value_b)
            dcond2 = paddle.greater_than(x=da2, y=db2)

            for i in range(len(static_ret2)):
                self.assertTrue(dcond2.numpy()[i] == static_ret2[i])

        # greater equal
        with self.static_graph():
            a3 = layers.data(name='a3', shape=[1], dtype='int64')
            b3 = layers.data(name='b3', shape=[1], dtype='int64')
            cond3 = paddle.greater_equal(x=a3, y=b3)
            static_ret3 = self.get_static_graph_result(
                feed={"a3": value_a, "b3": value_b}, fetch_list=[cond3]
            )[0]
        with self.dynamic_graph():
            with _test_eager_guard():
                da3 = base.to_variable(value_a)
                db3 = base.to_variable(value_b)
                dcond3 = paddle.greater_equal(x=da3, y=db3)

                for i in range(len(static_ret3)):
                    self.assertTrue(dcond3.numpy()[i] == static_ret3[i])

            da3 = base.to_variable(value_a)
            db3 = base.to_variable(value_b)
            dcond3 = paddle.greater_equal(x=da3, y=db3)

            for i in range(len(static_ret3)):
                self.assertTrue(dcond3.numpy()[i] == static_ret3[i])

        # equal
        with self.static_graph():
            a4 = layers.data(name='a4', shape=[1], dtype='int64')
            b4 = layers.data(name='b4', shape=[1], dtype='int64')
            cond4 = paddle.equal(x=a4, y=b4)
            static_ret4 = self.get_static_graph_result(
                feed={"a4": value_a, "b4": value_b}, fetch_list=[cond4]
            )[0]
        with self.dynamic_graph():
            with _test_eager_guard():
                da4 = base.to_variable(value_a)
                db4 = base.to_variable(value_b)
                dcond4 = paddle.equal(x=da4, y=db4)

                for i in range(len(static_ret4)):
                    self.assertTrue(dcond4.numpy()[i] == static_ret4[i])

            da4 = base.to_variable(value_a)
            db4 = base.to_variable(value_b)
            dcond4 = paddle.equal(x=da4, y=db4)

            for i in range(len(static_ret4)):
                self.assertTrue(dcond4.numpy()[i] == static_ret4[i])

        # not equal
        with self.static_graph():
            a5 = layers.data(name='a5', shape=[1], dtype='int64')
            b5 = layers.data(name='b5', shape=[1], dtype='int64')
            cond5 = paddle.not_equal(x=a5, y=b5)
            static_ret5 = self.get_static_graph_result(
                feed={"a5": value_a, "b5": value_b}, fetch_list=[cond5]
            )[0]
        with self.dynamic_graph():
            with _test_eager_guard():
                da5 = base.to_variable(value_a)
                db5 = base.to_variable(value_b)
                dcond5 = paddle.not_equal(x=da5, y=db5)

                for i in range(len(static_ret5)):
                    self.assertTrue(dcond5.numpy()[i] == static_ret5[i])

            da5 = base.to_variable(value_a)
            db5 = base.to_variable(value_b)
            dcond5 = paddle.not_equal(x=da5, y=db5)

            for i in range(len(static_ret5)):
                self.assertTrue(dcond5.numpy()[i] == static_ret5[i])

    def test_cond(self):
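        # cond should evaluate the branch selected by the predicate in both static and
        # dynamic modes, and non-callable branches must raise TypeError.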
        def less_than_branch(a, b):
            return paddle.add(a, b)

        def greater_equal_branch(a, b):
            return paddle.subtract(a, b)

        with self.static_graph():
            a = fluid.layers.fill_constant(
                shape=[1], dtype='float32', value=0.1
            )
            b = fluid.layers.fill_constant(
                shape=[1], dtype='float32', value=0.23
            )
            out = fluid.layers.cond(
                a >= b,
                lambda: greater_equal_branch(a, b),
                lambda: less_than_branch(a, b),
            )
            place = (
                fluid.CUDAPlace(0)
                if core.is_compiled_with_cuda()
                else fluid.CPUPlace()
            )
            exe = fluid.Executor(place)
            ret = exe.run(fetch_list=[out])
            static_res = ret[0]

        with self.dynamic_graph():
            with _test_eager_guard():
                a = fluid.dygraph.to_variable(np.array([0.1]).astype('float32'))
                b = fluid.dygraph.to_variable(
                    np.array([0.23]).astype('float32')
                )
                out = layers.cond(
                    a < b,
                    lambda: less_than_branch(a, b),
                    lambda: greater_equal_branch(a, b),
                )
                out2 = layers.cond(
                    a >= b,
                    lambda: greater_equal_branch(a, b),
                    lambda: less_than_branch(a, b),
                )
                eager_dynamic_res = out.numpy()
                eager_dynamic_res2 = out2.numpy()
                np.testing.assert_array_equal(
                    eager_dynamic_res, eager_dynamic_res2
                )
                with self.assertRaises(TypeError):
                    layers.cond(a < b, 'str', 'str')
                with self.assertRaises(TypeError):
                    layers.cond(a >= b, 'str', 'str')

            a = fluid.dygraph.to_variable(np.array([0.1]).astype('float32'))
            b = fluid.dygraph.to_variable(np.array([0.23]).astype('float32'))
            out = layers.cond(
                a < b,
                lambda: less_than_branch(a, b),
                lambda: greater_equal_branch(a, b),
            )
            out2 = layers.cond(
                a >= b,
                lambda: greater_equal_branch(a, b),
                lambda: less_than_branch(a, b),
            )
            dynamic_res = out.numpy()
            dynamic_res2 = out2.numpy()
            np.testing.assert_array_equal(dynamic_res, dynamic_res2)
            with self.assertRaises(TypeError):
                layers.cond(a < b, 'str', 'str')
            with self.assertRaises(TypeError):
                layers.cond(a >= b, 'str', 'str')

        np.testing.assert_array_equal(static_res, dynamic_res)
        np.testing.assert_array_equal(static_res, eager_dynamic_res)

    def test_case(self):
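        # case should run the first branch whose predicate is True; static and
        # dynamic results must match.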
        def fn_1():
            return layers.fill_constant(shape=[1, 2], dtype='float32', value=1)

        def fn_2():
            return layers.fill_constant(shape=[2, 2], dtype='int32', value=2)

        def fn_3():
            return layers.fill_constant(shape=[3], dtype='int32', value=3)

        with self.static_graph():
            x = layers.fill_constant(shape=[1], dtype='float32', value=0.3)
            y = layers.fill_constant(shape=[1], dtype='float32', value=0.1)
            z = layers.fill_constant(shape=[1], dtype='float32', value=0.2)

            pred_1 = paddle.less_than(z, x)  # true: 0.2 < 0.3
            pred_2 = paddle.less_than(x, y)  # false: 0.3 < 0.1
            pred_3 = paddle.equal(x, y)  # false: 0.3 == 0.1

            out_1 = layers.case(
                pred_fn_pairs=[(pred_1, fn_1), (pred_2, fn_2)], default=fn_3
            )
            out_2 = layers.case(pred_fn_pairs=[(pred_2, fn_2), (pred_3, fn_3)])

            place = (
                fluid.CUDAPlace(0)
                if core.is_compiled_with_cuda()
                else fluid.CPUPlace()
            )
            exe = fluid.Executor(place)
            static_res1, static_res2 = exe.run(fetch_list=[out_1, out_2])

        with self.dynamic_graph():
            with _test_eager_guard():
                x = layers.fill_constant(shape=[1], dtype='float32', value=0.3)
                y = layers.fill_constant(shape=[1], dtype='float32', value=0.1)
                z = layers.fill_constant(shape=[1], dtype='float32', value=0.2)

                pred_1 = paddle.less_than(z, x)  # true: 0.2 < 0.3
                pred_2 = paddle.less_than(x, y)  # false: 0.3 < 0.1
                pred_3 = paddle.equal(x, y)  # false: 0.3 == 0.1

                out_1 = layers.case(
                    pred_fn_pairs=[(pred_1, fn_1), (pred_2, fn_2)], default=fn_3
                )
                out_2 = layers.case(
                    pred_fn_pairs=[(pred_2, fn_2), (pred_3, fn_3)]
                )
                eager_dynamic_res1 = out_1.numpy()
                eager_dynamic_res2 = out_2.numpy()

            x = layers.fill_constant(shape=[1], dtype='float32', value=0.3)
            y = layers.fill_constant(shape=[1], dtype='float32', value=0.1)
            z = layers.fill_constant(shape=[1], dtype='float32', value=0.2)

            pred_1 = paddle.less_than(z, x)  # true: 0.2 < 0.3
            pred_2 = paddle.less_than(x, y)  # false: 0.3 < 0.1
            pred_3 = paddle.equal(x, y)  # false: 0.3 == 0.1

            out_1 = layers.case(
                pred_fn_pairs=[(pred_1, fn_1), (pred_2, fn_2)], default=fn_3
            )
            out_2 = layers.case(pred_fn_pairs=[(pred_2, fn_2), (pred_3, fn_3)])
            dynamic_res1 = out_1.numpy()
            dynamic_res2 = out_2.numpy()

        np.testing.assert_array_equal(static_res1, dynamic_res1)
        np.testing.assert_array_equal(static_res2, dynamic_res2)
        np.testing.assert_array_equal(static_res1, eager_dynamic_res1)
        np.testing.assert_array_equal(static_res2, eager_dynamic_res2)

    def test_switch_case(self):
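        # layers.switch_case dispatches on an integer branch_index; branch_fns
        # may be a dict or a list of (index, fn) pairs, with an optional default.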
        def fn_1():
            return layers.fill_constant(shape=[1, 2], dtype='float32', value=1)

        def fn_2():
            return layers.fill_constant(shape=[2, 2], dtype='int32', value=2)

        def fn_3():
            return layers.fill_constant(shape=[3], dtype='int32', value=3)

        with self.static_graph():
            index_1 = layers.fill_constant(shape=[1], dtype='int32', value=1)
            index_2 = layers.fill_constant(shape=[1], dtype='int32', value=2)

            out_1 = layers.switch_case(
                branch_index=index_1,
                branch_fns={1: fn_1, 2: fn_2},
                default=fn_3,
            )
            out_2 = layers.switch_case(
                branch_index=index_2,
                branch_fns=[(1, fn_1), (2, fn_2)],
                default=fn_3,
            )
            out_3 = layers.switch_case(
                branch_index=index_2,
                branch_fns=[(0, fn_1), (4, fn_2), (7, fn_3)],
            )

            place = (
                fluid.CUDAPlace(0)
                if core.is_compiled_with_cuda()
                else fluid.CPUPlace()
            )
            exe = fluid.Executor(place)
            static_res1, static_res2, static_res3 = exe.run(
                fetch_list=[out_1, out_2, out_3]
            )

        with self.dynamic_graph():
            with _test_eager_guard():
                index_1 = layers.fill_constant(
                    shape=[1], dtype='int32', value=1
                )
                index_2 = layers.fill_constant(
                    shape=[1], dtype='int32', value=2
                )

                out_1 = layers.switch_case(
                    branch_index=index_1,
                    branch_fns={1: fn_1, 2: fn_2},
                    default=fn_3,
                )
                out_2 = layers.switch_case(
                    branch_index=index_2,
                    branch_fns=[(1, fn_1), (2, fn_2)],
                    default=fn_3,
                )
                out_3 = layers.switch_case(
                    branch_index=index_2,
                    branch_fns=[(0, fn_1), (4, fn_2), (7, fn_3)],
                )

                eager_dynamic_res1 = out_1.numpy()
                eager_dynamic_res2 = out_2.numpy()
                eager_dynamic_res3 = out_3.numpy()

            index_1 = layers.fill_constant(shape=[1], dtype='int32', value=1)
            index_2 = layers.fill_constant(shape=[1], dtype='int32', value=2)

            out_1 = layers.switch_case(
                branch_index=index_1,
                branch_fns={1: fn_1, 2: fn_2},
                default=fn_3,
            )
            out_2 = layers.switch_case(
                branch_index=index_2,
                branch_fns=[(1, fn_1), (2, fn_2)],
                default=fn_3,
            )
            out_3 = layers.switch_case(
                branch_index=index_2,
                branch_fns=[(0, fn_1), (4, fn_2), (7, fn_3)],
            )

            dynamic_res1 = out_1.numpy()
            dynamic_res2 = out_2.numpy()
            dynamic_res3 = out_3.numpy()

        np.testing.assert_array_equal(static_res1, dynamic_res1)
        np.testing.assert_array_equal(static_res2, dynamic_res2)
        np.testing.assert_array_equal(static_res3, dynamic_res3)
        np.testing.assert_array_equal(static_res1, eager_dynamic_res1)
        np.testing.assert_array_equal(static_res2, eager_dynamic_res2)
        np.testing.assert_array_equal(static_res3, eager_dynamic_res3)

    def test_crop_tensor(self):
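        # crop shape/offsets can be plain tuples, data Variables, or mixed
        # lists that contain Variables.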
        with self.static_graph():
            x = fluid.layers.data(name="x1", shape=[6, 5, 8])

            dim1 = fluid.layers.data(
                name="dim1", shape=[1], append_batch_size=False
            )
            dim2 = fluid.layers.data(
                name="dim2", shape=[1], append_batch_size=False
            )
            crop_shape1 = (1, 2, 4, 4)
            crop_shape2 = fluid.layers.data(
                name="crop_shape", shape=[4], append_batch_size=False
            )
            crop_shape3 = [-1, dim1, dim2, 4]
            crop_offsets1 = [0, 0, 1, 0]
            crop_offsets2 = fluid.layers.data(
                name="crop_offset", shape=[4], append_batch_size=False
            )
            crop_offsets3 = [0, dim1, dim2, 0]

            out1 = paddle.crop(x, shape=crop_shape1, offsets=crop_offsets1)
            out2 = paddle.crop(x, shape=crop_shape2, offsets=crop_offsets2)
            out3 = paddle.crop(x, shape=crop_shape3, offsets=crop_offsets3)

            self.assertIsNotNone(out1)
            self.assertIsNotNone(out2)
            self.assertIsNotNone(out3)

    def test_shard_index(self):
        with self.static_graph():
            x = fluid.layers.data(name="label", shape=[4, 1], dtype='int64')
            shard_label = fluid.layers.shard_index(
                input=x, index_num=20, nshards=2, shard_id=0
            )

        self.assertIsNotNone(shard_label)

    def test_accuracy(self):
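        # Top-5 accuracy computed in static graph mode should match dygraph
        # execution on the same inputs.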
        x = np.random.rand(3, 32, 32).astype("float32")
        y = np.array([[1], [0], [1]])
        with self.static_graph():
            data = fluid.data(name="input", shape=[-1, 32, 32], dtype="float32")
            label = fluid.data(name="label", shape=[-1, 1], dtype="int")
            fc_out = fluid.layers.fc(input=data, size=10)
            predict = fluid.layers.softmax(input=fc_out)
            result = paddle.static.accuracy(input=predict, label=label, k=5)
            place = fluid.CPUPlace()
            exe = fluid.Executor(place)

            exe.run(fluid.default_startup_program())
            # x = np.random.rand(3, 32, 32).astype("float32")
            # y = np.array([[1], [0], [1]])
            static_out = exe.run(
                feed={"input": x, "label": y}, fetch_list=result[0]
            )

        with self.dynamic_graph(force_to_use_cpu=True):
            data = base.to_variable(x)
            label = base.to_variable(y)
            fc_out = fluid.layers.fc(data, size=10)
            predict = fluid.layers.softmax(fc_out)
            dynamic_out = paddle.static.accuracy(
                input=predict, label=label, k=5
            )

        np.testing.assert_array_equal(static_out[0], dynamic_out.numpy())


class TestBook(LayerTest):
    def setUp(self):
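        # Layers in these sets are skipped for the dygraph comparison, built
        # only in static mode, or compared with allclose instead of exact equality.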
        self.only_static_set = set({"make_word_embedding"})
        self.not_compare_static_dygraph_set = set(
            {
                "make_gaussian_random",
                "make_kldiv_loss",
                "make_sampling_id",
                "make_uniform_random_batch_size_like",
            }
        )
        self.all_close_compare = set({"make_spectral_norm"})

    def func_all_layers(self):
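        # Build every make_* method once in static graph mode and once in
        # dygraph mode, then compare the fetched results.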
        attrs = (getattr(self, name) for name in dir(self))
        methods = filter(inspect.ismethod, attrs)
        for method in methods:
            if not method.__name__.startswith('make_'):
                continue
            self._low_data_bound = 0
            self._high_data_bound = 2
            self._batch_size = 2
            self._feed_dict = {}
            self._force_to_use_cpu = False
            with self.static_graph():
                static_var = method()
                if isinstance(static_var, tuple):
                    static_var = static_var[0]

                if static_var is not None:
                    fetch_list = [static_var.name]
                    static_result = self.get_static_graph_result(
                        feed=self._feed_dict,
                        fetch_list=fetch_list,
                        force_to_use_cpu=self._force_to_use_cpu,
                    )

                else:
                    continue
            if method.__name__ in self.only_static_set:
                continue

            with self.dynamic_graph(self._force_to_use_cpu):
                dy_result = method()
                if isinstance(dy_result, tuple):
                    dy_result = dy_result[0]
                dy_result_value = dy_result.numpy()

            if method.__name__ in self.all_close_compare:
                np.testing.assert_allclose(
                    static_result[0],
                    dy_result_value,
                    rtol=1e-05,
                    atol=0,
                    err_msg='Result of function [{}] compare failed'.format(
                        method.__name__
                    ),
                )
                continue

            if method.__name__ not in self.not_compare_static_dygraph_set:
                np.testing.assert_array_equal(
                    static_result[0],
                    dy_result_value,
                    err_msg='Result of function [{}] not equal'.format(
                        method.__name__
                    ),
                )

    def test_all_layers(self):
        with _test_eager_guard():
            self.func_all_layers()
        self.func_all_layers()

    def _get_np_data(self, shape, dtype, append_batch_size=True):
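        # Deterministic feed data: float dtypes are uniform in [0, 1), integer
        # dtypes are drawn from [_low_data_bound, _high_data_bound).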
        np.random.seed(self.seed)
        if append_batch_size:
            shape = [self._batch_size] + shape
        if dtype == 'float32':
            return np.random.random(shape).astype(dtype)
        elif dtype == 'float64':
            return np.random.random(shape).astype(dtype)
        elif dtype == 'int32':
            return np.random.randint(
                self._low_data_bound, self._high_data_bound, shape
            ).astype(dtype)
        elif dtype == 'int64':
            return np.random.randint(
                self._low_data_bound, self._high_data_bound, shape
            ).astype(dtype)

    def _get_data(
        self, name, shape, dtype, set_feed_dict=True, append_batch_size=True
    ):
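        # In dygraph mode, return a Tensor built from the numpy data directly;
        # in static mode, register the numpy array in the feed dict and return
        # a data layer.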
        if base.enabled():
            return base.to_variable(
                value=self._get_np_data(shape, dtype, append_batch_size),
                name=name,
                zero_copy=False,
            )
        else:
            if set_feed_dict:
                self._feed_dict[name] = self._get_np_data(
                    shape, dtype, append_batch_size
                )
            return layers.data(
                name=name,
                shape=shape,
                dtype=dtype,
                append_batch_size=append_batch_size,
            )

    def make_fit_a_line(self):
        with program_guard(
            fluid.default_main_program(),
            startup_program=fluid.default_startup_program(),
        ):
            x = self._get_data(name='x', shape=[13], dtype='float32')
            y_predict = layers.fc(input=x, size=1, act=None)
            y = self._get_data(name='y', shape=[1], dtype='float32')
            cost = paddle.nn.functional.square_error_cost(
                input=y_predict, label=y
            )
            avg_cost = paddle.mean(cost)
            return avg_cost

    def make_recognize_digits_mlp(self):
        with program_guard(
            fluid.default_main_program(), fluid.default_startup_program()
        ):
            # Change g_program, so the rest layers use `g_program`
            images = self._get_data(name='pixel', shape=[784], dtype='float32')
            label = self._get_data(name='label', shape=[1], dtype='int64')
            hidden1 = layers.fc(input=images, size=128, act='relu')
            hidden2 = layers.fc(input=hidden1, size=64, act='relu')
            predict = layers.fc(
                input=[hidden2, hidden1],
                size=10,
                act='softmax',
                param_attr=["sftmax.w1", "sftmax.w2"],
            )
            cost = layers.cross_entropy(input=predict, label=label)
            avg_cost = paddle.mean(cost)
            return avg_cost

    def make_conv2d_transpose(self):
        with program_guard(
            fluid.default_main_program(), fluid.default_startup_program()
        ):
            img = self._get_data(name='pixel', shape=[3, 2, 2], dtype='float32')
            return paddle.static.nn.conv2d_transpose(
                input=img, num_filters=10, output_size=28
            )

    def make_recognize_digits_conv(self):
        with program_guard(
            fluid.default_main_program(), fluid.default_startup_program()
        ):
            images = self._get_data(
                name='pixel', shape=[1, 28, 28], dtype='float32'
            )
            label = self._get_data(name='label', shape=[1], dtype='int64')
            conv_pool_1 = nets.simple_img_conv_pool(
                input=images,
                filter_size=5,
                num_filters=2,
                pool_size=2,
                pool_stride=2,
                act="relu",
            )
            conv_pool_2 = nets.simple_img_conv_pool(
                input=conv_pool_1,
                filter_size=5,
                num_filters=4,
                pool_size=2,
                pool_stride=2,
                act="relu",
            )

            predict = layers.fc(input=conv_pool_2, size=10, act="softmax")
            cost = layers.cross_entropy(input=predict, label=label)
            avg_cost = paddle.mean(cost)
            return avg_cost

    def make_word_embedding(self):
        with program_guard(
            fluid.default_main_program(), fluid.default_startup_program()
        ):
            dict_size = 10000
            embed_size = 32
            first_word = self._get_data(name='firstw', shape=[1], dtype='int64')
            second_word = self._get_data(
                name='secondw', shape=[1], dtype='int64'
            )
            third_word = self._get_data(name='thirdw', shape=[1], dtype='int64')
            forth_word = self._get_data(name='forthw', shape=[1], dtype='int64')
            next_word = self._get_data(name='nextw', shape=[1], dtype='int64')

            embed_first = layers.embedding(
                input=first_word,
                size=[dict_size, embed_size],
                dtype='float32',
                param_attr='shared_w',
            )
            embed_second = layers.embedding(
                input=second_word,
                size=[dict_size, embed_size],
                dtype='float32',
                param_attr='shared_w',
            )

            embed_third = layers.embedding(
                input=third_word,
                size=[dict_size, embed_size],
                dtype='float32',
                param_attr='shared_w',
            )
            embed_forth = layers.embedding(
                input=forth_word,
                size=[dict_size, embed_size],
                dtype='float32',
                param_attr='shared_w',
            )

            concat_embed = layers.concat(
                input=[embed_first, embed_second, embed_third, embed_forth],
3001 3002
                axis=1,
            )

            hidden1 = layers.fc(input=concat_embed, size=256, act='sigmoid')
            predict_word = layers.fc(
                input=hidden1, size=dict_size, act='softmax'
            )
            cost = layers.cross_entropy(input=predict_word, label=next_word)
            avg_cost = paddle.mean(cost)
            return avg_cost

    def make_pool2d(self):
        with program_guard(
            fluid.default_main_program(), fluid.default_startup_program()
        ):
            x = self._get_data(name='x', shape=[3, 224, 224], dtype='float32')
            return layers.pool2d(
                x, pool_size=[5, 3], pool_stride=[1, 2], pool_padding=(2, 1)
            )

    def make_pool2d_infershape(self):
        with program_guard(
            fluid.default_main_program(), fluid.default_startup_program()
        ):
            theta = self._get_data("theta", shape=[2, 3], dtype='float32')
            x = paddle.nn.functional.affine_grid(
                theta, out_shape=[2, 3, 244, 244]
            )
            return layers.pool2d(
                x, pool_size=[5, 3], pool_stride=[1, 2], pool_padding=(2, 1)
            )

    def make_lstm_unit(self):
        with program_guard(
            fluid.default_main_program(), fluid.default_startup_program()
        ):
            x_t_data = self._get_data(
                name='x_t_data', shape=[10, 10], dtype='float32'
            )
            x_t = layers.fc(input=x_t_data, size=10)
            prev_hidden_data = self._get_data(
                name='prev_hidden_data', shape=[10, 30], dtype='float32'
            )
            prev_hidden = layers.fc(input=prev_hidden_data, size=30)
            prev_cell_data = self._get_data(
                name='prev_cell', shape=[10, 30], dtype='float32'
            )
            prev_cell = layers.fc(input=prev_cell_data, size=30)
            return layers.lstm_unit(
                x_t=x_t, hidden_t_prev=prev_hidden, cell_t_prev=prev_cell
            )

    def make_softmax(self):
        with program_guard(
            fluid.default_main_program(), fluid.default_startup_program()
        ):
            data = self._get_data(name='data', shape=[10], dtype='float32')
            hid = layers.fc(input=data, size=20)
            return layers.softmax(hid, axis=1)

    @prog_scope()
    def make_nce(self):
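        # Embed the words surrounding the label word in a fixed window and
        # train an NCE loss to predict the label word.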
        window_size = 5
        words = []
        for i in range(window_size):
            words.append(
                self._get_data(
                    name='word_{0}'.format(i), shape=[1], dtype='int64'
                )
            )

        dict_size = 10000
        label_word = int(window_size // 2) + 1

        embs = []
        for i in range(window_size):
            if i == label_word:
                continue

3080 3081 3082 3083 3084 3085
            emb = layers.embedding(
                input=words[i],
                size=[dict_size, 32],
                param_attr='emb.w',
                is_sparse=True,
            )

            embs.append(emb)

        embs = layers.concat(input=embs, axis=1)
        loss = paddle.static.nn.nce(
            input=embs,
            label=words[label_word],
            num_total_classes=dict_size,
            param_attr='nce.w',
            bias_attr='nce.b',
        )
        avg_loss = paddle.mean(loss)
        return avg_loss

    def make_multiplex(self):
        with program_guard(
            fluid.default_main_program(), fluid.default_startup_program()
        ):
            x1 = self._get_data(name='x1', shape=[4], dtype='float32')
            x2 = self._get_data(name='x2', shape=[4], dtype='float32')
            index = self._get_data(name='index', shape=[1], dtype='int32')
            out = layers.multiplex(inputs=[x1, x2], index=index)
            return out

    def make_softmax_with_cross_entropy(self):
        with program_guard(
            fluid.default_main_program(), fluid.default_startup_program()
        ):
            x = self._get_data(name='x', shape=[16], dtype='float32')
            y = self._get_data(name='label', shape=[1], dtype='int64')
            loss, softmax = paddle.nn.functional.softmax_with_cross_entropy(
                x, y, return_softmax=True
            )
            self.assertIsNotNone(loss)
            self.assertIsNotNone(softmax)

            loss = paddle.nn.functional.softmax_with_cross_entropy(x, y)
            self.assertIsNotNone(loss)

            x1 = self._get_data(name='x1', shape=[16, 32, 64], dtype='float32')
            y1 = self._get_data(name='label1', shape=[1, 32, 64], dtype='int64')
            y2 = self._get_data(name='label2', shape=[16, 1, 64], dtype='int64')
            y3 = self._get_data(name='label3', shape=[16, 32, 1], dtype='int64')
            loss1 = paddle.nn.functional.softmax_with_cross_entropy(
                x1, y1, axis=1
            )
            loss2 = paddle.nn.functional.softmax_with_cross_entropy(
                x1, y2, axis=2
            )
            loss3 = paddle.nn.functional.softmax_with_cross_entropy(
                x1, y3, axis=3
            )
            loss4 = paddle.nn.functional.softmax_with_cross_entropy(
                x1, y3, axis=-1
            )
            self.assertIsNotNone(loss1)
            self.assertIsNotNone(loss2)
            self.assertIsNotNone(loss3)
            self.assertIsNotNone(loss4)
            return loss4

    def make_smooth_l1(self):
        with program_guard(
            fluid.default_main_program(), fluid.default_startup_program()
        ):
            x = self._get_data(name='x', shape=[4], dtype='float32')
            y = self._get_data(name='label', shape=[4], dtype='float32')
            loss = layers.smooth_l1(x, y)
            return loss

    def make_scatter(self):
        with program_guard(
            fluid.default_main_program(), fluid.default_startup_program()
        ):
            x = self._get_data(
                name='x', shape=[3, 3], append_batch_size=False, dtype='float32'
            )
            idx = self._get_data(
                name='idx', shape=[2], append_batch_size=False, dtype='int32'
            )
            updates = self._get_data(
                name='updates',
                shape=[2, 3],
                append_batch_size=False,
                dtype='float32',
            )
            out = paddle.scatter(x, index=idx, updates=updates)
            return out

    def make_one_hot(self):
        with fluid.framework._dygraph_place_guard(place=fluid.CPUPlace()):
            label = self._get_data(name="label", shape=[1], dtype="int32")
            one_hot_label = layers.one_hot(input=label, depth=10)
            return one_hot_label

    def make_label_smooth(self):
        # TODO(minqiyang): support gpu ut
        self._force_to_use_cpu = True
        with fluid.framework._dygraph_place_guard(place=fluid.CPUPlace()):
            label = self._get_data(name="label", shape=[1], dtype="int32")
            one_hot_label = layers.one_hot(input=label, depth=10)
            smooth_label = F.label_smooth(label=one_hot_label, epsilon=0.1)
            return smooth_label

    def make_topk(self):
        with program_guard(
            fluid.default_main_program(), fluid.default_startup_program()
        ):
            data = self._get_data(name="label", shape=[200], dtype="float32")
            values, indices = layers.topk(data, k=5)
            return values
            return indices

    def make_resize_bilinear(self):
        with program_guard(
            fluid.default_main_program(), fluid.default_startup_program()
        ):
            x = self._get_data(name='x', shape=[3, 9, 6], dtype="float32")
            output = layers.resize_bilinear(x, out_shape=[12, 12])
            return output

    def make_resize_bilinear_by_scale(self):
        with program_guard(
            fluid.default_main_program(), fluid.default_startup_program()
        ):
            x = self._get_data(name='x', shape=[3, 9, 6], dtype="float32")
            output = layers.resize_bilinear(x, scale=1.5)
            return output

    def make_resize_nearest(self):
        try:
            with program_guard(
                fluid.default_main_program(), fluid.default_startup_program()
            ):
                x = self._get_data(name='x1', shape=[3, 9, 6], dtype="float32")
                output = layers.resize_nearest(x, out_shape=[12, 12])
        except ValueError:
            pass

        try:
            with program_guard(
                fluid.default_main_program(), fluid.default_startup_program()
            ):
                x = self._get_data(
                    name='x2', shape=[3, 9, 6, 7], dtype="float32"
                )
                output = layers.resize_nearest(x, out_shape=[12, 12, 12])
        except ValueError:
            pass

        with program_guard(
            fluid.default_main_program(), fluid.default_startup_program()
        ):
            x = self._get_data(name='x', shape=[3, 9, 6], dtype="float32")
            output = layers.resize_nearest(x, out_shape=[12, 12])
            return output

    def make_resize_nearest_by_scale(self):
        with program_guard(
            fluid.default_main_program(), fluid.default_startup_program()
        ):
            x = self._get_data(name='x1', shape=[3, 9, 6], dtype="float32")
            output = layers.resize_nearest(x, scale=1.8)
            return output

    def make_resize_trilinear(self):
        try:
            with program_guard(
                fluid.default_main_program(), fluid.default_startup_program()
            ):
                x = self._get_data(name='x2', shape=[3, 9, 6], dtype="float32")
                output = layers.resize_trilinear(x, out_shape=[12, 12, 12])
        except ValueError:
            pass

        try:
            with program_guard(
                fluid.default_main_program(), fluid.default_startup_program()
            ):
                x = self._get_data(
                    name='x', shape=[3, 9, 6, 7], dtype="float32"
                )
                output = layers.resize_trilinear(x, out_shape=[12, 12])
        except ValueError:
            pass

        with program_guard(
            fluid.default_main_program(), fluid.default_startup_program()
        ):
            x = self._get_data(name='x', shape=[3, 9, 6, 7], dtype="float32")
            output = layers.resize_trilinear(x, out_shape=[12, 12, 12])
            return output

    def make_resize_trilinear_by_scale(self):
        with program_guard(
            fluid.default_main_program(), fluid.default_startup_program()
        ):
            x = self._get_data(name='x', shape=[3, 9, 6, 7], dtype="float32")
            output = layers.resize_trilinear(x, scale=2.1)
            return output

    def make_polygon_box_transform(self):
        with program_guard(
            fluid.default_main_program(), fluid.default_startup_program()
        ):
            x = self._get_data(name='x', shape=[8, 4, 4], dtype="float32")
            output = layers.polygon_box_transform(input=x)
            return output

    def make_l2_normalize(self):
        with program_guard(
            fluid.default_main_program(), fluid.default_startup_program()
        ):
            x = self._get_data(name='x', shape=[8, 7, 10], dtype="float32")
            output = layers.l2_normalize(x, axis=1)
            return output

    def make_shape(self):
        with program_guard(
            fluid.default_main_program(), fluid.default_startup_program()
        ):
            input = self._get_data(
                name="input", shape=[3, 100, 100], dtype="float32"
            )
            out = layers.shape(input)
            return out

    def make_pad2d(self):
        with program_guard(
            fluid.default_main_program(), fluid.default_startup_program()
        ):
            input = self._get_data(
                name="input", shape=[3, 100, 100], dtype="float32"
            )

            tmp_pad = paddle.nn.Pad2D(
                padding=[1, 2, 3, 4],
3323 3324 3325 3326
                mode='reflect',
                data_format='NCHW',
                name="shape",
            )
            out = tmp_pad(input)
            return out

    def make_mish(self):
        with program_guard(
            fluid.default_main_program(), fluid.default_startup_program()
        ):
            input = self._get_data(name="input", shape=[16], dtype="float32")
            out = layers.mish(input, name='mish')
            return out

    def make_cross_entropy(self):
        with program_guard(
            fluid.default_main_program(), fluid.default_startup_program()
        ):
            x = self._get_data(name="x", shape=[30, 10], dtype="float32")
            label = self._get_data(name="label", shape=[30, 1], dtype="int64")
            mode = 'channel'
            out = layers.cross_entropy(x, label, False, 4)
            return out

    def make_uniform_random_batch_size_like(self):
        with program_guard(
            fluid.default_main_program(), fluid.default_startup_program()
        ):
            input = self._get_data(
                name="input", shape=[13, 11], dtype='float32'
            )
            out = random.uniform_random_batch_size_like(input, [-1, 11])
            return out

    def make_gaussian_random(self):
        with program_guard(
            fluid.default_main_program(), fluid.default_startup_program()
        ):
            out = layers.gaussian_random(shape=[20, 30])
            return out

    def make_sampling_id(self):
        with program_guard(
            fluid.default_main_program(), fluid.default_startup_program()
        ):
            x = self._get_data(
                name="X",
                shape=[13, 11],
                dtype='float32',
                append_batch_size=False,
            )

            out = layers.sampling_id(x)
            return out

    def make_sum(self):
        with program_guard(
            fluid.default_main_program(), fluid.default_startup_program()
        ):
            input = self._get_data(
                name="input", shape=[13, 11], dtype='float32'
            )

            out = paddle.add_n(input)
            return out

    def make_slice(self):
        starts = [1, 0, 2]
        ends = [3, 3, 4]
        axes = [0, 1, 2]

        with program_guard(
            fluid.default_main_program(), fluid.default_startup_program()
        ):
            input = self._get_data(
                name="input", shape=[3, 4, 5, 6], dtype='float32'
            )

            out = paddle.slice(input, axes=axes, starts=starts, ends=ends)
            return out

    def make_scale_variable(self):
        with program_guard(
            fluid.default_main_program(), fluid.default_startup_program()
        ):
            input = self._get_data(
                name="input", shape=[3, 4, 5, 6], dtype='float32'
            )
            scale_var = self._get_data(
                name="scale",
                shape=[1],
                dtype='float32',
                append_batch_size=False,
            )
            out = paddle.scale(input, scale=scale_var)
            return out

    def make_iou_similarity(self):
        with program_guard(
            fluid.default_main_program(), fluid.default_startup_program()
        ):
            x = self._get_data(name="x", shape=[4], dtype="float32")
            y = self._get_data(name="y", shape=[4], dtype="float32")
            out = layers.iou_similarity(x, y, name='iou_similarity')
            return out

    def make_grid_sampler(self):
        with program_guard(
            fluid.default_main_program(), fluid.default_startup_program()
        ):
            x = self._get_data(name='x', shape=[3, 5, 7], dtype='float32')
            grid = self._get_data(name='grid', shape=[5, 7, 2], dtype='float32')
            out = layers.grid_sampler(x, grid)
            return out

    def make_bilinear_tensor_product_layer(self):
        with program_guard(
            fluid.default_main_program(), fluid.default_startup_program()
        ):
            data = self._get_data(name='data', shape=[4], dtype="float32")

            theta = self._get_data(name="theta", shape=[5], dtype="float32")
            out = layers.bilinear_tensor_product(data, theta, 6)
            return out

    def make_batch_norm(self):
        with program_guard(
            fluid.default_main_program(), fluid.default_startup_program()
        ):
            data = self._get_data(
                name='data', shape=[32, 128, 128], dtype="float32"
            )
            out = layers.batch_norm(data)
            return out

    def make_batch_norm_momentum_variable(self):
        with program_guard(
            fluid.default_main_program(), fluid.default_startup_program()
        ):
            data = self._get_data(
                name='data', shape=[32, 128, 128], dtype="float32"
            )
            momentum = self._get_data(
                name='momentum',
                shape=[1],
                dtype='float32',
                append_batch_size=False,
            )
            out = layers.batch_norm(data, momentum=momentum)
            return out

    def make_range(self):
        with program_guard(
            fluid.default_main_program(), fluid.default_startup_program()
        ):
            paddle.arange(0, 10, 2, 'int32')
            paddle.arange(0.1, 10.0, 0.2, 'float32')
            paddle.arange(0.1, 10.0, 0.2, 'float64')
            start = layers.fill_constant(shape=[1], value=0.1, dtype="float32")
            end = layers.fill_constant(shape=[1], value=10.0, dtype="float32")
            step = layers.fill_constant(shape=[1], value=0.2, dtype="float32")
            y = paddle.arange(start, end, step, 'float64')
            return y

    def make_spectral_norm(self):
        with program_guard(
            fluid.default_main_program(), fluid.default_startup_program()
        ):
            weight = self._get_data(
                name='weight',
                shape=[2, 3, 32, 32],
                dtype="float32",
                append_batch_size=False,
            )
            out = layers.spectral_norm(weight, dim=1, power_iters=1)
            return out

    def make_kldiv_loss(self):
        with program_guard(
            fluid.default_main_program(), fluid.default_startup_program()
        ):
            x = self._get_data(
                name='x',
                shape=[32, 128, 128],
                dtype="float32",
                append_batch_size=False,
            )
            target = self._get_data(
                name='target',
                shape=[32, 128, 128],
                dtype="float32",
                append_batch_size=False,
            )
            loss = paddle.nn.functional.kl_div(
                input=x, label=target, reduction='batchmean'
            )
            return loss

    def make_pixel_shuffle(self):
        with program_guard(
            fluid.default_main_program(), fluid.default_startup_program()
        ):
            x = self._get_data(name="X", shape=[9, 4, 4], dtype="float32")
            out = paddle.nn.functional.pixel_shuffle(x, upscale_factor=3)
            return out

    def make_mse_loss(self):
        with program_guard(
            fluid.default_main_program(), fluid.default_startup_program()
        ):
            x = self._get_data(name="X", shape=[1], dtype="float32")
            y = self._get_data(name="Y", shape=[1], dtype="float32")
            out = paddle.nn.functional.mse_loss(input=x, label=y)
            return out

    def make_square_error_cost(self):
        with program_guard(
            fluid.default_main_program(), fluid.default_startup_program()
        ):
            x = self._get_data(name="X", shape=[1], dtype="float32")
            y = self._get_data(name="Y", shape=[1], dtype="float32")
            out = paddle.nn.functional.square_error_cost(input=x, label=y)
            return out

    def test_dynamic_lstmp(self):
        # TODO(minqiyang): dygraph does not support lod now
        with self.static_graph():
            hidden_dim, proj_dim = 16, 8
            seq_data = layers.data(
                name='seq_data', shape=[10, 10], dtype='float32', lod_level=1
            )
            fc_out = layers.fc(input=seq_data, size=4 * hidden_dim)
            self.assertIsNotNone(
                layers.dynamic_lstmp(
                    input=fc_out, size=4 * hidden_dim, proj_size=proj_dim
                )
            )

    def test_im2sequence(self):
        # TODO(minqiyang): dygraph does not support lod now
        with self.static_graph():
            x = layers.data(name='x', shape=[3, 128, 128], dtype='float32')
            y = layers.data(name='y', shape=[], dtype='float32')
            output = layers.im2sequence(
                input=x,
                input_image_size=y,
                stride=[1, 1],
                filter_size=[2, 2],
                out_stride=[1, 1],
            )
            return output

    def test_lod_reset(self):
        # TODO(minqiyang): dygraph does not support lod now
        with self.static_graph():
            # case 1
            x = layers.data(name='x', shape=[10], dtype='float32')
            y = layers.data(
                name='y', shape=[10, 20], dtype='float32', lod_level=2
            )
            z = layers.lod_reset(x=x, y=y)
            self.assertTrue(z.lod_level == 2)
            # case 2
            lod_tensor_in = layers.data(name='lod_in', shape=[1], dtype='int32')
            z = layers.lod_reset(x=x, y=lod_tensor_in)
            self.assertTrue(z.lod_level == 1)
            # case 3
            z = layers.lod_reset(x=x, target_lod=[1, 2, 3])
            self.assertTrue(z.lod_level == 1)
            return z

    def test_affine_grid(self):
        with self.static_graph():
            data = layers.data(name='data', shape=[2, 3, 3], dtype="float32")
            out = paddle.argsort(x=data, axis=1)

            theta = layers.data(name="theta", shape=[2, 3], dtype="float32")
            out_shape = layers.data(name="out_shape", shape=[-1], dtype="int32")
            data_0 = paddle.nn.functional.affine_grid(theta, out_shape)
            data_1 = paddle.nn.functional.affine_grid(theta, [5, 3, 28, 28])

            self.assertIsNotNone(data_0)
            self.assertIsNotNone(data_1)

    def test_stridedslice(self):
        axes = [0, 1, 2]
        starts = [1, 0, 2]
        ends = [3, 3, 4]
        strides = [1, 1, 1]
        with self.static_graph():
            x = layers.data(name="x", shape=[245, 30, 30], dtype="float32")
            out = paddle.strided_slice(
                x, axes=axes, starts=starts, ends=ends, strides=strides
            )
            return out

    def test_fill_constant_batch_size_like(self):
        with self.static_graph():
            like = fluid.layers.fill_constant(
                shape=[1, 200], value=10, dtype='int64'
            )
            out = layers.fill_constant_batch_size_like(
                input=like, shape=[2, 3300], value=1315454564656, dtype='int64'
            )
            return out

    def test_sequence_expand(self):
        # TODO(minqiyang): dygraph does not support lod now
        with self.static_graph():
            x = layers.data(name='x', shape=[10], dtype='float32')
            y = layers.data(
                name='y', shape=[10, 20], dtype='float32', lod_level=2
            )
            return layers.sequence_expand(x=x, y=y, ref_level=1)

    def test_sequence_reshape(self):
        # TODO(minqiyang): dygraph does not support lod now
        with self.static_graph():
            x = layers.data(name='x', shape=[8], dtype='float32', lod_level=1)
            out = layers.sequence_reshape(input=x, new_dim=16)
            return out

    def test_sequence_unpad(self):
        # TODO(minqiyang): dygraph does not support lod now
        with self.static_graph():
            x = layers.data(name='x', shape=[10, 5], dtype='float32')
            length = layers.data(name='length', shape=[], dtype='int64')
            return layers.sequence_unpad(x=x, length=length)

    def test_sequence_softmax(self):
        # TODO(minqiyang): dygraph does not support lod now
        with self.static_graph():
            seq_data = layers.data(
                name='seq_data', shape=[10, 10], dtype='float32', lod_level=1
            )
            seq = layers.fc(input=seq_data, size=20)
            return layers.sequence_softmax(seq)

    def test_sequence_unsqueeze(self):
        # TODO(minqiyang): dygraph does not support lod now
        with self.static_graph():
            x = layers.data(name='x', shape=[8, 2], dtype='float32')
            out = layers.unsqueeze(input=x, axes=[1])
            return out

    def test_sequence_scatter(self):
        # TODO(minqiyang): dygraph does not support lod now
        with self.static_graph():
            x = layers.data(
                name='x', shape=[3, 6], append_batch_size=False, dtype='float32'
            )
            idx = layers.data(
                name='idx',
                shape=[12, 1],
                append_batch_size=False,
                dtype='int32',
                lod_level=1,
            )
            updates = layers.data(
                name='updates',
                shape=[12, 1],
                append_batch_size=False,
                dtype='float32',
                lod_level=1,
            )
            out = layers.sequence_scatter(input=x, index=idx, updates=updates)
            return out

    def test_sequence_slice(self):
        # TODO(minqiyang): dygraph does not support lod now
        with self.static_graph():
            import numpy as np

            seqs = layers.data(
                name='x', shape=[10, 5], dtype='float32', lod_level=1
            )
            offset = layers.assign(input=np.array([[0, 1]]).astype('int32'))
            length = layers.assign(input=np.array([[2, 1]]).astype('int32'))
            out = layers.sequence_slice(
                input=seqs, offset=offset, length=length
            )
            return out

    def test_shuffle_batch(self):
        # TODO(minqiyang): dygraph does not support lod now
        with self.static_graph():
            x = layers.data(
                name='X', shape=[4, 50], dtype='float32', lod_level=0
            )
            out1 = fluid.contrib.layers.shuffle_batch(x)
            default_main_program().random_seed = 1000
            out2 = fluid.contrib.layers.shuffle_batch(x)
            self.assertIsNotNone(out1)
            self.assertIsNotNone(out2)
            return out1

    def test_partial_sum(self):
        with self.static_graph():
            x = fluid.data(name="x", shape=[None, 3], dtype="float32")
            y = fluid.data(name="y", shape=[None, 3], dtype="float32")
            sum = fluid.contrib.layers.partial_sum(
                [x, y], start_index=0, length=2
            )
            return sum

    def test_batch_fc(self):
        with self.static_graph():
            input = fluid.data(name="input", shape=[16, 2, 3], dtype="float32")
            out = fluid.contrib.layers.batch_fc(
                input=input,
                param_size=[16, 3, 10],
                param_attr=fluid.ParamAttr(
                    learning_rate=1.0,
                    name="w_0",
3738 3739
                    initializer=fluid.initializer.Xavier(uniform=False),
                ),
S
                bias_attr=fluid.ParamAttr(
                    learning_rate=1.0,
                    name="b_0",
3744 3745 3746 3747 3748
                    initializer=fluid.initializer.Xavier(uniform=False),
                ),
                act="relu",
            )
        return out

    def test_rank_attention(self):
        with self.static_graph():
            input = fluid.data(name="input", shape=[None, 2], dtype="float32")
            rank_offset = fluid.data(
                name="rank_offset", shape=[None, 7], dtype="int32"
            )
            out = fluid.contrib.layers.rank_attention(
                input=input,
                rank_offset=rank_offset,
                rank_param_shape=[18, 3],
                rank_param_attr=fluid.ParamAttr(
                    learning_rate=1.0,
                    name="ubm_rank_param.w_0",
                    initializer=fluid.initializer.Xavier(uniform=False),
                ),
                max_rank=3,
            )
            return out

    def test_sequence_enumerate(self):
        # TODO(minqiyang): dygraph does not support lod now
        with self.static_graph():
            x = layers.data(name="input", shape=[1], dtype='int32', lod_level=1)
            out = layers.sequence_enumerate(input=x, win_size=2, pad_value=0)

    def test_roi_perspective_transform(self):
        # TODO(minqiyang): dygraph does not support lod now
        with self.static_graph():
            x = layers.data(name="x", shape=[256, 30, 30], dtype="float32")
            rois = layers.data(
                name="rois", shape=[8], dtype="float32", lod_level=1
            )
            output = layers.roi_perspective_transform(x, rois, 7, 7, 0.6)
            return output

    def test_row_conv(self):
        # TODO(minqiyang): dygraph does not support lod now
        with self.static_graph():
            x = layers.data(name='x', shape=[16], dtype='float32', lod_level=1)
            out = layers.row_conv(input=x, future_context_size=2)
            return out

    def test_simple_conv2d(self):
        # TODO(minqiyang): dygraph does not support layers with param now
        with self.static_graph():
            images = layers.data(
                name='pixel', shape=[3, 48, 48], dtype='float32'
            )
            return layers.conv2d(
                input=images, num_filters=3, filter_size=[4, 4]
            )

    def test_squeeze(self):
        # TODO(minqiyang): dygraph does not support layers with param now
        with self.static_graph():
            x = layers.data(name='x', shape=[1, 1, 4], dtype='float32')
            out = paddle.squeeze(x, axis=[2])
            return out

    def test_flatten(self):
        # TODO(minqiyang): dygraph does not support op without kernel now
        with self.static_graph():
            x = layers.data(
                name='x',
                append_batch_size=False,
                shape=[4, 4, 3],
                dtype="float32",
            )
            out = paddle.flatten(x, 1, -1, name="flatten")
            return out

    def test_linspace(self):
        program = Program()
        with program_guard(program):
            out = paddle.linspace(20, 10, 5, 'float64')
            self.assertIsNotNone(out)
        print(str(program))

    def test_unfold(self):
        with self.static_graph():
            x = layers.data(name='x', shape=[3, 20, 20], dtype='float32')
            out = layers.unfold(x, [3, 3], 1, 1, 1)
            return out

    def test_partial_concat(self):
        with self.static_graph():
            x = fluid.data(name="x", shape=[None, 3], dtype="float32")
            y = fluid.data(name="y", shape=[None, 3], dtype="float32")
            concat1 = fluid.contrib.layers.partial_concat(
                [x, y], start_index=0, length=2
            )
            concat2 = fluid.contrib.layers.partial_concat(
                x, start_index=0, length=-1
            )
            return concat1, concat2

    def test_deform_roi_pooling(self):
        with program_guard(
            fluid.default_main_program(), fluid.default_startup_program()
        ):
            input = layers.data(
                name='input',
                shape=[2, 3, 32, 32],
                dtype='float32',
                append_batch_size=False,
            )
            rois = layers.data(
                name="rois", shape=[4], dtype='float32', lod_level=1
            )
            trans = layers.data(
                name="trans",
                shape=[2, 3, 32, 32],
                dtype='float32',
                append_batch_size=False,
            )
            out = layers.deformable_roi_pooling(
                input=input,
                rois=rois,
                trans=trans,
                no_trans=False,
                spatial_scale=1.0,
                group_size=(1, 1),
                pooled_height=8,
                pooled_width=8,
                part_size=(8, 8),
                sample_per_part=4,
                trans_std=0.1,
            )
        return out

    def test_retinanet_target_assign(self):
        with program_guard(
            fluid.default_main_program(), fluid.default_startup_program()
        ):
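            # Dummy predictions, anchors and ground truth; only the target-assignment graph construction is exercised.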
            bbox_pred = layers.data(
                name='bbox_pred',
                shape=[1, 100, 4],
                append_batch_size=False,
                dtype='float32',
            )
            cls_logits = layers.data(
                name='cls_logits',
                shape=[1, 100, 10],
                append_batch_size=False,
                dtype='float32',
            )
            anchor_box = layers.data(
                name='anchor_box',
                shape=[100, 4],
                append_batch_size=False,
                dtype='float32',
            )
            anchor_var = layers.data(
                name='anchor_var',
                shape=[100, 4],
                append_batch_size=False,
                dtype='float32',
            )
            gt_boxes = layers.data(
                name='gt_boxes',
                shape=[10, 4],
                append_batch_size=False,
                dtype='float32',
            )
            gt_labels = layers.data(
                name='gt_labels',
                shape=[10, 1],
                append_batch_size=False,
                dtype='int32',
            )
            is_crowd = layers.data(
                name='is_crowd',
                shape=[1],
                append_batch_size=False,
                dtype='int32',
            )
            im_info = layers.data(
                name='im_info',
                shape=[1, 3],
                append_batch_size=False,
                dtype='float32',
            )
            return layers.retinanet_target_assign(
                bbox_pred,
                cls_logits,
                anchor_box,
                anchor_var,
                gt_boxes,
                gt_labels,
                is_crowd,
                im_info,
                10,
            )

    def test_addmm(self):
        with program_guard(
            fluid.default_main_program(), fluid.default_startup_program()
        ):
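            # addmm computes input + x @ y with the default alpha/beta: [3, 3] + [3, 2] x [2, 3].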
            input = layers.data(
                name='input_data',
                shape=[3, 3],
                append_batch_size=False,
                dtype='float32',
            )
            x = layers.data(
                name='x', shape=[3, 2], append_batch_size=False, dtype='float32'
            )
            y = layers.data(
                name='y', shape=[2, 3], append_batch_size=False, dtype='float32'
            )

            out = paddle.addmm(input=input, x=x, y=y)
            return out

    def test_retinanet_detection_output(self):
        with program_guard(
            fluid.default_main_program(), fluid.default_startup_program()
        ):
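            # Two feature levels share the same dummy tensors; only graph construction is checked here.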
            bboxes = layers.data(
                name='bboxes',
                shape=[1, 21, 4],
                append_batch_size=False,
                dtype='float32',
            )
            scores = layers.data(
                name='scores',
                shape=[1, 21, 10],
                append_batch_size=False,
                dtype='float32',
            )
            anchors = layers.data(
                name='anchors',
                shape=[21, 4],
                append_batch_size=False,
                dtype='float32',
            )
            im_info = layers.data(
                name="im_info",
                shape=[1, 3],
                append_batch_size=False,
                dtype='float32',
            )
            nmsed_outs = layers.retinanet_detection_output(
                bboxes=[bboxes, bboxes],
                scores=[scores, scores],
                anchors=[anchors, anchors],
                im_info=im_info,
                score_threshold=0.05,
                nms_top_k=1000,
                keep_top_k=100,
                nms_threshold=0.3,
                nms_eta=1.0,
            )
            return nmsed_outs

    def test_warpctc_with_padding(self):
        # TODO(minqiyang): dygraph does not support LoD yet
        with self.static_graph():
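            # CTC loss on dense padded tensors: predict is [max_time, batch, num_classes] and the lengths are given explicitly.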
            input_length = paddle.static.data(
                name='logits_length', shape=[11], dtype='int64'
            )
            label_length = paddle.static.data(
                name='labels_length', shape=[12], dtype='int64'
            )
            label = paddle.static.data(
                name='label', shape=[12, 1], dtype='int32'
            )
            predict = paddle.static.data(
                name='predict', shape=[4, 4, 8], dtype='float32'
            )
            output = paddle.nn.functional.ctc_loss(
                log_probs=predict,
                labels=label,
                input_lengths=input_length,
                label_lengths=label_length,
                reduction='none',
            )
            return output

    def test_basic_gru(self):
        input_size = 128
        hidden_size = 256
        with self.static_graph():
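            # Build basic_gru for every combination of bidirectional and batch_first to check static-graph construction.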
            input = fluid.data(
                name="input", shape=[None, None, input_size], dtype='float32'
            )
            pre_hidden = fluid.data(
                name="pre_hidden", shape=[None, hidden_size], dtype='float32'
            )
            sequence_length = fluid.data(
                name="sequence_length", shape=[None], dtype='int32'
            )

            for bidirectional in [True, False]:
                for batch_first in [True, False]:
                    rnn_out, last_hidden = fluid.contrib.layers.basic_gru(
                        input,
                        pre_hidden,
                        hidden_size=256,
                        num_layers=2,
                        sequence_length=sequence_length,
                        dropout_prob=0.5,
                        bidirectional=bidirectional,
                        batch_first=batch_first,
                    )


class TestMetricsDetectionMap(unittest.TestCase):
    def test_detection_map(self):
        program = fluid.Program()
        with program_guard(program):
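            # Build the DetectionMAP metric graph and check that the mAP variables are created.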
            detect_res = fluid.layers.data(
                name='detect_res',
                shape=[10, 6],
                append_batch_size=False,
                dtype='float32',
            )
            label = fluid.layers.data(
                name='label',
                shape=[10, 1],
                append_batch_size=False,
                dtype='float32',
            )
            box = fluid.layers.data(
                name='bbox',
                shape=[10, 4],
                append_batch_size=False,
                dtype='float32',
            )
            map_eval = fluid.metrics.DetectionMAP(
                detect_res, label, box, class_num=21
            )
            cur_map, accm_map = map_eval.get_map_var()
            self.assertIsNotNone(cur_map)
            self.assertIsNotNone(accm_map)
        print(str(program))


class ExampleNet(paddle.nn.Layer):
    def __init__(self):
        super().__init__()
        self.weight = self.create_parameter(
            shape=[1, 1], attr=paddle.ParamAttr(trainable=False)
        )

    def forward(self):
        # only for test parameter trainable attr
        pass


class TestLayerParameterTrainableSet(unittest.TestCase):
    def test_layer_parameter_set(self):
        with fluid.dygraph.guard():
            net = ExampleNet()
            self.assertFalse(net.weight.trainable)


class TestLayerTrainingAttribute(unittest.TestCase):
    def test_set_train_eval_in_dynamic_mode(self):
        with fluid.dygraph.guard():
            net = paddle.nn.Dropout()
            net.train()
            self.assertTrue(net.training)
            net.eval()
            self.assertFalse(net.training)

    def test_set_train_eval_in_static_mode(self):
        net = paddle.nn.Dropout()
        net.train()
        self.assertTrue(net.training)
        net.eval()
        self.assertFalse(net.training)


class MyLayer(paddle.nn.Layer):
    def __init__(self):
        super().__init__()
        self._linear = paddle.nn.Linear(1, 1)
        self._dropout = paddle.nn.Dropout(p=0.5)

    def forward(self, input):
        temp = self._linear(input)
        temp = self._dropout(temp)
        return temp


class MySuperLayer(paddle.nn.Layer):
    def __init__(self):
        super().__init__()
        self._mylayer = MyLayer()

    def forward(self, input):
        temp = self._mylayer(input)
        return temp


class TestSubLayerCount(unittest.TestCase):
    def test_sublayer(self):
        with fluid.dygraph.guard():
            mySuperlayer = MySuperLayer()
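            # MySuperLayer -> MyLayer -> (Linear, Dropout): 3 sublayers, 4 when the root layer itself is included.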
            self.assertTrue(len(mySuperlayer.sublayers()) == 3)
            self.assertTrue(len(mySuperlayer.sublayers(include_self=True)) == 4)


if __name__ == '__main__':
    paddle.enable_static()
    unittest.main()