#   Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import unittest

import contextlib
import numpy as np
from decorator_helper import prog_scope
import inspect

import paddle
import paddle.fluid as fluid
from paddle.fluid.layers.device import get_places
import paddle.fluid.nets as nets
from paddle.fluid.framework import Program, program_guard, default_main_program
from paddle.fluid.param_attr import ParamAttr
from paddle.fluid import core
from paddle.fluid.initializer import Constant
import paddle.fluid.layers as layers
from test_imperative_base import new_program_scope
from paddle.fluid.dygraph import nn
from paddle.fluid.dygraph import base
from paddle.fluid.dygraph import to_variable
from paddle.fluid.framework import _test_eager_guard


class LayerTest(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        cls.seed = 111

    @classmethod
    def tearDownClass(cls):
        pass

    def _get_place(self, force_to_use_cpu=False):
        # this option is for ops that only have a CPU kernel
        if force_to_use_cpu:
            return core.CPUPlace()
        else:
            if core.is_compiled_with_cuda():
                return core.CUDAPlace(0)
            return core.CPUPlace()

    @contextlib.contextmanager
    def static_graph(self):
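        # Run the body in a fresh program/scope pair with fixed RNG seeds so
        # static-graph results are reproducible and comparable with dygraph.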
        with new_program_scope():
            paddle.seed(self.seed)
            paddle.framework.random._manual_program_seed(self.seed)
            yield

    def get_static_graph_result(
        self, feed, fetch_list, with_lod=False, force_to_use_cpu=False
    ):
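        # Run the startup program once, then fetch `fetch_list` from the
        # default main program (numpy results unless with_lod=True).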
        exe = fluid.Executor(self._get_place(force_to_use_cpu))
        exe.run(fluid.default_startup_program())
        return exe.run(
            fluid.default_main_program(),
            feed=feed,
            fetch_list=fetch_list,
            return_numpy=(not with_lod),
        )

    @contextlib.contextmanager
    def dynamic_graph(self, force_to_use_cpu=False):
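        # Mirror of static_graph for imperative (dygraph) mode: guard on the
        # chosen place and fix both the global and program-level RNG seeds.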
        with fluid.dygraph.guard(
            self._get_place(force_to_use_cpu=force_to_use_cpu)
        ):
            paddle.seed(self.seed)
            paddle.framework.random._manual_program_seed(self.seed)
            yield


class TestLayer(LayerTest):
    def test_custom_layer_with_kwargs(self):
        class CustomLayer(fluid.Layer):
            def __init__(self, input_size, linear1_size=4):
                super(CustomLayer, self).__init__()
                self.linear1 = nn.Linear(
                    input_size, linear1_size, bias_attr=False
                )
                self.linear2 = nn.Linear(linear1_size, 1, bias_attr=False)

            def forward(self, x, do_linear2=False):
                ret = self.linear1(x)
                if do_linear2:
                    ret = self.linear2(ret)
                return ret

        with self.dynamic_graph():
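            # Each dygraph check runs twice: once under the eager-mode guard
            # and once in legacy dygraph; both must yield the same shapes.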
            with _test_eager_guard():
                inp = np.ones([3, 3], dtype='float32')
                x = base.to_variable(inp)
                custom = CustomLayer(input_size=3, linear1_size=2)
                ret = custom(x, do_linear2=False)
                np.testing.assert_array_equal(ret.numpy().shape, [3, 2])
                ret = custom(x, do_linear2=True)
                np.testing.assert_array_equal(ret.numpy().shape, [3, 1])
            inp = np.ones([3, 3], dtype='float32')
            x = base.to_variable(inp)
            custom = CustomLayer(input_size=3, linear1_size=2)
            ret = custom(x, do_linear2=False)
            np.testing.assert_array_equal(ret.numpy().shape, [3, 2])
            ret = custom(x, do_linear2=True)
            np.testing.assert_array_equal(ret.numpy().shape, [3, 1])

    def test_dropout(self):
        inp = np.ones([3, 32, 32], dtype='float32')
        with self.static_graph():
            t = layers.data(
                name='data',
                shape=[3, 32, 32],
                dtype='float32',
                append_batch_size=False,
            )
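            # The layer and functional dropout share a seed, so they draw the
            # same mask and their outputs should match exactly.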
            dropout = nn.Dropout(p=0.35, seed=1, is_test=False)
            ret = dropout(t)
            ret2 = fluid.layers.dropout(
                t, dropout_prob=0.35, seed=1, is_test=False
            )
            static_ret, static_ret2 = self.get_static_graph_result(
                feed={'data': inp}, fetch_list=[ret, ret2]
            )
        with self.dynamic_graph():
            with _test_eager_guard():
                t = base.to_variable(inp)
                dropout = nn.Dropout(p=0.35, seed=1, is_test=False)
                dy_eager_ret = dropout(t)
                dy_eager_ret2 = fluid.layers.dropout(
                    t, dropout_prob=0.35, seed=1, is_test=False
                )
                dy_eager_ret_value = dy_eager_ret.numpy()
                dy_eager_ret2_value = dy_eager_ret2.numpy()

            t = base.to_variable(inp)
            dropout = nn.Dropout(p=0.35, seed=1, is_test=False)
            dy_ret = dropout(t)
            dy_ret2 = fluid.layers.dropout(
                t, dropout_prob=0.35, seed=1, is_test=False
            )
            dy_ret_value = dy_ret.numpy()
            dy_ret2_value = dy_ret2.numpy()

        np.testing.assert_array_equal(dy_eager_ret_value, dy_eager_ret2_value)
        np.testing.assert_array_equal(static_ret, dy_eager_ret_value)

        np.testing.assert_array_equal(static_ret, static_ret2)
        np.testing.assert_array_equal(dy_ret_value, dy_ret2_value)
        np.testing.assert_array_equal(static_ret, dy_ret_value)

    def test_linear(self):
        inp = np.ones([3, 32, 32], dtype='float32')
        with self.static_graph():
            t = layers.data(
                name='data',
                shape=[3, 32, 32],
                dtype='float32',
                append_batch_size=False,
            )
            linear = nn.Linear(
                32, 4, bias_attr=fluid.initializer.ConstantInitializer(value=1)
            )
            ret = linear(t)
            static_ret = self.get_static_graph_result(
                feed={'data': inp}, fetch_list=[ret]
            )[0]
        with self.dynamic_graph():
            with _test_eager_guard():
                t = base.to_variable(inp)
                linear = nn.Linear(
                    32,
                    4,
                    bias_attr=fluid.initializer.ConstantInitializer(value=1),
                )
                dy_eager_ret = linear(t)
                dy_eager_ret_value = dy_eager_ret.numpy()

            t = base.to_variable(inp)
            linear = nn.Linear(
                32, 4, bias_attr=fluid.initializer.ConstantInitializer(value=1)
            )
            dy_ret = linear(t)
            dy_ret_value = dy_ret.numpy()

        np.testing.assert_array_equal(static_ret, dy_eager_ret_value)
        np.testing.assert_array_equal(static_ret, dy_ret_value)

        with self.static_graph():

            # the input of Linear must be a Variable.
            def test_Variable():
                inp = np.ones([3, 32, 32], dtype='float32')
                linear = nn.Linear(
                    32,
                    4,
                    bias_attr=fluid.initializer.ConstantInitializer(value=1),
                )
                linear_ret1 = linear(inp)

            self.assertRaises(TypeError, test_Variable)

            # the input dtype of Linear must be float16, float32 or float64
            # float16 can only be used on a GPU place
            def test_type():
                inp = np.ones([3, 32, 32], dtype='int32')
                linear = nn.Linear(
                    32,
                    4,
                    bias_attr=fluid.initializer.ConstantInitializer(value=1),
                )
                linear_ret2 = linear(inp)

            self.assertRaises(TypeError, test_type)

    def test_Flatten(self):
        inp = np.ones([3, 4, 4, 5], dtype='float32')
        with self.static_graph():
            t = layers.data(
                name='data',
                shape=[3, 4, 4, 5],
                dtype='float32',
                append_batch_size=False,
            )
            flatten = nn.Flatten()
            ret = flatten(t)
            static_ret = self.get_static_graph_result(
                feed={'data': inp}, fetch_list=[ret]
            )[0]
        with self.dynamic_graph():
            with _test_eager_guard():
                t = base.to_variable(inp)
                flatten = nn.Flatten()
                dy_eager_ret = flatten(t)
                dy_eager_ret_value = dy_eager_ret.numpy()

            t = base.to_variable(inp)
            flatten = nn.Flatten()
            dy_ret = flatten(t)
            dy_ret_value = dy_ret.numpy()

        np.testing.assert_array_equal(static_ret, dy_eager_ret_value)
        np.testing.assert_array_equal(static_ret, dy_ret_value)

        with self.static_graph():

            # the input of Linear must be a Variable.
            def test_Variable():
                inp = np.ones([3, 32, 32], dtype='float32')
                linear = nn.Linear(
                    32,
                    4,
                    bias_attr=fluid.initializer.ConstantInitializer(value=1),
                )
                linear_ret1 = linear(inp)

            self.assertRaises(TypeError, test_Variable)

            # the input dtype of Linear must be float16, float32 or float64
            # float16 can only be used on a GPU place
            def test_type():
                inp = np.ones([3, 32, 32], dtype='int32')
                linear = nn.Linear(
                    32,
                    4,
                    bias_attr=fluid.initializer.ConstantInitializer(value=1),
                )
                linear_ret2 = linear(inp)

            self.assertRaises(TypeError, test_type)

    def test_layer_norm(self):
        inp = np.ones([3, 32, 32], dtype='float32')
        with self.static_graph():
            t = layers.data(
                name='data',
                shape=[3, 32, 32],
                dtype='float32',
                append_batch_size=False,
            )
            ret = layers.layer_norm(
                t,
                bias_attr=fluid.initializer.ConstantInitializer(value=1),
                act='sigmoid',
            )
            static_ret = self.get_static_graph_result(
                feed={'data': inp}, fetch_list=[ret]
            )[0]
        with self.static_graph():
            t = layers.data(
                name='data',
                shape=[3, 32, 32],
                dtype='float32',
                append_batch_size=False,
            )
            lm = nn.LayerNorm(
                normalized_shape=[32, 32],
                bias_attr=fluid.initializer.ConstantInitializer(value=1),
                act='sigmoid',
            )
            ret = lm(t)
            static_ret2 = self.get_static_graph_result(
                feed={'data': inp}, fetch_list=[ret]
            )[0]
        with self.dynamic_graph():
            with _test_eager_guard():
                lm = nn.LayerNorm(
                    normalized_shape=[32, 32],
                    bias_attr=fluid.initializer.ConstantInitializer(value=1),
                    act='sigmoid',
                )
                dy_eager_ret = lm(base.to_variable(inp))
                dy_eager_ret_value = dy_eager_ret.numpy()

            lm = nn.LayerNorm(
                normalized_shape=[32, 32],
                bias_attr=fluid.initializer.ConstantInitializer(value=1),
                act='sigmoid',
            )
            dy_ret = lm(base.to_variable(inp))
            dy_ret_value = dy_ret.numpy()

        with self.dynamic_graph():
            with _test_eager_guard():
                lm = nn.LayerNorm(
                    normalized_shape=[32, 32],
                    shift=False,
                    scale=False,
                    param_attr=fluid.initializer.ConstantInitializer(value=1),
                    bias_attr=fluid.initializer.ConstantInitializer(value=1),
                    act='sigmoid',
                )
                lm(base.to_variable(inp))

                self.assertFalse(hasattr(lm, "_scale_w"))
                self.assertFalse(hasattr(lm, "_bias_w"))

            lm = nn.LayerNorm(
                normalized_shape=[32, 32],
                shift=False,
                scale=False,
                param_attr=fluid.initializer.ConstantInitializer(value=1),
                bias_attr=fluid.initializer.ConstantInitializer(value=1),
                act='sigmoid',
            )
            lm(base.to_variable(inp))

            self.assertFalse(hasattr(lm, "_scale_w"))
            self.assertFalse(hasattr(lm, "_bias_w"))

        np.testing.assert_array_equal(static_ret, static_ret2)
        np.testing.assert_array_equal(dy_eager_ret_value, static_ret2)
        np.testing.assert_array_equal(dy_ret_value, static_ret2)

        with self.dynamic_graph():
            with _test_eager_guard():
                lm = nn.LayerNorm(
                    normalized_shape=[16, 32],
                    bias_attr=fluid.initializer.ConstantInitializer(value=1),
                    act='sigmoid',
                )
                with self.assertRaises(ValueError):
                    lm(base.to_variable(inp))

            lm = nn.LayerNorm(
                normalized_shape=[16, 32],
                bias_attr=fluid.initializer.ConstantInitializer(value=1),
                act='sigmoid',
            )
            with self.assertRaises(ValueError):
                lm(base.to_variable(inp))

    def test_SyncBatchNorm(self):
        if core.is_compiled_with_cuda():
            with self.static_graph():
                t = layers.data(name='t', shape=[-1, 3, 5, 5], dtype='float32')
                my_sync_bn = paddle.nn.SyncBatchNorm(3)
                ret = my_sync_bn(t)
                static_ret = self.get_static_graph_result(
                    feed={'t': np.ones([3, 3, 5, 5], dtype='float32')},
                    fetch_list=[ret],
                )[0]

            with self.dynamic_graph():
                with _test_eager_guard():
                    t = np.ones([3, 3, 5, 5], dtype='float32')
                    my_syncbn = paddle.nn.SyncBatchNorm(3)
                    dy_eager_ret = my_syncbn(base.to_variable(t))
                    dy_eager_ret_value = dy_eager_ret.numpy()

                t = np.ones([3, 3, 5, 5], dtype='float32')
                my_syncbn = paddle.nn.SyncBatchNorm(3)
                dy_ret = my_syncbn(base.to_variable(t))
                dy_ret_value = dy_ret.numpy()
            np.testing.assert_array_equal(static_ret, dy_ret_value)
            np.testing.assert_array_equal(static_ret, dy_eager_ret_value)

    def test_relu(self):
        with self.static_graph():
            t = layers.data(name='t', shape=[3, 3], dtype='float32')
            ret = layers.relu(t)
            static_ret = self.get_static_graph_result(
                feed={'t': np.ones([3, 3], dtype='float32')}, fetch_list=[ret]
            )[0]

        with self.dynamic_graph():
            with _test_eager_guard():
                t = np.ones([3, 3], dtype='float32')
                dy_eager_ret = layers.relu(base.to_variable(t))
                dy_eager_ret_value = dy_eager_ret.numpy()

            t = np.ones([3, 3], dtype='float32')
            dy_ret = layers.relu(base.to_variable(t))
            dy_ret_value = dy_ret.numpy()

        np.testing.assert_allclose(static_ret, dy_ret_value, rtol=1e-05)
        np.testing.assert_allclose(static_ret, dy_eager_ret_value, rtol=1e-05)

    def test_matmul(self):
        with self.static_graph():
            t = layers.data(name='t', shape=[3, 3], dtype='float32')
            t2 = layers.data(name='t2', shape=[3, 3], dtype='float32')
            ret = layers.matmul(t, t2)
            static_ret = self.get_static_graph_result(
                feed={
                    't': np.ones([3, 3], dtype='float32'),
                    't2': np.ones([3, 3], dtype='float32'),
                },
                fetch_list=[ret],
            )[0]

        with self.dynamic_graph():
            with _test_eager_guard():
                t = np.ones([3, 3], dtype='float32')
                t2 = np.ones([3, 3], dtype='float32')
                dy_eager_ret = layers.matmul(
                    base.to_variable(t), base.to_variable(t2)
                )
                dy_eager_ret_value = dy_eager_ret.numpy()

            t = np.ones([3, 3], dtype='float32')
            t2 = np.ones([3, 3], dtype='float32')
            dy_ret = layers.matmul(base.to_variable(t), base.to_variable(t2))
            dy_ret_value = dy_ret.numpy()

        np.testing.assert_allclose(static_ret, dy_ret_value, rtol=1e-05)
        np.testing.assert_allclose(static_ret, dy_eager_ret_value, rtol=1e-05)

    def test_conv2d(self):
        with self.static_graph():
            images = layers.data(name='pixel', shape=[3, 5, 5], dtype='float32')
            ret = layers.conv2d(input=images, num_filters=3, filter_size=[2, 2])
            static_ret = self.get_static_graph_result(
                feed={'pixel': np.ones([2, 3, 5, 5], dtype='float32')},
                fetch_list=[ret],
            )[0]

        with self.static_graph():
            images = layers.data(name='pixel', shape=[3, 5, 5], dtype='float32')
            conv2d = nn.Conv2D(
                num_channels=3, num_filters=3, filter_size=[2, 2]
            )
            ret = conv2d(images)
            static_ret2 = self.get_static_graph_result(
                feed={'pixel': np.ones([2, 3, 5, 5], dtype='float32')},
                fetch_list=[ret],
            )[0]

        with self.dynamic_graph():
            with _test_eager_guard():
                images = np.ones([2, 3, 5, 5], dtype='float32')
                conv2d = nn.Conv2D(
                    num_channels=3, num_filters=3, filter_size=[2, 2]
                )
                dy_eager_ret = conv2d(base.to_variable(images))
                dy_eager_ret_value = dy_eager_ret.numpy()

            images = np.ones([2, 3, 5, 5], dtype='float32')
            conv2d = nn.Conv2D(
                num_channels=3, num_filters=3, filter_size=[2, 2]
            )
            dy_ret = conv2d(base.to_variable(images))
            dy_ret_value = dy_ret.numpy()

        with self.dynamic_graph():
            with _test_eager_guard():
                images = np.ones([2, 3, 5, 5], dtype='float32')
                conv2d = nn.Conv2D(
                    num_channels=3,
                    num_filters=3,
                    filter_size=[2, 2],
                    bias_attr=False,
                )
                dy_ret = conv2d(base.to_variable(images))
                self.assertIsNone(conv2d.bias)

            images = np.ones([2, 3, 5, 5], dtype='float32')
            conv2d = nn.Conv2D(
                num_channels=3,
                num_filters=3,
                filter_size=[2, 2],
                bias_attr=False,
            )
            dy_ret = conv2d(base.to_variable(images))
            self.assertIsNone(conv2d.bias)

        with self.static_graph():
            # the input of Conv2D must be a Variable.
            def test_Variable():
                images = np.ones([2, 3, 5, 5], dtype='float32')
                conv2d = nn.Conv2D(
                    num_channels=3, num_filters=3, filter_size=[2, 2]
                )
                conv2d_ret1 = conv2d(images)

            self.assertRaises(TypeError, test_Variable)

            # the input dtype of Conv2D must be float16, float32 or float64
            # float16 can only be used on a GPU place
            def test_type():
                images = layers.data(
                    name='pixel', shape=[3, 5, 5], dtype='int32'
                )
                conv2d = nn.Conv2D(
                    num_channels=3, num_filters=3, filter_size=[2, 2]
                )
                conv2d_ret2 = conv2d(images)

            self.assertRaises(TypeError, test_type)

        np.testing.assert_allclose(static_ret, dy_ret_value, rtol=1e-05)
        np.testing.assert_allclose(static_ret, dy_eager_ret_value, rtol=1e-05)
        np.testing.assert_allclose(static_ret, static_ret2, rtol=1e-05)

        with self.dynamic_graph():
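            # Differently initialized layers must disagree; after copying
            # parameters via set_value (or sharing the Parameter objects),
            # their outputs must match exactly.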
            with _test_eager_guard():
                images = np.ones([2, 3, 5, 5], dtype='float32')
                custom_weight = np.random.randn(3, 3, 2, 2).astype("float32")
                weight_attr = fluid.ParamAttr(
                    initializer=fluid.initializer.NumpyArrayInitializer(
                        custom_weight
                    )
                )
                conv2d1 = nn.Conv2D(
                    num_channels=3, num_filters=3, filter_size=[2, 2]
                )
                conv2d2 = nn.Conv2D(
                    num_channels=3,
                    num_filters=3,
                    filter_size=[2, 2],
                    param_attr=weight_attr,
                )
                dy_ret1 = conv2d1(base.to_variable(images))
                dy_ret2 = conv2d2(base.to_variable(images))
                self.assertFalse(
                    np.array_equal(dy_ret1.numpy(), dy_ret2.numpy())
                )

                conv2d1_weight_np = conv2d1.weight.numpy()
                conv2d1_bias = conv2d1.bias
                self.assertFalse(
                    np.array_equal(conv2d1_weight_np, conv2d2.weight.numpy())
                )
                conv2d2.weight.set_value(conv2d1_weight_np)
                np.testing.assert_array_equal(
                    conv2d1_weight_np, conv2d2.weight.numpy()
                )
                conv2d2.bias.set_value(conv2d1_bias)
                dy_ret1 = conv2d1(base.to_variable(images))
                dy_ret2 = conv2d2(base.to_variable(images))
                np.testing.assert_array_equal(dy_ret1.numpy(), dy_ret2.numpy())

                conv2d2.weight = conv2d1.weight
                conv2d2.bias = conv2d1.bias
                np.testing.assert_array_equal(
                    conv2d1.weight.numpy(), conv2d2.weight.numpy()
                )
                np.testing.assert_array_equal(
                    conv2d1.bias.numpy(), conv2d2.bias.numpy()
                )

            images = np.ones([2, 3, 5, 5], dtype='float32')
            custom_weight = np.random.randn(3, 3, 2, 2).astype("float32")
            weight_attr = fluid.ParamAttr(
                initializer=fluid.initializer.NumpyArrayInitializer(
                    custom_weight
                )
            )
            conv2d1 = nn.Conv2D(
                num_channels=3, num_filters=3, filter_size=[2, 2]
            )
            conv2d2 = nn.Conv2D(
                num_channels=3,
                num_filters=3,
                filter_size=[2, 2],
                param_attr=weight_attr,
            )
            dy_ret1 = conv2d1(base.to_variable(images))
            dy_ret2 = conv2d2(base.to_variable(images))
            self.assertFalse(np.array_equal(dy_ret1.numpy(), dy_ret2.numpy()))

            conv2d1_weight_np = conv2d1.weight.numpy()
            conv2d1_bias = conv2d1.bias
            self.assertFalse(
                np.array_equal(conv2d1_weight_np, conv2d2.weight.numpy())
            )
            conv2d2.weight.set_value(conv2d1_weight_np)
            np.testing.assert_array_equal(
                conv2d1_weight_np, conv2d2.weight.numpy()
            )
            conv2d2.bias.set_value(conv2d1_bias)
            dy_ret1 = conv2d1(base.to_variable(images))
            dy_ret2 = conv2d2(base.to_variable(images))
            np.testing.assert_array_equal(dy_ret1.numpy(), dy_ret2.numpy())

            conv2d2.weight = conv2d1.weight
            conv2d2.bias = conv2d1.bias
            np.testing.assert_array_equal(
                conv2d1.weight.numpy(), conv2d2.weight.numpy()
            )
            np.testing.assert_array_equal(
                conv2d1.bias.numpy(), conv2d2.bias.numpy()
            )

    def test_gru_unit(self):
        lod = [[2, 4, 3]]
        D = 5
        T = sum(lod[0])
        N = len(lod[0])

        input = np.random.rand(T, 3 * D).astype('float32')
        hidden_input = np.random.rand(T, D).astype('float32')

        with self.static_graph():
            x = layers.data(name='x', shape=[-1, D * 3], dtype='float32')
            hidden = layers.data(name='hidden', shape=[-1, D], dtype='float32')
            updated_hidden, reset_hidden_pre, gate = layers.gru_unit(
                input=x, hidden=hidden, size=D * 3
            )
            static_ret = self.get_static_graph_result(
                feed={'x': input, 'hidden': hidden_input},
                fetch_list=[updated_hidden, reset_hidden_pre, gate],
            )

        with self.static_graph():
            x = layers.data(name='x', shape=[-1, D * 3], dtype='float32')
            hidden = layers.data(name='hidden', shape=[-1, D], dtype='float32')
            updated_hidden, reset_hidden_pre, gate = layers.gru_unit(
                input=x, hidden=hidden, size=D * 3
            )
            gru = nn.GRUUnit(size=D * 3)
            updated_hidden, reset_hidden_pre, gate = gru(x, hidden)

            static_ret2 = self.get_static_graph_result(
                feed={'x': input, 'hidden': hidden_input},
                fetch_list=[updated_hidden, reset_hidden_pre, gate],
            )

        with self.dynamic_graph():
            with _test_eager_guard():
                gru = nn.GRUUnit(size=D * 3)
                dy_eager_ret = gru(
                    base.to_variable(input), base.to_variable(hidden_input)
                )
                dy_eager_ret_value = []
                for i in range(len(static_ret)):
                    dy_eager_ret_value.append(dy_eager_ret[i].numpy())

            gru = nn.GRUUnit(size=D * 3)
            dy_ret = gru(
                base.to_variable(input), base.to_variable(hidden_input)
            )
            dy_ret_value = []
            for i in range(len(static_ret)):
                dy_ret_value.append(dy_ret[i].numpy())

        for i in range(len(static_ret)):
            np.testing.assert_allclose(
                static_ret[i], static_ret2[i], rtol=1e-05
            )
            np.testing.assert_allclose(
                static_ret[i], dy_ret_value[i], rtol=1e-05
            )
            np.testing.assert_allclose(
                static_ret[i], dy_eager_ret_value[i], rtol=1e-05
            )

        with self.dynamic_graph():
            with _test_eager_guard():
                custom_weight = np.random.randn(D, D * 3).astype("float32")
                weight_attr = fluid.ParamAttr(
                    initializer=fluid.initializer.NumpyArrayInitializer(
                        custom_weight
                    )
                )
                gru1 = nn.GRUUnit(size=D * 3)
                gru2 = nn.GRUUnit(size=D * 3, param_attr=weight_attr)
                dy_ret1 = gru1(
                    base.to_variable(input), base.to_variable(hidden_input)
                )
                dy_ret2 = gru2(
                    base.to_variable(input), base.to_variable(hidden_input)
                )
                self.assertFalse(
                    np.array_equal(gru1.weight.numpy(), gru2.weight.numpy())
                )
                for o1, o2 in zip(dy_ret1, dy_ret2):
                    self.assertFalse(np.array_equal(o1.numpy(), o2.numpy()))
                gru2.weight.set_value(gru1.weight.numpy())
                gru2.bias.set_value(gru1.bias)
                dy_ret1 = gru1(
                    base.to_variable(input), base.to_variable(hidden_input)
                )
                dy_ret2 = gru2(
                    base.to_variable(input), base.to_variable(hidden_input)
                )
                for o1, o2 in zip(dy_ret1, dy_ret2):
                    np.testing.assert_array_equal(o1.numpy(), o2.numpy())

                gru2.weight = gru1.weight
                gru2.bias = gru1.bias
                np.testing.assert_array_equal(
                    gru1.weight.numpy(), gru2.weight.numpy()
                )
                np.testing.assert_array_equal(
                    gru1.bias.numpy(), gru2.bias.numpy()
                )

            custom_weight = np.random.randn(D, D * 3).astype("float32")
            weight_attr = fluid.ParamAttr(
                initializer=fluid.initializer.NumpyArrayInitializer(
                    custom_weight
                )
            )
            gru1 = nn.GRUUnit(size=D * 3)
            gru2 = nn.GRUUnit(size=D * 3, param_attr=weight_attr)
            dy_ret1 = gru1(
                base.to_variable(input), base.to_variable(hidden_input)
            )
            dy_ret2 = gru2(
                base.to_variable(input), base.to_variable(hidden_input)
            )
            self.assertFalse(
                np.array_equal(gru1.weight.numpy(), gru2.weight.numpy())
            )
            for o1, o2 in zip(dy_ret1, dy_ret2):
                self.assertFalse(np.array_equal(o1.numpy(), o2.numpy()))
            gru2.weight.set_value(gru1.weight.numpy())
            gru2.bias.set_value(gru1.bias)
            dy_ret1 = gru1(
                base.to_variable(input), base.to_variable(hidden_input)
            )
            dy_ret2 = gru2(
                base.to_variable(input), base.to_variable(hidden_input)
            )
            for o1, o2 in zip(dy_ret1, dy_ret2):
                np.testing.assert_array_equal(o1.numpy(), o2.numpy())

            gru2.weight = gru1.weight
            gru2.bias = gru1.bias
            np.testing.assert_array_equal(
                gru1.weight.numpy(), gru2.weight.numpy()
            )
            np.testing.assert_array_equal(gru1.bias.numpy(), gru2.bias.numpy())

    def test_elementwise_math(self):
        n = np.ones([3, 3], dtype='float32')
        n2 = np.ones([3, 3], dtype='float32') * 1.1
        n3 = np.ones([3, 3], dtype='float32') * 2
        n4 = np.ones([3, 3], dtype='float32') * 3
        n5 = np.ones([3, 3], dtype='float32') * 4
        n6 = np.ones([3, 3], dtype='float32') * 5

        with self.static_graph():
            t = layers.data(name='t', shape=[3, 3], dtype='float32')
            t2 = layers.data(name='t2', shape=[3, 3], dtype='float32')
            t3 = layers.data(name='t3', shape=[3, 3], dtype='float32')
            t4 = layers.data(name='t4', shape=[3, 3], dtype='float32')
            t5 = layers.data(name='t5', shape=[3, 3], dtype='float32')
            t6 = layers.data(name='t6', shape=[3, 3], dtype='float32')

            ret = layers.elementwise_add(t, t2)
            ret = layers.elementwise_pow(ret, t3)
            ret = layers.elementwise_div(ret, t4)
            ret = layers.elementwise_sub(ret, t5)
            ret = layers.elementwise_mul(ret, t6)

            static_ret = self.get_static_graph_result(
                feed={'t': n, 't2': n2, 't3': n3, 't4': n4, 't5': n5, 't6': n6},
                fetch_list=[ret],
            )[0]

        with self.dynamic_graph():
            with _test_eager_guard():
                ret = layers.elementwise_add(to_variable(n), to_variable(n2))
                ret = layers.elementwise_pow(ret, to_variable(n3))
                ret = layers.elementwise_div(ret, to_variable(n4))
                ret = layers.elementwise_sub(ret, to_variable(n5))
                dy_eager_ret = layers.elementwise_mul(ret, to_variable(n6))
                dy_eager_ret_value = dy_eager_ret.numpy()

            ret = layers.elementwise_add(to_variable(n), to_variable(n2))
            ret = layers.elementwise_pow(ret, to_variable(n3))
            ret = layers.elementwise_div(ret, to_variable(n4))
            ret = layers.elementwise_sub(ret, to_variable(n5))
            dy_ret = layers.elementwise_mul(ret, to_variable(n6))
            dy_ret_value = dy_ret.numpy()

        np.testing.assert_allclose(static_ret, dy_ret_value, rtol=1e-05)
        np.testing.assert_allclose(static_ret, dy_eager_ret_value, rtol=1e-05)

    def test_elementwise_minmax(self):
        n = np.ones([3, 3], dtype='float32')
        n2 = np.ones([3, 3], dtype='float32') * 2

        with self.dynamic_graph():
            with _test_eager_guard():
                min_eager_ret = layers.elementwise_min(
                    to_variable(n), to_variable(n2)
                )
                max_eager_ret = layers.elementwise_max(
                    to_variable(n), to_variable(n2)
                )
                min_eager_ret_value = min_eager_ret.numpy()
                max_eager_ret_value = max_eager_ret.numpy()

            min_ret = layers.elementwise_min(to_variable(n), to_variable(n2))
            max_ret = layers.elementwise_max(to_variable(n), to_variable(n2))
            min_ret_value = min_ret.numpy()
            max_ret_value = max_ret.numpy()

        np.testing.assert_allclose(n, min_ret_value, rtol=1e-05)
        np.testing.assert_allclose(n2, max_ret_value, rtol=1e-05)
        np.testing.assert_allclose(n, min_eager_ret_value, rtol=1e-05)
        np.testing.assert_allclose(n2, max_eager_ret_value, rtol=1e-05)

    def test_sequence_conv(self):
        inp_np = np.arange(12).reshape([3, 4]).astype('float32')
        if core.is_compiled_with_cuda():
            place = core.CUDAPlace(0)
        else:
            place = core.CPUPlace()
        with self.static_graph():
            seq = layers.data(
                name='seq_in',
                shape=[3, 4],
                dtype='float32',
                lod_level=1,
                append_batch_size=False,
            )
            out = layers.sequence_conv(seq, 2, act='sigmoid')
            static_rlt = self.get_static_graph_result(
                feed={
                    "seq_in": fluid.create_lod_tensor(
                        data=inp_np, recursive_seq_lens=[[1, 1, 1]], place=place
                    )
                },
                fetch_list=[out],
                with_lod=True,
            )[0]

        with self.static_graph():
            seq = layers.data(
                name='seq_in',
                shape=[3, 4],
                dtype='float32',
                lod_level=1,
                append_batch_size=False,
            )
            seq_conv = nn.SequenceConv('seq_conv', num_filters=2, act='sigmoid')
            out = seq_conv(seq)
            static_rlt2 = self.get_static_graph_result(
                feed={
                    "seq_in": fluid.create_lod_tensor(
                        data=inp_np, recursive_seq_lens=[[1, 1, 1]], place=place
                    )
                },
                fetch_list=[out],
                with_lod=True,
            )[0]
        np.testing.assert_array_equal(
            np.array(static_rlt), np.array(static_rlt2)
        )

    def test_conv2d_transpose(self):
        inp_np = np.arange(0, 24).reshape([2, 3, 2, 2]).astype('float32')
        with self.static_graph():
            img = layers.data(name='pixel', shape=[3, 2, 2], dtype='float32')
            out = layers.conv2d_transpose(
                input=img,
                num_filters=10,
                filter_size=27,
                act='sigmoid',
                bias_attr=fluid.initializer.ConstantInitializer(value=1),
            )
            static_rlt = self.get_static_graph_result(
                feed={'pixel': inp_np}, fetch_list=[out]
            )[0]
        with self.static_graph():
            img = layers.data(name='pixel', shape=[3, 2, 2], dtype='float32')
            conv2d_transpose = nn.Conv2DTranspose(
                num_channels=3,
                num_filters=10,
                filter_size=27,
                act='sigmoid',
                bias_attr=fluid.initializer.ConstantInitializer(value=1),
            )
            out = conv2d_transpose(img)
            static_rlt2 = self.get_static_graph_result(
                feed={'pixel': inp_np}, fetch_list=[out]
            )[0]
        with self.dynamic_graph():
            with _test_eager_guard():
                conv2d_transpose = nn.Conv2DTranspose(
                    num_channels=3,
                    num_filters=10,
                    filter_size=27,
                    act='sigmoid',
                    bias_attr=fluid.initializer.ConstantInitializer(value=1),
                )
                dy_eager_rlt = conv2d_transpose(base.to_variable(inp_np))
                dy_eager_rlt_value = dy_eager_rlt.numpy()

            conv2d_transpose = nn.Conv2DTranspose(
                num_channels=3,
                num_filters=10,
                filter_size=27,
                act='sigmoid',
                bias_attr=fluid.initializer.ConstantInitializer(value=1),
            )
            dy_rlt = conv2d_transpose(base.to_variable(inp_np))
            dy_rlt_value = dy_rlt.numpy()
        np.testing.assert_allclose(static_rlt2, static_rlt, rtol=1e-05)
        np.testing.assert_allclose(dy_rlt_value, static_rlt2, rtol=1e-05)
        np.testing.assert_allclose(dy_eager_rlt_value, static_rlt2, rtol=1e-05)

        with self.dynamic_graph():
            with _test_eager_guard():
                images = np.ones([2, 3, 5, 5], dtype='float32')
                custom_weight = np.random.randn(3, 3, 2, 2).astype("float32")
                weight_attr = fluid.ParamAttr(
                    initializer=fluid.initializer.NumpyArrayInitializer(
                        custom_weight
                    )
                )
                conv2d1 = nn.Conv2DTranspose(
                    num_channels=3, num_filters=3, filter_size=[2, 2]
                )
                conv2d2 = nn.Conv2DTranspose(
                    num_channels=3,
                    num_filters=3,
                    filter_size=[2, 2],
                    param_attr=weight_attr,
                )
                dy_ret1 = conv2d1(base.to_variable(images))
                dy_ret2 = conv2d2(base.to_variable(images))
                self.assertFalse(
                    np.array_equal(dy_ret1.numpy(), dy_ret2.numpy())
                )

                conv2d1_weight_np = conv2d1.weight.numpy()
                conv2d1_bias = conv2d1.bias
                self.assertFalse(
                    np.array_equal(conv2d1_weight_np, conv2d2.weight.numpy())
                )
                conv2d2.weight.set_value(conv2d1_weight_np)
                np.testing.assert_array_equal(
                    conv2d1_weight_np, conv2d2.weight.numpy()
                )
                conv2d2.bias.set_value(conv2d1_bias)
                dy_ret1 = conv2d1(base.to_variable(images))
                dy_ret2 = conv2d2(base.to_variable(images))
                np.testing.assert_array_equal(dy_ret1.numpy(), dy_ret2.numpy())

                conv2d2.weight = conv2d1.weight
                conv2d2.bias = conv2d1.bias
                np.testing.assert_array_equal(
                    conv2d1.weight.numpy(), conv2d2.weight.numpy()
                )
                np.testing.assert_array_equal(
                    conv2d1.bias.numpy(), conv2d2.bias.numpy()
                )

            images = np.ones([2, 3, 5, 5], dtype='float32')
            custom_weight = np.random.randn(3, 3, 2, 2).astype("float32")
            weight_attr = fluid.ParamAttr(
                initializer=fluid.initializer.NumpyArrayInitializer(
                    custom_weight
                )
            )
            conv2d1 = nn.Conv2DTranspose(
                num_channels=3, num_filters=3, filter_size=[2, 2]
            )
            conv2d2 = nn.Conv2DTranspose(
                num_channels=3,
                num_filters=3,
                filter_size=[2, 2],
                param_attr=weight_attr,
            )
            dy_ret1 = conv2d1(base.to_variable(images))
            dy_ret2 = conv2d2(base.to_variable(images))
            self.assertFalse(np.array_equal(dy_ret1.numpy(), dy_ret2.numpy()))

            conv2d1_weight_np = conv2d1.weight.numpy()
            conv2d1_bias = conv2d1.bias
            self.assertFalse(
                np.array_equal(conv2d1_weight_np, conv2d2.weight.numpy())
            )
            conv2d2.weight.set_value(conv2d1_weight_np)
            np.testing.assert_array_equal(
                conv2d1_weight_np, conv2d2.weight.numpy()
            )
            conv2d2.bias.set_value(conv2d1_bias)
            dy_ret1 = conv2d1(base.to_variable(images))
            dy_ret2 = conv2d2(base.to_variable(images))
            np.testing.assert_array_equal(dy_ret1.numpy(), dy_ret2.numpy())

            conv2d2.weight = conv2d1.weight
            conv2d2.bias = conv2d1.bias
            np.testing.assert_array_equal(
                conv2d1.weight.numpy(), conv2d2.weight.numpy()
            )
            np.testing.assert_array_equal(
                conv2d1.bias.numpy(), conv2d2.bias.numpy()
            )

        with self.static_graph():

            # the input of Conv2DTranspose must be a Variable.
            def test_Variable():
                images = np.ones([2, 3, 5, 5], dtype='float32')
                conv2d = nn.Conv2DTranspose(
                    num_channels=3, num_filters=3, filter_size=[2, 2]
                )
                conv2d_ret1 = conv2d(images)

            self.assertRaises(TypeError, test_Variable)

            # the input dtype of Conv2DTranspose must be float16, float32 or float64
            # float16 can only be used on a GPU place
            def test_type():
                images = layers.data(
                    name='pixel', shape=[3, 5, 5], dtype='int32'
                )
                conv2d = nn.Conv2DTranspose(
                    num_channels=3, num_filters=3, filter_size=[2, 2]
                )
                conv2d_ret2 = conv2d(images)

            self.assertRaises(TypeError, test_type)

    def test_bilinear_tensor_product(self):
        inp_np_x = np.array([[1, 2, 3]]).astype('float32')
        inp_np_y = np.array([[4, 5, 6]]).astype('float32')

        with self.static_graph():
            data_x = layers.data(
                name='x', shape=[1, 3], dtype="float32", append_batch_size=False
            )
            data_y = layers.data(
                name='y', shape=[1, 3], dtype="float32", append_batch_size=False
            )
            out = layers.bilinear_tensor_product(
                data_x,
                data_y,
                6,
                bias_attr=fluid.initializer.ConstantInitializer(value=1),
                act='sigmoid',
            )

            static_rlt = self.get_static_graph_result(
                feed={'x': inp_np_x, 'y': inp_np_y}, fetch_list=[out]
            )[0]

        with self.static_graph():
            data_x = layers.data(
                name='x', shape=[1, 3], dtype="float32", append_batch_size=False
            )
            data_y = layers.data(
                name='y', shape=[1, 3], dtype="float32", append_batch_size=False
            )
            btp = nn.BilinearTensorProduct(
                3,
                3,
                6,
                bias_attr=fluid.initializer.ConstantInitializer(value=1),
                act='sigmoid',
            )
            out = btp(data_x, data_y)
            static_rlt2 = self.get_static_graph_result(
                feed={'x': inp_np_x, 'y': inp_np_y}, fetch_list=[out]
            )[0]
        with self.dynamic_graph():
            with _test_eager_guard():
                btp = nn.BilinearTensorProduct(
                    3,
                    3,
                    6,
                    bias_attr=fluid.initializer.ConstantInitializer(value=1),
                    act='sigmoid',
                )
                dy_eager_rlt = btp(
                    base.to_variable(inp_np_x), base.to_variable(inp_np_y)
                )
                dy_eager_rlt_value = dy_eager_rlt.numpy()

            btp = nn.BilinearTensorProduct(
                3,
                3,
                6,
                bias_attr=fluid.initializer.ConstantInitializer(value=1),
                act='sigmoid',
            )
            dy_rlt = btp(base.to_variable(inp_np_x), base.to_variable(inp_np_y))
            dy_rlt_value = dy_rlt.numpy()

        with self.dynamic_graph():
            with _test_eager_guard():
                btp2 = nn.BilinearTensorProduct(3, 3, 6, act='sigmoid')
                dy_eager_rlt2 = btp2(
                    base.to_variable(inp_np_x), base.to_variable(inp_np_y)
                )
                dy_eager_rlt2_value = dy_eager_rlt2.numpy()

            btp2 = nn.BilinearTensorProduct(3, 3, 6, act='sigmoid')
            dy_rlt2 = btp2(
                base.to_variable(inp_np_x), base.to_variable(inp_np_y)
            )
            dy_rlt2_value = dy_rlt2.numpy()

        with self.static_graph():
            data_x2 = layers.data(
                name='x', shape=[1, 3], dtype="float32", append_batch_size=False
            )
            data_y2 = layers.data(
                name='y', shape=[1, 3], dtype="float32", append_batch_size=False
            )
            out2 = layers.bilinear_tensor_product(
                data_x2, data_y2, 6, act='sigmoid'
            )

            static_rlt3 = self.get_static_graph_result(
                feed={'x': inp_np_x, 'y': inp_np_y}, fetch_list=[out2]
            )[0]

        np.testing.assert_array_equal(dy_rlt2_value, static_rlt3)
        np.testing.assert_array_equal(dy_eager_rlt2_value, static_rlt3)
        np.testing.assert_array_equal(static_rlt2, static_rlt)
        np.testing.assert_array_equal(dy_rlt_value, static_rlt)
        np.testing.assert_array_equal(dy_eager_rlt_value, static_rlt)

        with self.dynamic_graph():
            with _test_eager_guard():
                custom_weight = np.random.randn(6, 3, 3).astype("float32")
                weight_attr = fluid.ParamAttr(
                    initializer=fluid.initializer.NumpyArrayInitializer(
                        custom_weight
                    )
                )
                btp1 = nn.BilinearTensorProduct(3, 3, 6, act='sigmoid')
                btp2 = nn.BilinearTensorProduct(
                    3, 3, 6, act='sigmoid', param_attr=weight_attr
                )
                dy_rlt1 = btp1(
                    base.to_variable(inp_np_x), base.to_variable(inp_np_y)
                )
                dy_rlt2 = btp2(
                    base.to_variable(inp_np_x), base.to_variable(inp_np_y)
                )
                self.assertFalse(
                    np.array_equal(dy_rlt1.numpy(), dy_rlt2.numpy())
                )
                btp2.weight.set_value(btp1.weight.numpy())
                btp2.bias.set_value(btp1.bias)
                dy_rlt1 = btp1(
                    base.to_variable(inp_np_x), base.to_variable(inp_np_y)
                )
                dy_rlt2 = btp2(
                    base.to_variable(inp_np_x), base.to_variable(inp_np_y)
                )
                np.testing.assert_array_equal(dy_rlt1.numpy(), dy_rlt2.numpy())

                btp2.weight = btp1.weight
                btp2.bias = btp1.bias
                np.testing.assert_array_equal(
                    btp1.weight.numpy(), btp2.weight.numpy()
                )
                np.testing.assert_array_equal(
                    btp1.bias.numpy(), btp2.bias.numpy()
                )

            custom_weight = np.random.randn(6, 3, 3).astype("float32")
            weight_attr = fluid.ParamAttr(
                initializer=fluid.initializer.NumpyArrayInitializer(
                    custom_weight
                )
            )
            btp1 = nn.BilinearTensorProduct(3, 3, 6, act='sigmoid')
            btp2 = nn.BilinearTensorProduct(
                3, 3, 6, act='sigmoid', param_attr=weight_attr
            )
            dy_rlt1 = btp1(
                base.to_variable(inp_np_x), base.to_variable(inp_np_y)
            )
            dy_rlt2 = btp2(
                base.to_variable(inp_np_x), base.to_variable(inp_np_y)
            )
            self.assertFalse(np.array_equal(dy_rlt1.numpy(), dy_rlt2.numpy()))
            btp2.weight.set_value(btp1.weight.numpy())
            btp2.bias.set_value(btp1.bias)
            dy_rlt1 = btp1(
                base.to_variable(inp_np_x), base.to_variable(inp_np_y)
            )
            dy_rlt2 = btp2(
                base.to_variable(inp_np_x), base.to_variable(inp_np_y)
            )
            np.testing.assert_array_equal(dy_rlt1.numpy(), dy_rlt2.numpy())

            btp2.weight = btp1.weight
            btp2.bias = btp1.bias
            np.testing.assert_array_equal(
                btp1.weight.numpy(), btp2.weight.numpy()
            )
            np.testing.assert_array_equal(btp1.bias.numpy(), btp2.bias.numpy())

    def prelu_test(self, mode):
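        # Helper (name does not start with "test", so unittest will not run
        # it directly); invoked once per mode from test_prelu below.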
        inp_np = np.ones([5, 200, 100, 100]).astype('float32')
        with self.static_graph():
            data_t = layers.data(
                name="input",
                shape=[5, 200, 100, 100],
                dtype="float32",
                append_batch_size=False,
            )
            out = layers.prelu(
                data_t, mode, param_attr=ParamAttr(initializer=Constant(1.0))
            )
            static_rlt = self.get_static_graph_result(
                feed={"input": inp_np}, fetch_list=[out]
            )[0]

        with self.static_graph():
            data_t = layers.data(
                name="input",
                shape=[5, 200, 100, 100],
                dtype="float32",
                append_batch_size=False,
            )
            prelu = nn.PRelu(
                mode=mode,
                channel=inp_np.shape[1],
                input_shape=data_t.shape,
                param_attr=ParamAttr(initializer=Constant(1.0)),
            )
            out = prelu(data_t)
            static_rlt2 = self.get_static_graph_result(
                feed={"input": inp_np}, fetch_list=[out]
            )[0]

        with self.dynamic_graph():
            with _test_eager_guard():
                prelu = nn.PRelu(
                    mode=mode,
                    channel=inp_np.shape[1],
                    input_shape=inp_np.shape,
                    param_attr=ParamAttr(initializer=Constant(1.0)),
                )
                dy_eager_rlt = prelu(base.to_variable(inp_np))
                dy_eager_rlt_value = dy_eager_rlt.numpy()

            prelu = nn.PRelu(
                mode=mode,
                channel=inp_np.shape[1],
                input_shape=inp_np.shape,
                param_attr=ParamAttr(initializer=Constant(1.0)),
            )
            dy_rlt = prelu(base.to_variable(inp_np))
            dy_rlt_value = dy_rlt.numpy()

        np.testing.assert_allclose(static_rlt2, static_rlt, rtol=1e-05)
        np.testing.assert_allclose(dy_rlt_value, static_rlt, rtol=1e-05)
        np.testing.assert_allclose(dy_eager_rlt_value, static_rlt, rtol=1e-05)

        with self.dynamic_graph():
            with _test_eager_guard():
                inp_np = np.random.randn(5, 200, 100, 100).astype("float32")
                inp = base.to_variable(inp_np)
                prelu1 = nn.PRelu(
                    mode=mode,
                    channel=inp_np.shape[1],
                    input_shape=inp_np.shape,
                    param_attr=ParamAttr(initializer=Constant(2.0)),
                )
                prelu2 = nn.PRelu(
                    mode=mode,
                    channel=inp_np.shape[1],
                    input_shape=inp_np.shape,
                    param_attr=ParamAttr(initializer=Constant(1.0)),
                )
                dy_rlt1 = prelu1(inp)
                dy_rlt2 = prelu2(inp)
                self.assertFalse(
                    np.array_equal(prelu1.weight.numpy(), prelu2.weight.numpy())
                )
                self.assertFalse(
                    np.array_equal(dy_rlt1.numpy(), dy_rlt2.numpy())
                )
                prelu2.weight.set_value(prelu1.weight.numpy())
                dy_rlt1 = prelu1(inp)
                dy_rlt2 = prelu2(inp)
                np.testing.assert_array_equal(dy_rlt1.numpy(), dy_rlt2.numpy())

                prelu2.weight = prelu1.weight
                np.testing.assert_array_equal(
                    prelu1.weight.numpy(), prelu2.weight.numpy()
                )

            inp_np = np.random.randn(5, 200, 100, 100).astype("float32")
            inp = base.to_variable(inp_np)
            prelu1 = nn.PRelu(
                mode=mode,
                channel=inp_np.shape[1],
                input_shape=inp_np.shape,
                param_attr=ParamAttr(initializer=Constant(2.0)),
            )
            prelu2 = nn.PRelu(
                mode=mode,
                channel=inp_np.shape[1],
                input_shape=inp_np.shape,
                param_attr=ParamAttr(initializer=Constant(1.0)),
            )
            dy_rlt1 = prelu1(inp)
            dy_rlt2 = prelu2(inp)
            self.assertFalse(
                np.array_equal(prelu1.weight.numpy(), prelu2.weight.numpy())
            )
            self.assertFalse(np.array_equal(dy_rlt1.numpy(), dy_rlt2.numpy()))
            prelu2.weight.set_value(prelu1.weight.numpy())
            dy_rlt1 = prelu1(inp)
            dy_rlt2 = prelu2(inp)
            np.testing.assert_array_equal(dy_rlt1.numpy(), dy_rlt2.numpy())

            prelu2.weight = prelu1.weight
            np.testing.assert_array_equal(
                prelu1.weight.numpy(), prelu2.weight.numpy()
            )

    def test_prelu(self):
        self.prelu_test("channel")
        self.prelu_test("element")
        self.prelu_test("all")

    def test_embedding(self):
        inp_word = np.array([[[1]]]).astype('int64')
        dict_size = 20
        with self.static_graph():
            data_t = layers.data(name='word', shape=[1], dtype='int64')
            emb = layers.embedding(
                input=data_t,
                size=[dict_size, 32],
                param_attr='emb.w',
                is_sparse=False,
            )
            static_rlt = self.get_static_graph_result(
                feed={'word': inp_word}, fetch_list=[emb]
            )[0]
        with self.static_graph():
            data_t = layers.data(name='word', shape=[1], dtype='int64')
            emb2 = nn.Embedding(
                size=[dict_size, 32], param_attr='emb.w', is_sparse=False
            )
            emb_rlt = emb2(data_t)
            static_rlt2 = self.get_static_graph_result(
                feed={'word': inp_word}, fetch_list=[emb_rlt]
            )[0]
        with self.dynamic_graph():
            with _test_eager_guard():
                emb2 = nn.Embedding(
                    size=[dict_size, 32],
                    param_attr='eager_emb.w',
                    is_sparse=False,
                )
                dy_eager_rlt = emb2(base.to_variable(inp_word))
                dy_eager_rlt_value = dy_eager_rlt.numpy()

            emb2 = nn.Embedding(
                size=[dict_size, 32], param_attr='emb.w', is_sparse=False
            )
            dy_rlt = emb2(base.to_variable(inp_word))
            dy_rlt_value = dy_rlt.numpy()

        np.testing.assert_allclose(static_rlt2, static_rlt, rtol=1e-05)
        np.testing.assert_allclose(dy_rlt_value, static_rlt, rtol=1e-05)
        np.testing.assert_allclose(dy_eager_rlt_value, static_rlt, rtol=1e-05)

        with self.dynamic_graph():
            with _test_eager_guard():
                custom_weight = np.random.randn(dict_size, 32).astype("float32")
                weight_attr = fluid.ParamAttr(
                    initializer=fluid.initializer.NumpyArrayInitializer(
                        custom_weight
                    )
                )
                emb1 = nn.Embedding(size=[dict_size, 32], is_sparse=False)
                emb2 = nn.Embedding(
                    size=[dict_size, 32],
                    param_attr=weight_attr,
                    is_sparse=False,
                )
                rep1 = emb1(base.to_variable(inp_word))
                rep2 = emb2(base.to_variable(inp_word))
                self.assertFalse(
                    np.array_equal(emb1.weight.numpy(), custom_weight)
                )
                np.testing.assert_array_equal(
                    emb2.weight.numpy(), custom_weight
                )
                self.assertFalse(np.array_equal(rep1.numpy(), rep2.numpy()))
                emb2.weight.set_value(emb1.weight.numpy())
                rep2 = emb2(base.to_variable(inp_word))
                np.testing.assert_array_equal(rep1.numpy(), rep2.numpy())

                emb2.weight = emb1.weight
                np.testing.assert_array_equal(
                    emb1.weight.numpy(), emb2.weight.numpy()
                )

            custom_weight = np.random.randn(dict_size, 32).astype("float32")
            weight_attr = fluid.ParamAttr(
                initializer=fluid.initializer.NumpyArrayInitializer(
                    custom_weight
                )
            )
            emb1 = nn.Embedding(size=[dict_size, 32], is_sparse=False)
            emb2 = nn.Embedding(
                size=[dict_size, 32], param_attr=weight_attr, is_sparse=False
            )
            rep1 = emb1(base.to_variable(inp_word))
            rep2 = emb2(base.to_variable(inp_word))
            self.assertFalse(np.array_equal(emb1.weight.numpy(), custom_weight))
            np.testing.assert_array_equal(emb2.weight.numpy(), custom_weight)
            self.assertFalse(np.array_equal(rep1.numpy(), rep2.numpy()))
            emb2.weight.set_value(emb1.weight.numpy())
            rep2 = emb2(base.to_variable(inp_word))
            np.testing.assert_array_equal(rep1.numpy(), rep2.numpy())

            emb2.weight = emb1.weight
            np.testing.assert_array_equal(
                emb1.weight.numpy(), emb2.weight.numpy()
            )

    def test_nce(self):
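        # NCE loss with a custom sampling distribution: the functional
        # layers.nce, the nn.NCE layer, and dygraph/eager runs must agree.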
        window_size = 5
        dict_size = 20
        label_word = int(window_size // 2) + 1
        inp_word = np.array([[1], [2], [3], [4], [5]]).astype('int64')
        nid_freq_arr = np.random.dirichlet(np.ones(20) * 1000).astype('float32')
        seed = 1
        with self.static_graph():
            words = []
            for i in range(window_size):
                words.append(
                    layers.data(
                        name='word_{0}'.format(i), shape=[None], dtype='int64'
                    )
                )
            sample_weights = layers.fill_constant(
                shape=[5, 1], dtype='float32', value=1
            )
            embs = []
            for i in range(window_size):
                if i == label_word:
                    continue

                emb = fluid.embedding(
                    input=words[i],
                    size=[dict_size, 32],
                    param_attr='emb.w',
                    is_sparse=False,
                )
                embs.append(emb)

            embs = layers.concat(input=embs, axis=1)
            wl = fluid.layers.unsqueeze(words[label_word], axes=[0])
            nce_loss = layers.nce(
                input=embs,
                label=wl,
                num_total_classes=dict_size,
                num_neg_samples=2,
                sampler="custom_dist",
                custom_dist=nid_freq_arr.tolist(),
                seed=seed,
                param_attr='nce.w',
                bias_attr='nce.b',
                sample_weight=sample_weights,
            )
            feed_dict = dict()
            for i in range(window_size):
                feed_dict['word_{0}'.format(i)] = inp_word[i]
            static_rlt = self.get_static_graph_result(
                feed=feed_dict, fetch_list=[nce_loss]
            )[0]

        with self.static_graph():
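            # Rebuild the same NCE loss with the imperative nn.NCE layer; its
            # result is compared against the functional version above.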
            words = []
            for i in range(window_size):
                words.append(
                    layers.data(
                        name='word_{0}'.format(i), shape=[None], dtype='int64'
                    )
                )
            sample_weights = layers.fill_constant(
                shape=[5, 1], dtype='float32', value=1
            )
            emb = nn.Embedding(
                size=[dict_size, 32], param_attr='emb.w', is_sparse=False
            )

            embs2 = []
            for i in range(window_size):
                if i == label_word:
                    continue

                emb_rlt = emb(words[i])
                embs2.append(emb_rlt)

            embs2 = layers.concat(input=embs2, axis=1)
            nce = nn.NCE(
                num_total_classes=dict_size,
                dim=embs2.shape[1],
                num_neg_samples=2,
                sampler="custom_dist",
                custom_dist=nid_freq_arr.tolist(),
                seed=seed,
                param_attr='nce.w',
                bias_attr='nce.b',
                sample_weight=sample_weights,
            )

            wl = fluid.layers.unsqueeze(words[label_word], axes=[0])
            nce_loss2 = nce(embs2, wl)
            feed_dict = dict()
            for i in range(len(words)):
                feed_dict['word_{0}'.format(i)] = inp_word[i]

            static_rlt2 = self.get_static_graph_result(
                feed=feed_dict, fetch_list=[nce_loss2]
            )[0]

        with self.dynamic_graph():
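            # Repeat the computation in eager mode first, then in legacy
            # dygraph; both must match the static-graph result.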
            with _test_eager_guard():
                words = []
                for i in range(window_size):
                    words.append(base.to_variable(inp_word[i]))
                sample_weights = layers.fill_constant(
                    shape=[5, 1], dtype='float32', value=1
                )
                emb = nn.Embedding(
                    size=[dict_size, 32],
                    param_attr='eager_emb.w',
                    is_sparse=False,
                )

                embs3 = []
                for i in range(window_size):
                    if i == label_word:
                        continue

                    emb_rlt = emb(words[i])
                    embs3.append(emb_rlt)

                embs3 = layers.concat(
                    input=embs3, axis=fluid.dygraph.to_variable(np.array([1]))
                )
                nce = nn.NCE(
                    num_total_classes=dict_size,
                    dim=embs3.shape[1],
                    num_neg_samples=2,
                    sampler="custom_dist",
                    custom_dist=nid_freq_arr.tolist(),
                    seed=seed,
                    param_attr='eager_nce.w',
                    bias_attr='eager_nce.b',
                    sample_weight=sample_weights,
                )

                wl = fluid.layers.unsqueeze(words[label_word], axes=[0])
                dy_eager_rlt = nce(embs3, wl)
                dy_eager_rlt_value = dy_eager_rlt.numpy()

            words = []
            for i in range(window_size):
                words.append(base.to_variable(inp_word[i]))
            sample_weights = layers.fill_constant(
                shape=[5, 1], dtype='float32', value=1
            )
            emb = nn.Embedding(
                size=[dict_size, 32], param_attr='emb.w', is_sparse=False
            )

            embs3 = []
            for i in range(window_size):
                if i == label_word:
                    continue

                emb_rlt = emb(words[i])
                embs3.append(emb_rlt)

            embs3 = layers.concat(
                input=embs3, axis=fluid.dygraph.to_variable(np.array([1]))
            )
            nce = nn.NCE(
                num_total_classes=dict_size,
                dim=embs3.shape[1],
                num_neg_samples=2,
                sampler="custom_dist",
                custom_dist=nid_freq_arr.tolist(),
                seed=seed,
                param_attr='nce.w',
                bias_attr='nce.b',
                sample_weight=sample_weights,
            )

            wl = fluid.layers.unsqueeze(words[label_word], axes=[0])
            dy_rlt = nce(embs3, wl)
            dy_rlt_value = dy_rlt.numpy()

        np.testing.assert_allclose(static_rlt2, static_rlt, rtol=1e-05)
        np.testing.assert_allclose(dy_rlt_value, static_rlt, rtol=1e-05)
        np.testing.assert_allclose(dy_eager_rlt_value, static_rlt, rtol=1e-05)

        with self.dynamic_graph():
            with _test_eager_guard():
                custom_weight = np.random.randn(dict_size, 128).astype(
                    "float32"
                )
                weight_attr = fluid.ParamAttr(
                    initializer=fluid.initializer.NumpyArrayInitializer(
                        custom_weight
                    )
                )
                words = []
                for i in range(window_size):
                    words.append(base.to_variable(inp_word[i]))
                sample_weights = layers.fill_constant(
                    shape=fluid.dygraph.to_variable(np.array([5, 1])),
                    dtype='float32',
                    value=1,
                )
                emb = nn.Embedding(
                    size=[dict_size, 32],
                    param_attr='eager_emb.w',
                    is_sparse=False,
                )

                embs3 = []
                for i in range(window_size):
                    if i == label_word:
                        continue

                    emb_rlt = emb(words[i])
                    embs3.append(emb_rlt)

                embs3 = layers.concat(input=embs3, axis=1)
                nce1 = nn.NCE(
                    num_total_classes=dict_size,
                    dim=embs3.shape[1],
                    num_neg_samples=2,
                    sampler="custom_dist",
                    custom_dist=nid_freq_arr.tolist(),
                    seed=seed,
                    param_attr='eager_nce1.w',
                    bias_attr='eager_nce1.b',
                    sample_weight=sample_weights,
                )

                nce2 = nn.NCE(
                    num_total_classes=dict_size,
                    dim=embs3.shape[1],
                    num_neg_samples=2,
                    sampler="custom_dist",
                    custom_dist=nid_freq_arr.tolist(),
                    seed=seed,
                    param_attr=weight_attr,
                    bias_attr='eager_nce2.b',
                    sample_weight=sample_weights,
                )

                wl = fluid.layers.unsqueeze(words[label_word], axes=[0])
                nce1_loss = nce1(embs3, wl)
                nce2_loss = nce2(embs3, wl)
                self.assertFalse(
                    np.array_equal(nce1_loss.numpy(), nce2_loss.numpy())
                )
                nce2.weight.set_value(nce1.weight.numpy())
                nce2.bias.set_value(nce1.bias)
                nce1_loss = nce1(embs3, wl)
                nce2_loss = nce2(embs3, wl)
                np.testing.assert_array_equal(
                    nce1_loss.numpy(), nce2_loss.numpy()
                )

                nce2.weight = nce1.weight
                nce2.bias = nce1.bias
                np.testing.assert_array_equal(
                    nce1.weight.numpy(), nce2.weight.numpy()
                )
                np.testing.assert_array_equal(
                    nce1.bias.numpy(), nce2.bias.numpy()
                )

            custom_weight = np.random.randn(dict_size, 128).astype("float32")
            weight_attr = fluid.ParamAttr(
                initializer=fluid.initializer.NumpyArrayInitializer(
                    custom_weight
                )
            )
            words = []
            for i in range(window_size):
                words.append(base.to_variable(inp_word[i]))
            sample_weights = layers.fill_constant(
                shape=fluid.dygraph.to_variable(np.array([5, 1])),
                dtype='float32',
                value=1,
            )
            emb = nn.Embedding(
                size=[dict_size, 32], param_attr='emb.w', is_sparse=False
            )

            embs3 = []
            for i in range(window_size):
                if i == label_word:
                    continue

                emb_rlt = emb(words[i])
                embs3.append(emb_rlt)

            embs3 = layers.concat(input=embs3, axis=1)
            nce1 = nn.NCE(
                num_total_classes=dict_size,
                dim=embs3.shape[1],
                num_neg_samples=2,
                sampler="custom_dist",
                custom_dist=nid_freq_arr.tolist(),
                seed=seed,
                param_attr='nce1.w',
                bias_attr='nce1.b',
                sample_weight=sample_weights,
            )

            nce2 = nn.NCE(
                num_total_classes=dict_size,
                dim=embs3.shape[1],
                num_neg_samples=2,
                sampler="custom_dist",
                custom_dist=nid_freq_arr.tolist(),
                seed=seed,
                param_attr=weight_attr,
                bias_attr='nce2.b',
                sample_weight=sample_weights,
            )

            wl = fluid.layers.unsqueeze(words[label_word], axes=[0])
            nce1_loss = nce1(embs3, wl)
            nce2_loss = nce2(embs3, wl)
            self.assertFalse(
                np.array_equal(nce1_loss.numpy(), nce2_loss.numpy())
            )
            nce2.weight.set_value(nce1.weight.numpy())
            nce2.bias.set_value(nce1.bias)
            nce1_loss = nce1(embs3, wl)
            nce2_loss = nce2(embs3, wl)
            np.testing.assert_array_equal(nce1_loss.numpy(), nce2_loss.numpy())

            nce2.weight = nce1.weight
            nce2.bias = nce1.bias
            np.testing.assert_array_equal(
                nce1.weight.numpy(), nce2.weight.numpy()
            )
            np.testing.assert_array_equal(nce1.bias.numpy(), nce2.bias.numpy())

    def test_one_hot(self):
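        # one_hot should give identical results whether depth is a Python int
        # or a 1-element tensor.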
        with self.dynamic_graph():
            with _test_eager_guard():
                label = fluid.dygraph.to_variable(
                    np.array([[1], [1], [3], [0]])
                )
                one_hot_label1 = fluid.layers.one_hot(input=label, depth=4)
                one_hot_label2 = fluid.layers.one_hot(
                    input=label, depth=fluid.dygraph.to_variable(np.array([4]))
                )
                np.testing.assert_array_equal(
                    one_hot_label1.numpy(), one_hot_label2.numpy()
                )

            label = fluid.dygraph.to_variable(np.array([[1], [1], [3], [0]]))
            one_hot_label1 = fluid.layers.one_hot(input=label, depth=4)
            one_hot_label2 = fluid.layers.one_hot(
                input=label, depth=fluid.dygraph.to_variable(np.array([4]))
            )
            np.testing.assert_array_equal(
                one_hot_label1.numpy(), one_hot_label2.numpy()
            )

    def test_split(self):
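        # split must accept dim as either an int or a 1-element tensor.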
        with self.dynamic_graph():
            with _test_eager_guard():
                input = fluid.dygraph.to_variable(np.random.random((3, 8, 5)))
                x0, x1 = fluid.layers.split(input, num_or_sections=2, dim=1)
                x00, x11 = fluid.layers.split(
                    input,
                    num_or_sections=2,
                    dim=fluid.dygraph.to_variable(np.array([1])),
                )
                np.testing.assert_array_equal(x0.numpy(), x00.numpy())
                np.testing.assert_array_equal(x1.numpy(), x11.numpy())

            input = fluid.dygraph.to_variable(np.random.random((3, 8, 5)))
            x0, x1 = fluid.layers.split(input, num_or_sections=2, dim=1)
            x00, x11 = fluid.layers.split(
                input,
                num_or_sections=2,
                dim=fluid.dygraph.to_variable(np.array([1])),
            )
            np.testing.assert_array_equal(x0.numpy(), x00.numpy())
            np.testing.assert_array_equal(x1.numpy(), x11.numpy())

    def test_topk(self):
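        # topk must accept k as either an int or a 1-element tensor.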
        with self.dynamic_graph():
            with _test_eager_guard():
                input = fluid.dygraph.to_variable(np.random.random((13, 11)))
                top5_values1, top5_indices1 = layers.topk(input, k=5)
                top5_values2, top5_indices2 = layers.topk(
                    input, k=fluid.dygraph.to_variable(np.array([5]))
                )
                np.testing.assert_array_equal(
                    top5_values1.numpy(), top5_values2.numpy()
                )
                np.testing.assert_array_equal(
                    top5_indices1.numpy(), top5_indices2.numpy()
                )

            input = fluid.dygraph.to_variable(np.random.random((13, 11)))
            top5_values1, top5_indices1 = layers.topk(input, k=5)
            top5_values2, top5_indices2 = layers.topk(
                input, k=fluid.dygraph.to_variable(np.array([5]))
            )
            np.testing.assert_array_equal(
                top5_values1.numpy(), top5_values2.numpy()
            )
            np.testing.assert_array_equal(
                top5_indices1.numpy(), top5_indices2.numpy()
            )

    def test_conv3d(self):
        with self.static_graph():
            images = layers.data(
                name='pixel', shape=[3, 6, 6, 6], dtype='float32'
            )
            ret = layers.conv3d(input=images, num_filters=3, filter_size=2)
            static_ret = self.get_static_graph_result(
                feed={'pixel': np.ones([2, 3, 6, 6, 6], dtype='float32')},
                fetch_list=[ret],
            )[0]

        with self.static_graph():
            images = layers.data(
                name='pixel', shape=[3, 6, 6, 6], dtype='float32'
            )
            conv3d = nn.Conv3D(num_channels=3, num_filters=3, filter_size=2)
            ret = conv3d(images)
            static_ret2 = self.get_static_graph_result(
                feed={'pixel': np.ones([2, 3, 6, 6, 6], dtype='float32')},
                fetch_list=[ret],
            )[0]

        with self.dynamic_graph():
            with _test_eager_guard():
                images = np.ones([2, 3, 6, 6, 6], dtype='float32')
                conv3d = nn.Conv3D(num_channels=3, num_filters=3, filter_size=2)
                dy_eager_ret = conv3d(base.to_variable(images))
                dy_eager_rlt_value = dy_eager_ret.numpy()

            images = np.ones([2, 3, 6, 6, 6], dtype='float32')
            conv3d = nn.Conv3D(num_channels=3, num_filters=3, filter_size=2)
            dy_ret = conv3d(base.to_variable(images))
            dy_rlt_value = dy_ret.numpy()

        np.testing.assert_allclose(static_ret, dy_rlt_value, rtol=1e-05)
        np.testing.assert_allclose(static_ret, dy_eager_rlt_value, rtol=1e-05)
        np.testing.assert_allclose(static_ret, static_ret2, rtol=1e-05)

        with self.dynamic_graph():
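            # Parameter sharing between two Conv3D layers: set_value() copies
            # values, while direct assignment shares the underlying parameter.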
            with _test_eager_guard():
                images = np.ones([2, 3, 6, 6, 6], dtype='float32')
                custom_weight = np.random.randn(3, 3, 2, 2, 2).astype("float32")
                weight_attr = fluid.ParamAttr(
                    initializer=fluid.initializer.NumpyArrayInitializer(
                        custom_weight
                    )
                )
                conv3d1 = nn.Conv3D(
                    num_channels=3, num_filters=3, filter_size=2
                )
                conv3d2 = nn.Conv3D(
                    num_channels=3,
                    num_filters=3,
                    filter_size=2,
                    param_attr=weight_attr,
                )
                dy_ret1 = conv3d1(base.to_variable(images))
                dy_ret2 = conv3d2(base.to_variable(images))
                self.assertFalse(
                    np.array_equal(dy_ret1.numpy(), dy_ret2.numpy())
                )

                conv3d1_weight_np = conv3d1.weight.numpy()
                conv3d1_bias = conv3d1.bias
                self.assertFalse(
                    np.array_equal(conv3d1_weight_np, conv3d2.weight.numpy())
                )
                conv3d2.weight.set_value(conv3d1_weight_np)
                np.testing.assert_array_equal(
                    conv3d1_weight_np, conv3d2.weight.numpy()
                )
                conv3d1.bias.set_value(conv3d1_bias)
                dy_ret1 = conv3d1(base.to_variable(images))
                dy_ret2 = conv3d2(base.to_variable(images))
                np.testing.assert_array_equal(dy_ret1.numpy(), dy_ret2.numpy())

                conv3d2.weight = conv3d1.weight
                conv3d2.bias = conv3d1.bias
                np.testing.assert_array_equal(
                    conv3d1.weight.numpy(), conv3d2.weight.numpy()
                )
                np.testing.assert_array_equal(
                    conv3d1.bias.numpy(), conv3d2.bias.numpy()
                )

            images = np.ones([2, 3, 6, 6, 6], dtype='float32')
            custom_weight = np.random.randn(3, 3, 2, 2, 2).astype("float32")
            weight_attr = fluid.ParamAttr(
                initializer=fluid.initializer.NumpyArrayInitializer(
                    custom_weight
                )
            )
            conv3d1 = nn.Conv3D(num_channels=3, num_filters=3, filter_size=2)
            conv3d2 = nn.Conv3D(
                num_channels=3,
                num_filters=3,
                filter_size=2,
                param_attr=weight_attr,
            )
            dy_ret1 = conv3d1(base.to_variable(images))
            dy_ret2 = conv3d2(base.to_variable(images))
            self.assertFalse(np.array_equal(dy_ret1.numpy(), dy_ret2.numpy()))

            conv3d1_weight_np = conv3d1.weight.numpy()
            conv3d1_bias = conv3d1.bias
            self.assertFalse(
                np.array_equal(conv3d1_weight_np, conv3d2.weight.numpy())
            )
            conv3d2.weight.set_value(conv3d1_weight_np)
            np.testing.assert_array_equal(
                conv3d1_weight_np, conv3d2.weight.numpy()
            )
            conv3d1.bias.set_value(conv3d1_bias)
            dy_ret1 = conv3d1(base.to_variable(images))
            dy_ret2 = conv3d2(base.to_variable(images))
            np.testing.assert_array_equal(dy_ret1.numpy(), dy_ret2.numpy())

            conv3d2.weight = conv3d1.weight
            conv3d2.bias = conv3d1.bias
            np.testing.assert_array_equal(
                conv3d1.weight.numpy(), conv3d2.weight.numpy()
            )
            np.testing.assert_array_equal(
                conv3d1.bias.numpy(), conv3d2.bias.numpy()
            )

    def test_row_conv(self):
        input = np.arange(15).reshape([3, 5]).astype('float32')
        if core.is_compiled_with_cuda():
            place = core.CUDAPlace(0)
        else:
            place = core.CPUPlace()

        with self.static_graph():
            x = layers.data(
                name='X',
                shape=[3, 5],
                dtype='float32',
                lod_level=1,
                append_batch_size=False,
            )
            ret = layers.row_conv(input=x, future_context_size=2)
            static_ret = self.get_static_graph_result(
                feed={
                    'X': fluid.create_lod_tensor(
                        data=input, recursive_seq_lens=[[1, 1, 1]], place=place
                    )
                },
                fetch_list=[ret],
                with_lod=True,
            )[0]

        with self.static_graph():
            x = layers.data(
                name='X',
                shape=[3, 5],
                dtype='float32',
                lod_level=1,
                append_batch_size=False,
            )
            rowConv = nn.RowConv('RowConv', future_context_size=2)
            ret = rowConv(x)
            static_ret2 = self.get_static_graph_result(
                feed={
                    'X': fluid.create_lod_tensor(
                        data=input, recursive_seq_lens=[[1, 1, 1]], place=place
                    )
                },
                fetch_list=[ret],
                with_lod=True,
            )[0]

        # TODO: dygraph can't support LODTensor

        np.testing.assert_allclose(static_ret, static_ret2, rtol=1e-05)

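    # Compares the functional layers.group_norm with the nn.GroupNorm layer.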
    def func_group_norm(self):
        if core.is_compiled_with_cuda():
            place = core.CUDAPlace(0)
        else:
            place = core.CPUPlace()

        shape = (2, 4, 3, 3)

        input = np.random.random(shape).astype('float32')

        with self.static_graph():
            X = fluid.layers.data(
                name='X',
                shape=shape,
                dtype='float32',
                lod_level=1,
                append_batch_size=False,
            )
            ret = layers.group_norm(
                input=X,
                groups=2,
                param_attr=fluid.initializer.Uniform(low=-0.5, high=0.5),
                bias_attr=fluid.initializer.ConstantInitializer(value=1),
            )
            static_ret = self.get_static_graph_result(
                feed={
                    'X': fluid.create_lod_tensor(
                        data=input, recursive_seq_lens=[[1, 1]], place=place
                    )
                },
                fetch_list=[ret],
                with_lod=True,
            )[0]

        with self.static_graph():
            X = fluid.layers.data(
                name='X',
                shape=shape,
                dtype='float32',
                lod_level=1,
                append_batch_size=False,
            )
            groupNorm = nn.GroupNorm(
                channels=shape[1],
                groups=2,
                param_attr=fluid.initializer.Uniform(low=-0.5, high=0.5),
                bias_attr=fluid.initializer.ConstantInitializer(value=1),
            )
            ret = groupNorm(X)
            static_ret2 = self.get_static_graph_result(
                feed={
                    'X': fluid.create_lod_tensor(
                        data=input, recursive_seq_lens=[[1, 1]], place=place
                    )
                },
                fetch_list=[ret],
                with_lod=True,
            )[0]

        with self.dynamic_graph():
            groupNorm = nn.GroupNorm(
                channels=shape[1],
                groups=2,
                param_attr=fluid.initializer.Uniform(low=-0.5, high=0.5),
                bias_attr=fluid.initializer.ConstantInitializer(value=1),
            )
            dy_ret = groupNorm(base.to_variable(input))
            dy_rlt_value = dy_ret.numpy()

        np.testing.assert_allclose(static_ret, dy_rlt_value, rtol=1e-05)
        np.testing.assert_allclose(static_ret, static_ret2, rtol=1e-05)

    def test_group_norm(self):
        with _test_eager_guard():
            self.func_group_norm()
        self.func_group_norm()

    def test_instance_norm(self):
        if core.is_compiled_with_cuda():
            place = core.CUDAPlace(0)
        else:
            place = core.CPUPlace()

        shape = (2, 4, 3, 3)

        input = np.random.random(shape).astype('float32')

        with self.static_graph():
            X = fluid.layers.data(
                name='X', shape=shape, dtype='float32', append_batch_size=False
            )
            ret = layers.instance_norm(input=X)
            static_ret = self.get_static_graph_result(
                feed={'X': input}, fetch_list=[ret]
            )[0]

        with self.static_graph():
            X = fluid.layers.data(
                name='X', shape=shape, dtype='float32', append_batch_size=False
            )
            instanceNorm = nn.InstanceNorm(num_channels=shape[1])
            ret = instanceNorm(X)
            static_ret2 = self.get_static_graph_result(
                feed={'X': input}, fetch_list=[ret]
            )[0]

        with self.dynamic_graph():
            with _test_eager_guard():
                instanceNorm = nn.InstanceNorm(num_channels=shape[1])
                dy_eager_ret = instanceNorm(base.to_variable(input))
                dy_eager_rlt_value = dy_eager_ret.numpy()

            instanceNorm = nn.InstanceNorm(num_channels=shape[1])
            dy_ret = instanceNorm(base.to_variable(input))
            dy_rlt_value = dy_ret.numpy()

        with self.dynamic_graph():
            with _test_eager_guard():
                instanceNorm = nn.InstanceNorm(num_channels=shape[1])
                dy_eager_ret = instanceNorm(base.to_variable(input))
                dy_eager_rlt_value2 = dy_eager_ret.numpy()

            instanceNorm = nn.InstanceNorm(num_channels=shape[1])
            dy_ret = instanceNorm(base.to_variable(input))
            dy_rlt_value2 = dy_ret.numpy()

        np.testing.assert_allclose(static_ret, dy_rlt_value, rtol=1e-05)
        np.testing.assert_allclose(static_ret, dy_rlt_value2, rtol=1e-05)
        np.testing.assert_allclose(static_ret, dy_eager_rlt_value, rtol=1e-05)
        np.testing.assert_allclose(static_ret, dy_eager_rlt_value2, rtol=1e-05)
        np.testing.assert_allclose(static_ret, static_ret2, rtol=1e-05)

        with self.static_graph():
            # the input of InstanceNorm must be Variable.
            def test_Variable():
                instanceNorm = nn.InstanceNorm(num_channels=shape[1])
                ret1 = instanceNorm(input)

            self.assertRaises(TypeError, test_Variable)

            # the input dtype of InstanceNorm must be float32 or float64
            def test_type():
                input = np.random.random(shape).astype('int32')
                instanceNorm = nn.InstanceNorm(num_channels=shape[1])
                ret2 = instanceNorm(input)

            self.assertRaises(TypeError, test_type)

    def test_spectral_norm(self):
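        # Spectral norm over a 4-D weight: compare the functional op, the
        # nn.SpectralNorm layer, and dygraph/eager execution.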
        if core.is_compiled_with_cuda():
            place = core.CUDAPlace(0)
        else:
            place = core.CPUPlace()

        shape = (2, 4, 3, 3)

        input = np.random.random(shape).astype('float32')

        with self.static_graph():
            Weight = fluid.layers.data(
                name='Weight',
                shape=shape,
                dtype='float32',
                lod_level=1,
                append_batch_size=False,
            )
            ret = layers.spectral_norm(weight=Weight, dim=1, power_iters=2)
            static_ret = self.get_static_graph_result(
                feed={
                    'Weight': fluid.create_lod_tensor(
                        data=input, recursive_seq_lens=[[1, 1]], place=place
                    ),
                },
                fetch_list=[ret],
                with_lod=True,
            )[0]

        with self.static_graph():
            Weight = fluid.layers.data(
                name='Weight',
                shape=shape,
                dtype='float32',
                lod_level=1,
                append_batch_size=False,
            )
            spectralNorm = nn.SpectralNorm(shape, dim=1, power_iters=2)
            ret = spectralNorm(Weight)
            static_ret2 = self.get_static_graph_result(
                feed={
                    'Weight': fluid.create_lod_tensor(
                        data=input, recursive_seq_lens=[[1, 1]], place=place
                    )
                },
                fetch_list=[ret],
                with_lod=True,
            )[0]

        with self.dynamic_graph():
            with _test_eager_guard():
                spectralNorm = nn.SpectralNorm(shape, dim=1, power_iters=2)
                dy_eager_ret = spectralNorm(base.to_variable(input))
                dy_eager_rlt_value = dy_eager_ret.numpy()

            spectralNorm = nn.SpectralNorm(shape, dim=1, power_iters=2)
            dy_ret = spectralNorm(base.to_variable(input))
            dy_rlt_value = dy_ret.numpy()

        np.testing.assert_allclose(static_ret, dy_rlt_value, rtol=1e-05)
        np.testing.assert_allclose(static_ret, dy_eager_rlt_value, rtol=1e-05)
        np.testing.assert_allclose(static_ret, static_ret2, rtol=1e-05)

    def test_tree_conv(self):
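        # TreeConv consumes LoD node vectors plus an edge set; the functional
        # contrib op and the nn.TreeConv layer should produce the same output.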
        if core.is_compiled_with_cuda():
            place = core.CUDAPlace(0)
        else:
            place = core.CPUPlace()
        adj_array = [1, 2, 1, 3, 1, 4, 1, 5, 2, 6, 2, 7, 2, 8, 4, 9, 4, 10]
        adj = np.array(adj_array).reshape((1, 9, 2)).astype('int32')
        adj = np.tile(adj, (1, 1, 1))
        vectors = np.random.random((1, 10, 5)).astype('float32')
        with self.static_graph():
            NodesVector = fluid.layers.data(
                name='NodesVector',
                shape=(1, 10, 5),
                dtype='float32',
                lod_level=1,
                append_batch_size=False,
            )
            EdgeSet = fluid.layers.data(
                name='EdgeSet',
                shape=(1, 9, 2),
                dtype='int32',
                lod_level=1,
                append_batch_size=False,
            )
            ret = fluid.contrib.layers.tree_conv(
                nodes_vector=NodesVector,
                edge_set=EdgeSet,
                output_size=6,
                num_filters=1,
                max_depth=2,
            )
            static_ret = self.get_static_graph_result(
                feed={
                    'NodesVector': fluid.create_lod_tensor(
                        data=vectors, recursive_seq_lens=[[1]], place=place
                    ),
                    'EdgeSet': fluid.create_lod_tensor(
                        data=adj, recursive_seq_lens=[[1]], place=place
                    ),
                },
                fetch_list=[ret],
                with_lod=False,
            )[0]

        with self.static_graph():
            NodesVector = fluid.layers.data(
                name='NodesVector',
                shape=(1, 10, 5),
                dtype='float32',
                lod_level=1,
                append_batch_size=False,
            )
            EdgeSet = fluid.layers.data(
                name='EdgeSet',
                shape=(1, 9, 2),
                dtype='int32',
                lod_level=1,
                append_batch_size=False,
            )
            treeConv = nn.TreeConv(
                feature_size=5, output_size=6, num_filters=1, max_depth=2
            )
            ret = treeConv(NodesVector, EdgeSet)
            static_ret2 = self.get_static_graph_result(
                feed={
                    'NodesVector': fluid.create_lod_tensor(
                        data=vectors, recursive_seq_lens=[[1]], place=place
                    ),
                    'EdgeSet': fluid.create_lod_tensor(
                        data=adj, recursive_seq_lens=[[1]], place=place
                    ),
                },
                fetch_list=[ret],
                with_lod=False,
            )[0]

        with self.dynamic_graph():
            with _test_eager_guard():
                treeConv = nn.TreeConv(
                    feature_size=5, output_size=6, num_filters=1, max_depth=2
                )
                dy_eager_ret = treeConv(
                    base.to_variable(vectors), base.to_variable(adj)
                )
                dy_eager_rlt_value = dy_eager_ret.numpy()

            treeConv = nn.TreeConv(
                feature_size=5, output_size=6, num_filters=1, max_depth=2
            )
            dy_ret = treeConv(base.to_variable(vectors), base.to_variable(adj))
            dy_rlt_value = dy_ret.numpy()

        np.testing.assert_allclose(static_ret, static_ret2, rtol=1e-05)
        np.testing.assert_allclose(static_ret, dy_rlt_value, rtol=1e-05)
        np.testing.assert_allclose(static_ret, dy_eager_rlt_value, rtol=1e-05)

        with self.dynamic_graph():
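            # TreeConv parameter sharing: sync with set_value(), then verify
            # that direct assignment ties the parameters together.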
            with _test_eager_guard():
                custom_weight = np.random.randn(5, 3, 6, 1).astype("float32")
                weight_attr = fluid.ParamAttr(
                    initializer=fluid.initializer.NumpyArrayInitializer(
                        custom_weight
                    )
                )
                treeConv1 = nn.TreeConv(
                    feature_size=5,
                    output_size=6,
                    num_filters=1,
                    max_depth=2,
                    bias_attr='eager_tc1_b',
                )
                treeConv2 = nn.TreeConv(
                    feature_size=5,
                    output_size=6,
                    num_filters=1,
                    max_depth=2,
                    param_attr=weight_attr,
                    bias_attr='eager_tc2_b',
                )
                dy_ret1 = treeConv1(
                    base.to_variable(vectors), base.to_variable(adj)
                )
                dy_ret2 = treeConv2(
                    base.to_variable(vectors), base.to_variable(adj)
                )
                self.assertFalse(
                    np.array_equal(dy_ret1.numpy(), dy_ret2.numpy())
                )
                treeConv2.weight.set_value(treeConv1.weight.numpy())
                treeConv2.bias.set_value(treeConv1.bias)
                dy_ret1 = treeConv1(
                    base.to_variable(vectors), base.to_variable(adj)
                )
                dy_ret2 = treeConv2(
                    base.to_variable(vectors), base.to_variable(adj)
                )
                np.testing.assert_array_equal(dy_ret1.numpy(), dy_ret2.numpy())

                treeConv2.weight = treeConv1.weight
                treeConv2.bias = treeConv1.bias
                np.testing.assert_array_equal(
                    treeConv1.weight.numpy(), treeConv2.weight.numpy()
                )
                np.testing.assert_array_equal(
                    treeConv1.bias.numpy(), treeConv2.bias.numpy()
                )

            custom_weight = np.random.randn(5, 3, 6, 1).astype("float32")
            weight_attr = fluid.ParamAttr(
                initializer=fluid.initializer.NumpyArrayInitializer(
                    custom_weight
                )
            )
            treeConv1 = nn.TreeConv(
                feature_size=5,
                output_size=6,
                num_filters=1,
                max_depth=2,
                bias_attr='tc1_b',
            )
            treeConv2 = nn.TreeConv(
                feature_size=5,
                output_size=6,
                num_filters=1,
                max_depth=2,
                param_attr=weight_attr,
                bias_attr='tc2_b',
            )
            dy_ret1 = treeConv1(
                base.to_variable(vectors), base.to_variable(adj)
            )
            dy_ret2 = treeConv2(
                base.to_variable(vectors), base.to_variable(adj)
            )
            self.assertFalse(np.array_equal(dy_ret1.numpy(), dy_ret2.numpy()))
            treeConv2.weight.set_value(treeConv1.weight.numpy())
            treeConv2.bias.set_value(treeConv1.bias)
            dy_ret1 = treeConv1(
                base.to_variable(vectors), base.to_variable(adj)
            )
            dy_ret2 = treeConv2(
                base.to_variable(vectors), base.to_variable(adj)
            )
            np.testing.assert_array_equal(dy_ret1.numpy(), dy_ret2.numpy())

            treeConv2.weight = treeConv1.weight
            treeConv2.bias = treeConv1.bias
            np.testing.assert_array_equal(
                treeConv1.weight.numpy(), treeConv2.weight.numpy()
            )
            np.testing.assert_array_equal(
                treeConv1.bias.numpy(), treeConv2.bias.numpy()
            )

    def test_conv3d_transpose(self):
        input_array = (
            np.arange(0, 48).reshape([2, 3, 2, 2, 2]).astype('float32')
        )

        with self.static_graph():
            img = layers.data(name='pixel', shape=[3, 2, 2, 2], dtype='float32')
            out = layers.conv3d_transpose(
                input=img, num_filters=12, filter_size=12, use_cudnn=False
            )
            static_rlt = self.get_static_graph_result(
                feed={'pixel': input_array}, fetch_list=[out]
            )[0]

        with self.static_graph():
            img = layers.data(name='pixel', shape=[3, 2, 2, 2], dtype='float32')
            conv3d_transpose = nn.Conv3DTranspose(
                num_channels=3, num_filters=12, filter_size=12, use_cudnn=False
            )
            out = conv3d_transpose(img)
            static_rlt2 = self.get_static_graph_result(
                feed={'pixel': input_array}, fetch_list=[out]
            )[0]

        with self.dynamic_graph():
            with _test_eager_guard():
                conv3d_transpose = nn.Conv3DTranspose(
                    num_channels=3,
                    num_filters=12,
                    filter_size=12,
                    use_cudnn=False,
                )
                dy_eager_rlt = conv3d_transpose(base.to_variable(input_array))
                dy_eager_rlt_value = dy_eager_rlt.numpy()

            conv3d_transpose = nn.Conv3DTranspose(
                num_channels=3, num_filters=12, filter_size=12, use_cudnn=False
            )
            dy_rlt = conv3d_transpose(base.to_variable(input_array))
            dy_rlt_value = dy_rlt.numpy()
        np.testing.assert_allclose(static_rlt2, static_rlt, rtol=1e-05)
        np.testing.assert_allclose(dy_rlt_value, static_rlt, rtol=1e-05)
        np.testing.assert_allclose(dy_eager_rlt_value, static_rlt, rtol=1e-05)

        with self.dynamic_graph():
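            # Same parameter-sharing pattern, now for Conv3DTranspose layers.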
            with _test_eager_guard():
                images = np.ones([2, 3, 6, 6, 6], dtype='float32')
                custom_weight = np.random.randn(3, 3, 2, 2, 2).astype("float32")
                weight_attr = fluid.ParamAttr(
                    initializer=fluid.initializer.NumpyArrayInitializer(
                        custom_weight
                    )
                )
                conv3d1 = nn.Conv3DTranspose(
                    num_channels=3,
                    num_filters=3,
                    filter_size=2,
                    bias_attr='eager_conv3d1_b',
                    use_cudnn=False,
                )
                conv3d2 = nn.Conv3DTranspose(
                    num_channels=3,
                    num_filters=3,
                    filter_size=2,
                    param_attr=weight_attr,
                    bias_attr='eager_conv3d2_b',
                    use_cudnn=False,
                )
                dy_ret1 = conv3d1(base.to_variable(images))
                dy_ret2 = conv3d2(base.to_variable(images))
                self.assertFalse(
                    np.array_equal(dy_ret1.numpy(), dy_ret2.numpy())
                )

                conv3d1_weight_np = conv3d1.weight.numpy()
                conv3d1_bias = conv3d1.bias
                self.assertFalse(
                    np.array_equal(conv3d1_weight_np, conv3d2.weight.numpy())
                )
                conv3d2.weight.set_value(conv3d1_weight_np)
                np.testing.assert_array_equal(
                    conv3d1_weight_np, conv3d2.weight.numpy()
                )
                conv3d1.bias.set_value(conv3d1_bias)
                dy_ret1 = conv3d1(base.to_variable(images))
                dy_ret2 = conv3d2(base.to_variable(images))
                np.testing.assert_array_equal(dy_ret1.numpy(), dy_ret2.numpy())

                conv3d2.weight = conv3d1.weight
                conv3d2.bias = conv3d1.bias
                np.testing.assert_array_equal(
                    conv3d1.weight.numpy(), conv3d2.weight.numpy()
                )
                np.testing.assert_array_equal(
                    conv3d1.bias.numpy(), conv3d2.bias.numpy()
                )

            images = np.ones([2, 3, 6, 6, 6], dtype='float32')
            custom_weight = np.random.randn(3, 3, 2, 2, 2).astype("float32")
            weight_attr = fluid.ParamAttr(
                initializer=fluid.initializer.NumpyArrayInitializer(
                    custom_weight
                )
            )
            conv3d1 = nn.Conv3DTranspose(
                num_channels=3,
                num_filters=3,
                filter_size=2,
                bias_attr='conv3d1_b',
                use_cudnn=False,
            )
            conv3d2 = nn.Conv3DTranspose(
                num_channels=3,
                num_filters=3,
                filter_size=2,
                param_attr=weight_attr,
                bias_attr='conv3d2_b',
                use_cudnn=False,
            )
            dy_ret1 = conv3d1(base.to_variable(images))
            dy_ret2 = conv3d2(base.to_variable(images))
            self.assertFalse(np.array_equal(dy_ret1.numpy(), dy_ret2.numpy()))

            conv3d1_weight_np = conv3d1.weight.numpy()
            conv3d1_bias = conv3d1.bias
            self.assertFalse(
                np.array_equal(conv3d1_weight_np, conv3d2.weight.numpy())
            )
            conv3d2.weight.set_value(conv3d1_weight_np)
            np.testing.assert_array_equal(
                conv3d1_weight_np, conv3d2.weight.numpy()
            )
            conv3d2.bias.set_value(conv3d1_bias)
            dy_ret1 = conv3d1(base.to_variable(images))
            dy_ret2 = conv3d2(base.to_variable(images))
            np.testing.assert_array_equal(dy_ret1.numpy(), dy_ret2.numpy())

            conv3d2.weight = conv3d1.weight
            conv3d2.bias = conv3d1.bias
            np.testing.assert_array_equal(
                conv3d1.weight.numpy(), conv3d2.weight.numpy()
            )
            np.testing.assert_array_equal(
                conv3d1.bias.numpy(), conv3d2.bias.numpy()
            )
    def test_eye_op(self):
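        # NumPy references: np.eye(3, 2), then the same matrix stacked into
        # batch shapes [3] and [4, 3] for the batched cases below.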
        np_eye = np.eye(3, 2)
        array_rlt1 = [np_eye for _ in range(3)]
        stack_rlt1 = np.stack(array_rlt1, axis=0)
        array_rlt2 = [stack_rlt1 for _ in range(4)]
        stack_rlt2 = np.stack(array_rlt2, axis=0)

        with self.dynamic_graph():
            with _test_eager_guard():
                eager_eye_tensor = layers.eye(num_rows=3, num_columns=2)
                eager_eye_tensor_rlt1 = layers.eye(
                    num_rows=3, num_columns=2, batch_shape=[3]
                )
                eager_eye_tensor_rlt2 = layers.eye(
                    num_rows=3, num_columns=2, batch_shape=[4, 3]
                )
                eager_diag_tensor = layers.eye(20)
                eager_eye_tensor_value = eager_eye_tensor.numpy()
                eager_eye_tensor_rlt1_value = eager_eye_tensor_rlt1.numpy()
                eager_eye_tensor_rlt2_value = eager_eye_tensor_rlt2.numpy()
                eager_diag_tensor_value = eager_diag_tensor.numpy()

            eye_tensor = layers.eye(num_rows=3, num_columns=2)
            eye_tensor_rlt1 = layers.eye(
                num_rows=3, num_columns=2, batch_shape=[3]
            )
            eye_tensor_rlt2 = layers.eye(
                num_rows=3, num_columns=2, batch_shape=[4, 3]
            )
            diag_tensor = layers.eye(20)
            eye_tensor_value = eye_tensor.numpy()
            eye_tensor_rlt1_value = eye_tensor_rlt1.numpy()
            eye_tensor_rlt2_value = eye_tensor_rlt2.numpy()
            diag_tensor_value = diag_tensor.numpy()
        np.testing.assert_allclose(eager_eye_tensor_value, np_eye, rtol=1e-05)
        np.testing.assert_allclose(
            eager_eye_tensor_rlt1_value, stack_rlt1, rtol=1e-05
        )
        np.testing.assert_allclose(
            eager_eye_tensor_rlt2_value, stack_rlt2, rtol=1e-05
        )
        np.testing.assert_allclose(
            eager_diag_tensor_value, np.eye(20), rtol=1e-05
        )

        np.testing.assert_allclose(eye_tensor_value, np_eye, rtol=1e-05)
        np.testing.assert_allclose(
            eye_tensor_rlt1_value, stack_rlt1, rtol=1e-05
        )
        np.testing.assert_allclose(
            eye_tensor_rlt2_value, stack_rlt2, rtol=1e-05
        )
        np.testing.assert_allclose(diag_tensor_value, np.eye(20), rtol=1e-05)

        with self.assertRaises(TypeError):
            layers.eye(num_rows=3.1)
        with self.assertRaises(TypeError):
            layers.eye(num_rows=3, num_columns=2.2)
        with self.assertRaises(TypeError):
            layers.eye(num_rows=3, batch_shape=2)
        with self.assertRaises(TypeError):
            layers.eye(num_rows=3, batch_shape=[-1])

    def func_while_loop(self):
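        # while_loop should keep applying body (i + 1) until cond (i < 10)
        # fails, and the static and dynamic results must agree.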
        with self.static_graph():
            i = layers.fill_constant(shape=[1], dtype='int64', value=0)
            ten = layers.fill_constant(shape=[1], dtype='int64', value=10)

            def cond(i):
                return layers.less_than(i, ten)

            def body(i):
                return i + 1

            out = layers.while_loop(cond, body, [i])
            static_ret = self.get_static_graph_result(feed={}, fetch_list=out)

        with self.dynamic_graph():
            i = layers.fill_constant(shape=[1], dtype='int64', value=0)
            ten = layers.fill_constant(shape=[1], dtype='int64', value=10)

            def cond1(i):
                return layers.less_than(i, ten)

            def body1(i):
                return i + 1

            dy_ret = layers.while_loop(cond1, body1, [i])
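            # body must return as many variables as there are loop_vars;
            # body2 returns two values for a single loop var, so while_loop
            # is expected to raise ValueError.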
            with self.assertRaises(ValueError):
                j = layers.fill_constant(shape=[1], dtype='int64', value=0)

                def body2(i):
                    return i + 1, i + 2

                layers.while_loop(cond1, body2, [j])
        np.testing.assert_array_equal(static_ret[0], dy_ret[0].numpy())
    def test_while_loop(self):
        with _test_eager_guard():
            self.func_while_loop()
        self.func_while_loop()

    def test_compare(self):
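        # Each comparison op below runs in both the static and the dynamic
        # graph on the same inputs; the results must match element-wise.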
        value_a = np.arange(3)
        value_b = np.arange(3)
        # less than
        with self.static_graph():
            a = layers.data(name='a', shape=[1], dtype='int64')
            b = layers.data(name='b', shape=[1], dtype='int64')
            cond = layers.less_than(x=a, y=b)
            static_ret = self.get_static_graph_result(
                feed={"a": value_a, "b": value_b}, fetch_list=[cond]
            )[0]
        with self.dynamic_graph():
            with _test_eager_guard():
                da = base.to_variable(value_a)
                db = base.to_variable(value_b)
                dcond = layers.less_than(x=da, y=db)

                for i in range(len(static_ret)):
                    self.assertTrue(dcond.numpy()[i] == static_ret[i])

            da = base.to_variable(value_a)
            db = base.to_variable(value_b)
            dcond = layers.less_than(x=da, y=db)

            for i in range(len(static_ret)):
                self.assertTrue(dcond.numpy()[i] == static_ret[i])

        # less equal
        with self.static_graph():
            a1 = layers.data(name='a1', shape=[1], dtype='int64')
            b1 = layers.data(name='b1', shape=[1], dtype='int64')
            cond1 = layers.less_equal(x=a1, y=b1)
            static_ret1 = self.get_static_graph_result(
                feed={"a1": value_a, "b1": value_b}, fetch_list=[cond1]
            )[0]
        with self.dynamic_graph():
            with _test_eager_guard():
                da1 = base.to_variable(value_a)
                db1 = base.to_variable(value_b)
                dcond1 = layers.less_equal(x=da1, y=db1)

                for i in range(len(static_ret1)):
                    self.assertTrue(dcond1.numpy()[i] == static_ret1[i])

            da1 = base.to_variable(value_a)
            db1 = base.to_variable(value_b)
            dcond1 = layers.less_equal(x=da1, y=db1)

            for i in range(len(static_ret1)):
                self.assertTrue(dcond1.numpy()[i] == static_ret1[i])

        # greater than
        with self.static_graph():
            a2 = layers.data(name='a2', shape=[1], dtype='int64')
            b2 = layers.data(name='b2', shape=[1], dtype='int64')
            cond2 = layers.greater_than(x=a2, y=b2)
            static_ret2 = self.get_static_graph_result(
                feed={"a2": value_a, "b2": value_b}, fetch_list=[cond2]
            )[0]
        with self.dynamic_graph():
            with _test_eager_guard():
                da2 = base.to_variable(value_a)
                db2 = base.to_variable(value_b)
                dcond2 = layers.greater_than(x=da2, y=db2)

                for i in range(len(static_ret2)):
                    self.assertTrue(dcond2.numpy()[i] == static_ret2[i])

            da2 = base.to_variable(value_a)
            db2 = base.to_variable(value_b)
            dcond2 = layers.greater_than(x=da2, y=db2)

            for i in range(len(static_ret2)):
                self.assertTrue(dcond2.numpy()[i] == static_ret2[i])

        # greater equal
        with self.static_graph():
            a3 = layers.data(name='a3', shape=[1], dtype='int64')
            b3 = layers.data(name='b3', shape=[1], dtype='int64')
            cond3 = layers.greater_equal(x=a3, y=b3)
            static_ret3 = self.get_static_graph_result(
                feed={"a3": value_a, "b3": value_b}, fetch_list=[cond3]
            )[0]
        with self.dynamic_graph():
            with _test_eager_guard():
                da3 = base.to_variable(value_a)
                db3 = base.to_variable(value_b)
                dcond3 = layers.greater_equal(x=da3, y=db3)

                for i in range(len(static_ret3)):
                    self.assertTrue(dcond3.numpy()[i] == static_ret3[i])

            da3 = base.to_variable(value_a)
            db3 = base.to_variable(value_b)
            dcond3 = layers.greater_equal(x=da3, y=db3)

            for i in range(len(static_ret3)):
                self.assertTrue(dcond3.numpy()[i] == static_ret3[i])

        # equal
        with self.static_graph():
            a4 = layers.data(name='a4', shape=[1], dtype='int64')
            b4 = layers.data(name='b4', shape=[1], dtype='int64')
            cond4 = layers.equal(x=a4, y=b4)
            static_ret4 = self.get_static_graph_result(
                feed={"a4": value_a, "b4": value_b}, fetch_list=[cond4]
            )[0]
        with self.dynamic_graph():
            with _test_eager_guard():
                da4 = base.to_variable(value_a)
                db4 = base.to_variable(value_b)
                dcond4 = layers.equal(x=da4, y=db4)

                for i in range(len(static_ret4)):
                    self.assertTrue(dcond4.numpy()[i] == static_ret4[i])

            da4 = base.to_variable(value_a)
            db4 = base.to_variable(value_b)
            dcond4 = layers.equal(x=da4, y=db4)

            for i in range(len(static_ret4)):
                self.assertTrue(dcond4.numpy()[i] == static_ret4[i])

        # not equal
        with self.static_graph():
            a5 = layers.data(name='a5', shape=[1], dtype='int64')
            b5 = layers.data(name='b5', shape=[1], dtype='int64')
            cond5 = layers.not_equal(x=a5, y=b5)
            static_ret5 = self.get_static_graph_result(
                feed={"a5": value_a, "b5": value_b}, fetch_list=[cond5]
            )[0]
        with self.dynamic_graph():
            with _test_eager_guard():
                da5 = base.to_variable(value_a)
                db5 = base.to_variable(value_b)
                dcond5 = layers.not_equal(x=da5, y=db5)

                for i in range(len(static_ret5)):
                    self.assertTrue(dcond5.numpy()[i] == static_ret5[i])

            da5 = base.to_variable(value_a)
            db5 = base.to_variable(value_b)
            dcond5 = layers.not_equal(x=da5, y=db5)

            for i in range(len(static_ret5)):
                self.assertTrue(dcond5.numpy()[i] == static_ret5[i])

    def test_cond(self):
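        # cond evaluates exactly one branch. With a = 0.1 < b = 0.23, every
        # call below ends up in the elementwise_add branch, so all results
        # equal a + b.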
        def less_than_branch(a, b):
            return fluid.layers.elementwise_add(a, b)

        def greater_equal_branch(a, b):
            return fluid.layers.elementwise_sub(a, b)

        with self.static_graph():
            a = fluid.layers.fill_constant(
                shape=[1], dtype='float32', value=0.1
            )
            b = fluid.layers.fill_constant(
                shape=[1], dtype='float32', value=0.23
            )
            out = fluid.layers.cond(
                a >= b,
                lambda: greater_equal_branch(a, b),
                lambda: less_than_branch(a, b),
            )
            place = (
                fluid.CUDAPlace(0)
                if core.is_compiled_with_cuda()
                else fluid.CPUPlace()
            )
            exe = fluid.Executor(place)
            ret = exe.run(fetch_list=[out])
            static_res = ret[0]

        with self.dynamic_graph():
            with _test_eager_guard():
                a = fluid.dygraph.to_variable(np.array([0.1]).astype('float32'))
                b = fluid.dygraph.to_variable(
                    np.array([0.23]).astype('float32')
                )
                out = layers.cond(
                    a < b,
                    lambda: less_than_branch(a, b),
                    lambda: greater_equal_branch(a, b),
                )
                out2 = layers.cond(
                    a >= b,
                    lambda: greater_equal_branch(a, b),
                    lambda: less_than_branch(a, b),
                )
                eager_dynamic_res = out.numpy()
                eager_dynamic_res2 = out2.numpy()
                np.testing.assert_array_equal(
                    eager_dynamic_res, eager_dynamic_res2
                )
                with self.assertRaises(TypeError):
                    layers.cond(a < b, 'str', 'str')
                with self.assertRaises(TypeError):
                    layers.cond(a >= b, 'str', 'str')

            a = fluid.dygraph.to_variable(np.array([0.1]).astype('float32'))
            b = fluid.dygraph.to_variable(np.array([0.23]).astype('float32'))
            out = layers.cond(
                a < b,
                lambda: less_than_branch(a, b),
                lambda: greater_equal_branch(a, b),
            )
            out2 = layers.cond(
                a >= b,
                lambda: greater_equal_branch(a, b),
                lambda: less_than_branch(a, b),
            )
            dynamic_res = out.numpy()
            dynamic_res2 = out2.numpy()
            np.testing.assert_array_equal(dynamic_res, dynamic_res2)
            with self.assertRaises(TypeError):
                layers.cond(a < b, 'str', 'str')
            with self.assertRaises(TypeError):
                layers.cond(a >= b, 'str', 'str')

        np.testing.assert_array_equal(static_res, dynamic_res)
        np.testing.assert_array_equal(static_res, eager_dynamic_res)
    def test_case(self):
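        # case runs the fn of the first true predicate; when default is None
        # and no predicate is true, the fn of the last pair acts as the
        # default, so out_2 comes from fn_3.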
        def fn_1():
            return layers.fill_constant(shape=[1, 2], dtype='float32', value=1)

        def fn_2():
            return layers.fill_constant(shape=[2, 2], dtype='int32', value=2)

        def fn_3():
            return layers.fill_constant(shape=[3], dtype='int32', value=3)

        with self.static_graph():
            x = layers.fill_constant(shape=[1], dtype='float32', value=0.3)
            y = layers.fill_constant(shape=[1], dtype='float32', value=0.1)
            z = layers.fill_constant(shape=[1], dtype='float32', value=0.2)

            pred_1 = layers.less_than(z, x)  # true: 0.2 < 0.3
            pred_2 = layers.less_than(x, y)  # false: 0.3 < 0.1
            pred_3 = layers.equal(x, y)  # false: 0.3 == 0.1

            out_1 = layers.case(
                pred_fn_pairs=[(pred_1, fn_1), (pred_2, fn_2)], default=fn_3
            )
            out_2 = layers.case(pred_fn_pairs=[(pred_2, fn_2), (pred_3, fn_3)])

            place = (
                fluid.CUDAPlace(0)
                if core.is_compiled_with_cuda()
                else fluid.CPUPlace()
            )
            exe = fluid.Executor(place)
            static_res1, static_res2 = exe.run(fetch_list=[out_1, out_2])

        with self.dynamic_graph():
            with _test_eager_guard():
                x = layers.fill_constant(shape=[1], dtype='float32', value=0.3)
                y = layers.fill_constant(shape=[1], dtype='float32', value=0.1)
                z = layers.fill_constant(shape=[1], dtype='float32', value=0.2)

                pred_1 = layers.less_than(z, x)  # true: 0.2 < 0.3
                pred_2 = layers.less_than(x, y)  # false: 0.3 < 0.1
                pred_3 = layers.equal(x, y)  # false: 0.3 == 0.1

                out_1 = layers.case(
                    pred_fn_pairs=[(pred_1, fn_1), (pred_2, fn_2)], default=fn_3
                )
                out_2 = layers.case(
                    pred_fn_pairs=[(pred_2, fn_2), (pred_3, fn_3)]
                )
                eager_dynamic_res1 = out_1.numpy()
                eager_dynamic_res2 = out_2.numpy()

            x = layers.fill_constant(shape=[1], dtype='float32', value=0.3)
            y = layers.fill_constant(shape=[1], dtype='float32', value=0.1)
            z = layers.fill_constant(shape=[1], dtype='float32', value=0.2)

            pred_1 = layers.less_than(z, x)  # true: 0.2 < 0.3
            pred_2 = layers.less_than(x, y)  # false: 0.3 < 0.1
            pred_3 = layers.equal(x, y)  # false: 0.3 == 0.1

            out_1 = layers.case(
                pred_fn_pairs=[(pred_1, fn_1), (pred_2, fn_2)], default=fn_3
            )
            out_2 = layers.case(pred_fn_pairs=[(pred_2, fn_2), (pred_3, fn_3)])
            dynamic_res1 = out_1.numpy()
            dynamic_res2 = out_2.numpy()

        np.testing.assert_array_equal(static_res1, dynamic_res1)
        np.testing.assert_array_equal(static_res2, dynamic_res2)
        np.testing.assert_array_equal(static_res1, eager_dynamic_res1)
        np.testing.assert_array_equal(static_res2, eager_dynamic_res2)

    def test_switch_case(self):
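        # switch_case picks the fn whose key equals branch_index, falling
        # back to default (or, when default is None, to the last listed fn),
        # so out_3 comes from fn_3.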
        def fn_1():
            return layers.fill_constant(shape=[1, 2], dtype='float32', value=1)

        def fn_2():
            return layers.fill_constant(shape=[2, 2], dtype='int32', value=2)

        def fn_3():
            return layers.fill_constant(shape=[3], dtype='int32', value=3)

        with self.static_graph():
            index_1 = layers.fill_constant(shape=[1], dtype='int32', value=1)
            index_2 = layers.fill_constant(shape=[1], dtype='int32', value=2)

            out_1 = layers.switch_case(
                branch_index=index_1,
                branch_fns={1: fn_1, 2: fn_2},
                default=fn_3,
            )
            out_2 = layers.switch_case(
                branch_index=index_2,
                branch_fns=[(1, fn_1), (2, fn_2)],
                default=fn_3,
            )
            out_3 = layers.switch_case(
                branch_index=index_2,
                branch_fns=[(0, fn_1), (4, fn_2), (7, fn_3)],
            )

            place = (
                fluid.CUDAPlace(0)
                if core.is_compiled_with_cuda()
                else fluid.CPUPlace()
            )
            exe = fluid.Executor(place)
            static_res1, static_res2, static_res3 = exe.run(
                fetch_list=[out_1, out_2, out_3]
            )

        with self.dynamic_graph():
            with _test_eager_guard():
                index_1 = layers.fill_constant(
                    shape=[1], dtype='int32', value=1
                )
                index_2 = layers.fill_constant(
                    shape=[1], dtype='int32', value=2
                )

                out_1 = layers.switch_case(
                    branch_index=index_1,
                    branch_fns={1: fn_1, 2: fn_2},
                    default=fn_3,
                )
                out_2 = layers.switch_case(
                    branch_index=index_2,
                    branch_fns=[(1, fn_1), (2, fn_2)],
                    default=fn_3,
                )
                out_3 = layers.switch_case(
                    branch_index=index_2,
                    branch_fns=[(0, fn_1), (4, fn_2), (7, fn_3)],
                )

                eager_dynamic_res1 = out_1.numpy()
                eager_dynamic_res2 = out_2.numpy()
                eager_dynamic_res3 = out_3.numpy()

            index_1 = layers.fill_constant(shape=[1], dtype='int32', value=1)
            index_2 = layers.fill_constant(shape=[1], dtype='int32', value=2)

            out_1 = layers.switch_case(
                branch_index=index_1,
                branch_fns={1: fn_1, 2: fn_2},
                default=fn_3,
            )
            out_2 = layers.switch_case(
                branch_index=index_2,
                branch_fns=[(1, fn_1), (2, fn_2)],
                default=fn_3,
            )
            out_3 = layers.switch_case(
                branch_index=index_2,
                branch_fns=[(0, fn_1), (4, fn_2), (7, fn_3)],
            )

            dynamic_res1 = out_1.numpy()
            dynamic_res2 = out_2.numpy()
            dynamic_res3 = out_3.numpy()

        np.testing.assert_array_equal(static_res1, dynamic_res1)
        np.testing.assert_array_equal(static_res2, dynamic_res2)
        np.testing.assert_array_equal(static_res3, dynamic_res3)
        np.testing.assert_array_equal(static_res1, eager_dynamic_res1)
        np.testing.assert_array_equal(static_res2, eager_dynamic_res2)
        np.testing.assert_array_equal(static_res3, eager_dynamic_res3)
    def test_crop_tensor(self):
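        # crop_tensor accepts shape/offsets as plain Python lists, as 1-D
        # tensors, or as mixed lists whose entries may be -1 or scalar
        # tensors.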
        with self.static_graph():
            x = fluid.layers.data(name="x1", shape=[6, 5, 8])

            dim1 = fluid.layers.data(
                name="dim1", shape=[1], append_batch_size=False
            )
            dim2 = fluid.layers.data(
                name="dim2", shape=[1], append_batch_size=False
            )
            crop_shape1 = (1, 2, 4, 4)
            crop_shape2 = fluid.layers.data(
                name="crop_shape", shape=[4], append_batch_size=False
            )
            crop_shape3 = [-1, dim1, dim2, 4]
            crop_offsets1 = [0, 0, 1, 0]
            crop_offsets2 = fluid.layers.data(
                name="crop_offset", shape=[4], append_batch_size=False
            )
            crop_offsets3 = [0, dim1, dim2, 0]

            out1 = fluid.layers.crop_tensor(
                x, shape=crop_shape1, offsets=crop_offsets1
            )
            out2 = fluid.layers.crop_tensor(
                x, shape=crop_shape2, offsets=crop_offsets2
            )
            out3 = fluid.layers.crop_tensor(
                x, shape=crop_shape3, offsets=crop_offsets3
            )

            self.assertIsNotNone(out1)
            self.assertIsNotNone(out2)
            self.assertIsNotNone(out3)

    def test_shard_index(self):
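        # With index_num=20 and nshards=2, shard 0 owns labels 0-9; labels
        # it does not own are replaced by ignore_value (-1 by default).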
        with self.static_graph():
            x = fluid.layers.data(name="label", shape=[4, 1], dtype='int64')
            shard_label = fluid.layers.shard_index(
                input=x, index_num=20, nshards=2, shard_id=0
            )

        self.assertIsNotNone(shard_label)

    def test_accuracy(self):
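        # The same inputs should yield the same top-5 accuracy whether the
        # graph is built statically or eagerly (CPU only, since the dygraph
        # run forces CPU).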
        x = np.random.rand(3, 32, 32).astype("float32")
        y = np.array([[1], [0], [1]])
        with self.static_graph():
            data = fluid.data(name="input", shape=[-1, 32, 32], dtype="float32")
            label = fluid.data(name="label", shape=[-1, 1], dtype="int")
            fc_out = fluid.layers.fc(input=data, size=10)
            predict = fluid.layers.softmax(input=fc_out)
            result = fluid.layers.accuracy(input=predict, label=label, k=5)
            place = fluid.CPUPlace()
            exe = fluid.Executor(place)

            exe.run(fluid.default_startup_program())
            # x = np.random.rand(3, 32, 32).astype("float32")
            # y = np.array([[1], [0], [1]])
            static_out = exe.run(
                feed={"input": x, "label": y}, fetch_list=result[0]
            )
        with self.dynamic_graph(force_to_use_cpu=True):
            data = base.to_variable(x)
            label = base.to_variable(y)
            fc_out = fluid.layers.fc(data, size=10)
            predict = fluid.layers.softmax(fc_out)
            dynamic_out = fluid.layers.accuracy(input=predict, label=label, k=5)

        np.testing.assert_array_equal(static_out[0], dynamic_out.numpy())


class TestBook(LayerTest):
    def setUp(self):
        self.only_static_set = set({"make_word_embedding"})
        self.not_compare_static_dygraph_set = set(
            {
                "make_gaussian_random",
                "make_gaussian_random_batch_size_like",
                "make_kldiv_loss",
                "make_prelu",
                "make_sampled_softmax_with_cross_entropy",
                "make_sampling_id",
                "make_uniform_random_batch_size_like",
            }
        )
        self.all_close_compare = set({"make_spectral_norm"})
    def func_all_layers(self):
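        # Harness: run every make_* method once under static_graph and once
        # under dynamic_graph, then compare the fetched results (exactly, or
        # via allclose for the ops in all_close_compare).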
        attrs = (getattr(self, name) for name in dir(self))
        methods = filter(inspect.ismethod, attrs)
        for method in methods:
            if not method.__name__.startswith('make_'):
                continue
            self._low_data_bound = 0
            self._high_data_bound = 2
            self._batch_size = 2
            self._feed_dict = {}
            self._force_to_use_cpu = False
            with self.static_graph():
                static_var = method()
                if isinstance(static_var, tuple):
                    static_var = static_var[0]

                if static_var is not None:
                    fetch_list = [static_var.name]
                    static_result = self.get_static_graph_result(
                        feed=self._feed_dict,
                        fetch_list=fetch_list,
                        force_to_use_cpu=self._force_to_use_cpu,
                    )
                else:
                    assert method.__name__ in ('make_get_places',)
                    continue
            if method.__name__ in self.only_static_set:
                continue

            with self.dynamic_graph(self._force_to_use_cpu):
                dy_result = method()
                if isinstance(dy_result, tuple):
                    dy_result = dy_result[0]
                dy_result_value = dy_result.numpy()
            if method.__name__ in self.all_close_compare:
                np.testing.assert_allclose(
                    static_result[0],
                    dy_result_value,
                    rtol=1e-05,
                    atol=0,
                    err_msg='Result of function [{}] compare failed'.format(
                        method.__name__
                    ),
                )
                continue

            if method.__name__ not in self.not_compare_static_dygraph_set:
                np.testing.assert_array_equal(
                    static_result[0],
                    dy_result_value,
                    err_msg='Result of function [{}] not equal'.format(
                        method.__name__
                    ),
                )
    def test_all_layers(self):
        with _test_eager_guard():
            self.func_all_layers()
        self.func_all_layers()

    def _get_np_data(self, shape, dtype, append_batch_size=True):
        np.random.seed(self.seed)
        if append_batch_size:
            shape = [self._batch_size] + shape
        if dtype == 'float32':
            return np.random.random(shape).astype(dtype)
        elif dtype == 'float64':
            return np.random.random(shape).astype(dtype)
        elif dtype == 'int32':
            return np.random.randint(
                self._low_data_bound, self._high_data_bound, shape
            ).astype(dtype)
        elif dtype == 'int64':
            return np.random.randint(
                self._low_data_bound, self._high_data_bound, shape
            ).astype(dtype)

    def _get_data(
        self, name, shape, dtype, set_feed_dict=True, append_batch_size=True
    ):
        if base.enabled():
            return base.to_variable(
                value=self._get_np_data(shape, dtype, append_batch_size),
                name=name,
                zero_copy=False,
            )
        else:
            if set_feed_dict:
                self._feed_dict[name] = self._get_np_data(
                    shape, dtype, append_batch_size
                )
            return layers.data(
                name=name,
                shape=shape,
                dtype=dtype,
                append_batch_size=append_batch_size,
            )

    def make_sampled_softmax_with_cross_entropy(self):
        with program_guard(
            fluid.default_main_program(), fluid.default_startup_program()
        ):
            logits = self._get_data(name='Logits', shape=[256], dtype='float32')
            label = self._get_data(name='Label', shape=[1], dtype='int64')
            num_samples = 25
            output = layers.sampled_softmax_with_cross_entropy(
                logits, label, num_samples
            )
            return output

    def make_fit_a_line(self):
        with program_guard(
            fluid.default_main_program(),
            startup_program=fluid.default_startup_program(),
        ):
            x = self._get_data(name='x', shape=[13], dtype='float32')
            y_predict = layers.fc(input=x, size=1, act=None)
            y = self._get_data(name='y', shape=[1], dtype='float32')
            cost = layers.square_error_cost(input=y_predict, label=y)
            avg_cost = paddle.mean(cost)
            return avg_cost
    def make_recognize_digits_mlp(self):
        with program_guard(
            fluid.default_main_program(), fluid.default_startup_program()
        ):
            # Change g_program, so the rest layers use `g_program`
            images = self._get_data(name='pixel', shape=[784], dtype='float32')
            label = self._get_data(name='label', shape=[1], dtype='int64')
            hidden1 = layers.fc(input=images, size=128, act='relu')
            hidden2 = layers.fc(input=hidden1, size=64, act='relu')
            predict = layers.fc(
                input=[hidden2, hidden1],
                size=10,
                act='softmax',
                param_attr=["sftmax.w1", "sftmax.w2"],
            )
            cost = layers.cross_entropy(input=predict, label=label)
            avg_cost = paddle.mean(cost)
            return avg_cost
    def make_conv2d_transpose(self):
        with program_guard(
            fluid.default_main_program(), fluid.default_startup_program()
        ):
            img = self._get_data(name='pixel', shape=[3, 2, 2], dtype='float32')
            return layers.conv2d_transpose(
                input=img, num_filters=10, output_size=28
            )
    def make_recognize_digits_conv(self):
        with program_guard(
            fluid.default_main_program(), fluid.default_startup_program()
        ):
            images = self._get_data(
                name='pixel', shape=[1, 28, 28], dtype='float32'
            )
            label = self._get_data(name='label', shape=[1], dtype='int64')
            conv_pool_1 = nets.simple_img_conv_pool(
                input=images,
                filter_size=5,
                num_filters=2,
                pool_size=2,
                pool_stride=2,
                act="relu",
            )
            conv_pool_2 = nets.simple_img_conv_pool(
                input=conv_pool_1,
                filter_size=5,
                num_filters=4,
                pool_size=2,
                pool_stride=2,
                act="relu",
            )

            predict = layers.fc(input=conv_pool_2, size=10, act="softmax")
            cost = layers.cross_entropy(input=predict, label=label)
            avg_cost = paddle.mean(cost)
            return avg_cost
    def make_word_embedding(self):
        with program_guard(
            fluid.default_main_program(), fluid.default_startup_program()
        ):
            dict_size = 10000
            embed_size = 32
            first_word = self._get_data(name='firstw', shape=[1], dtype='int64')
            second_word = self._get_data(
                name='secondw', shape=[1], dtype='int64'
            )
            third_word = self._get_data(name='thirdw', shape=[1], dtype='int64')
            forth_word = self._get_data(name='forthw', shape=[1], dtype='int64')
            next_word = self._get_data(name='nextw', shape=[1], dtype='int64')
            embed_first = layers.embedding(
                input=first_word,
                size=[dict_size, embed_size],
                dtype='float32',
                param_attr='shared_w',
            )
            embed_second = layers.embedding(
                input=second_word,
                size=[dict_size, embed_size],
                dtype='float32',
                param_attr='shared_w',
            )

            embed_third = layers.embedding(
                input=third_word,
                size=[dict_size, embed_size],
                dtype='float32',
                param_attr='shared_w',
            )
            embed_forth = layers.embedding(
                input=forth_word,
                size=[dict_size, embed_size],
                dtype='float32',
                param_attr='shared_w',
            )

            concat_embed = layers.concat(
                input=[embed_first, embed_second, embed_third, embed_forth],
                axis=1,
            )

            hidden1 = layers.fc(input=concat_embed, size=256, act='sigmoid')
            predict_word = layers.fc(
                input=hidden1, size=dict_size, act='softmax'
            )
            cost = layers.cross_entropy(input=predict_word, label=next_word)
            avg_cost = paddle.mean(cost)
            return avg_cost
    def make_sigmoid_cross_entropy(self):
        with program_guard(
            fluid.default_main_program(), fluid.default_startup_program()
        ):
            dat = self._get_data(name='data', shape=[10], dtype='float32')
            lbl = self._get_data(name='label', shape=[10], dtype='float32')
            ignore_index = -1
            return layers.sigmoid_cross_entropy_with_logits(
                x=dat, label=lbl, ignore_index=ignore_index
            )

    def make_hsigmoid(self):
        self._force_to_use_cpu = True
        with fluid.framework._dygraph_place_guard(place=fluid.CPUPlace()):
            x = self._get_data(name='x', shape=[2], dtype='float32')
            y = self._get_data(name='y', shape=[2], dtype='int64')
            return layers.hsigmoid(input=x, label=y, num_classes=2)
        # test hsigmoid with custom tree structure
        program2 = Program()
        with program_guard(program2):
            x2 = self._get_data(name='x2', shape=[4, 8], dtype='float32')
            y2 = self._get_data(name='y2', shape=[4], dtype='int64')
            path_table = self._get_data(
                name='path_table', shape=[4, 6], dtype='int64'
            )
            path_code = self._get_data(
                name='path_code', shape=[4, 6], dtype='int64'
            )
            return layers.hsigmoid(
                input=x2,
                label=y2,
                num_classes=6,
                path_table=path_table,
                path_code=path_code,
                is_custom=True,
            )
    def make_pool2d(self):
        with program_guard(
            fluid.default_main_program(), fluid.default_startup_program()
        ):
            x = self._get_data(name='x', shape=[3, 224, 224], dtype='float32')
            return layers.pool2d(
                x, pool_size=[5, 3], pool_stride=[1, 2], pool_padding=(2, 1)
            )
    def make_pool2d_infershape(self):
        with program_guard(
            fluid.default_main_program(), fluid.default_startup_program()
        ):
            theta = self._get_data("theta", shape=[2, 3], dtype='float32')
            x = fluid.layers.affine_grid(theta, out_shape=[2, 3, 244, 244])
            return layers.pool2d(
                x, pool_size=[5, 3], pool_stride=[1, 2], pool_padding=(2, 1)
            )

    def make_pool3d(self):
        with program_guard(
            fluid.default_main_program(), fluid.default_startup_program()
        ):
            x = self._get_data(
                name='x', shape=[3, 244, 244, 244], dtype='float32'
            )
            return layers.pool3d(
                x,
                pool_size=[5, 3, 2],
                pool_stride=[1, 2, 3],
                pool_padding=(2, 1, 1),
            )
    def make_adaptive_pool2d(self):
        with program_guard(
            fluid.default_main_program(), fluid.default_startup_program()
        ):
            x = self._get_data(name='x', shape=[3, 224, 224], dtype='float32')
            return layers.adaptive_pool2d(x, [3, 3], pool_type='avg')
            pool, mask = layers.adaptive_pool2d(x, [3, 3], require_index=True)
            return pool
            return mask
            return layers.adaptive_pool2d(x, 3, pool_type='avg')
            pool, mask = layers.adaptive_pool2d(x, 3, require_index=True)
            return pool
            return mask

    def make_adaptive_pool3d(self):
        with program_guard(
            fluid.default_main_program(), fluid.default_startup_program()
        ):
            x = self._get_data(
                name='x', shape=[3, 244, 224, 224], dtype='float32'
            )
            return layers.adaptive_pool3d(x, [3, 3, 3], pool_type='avg')
            pool, mask = layers.adaptive_pool3d(
                x, [3, 3, 3], require_index=True
            )
            return pool
            return mask
            return layers.adaptive_pool3d(x, 3, pool_type='avg')
            pool, mask = layers.adaptive_pool3d(x, 3, require_index=True)
            return pool
            return mask
    def make_lstm_unit(self):
        with program_guard(
            fluid.default_main_program(), fluid.default_startup_program()
        ):
            x_t_data = self._get_data(
                name='x_t_data', shape=[10, 10], dtype='float32'
            )
            x_t = layers.fc(input=x_t_data, size=10)
            prev_hidden_data = self._get_data(
                name='prev_hidden_data', shape=[10, 30], dtype='float32'
            )
            prev_hidden = layers.fc(input=prev_hidden_data, size=30)
            prev_cell_data = self._get_data(
                name='prev_cell', shape=[10, 30], dtype='float32'
            )
            prev_cell = layers.fc(input=prev_cell_data, size=30)
            return layers.lstm_unit(
                x_t=x_t, hidden_t_prev=prev_hidden, cell_t_prev=prev_cell
            )
    def make_softmax(self):
        with program_guard(
            fluid.default_main_program(), fluid.default_startup_program()
        ):
            data = self._get_data(name='data', shape=[10], dtype='float32')
            hid = layers.fc(input=data, size=20)
            return layers.softmax(hid, axis=1)
    def make_space_to_depth(self):
        with program_guard(
            fluid.default_main_program(), fluid.default_startup_program()
        ):
            data = self._get_data(
                name='data',
                shape=[32, 9, 6, 6],
                append_batch_size=False,
                dtype='float32',
            )
            return layers.space_to_depth(data, 3)
    def make_lrn(self):
        with program_guard(
            fluid.default_main_program(), fluid.default_startup_program()
        ):
            data = self._get_data(name='data', shape=[6, 2, 2], dtype='float32')
            return layers.lrn(data)
    def make_get_places(self):
        with program_guard(
            fluid.default_main_program(), fluid.default_startup_program()
        ):
            get_places(device_count=1)
    @prog_scope()
    def make_nce(self):
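        # Skip-gram style setup: embed the context words of a 5-word window
        # through a shared 'emb.w' table and train NCE against the center
        # word.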
        window_size = 5
        words = []
        for i in range(window_size):
            words.append(
                self._get_data(
                    name='word_{0}'.format(i), shape=[1], dtype='int64'
                )
            )

        dict_size = 10000
        label_word = int(window_size // 2) + 1

        embs = []
        for i in range(window_size):
            if i == label_word:
                continue

            emb = layers.embedding(
                input=words[i],
                size=[dict_size, 32],
                param_attr='emb.w',
                is_sparse=True,
            )

            embs.append(emb)

        embs = layers.concat(input=embs, axis=1)
        loss = layers.nce(
            input=embs,
            label=words[label_word],
            num_total_classes=dict_size,
            param_attr='nce.w',
            bias_attr='nce.b',
        )
        avg_loss = paddle.mean(loss)
        return avg_loss
    def make_multiplex(self):
        with program_guard(
            fluid.default_main_program(), fluid.default_startup_program()
        ):
            x1 = self._get_data(name='x1', shape=[4], dtype='float32')
            x2 = self._get_data(name='x2', shape=[4], dtype='float32')
            index = self._get_data(name='index', shape=[1], dtype='int32')
            out = layers.multiplex(inputs=[x1, x2], index=index)
            return out

    def make_softmax_with_cross_entropy(self):
        with program_guard(
            fluid.default_main_program(), fluid.default_startup_program()
        ):
            x = self._get_data(name='x', shape=[16], dtype='float32')
            y = self._get_data(name='label', shape=[1], dtype='int64')
            loss, softmax = layers.softmax_with_cross_entropy(
                x, y, return_softmax=True
            )
            self.assertIsNotNone(loss)
            self.assertIsNotNone(softmax)

            loss = layers.softmax_with_cross_entropy(x, y)
            self.assertIsNotNone(loss)

            x1 = self._get_data(name='x1', shape=[16, 32, 64], dtype='float32')
            y1 = self._get_data(name='label1', shape=[1, 32, 64], dtype='int64')
            y2 = self._get_data(name='label2', shape=[16, 1, 64], dtype='int64')
            y3 = self._get_data(name='label3', shape=[16, 32, 1], dtype='int64')
            loss1 = layers.softmax_with_cross_entropy(x1, y1, axis=1)
            loss2 = layers.softmax_with_cross_entropy(x1, y2, axis=2)
            loss3 = layers.softmax_with_cross_entropy(x1, y3, axis=3)
            loss4 = layers.softmax_with_cross_entropy(x1, y3, axis=-1)
            self.assertIsNotNone(loss1)
            self.assertIsNotNone(loss2)
            self.assertIsNotNone(loss3)
            self.assertIsNotNone(loss4)
            return loss4

    def make_smooth_l1(self):
        with program_guard(
            fluid.default_main_program(), fluid.default_startup_program()
        ):
            x = self._get_data(name='x', shape=[4], dtype='float32')
            y = self._get_data(name='label', shape=[4], dtype='float32')
            loss = layers.smooth_l1(x, y)
            return loss
    def make_scatter(self):
        with program_guard(
            fluid.default_main_program(), fluid.default_startup_program()
        ):
            x = self._get_data(
                name='x', shape=[3, 3], append_batch_size=False, dtype='float32'
            )
            idx = self._get_data(
                name='idx', shape=[2], append_batch_size=False, dtype='int32'
            )
            updates = self._get_data(
                name='updates',
                shape=[2, 3],
                append_batch_size=False,
                dtype='float32',
            )
            out = layers.scatter(input=x, index=idx, updates=updates)
            return out
    def make_one_hot(self):
        with fluid.framework._dygraph_place_guard(place=fluid.CPUPlace()):
            label = self._get_data(name="label", shape=[1], dtype="int32")
            one_hot_label = layers.one_hot(input=label, depth=10)
            return one_hot_label
    def make_label_smooth(self):
        # TODO(minqiyang): support gpu ut
        self._force_to_use_cpu = True
        with fluid.framework._dygraph_place_guard(place=fluid.CPUPlace()):
            label = self._get_data(name="label", shape=[1], dtype="int32")
            one_hot_label = layers.one_hot(input=label, depth=10)
            smooth_label = layers.label_smooth(
                label=one_hot_label, epsilon=0.1, dtype="int32"
            )
            return smooth_label
    def make_topk(self):
        with program_guard(
            fluid.default_main_program(), fluid.default_startup_program()
        ):
            data = self._get_data(name="label", shape=[200], dtype="float32")
            values, indices = layers.topk(data, k=5)
            return values
            return indices

    def make_resize_bilinear(self):
        with program_guard(
            fluid.default_main_program(), fluid.default_startup_program()
        ):
            x = self._get_data(name='x', shape=[3, 9, 6], dtype="float32")
            output = layers.resize_bilinear(x, out_shape=[12, 12])
            return output

    def make_resize_bilinear_by_scale(self):
        with program_guard(
            fluid.default_main_program(), fluid.default_startup_program()
        ):
            x = self._get_data(name='x', shape=[3, 9, 6], dtype="float32")
            output = layers.resize_bilinear(x, scale=1.5)
            return output
    def make_resize_nearest(self):
        try:
            with program_guard(
                fluid.default_main_program(), fluid.default_startup_program()
            ):
                x = self._get_data(name='x1', shape=[3, 9, 6], dtype="float32")
                output = layers.resize_nearest(x, out_shape=[12, 12])
        except ValueError:
            pass

        try:
            with program_guard(
                fluid.default_main_program(), fluid.default_startup_program()
            ):
                x = self._get_data(
                    name='x2', shape=[3, 9, 6, 7], dtype="float32"
                )
                output = layers.resize_nearest(x, out_shape=[12, 12, 12])
        except ValueError:
            pass

        with program_guard(
            fluid.default_main_program(), fluid.default_startup_program()
        ):
            x = self._get_data(name='x', shape=[3, 9, 6], dtype="float32")
            output = layers.resize_nearest(x, out_shape=[12, 12])
            return output

    def make_resize_nearest_by_scale(self):
        with program_guard(
            fluid.default_main_program(), fluid.default_startup_program()
        ):
            x = self._get_data(name='x1', shape=[3, 9, 6], dtype="float32")
            output = layers.resize_nearest(x, scale=1.8)
            return output

    def make_resize_trilinear(self):
        try:
            with program_guard(
                fluid.default_main_program(), fluid.default_startup_program()
            ):
                x = self._get_data(name='x2', shape=[3, 9, 6], dtype="float32")
                output = layers.resize_trilinear(x, out_shape=[12, 12, 12])
        except ValueError:
            pass

        try:
            with program_guard(
                fluid.default_main_program(), fluid.default_startup_program()
            ):
                x = self._get_data(
                    name='x', shape=[3, 9, 6, 7], dtype="float32"
                )
                output = layers.resize_trilinear(x, out_shape=[12, 12])
        except ValueError:
            pass

        with program_guard(
            fluid.default_main_program(), fluid.default_startup_program()
        ):
            x = self._get_data(name='x', shape=[3, 9, 6, 7], dtype="float32")
            output = layers.resize_trilinear(x, out_shape=[12, 12, 12])
            return output

    def make_resize_trilinear_by_scale(self):
        with program_guard(
            fluid.default_main_program(), fluid.default_startup_program()
        ):
            x = self._get_data(name='x', shape=[3, 9, 6, 7], dtype="float32")
            output = layers.resize_trilinear(x, scale=2.1)
            return output
    def make_polygon_box_transform(self):
        with program_guard(
            fluid.default_main_program(), fluid.default_startup_program()
        ):
            x = self._get_data(name='x', shape=[8, 4, 4], dtype="float32")
            output = layers.polygon_box_transform(input=x)
            return output
    def make_l2_normalize(self):
        with program_guard(
            fluid.default_main_program(), fluid.default_startup_program()
        ):
            x = self._get_data(name='x', shape=[8, 7, 10], dtype="float32")
            output = layers.l2_normalize(x, axis=1)
            return output
    def make_crop(self):
        with program_guard(
            fluid.default_main_program(), fluid.default_startup_program()
        ):
            x = self._get_data(name='x', shape=[3, 5], dtype="float32")
            y = self._get_data(name='y', shape=[2, 3], dtype="float32")
            output = layers.crop(x, shape=y)
            return output

    def make_mean_iou(self):
        with fluid.framework._dygraph_place_guard(place=fluid.CPUPlace()):
            x = self._get_data(name='x', shape=[16], dtype='int32')
            y = self._get_data(name='label', shape=[16], dtype='int32')
            iou = layers.mean_iou(x, y, self._high_data_bound)
            return iou
    def make_argsort(self):
        with program_guard(
            fluid.default_main_program(), fluid.default_startup_program()
        ):
            data = self._get_data(name='x', shape=[2, 3, 3], dtype="float32")
            out, ids = layers.argsort(input=data, axis=1)
            return out
            return ids

    def make_rank_loss(self):
        with program_guard(
            fluid.default_main_program(), fluid.default_startup_program()
        ):
            label = self._get_data(
                name='label',
                append_batch_size=False,
                shape=[16, 1],
                dtype="float32",
            )
            left = self._get_data(
                name='left',
                append_batch_size=False,
                shape=[16, 1],
                dtype="float32",
            )
            right = self._get_data(
                name='right',
                append_batch_size=False,
                shape=[16, 1],
                dtype="float32",
            )
            out = layers.rank_loss(label, left, right, name="rank_loss")
            return out

    def make_shape(self):
        with program_guard(
            fluid.default_main_program(), fluid.default_startup_program()
        ):
            input = self._get_data(
                name="input", shape=[3, 100, 100], dtype="float32"
            )
            out = layers.shape(input)
            return out

    def make_pad2d(self):
        with program_guard(
            fluid.default_main_program(), fluid.default_startup_program()
        ):
            input = self._get_data(
                name="input", shape=[3, 100, 100], dtype="float32"
            )
            paddings = layers.fill_constant(shape=[4], dtype='int32', value=1)
            out = layers.pad2d(
                input,
                paddings=[1, 2, 3, 4],
                mode='reflect',
                data_format='NCHW',
                name="shape",
            )
            out_1 = layers.pad2d(
                input,
                paddings=paddings,
                mode='reflect',
                data_format='NCHW',
                name="shape",
            )
            # out_1 exercises the tensor-typed paddings path at graph-build time
            return out

    def make_prelu(self):
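        # 'channel' mode learns one alpha per channel, here initialized to 1.0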
        with program_guard(
            fluid.default_main_program(), fluid.default_startup_program()
        ):
            input = self._get_data(
                name="input", shape=[5, 200, 100, 100], dtype="float32"
            )
            mode = 'channel'
            out = layers.prelu(
                input,
                mode,
                param_attr=ParamAttr(initializer=Constant(1.0)),
                name='prelu',
            )
            return out

    def make_soft_relu(self):
        with program_guard(
            fluid.default_main_program(), fluid.default_startup_program()
        ):
            input = self._get_data(name="input", shape=[16], dtype="float32")
            out = layers.soft_relu(input, threshold=30.0, name='soft_relu')
            return out

    def make_sigmoid(self):
        with program_guard(
            fluid.default_main_program(), fluid.default_startup_program()
        ):
            input = self._get_data(name="input", shape=[16], dtype="float32")
            out = layers.sigmoid(input, name='sigmoid')
            return out

    def make_exp(self):
        with program_guard(
            fluid.default_main_program(), fluid.default_startup_program()
        ):
            input = self._get_data(name="input", shape=[16], dtype="float32")
            out = layers.exp(input, name='exp')
            return out

    def make_tanh(self):
        with program_guard(
            fluid.default_main_program(), fluid.default_startup_program()
        ):
            input = self._get_data(name="input", shape=[16], dtype="float32")
            out = layers.tanh(input, name='tanh')
            return out

    def make_tanh_shrink(self):
        with program_guard(
            fluid.default_main_program(), fluid.default_startup_program()
        ):
            input = self._get_data(name="input", shape=[16], dtype="float32")
            out = layers.tanh_shrink(input, name='tanh_shrink')
            return out

    def make_sqrt(self):
        with program_guard(
            fluid.default_main_program(), fluid.default_startup_program()
        ):
            input = self._get_data(name="input", shape=[16], dtype="float32")
            out = layers.sqrt(input, name='sqrt')
            return out

    def make_abs(self):
        with program_guard(
            fluid.default_main_program(), fluid.default_startup_program()
        ):
            input = self._get_data(name="input", shape=[16], dtype="float32")
            out = layers.abs(input, name='abs')
            return out

    def make_ceil(self):
        with program_guard(
            fluid.default_main_program(), fluid.default_startup_program()
        ):
            input = self._get_data(name="input", shape=[16], dtype="float32")
            out = layers.ceil(input, name='ceil')
            return out

    def make_floor(self):
        with program_guard(
            fluid.default_main_program(), fluid.default_startup_program()
        ):
            input = self._get_data(name="input", shape=[16], dtype="float32")
            out = layers.floor(input, name='floor')
            return out

    def make_cos(self):
        with program_guard(
            fluid.default_main_program(), fluid.default_startup_program()
        ):
            input = self._get_data(name="input", shape=[16], dtype="float32")
            out = layers.cos(input, name='cos')
            return out

    def make_sin(self):
        with program_guard(
            fluid.default_main_program(), fluid.default_startup_program()
        ):
            input = self._get_data(name="input", shape=[16], dtype="float32")
            out = layers.sin(input, name='sin')
            return out

    def make_round(self):
        with program_guard(
            fluid.default_main_program(), fluid.default_startup_program()
        ):
            input = self._get_data(name="input", shape=[16], dtype="float32")
            out = layers.round(input, name='round')
            return out

    def make_reciprocal(self):
        with program_guard(
            fluid.default_main_program(), fluid.default_startup_program()
        ):
            input = self._get_data(name="input", shape=[16], dtype="float32")
            out = layers.reciprocal(input, name='reciprocal')
            return out

    def make_square(self):
        with program_guard(
            fluid.default_main_program(), fluid.default_startup_program()
        ):
            input = self._get_data(name="input", shape=[16], dtype="float32")
            out = layers.square(input, name='square')
            return out

    def make_softplus(self):
        with program_guard(
            fluid.default_main_program(), fluid.default_startup_program()
        ):
            input = self._get_data(name="input", shape=[16], dtype="float32")
            out = layers.softplus(input, name='softplus')
            return out

    def make_softsign(self):
        with program_guard(
            fluid.default_main_program(), fluid.default_startup_program()
        ):
            input = self._get_data(name="input", shape=[16], dtype="float32")
            out = layers.softsign(input, name='softsign')
            return out

    def make_mish(self):
        with program_guard(
            fluid.default_main_program(), fluid.default_startup_program()
        ):
            input = self._get_data(name="input", shape=[16], dtype="float32")
            out = layers.mish(input, name='mish')
            return out

    def make_cross_entropy(self):
        with program_guard(
            fluid.default_main_program(), fluid.default_startup_program()
        ):
            x = self._get_data(name="x", shape=[30, 10], dtype="float32")
            label = self._get_data(name="label", shape=[30, 1], dtype="int64")
            out = layers.cross_entropy(x, label, False, 4)
            return out

    def make_bpr_loss(self):
        self._force_to_use_cpu = True
        with fluid.framework._dygraph_place_guard(place=fluid.CPUPlace()):
            x = self._get_data(name="x", shape=[30, 10], dtype="float32")
            label = self._get_data(name="label", shape=[30, 1], dtype="int64")
            out = layers.bpr_loss(x, label)
            return out

    def make_expand(self):
        with program_guard(
            fluid.default_main_program(), fluid.default_startup_program()
        ):
            x = self._get_data(name="input", shape=[10], dtype='int32')
            out = layers.expand(x, [1, 2])
            return out

    def make_uniform_random_batch_size_like(self):
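        # dim 0 of the requested shape is filled in from input's batch dimension at runtime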
        with program_guard(
            fluid.default_main_program(), fluid.default_startup_program()
        ):
            input = self._get_data(
                name="input", shape=[13, 11], dtype='float32'
            )
            out = layers.uniform_random_batch_size_like(input, [-1, 11])
            return out

    def make_gaussian_random(self):
        with program_guard(
            fluid.default_main_program(), fluid.default_startup_program()
        ):
            out = layers.gaussian_random(shape=[20, 30])
            return out

    def make_sampling_id(self):
        with program_guard(
            fluid.default_main_program(), fluid.default_startup_program()
        ):
            x = self._get_data(
                name="X",
                shape=[13, 11],
                dtype='float32',
                append_batch_size=False,
            )

            out = layers.sampling_id(x)
            return out

    def make_gaussian_random_batch_size_like(self):
        with program_guard(
            fluid.default_main_program(), fluid.default_startup_program()
        ):
            input = self._get_data(
                name="input", shape=[13, 11], dtype='float32'
            )

            out = layers.gaussian_random_batch_size_like(
                input, shape=[-1, 11], mean=1.0, std=2.0
            )
            return out

    def make_sum(self):
        with program_guard(
            fluid.default_main_program(), fluid.default_startup_program()
        ):
            input = self._get_data(
                name="input", shape=[13, 11], dtype='float32'
            )

            out = layers.sum(input)
            return out

    def make_slice(self):
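        # selects input[1:3, 0:3, 2:4]; the ends bounds are exclusive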
        starts = [1, 0, 2]
        ends = [3, 3, 4]
        axes = [0, 1, 2]

        with program_guard(
            fluid.default_main_program(), fluid.default_startup_program()
        ):
            input = self._get_data(
                name="input", shape=[3, 4, 5, 6], dtype='float32'
            )

            out = layers.slice(input, axes=axes, starts=starts, ends=ends)
            return out

    def make_scale_variable(self):
        with program_guard(
            fluid.default_main_program(), fluid.default_startup_program()
        ):
            input = self._get_data(
                name="input", shape=[3, 4, 5, 6], dtype='float32'
            )
            scale_var = self._get_data(
                name="scale",
                shape=[1],
                dtype='float32',
                append_batch_size=False,
            )
            out = layers.scale(input, scale=scale_var)
            return out

    def make_softshrink(self):
        with program_guard(
            fluid.default_main_program(), fluid.default_startup_program()
        ):
            input = self._get_data(name="input", shape=[16], dtype="float32")
            out = layers.softshrink(input, alpha=0.3)
            return out

    def make_iou_similarity(self):
        with program_guard(
            fluid.default_main_program(), fluid.default_startup_program()
        ):
            x = self._get_data(name="x", shape=[4], dtype="float32")
            y = self._get_data(name="y", shape=[4], dtype="float32")
            out = layers.iou_similarity(x, y, name='iou_similarity')
            return out

    def make_grid_sampler(self):
        with program_guard(
            fluid.default_main_program(), fluid.default_startup_program()
        ):
            x = self._get_data(name='x', shape=[3, 5, 7], dtype='float32')
            grid = self._get_data(name='grid', shape=[5, 7, 2], dtype='float32')
            out = layers.grid_sampler(x, grid)
            return out

    def make_bilinear_tensor_product_layer(self):
        with program_guard(
            fluid.default_main_program(), fluid.default_startup_program()
        ):
            data = self._get_data(name='data', shape=[4], dtype="float32")

            theta = self._get_data(name="theta", shape=[5], dtype="float32")
            out = layers.bilinear_tensor_product(data, theta, 6)
            return out

    def make_batch_norm(self):
        with program_guard(
            fluid.default_main_program(), fluid.default_startup_program()
        ):
            data = self._get_data(
                name='data', shape=[32, 128, 128], dtype="float32"
            )
            out = layers.batch_norm(data)
            return out

    def make_batch_norm_momentum_variable(self):
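        # momentum is supplied as a 1-element tensor instead of a Python float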
        with program_guard(
            fluid.default_main_program(), fluid.default_startup_program()
        ):
            data = self._get_data(
                name='data', shape=[32, 128, 128], dtype="float32"
            )
            momentum = self._get_data(
                name='momentum',
                shape=[1],
                dtype='float32',
                append_batch_size=False,
            )
            out = layers.batch_norm(data, momentum=momentum)
            return out

    def make_inplace_abn(self):
        with program_guard(
            fluid.default_main_program(), fluid.default_startup_program()
        ):
            data = self._get_data(
                name='data', shape=[32, 128, 128], dtype="float32"
            )
            out = layers.inplace_abn(data, act='leaky_relu', act_alpha=0.2)
            return out

    def make_inplace_abn_momentum_variable(self):
        with program_guard(
            fluid.default_main_program(), fluid.default_startup_program()
        ):
            data = self._get_data(
                name='data', shape=[32, 128, 128], dtype="float32"
            )
            momentum = self._get_data(
                name='momentum',
                shape=[1],
                dtype='float32',
                append_batch_size=False,
            )
            out = layers.inplace_abn(
                data, momentum=momentum, act='elu', act_alpha=2.0
            )
            return out

    def make_range(self):
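        # range accepts Python scalars as well as 1-element tensors for start/end/step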
        with program_guard(
            fluid.default_main_program(), fluid.default_startup_program()
        ):
            layers.range(0, 10, 2, 'int32')
            layers.range(0.1, 10.0, 0.2, 'float32')
            layers.range(0.1, 10.0, 0.2, 'float64')
            start = layers.fill_constant(shape=[1], value=0.1, dtype="float32")
            end = layers.fill_constant(shape=[1], value=10.0, dtype="float32")
            step = layers.fill_constant(shape=[1], value=0.2, dtype="float32")
            y = layers.range(start, end, step, 'float64')
            return y

    def make_spectral_norm(self):
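        # divides the weight by its largest singular value, estimated with power_iters rounds of power iteration along dim=1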
        with program_guard(
            fluid.default_main_program(), fluid.default_startup_program()
        ):
            weight = self._get_data(
                name='weight',
                shape=[2, 3, 32, 32],
                dtype="float32",
                append_batch_size=False,
            )
            out = layers.spectral_norm(weight, dim=1, power_iters=1)
            return out

    def make_kldiv_loss(self):
        with program_guard(
            fluid.default_main_program(), fluid.default_startup_program()
        ):
            x = self._get_data(
                name='x',
                shape=[32, 128, 128],
                dtype="float32",
                append_batch_size=False,
            )
            target = self._get_data(
                name='target',
                shape=[32, 128, 128],
                dtype="float32",
                append_batch_size=False,
            )
            loss = layers.kldiv_loss(x=x, target=target, reduction='batchmean')
            return loss

    def make_temporal_shift(self):
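        # TSM: shifts a shift_ratio fraction of channels across adjacent frames within each seg_num-frame segment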
        with program_guard(
            fluid.default_main_program(), fluid.default_startup_program()
        ):
            x = self._get_data(name="X", shape=[16, 4, 4], dtype="float32")
            out = layers.temporal_shift(x, seg_num=2, shift_ratio=0.2)
            return out

    def make_shuffle_channel(self):
        with program_guard(
            fluid.default_main_program(), fluid.default_startup_program()
        ):
            x = self._get_data(name="X", shape=[16, 4, 4], dtype="float32")
            out = layers.shuffle_channel(x, group=4)
            return out

    def make_fsp_matrix(self):
        with program_guard(
            fluid.default_main_program(), fluid.default_startup_program()
        ):
            x = self._get_data(name="X", shape=[16, 4, 4], dtype="float32")
            y = self._get_data(name="Y", shape=[8, 4, 4], dtype="float32")
            out = layers.fsp_matrix(x, y)
            return out

    def make_pixel_shuffle(self):
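        # rearranges [N, C*r^2, H, W] into [N, C, H*r, W*r]; with r=3 the 9 input channels collapse to 1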
        with program_guard(
            fluid.default_main_program(), fluid.default_startup_program()
        ):
            x = self._get_data(name="X", shape=[9, 4, 4], dtype="float32")
            out = layers.pixel_shuffle(x, upscale_factor=3)
            return out

    def make_mse_loss(self):
        with program_guard(
            fluid.default_main_program(), fluid.default_startup_program()
        ):
            x = self._get_data(name="X", shape=[1], dtype="float32")
            y = self._get_data(name="Y", shape=[1], dtype="float32")
            out = layers.mse_loss(input=x, label=y)
            return out

    def make_square_error_cost(self):
        with program_guard(
            fluid.default_main_program(), fluid.default_startup_program()
        ):
            x = self._get_data(name="X", shape=[1], dtype="float32")
            y = self._get_data(name="Y", shape=[1], dtype="float32")
            out = layers.square_error_cost(input=x, label=y)
            return out

    def test_dynamic_lstmp(self):
        # TODO(minqiyang): dygraph do not support lod now
        with self.static_graph():
            hidden_dim, proj_dim = 16, 8
            seq_data = layers.data(
                name='seq_data', shape=[10, 10], dtype='float32', lod_level=1
            )
            fc_out = layers.fc(input=seq_data, size=4 * hidden_dim)
            self.assertIsNotNone(
                layers.dynamic_lstmp(
                    input=fc_out, size=4 * hidden_dim, proj_size=proj_dim
                )
            )

    def test_linear_chain_crf(self):
        with self.static_graph():
            label_dict_len = 10
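            # training-time linear_chain_crf and inference-time crf_decoding share transitions through the "crfw" ParamAttr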
            feature = layers.data(name='feature', shape=[784], dtype='float32')
            label = layers.data(name='label', shape=[1], dtype='int64')
            emission = layers.fc(input=feature, size=10)
            crf = layers.linear_chain_crf(
                input=emission, label=label, param_attr=ParamAttr(name="crfw")
            )
            crf_decode = layers.crf_decoding(
                input=emission, param_attr=ParamAttr(name="crfw")
            )
            self.assertIsNotNone(crf)
            self.assertIsNotNone(crf_decode)
            return layers.chunk_eval(
                input=crf_decode,
                label=label,
                chunk_scheme="IOB",
                num_chunk_types=(label_dict_len - 1) // 2,
            )

    def test_linear_chain_crf_padding(self):
        with self.static_graph():
            label_dict_len, max_len = 10, 20
            feature = layers.data(
                name='feature', shape=[max_len, 784], dtype='float32'
            )
            label = layers.data(name='label', shape=[max_len], dtype='int64')
            length = layers.data(name='length', shape=[1], dtype='int64')
            emission = layers.fc(input=feature, size=10, num_flatten_dims=2)
            crf = layers.linear_chain_crf(
                input=emission,
                label=label,
                length=length,
                param_attr=ParamAttr(name="crfw"),
            )
            crf_decode = layers.crf_decoding(
                input=emission, length=length, param_attr=ParamAttr(name="crfw")
            )
            self.assertIsNotNone(crf)
            self.assertIsNotNone(crf_decode)
            return layers.chunk_eval(
                input=crf_decode,
                label=label,
                seq_length=length,
                chunk_scheme="IOB",
                num_chunk_types=(label_dict_len - 1) // 2,
            )

    def test_im2sequence(self):
        # TODO(minqiyang): dygraph do not support lod now
        with self.static_graph():
            x = layers.data(name='x', shape=[3, 128, 128], dtype='float32')
            y = layers.data(name='y', shape=[], dtype='float32')
            output = layers.im2sequence(
                input=x,
                input_image_size=y,
                stride=[1, 1],
                filter_size=[2, 2],
                out_stride=[1, 1],
            )
            return output

    def test_lod_reset(self):
        # TODO(minqiyang): dygraph do not support lod now
        with self.static_graph():
            # case 1
            x = layers.data(name='x', shape=[10], dtype='float32')
            y = layers.data(
                name='y', shape=[10, 20], dtype='float32', lod_level=2
            )
            z = layers.lod_reset(x=x, y=y)
            self.assertTrue(z.lod_level == 2)
            # case 2
            lod_tensor_in = layers.data(name='lod_in', shape=[1], dtype='int32')
            z = layers.lod_reset(x=x, y=lod_tensor_in)
            self.assertTrue(z.lod_level == 1)
            # case 3
            z = layers.lod_reset(x=x, target_lod=[1, 2, 3])
            self.assertTrue(z.lod_level == 1)
            return z

    def test_affine_grid(self):
        with self.static_graph():
            data = layers.data(name='data', shape=[2, 3, 3], dtype="float32")
            out, ids = layers.argsort(input=data, axis=1)

            theta = layers.data(name="theta", shape=[2, 3], dtype="float32")
            out_shape = layers.data(name="out_shape", shape=[-1], dtype="int32")
            data_0 = layers.affine_grid(theta, out_shape)
            data_1 = layers.affine_grid(theta, [5, 3, 28, 28])

            self.assertIsNotNone(data_0)
            self.assertIsNotNone(data_1)

    def test_stridedslice(self):
        axes = [0, 1, 2]
        starts = [1, 0, 2]
        ends = [3, 3, 4]
        strides = [1, 1, 1]
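        # with unit strides this is equivalent to layers.slice over the same axes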
        with self.static_graph():
            x = layers.data(name="x", shape=[245, 30, 30], dtype="float32")
            out = layers.strided_slice(
                x, axes=axes, starts=starts, ends=ends, strides=strides
            )
            return out

    def test_fill_constant_batch_size_like(self):
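        # dim 0 of the requested shape is overwritten with the batch dimension of 'like'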
        with self.static_graph():
            like = fluid.layers.fill_constant(
                shape=[1, 200], value=10, dtype='int64'
            )
            out = layers.fill_constant_batch_size_like(
                input=like, shape=[2, 3300], value=1315454564656, dtype='int64'
            )
            return out

    def test_psroi_pool(self):
        # TODO(minqiyang): dygraph do not support lod now
        with self.static_graph():
            x = layers.data(name="x", shape=[245, 30, 30], dtype="float32")
            rois = layers.data(
                name="rois", shape=[4], dtype="float32", lod_level=1
            )
            output = layers.psroi_pool(x, rois, 5, 0.25, 7, 7)
            return output

    def test_sequence_expand(self):
        # TODO(minqiyang): dygraph do not support lod now
        with self.static_graph():
            x = layers.data(name='x', shape=[10], dtype='float32')
            y = layers.data(
                name='y', shape=[10, 20], dtype='float32', lod_level=2
            )
            return layers.sequence_expand(x=x, y=y, ref_level=1)

    def test_sequence_reshape(self):
        # TODO(minqiyang): dygraph do not support lod now
        with self.static_graph():
            x = layers.data(name='x', shape=[8], dtype='float32', lod_level=1)
            out = layers.sequence_reshape(input=x, new_dim=16)
            return out

    def test_sequence_unpad(self):
        # TODO(minqiyang): dygraph do not support lod now
        with self.static_graph():
            x = layers.data(name='x', shape=[10, 5], dtype='float32')
            length = layers.data(name='length', shape=[], dtype='int64')
            return layers.sequence_unpad(x=x, length=length)

    def test_sequence_softmax(self):
        # TODO(minqiyang): dygraph do not support lod now
        with self.static_graph():
            seq_data = layers.data(
                name='seq_data', shape=[10, 10], dtype='float32', lod_level=1
            )
            seq = layers.fc(input=seq_data, size=20)
            return layers.sequence_softmax(seq)

    def test_sequence_unsqueeze(self):
        # TODO(minqiyang): dygraph do not support lod now
        with self.static_graph():
            x = layers.data(name='x', shape=[8, 2], dtype='float32')
            out = layers.unsqueeze(input=x, axes=[1])
            return out

    def test_sequence_scatter(self):
        # TODO(minqiyang): dygraph do not support lod now
        with self.static_graph():
            x = layers.data(
                name='x', shape=[3, 6], append_batch_size=False, dtype='float32'
            )
            idx = layers.data(
                name='idx',
                shape=[12, 1],
                append_batch_size=False,
                dtype='int32',
                lod_level=1,
            )
            updates = layers.data(
                name='updates',
                shape=[12, 1],
                append_batch_size=False,
                dtype='float32',
                lod_level=1,
            )
            out = layers.sequence_scatter(input=x, index=idx, updates=updates)
            return out

    def test_sequence_slice(self):
        # TODO(minqiyang): dygraph do not support lod now
        with self.static_graph():
            import numpy as np

            seqs = layers.data(
                name='x', shape=[10, 5], dtype='float32', lod_level=1
            )
            offset = layers.assign(input=np.array([[0, 1]]).astype('int32'))
            length = layers.assign(input=np.array([[2, 1]]).astype('int32'))
            out = layers.sequence_slice(
                input=seqs, offset=offset, length=length
            )
            return out

    def test_filter_by_instag(self):
        # TODO(minqiyang): dygraph do not support lod now
        with self.static_graph():
            x1 = layers.data(
                name='Ins', shape=[32, 1], dtype='float32', lod_level=0
            )
            x2 = layers.data(
                name='Ins_tag',
                shape=[32, 1],
                dtype='int64',
                lod_level=0,
                stop_gradient=True,
            )
            x3 = layers.create_global_var(
                shape=[1, 1],
                value=20,
                dtype='int64',
                persistable=True,
                force_cpu=True,
                name='Filter_tag',
            )
            out1, out2 = layers.filter_by_instag(x1, x2, x3, is_lod=True)

    def test_shuffle_batch(self):
        # TODO(minqiyang): dygraph do not support lod now
        with self.static_graph():
            x = layers.data(
                name='X', shape=[4, 50], dtype='float32', lod_level=0
            )
            out1 = fluid.contrib.layers.shuffle_batch(x)
            default_main_program().random_seed = 1000
            out2 = fluid.contrib.layers.shuffle_batch(x)
            self.assertIsNotNone(out1)
            self.assertIsNotNone(out2)
            return out1

    def test_partial_sum(self):
        with self.static_graph():
            x = fluid.data(name="x", shape=[None, 3], dtype="float32")
            y = fluid.data(name="y", shape=[None, 3], dtype="float32")
            sum = fluid.contrib.layers.partial_sum(
                [x, y], start_index=0, length=2
            )
            return sum

    def test_batch_fc(self):
        with self.static_graph():
            input = fluid.data(name="input", shape=[16, 2, 3], dtype="float32")
            out = fluid.contrib.layers.batch_fc(
                input=input,
                param_size=[16, 3, 10],
                param_attr=fluid.ParamAttr(
                    learning_rate=1.0,
                    name="w_0",
                    initializer=fluid.initializer.Xavier(uniform=False),
                ),
                bias_size=[16, 10],
                bias_attr=fluid.ParamAttr(
                    learning_rate=1.0,
                    name="b_0",
                    initializer=fluid.initializer.Xavier(uniform=False),
                ),
                act="relu",
            )
        return out

    def test_rank_attention(self):
        with self.static_graph():
            input = fluid.data(name="input", shape=[None, 2], dtype="float32")
            rank_offset = fluid.data(
                name="rank_offset", shape=[None, 7], dtype="int32"
            )
            out = fluid.contrib.layers.rank_attention(
                input=input,
                rank_offset=rank_offset,
                rank_param_shape=[18, 3],
                rank_param_attr=fluid.ParamAttr(
                    learning_rate=1.0,
                    name="ubm_rank_param.w_0",
                    initializer=fluid.initializer.Xavier(uniform=False),
                ),
                max_rank=3,
            )
            return out

    def test_roi_pool(self):
        x_np = np.random.rand(2, 3, 8, 8).astype('float32')
        rois_np = np.random.rand(3, 4).astype('float32')
        rois_num_np = np.array([1, 2]).astype('int32')

        with self.static_graph():
            x = layers.data(name="x", shape=[3, 8, 8], dtype="float32")
            rois = layers.data(name="rois", shape=[4], dtype="float32")
            rois_num = fluid.data(name="rois_num", shape=[None], dtype="int32")
            output = layers.roi_pool(x, rois, 4, 4, 0.5, rois_num=rois_num)
            static_res = self.get_static_graph_result(
                feed={'x': x_np, 'rois': rois_np, 'rois_num': rois_num_np},
                fetch_list=[output],
            )[0]

        with self.dynamic_graph():
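            # run once under the eager guard and once in legacy dygraph; both must match the static-graph result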
            with _test_eager_guard():
                x_dy = base.to_variable(x_np)
                rois_dy = base.to_variable(rois_np)
                rois_num_dy = base.to_variable(rois_num_np)
                dy_eager_res = layers.roi_pool(
                    x_dy, rois_dy, 4, 4, 0.5, rois_num=rois_num_dy
                )
                dy_eager_res_value = dy_eager_res[0].numpy()

            x_dy = base.to_variable(x_np)
            rois_dy = base.to_variable(rois_np)
            rois_num_dy = base.to_variable(rois_num_np)
            dy_res = layers.roi_pool(
                x_dy, rois_dy, 4, 4, 0.5, rois_num=rois_num_dy
            )
            dy_res_value = dy_res[0].numpy()
        np.testing.assert_array_equal(static_res, dy_res_value)
        np.testing.assert_array_equal(static_res, dy_eager_res_value)

    def test_sequence_enumerate(self):
        # TODO(minqiyang): dygraph do not support lod now
        with self.static_graph():
            x = layers.data(name="input", shape=[1], dtype='int32', lod_level=1)
            out = layers.sequence_enumerate(input=x, win_size=2, pad_value=0)

    def test_roi_align(self):
        x_np = np.random.rand(2, 3, 8, 8).astype('float32')
        rois_np = np.random.rand(3, 4).astype('float32')
        rois_num_np = np.array([1, 2]).astype('int32')

        with self.static_graph():
            x = layers.data(name="x", shape=[3, 8, 8], dtype="float32")
            rois = layers.data(name="rois", shape=[4], dtype="float32")
            rois_num = fluid.data(name="rois_num", shape=[None], dtype="int32")
            output = layers.roi_align(x, rois, 4, 4, 0.5, 2, rois_num=rois_num)
            static_res = self.get_static_graph_result(
                feed={'x': x_np, 'rois': rois_np, 'rois_num': rois_num_np},
                fetch_list=[output],
            )[0]

        with self.dynamic_graph():
            with _test_eager_guard():
                x_dy = base.to_variable(x_np)
                rois_dy = base.to_variable(rois_np)
                rois_num_dy = base.to_variable(rois_num_np)
                dy_eager_res = layers.roi_align(
                    x_dy, rois_dy, 4, 4, 0.5, 2, rois_num=rois_num_dy
                )
                dy_eager_res_value = dy_eager_res.numpy()

            x_dy = base.to_variable(x_np)
            rois_dy = base.to_variable(rois_np)
            rois_num_dy = base.to_variable(rois_num_np)
            dy_res = layers.roi_align(
                x_dy, rois_dy, 4, 4, 0.5, 2, rois_num=rois_num_dy
            )
            dy_res_value = dy_res.numpy()
        np.testing.assert_array_equal(static_res, dy_eager_res_value)
        np.testing.assert_array_equal(static_res, dy_res_value)

    def test_dice_loss(self):
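        # dice loss is roughly 1 - 2*|X∩Y| / (|X| + |Y|), with eps guarding the denominator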
        num_classes = 4
        eps = 1e-6
        input_np = np.random.rand(2, 3, num_classes).astype('float32')
        label_np = np.random.randint(0, num_classes, [2, 3, 1], dtype=np.int64)

        with self.static_graph():
            input_ = layers.data(
                name="input", shape=[None, 3, num_classes], dtype="float32"
            )
            label_ = layers.data(
                name="label", shape=[None, 3, 1], dtype="int64"
            )
            output = layers.dice_loss(input_, label_, eps)
            static_res = self.get_static_graph_result(
                feed={'input': input_np, 'label': label_np}, fetch_list=[output]
            )[0]

        with self.dynamic_graph():
            with _test_eager_guard():
                input_ = base.to_variable(input_np)
                label_ = base.to_variable(label_np)
                dy_eager_res = layers.dice_loss(input_, label_, eps)
                dy_eager_res_value = dy_eager_res.numpy()

            input_ = base.to_variable(input_np)
            label_ = base.to_variable(label_np)
            dy_res = layers.dice_loss(input_, label_, eps)
            dy_res_value = dy_res.numpy()
        np.testing.assert_array_equal(static_res, dy_res_value)
        np.testing.assert_array_equal(static_res, dy_eager_res_value)

    def test_roi_perspective_transform(self):
        # TODO(minqiyang): dygraph do not support lod now
        with self.static_graph():
            x = layers.data(name="x", shape=[256, 30, 30], dtype="float32")
            rois = layers.data(
                name="rois", shape=[8], dtype="float32", lod_level=1
            )
            output = layers.roi_perspective_transform(x, rois, 7, 7, 0.6)
            return output

    def test_row_conv(self):
        # TODO(minqiyang): dygraph do not support lod now
        with self.static_graph():
            x = layers.data(name='x', shape=[16], dtype='float32', lod_level=1)
            out = layers.row_conv(input=x, future_context_size=2)
            return out

    def test_simple_conv2d(self):
        # TODO(minqiyang): dygraph do not support layers with param now
        with self.static_graph():
            images = layers.data(
                name='pixel', shape=[3, 48, 48], dtype='float32'
            )
            return layers.conv2d(
                input=images, num_filters=3, filter_size=[4, 4]
            )

    def test_squeeze(self):
        # TODO(minqiyang): dygraph do not support layers with param now
        with self.static_graph():
            x = layers.data(name='x', shape=[1, 1, 4], dtype='float32')
            out = layers.squeeze(input=x, axes=[2])
            return out

    def test_flatten(self):
        # TODO(minqiyang): dygraph do not support op without kernel now
        with self.static_graph():
            x = layers.data(
                name='x',
                append_batch_size=False,
                shape=[4, 4, 3],
                dtype="float32",
            )
            out = layers.flatten(x, axis=1, name="flatten")
            return out

    def test_linspace(self):
        program = Program()
        with program_guard(program):
            out = layers.linspace(20, 10, 5, 'float64')
            self.assertIsNotNone(out)
        print(str(program))

    def test_deformable_conv(self):
        with self.static_graph():
            input = layers.data(
                name='input',
                append_batch_size=False,
                shape=[2, 3, 32, 32],
                dtype="float32",
            )
            offset = layers.data(
                name='offset',
                append_batch_size=False,
                shape=[2, 18, 32, 32],
                dtype="float32",
            )
            mask = layers.data(
                name='mask',
                append_batch_size=False,
                shape=[2, 9, 32, 32],
                dtype="float32",
            )
            out = layers.deformable_conv(
                input=input,
                offset=offset,
                mask=mask,
                num_filters=2,
                filter_size=3,
                padding=1,
            )
            return out

    def test_deformable_conv2(self):
        with self.static_graph():
            input = fluid.data(
                name='input', shape=[None, 3, None, None], dtype="float32"
            )
            offset = fluid.data(
                name='offset', shape=[None, 18, None, None], dtype="float32"
            )
            mask = fluid.data(
                name='mask', shape=[None, 9, None, None], dtype="float32"
            )
            out = layers.deformable_conv(
                input=input,
                offset=offset,
                mask=mask,
                num_filters=2,
                filter_size=3,
                padding=1,
            )
            return out

    def test_unfold(self):
        with self.static_graph():
            x = layers.data(name='x', shape=[3, 20, 20], dtype='float32')
            out = layers.unfold(x, [3, 3], 1, 1, 1)
            return out

    def test_partial_concat(self):
        with self.static_graph():
            x = fluid.data(name="x", shape=[None, 3], dtype="float32")
            y = fluid.data(name="y", shape=[None, 3], dtype="float32")
            concat1 = fluid.contrib.layers.partial_concat(
                [x, y], start_index=0, length=2
            )
            concat2 = fluid.contrib.layers.partial_concat(
                x, start_index=0, length=-1
            )
            return concat1, concat2

    def test_deform_roi_pooling(self):
        with program_guard(
            fluid.default_main_program(), fluid.default_startup_program()
        ):
            input = layers.data(
                name='input',
                shape=[2, 3, 32, 32],
                dtype='float32',
                append_batch_size=False,
            )
            rois = layers.data(
                name="rois", shape=[4], dtype='float32', lod_level=1
            )
            trans = layers.data(
                name="trans",
                shape=[2, 3, 32, 32],
                dtype='float32',
                append_batch_size=False,
            )
            out = layers.deformable_roi_pooling(
                input=input,
                rois=rois,
                trans=trans,
                no_trans=False,
                spatial_scale=1.0,
                group_size=(1, 1),
                pooled_height=8,
                pooled_width=8,
                part_size=(8, 8),
                sample_per_part=4,
                trans_std=0.1,
            )
        return out

    def test_deformable_conv_v1(self):
        with program_guard(
            fluid.default_main_program(), fluid.default_startup_program()
        ):
            input = layers.data(
                name='input',
                append_batch_size=False,
                shape=[2, 3, 32, 32],
                dtype="float32",
            )
            offset = layers.data(
                name='offset',
                append_batch_size=False,
                shape=[2, 18, 32, 32],
                dtype="float32",
            )
            out = layers.deformable_conv(
                input=input,
                offset=offset,
                mask=None,
                num_filters=2,
                filter_size=3,
                padding=1,
                modulated=False,
            )
            return out

    def test_retinanet_target_assign(self):
        with program_guard(
            fluid.default_main_program(), fluid.default_startup_program()
        ):
            bbox_pred = layers.data(
                name='bbox_pred',
                shape=[1, 100, 4],
                append_batch_size=False,
                dtype='float32',
            )
            cls_logits = layers.data(
                name='cls_logits',
                shape=[1, 100, 10],
                append_batch_size=False,
                dtype='float32',
            )
            anchor_box = layers.data(
                name='anchor_box',
                shape=[100, 4],
                append_batch_size=False,
                dtype='float32',
            )
            anchor_var = layers.data(
                name='anchor_var',
                shape=[100, 4],
                append_batch_size=False,
                dtype='float32',
            )
            gt_boxes = layers.data(
                name='gt_boxes',
                shape=[10, 4],
                append_batch_size=False,
                dtype='float32',
            )
            gt_labels = layers.data(
                name='gt_labels',
                shape=[10, 1],
                append_batch_size=False,
                dtype='int32',
            )
            is_crowd = layers.data(
                name='is_crowd',
                shape=[1],
                append_batch_size=False,
                dtype='int32',
            )
            im_info = layers.data(
                name='im_info',
                shape=[1, 3],
                append_batch_size=False,
                dtype='float32',
            )
            return layers.retinanet_target_assign(
                bbox_pred,
                cls_logits,
                anchor_box,
                anchor_var,
                gt_boxes,
                gt_labels,
                is_crowd,
                im_info,
                10,
            )

    def test_sigmoid_focal_loss(self):
        with program_guard(
            fluid.default_main_program(), fluid.default_startup_program()
        ):
            input = layers.data(
                name='data',
                shape=[10, 80],
                append_batch_size=False,
                dtype='float32',
            )
            label = layers.data(
                name='label',
                shape=[10, 1],
                append_batch_size=False,
                dtype='int32',
            )
            fg_num = layers.data(
                name='fg_num', shape=[1], append_batch_size=False, dtype='int32'
            )
            out = fluid.layers.sigmoid_focal_loss(
                x=input, label=label, fg_num=fg_num, gamma=2.0, alpha=0.25
            )
            return out

    def test_addmm(self):
        with program_guard(
            fluid.default_main_program(), fluid.default_startup_program()
        ):
            input = layers.data(
                name='input_data',
                shape=[3, 3],
                append_batch_size=False,
                dtype='float32',
            )
            x = layers.data(
                name='x', shape=[3, 2], append_batch_size=False, dtype='float32'
            )
            y = layers.data(
                name='y', shape=[2, 3], append_batch_size=False, dtype='float32'
            )

            out = paddle.addmm(input=input, x=x, y=y)
            return out

    def test_retinanet_detection_output(self):
        with program_guard(
            fluid.default_main_program(), fluid.default_startup_program()
        ):
            bboxes = layers.data(
                name='bboxes',
                shape=[1, 21, 4],
                append_batch_size=False,
                dtype='float32',
            )
            scores = layers.data(
                name='scores',
                shape=[1, 21, 10],
                append_batch_size=False,
                dtype='float32',
            )
            anchors = layers.data(
                name='anchors',
                shape=[21, 4],
                append_batch_size=False,
                dtype='float32',
            )
            im_info = layers.data(
                name="im_info",
                shape=[1, 3],
                append_batch_size=False,
                dtype='float32',
            )
            nmsed_outs = layers.retinanet_detection_output(
                bboxes=[bboxes, bboxes],
                scores=[scores, scores],
                anchors=[anchors, anchors],
                im_info=im_info,
                score_threshold=0.05,
                nms_top_k=1000,
                keep_top_k=100,
                nms_threshold=0.3,
                nms_eta=1.0,
            )
            return nmsed_outs

    def test_warpctc_with_padding(self):
        # TODO(minqiyang): dygraph do not support lod now
        with self.static_graph():
            input_length = layers.data(
                name='logits_length', shape=[11], dtype='int64'
            )
            label_length = layers.data(
                name='labels_length', shape=[12], dtype='int64'
            )
            label = layers.data(name='label', shape=[12, 1], dtype='int32')
            predict = layers.data(
                name='predict', shape=[4, 4, 8], dtype='float32'
            )
            output = layers.warpctc(
                input=predict,
                label=label,
                input_length=input_length,
                label_length=label_length,
            )
            return output

    def test_edit_distance(self):
        with self.static_graph():
            predict = layers.data(
                name='predict', shape=[-1, 1], dtype='int64', lod_level=1
            )
            label = layers.data(
                name='label', shape=[-1, 1], dtype='int64', lod_level=1
            )
            evaluator = fluid.evaluator.EditDistance(predict, label)
            return evaluator.metrics

    def test_basic_gru(self):
        input_size = 128
        hidden_size = 256
        with self.static_graph():
            input = fluid.data(
                name="input", shape=[None, None, input_size], dtype='float32'
            )
            pre_hidden = fluid.data(
                name="pre_hidden", shape=[None, hidden_size], dtype='float32'
            )
            sequence_length = fluid.data(
                name="sequence_length", shape=[None], dtype='int32'
            )
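            # the loop below exercises all four (bidirectional, batch_first) combinations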

            for bidirectional in [True, False]:
                for batch_first in [True, False]:
                    rnn_out, last_hidden = fluid.contrib.layers.basic_gru(
                        input,
                        pre_hidden,
                        hidden_size=256,
                        num_layers=2,
                        sequence_length=sequence_length,
                        dropout_prob=0.5,
                        bidirectional=bidirectional,
                        batch_first=batch_first,
                    )


class TestMetricsDetectionMap(unittest.TestCase):
    def test_detection_map(self):
        program = fluid.Program()
        with program_guard(program):
            detect_res = fluid.layers.data(
                name='detect_res',
                shape=[10, 6],
                append_batch_size=False,
                dtype='float32',
            )
            label = fluid.layers.data(
                name='label',
                shape=[10, 1],
                append_batch_size=False,
                dtype='float32',
            )
            box = fluid.layers.data(
                name='bbox',
                shape=[10, 4],
                append_batch_size=False,
                dtype='float32',
            )
            map_eval = fluid.metrics.DetectionMAP(
                detect_res, label, box, class_num=21
            )
            cur_map, accm_map = map_eval.get_map_var()
            self.assertIsNotNone(cur_map)
            self.assertIsNotNone(accm_map)
        print(str(program))


class ExampleNet(paddle.nn.Layer):
    def __init__(self):
        super(ExampleNet, self).__init__()
        self.weight = self.create_parameter(
            shape=[1, 1], attr=paddle.ParamAttr(trainable=False)
        )

    def forward(self):
        # only for test parameter trainable attr
        pass


class TestLayerParameterTrainableSet(unittest.TestCase):
    def test_layer_parameter_set(self):
        with fluid.dygraph.guard():
            net = ExampleNet()
            self.assertFalse(net.weight.trainable)


class TestLayerTrainingAttribute(unittest.TestCase):
    def test_set_train_eval_in_dynamic_mode(self):
        with fluid.dygraph.guard():
            net = paddle.nn.Dropout()
            net.train()
            self.assertTrue(net.training)
            net.eval()
            self.assertFalse(net.training)

    def test_set_train_eval_in_static_mode(self):
        net = paddle.nn.Dropout()
        net.train()
        self.assertTrue(net.training)
        net.eval()
        self.assertFalse(net.training)


class MyLayer(paddle.nn.Layer):
    def __init__(self):
        super(MyLayer, self).__init__()
        self._linear = paddle.nn.Linear(1, 1)
        self._dropout = paddle.nn.Dropout(p=0.5)

    def forward(self, input):
        temp = self._linear(input)
        temp = self._dropout(temp)
        return temp


class MySuperLayer(paddle.nn.Layer):
    def __init__(self):
        super(MySuperLayer, self).__init__()
        self._mylayer = MyLayer()

    def forward(self, input):
        temp = self._mylayer(input)
        return temp


class TestSubLayerCount(unittest.TestCase):
    def test_sublayer(self):
        with fluid.dygraph.guard():
            mySuperlayer = MySuperLayer()
            self.assertTrue(len(mySuperlayer.sublayers()) == 3)
            self.assertTrue(len(mySuperlayer.sublayers(include_self=True)) == 4)


if __name__ == '__main__':
    paddle.enable_static()
    unittest.main()