#   Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import unittest

import contextlib
import numpy as np
from decorator_helper import prog_scope
import inspect
from six.moves import filter

import paddle
import paddle.fluid as fluid
from paddle.fluid.layers.device import get_places
import paddle.fluid.nets as nets
from paddle.fluid.framework import Program, program_guard, default_main_program
from paddle.fluid.param_attr import ParamAttr
from paddle.fluid import core
from paddle.fluid.initializer import Constant
import paddle.fluid.layers as layers
from test_imperative_base import new_program_scope
from paddle.fluid.dygraph import nn
from paddle.fluid.dygraph import base
from paddle.fluid.dygraph import to_variable
from paddle.fluid.framework import _test_eager_guard


class LayerTest(unittest.TestCase):
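    # Shared scaffolding for the layer tests below: fixes a common seed and
    # provides static_graph()/dynamic_graph() contexts plus an executor
    # helper, so each test can compare static-graph and dygraph outputs.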

    @classmethod
    def setUpClass(cls):
        cls.seed = 111

    @classmethod
    def tearDownClass(cls):
        pass

    def _get_place(self, force_to_use_cpu=False):
        # this option is for ops that only have a CPU kernel
        if force_to_use_cpu:
            return core.CPUPlace()
        else:
            if core.is_compiled_with_cuda():
                return core.CUDAPlace(0)
            return core.CPUPlace()

    @contextlib.contextmanager
    def static_graph(self):
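        # Enter a fresh program scope and re-seed both the global and the
        # per-program random generators so static results are reproducible.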
        with new_program_scope():
            paddle.seed(self.seed)
            paddle.framework.random._manual_program_seed(self.seed)
            yield

    def get_static_graph_result(self,
                                feed,
                                fetch_list,
                                with_lod=False,
                                force_to_use_cpu=False):
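        # Run the startup program, then fetch the requested targets from the
        # main program; returns LoDTensors when with_lod=True, numpy arrays
        # otherwise.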
        exe = fluid.Executor(self._get_place(force_to_use_cpu))
        exe.run(fluid.default_startup_program())
        return exe.run(fluid.default_main_program(),
                       feed=feed,
                       fetch_list=fetch_list,
                       return_numpy=(not with_lod))

    @contextlib.contextmanager
    def dynamic_graph(self, force_to_use_cpu=False):
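        # Mirror of static_graph(): a dygraph guard on the same place, with
        # the same seeds.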
        with fluid.dygraph.guard(
                self._get_place(force_to_use_cpu=force_to_use_cpu)):
            paddle.seed(self.seed)
            paddle.framework.random._manual_program_seed(self.seed)
            yield


class TestLayer(LayerTest):

    def test_custom_layer_with_kwargs(self):
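        # A custom Layer's __call__ should forward keyword arguments to
        # forward(); checked in both eager mode and legacy dygraph.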

        class CustomLayer(fluid.Layer):

            def __init__(self, input_size, linear1_size=4):
                super(CustomLayer, self).__init__()
                self.linear1 = nn.Linear(input_size,
                                         linear1_size,
                                         bias_attr=False)
                self.linear2 = nn.Linear(linear1_size, 1, bias_attr=False)

            def forward(self, x, do_linear2=False):
                ret = self.linear1(x)
                if do_linear2:
                    ret = self.linear2(ret)
                return ret

        with self.dynamic_graph():
            with _test_eager_guard():
                inp = np.ones([3, 3], dtype='float32')
                x = base.to_variable(inp)
                custom = CustomLayer(input_size=3, linear1_size=2)
                ret = custom(x, do_linear2=False)
                np.testing.assert_array_equal(ret.numpy().shape, [3, 2])
                ret = custom(x, do_linear2=True)
                np.testing.assert_array_equal(ret.numpy().shape, [3, 1])
            inp = np.ones([3, 3], dtype='float32')
            x = base.to_variable(inp)
            custom = CustomLayer(input_size=3, linear1_size=2)
            ret = custom(x, do_linear2=False)
            np.testing.assert_array_equal(ret.numpy().shape, [3, 2])
            ret = custom(x, do_linear2=True)
            np.testing.assert_array_equal(ret.numpy().shape, [3, 1])

    def test_dropout(self):
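        # With a fixed seed, nn.Dropout and fluid.layers.dropout should agree
        # with each other and across static, dygraph and eager execution.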
        inp = np.ones([3, 32, 32], dtype='float32')
        with self.static_graph():
            t = layers.data(name='data',
                            shape=[3, 32, 32],
                            dtype='float32',
                            append_batch_size=False)
            dropout = nn.Dropout(p=0.35, seed=1, is_test=False)
            ret = dropout(t)
            ret2 = fluid.layers.dropout(t,
                                        dropout_prob=0.35,
                                        seed=1,
                                        is_test=False)
            static_ret, static_ret2 = self.get_static_graph_result(
                feed={'data': inp}, fetch_list=[ret, ret2])
        with self.dynamic_graph():
            with _test_eager_guard():
                t = base.to_variable(inp)
                dropout = nn.Dropout(p=0.35, seed=1, is_test=False)
                dy_eager_ret = dropout(t)
                dy_eager_ret2 = fluid.layers.dropout(t,
                                                     dropout_prob=0.35,
                                                     seed=1,
                                                     is_test=False)
                dy_eager_ret_value = dy_eager_ret.numpy()
                dy_eager_ret2_value = dy_eager_ret2.numpy()

            t = base.to_variable(inp)
            dropout = nn.Dropout(p=0.35, seed=1, is_test=False)
            dy_ret = dropout(t)
            dy_ret2 = fluid.layers.dropout(t,
                                           dropout_prob=0.35,
                                           seed=1,
                                           is_test=False)
            dy_ret_value = dy_ret.numpy()
            dy_ret2_value = dy_ret2.numpy()

        np.testing.assert_array_equal(dy_eager_ret_value, dy_eager_ret2_value)
        np.testing.assert_array_equal(static_ret, dy_eager_ret_value)

        np.testing.assert_array_equal(static_ret, static_ret2)
        np.testing.assert_array_equal(dy_ret_value, dy_ret2_value)
        np.testing.assert_array_equal(static_ret, dy_ret_value)

    def test_linear(self):
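        # nn.Linear should match the static-graph result and raise TypeError
        # for non-Variable or integer inputs.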
        inp = np.ones([3, 32, 32], dtype='float32')
        with self.static_graph():
            t = layers.data(name='data',
                            shape=[3, 32, 32],
                            dtype='float32',
                            append_batch_size=False)
            linear = nn.Linear(
                32, 4, bias_attr=fluid.initializer.ConstantInitializer(value=1))
            ret = linear(t)
            static_ret = self.get_static_graph_result(feed={'data': inp},
                                                      fetch_list=[ret])[0]
        with self.dynamic_graph():
            with _test_eager_guard():
                t = base.to_variable(inp)
                linear = nn.Linear(
                    32,
                    4,
                    bias_attr=fluid.initializer.ConstantInitializer(value=1))
                dy_eager_ret = linear(t)
                dy_eager_ret_value = dy_eager_ret.numpy()

            t = base.to_variable(inp)
            linear = nn.Linear(
                32, 4, bias_attr=fluid.initializer.ConstantInitializer(value=1))
            dy_ret = linear(t)
            dy_ret_value = dy_ret.numpy()

        np.testing.assert_array_equal(static_ret, dy_eager_ret_value)
        np.testing.assert_array_equal(static_ret, dy_ret_value)

        with self.static_graph():

            # the input of Linear must be a Variable.
            def test_Variable():
                inp = np.ones([3, 32, 32], dtype='float32')
                linear = nn.Linear(
                    32,
                    4,
                    bias_attr=fluid.initializer.ConstantInitializer(value=1))
                linear_ret1 = linear(inp)

            self.assertRaises(TypeError, test_Variable)

            # the input dtype of Linear must be float16, float32 or float64
            # float16 can only be set on a GPU place
            def test_type():
                inp = np.ones([3, 32, 32], dtype='int32')
                linear = nn.Linear(
                    32,
                    4,
                    bias_attr=fluid.initializer.ConstantInitializer(value=1))
                linear_ret2 = linear(inp)

            self.assertRaises(TypeError, test_type)

    def test_Flatten(self):
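        # nn.Flatten should agree between static graph, dygraph and eager
        # mode on a 4-D input.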
        inp = np.ones([3, 4, 4, 5], dtype='float32')
        with self.static_graph():
            t = layers.data(name='data',
                            shape=[3, 4, 4, 5],
                            dtype='float32',
                            append_batch_size=False)
            flatten = nn.Flatten()
            ret = flatten(t)
            static_ret = self.get_static_graph_result(feed={'data': inp},
                                                      fetch_list=[ret])[0]
        with self.dynamic_graph():
            with _test_eager_guard():
                t = base.to_variable(inp)
                flatten = nn.Flatten()
                dy_eager_ret = flatten(t)
                dy_eager_ret_value = dy_eager_ret.numpy()

            t = base.to_variable(inp)
            flatten = nn.Flatten()
            dy_ret = flatten(t)
            dy_ret_value = dy_ret.numpy()

        np.testing.assert_array_equal(static_ret, dy_eager_ret_value)
        np.testing.assert_array_equal(static_ret, dy_ret_value)

        with self.static_graph():

            # the input of Linear must be a Variable.
            def test_Variable():
                inp = np.ones([3, 32, 32], dtype='float32')
                linear = nn.Linear(
                    32,
                    4,
                    bias_attr=fluid.initializer.ConstantInitializer(value=1))
                linear_ret1 = linear(inp)

            self.assertRaises(TypeError, test_Variable)

            # the input dtype of Linear must be float16, float32 or float64
            # float16 can only be set on a GPU place
            def test_type():
                inp = np.ones([3, 32, 32], dtype='int32')
                linear = nn.Linear(
                    32,
                    4,
                    bias_attr=fluid.initializer.ConstantInitializer(value=1))
                linear_ret2 = linear(inp)

            self.assertRaises(TypeError, test_type)

    def test_layer_norm(self):
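        # Compares layers.layer_norm with nn.LayerNorm, covers shift/scale
        # switched off, and expects ValueError when normalized_shape does not
        # match the input.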
        inp = np.ones([3, 32, 32], dtype='float32')
        with self.static_graph():
            t = layers.data(name='data',
                            shape=[3, 32, 32],
                            dtype='float32',
                            append_batch_size=False)
            ret = layers.layer_norm(
                t,
                bias_attr=fluid.initializer.ConstantInitializer(value=1),
                act='sigmoid')
            static_ret = self.get_static_graph_result(feed={'data': inp},
                                                      fetch_list=[ret])[0]
        with self.static_graph():
            t = layers.data(name='data',
                            shape=[3, 32, 32],
                            dtype='float32',
                            append_batch_size=False)
            lm = nn.LayerNorm(
                normalized_shape=[32, 32],
                bias_attr=fluid.initializer.ConstantInitializer(value=1),
                act='sigmoid')
            ret = lm(t)
            static_ret2 = self.get_static_graph_result(feed={'data': inp},
                                                       fetch_list=[ret])[0]
        with self.dynamic_graph():
            with _test_eager_guard():
                lm = nn.LayerNorm(
                    normalized_shape=[32, 32],
                    bias_attr=fluid.initializer.ConstantInitializer(value=1),
                    act='sigmoid')
                dy_eager_ret = lm(base.to_variable(inp))
                dy_eager_ret_value = dy_eager_ret.numpy()

            lm = nn.LayerNorm(
                normalized_shape=[32, 32],
                bias_attr=fluid.initializer.ConstantInitializer(value=1),
                act='sigmoid')
            dy_ret = lm(base.to_variable(inp))
            dy_ret_value = dy_ret.numpy()

        with self.dynamic_graph():
            with _test_eager_guard():
                lm = nn.LayerNorm(
                    normalized_shape=[32, 32],
                    shift=False,
                    scale=False,
                    param_attr=fluid.initializer.ConstantInitializer(value=1),
                    bias_attr=fluid.initializer.ConstantInitializer(value=1),
                    act='sigmoid')
                lm(base.to_variable(inp))

                self.assertFalse(hasattr(lm, "_scale_w"))
                self.assertFalse(hasattr(lm, "_bias_w"))

            lm = nn.LayerNorm(
                normalized_shape=[32, 32],
                shift=False,
                scale=False,
                param_attr=fluid.initializer.ConstantInitializer(value=1),
                bias_attr=fluid.initializer.ConstantInitializer(value=1),
                act='sigmoid')
            lm(base.to_variable(inp))

            self.assertFalse(hasattr(lm, "_scale_w"))
            self.assertFalse(hasattr(lm, "_bias_w"))

        np.testing.assert_array_equal(static_ret, static_ret2)
        np.testing.assert_array_equal(dy_eager_ret_value, static_ret2)
        np.testing.assert_array_equal(dy_ret_value, static_ret2)

        with self.dynamic_graph():
            with _test_eager_guard():
                lm = nn.LayerNorm(
                    normalized_shape=[16, 32],
                    bias_attr=fluid.initializer.ConstantInitializer(value=1),
                    act='sigmoid')
                with self.assertRaises(ValueError):
                    lm(base.to_variable(inp))

            lm = nn.LayerNorm(
                normalized_shape=[16, 32],
                bias_attr=fluid.initializer.ConstantInitializer(value=1),
                act='sigmoid')
            with self.assertRaises(ValueError):
                lm(base.to_variable(inp))

    def test_SyncBatchNorm(self):
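        # Runs only when compiled with CUDA; SyncBatchNorm should give the
        # same result in static graph, dygraph and eager mode.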
        if core.is_compiled_with_cuda():
            with self.static_graph():
                t = layers.data(name='t', shape=[-1, 3, 5, 5], dtype='float32')
                my_sync_bn = paddle.nn.SyncBatchNorm(3)
                ret = my_sync_bn(t)
                static_ret = self.get_static_graph_result(
                    feed={'t': np.ones([3, 3, 5, 5], dtype='float32')},
                    fetch_list=[ret])[0]

            with self.dynamic_graph():
                with _test_eager_guard():
                    t = np.ones([3, 3, 5, 5], dtype='float32')
                    my_syncbn = paddle.nn.SyncBatchNorm(3)
                    dy_eager_ret = my_syncbn(base.to_variable(t))
                    dy_eager_ret_value = dy_eager_ret.numpy()

                t = np.ones([3, 3, 5, 5], dtype='float32')
                my_syncbn = paddle.nn.SyncBatchNorm(3)
                dy_ret = my_syncbn(base.to_variable(t))
                dy_ret_value = dy_ret.numpy()
            np.testing.assert_array_equal(static_ret, dy_ret_value)
            np.testing.assert_array_equal(static_ret, dy_eager_ret_value)

    def test_relu(self):
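        # Element-wise relu: static and dynamic results should agree.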
        with self.static_graph():
            t = layers.data(name='t', shape=[3, 3], dtype='float32')
            ret = layers.relu(t)
            static_ret = self.get_static_graph_result(
                feed={'t': np.ones([3, 3],
                                   dtype='float32')}, fetch_list=[ret])[0]

        with self.dynamic_graph():
            with _test_eager_guard():
                t = np.ones([3, 3], dtype='float32')
                dy_eager_ret = layers.relu(base.to_variable(t))
                dy_eager_ret_value = dy_eager_ret.numpy()

            t = np.ones([3, 3], dtype='float32')
            dy_ret = layers.relu(base.to_variable(t))
            dy_ret_value = dy_ret.numpy()

        np.testing.assert_allclose(static_ret, dy_ret_value, rtol=1e-05)
        np.testing.assert_allclose(static_ret, dy_eager_ret_value, rtol=1e-05)

    def test_matmul(self):
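        # 3x3 matmul of all-ones inputs: static and dynamic results should
        # agree.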
        with self.static_graph():
            t = layers.data(name='t', shape=[3, 3], dtype='float32')
            t2 = layers.data(name='t2', shape=[3, 3], dtype='float32')
            ret = layers.matmul(t, t2)
            static_ret = self.get_static_graph_result(feed={
                't':
                np.ones([3, 3], dtype='float32'),
                't2':
                np.ones([3, 3], dtype='float32')
            },
                                                      fetch_list=[ret])[0]

        with self.dynamic_graph():
            with _test_eager_guard():
                t = np.ones([3, 3], dtype='float32')
                t2 = np.ones([3, 3], dtype='float32')
                dy_eager_ret = layers.matmul(base.to_variable(t),
                                             base.to_variable(t2))
                dy_eager_ret_value = dy_eager_ret.numpy()

            t = np.ones([3, 3], dtype='float32')
            t2 = np.ones([3, 3], dtype='float32')
            dy_ret = layers.matmul(base.to_variable(t), base.to_variable(t2))
            dy_ret_value = dy_ret.numpy()

        np.testing.assert_allclose(static_ret, dy_ret_value, rtol=1e-05)
        np.testing.assert_allclose(static_ret, dy_eager_ret_value, rtol=1e-05)

    def test_conv2d(self):
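        # Covers the functional and Layer forms of conv2d, bias_attr=False,
        # input type/dtype validation, and weight/bias sharing between two
        # Conv2D instances.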
        with self.static_graph():
            images = layers.data(name='pixel', shape=[3, 5, 5], dtype='float32')
            ret = layers.conv2d(input=images, num_filters=3, filter_size=[2, 2])
            static_ret = self.get_static_graph_result(
                feed={'pixel': np.ones([2, 3, 5, 5], dtype='float32')},
                fetch_list=[ret])[0]

        with self.static_graph():
            images = layers.data(name='pixel', shape=[3, 5, 5], dtype='float32')
            conv2d = nn.Conv2D(num_channels=3,
                               num_filters=3,
                               filter_size=[2, 2])
            ret = conv2d(images)
            static_ret2 = self.get_static_graph_result(
                feed={'pixel': np.ones([2, 3, 5, 5], dtype='float32')},
                fetch_list=[ret])[0]

        with self.dynamic_graph():
            with _test_eager_guard():
                images = np.ones([2, 3, 5, 5], dtype='float32')
                conv2d = nn.Conv2D(num_channels=3,
                                   num_filters=3,
                                   filter_size=[2, 2])
                dy_eager_ret = conv2d(base.to_variable(images))
                dy_eager_ret_value = dy_eager_ret.numpy()

            images = np.ones([2, 3, 5, 5], dtype='float32')
            conv2d = nn.Conv2D(num_channels=3,
                               num_filters=3,
                               filter_size=[2, 2])
            dy_ret = conv2d(base.to_variable(images))
            dy_ret_value = dy_ret.numpy()

        with self.dynamic_graph():
            with _test_eager_guard():
                images = np.ones([2, 3, 5, 5], dtype='float32')
                conv2d = nn.Conv2D(num_channels=3,
                                   num_filters=3,
                                   filter_size=[2, 2],
                                   bias_attr=False)
                dy_ret = conv2d(base.to_variable(images))
                self.assertTrue(conv2d.bias is None)

            images = np.ones([2, 3, 5, 5], dtype='float32')
            conv2d = nn.Conv2D(num_channels=3,
                               num_filters=3,
                               filter_size=[2, 2],
                               bias_attr=False)
            dy_ret = conv2d(base.to_variable(images))
            self.assertTrue(conv2d.bias is None)

        with self.static_graph():
            # the input of Conv2D must be a Variable.
            def test_Variable():
                images = np.ones([2, 3, 5, 5], dtype='float32')
                conv2d = nn.Conv2D(num_channels=3,
                                   num_filters=3,
                                   filter_size=[2, 2])
                conv2d_ret1 = conv2d(images)

            self.assertRaises(TypeError, test_Variable)

            # the input dtype of Conv2D must be float16, float32 or float64
            # float16 can only be set on a GPU place
            def test_type():
                images = layers.data(name='pixel',
                                     shape=[3, 5, 5],
                                     dtype='int32')
                conv2d = nn.Conv2D(num_channels=3,
                                   num_filters=3,
                                   filter_size=[2, 2])
                conv2d_ret2 = conv2d(images)

            self.assertRaises(TypeError, test_type)

        np.testing.assert_allclose(static_ret, dy_ret_value, rtol=1e-05)
        np.testing.assert_allclose(static_ret, dy_eager_ret_value, rtol=1e-05)
        np.testing.assert_allclose(static_ret, static_ret2, rtol=1e-05)

        with self.dynamic_graph():
            with _test_eager_guard():
                images = np.ones([2, 3, 5, 5], dtype='float32')
                custom_weight = np.random.randn(3, 3, 2, 2).astype("float32")
                weight_attr = fluid.ParamAttr(
                    initializer=fluid.initializer.NumpyArrayInitializer(
                        custom_weight))
                conv2d1 = nn.Conv2D(num_channels=3,
                                    num_filters=3,
                                    filter_size=[2, 2])
                conv2d2 = nn.Conv2D(num_channels=3,
                                    num_filters=3,
                                    filter_size=[2, 2],
                                    param_attr=weight_attr)
                dy_ret1 = conv2d1(base.to_variable(images))
                dy_ret2 = conv2d2(base.to_variable(images))
                self.assertFalse(
                    np.array_equal(dy_ret1.numpy(), dy_ret2.numpy()))

                conv2d1_weight_np = conv2d1.weight.numpy()
                conv2d1_bias = conv2d1.bias
                self.assertFalse(
                    np.array_equal(conv2d1_weight_np, conv2d2.weight.numpy()))
                conv2d2.weight.set_value(conv2d1_weight_np)
                np.testing.assert_array_equal(conv2d1_weight_np,
                                              conv2d2.weight.numpy())
                conv2d2.bias.set_value(conv2d1_bias)
                dy_ret1 = conv2d1(base.to_variable(images))
                dy_ret2 = conv2d2(base.to_variable(images))
                np.testing.assert_array_equal(dy_ret1.numpy(), dy_ret2.numpy())

                conv2d2.weight = conv2d1.weight
                conv2d2.bias = conv2d1.bias
                np.testing.assert_array_equal(conv2d1.weight.numpy(),
                                              conv2d2.weight.numpy())
                np.testing.assert_array_equal(conv2d1.bias.numpy(),
                                              conv2d2.bias.numpy())

            images = np.ones([2, 3, 5, 5], dtype='float32')
            custom_weight = np.random.randn(3, 3, 2, 2).astype("float32")
            weight_attr = fluid.ParamAttr(initializer=fluid.initializer.
                                          NumpyArrayInitializer(custom_weight))
            conv2d1 = nn.Conv2D(num_channels=3,
                                num_filters=3,
                                filter_size=[2, 2])
            conv2d2 = nn.Conv2D(num_channels=3,
                                num_filters=3,
                                filter_size=[2, 2],
                                param_attr=weight_attr)
            dy_ret1 = conv2d1(base.to_variable(images))
            dy_ret2 = conv2d2(base.to_variable(images))
            self.assertFalse(np.array_equal(dy_ret1.numpy(), dy_ret2.numpy()))

            conv2d1_weight_np = conv2d1.weight.numpy()
            conv2d1_bias = conv2d1.bias
            self.assertFalse(
                np.array_equal(conv2d1_weight_np, conv2d2.weight.numpy()))
            conv2d2.weight.set_value(conv2d1_weight_np)
            np.testing.assert_array_equal(conv2d1_weight_np,
                                          conv2d2.weight.numpy())
            conv2d2.bias.set_value(conv2d1_bias)
            dy_ret1 = conv2d1(base.to_variable(images))
            dy_ret2 = conv2d2(base.to_variable(images))
            np.testing.assert_array_equal(dy_ret1.numpy(), dy_ret2.numpy())

            conv2d2.weight = conv2d1.weight
            conv2d2.bias = conv2d1.bias
            np.testing.assert_array_equal(conv2d1.weight.numpy(),
                                          conv2d2.weight.numpy())
            np.testing.assert_array_equal(conv2d1.bias.numpy(),
                                          conv2d2.bias.numpy())

    def test_gru_unit(self):
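        # Checks nn.GRUUnit against layers.gru_unit on a small LoD batch,
        # then parameter sharing between two GRUUnit instances.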
        lod = [[2, 4, 3]]
        D = 5
        T = sum(lod[0])
        N = len(lod[0])

        input = np.random.rand(T, 3 * D).astype('float32')
        hidden_input = np.random.rand(T, D).astype('float32')

        with self.static_graph():
            x = layers.data(name='x', shape=[-1, D * 3], dtype='float32')
            hidden = layers.data(name='hidden', shape=[-1, D], dtype='float32')
            updated_hidden, reset_hidden_pre, gate = layers.gru_unit(
                input=x, hidden=hidden, size=D * 3)
            static_ret = self.get_static_graph_result(
                feed={
                    'x': input,
                    'hidden': hidden_input
                },
                fetch_list=[updated_hidden, reset_hidden_pre, gate])

        with self.static_graph():
            x = layers.data(name='x', shape=[-1, D * 3], dtype='float32')
            hidden = layers.data(name='hidden', shape=[-1, D], dtype='float32')
            updated_hidden, reset_hidden_pre, gate = layers.gru_unit(
                input=x, hidden=hidden, size=D * 3)
            gru = nn.GRUUnit(size=D * 3)
            updated_hidden, reset_hidden_pre, gate = gru(x, hidden)

            static_ret2 = self.get_static_graph_result(
                feed={
                    'x': input,
                    'hidden': hidden_input
                },
                fetch_list=[updated_hidden, reset_hidden_pre, gate])

        with self.dynamic_graph():
            with _test_eager_guard():
                gru = nn.GRUUnit(size=D * 3)
                dy_eager_ret = gru(base.to_variable(input),
                                   base.to_variable(hidden_input))
                dy_eager_ret_value = []
                for i in range(len(static_ret)):
                    dy_eager_ret_value.append(dy_eager_ret[i].numpy())

            gru = nn.GRUUnit(size=D * 3)
            dy_ret = gru(base.to_variable(input),
                         base.to_variable(hidden_input))
            dy_ret_value = []
            for i in range(len(static_ret)):
                dy_ret_value.append(dy_ret[i].numpy())

        for i in range(len(static_ret)):
            np.testing.assert_allclose(static_ret[i],
                                       static_ret2[i],
                                       rtol=1e-05)
            np.testing.assert_allclose(static_ret[i],
                                       dy_ret_value[i],
                                       rtol=1e-05)
            np.testing.assert_allclose(static_ret[i],
                                       dy_eager_ret_value[i],
                                       rtol=1e-05)

        with self.dynamic_graph():
            with _test_eager_guard():
                custom_weight = np.random.randn(D, D * 3).astype("float32")
                weight_attr = fluid.ParamAttr(
                    initializer=fluid.initializer.NumpyArrayInitializer(
                        custom_weight))
                gru1 = nn.GRUUnit(size=D * 3)
                gru2 = nn.GRUUnit(size=D * 3, param_attr=weight_attr)
                dy_ret1 = gru1(base.to_variable(input),
                               base.to_variable(hidden_input))
                dy_ret2 = gru2(base.to_variable(input),
                               base.to_variable(hidden_input))
                self.assertFalse(
                    np.array_equal(gru1.weight.numpy(), gru2.weight.numpy()))
                for o1, o2 in zip(dy_ret1, dy_ret2):
                    self.assertFalse(np.array_equal(o1.numpy(), o2.numpy()))
                gru2.weight.set_value(gru1.weight.numpy())
                gru2.bias.set_value(gru1.bias)
                dy_ret1 = gru1(base.to_variable(input),
                               base.to_variable(hidden_input))
                dy_ret2 = gru2(base.to_variable(input),
                               base.to_variable(hidden_input))
                for o1, o2 in zip(dy_ret1, dy_ret2):
                    np.testing.assert_array_equal(o1.numpy(), o2.numpy())

                gru2.weight = gru1.weight
                gru2.bias = gru1.bias
                np.testing.assert_array_equal(gru1.weight.numpy(),
                                              gru2.weight.numpy())
                np.testing.assert_array_equal(gru1.bias.numpy(),
                                              gru2.bias.numpy())

            custom_weight = np.random.randn(D, D * 3).astype("float32")
            weight_attr = fluid.ParamAttr(initializer=fluid.initializer.
                                          NumpyArrayInitializer(custom_weight))
            gru1 = nn.GRUUnit(size=D * 3)
            gru2 = nn.GRUUnit(size=D * 3, param_attr=weight_attr)
            dy_ret1 = gru1(base.to_variable(input),
                           base.to_variable(hidden_input))
            dy_ret2 = gru2(base.to_variable(input),
                           base.to_variable(hidden_input))
            self.assertFalse(
                np.array_equal(gru1.weight.numpy(), gru2.weight.numpy()))
            for o1, o2 in zip(dy_ret1, dy_ret2):
                self.assertFalse(np.array_equal(o1.numpy(), o2.numpy()))
            gru2.weight.set_value(gru1.weight.numpy())
            gru2.bias.set_value(gru1.bias)
            dy_ret1 = gru1(base.to_variable(input),
                           base.to_variable(hidden_input))
            dy_ret2 = gru2(base.to_variable(input),
                           base.to_variable(hidden_input))
            for o1, o2 in zip(dy_ret1, dy_ret2):
                np.testing.assert_array_equal(o1.numpy(), o2.numpy())

            gru2.weight = gru1.weight
            gru2.bias = gru1.bias
            np.testing.assert_array_equal(gru1.weight.numpy(),
                                          gru2.weight.numpy())
            np.testing.assert_array_equal(gru1.bias.numpy(), gru2.bias.numpy())

    def test_elementwise_math(self):
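        # Chains add/pow/div/sub/mul and compares static, dygraph and eager
        # results.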
        n = np.ones([3, 3], dtype='float32')
        n2 = np.ones([3, 3], dtype='float32') * 1.1
        n3 = np.ones([3, 3], dtype='float32') * 2
        n4 = np.ones([3, 3], dtype='float32') * 3
        n5 = np.ones([3, 3], dtype='float32') * 4
        n6 = np.ones([3, 3], dtype='float32') * 5

        with self.static_graph():
            t = layers.data(name='t', shape=[3, 3], dtype='float32')
            t2 = layers.data(name='t2', shape=[3, 3], dtype='float32')
            t3 = layers.data(name='t3', shape=[3, 3], dtype='float32')
            t4 = layers.data(name='t4', shape=[3, 3], dtype='float32')
            t5 = layers.data(name='t5', shape=[3, 3], dtype='float32')
            t6 = layers.data(name='t6', shape=[3, 3], dtype='float32')

            ret = layers.elementwise_add(t, t2)
            ret = layers.elementwise_pow(ret, t3)
            ret = layers.elementwise_div(ret, t4)
            ret = layers.elementwise_sub(ret, t5)
            ret = layers.elementwise_mul(ret, t6)

            static_ret = self.get_static_graph_result(feed={
                't': n,
                't2': n2,
                't3': n3,
                't4': n4,
                't5': n5,
                't6': n6
            },
                                                      fetch_list=[ret])[0]

        with self.dynamic_graph():
            with _test_eager_guard():
                ret = layers.elementwise_add(to_variable(n), to_variable(n2))
                ret = layers.elementwise_pow(ret, to_variable(n3))
                ret = layers.elementwise_div(ret, to_variable(n4))
                ret = layers.elementwise_sub(ret, to_variable(n5))
                dy_eager_ret = layers.elementwise_mul(ret, to_variable(n6))
                dy_eager_ret_value = dy_eager_ret.numpy()

            ret = layers.elementwise_add(to_variable(n), to_variable(n2))
            ret = layers.elementwise_pow(ret, to_variable(n3))
            ret = layers.elementwise_div(ret, to_variable(n4))
            ret = layers.elementwise_sub(ret, to_variable(n5))
            dy_ret = layers.elementwise_mul(ret, to_variable(n6))
            dy_ret_value = dy_ret.numpy()

        np.testing.assert_allclose(static_ret, dy_ret_value, rtol=1e-05)
        np.testing.assert_allclose(static_ret, dy_eager_ret_value, rtol=1e-05)

    def test_elementwise_minmax(self):
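        # elementwise_min/max of ones vs. twos should return the smaller and
        # the larger operand respectively.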
        n = np.ones([3, 3], dtype='float32')
        n2 = np.ones([3, 3], dtype='float32') * 2

        with self.dynamic_graph():
            with _test_eager_guard():
                min_eager_ret = layers.elementwise_min(to_variable(n),
                                                       to_variable(n2))
                max_eager_ret = layers.elementwise_max(to_variable(n),
                                                       to_variable(n2))
                min_eager_ret_value = min_eager_ret.numpy()
                max_eager_ret_value = max_eager_ret.numpy()

            min_ret = layers.elementwise_min(to_variable(n), to_variable(n2))
            max_ret = layers.elementwise_max(to_variable(n), to_variable(n2))
            min_ret_value = min_ret.numpy()
            max_ret_value = max_ret.numpy()

        np.testing.assert_allclose(n, min_ret_value, rtol=1e-05)
        np.testing.assert_allclose(n2, max_ret_value, rtol=1e-05)
        np.testing.assert_allclose(n, min_eager_ret_value, rtol=1e-05)
        np.testing.assert_allclose(n2, max_eager_ret_value, rtol=1e-05)

    def test_sequence_conv(self):
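        # sequence_conv on a LoD input: the functional form and the
        # SequenceConv layer should match (static graph only).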
        inp_np = np.arange(12).reshape([3, 4]).astype('float32')
        if core.is_compiled_with_cuda():
            place = core.CUDAPlace(0)
        else:
            place = core.CPUPlace()
        with self.static_graph():
            seq = layers.data(name='seq_in',
                              shape=[3, 4],
                              dtype='float32',
                              lod_level=1,
                              append_batch_size=False)
            out = layers.sequence_conv(seq, 2, act='sigmoid')
            static_rlt = self.get_static_graph_result(feed={
                "seq_in":
                fluid.create_lod_tensor(data=inp_np,
                                        recursive_seq_lens=[[1, 1, 1]],
                                        place=place)
            },
                                                      fetch_list=[out],
                                                      with_lod=True)[0]

        with self.static_graph():
            seq = layers.data(name='seq_in',
                              shape=[3, 4],
                              dtype='float32',
                              lod_level=1,
                              append_batch_size=False)
            seq_conv = nn.SequenceConv('seq_conv', num_filters=2, act='sigmoid')
            out = seq_conv(seq)
            static_rlt2 = self.get_static_graph_result(feed={
                "seq_in":
                fluid.create_lod_tensor(data=inp_np,
                                        recursive_seq_lens=[[1, 1, 1]],
                                        place=place)
            },
                                                       fetch_list=[out],
                                                       with_lod=True)[0]
        np.testing.assert_array_equal(np.array(static_rlt),
                                      np.array(static_rlt2))

    def test_conv2d_transpose(self):
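        # Conv2DTranspose: functional vs. Layer form, parameter sharing, and
        # input type/dtype validation.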
        inp_np = np.arange(0, 24).reshape([2, 3, 2, 2]).astype('float32')
        with self.static_graph():
            img = layers.data(name='pixel', shape=[3, 2, 2], dtype='float32')
            out = layers.conv2d_transpose(
                input=img,
                num_filters=10,
                filter_size=27,
                act='sigmoid',
                bias_attr=fluid.initializer.ConstantInitializer(value=1))
            static_rlt = self.get_static_graph_result(feed={'pixel': inp_np},
                                                      fetch_list=[out])[0]
        with self.static_graph():
            img = layers.data(name='pixel', shape=[3, 2, 2], dtype='float32')
            conv2d_transpose = nn.Conv2DTranspose(
                num_channels=3,
                num_filters=10,
                filter_size=27,
                act='sigmoid',
                bias_attr=fluid.initializer.ConstantInitializer(value=1))
            out = conv2d_transpose(img)
            static_rlt2 = self.get_static_graph_result(feed={'pixel': inp_np},
                                                       fetch_list=[out])[0]
        with self.dynamic_graph():
            with _test_eager_guard():
                conv2d_transpose = nn.Conv2DTranspose(
                    num_channels=3,
                    num_filters=10,
                    filter_size=27,
                    act='sigmoid',
                    bias_attr=fluid.initializer.ConstantInitializer(value=1))
                dy_eager_rlt = conv2d_transpose(base.to_variable(inp_np))
                dy_eager_rlt_value = dy_eager_rlt.numpy()

            conv2d_transpose = nn.Conv2DTranspose(
                num_channels=3,
                num_filters=10,
                filter_size=27,
                act='sigmoid',
                bias_attr=fluid.initializer.ConstantInitializer(value=1))
            dy_rlt = conv2d_transpose(base.to_variable(inp_np))
            dy_rlt_value = dy_rlt.numpy()
        np.testing.assert_allclose(static_rlt2, static_rlt, rtol=1e-05)
        np.testing.assert_allclose(dy_rlt_value, static_rlt2, rtol=1e-05)
        np.testing.assert_allclose(dy_eager_rlt_value, static_rlt2, rtol=1e-05)

        with self.dynamic_graph():
            with _test_eager_guard():
                images = np.ones([2, 3, 5, 5], dtype='float32')
                custom_weight = np.random.randn(3, 3, 2, 2).astype("float32")
                weight_attr = fluid.ParamAttr(
                    initializer=fluid.initializer.NumpyArrayInitializer(
                        custom_weight))
                conv2d1 = nn.Conv2DTranspose(num_channels=3,
                                             num_filters=3,
                                             filter_size=[2, 2])
                conv2d2 = nn.Conv2DTranspose(num_channels=3,
                                             num_filters=3,
                                             filter_size=[2, 2],
                                             param_attr=weight_attr)
                dy_ret1 = conv2d1(base.to_variable(images))
                dy_ret2 = conv2d2(base.to_variable(images))
                self.assertFalse(
                    np.array_equal(dy_ret1.numpy(), dy_ret2.numpy()))

                conv2d1_weight_np = conv2d1.weight.numpy()
                conv2d1_bias = conv2d1.bias
                self.assertFalse(
                    np.array_equal(conv2d1_weight_np, conv2d2.weight.numpy()))
                conv2d2.weight.set_value(conv2d1_weight_np)
                np.testing.assert_array_equal(conv2d1_weight_np,
                                              conv2d2.weight.numpy())
                conv2d2.bias.set_value(conv2d1_bias)
                dy_ret1 = conv2d1(base.to_variable(images))
                dy_ret2 = conv2d2(base.to_variable(images))
                np.testing.assert_array_equal(dy_ret1.numpy(), dy_ret2.numpy())

                conv2d2.weight = conv2d1.weight
                conv2d2.bias = conv2d1.bias
                np.testing.assert_array_equal(conv2d1.weight.numpy(),
                                              conv2d2.weight.numpy())
                np.testing.assert_array_equal(conv2d1.bias.numpy(),
                                              conv2d2.bias.numpy())

            images = np.ones([2, 3, 5, 5], dtype='float32')
            custom_weight = np.random.randn(3, 3, 2, 2).astype("float32")
            weight_attr = fluid.ParamAttr(initializer=fluid.initializer.
                                          NumpyArrayInitializer(custom_weight))
            conv2d1 = nn.Conv2DTranspose(num_channels=3,
                                         num_filters=3,
                                         filter_size=[2, 2])
            conv2d2 = nn.Conv2DTranspose(num_channels=3,
                                         num_filters=3,
                                         filter_size=[2, 2],
                                         param_attr=weight_attr)
            dy_ret1 = conv2d1(base.to_variable(images))
            dy_ret2 = conv2d2(base.to_variable(images))
            self.assertFalse(np.array_equal(dy_ret1.numpy(), dy_ret2.numpy()))

            conv2d1_weight_np = conv2d1.weight.numpy()
            conv2d1_bias = conv2d1.bias
            self.assertFalse(
                np.array_equal(conv2d1_weight_np, conv2d2.weight.numpy()))
            conv2d2.weight.set_value(conv2d1_weight_np)
            np.testing.assert_array_equal(conv2d1_weight_np,
                                          conv2d2.weight.numpy())
            conv2d2.bias.set_value(conv2d1_bias)
            dy_ret1 = conv2d1(base.to_variable(images))
            dy_ret2 = conv2d2(base.to_variable(images))
            np.testing.assert_array_equal(dy_ret1.numpy(), dy_ret2.numpy())

            conv2d2.weight = conv2d1.weight
            conv2d2.bias = conv2d1.bias
            np.testing.assert_array_equal(conv2d1.weight.numpy(),
                                          conv2d2.weight.numpy())
            np.testing.assert_array_equal(conv2d1.bias.numpy(),
                                          conv2d2.bias.numpy())

        with self.static_graph():

            # the input of Conv2DTranspose must be a Variable.
            def test_Variable():
                images = np.ones([2, 3, 5, 5], dtype='float32')
                conv2d = nn.Conv2DTranspose(num_channels=3,
                                            num_filters=3,
                                            filter_size=[2, 2])
                conv2d_ret1 = conv2d(images)

            self.assertRaises(TypeError, test_Variable)

            # the input dtype of Conv2DTranspose must be float16, float32 or float64
            # float16 can only be set on a GPU place
            def test_type():
                images = layers.data(name='pixel',
                                     shape=[3, 5, 5],
                                     dtype='int32')
                conv2d = nn.Conv2DTranspose(num_channels=3,
                                            num_filters=3,
                                            filter_size=[2, 2])
                conv2d_ret2 = conv2d(images)

            self.assertRaises(TypeError, test_type)

    def test_bilinear_tensor_product(self):
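        # BilinearTensorProduct with and without bias, plus parameter sharing
        # between two instances.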
        inp_np_x = np.array([[1, 2, 3]]).astype('float32')
        inp_np_y = np.array([[4, 5, 6]]).astype('float32')

        with self.static_graph():
            data_x = layers.data(name='x',
                                 shape=[1, 3],
                                 dtype="float32",
                                 append_batch_size=False)
            data_y = layers.data(name='y',
                                 shape=[1, 3],
                                 dtype="float32",
                                 append_batch_size=False)
            out = layers.bilinear_tensor_product(
                data_x,
                data_y,
                6,
                bias_attr=fluid.initializer.ConstantInitializer(value=1),
                act='sigmoid')

            static_rlt = self.get_static_graph_result(feed={
                'x': inp_np_x,
                'y': inp_np_y
            },
                                                      fetch_list=[out])[0]

        with self.static_graph():
            data_x = layers.data(name='x',
                                 shape=[1, 3],
                                 dtype="float32",
                                 append_batch_size=False)
            data_y = layers.data(name='y',
                                 shape=[1, 3],
                                 dtype="float32",
                                 append_batch_size=False)
            btp = nn.BilinearTensorProduct(
                3,
                3,
                6,
                bias_attr=fluid.initializer.ConstantInitializer(value=1),
                act='sigmoid')
            out = btp(data_x, data_y)
            static_rlt2 = self.get_static_graph_result(feed={
                'x': inp_np_x,
                'y': inp_np_y
            },
                                                       fetch_list=[out])[0]
        with self.dynamic_graph():
            with _test_eager_guard():
                btp = nn.BilinearTensorProduct(
                    3,
                    3,
                    6,
                    bias_attr=fluid.initializer.ConstantInitializer(value=1),
                    act='sigmoid')
                dy_eager_rlt = btp(base.to_variable(inp_np_x),
                                   base.to_variable(inp_np_y))
                dy_eager_rlt_value = dy_eager_rlt.numpy()

            btp = nn.BilinearTensorProduct(
                3,
                3,
                6,
                bias_attr=fluid.initializer.ConstantInitializer(value=1),
                act='sigmoid')
            dy_rlt = btp(base.to_variable(inp_np_x), base.to_variable(inp_np_y))
            dy_rlt_value = dy_rlt.numpy()

        with self.dynamic_graph():
            with _test_eager_guard():
                btp2 = nn.BilinearTensorProduct(3, 3, 6, act='sigmoid')
                dy_eager_rlt2 = btp2(base.to_variable(inp_np_x),
                                     base.to_variable(inp_np_y))
                dy_eager_rlt2_value = dy_eager_rlt2.numpy()

            btp2 = nn.BilinearTensorProduct(3, 3, 6, act='sigmoid')
            dy_rlt2 = btp2(base.to_variable(inp_np_x),
                           base.to_variable(inp_np_y))
            dy_rlt2_value = dy_rlt2.numpy()

        with self.static_graph():
            data_x2 = layers.data(name='x',
                                  shape=[1, 3],
                                  dtype="float32",
                                  append_batch_size=False)
            data_y2 = layers.data(name='y',
                                  shape=[1, 3],
                                  dtype="float32",
                                  append_batch_size=False)
            out2 = layers.bilinear_tensor_product(data_x2,
                                                  data_y2,
                                                  6,
                                                  act='sigmoid')

            static_rlt3 = self.get_static_graph_result(feed={
                'x': inp_np_x,
                'y': inp_np_y
            },
                                                       fetch_list=[out2])[0]

        np.testing.assert_array_equal(dy_rlt2_value, static_rlt3)
        np.testing.assert_array_equal(dy_eager_rlt2_value, static_rlt3)
        np.testing.assert_array_equal(static_rlt2, static_rlt)
        np.testing.assert_array_equal(dy_rlt_value, static_rlt)
        np.testing.assert_array_equal(dy_eager_rlt_value, static_rlt)

        with self.dynamic_graph():
            with _test_eager_guard():
                custom_weight = np.random.randn(6, 3, 3).astype("float32")
                weight_attr = fluid.ParamAttr(
                    initializer=fluid.initializer.NumpyArrayInitializer(
                        custom_weight))
                btp1 = nn.BilinearTensorProduct(3, 3, 6, act='sigmoid')
                btp2 = nn.BilinearTensorProduct(3,
                                                3,
                                                6,
                                                act='sigmoid',
                                                param_attr=weight_attr)
                dy_rlt1 = btp1(base.to_variable(inp_np_x),
                               base.to_variable(inp_np_y))
                dy_rlt2 = btp2(base.to_variable(inp_np_x),
                               base.to_variable(inp_np_y))
                self.assertFalse(
                    np.array_equal(dy_rlt1.numpy(), dy_rlt2.numpy()))
                btp2.weight.set_value(btp1.weight.numpy())
                btp2.bias.set_value(btp1.bias)
                dy_rlt1 = btp1(base.to_variable(inp_np_x),
                               base.to_variable(inp_np_y))
                dy_rlt2 = btp2(base.to_variable(inp_np_x),
                               base.to_variable(inp_np_y))
                np.testing.assert_array_equal(dy_rlt1.numpy(), dy_rlt2.numpy())

                btp2.weight = btp1.weight
                btp2.bias = btp1.bias
                np.testing.assert_array_equal(btp1.weight.numpy(),
                                              btp2.weight.numpy())
                np.testing.assert_array_equal(btp1.bias.numpy(),
                                              btp2.bias.numpy())

            custom_weight = np.random.randn(6, 3, 3).astype("float32")
            weight_attr = fluid.ParamAttr(initializer=fluid.initializer.
                                          NumpyArrayInitializer(custom_weight))
            btp1 = nn.BilinearTensorProduct(3, 3, 6, act='sigmoid')
            btp2 = nn.BilinearTensorProduct(3,
                                            3,
                                            6,
                                            act='sigmoid',
                                            param_attr=weight_attr)
            dy_rlt1 = btp1(base.to_variable(inp_np_x),
                           base.to_variable(inp_np_y))
            dy_rlt2 = btp2(base.to_variable(inp_np_x),
                           base.to_variable(inp_np_y))
            self.assertFalse(np.array_equal(dy_rlt1.numpy(), dy_rlt2.numpy()))
            btp2.weight.set_value(btp1.weight.numpy())
            btp2.bias.set_value(btp1.bias)
            dy_rlt1 = btp1(base.to_variable(inp_np_x),
                           base.to_variable(inp_np_y))
            dy_rlt2 = btp2(base.to_variable(inp_np_x),
                           base.to_variable(inp_np_y))
            np.testing.assert_array_equal(dy_rlt1.numpy(), dy_rlt2.numpy())

            btp2.weight = btp1.weight
            btp2.bias = btp1.bias
            np.testing.assert_array_equal(btp1.weight.numpy(),
                                          btp2.weight.numpy())
            np.testing.assert_array_equal(btp1.bias.numpy(), btp2.bias.numpy())

    def prelu_test(self, mode):
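        # Shared helper, presumably driven by per-mode tests elsewhere in the
        # file: functional prelu vs. nn.PRelu, then weight sharing between
        # two PRelu instances.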
        inp_np = np.ones([5, 200, 100, 100]).astype('float32')
        with self.static_graph():
            data_t = layers.data(name="input",
                                 shape=[5, 200, 100, 100],
                                 dtype="float32",
                                 append_batch_size=False)
            out = layers.prelu(data_t,
                               mode,
                               param_attr=ParamAttr(initializer=Constant(1.0)))
            static_rlt = self.get_static_graph_result(feed={"input": inp_np},
                                                      fetch_list=[out])[0]
1146 1147

        with self.static_graph():
1148 1149 1150 1151 1152 1153 1154 1155
            data_t = layers.data(name="input",
                                 shape=[5, 200, 100, 100],
                                 dtype="float32",
                                 append_batch_size=False)
            prelu = nn.PRelu(mode=mode,
                             channel=inp_np.shape[1],
                             input_shape=data_t.shape,
                             param_attr=ParamAttr(initializer=Constant(1.0)))
1156
            out = prelu(data_t)
1157 1158
            static_rlt2 = self.get_static_graph_result(feed={"input": inp_np},
                                                       fetch_list=[out])[0]
1159 1160

        with self.dynamic_graph():
1161 1162 1163 1164 1165 1166 1167 1168 1169
            with _test_eager_guard():
                prelu = nn.PRelu(
                    mode=mode,
                    channel=inp_np.shape[1],
                    input_shape=inp_np.shape,
                    param_attr=ParamAttr(initializer=Constant(1.0)))
                dy_eager_rlt = prelu(base.to_variable(inp_np))
                dy_eager_rlt_value = dy_eager_rlt.numpy()

1170 1171 1172 1173
            prelu = nn.PRelu(mode=mode,
                             channel=inp_np.shape[1],
                             input_shape=inp_np.shape,
                             param_attr=ParamAttr(initializer=Constant(1.0)))
1174
            dy_rlt = prelu(base.to_variable(inp_np))
1175
            dy_rlt_value = dy_rlt.numpy()
1176

1177 1178 1179
        np.testing.assert_allclose(static_rlt2, static_rlt, rtol=1e-05)
        np.testing.assert_allclose(dy_rlt_value, static_rlt, rtol=1e-05)
        np.testing.assert_allclose(dy_eager_rlt_value, static_rlt, rtol=1e-05)
1180

1181
        with self.dynamic_graph():
1182 1183 1184 1185 1186 1187 1188 1189 1190 1191 1192 1193 1194 1195 1196 1197
            with _test_eager_guard():
                inp_np = np.random.randn(5, 200, 100, 100).astype("float32")
                inp = base.to_variable(inp_np)
                prelu1 = nn.PRelu(
                    mode=mode,
                    channel=inp_np.shape[1],
                    input_shape=inp_np.shape,
                    param_attr=ParamAttr(initializer=Constant(2.0)))
                prelu2 = nn.PRelu(
                    mode=mode,
                    channel=inp_np.shape[1],
                    input_shape=inp_np.shape,
                    param_attr=ParamAttr(initializer=Constant(1.0)))
                dy_rlt1 = prelu1(inp)
                dy_rlt2 = prelu2(inp)
                self.assertFalse(
1198 1199
                    np.array_equal(prelu1.weight.numpy(),
                                   prelu2.weight.numpy()))
1200 1201 1202 1203 1204
                self.assertFalse(
                    np.array_equal(dy_rlt1.numpy(), dy_rlt2.numpy()))
                prelu2.weight.set_value(prelu1.weight.numpy())
                dy_rlt1 = prelu1(inp)
                dy_rlt2 = prelu2(inp)
1205
                np.testing.assert_array_equal(dy_rlt1.numpy(), dy_rlt2.numpy())
1206 1207

                prelu2.weight = prelu1.weight
1208 1209
                np.testing.assert_array_equal(prelu1.weight.numpy(),
                                              prelu2.weight.numpy())
1210

1211 1212
            inp_np = np.random.randn(5, 200, 100, 100).astype("float32")
            inp = base.to_variable(inp_np)
1213 1214 1215 1216 1217 1218 1219 1220
            prelu1 = nn.PRelu(mode=mode,
                              channel=inp_np.shape[1],
                              input_shape=inp_np.shape,
                              param_attr=ParamAttr(initializer=Constant(2.0)))
            prelu2 = nn.PRelu(mode=mode,
                              channel=inp_np.shape[1],
                              input_shape=inp_np.shape,
                              param_attr=ParamAttr(initializer=Constant(1.0)))
1221 1222 1223 1224 1225 1226 1227 1228
            dy_rlt1 = prelu1(inp)
            dy_rlt2 = prelu2(inp)
            self.assertFalse(
                np.array_equal(prelu1.weight.numpy(), prelu2.weight.numpy()))
            self.assertFalse(np.array_equal(dy_rlt1.numpy(), dy_rlt2.numpy()))
            prelu2.weight.set_value(prelu1.weight.numpy())
            dy_rlt1 = prelu1(inp)
            dy_rlt2 = prelu2(inp)
1229
            np.testing.assert_array_equal(dy_rlt1.numpy(), dy_rlt2.numpy())
1230 1231

            prelu2.weight = prelu1.weight
1232 1233
            np.testing.assert_array_equal(prelu1.weight.numpy(),
                                          prelu2.weight.numpy())
1234

1235 1236 1237 1238 1239
    def test_prelu(self):
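        # Cover all three PRelu modes: a weight per channel, a weight per
        # element, and a single weight shared by all elements.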
        self.prelu_test("channel")
        self.prelu_test("element")
        self.prelu_test("all")

    def test_embedding(self):
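        # layers.embedding (functional) and nn.Embedding (layer) should give
        # the same lookup result; the graphs are seeded identically by
        # LayerTest, so their 'emb.w' parameters initialize the same way.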
        inp_word = np.array([[[1]]]).astype('int64')
        dict_size = 20
        with self.static_graph():
            data_t = layers.data(name='word', shape=[1], dtype='int64')
            emb = layers.embedding(input=data_t,
                                   size=[dict_size, 32],
                                   param_attr='emb.w',
                                   is_sparse=False)
            static_rlt = self.get_static_graph_result(feed={'word': inp_word},
                                                      fetch_list=[emb])[0]
        with self.static_graph():
            data_t = layers.data(name='word', shape=[1], dtype='int64')
            emb2 = nn.Embedding(size=[dict_size, 32],
                                param_attr='emb.w',
                                is_sparse=False)
            emb_rlt = emb2(data_t)
            static_rlt2 = self.get_static_graph_result(feed={'word': inp_word},
                                                       fetch_list=[emb_rlt])[0]
        with self.dynamic_graph():
            with _test_eager_guard():
                emb2 = nn.Embedding(size=[dict_size, 32],
                                    param_attr='eager_emb.w',
                                    is_sparse=False)
                dy_eager_rlt = emb2(base.to_variable(inp_word))
                dy_eager_rlt_value = dy_eager_rlt.numpy()

            emb2 = nn.Embedding(size=[dict_size, 32],
                                param_attr='emb.w',
                                is_sparse=False)
            dy_rlt = emb2(base.to_variable(inp_word))
            dy_rlt_value = dy_rlt.numpy()

        np.testing.assert_allclose(static_rlt2, static_rlt, rtol=1e-05)
        np.testing.assert_allclose(dy_rlt_value, static_rlt, rtol=1e-05)
        np.testing.assert_allclose(dy_eager_rlt_value, static_rlt, rtol=1e-05)

        with self.dynamic_graph():
            with _test_eager_guard():
                custom_weight = np.random.randn(dict_size, 32).astype("float32")
                weight_attr = fluid.ParamAttr(
                    initializer=fluid.initializer.NumpyArrayInitializer(
                        custom_weight))
                emb1 = nn.Embedding(size=[dict_size, 32], is_sparse=False)
                emb2 = nn.Embedding(size=[dict_size, 32],
                                    param_attr=weight_attr,
                                    is_sparse=False)
                rep1 = emb1(base.to_variable(inp_word))
                rep2 = emb2(base.to_variable(inp_word))
                self.assertFalse(
                    np.array_equal(emb1.weight.numpy(), custom_weight))
                np.testing.assert_array_equal(emb2.weight.numpy(),
                                              custom_weight)
                self.assertFalse(np.array_equal(rep1.numpy(), rep2.numpy()))
                emb2.weight.set_value(emb1.weight.numpy())
                rep2 = emb2(base.to_variable(inp_word))
                np.testing.assert_array_equal(rep1.numpy(), rep2.numpy())

                emb2.weight = emb1.weight
                np.testing.assert_array_equal(emb1.weight.numpy(),
                                              emb2.weight.numpy())

            custom_weight = np.random.randn(dict_size, 32).astype("float32")
            weight_attr = fluid.ParamAttr(initializer=fluid.initializer.
                                          NumpyArrayInitializer(custom_weight))
            emb1 = nn.Embedding(size=[dict_size, 32], is_sparse=False)
            emb2 = nn.Embedding(size=[dict_size, 32],
                                param_attr=weight_attr,
                                is_sparse=False)
            rep1 = emb1(base.to_variable(inp_word))
            rep2 = emb2(base.to_variable(inp_word))
            self.assertFalse(np.array_equal(emb1.weight.numpy(), custom_weight))
            np.testing.assert_array_equal(emb2.weight.numpy(), custom_weight)
            self.assertFalse(np.array_equal(rep1.numpy(), rep2.numpy()))
            emb2.weight.set_value(emb1.weight.numpy())
            rep2 = emb2(base.to_variable(inp_word))
            np.testing.assert_array_equal(rep1.numpy(), rep2.numpy())

            emb2.weight = emb1.weight
            np.testing.assert_array_equal(emb1.weight.numpy(),
                                          emb2.weight.numpy())

    def test_nce(self):
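        # NCE over a word window: context-word embeddings are concatenated
        # and scored against the label word, with negative samples drawn
        # from a custom (Dirichlet-sampled) distribution.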
        window_size = 5
        dict_size = 20
        label_word = int(window_size // 2) + 1
        inp_word = np.array([[1], [2], [3], [4], [5]]).astype('int64')
        nid_freq_arr = np.random.dirichlet(np.ones(20) * 1000).astype('float32')
        seed = 1
        with self.static_graph():
            words = []
            for i in range(window_size):
                words.append(
                    layers.data(name='word_{0}'.format(i),
                                shape=[None],
                                dtype='int64'))
            sample_weights = layers.fill_constant(shape=[5, 1],
                                                  dtype='float32',
                                                  value=1)
            embs = []
            for i in range(window_size):
                if i == label_word:
                    continue

                emb = fluid.embedding(input=words[i],
                                      size=[dict_size, 32],
                                      param_attr='emb.w',
                                      is_sparse=False)
                embs.append(emb)

            embs = layers.concat(input=embs, axis=1)
            wl = fluid.layers.unsqueeze(words[label_word], axes=[0])
            nce_loss = layers.nce(input=embs,
                                  label=wl,
                                  num_total_classes=dict_size,
                                  num_neg_samples=2,
                                  sampler="custom_dist",
                                  custom_dist=nid_freq_arr.tolist(),
                                  seed=seed,
                                  param_attr='nce.w',
                                  bias_attr='nce.b',
                                  sample_weight=sample_weights)
            feed_dict = dict()
            for i in range(window_size):
                feed_dict['word_{0}'.format(i)] = inp_word[i]
            static_rlt = self.get_static_graph_result(feed=feed_dict,
                                                      fetch_list=[nce_loss])[0]

        with self.static_graph():
            words = []
            for i in range(window_size):
                words.append(
                    layers.data(name='word_{0}'.format(i),
                                shape=[None],
                                dtype='int64'))
            sample_weights = layers.fill_constant(shape=[5, 1],
                                                  dtype='float32',
                                                  value=1)
            emb = nn.Embedding(size=[dict_size, 32],
                               param_attr='emb.w',
                               is_sparse=False)

            embs2 = []
            for i in range(window_size):
                if i == label_word:
                    continue

                emb_rlt = emb(words[i])
                embs2.append(emb_rlt)

            embs2 = layers.concat(input=embs2, axis=1)
            nce = nn.NCE(num_total_classes=dict_size,
                         dim=embs2.shape[1],
                         num_neg_samples=2,
                         sampler="custom_dist",
                         custom_dist=nid_freq_arr.tolist(),
                         seed=seed,
                         param_attr='nce.w',
                         bias_attr='nce.b',
                         sample_weight=sample_weights)

            wl = fluid.layers.unsqueeze(words[label_word], axes=[0])
            nce_loss2 = nce(embs2, wl)
            feed_dict = dict()
            for i in range(len(words)):
                feed_dict['word_{0}'.format(i)] = inp_word[i]

            static_rlt2 = self.get_static_graph_result(feed=feed_dict,
                                                       fetch_list=[nce_loss2
                                                                   ])[0]

        with self.dynamic_graph():
            with _test_eager_guard():
                words = []
                for i in range(window_size):
                    words.append(base.to_variable(inp_word[i]))
                sample_weights = layers.fill_constant(shape=[5, 1],
                                                      dtype='float32',
                                                      value=1)
                emb = nn.Embedding(size=[dict_size, 32],
                                   param_attr='eager_emb.w',
                                   is_sparse=False)

                embs3 = []
                for i in range(window_size):
                    if i == label_word:
                        continue

                    emb_rlt = emb(words[i])
                    embs3.append(emb_rlt)

                embs3 = layers.concat(input=embs3,
                                      axis=fluid.dygraph.to_variable(
                                          np.array([1])))
                nce = nn.NCE(num_total_classes=dict_size,
                             dim=embs3.shape[1],
                             num_neg_samples=2,
                             sampler="custom_dist",
                             custom_dist=nid_freq_arr.tolist(),
                             seed=seed,
                             param_attr='eager_nce.w',
                             bias_attr='eager_nce.b',
                             sample_weight=sample_weights)

                wl = fluid.layers.unsqueeze(words[label_word], axes=[0])
                dy_eager_rlt = nce(embs3, wl)
                dy_eager_rlt_value = dy_eager_rlt.numpy()

            words = []
            for i in range(window_size):
                words.append(base.to_variable(inp_word[i]))
            sample_weights = layers.fill_constant(shape=[5, 1],
                                                  dtype='float32',
                                                  value=1)
            emb = nn.Embedding(size=[dict_size, 32],
                               param_attr='emb.w',
                               is_sparse=False)

            embs3 = []
            for i in range(window_size):
                if i == label_word:
                    continue

                emb_rlt = emb(words[i])
                embs3.append(emb_rlt)

            embs3 = layers.concat(input=embs3,
                                  axis=fluid.dygraph.to_variable(np.array([1])))
            nce = nn.NCE(num_total_classes=dict_size,
                         dim=embs3.shape[1],
                         num_neg_samples=2,
                         sampler="custom_dist",
                         custom_dist=nid_freq_arr.tolist(),
                         seed=seed,
                         param_attr='nce.w',
                         bias_attr='nce.b',
                         sample_weight=sample_weights)

            wl = fluid.layers.unsqueeze(words[label_word], axes=[0])
            dy_rlt = nce(embs3, wl)
            dy_rlt_value = dy_rlt.numpy()

        np.testing.assert_allclose(static_rlt2, static_rlt, rtol=1e-05)
        np.testing.assert_allclose(dy_rlt_value, static_rlt, rtol=1e-05)
        np.testing.assert_allclose(dy_eager_rlt_value, static_rlt, rtol=1e-05)

        with self.dynamic_graph():
            with _test_eager_guard():
                custom_weight = np.random.randn(dict_size,
                                                128).astype("float32")
                weight_attr = fluid.ParamAttr(
                    initializer=fluid.initializer.NumpyArrayInitializer(
                        custom_weight))
                words = []
                for i in range(window_size):
                    words.append(base.to_variable(inp_word[i]))
                sample_weights = layers.fill_constant(
                    shape=fluid.dygraph.to_variable(np.array([5, 1])),
                    dtype='float32',
                    value=1)
                emb = nn.Embedding(size=[dict_size, 32],
                                   param_attr='eager_emb.w',
                                   is_sparse=False)

                embs3 = []
                for i in range(window_size):
                    if i == label_word:
                        continue

                    emb_rlt = emb(words[i])
                    embs3.append(emb_rlt)

                embs3 = layers.concat(input=embs3, axis=1)
                nce1 = nn.NCE(num_total_classes=dict_size,
                              dim=embs3.shape[1],
                              num_neg_samples=2,
                              sampler="custom_dist",
                              custom_dist=nid_freq_arr.tolist(),
                              seed=seed,
                              param_attr='eager_nce1.w',
                              bias_attr='eager_nce1.b',
                              sample_weight=sample_weights)

                nce2 = nn.NCE(num_total_classes=dict_size,
                              dim=embs3.shape[1],
                              num_neg_samples=2,
                              sampler="custom_dist",
                              custom_dist=nid_freq_arr.tolist(),
                              seed=seed,
                              param_attr=weight_attr,
                              bias_attr='eager_nce2.b',
                              sample_weight=sample_weights)

                wl = fluid.layers.unsqueeze(words[label_word], axes=[0])
                nce1_loss = nce1(embs3, wl)
                nce2_loss = nce2(embs3, wl)
                self.assertFalse(
                    np.array_equal(nce1_loss.numpy(), nce2_loss.numpy()))
                nce2.weight.set_value(nce1.weight.numpy())
                nce2.bias.set_value(nce1.bias)
                nce1_loss = nce1(embs3, wl)
                nce2_loss = nce2(embs3, wl)
                np.testing.assert_array_equal(nce1_loss.numpy(),
                                              nce2_loss.numpy())

                nce2.weight = nce1.weight
                nce2.bias = nce1.bias
                np.testing.assert_array_equal(nce1.weight.numpy(),
                                              nce2.weight.numpy())
                np.testing.assert_array_equal(nce1.bias.numpy(),
                                              nce2.bias.numpy())

            custom_weight = np.random.randn(dict_size, 128).astype("float32")
            weight_attr = fluid.ParamAttr(initializer=fluid.initializer.
                                          NumpyArrayInitializer(custom_weight))
            words = []
            for i in range(window_size):
                words.append(base.to_variable(inp_word[i]))
            sample_weights = layers.fill_constant(
                shape=fluid.dygraph.to_variable(np.array([5, 1])),
                dtype='float32',
                value=1)
            emb = nn.Embedding(size=[dict_size, 32],
                               param_attr='emb.w',
                               is_sparse=False)

            embs3 = []
            for i in range(window_size):
                if i == label_word:
                    continue

                emb_rlt = emb(words[i])
                embs3.append(emb_rlt)

            embs3 = layers.concat(input=embs3, axis=1)
            nce1 = nn.NCE(num_total_classes=dict_size,
                          dim=embs3.shape[1],
                          num_neg_samples=2,
                          sampler="custom_dist",
                          custom_dist=nid_freq_arr.tolist(),
                          seed=seed,
                          param_attr='nce1.w',
                          bias_attr='nce1.b',
                          sample_weight=sample_weights)

            nce2 = nn.NCE(num_total_classes=dict_size,
                          dim=embs3.shape[1],
                          num_neg_samples=2,
                          sampler="custom_dist",
                          custom_dist=nid_freq_arr.tolist(),
                          seed=seed,
                          param_attr=weight_attr,
                          bias_attr='nce2.b',
                          sample_weight=sample_weights)

            wl = fluid.layers.unsqueeze(words[label_word], axes=[0])
            nce1_loss = nce1(embs3, wl)
            nce2_loss = nce2(embs3, wl)
            self.assertFalse(
                np.array_equal(nce1_loss.numpy(), nce2_loss.numpy()))
            nce2.weight.set_value(nce1.weight.numpy())
            nce2.bias.set_value(nce1.bias)
            nce1_loss = nce1(embs3, wl)
            nce2_loss = nce2(embs3, wl)
            np.testing.assert_array_equal(nce1_loss.numpy(), nce2_loss.numpy())

            nce2.weight = nce1.weight
            nce2.bias = nce1.bias
            np.testing.assert_array_equal(nce1.weight.numpy(),
                                          nce2.weight.numpy())
            np.testing.assert_array_equal(nce1.bias.numpy(), nce2.bias.numpy())

    def test_one_hot(self):
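        # one_hot should give the same result whether `depth` is a Python
        # int or a Variable.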
        with self.dynamic_graph():
            with _test_eager_guard():
                label = fluid.dygraph.to_variable(np.array([[1], [1], [3],
                                                            [0]]))
                one_hot_label1 = fluid.layers.one_hot(input=label, depth=4)
                one_hot_label2 = fluid.layers.one_hot(
                    input=label, depth=fluid.dygraph.to_variable(np.array([4])))
                np.testing.assert_array_equal(one_hot_label1.numpy(),
                                              one_hot_label2.numpy())

            label = fluid.dygraph.to_variable(np.array([[1], [1], [3], [0]]))
            one_hot_label1 = fluid.layers.one_hot(input=label, depth=4)
            one_hot_label2 = fluid.layers.one_hot(
                input=label, depth=fluid.dygraph.to_variable(np.array([4])))
            np.testing.assert_array_equal(one_hot_label1.numpy(),
                                          one_hot_label2.numpy())

    def test_split(self):
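        # split should give the same result whether `dim` is a Python int
        # or a Variable.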
        with self.dynamic_graph():
            with _test_eager_guard():
                input = fluid.dygraph.to_variable(np.random.random((3, 8, 5)))
                x0, x1 = fluid.layers.split(input, num_or_sections=2, dim=1)
                x00, x11 = fluid.layers.split(input,
                                              num_or_sections=2,
                                              dim=fluid.dygraph.to_variable(
                                                  np.array([1])))
                np.testing.assert_array_equal(x0.numpy(), x00.numpy())
                np.testing.assert_array_equal(x1.numpy(), x11.numpy())

            input = fluid.dygraph.to_variable(np.random.random((3, 8, 5)))
            x0, x1 = fluid.layers.split(input, num_or_sections=2, dim=1)
            x00, x11 = fluid.layers.split(input,
                                          num_or_sections=2,
                                          dim=fluid.dygraph.to_variable(
                                              np.array([1])))
            np.testing.assert_array_equal(x0.numpy(), x00.numpy())
            np.testing.assert_array_equal(x1.numpy(), x11.numpy())

    def test_topk(self):
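        # topk should give the same result whether `k` is a Python int or
        # a Variable.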
        with self.dynamic_graph():
            with _test_eager_guard():
                input = fluid.dygraph.to_variable(np.random.random((13, 11)))
                top5_values1, top5_indices1 = layers.topk(input, k=5)
                top5_values2, top5_indices2 = layers.topk(
                    input, k=fluid.dygraph.to_variable(np.array([5])))
                np.testing.assert_array_equal(top5_values1.numpy(),
                                              top5_values2.numpy())
                np.testing.assert_array_equal(top5_indices1.numpy(),
                                              top5_indices2.numpy())

            input = fluid.dygraph.to_variable(np.random.random((13, 11)))
            top5_values1, top5_indices1 = layers.topk(input, k=5)
            top5_values2, top5_indices2 = layers.topk(
                input, k=fluid.dygraph.to_variable(np.array([5])))
            np.testing.assert_array_equal(top5_values1.numpy(),
                                          top5_values2.numpy())
            np.testing.assert_array_equal(top5_indices1.numpy(),
                                          top5_indices2.numpy())

    def test_conv3d(self):
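        # Conv3D should agree across static graph, dygraph, and eager mode;
        # afterwards, parameters are shared between two instances both via
        # set_value() and by assigning weight/bias directly.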
        with self.static_graph():
            images = layers.data(name='pixel',
                                 shape=[3, 6, 6, 6],
                                 dtype='float32')
            ret = layers.conv3d(input=images, num_filters=3, filter_size=2)
            static_ret = self.get_static_graph_result(
                feed={'pixel': np.ones([2, 3, 6, 6, 6], dtype='float32')},
                fetch_list=[ret])[0]

        with self.static_graph():
            images = layers.data(name='pixel',
                                 shape=[3, 6, 6, 6],
                                 dtype='float32')
            conv3d = nn.Conv3D(num_channels=3, num_filters=3, filter_size=2)
            ret = conv3d(images)
            static_ret2 = self.get_static_graph_result(
                feed={'pixel': np.ones([2, 3, 6, 6, 6], dtype='float32')},
                fetch_list=[ret])[0]

        with self.dynamic_graph():
            with _test_eager_guard():
                images = np.ones([2, 3, 6, 6, 6], dtype='float32')
                conv3d = nn.Conv3D(num_channels=3, num_filters=3, filter_size=2)
                dy_eager_ret = conv3d(base.to_variable(images))
                dy_eager_rlt_value = dy_eager_ret.numpy()

            images = np.ones([2, 3, 6, 6, 6], dtype='float32')
            conv3d = nn.Conv3D(num_channels=3, num_filters=3, filter_size=2)
            dy_ret = conv3d(base.to_variable(images))
            dy_rlt_value = dy_ret.numpy()

        np.testing.assert_allclose(static_ret, dy_rlt_value, rtol=1e-05)
        np.testing.assert_allclose(static_ret, dy_eager_rlt_value, rtol=1e-05)
        np.testing.assert_allclose(static_ret, static_ret2, rtol=1e-05)

        with self.dynamic_graph():
            with _test_eager_guard():
                images = np.ones([2, 3, 6, 6, 6], dtype='float32')
                custom_weight = np.random.randn(3, 3, 2, 2, 2).astype("float32")
                weight_attr = fluid.ParamAttr(
                    initializer=fluid.initializer.NumpyArrayInitializer(
                        custom_weight))
                conv3d1 = nn.Conv3D(num_channels=3,
                                    num_filters=3,
                                    filter_size=2)
                conv3d2 = nn.Conv3D(num_channels=3,
                                    num_filters=3,
                                    filter_size=2,
                                    param_attr=weight_attr)
                dy_ret1 = conv3d1(base.to_variable(images))
                dy_ret2 = conv3d2(base.to_variable(images))
                self.assertFalse(
                    np.array_equal(dy_ret1.numpy(), dy_ret2.numpy()))

                conv3d1_weight_np = conv3d1.weight.numpy()
                conv3d1_bias = conv3d1.bias
                self.assertFalse(
                    np.array_equal(conv3d1_weight_np, conv3d2.weight.numpy()))
                conv3d2.weight.set_value(conv3d1_weight_np)
                np.testing.assert_array_equal(conv3d1_weight_np,
                                              conv3d2.weight.numpy())
                conv3d2.bias.set_value(conv3d1_bias)
                dy_ret1 = conv3d1(base.to_variable(images))
                dy_ret2 = conv3d2(base.to_variable(images))
                np.testing.assert_array_equal(dy_ret1.numpy(), dy_ret2.numpy())

                conv3d2.weight = conv3d1.weight
                conv3d2.bias = conv3d1.bias
                np.testing.assert_array_equal(conv3d1.weight.numpy(),
                                              conv3d2.weight.numpy())
                np.testing.assert_array_equal(conv3d1.bias.numpy(),
                                              conv3d2.bias.numpy())

            images = np.ones([2, 3, 6, 6, 6], dtype='float32')
            custom_weight = np.random.randn(3, 3, 2, 2, 2).astype("float32")
            weight_attr = fluid.ParamAttr(initializer=fluid.initializer.
                                          NumpyArrayInitializer(custom_weight))
            conv3d1 = nn.Conv3D(num_channels=3, num_filters=3, filter_size=2)
            conv3d2 = nn.Conv3D(num_channels=3,
                                num_filters=3,
                                filter_size=2,
                                param_attr=weight_attr)
            dy_ret1 = conv3d1(base.to_variable(images))
            dy_ret2 = conv3d2(base.to_variable(images))
            self.assertFalse(np.array_equal(dy_ret1.numpy(), dy_ret2.numpy()))

            conv3d1_weight_np = conv3d1.weight.numpy()
            conv3d1_bias = conv3d1.bias
            self.assertFalse(
                np.array_equal(conv3d1_weight_np, conv3d2.weight.numpy()))
            conv3d2.weight.set_value(conv3d1_weight_np)
            np.testing.assert_array_equal(conv3d1_weight_np,
                                          conv3d2.weight.numpy())
            conv3d2.bias.set_value(conv3d1_bias)
            dy_ret1 = conv3d1(base.to_variable(images))
            dy_ret2 = conv3d2(base.to_variable(images))
            np.testing.assert_array_equal(dy_ret1.numpy(), dy_ret2.numpy())

            conv3d2.weight = conv3d1.weight
            conv3d2.bias = conv3d1.bias
            np.testing.assert_array_equal(conv3d1.weight.numpy(),
                                          conv3d2.weight.numpy())
            np.testing.assert_array_equal(conv3d1.bias.numpy(),
                                          conv3d2.bias.numpy())

    def test_row_conv(self):
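        # RowConv runs on LoDTensor input, so only the two static-graph
        # paths are compared (dygraph does not support LoDTensor; see the
        # TODO below).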
        input = np.arange(15).reshape([3, 5]).astype('float32')
        if core.is_compiled_with_cuda():
            place = core.CUDAPlace(0)
        else:
            place = core.CPUPlace()

        with self.static_graph():
            x = layers.data(name='X',
                            shape=[3, 5],
                            dtype='float32',
                            lod_level=1,
                            append_batch_size=False)
            ret = layers.row_conv(input=x, future_context_size=2)
            static_ret = self.get_static_graph_result(feed={
                'X':
                fluid.create_lod_tensor(data=input,
                                        recursive_seq_lens=[[1, 1, 1]],
                                        place=place)
            },
                                                      fetch_list=[ret],
                                                      with_lod=True)[0]

        with self.static_graph():
            x = layers.data(name='X',
                            shape=[3, 5],
                            dtype='float32',
                            lod_level=1,
                            append_batch_size=False)
            rowConv = nn.RowConv('RowConv', future_context_size=2)
            ret = rowConv(x)
            static_ret2 = self.get_static_graph_result(feed={
                'X':
                fluid.create_lod_tensor(data=input,
                                        recursive_seq_lens=[[1, 1, 1]],
                                        place=place)
            },
                                                       fetch_list=[ret],
                                                       with_lod=True)[0]

        # TODO: dygraph can't support LODTensor

        np.testing.assert_allclose(static_ret, static_ret2, rtol=1e-05)

    def func_group_norm(self):
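        # GroupNorm with 2 groups over 4 channels; the functional op and the
        # layer should match, in both static graph and dygraph modes.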
        if core.is_compiled_with_cuda():
            place = core.CUDAPlace(0)
        else:
            place = core.CPUPlace()

        shape = (2, 4, 3, 3)

        input = np.random.random(shape).astype('float32')

        with self.static_graph():
            X = fluid.layers.data(name='X',
                                  shape=shape,
                                  dtype='float32',
                                  lod_level=1,
                                  append_batch_size=False)
            ret = layers.group_norm(
                input=X,
                groups=2,
                param_attr=fluid.initializer.Uniform(low=-0.5, high=0.5),
                bias_attr=fluid.initializer.ConstantInitializer(value=1))
            static_ret = self.get_static_graph_result(feed={
                'X':
                fluid.create_lod_tensor(data=input,
                                        recursive_seq_lens=[[1, 1]],
                                        place=place)
            },
                                                      fetch_list=[ret],
                                                      with_lod=True)[0]

        with self.static_graph():
            X = fluid.layers.data(name='X',
                                  shape=shape,
                                  dtype='float32',
                                  lod_level=1,
                                  append_batch_size=False)
            groupNorm = nn.GroupNorm(
                channels=shape[1],
                groups=2,
                param_attr=fluid.initializer.Uniform(low=-0.5, high=0.5),
                bias_attr=fluid.initializer.ConstantInitializer(value=1))
            ret = groupNorm(X)
            static_ret2 = self.get_static_graph_result(feed={
                'X':
                fluid.create_lod_tensor(data=input,
                                        recursive_seq_lens=[[1, 1]],
                                        place=place)
            },
                                                       fetch_list=[ret],
                                                       with_lod=True)[0]

        with self.dynamic_graph():
            groupNorm = nn.GroupNorm(
                channels=shape[1],
                groups=2,
                param_attr=fluid.initializer.Uniform(low=-0.5, high=0.5),
                bias_attr=fluid.initializer.ConstantInitializer(value=1))
            dy_ret = groupNorm(base.to_variable(input))
            dy_rlt_value = dy_ret.numpy()

        np.testing.assert_allclose(static_ret, dy_rlt_value, rtol=1e-05)
        np.testing.assert_allclose(static_ret, static_ret2, rtol=1e-05)

    def test_group_norm(self):
        with _test_eager_guard():
            self.func_group_norm()
        self.func_group_norm()

    def test_instance_norm(self):
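        # InstanceNorm should agree across modes, and should reject inputs
        # that are not float32/float64 Variables.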
        if core.is_compiled_with_cuda():
            place = core.CUDAPlace(0)
        else:
            place = core.CPUPlace()

        shape = (2, 4, 3, 3)

        input = np.random.random(shape).astype('float32')

        with self.static_graph():
            X = fluid.layers.data(name='X',
                                  shape=shape,
                                  dtype='float32',
                                  append_batch_size=False)
            ret = layers.instance_norm(input=X)
            static_ret = self.get_static_graph_result(feed={'X': input},
                                                      fetch_list=[ret])[0]

        with self.static_graph():
            X = fluid.layers.data(name='X',
                                  shape=shape,
                                  dtype='float32',
                                  append_batch_size=False)
            instanceNorm = nn.InstanceNorm(num_channels=shape[1])
            ret = instanceNorm(X)
            static_ret2 = self.get_static_graph_result(feed={'X': input},
                                                       fetch_list=[ret])[0]

        with self.dynamic_graph():
            with _test_eager_guard():
                instanceNorm = nn.InstanceNorm(num_channels=shape[1])
                dy_eager_ret = instanceNorm(base.to_variable(input))
                dy_eager_rlt_value = dy_eager_ret.numpy()

            instanceNorm = nn.InstanceNorm(num_channels=shape[1])
            dy_ret = instanceNorm(base.to_variable(input))
            dy_rlt_value = dy_ret.numpy()

        with self.dynamic_graph():
            with _test_eager_guard():
                instanceNorm = nn.InstanceNorm(num_channels=shape[1])
                dy_eager_ret = instanceNorm(base.to_variable(input))
                dy_eager_rlt_value2 = dy_eager_ret.numpy()

            instanceNorm = nn.InstanceNorm(num_channels=shape[1])
            dy_ret = instanceNorm(base.to_variable(input))
            dy_rlt_value2 = dy_ret.numpy()

        np.testing.assert_allclose(static_ret, dy_rlt_value, rtol=1e-05)
        np.testing.assert_allclose(static_ret, dy_rlt_value2, rtol=1e-05)
        np.testing.assert_allclose(static_ret, dy_eager_rlt_value, rtol=1e-05)
        np.testing.assert_allclose(static_ret, dy_eager_rlt_value2, rtol=1e-05)
        np.testing.assert_allclose(static_ret, static_ret2, rtol=1e-05)

        with self.static_graph():
            # the input of InstanceNorm must be Variable.
            def test_Variable():
                instanceNorm = nn.InstanceNorm(num_channels=shape[1])
                ret1 = instanceNorm(input)

            self.assertRaises(TypeError, test_Variable)

            # the input dtype of InstanceNorm must be float32 or float64
            def test_type():
                input = np.random.random(shape).astype('int32')
                instanceNorm = nn.InstanceNorm(num_channels=shape[1])
                ret2 = instanceNorm(input)

            self.assertRaises(TypeError, test_type)

    def test_spectral_norm(self):
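        # SpectralNorm normalizes the weight along dim=1 using two power
        # iterations; the functional op and the layer should match.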
        if core.is_compiled_with_cuda():
            place = core.CUDAPlace(0)
        else:
            place = core.CPUPlace()

        shape = (2, 4, 3, 3)

        input = np.random.random(shape).astype('float32')

        with self.static_graph():
            Weight = fluid.layers.data(name='Weight',
                                       shape=shape,
                                       dtype='float32',
                                       lod_level=1,
                                       append_batch_size=False)
            ret = layers.spectral_norm(weight=Weight, dim=1, power_iters=2)
            static_ret = self.get_static_graph_result(feed={
                'Weight':
                fluid.create_lod_tensor(data=input,
                                        recursive_seq_lens=[[1, 1]],
                                        place=place),
            },
                                                      fetch_list=[ret],
                                                      with_lod=True)[0]

        with self.static_graph():
            Weight = fluid.layers.data(name='Weight',
                                       shape=shape,
                                       dtype='float32',
                                       lod_level=1,
                                       append_batch_size=False)
            spectralNorm = nn.SpectralNorm(shape, dim=1, power_iters=2)
            ret = spectralNorm(Weight)
            static_ret2 = self.get_static_graph_result(feed={
                'Weight':
                fluid.create_lod_tensor(data=input,
                                        recursive_seq_lens=[[1, 1]],
                                        place=place)
            },
                                                       fetch_list=[ret],
                                                       with_lod=True)[0]

        with self.dynamic_graph():
            with _test_eager_guard():
                spectralNorm = nn.SpectralNorm(shape, dim=1, power_iters=2)
                dy_eager_ret = spectralNorm(base.to_variable(input))
                dy_eager_rlt_value = dy_eager_ret.numpy()

            spectralNorm = nn.SpectralNorm(shape, dim=1, power_iters=2)
            dy_ret = spectralNorm(base.to_variable(input))
            dy_rlt_value = dy_ret.numpy()

        np.testing.assert_allclose(static_ret, dy_rlt_value, rtol=1e-05)
        np.testing.assert_allclose(static_ret, dy_eager_rlt_value, rtol=1e-05)
        np.testing.assert_allclose(static_ret, static_ret2, rtol=1e-05)

    def test_tree_conv(self):
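        # TreeConv consumes node feature vectors plus an edge set describing
        # a 10-node tree; checks op/layer agreement and parameter sharing.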
        if core.is_compiled_with_cuda():
            place = core.CUDAPlace(0)
        else:
            place = core.CPUPlace()
        adj_array = [1, 2, 1, 3, 1, 4, 1, 5, 2, 6, 2, 7, 2, 8, 4, 9, 4, 10]
        adj = np.array(adj_array).reshape((1, 9, 2)).astype('int32')
        adj = np.tile(adj, (1, 1, 1))
        vectors = np.random.random((1, 10, 5)).astype('float32')
        with self.static_graph():
            NodesVector = fluid.layers.data(name='NodesVector',
                                            shape=(1, 10, 5),
                                            dtype='float32',
                                            lod_level=1,
                                            append_batch_size=False)
            EdgeSet = fluid.layers.data(name='EdgeSet',
                                        shape=(1, 9, 2),
                                        dtype='int32',
                                        lod_level=1,
                                        append_batch_size=False)
            ret = fluid.contrib.layers.tree_conv(nodes_vector=NodesVector,
                                                 edge_set=EdgeSet,
                                                 output_size=6,
                                                 num_filters=1,
                                                 max_depth=2)
            static_ret = self.get_static_graph_result(feed={
                'NodesVector':
                fluid.create_lod_tensor(data=vectors,
                                        recursive_seq_lens=[[1]],
                                        place=place),
                'EdgeSet':
                fluid.create_lod_tensor(data=adj,
                                        recursive_seq_lens=[[1]],
                                        place=place)
            },
                                                      fetch_list=[ret],
                                                      with_lod=False)[0]

        with self.static_graph():
            NodesVector = fluid.layers.data(name='NodesVector',
                                            shape=(1, 10, 5),
                                            dtype='float32',
                                            lod_level=1,
                                            append_batch_size=False)
            EdgeSet = fluid.layers.data(name='EdgeSet',
                                        shape=(1, 9, 2),
                                        dtype='int32',
                                        lod_level=1,
                                        append_batch_size=False)
            treeConv = nn.TreeConv(feature_size=5,
                                   output_size=6,
                                   num_filters=1,
                                   max_depth=2)
            ret = treeConv(NodesVector, EdgeSet)
            static_ret2 = self.get_static_graph_result(feed={
                'NodesVector':
                fluid.create_lod_tensor(data=vectors,
                                        recursive_seq_lens=[[1]],
                                        place=place),
                'EdgeSet':
                fluid.create_lod_tensor(data=adj,
                                        recursive_seq_lens=[[1]],
                                        place=place)
            },
                                                       fetch_list=[ret],
                                                       with_lod=False)[0]

        with self.dynamic_graph():
            with _test_eager_guard():
                treeConv = nn.TreeConv(feature_size=5,
                                       output_size=6,
                                       num_filters=1,
                                       max_depth=2)
                dy_eager_ret = treeConv(base.to_variable(vectors),
                                        base.to_variable(adj))
                dy_eager_rlt_value = dy_eager_ret.numpy()

            treeConv = nn.TreeConv(feature_size=5,
                                   output_size=6,
                                   num_filters=1,
                                   max_depth=2)
            dy_ret = treeConv(base.to_variable(vectors), base.to_variable(adj))
            dy_rlt_value = dy_ret.numpy()

        np.testing.assert_allclose(static_ret, static_ret2, rtol=1e-05)
        np.testing.assert_allclose(static_ret, dy_rlt_value, rtol=1e-05)
        np.testing.assert_allclose(static_ret, dy_eager_rlt_value, rtol=1e-05)

        with self.dynamic_graph():
            with _test_eager_guard():
                custom_weight = np.random.randn(5, 3, 6, 1).astype("float32")
                weight_attr = fluid.ParamAttr(
                    initializer=fluid.initializer.NumpyArrayInitializer(
                        custom_weight))
                treeConv1 = nn.TreeConv(feature_size=5,
                                        output_size=6,
                                        num_filters=1,
                                        max_depth=2,
                                        bias_attr='eager_tc1_b')
                treeConv2 = nn.TreeConv(feature_size=5,
                                        output_size=6,
                                        num_filters=1,
                                        max_depth=2,
                                        param_attr=weight_attr,
                                        bias_attr='eager_tc2_b')
                dy_ret1 = treeConv1(base.to_variable(vectors),
                                    base.to_variable(adj))
                dy_ret2 = treeConv2(base.to_variable(vectors),
                                    base.to_variable(adj))
                self.assertFalse(
                    np.array_equal(dy_ret1.numpy(), dy_ret2.numpy()))
                treeConv2.weight.set_value(treeConv1.weight.numpy())
                treeConv2.bias.set_value(treeConv1.bias)
                dy_ret1 = treeConv1(base.to_variable(vectors),
                                    base.to_variable(adj))
                dy_ret2 = treeConv2(base.to_variable(vectors),
                                    base.to_variable(adj))
                np.testing.assert_array_equal(dy_ret1.numpy(), dy_ret2.numpy())

                treeConv2.weight = treeConv1.weight
                treeConv2.bias = treeConv1.bias
                np.testing.assert_array_equal(treeConv1.weight.numpy(),
                                              treeConv2.weight.numpy())
                np.testing.assert_array_equal(treeConv1.bias.numpy(),
                                              treeConv2.bias.numpy())

            custom_weight = np.random.randn(5, 3, 6, 1).astype("float32")
            weight_attr = fluid.ParamAttr(initializer=fluid.initializer.
                                          NumpyArrayInitializer(custom_weight))
            treeConv1 = nn.TreeConv(feature_size=5,
                                    output_size=6,
                                    num_filters=1,
                                    max_depth=2,
                                    bias_attr='tc1_b')
            treeConv2 = nn.TreeConv(feature_size=5,
                                    output_size=6,
                                    num_filters=1,
                                    max_depth=2,
                                    param_attr=weight_attr,
                                    bias_attr='tc2_b')
            dy_ret1 = treeConv1(base.to_variable(vectors),
                                base.to_variable(adj))
            dy_ret2 = treeConv2(base.to_variable(vectors),
                                base.to_variable(adj))
            self.assertFalse(np.array_equal(dy_ret1.numpy(), dy_ret2.numpy()))
            treeConv2.weight.set_value(treeConv1.weight.numpy())
            treeConv2.bias.set_value(treeConv1.bias)
            dy_ret1 = treeConv1(base.to_variable(vectors),
                                base.to_variable(adj))
            dy_ret2 = treeConv2(base.to_variable(vectors),
                                base.to_variable(adj))
            np.testing.assert_array_equal(dy_ret1.numpy(), dy_ret2.numpy())

            treeConv2.weight = treeConv1.weight
            treeConv2.bias = treeConv1.bias
            np.testing.assert_array_equal(treeConv1.weight.numpy(),
                                          treeConv2.weight.numpy())
            np.testing.assert_array_equal(treeConv1.bias.numpy(),
                                          treeConv2.bias.numpy())

    def test_conv3d_transpose(self):
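        # Conv3DTranspose (cuDNN disabled) should agree across modes;
        # parameter sharing between two instances is checked afterwards.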
2179 2180
        input_array = np.arange(0, 48).reshape([2, 3, 2, 2,
                                                2]).astype('float32')
L
lujun 已提交
2181 2182 2183

        with self.static_graph():
            img = layers.data(name='pixel', shape=[3, 2, 2, 2], dtype='float32')
2184 2185 2186 2187
            out = layers.conv3d_transpose(input=img,
                                          num_filters=12,
                                          filter_size=12,
                                          use_cudnn=False)
L
lujun 已提交
2188 2189 2190 2191
            static_rlt = self.get_static_graph_result(
                feed={'pixel': input_array}, fetch_list=[out])[0]
        with self.static_graph():
            img = layers.data(name='pixel', shape=[3, 2, 2, 2], dtype='float32')
2192 2193 2194 2195
            conv3d_transpose = nn.Conv3DTranspose(num_channels=3,
                                                  num_filters=12,
                                                  filter_size=12,
                                                  use_cudnn=False)
L
lujun 已提交
2196 2197 2198 2199
            out = conv3d_transpose(img)
            static_rlt2 = self.get_static_graph_result(
                feed={'pixel': input_array}, fetch_list=[out])[0]
        with self.dynamic_graph():
            with _test_eager_guard():
                conv3d_transpose = nn.Conv3DTranspose(num_channels=3,
                                                      num_filters=12,
                                                      filter_size=12,
                                                      use_cudnn=False)
                dy_eager_rlt = conv3d_transpose(base.to_variable(input_array))
                dy_eager_rlt_value = dy_eager_rlt.numpy()

            conv3d_transpose = nn.Conv3DTranspose(num_channels=3,
                                                  num_filters=12,
                                                  filter_size=12,
                                                  use_cudnn=False)
            dy_rlt = conv3d_transpose(base.to_variable(input_array))
            dy_rlt_value = dy_rlt.numpy()
        np.testing.assert_allclose(static_rlt2, static_rlt, rtol=1e-05)
        np.testing.assert_allclose(dy_rlt_value, static_rlt, rtol=1e-05)
        np.testing.assert_allclose(dy_eager_rlt_value, static_rlt, rtol=1e-05)

        with self.dynamic_graph():
            with _test_eager_guard():
                images = np.ones([2, 3, 6, 6, 6], dtype='float32')
                custom_weight = np.random.randn(3, 3, 2, 2, 2).astype("float32")
                weight_attr = fluid.ParamAttr(
                    initializer=fluid.initializer.NumpyArrayInitializer(
                        custom_weight))
                conv3d1 = nn.Conv3DTranspose(num_channels=3,
                                             num_filters=3,
                                             filter_size=2,
                                             bias_attr='eager_conv3d1_b',
                                             use_cudnn=False)
                conv3d2 = nn.Conv3DTranspose(num_channels=3,
                                             num_filters=3,
                                             filter_size=2,
                                             param_attr=weight_attr,
                                             bias_attr='eager_conv3d2_b',
                                             use_cudnn=False)
                dy_ret1 = conv3d1(base.to_variable(images))
                dy_ret2 = conv3d2(base.to_variable(images))
                self.assertFalse(
                    np.array_equal(dy_ret1.numpy(), dy_ret2.numpy()))

                conv3d1_weight_np = conv3d1.weight.numpy()
                conv3d1_bias = conv3d1.bias
                self.assertFalse(
                    np.array_equal(conv3d1_weight_np, conv3d2.weight.numpy()))
                conv3d2.weight.set_value(conv3d1_weight_np)
                np.testing.assert_array_equal(conv3d1_weight_np,
                                              conv3d2.weight.numpy())
                conv3d1.bias.set_value(conv3d1_bias)
                dy_ret1 = conv3d1(base.to_variable(images))
                dy_ret2 = conv3d2(base.to_variable(images))
                np.testing.assert_array_equal(dy_ret1.numpy(), dy_ret2.numpy())

                conv3d2.weight = conv3d1.weight
                conv3d2.bias = conv3d1.bias
                np.testing.assert_array_equal(conv3d1.weight.numpy(),
                                              conv3d2.weight.numpy())
                np.testing.assert_array_equal(conv3d1.bias.numpy(),
                                              conv3d2.bias.numpy())

            images = np.ones([2, 3, 6, 6, 6], dtype='float32')
            custom_weight = np.random.randn(3, 3, 2, 2, 2).astype("float32")
            weight_attr = fluid.ParamAttr(initializer=fluid.initializer.
                                          NumpyArrayInitializer(custom_weight))
            conv3d1 = nn.Conv3DTranspose(num_channels=3,
                                         num_filters=3,
                                         filter_size=2,
                                         bias_attr='conv3d1_b',
                                         use_cudnn=False)
            conv3d2 = nn.Conv3DTranspose(num_channels=3,
                                         num_filters=3,
                                         filter_size=2,
                                         param_attr=weight_attr,
                                         bias_attr='conv3d2_b',
                                         use_cudnn=False)
            dy_ret1 = conv3d1(base.to_variable(images))
            dy_ret2 = conv3d2(base.to_variable(images))
            self.assertFalse(np.array_equal(dy_ret1.numpy(), dy_ret2.numpy()))

            conv3d1_weight_np = conv3d1.weight.numpy()
            conv3d1_bias = conv3d1.bias
            self.assertFalse(
                np.array_equal(conv3d1_weight_np, conv3d2.weight.numpy()))
            conv3d2.weight.set_value(conv3d1_weight_np)
            np.testing.assert_array_equal(conv3d1_weight_np,
                                          conv3d2.weight.numpy())
            conv3d1.bias.set_value(conv3d1_bias)
            dy_ret1 = conv3d1(base.to_variable(images))
            dy_ret2 = conv3d2(base.to_variable(images))
            np.testing.assert_array_equal(dy_ret1.numpy(), dy_ret2.numpy())

            conv3d2.weight = conv3d1.weight
            conv3d2.bias = conv3d1.bias
            np.testing.assert_array_equal(conv3d1.weight.numpy(),
                                          conv3d2.weight.numpy())
            np.testing.assert_array_equal(conv3d1.bias.numpy(),
                                          conv3d2.bias.numpy())

    def test_eye_op(self):
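        # NumPy references: a 3x2 identity block stacked to batch shapes [3] and [4, 3].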
        np_eye = np.eye(3, 2)
        array_rlt1 = [np_eye for _ in range(3)]
        stack_rlt1 = np.stack(array_rlt1, axis=0)
        array_rlt2 = [stack_rlt1 for _ in range(4)]
        stack_rlt2 = np.stack(array_rlt2, axis=0)

        with self.dynamic_graph():
            with _test_eager_guard():
                eager_eye_tensor = layers.eye(num_rows=3, num_columns=2)
                eager_eye_tensor_rlt1 = layers.eye(num_rows=3,
                                                   num_columns=2,
                                                   batch_shape=[3])
                eager_eye_tensor_rlt2 = layers.eye(num_rows=3,
                                                   num_columns=2,
                                                   batch_shape=[4, 3])
                eager_diag_tensor = layers.eye(20)
                eager_eye_tensor_value = eager_eye_tensor.numpy()
                eager_eye_tensor_rlt1_value = eager_eye_tensor_rlt1.numpy()
                eager_eye_tensor_rlt2_value = eager_eye_tensor_rlt2.numpy()
                eager_diag_tensor_value = eager_diag_tensor.numpy()

            eye_tensor = layers.eye(num_rows=3, num_columns=2)
            eye_tensor_rlt1 = layers.eye(num_rows=3,
                                         num_columns=2,
                                         batch_shape=[3])
            eye_tensor_rlt2 = layers.eye(num_rows=3,
                                         num_columns=2,
                                         batch_shape=[4, 3])
            diag_tensor = layers.eye(20)
            eye_tensor_value = eye_tensor.numpy()
            eye_tensor_rlt1_value = eye_tensor_rlt1.numpy()
            eye_tensor_rlt2_value = eye_tensor_rlt2.numpy()
            diag_tensor_value = diag_tensor.numpy()

        np.testing.assert_allclose(eager_eye_tensor_value, np_eye, rtol=1e-05)
        np.testing.assert_allclose(eager_eye_tensor_rlt1_value,
                                   stack_rlt1,
                                   rtol=1e-05)
        np.testing.assert_allclose(eager_eye_tensor_rlt2_value,
                                   stack_rlt2,
                                   rtol=1e-05)
        np.testing.assert_allclose(eager_diag_tensor_value,
                                   np.eye(20),
                                   rtol=1e-05)

        np.testing.assert_allclose(eye_tensor_value, np_eye, rtol=1e-05)
        np.testing.assert_allclose(eye_tensor_rlt1_value,
                                   stack_rlt1,
                                   rtol=1e-05)
        np.testing.assert_allclose(eye_tensor_rlt2_value,
                                   stack_rlt2,
                                   rtol=1e-05)
        np.testing.assert_allclose(diag_tensor_value, np.eye(20), rtol=1e-05)

        with self.assertRaises(TypeError):
            layers.eye(num_rows=3.1)
        with self.assertRaises(TypeError):
            layers.eye(num_rows=3, num_columns=2.2)
        with self.assertRaises(TypeError):
            layers.eye(num_rows=3, batch_shape=2)
        with self.assertRaises(TypeError):
            layers.eye(num_rows=3, batch_shape=[-1])

    def func_while_loop(self):
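        # while_loop should match between static and dynamic graphs; a body whose
        # return structure differs from loop_vars must raise ValueError.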
        with self.static_graph():
            i = layers.fill_constant(shape=[1], dtype='int64', value=0)
            ten = layers.fill_constant(shape=[1], dtype='int64', value=10)

            def cond(i):
                return layers.less_than(i, ten)

            def body(i):
                return i + 1

            out = layers.while_loop(cond, body, [i])
            static_ret = self.get_static_graph_result(feed={}, fetch_list=out)

        with self.dynamic_graph():
            i = layers.fill_constant(shape=[1], dtype='int64', value=0)
            ten = layers.fill_constant(shape=[1], dtype='int64', value=10)

            def cond1(i):
                return layers.less_than(i, ten)

            def body1(i):
                return i + 1

            dy_ret = layers.while_loop(cond1, body1, [i])
            with self.assertRaises(ValueError):
                j = layers.fill_constant(shape=[1], dtype='int64', value=0)

                def body2(i):
                    return i + 1, i + 2

                layers.while_loop(cond1, body2, [j])

        np.testing.assert_array_equal(static_ret[0], dy_ret[0].numpy())

    def test_while_loop(self):
        with _test_eager_guard():
            self.func_while_loop()
        self.func_while_loop()

    def test_compare(self):
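        # Each comparison op runs once in the static graph, then is checked
        # element-wise against dygraph (eager and non-eager) results.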
        value_a = np.arange(3)
        value_b = np.arange(3)
        # less than
        with self.static_graph():
            a = layers.data(name='a', shape=[1], dtype='int64')
            b = layers.data(name='b', shape=[1], dtype='int64')
            cond = layers.less_than(x=a, y=b)
            static_ret = self.get_static_graph_result(feed={
                "a": value_a,
                "b": value_b
            },
                                                      fetch_list=[cond])[0]
        with self.dynamic_graph():
            with _test_eager_guard():
                da = base.to_variable(value_a)
                db = base.to_variable(value_b)
                dcond = layers.less_than(x=da, y=db)

                for i in range(len(static_ret)):
                    self.assertTrue(dcond.numpy()[i] == static_ret[i])

            da = base.to_variable(value_a)
            db = base.to_variable(value_b)
            dcond = layers.less_than(x=da, y=db)

            for i in range(len(static_ret)):
                self.assertTrue(dcond.numpy()[i] == static_ret[i])

        # less equal
        with self.static_graph():
            a1 = layers.data(name='a1', shape=[1], dtype='int64')
            b1 = layers.data(name='b1', shape=[1], dtype='int64')
            cond1 = layers.less_equal(x=a1, y=b1)
            static_ret1 = self.get_static_graph_result(feed={
                "a1": value_a,
                "b1": value_b
            },
                                                       fetch_list=[cond1])[0]
        with self.dynamic_graph():
            with _test_eager_guard():
                da1 = base.to_variable(value_a)
                db1 = base.to_variable(value_b)
                dcond1 = layers.less_equal(x=da1, y=db1)

                for i in range(len(static_ret1)):
                    self.assertTrue(dcond1.numpy()[i] == static_ret1[i])

            da1 = base.to_variable(value_a)
            db1 = base.to_variable(value_b)
            dcond1 = layers.less_equal(x=da1, y=db1)

            for i in range(len(static_ret1)):
                self.assertTrue(dcond1.numpy()[i] == static_ret1[i])

        # greater than
        with self.static_graph():
            a2 = layers.data(name='a2', shape=[1], dtype='int64')
            b2 = layers.data(name='b2', shape=[1], dtype='int64')
            cond2 = layers.greater_than(x=a2, y=b2)
            static_ret2 = self.get_static_graph_result(feed={
                "a2": value_a,
                "b2": value_b
            },
                                                       fetch_list=[cond2])[0]
        with self.dynamic_graph():
            with _test_eager_guard():
                da2 = base.to_variable(value_a)
                db2 = base.to_variable(value_b)
                dcond2 = layers.greater_than(x=da2, y=db2)

                for i in range(len(static_ret2)):
                    self.assertTrue(dcond2.numpy()[i] == static_ret2[i])

            da2 = base.to_variable(value_a)
            db2 = base.to_variable(value_b)
            dcond2 = layers.greater_than(x=da2, y=db2)

            for i in range(len(static_ret2)):
                self.assertTrue(dcond2.numpy()[i] == static_ret2[i])

        # greater equal
        with self.static_graph():
            a3 = layers.data(name='a3', shape=[1], dtype='int64')
            b3 = layers.data(name='b3', shape=[1], dtype='int64')
            cond3 = layers.greater_equal(x=a3, y=b3)
            static_ret3 = self.get_static_graph_result(feed={
                "a3": value_a,
                "b3": value_b
            },
                                                       fetch_list=[cond3])[0]
        with self.dynamic_graph():
            with _test_eager_guard():
                da3 = base.to_variable(value_a)
                db3 = base.to_variable(value_b)
                dcond3 = layers.greater_equal(x=da3, y=db3)

                for i in range(len(static_ret3)):
                    self.assertTrue(dcond3.numpy()[i] == static_ret3[i])

            da3 = base.to_variable(value_a)
            db3 = base.to_variable(value_b)
            dcond3 = layers.greater_equal(x=da3, y=db3)

            for i in range(len(static_ret3)):
                self.assertTrue(dcond3.numpy()[i] == static_ret3[i])

        # equal
        with self.static_graph():
            a4 = layers.data(name='a4', shape=[1], dtype='int64')
            b4 = layers.data(name='b4', shape=[1], dtype='int64')
            cond4 = layers.equal(x=a4, y=b4)
            static_ret4 = self.get_static_graph_result(feed={
                "a4": value_a,
                "b4": value_b
            },
                                                       fetch_list=[cond4])[0]
        with self.dynamic_graph():
            with _test_eager_guard():
                da4 = base.to_variable(value_a)
                db4 = base.to_variable(value_b)
                dcond4 = layers.equal(x=da4, y=db4)

                for i in range(len(static_ret4)):
                    self.assertTrue(dcond4.numpy()[i] == static_ret4[i])

            da4 = base.to_variable(value_a)
            db4 = base.to_variable(value_b)
            dcond4 = layers.equal(x=da4, y=db4)

            for i in range(len(static_ret4)):
                self.assertTrue(dcond4.numpy()[i] == static_ret4[i])

        # not equal
        with self.static_graph():
            a5 = layers.data(name='a5', shape=[1], dtype='int64')
            b5 = layers.data(name='b5', shape=[1], dtype='int64')
            cond5 = layers.not_equal(x=a5, y=b5)
            static_ret5 = self.get_static_graph_result(feed={
                "a5": value_a,
                "b5": value_b
            },
                                                       fetch_list=[cond5])[0]
        with self.dynamic_graph():
            with _test_eager_guard():
                da5 = base.to_variable(value_a)
                db5 = base.to_variable(value_b)
                dcond5 = layers.not_equal(x=da5, y=db5)

                for i in range(len(static_ret5)):
                    self.assertTrue(dcond5.numpy()[i] == static_ret5[i])

            da5 = base.to_variable(value_a)
            db5 = base.to_variable(value_b)
            dcond5 = layers.not_equal(x=da5, y=db5)

            for i in range(len(static_ret5)):
                self.assertTrue(dcond5.numpy()[i] == static_ret5[i])

    def test_cond(self):
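        # cond should run the branch selected by the predicate and raise
        # TypeError when a branch is not callable.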

        def less_than_branch(a, b):
            return fluid.layers.elementwise_add(a, b)

        def greater_equal_branch(a, b):
            return fluid.layers.elementwise_sub(a, b)

        with self.static_graph():
            a = fluid.layers.fill_constant(shape=[1],
                                           dtype='float32',
                                           value=0.1)
            b = fluid.layers.fill_constant(shape=[1],
                                           dtype='float32',
                                           value=0.23)
            out = fluid.layers.cond(a >= b, lambda: greater_equal_branch(a, b),
                                    lambda: less_than_branch(a, b))
            place = fluid.CUDAPlace(
                0) if core.is_compiled_with_cuda() else fluid.CPUPlace()
            exe = fluid.Executor(place)
            ret = exe.run(fetch_list=[out])
            static_res = ret[0]

        with self.dynamic_graph():
            with _test_eager_guard():
                a = fluid.dygraph.to_variable(np.array([0.1]).astype('float32'))
                b = fluid.dygraph.to_variable(
                    np.array([0.23]).astype('float32'))
                out = layers.cond(a < b, lambda: less_than_branch(a, b),
                                  lambda: greater_equal_branch(a, b))
                out2 = layers.cond(a >= b, lambda: greater_equal_branch(a, b),
                                   lambda: less_than_branch(a, b))
                eager_dynamic_res = out.numpy()
                eager_dynamic_res2 = out2.numpy()
                np.testing.assert_array_equal(eager_dynamic_res,
                                              eager_dynamic_res2)
                with self.assertRaises(TypeError):
                    layers.cond(a < b, 'str', 'str')
                with self.assertRaises(TypeError):
                    layers.cond(a >= b, 'str', 'str')

            a = fluid.dygraph.to_variable(np.array([0.1]).astype('float32'))
            b = fluid.dygraph.to_variable(np.array([0.23]).astype('float32'))
            out = layers.cond(a < b, lambda: less_than_branch(a, b),
                              lambda: greater_equal_branch(a, b))
            out2 = layers.cond(a >= b, lambda: greater_equal_branch(a, b),
                               lambda: less_than_branch(a, b))
            dynamic_res = out.numpy()
            dynamic_res2 = out2.numpy()
            np.testing.assert_array_equal(dynamic_res, dynamic_res2)
            with self.assertRaises(TypeError):
                layers.cond(a < b, 'str', 'str')
            with self.assertRaises(TypeError):
                layers.cond(a >= b, 'str', 'str')

        np.testing.assert_array_equal(static_res, dynamic_res)
        np.testing.assert_array_equal(static_res, eager_dynamic_res)

    def test_case(self):
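        # case runs the first pair whose predicate is True, falling back to
        # the default function when none matches.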

        def fn_1():
            return layers.fill_constant(shape=[1, 2], dtype='float32', value=1)

        def fn_2():
            return layers.fill_constant(shape=[2, 2], dtype='int32', value=2)

        def fn_3():
            return layers.fill_constant(shape=[3], dtype='int32', value=3)

        with self.static_graph():
            x = layers.fill_constant(shape=[1], dtype='float32', value=0.3)
            y = layers.fill_constant(shape=[1], dtype='float32', value=0.1)
            z = layers.fill_constant(shape=[1], dtype='float32', value=0.2)

            pred_1 = layers.less_than(z, x)  # true: 0.2 < 0.3
            pred_2 = layers.less_than(x, y)  # false: 0.3 < 0.1
            pred_3 = layers.equal(x, y)  # false: 0.3 == 0.1

            out_1 = layers.case(pred_fn_pairs=[(pred_1, fn_1), (pred_2, fn_2)],
                                default=fn_3)
            out_2 = layers.case(pred_fn_pairs=[(pred_2, fn_2), (pred_3, fn_3)])

            place = fluid.CUDAPlace(
                0) if core.is_compiled_with_cuda() else fluid.CPUPlace()
            exe = fluid.Executor(place)
            static_res1, static_res2 = exe.run(fetch_list=[out_1, out_2])

        with self.dynamic_graph():
            with _test_eager_guard():
                x = layers.fill_constant(shape=[1], dtype='float32', value=0.3)
                y = layers.fill_constant(shape=[1], dtype='float32', value=0.1)
                z = layers.fill_constant(shape=[1], dtype='float32', value=0.2)

                pred_1 = layers.less_than(z, x)  # true: 0.2 < 0.3
                pred_2 = layers.less_than(x, y)  # false: 0.3 < 0.1
                pred_3 = layers.equal(x, y)  # false: 0.3 == 0.1

                out_1 = layers.case(pred_fn_pairs=[(pred_1, fn_1),
                                                   (pred_2, fn_2)],
                                    default=fn_3)
                out_2 = layers.case(pred_fn_pairs=[(pred_2, fn_2), (pred_3,
                                                                    fn_3)])
                eager_dynamic_res1 = out_1.numpy()
                eager_dynamic_res2 = out_2.numpy()

            x = layers.fill_constant(shape=[1], dtype='float32', value=0.3)
            y = layers.fill_constant(shape=[1], dtype='float32', value=0.1)
            z = layers.fill_constant(shape=[1], dtype='float32', value=0.2)

            pred_1 = layers.less_than(z, x)  # true: 0.2 < 0.3
            pred_2 = layers.less_than(x, y)  # false: 0.3 < 0.1
            pred_3 = layers.equal(x, y)  # false: 0.3 == 0.1

            out_1 = layers.case(pred_fn_pairs=[(pred_1, fn_1), (pred_2, fn_2)],
                                default=fn_3)
            out_2 = layers.case(pred_fn_pairs=[(pred_2, fn_2), (pred_3, fn_3)])
            dynamic_res1 = out_1.numpy()
            dynamic_res2 = out_2.numpy()

        np.testing.assert_array_equal(static_res1, dynamic_res1)
        np.testing.assert_array_equal(static_res2, dynamic_res2)
        np.testing.assert_array_equal(static_res1, eager_dynamic_res1)
        np.testing.assert_array_equal(static_res2, eager_dynamic_res2)

    def test_switch_case(self):
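        # switch_case dispatches on branch_index with dict or list branch_fns,
        # with and without a default function.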

        def fn_1():
            return layers.fill_constant(shape=[1, 2], dtype='float32', value=1)

        def fn_2():
            return layers.fill_constant(shape=[2, 2], dtype='int32', value=2)

        def fn_3():
            return layers.fill_constant(shape=[3], dtype='int32', value=3)

        with self.static_graph():
            index_1 = layers.fill_constant(shape=[1], dtype='int32', value=1)
            index_2 = layers.fill_constant(shape=[1], dtype='int32', value=2)

            out_1 = layers.switch_case(branch_index=index_1,
                                       branch_fns={
                                           1: fn_1,
                                           2: fn_2
                                       },
                                       default=fn_3)
            out_2 = layers.switch_case(branch_index=index_2,
                                       branch_fns=[(1, fn_1), (2, fn_2)],
                                       default=fn_3)
            out_3 = layers.switch_case(branch_index=index_2,
                                       branch_fns=[(0, fn_1), (4, fn_2),
                                                   (7, fn_3)])

            place = fluid.CUDAPlace(
                0) if core.is_compiled_with_cuda() else fluid.CPUPlace()
            exe = fluid.Executor(place)
            static_res1, static_res2, static_res3 = exe.run(
                fetch_list=[out_1, out_2, out_3])

        with self.dynamic_graph():
            with _test_eager_guard():
                index_1 = layers.fill_constant(shape=[1],
                                               dtype='int32',
                                               value=1)
                index_2 = layers.fill_constant(shape=[1],
                                               dtype='int32',
                                               value=2)

                out_1 = layers.switch_case(branch_index=index_1,
                                           branch_fns={
                                               1: fn_1,
                                               2: fn_2
                                           },
                                           default=fn_3)
                out_2 = layers.switch_case(branch_index=index_2,
                                           branch_fns=[(1, fn_1), (2, fn_2)],
                                           default=fn_3)
                out_3 = layers.switch_case(branch_index=index_2,
                                           branch_fns=[(0, fn_1), (4, fn_2),
                                                       (7, fn_3)])

                eager_dynamic_res1 = out_1.numpy()
                eager_dynamic_res2 = out_2.numpy()
                eager_dynamic_res3 = out_3.numpy()

            index_1 = layers.fill_constant(shape=[1], dtype='int32', value=1)
            index_2 = layers.fill_constant(shape=[1], dtype='int32', value=2)

            out_1 = layers.switch_case(branch_index=index_1,
                                       branch_fns={
                                           1: fn_1,
                                           2: fn_2
                                       },
                                       default=fn_3)
            out_2 = layers.switch_case(branch_index=index_2,
                                       branch_fns=[(1, fn_1), (2, fn_2)],
                                       default=fn_3)
            out_3 = layers.switch_case(branch_index=index_2,
                                       branch_fns=[(0, fn_1), (4, fn_2),
                                                   (7, fn_3)])

            dynamic_res1 = out_1.numpy()
            dynamic_res2 = out_2.numpy()
            dynamic_res3 = out_3.numpy()

        np.testing.assert_array_equal(static_res1, dynamic_res1)
        np.testing.assert_array_equal(static_res2, dynamic_res2)
        np.testing.assert_array_equal(static_res3, dynamic_res3)
        np.testing.assert_array_equal(static_res1, eager_dynamic_res1)
        np.testing.assert_array_equal(static_res2, eager_dynamic_res2)
        np.testing.assert_array_equal(static_res3, eager_dynamic_res3)

    def test_crop_tensor(self):
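        # crop_tensor should accept shape/offsets as a tuple, a Variable, or a
        # list mixing ints and Variables.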
        with self.static_graph():
            x = fluid.layers.data(name="x1", shape=[6, 5, 8])

            dim1 = fluid.layers.data(name="dim1",
                                     shape=[1],
                                     append_batch_size=False)
            dim2 = fluid.layers.data(name="dim2",
                                     shape=[1],
                                     append_batch_size=False)
            crop_shape1 = (1, 2, 4, 4)
            crop_shape2 = fluid.layers.data(name="crop_shape",
                                            shape=[4],
                                            append_batch_size=False)
            crop_shape3 = [-1, dim1, dim2, 4]
            crop_offsets1 = [0, 0, 1, 0]
            crop_offsets2 = fluid.layers.data(name="crop_offset",
                                              shape=[4],
                                              append_batch_size=False)
            crop_offsets3 = [0, dim1, dim2, 0]

            out1 = fluid.layers.crop_tensor(x,
                                            shape=crop_shape1,
                                            offsets=crop_offsets1)
            out2 = fluid.layers.crop_tensor(x,
                                            shape=crop_shape2,
                                            offsets=crop_offsets2)
            out3 = fluid.layers.crop_tensor(x,
                                            shape=crop_shape3,
                                            offsets=crop_offsets3)

            self.assertIsNotNone(out1)
            self.assertIsNotNone(out2)
            self.assertIsNotNone(out3)

    def test_shard_index(self):
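        # shard_index remaps labels for shard 0 of 2 over index_num=20;
        # labels outside this shard fall to the ignore value.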
        with self.static_graph():
            x = fluid.layers.data(name="label", shape=[4, 1], dtype='int64')
            shard_label = fluid.layers.shard_index(input=x,
                                                   index_num=20,
                                                   nshards=2,
                                                   shard_id=0)

        self.assertIsNotNone(shard_label)

    def test_accuracy(self):
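        # Top-5 accuracy from the static executor should match the dygraph
        # computation on identical inputs.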
        x = np.random.rand(3, 32, 32).astype("float32")
        y = np.array([[1], [0], [1]])
        with self.static_graph():
            data = fluid.data(name="input", shape=[-1, 32, 32], dtype="float32")
            label = fluid.data(name="label", shape=[-1, 1], dtype="int")
            fc_out = fluid.layers.fc(input=data, size=10)
            predict = fluid.layers.softmax(input=fc_out)
            result = fluid.layers.accuracy(input=predict, label=label, k=5)
            place = fluid.CPUPlace()
            exe = fluid.Executor(place)

            exe.run(fluid.default_startup_program())
            # x = np.random.rand(3, 32, 32).astype("float32")
            # y = np.array([[1], [0], [1]])
            static_out = exe.run(feed={
                "input": x,
                "label": y
            },
2836 2837
                                 fetch_list=result[0])

        with self.dynamic_graph(force_to_use_cpu=True):
            data = base.to_variable(x)
            label = base.to_variable(y)
            fc_out = fluid.layers.fc(data, size=10)
            predict = fluid.layers.softmax(fc_out)
            dynamic_out = fluid.layers.accuracy(input=predict, label=label, k=5)

        np.testing.assert_array_equal(static_out[0], dynamic_out.numpy())


class TestBook(LayerTest):

    def setUp(self):
        self.only_static_set = set({"make_word_embedding"})
        self.not_compare_static_dygraph_set = set({
            "make_gaussian_random", "make_gaussian_random_batch_size_like",
            "make_kldiv_loss", "make_prelu",
            "make_sampled_softmax_with_cross_entropy", "make_sampling_id",
            "make_uniform_random_batch_size_like"
        })
        self.all_close_compare = set({"make_spectral_norm"})

    def func_all_layers(self):
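        # Collect every make_* method, run it once under the static graph and
        # once under the dynamic graph, and compare the fetched outputs.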
        attrs = (getattr(self, name) for name in dir(self))
        methods = filter(inspect.ismethod, attrs)
        for method in methods:
            if not method.__name__.startswith('make_'):
                continue
            self._low_data_bound = 0
            self._high_data_bound = 2
            self._batch_size = 2
            self._feed_dict = {}
            self._force_to_use_cpu = False
            with self.static_graph():
                static_var = method()
                if isinstance(static_var, tuple):
                    static_var = static_var[0]

                if static_var is not None:
                    fetch_list = [static_var.name]
                    static_result = self.get_static_graph_result(
                        feed=self._feed_dict,
                        fetch_list=fetch_list,
                        force_to_use_cpu=self._force_to_use_cpu)

                else:
                    assert method.__name__ in ('make_get_places', )
                    continue
            if method.__name__ in self.only_static_set:
                continue

            with self.dynamic_graph(self._force_to_use_cpu):
                dy_result = method()
                if isinstance(dy_result, tuple):
                    dy_result = dy_result[0]
                dy_result_value = dy_result.numpy()

            if method.__name__ in self.all_close_compare:
                np.testing.assert_allclose(
                    static_result[0],
                    dy_result_value,
                    rtol=1e-05,
                    atol=0,
                    err_msg='Result of function [{}] compare failed'.format(
                        method.__name__))
                continue

            if method.__name__ not in self.not_compare_static_dygraph_set:
                np.testing.assert_array_equal(
                    static_result[0],
                    dy_result_value,
                    err_msg='Result of function [{}] not equal'.format(
                        method.__name__))

    def test_all_layers(self):
        with _test_eager_guard():
            self.func_all_layers()
        self.func_all_layers()

    def _get_np_data(self, shape, dtype, append_batch_size=True):
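        # Seeded random feed data; int dtypes draw from [_low_data_bound, _high_data_bound).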
        np.random.seed(self.seed)
        if append_batch_size:
            shape = [self._batch_size] + shape
        if dtype == 'float32':
            return np.random.random(shape).astype(dtype)
        elif dtype == 'float64':
            return np.random.random(shape).astype(dtype)
        elif dtype == 'int32':
            return np.random.randint(self._low_data_bound,
                                     self._high_data_bound, shape).astype(dtype)
        elif dtype == 'int64':
            return np.random.randint(self._low_data_bound,
                                     self._high_data_bound, shape).astype(dtype)

    def _get_data(self,
                  name,
                  shape,
                  dtype,
                  set_feed_dict=True,
                  append_batch_size=True):
        if base.enabled():
            return base.to_variable(value=self._get_np_data(
                shape, dtype, append_batch_size),
                                    name=name,
                                    zero_copy=False)
        else:
            if set_feed_dict:
                self._feed_dict[name] = self._get_np_data(
                    shape, dtype, append_batch_size)
            return layers.data(name=name,
                               shape=shape,
                               dtype=dtype,
                               append_batch_size=append_batch_size)

    def make_sampled_softmax_with_cross_entropy(self):
        with program_guard(fluid.default_main_program(),
                           fluid.default_startup_program()):
            logits = self._get_data(name='Logits', shape=[256], dtype='float32')
            label = self._get_data(name='Label', shape=[1], dtype='int64')
            num_samples = 25
            output = layers.sampled_softmax_with_cross_entropy(
                logits, label, num_samples)
            return (output)

    def make_fit_a_line(self):
        with program_guard(fluid.default_main_program(),
                           startup_program=fluid.default_startup_program()):
            x = self._get_data(name='x', shape=[13], dtype='float32')
            y_predict = layers.fc(input=x, size=1, act=None)
            y = self._get_data(name='y', shape=[1], dtype='float32')
            cost = layers.square_error_cost(input=y_predict, label=y)
            avg_cost = paddle.mean(cost)
            return (avg_cost)

    def make_recognize_digits_mlp(self):
        with program_guard(fluid.default_main_program(),
                           fluid.default_startup_program()):
            # Change g_program, so the rest layers use `g_program`
            images = self._get_data(name='pixel', shape=[784], dtype='float32')
            label = self._get_data(name='label', shape=[1], dtype='int64')
            hidden1 = layers.fc(input=images, size=128, act='relu')
            hidden2 = layers.fc(input=hidden1, size=64, act='relu')
            predict = layers.fc(input=[hidden2, hidden1],
                                size=10,
                                act='softmax',
                                param_attr=["sftmax.w1", "sftmax.w2"])
            cost = layers.cross_entropy(input=predict, label=label)
            avg_cost = paddle.mean(cost)
            return (avg_cost)

    def make_conv2d_transpose(self):
        with program_guard(fluid.default_main_program(),
                           fluid.default_startup_program()):
            img = self._get_data(name='pixel', shape=[3, 2, 2], dtype='float32')
            return layers.conv2d_transpose(input=img,
                                           num_filters=10,
                                           output_size=28)

    def make_recognize_digits_conv(self):
        with program_guard(fluid.default_main_program(),
                           fluid.default_startup_program()):
            images = self._get_data(name='pixel',
                                    shape=[1, 28, 28],
                                    dtype='float32')
            label = self._get_data(name='label', shape=[1], dtype='int64')
            conv_pool_1 = nets.simple_img_conv_pool(input=images,
                                                    filter_size=5,
                                                    num_filters=2,
                                                    pool_size=2,
                                                    pool_stride=2,
                                                    act="relu")
            conv_pool_2 = nets.simple_img_conv_pool(input=conv_pool_1,
                                                    filter_size=5,
                                                    num_filters=4,
                                                    pool_size=2,
                                                    pool_stride=2,
                                                    act="relu")

            predict = layers.fc(input=conv_pool_2, size=10, act="softmax")
            cost = layers.cross_entropy(input=predict, label=label)
            avg_cost = paddle.mean(cost)
            return avg_cost

    def make_word_embedding(self):
        with program_guard(fluid.default_main_program(),
                           fluid.default_startup_program()):
            dict_size = 10000
            embed_size = 32
            first_word = self._get_data(name='firstw', shape=[1], dtype='int64')
            second_word = self._get_data(name='secondw',
                                         shape=[1],
                                         dtype='int64')
            third_word = self._get_data(name='thirdw', shape=[1], dtype='int64')
            forth_word = self._get_data(name='forthw', shape=[1], dtype='int64')
            next_word = self._get_data(name='nextw', shape=[1], dtype='int64')

            embed_first = layers.embedding(input=first_word,
                                           size=[dict_size, embed_size],
                                           dtype='float32',
                                           param_attr='shared_w')
            embed_second = layers.embedding(input=second_word,
                                            size=[dict_size, embed_size],
                                            dtype='float32',
                                            param_attr='shared_w')

            embed_third = layers.embedding(input=third_word,
                                           size=[dict_size, embed_size],
                                           dtype='float32',
                                           param_attr='shared_w')
            embed_forth = layers.embedding(input=forth_word,
                                           size=[dict_size, embed_size],
                                           dtype='float32',
                                           param_attr='shared_w')

            concat_embed = layers.concat(
                input=[embed_first, embed_second, embed_third, embed_forth],
                axis=1)

            hidden1 = layers.fc(input=concat_embed, size=256, act='sigmoid')
            predict_word = layers.fc(input=hidden1,
                                     size=dict_size,
                                     act='softmax')
            cost = layers.cross_entropy(input=predict_word, label=next_word)
            avg_cost = paddle.mean(cost)
            return (avg_cost)

    def make_sigmoid_cross_entropy(self):
        with program_guard(fluid.default_main_program(),
                           fluid.default_startup_program()):
            dat = self._get_data(name='data', shape=[10], dtype='float32')
            lbl = self._get_data(name='label', shape=[10], dtype='float32')
            ignore_index = -1
            return (layers.sigmoid_cross_entropy_with_logits(
                x=dat, label=lbl, ignore_index=ignore_index))

    def make_hsigmoid(self):
        self._force_to_use_cpu = True
        with fluid.framework._dygraph_place_guard(place=fluid.CPUPlace()):
            x = self._get_data(name='x', shape=[2], dtype='float32')
            y = self._get_data(name='y', shape=[2], dtype='int64')
            return (layers.hsigmoid(input=x, label=y, num_classes=2))

        # test hsigmoid with custom tree structure
        program2 = Program()
        with program_guard(program2):
            x2 = self._get_data(name='x2', shape=[4, 8], dtype='float32')
            y2 = self._get_data(name='y2', shape=[4], dtype='int64')
            path_table = self._get_data(name='path_table',
                                        shape=[4, 6],
                                        dtype='int64')
            path_code = self._get_data(name='path_code',
                                       shape=[4, 6],
                                       dtype='int64')
            return (layers.hsigmoid(input=x2,
                                    label=y2,
                                    num_classes=6,
                                    path_table=path_table,
                                    path_code=path_code,
                                    is_custom=True))

    def make_pool2d(self):
        with program_guard(fluid.default_main_program(),
                           fluid.default_startup_program()):
            x = self._get_data(name='x', shape=[3, 224, 224], dtype='float32')
            return (layers.pool2d(x,
                                  pool_size=[5, 3],
                                  pool_stride=[1, 2],
                                  pool_padding=(2, 1)))

    def make_pool2d_infershape(self):
        with program_guard(fluid.default_main_program(),
                           fluid.default_startup_program()):
            theta = self._get_data("theta", shape=[2, 3], dtype='float32')
            x = fluid.layers.affine_grid(theta, out_shape=[2, 3, 244, 244])
            return (layers.pool2d(x,
                                  pool_size=[5, 3],
                                  pool_stride=[1, 2],
                                  pool_padding=(2, 1)))

    def make_pool3d(self):
        with program_guard(fluid.default_main_program(),
                           fluid.default_startup_program()):
            x = self._get_data(name='x',
                               shape=[3, 244, 244, 244],
                               dtype='float32')
            return (layers.pool3d(x,
                                  pool_size=[5, 3, 2],
                                  pool_stride=[1, 2, 3],
                                  pool_padding=(2, 1, 1)))

    def make_adaptive_pool2d(self):
        with program_guard(fluid.default_main_program(),
                           fluid.default_startup_program()):
            x = self._get_data(name='x', shape=[3, 224, 224], dtype='float32')
            return (layers.adaptive_pool2d(x, [3, 3], pool_type='avg'))
            pool, mask = layers.adaptive_pool2d(x, [3, 3], require_index=True)
            return (pool)
            return (mask)
            return (layers.adaptive_pool2d(x, 3, pool_type='avg'))
            pool, mask = layers.adaptive_pool2d(x, 3, require_index=True)
            return (pool)
            return (mask)

    def make_adaptive_pool3d(self):
        with program_guard(fluid.default_main_program(),
                           fluid.default_startup_program()):
            x = self._get_data(name='x',
                               shape=[3, 244, 224, 224],
                               dtype='float32')
            return (layers.adaptive_pool3d(x, [3, 3, 3], pool_type='avg'))
            pool, mask = layers.adaptive_pool3d(x, [3, 3, 3],
                                                require_index=True)
            return (pool)
            return (mask)
            return (layers.adaptive_pool3d(x, 3, pool_type='avg'))
            pool, mask = layers.adaptive_pool3d(x, 3, require_index=True)
            return (pool)
            return (mask)

    def make_lstm_unit(self):
        with program_guard(fluid.default_main_program(),
                           fluid.default_startup_program()):
            x_t_data = self._get_data(name='x_t_data',
                                      shape=[10, 10],
                                      dtype='float32')
            x_t = layers.fc(input=x_t_data, size=10)
            prev_hidden_data = self._get_data(name='prev_hidden_data',
                                              shape=[10, 30],
                                              dtype='float32')
            prev_hidden = layers.fc(input=prev_hidden_data, size=30)
            prev_cell_data = self._get_data(name='prev_cell',
                                            shape=[10, 30],
                                            dtype='float32')
            prev_cell = layers.fc(input=prev_cell_data, size=30)
            return (layers.lstm_unit(x_t=x_t,
                                     hidden_t_prev=prev_hidden,
                                     cell_t_prev=prev_cell))

    def make_softmax(self):
        with program_guard(fluid.default_main_program(),
                           fluid.default_startup_program()):
            data = self._get_data(name='data', shape=[10], dtype='float32')
            hid = layers.fc(input=data, size=20)
            return (layers.softmax(hid, axis=1))

    def make_space_to_depth(self):
        with program_guard(fluid.default_main_program(),
                           fluid.default_startup_program()):
            data = self._get_data(name='data',
                                  shape=[32, 9, 6, 6],
                                  append_batch_size=False,
                                  dtype='float32')
            return (layers.space_to_depth(data, 3))

    def make_lrn(self):
        with program_guard(fluid.default_main_program(),
                           fluid.default_startup_program()):
            data = self._get_data(name='data', shape=[6, 2, 2], dtype='float32')
            return (layers.lrn(data))

    def make_get_places(self):
        with program_guard(fluid.default_main_program(),
                           fluid.default_startup_program()):
            get_places(device_count=1)

    @prog_scope()
    def make_nce(self):
        window_size = 5
        words = []
        for i in range(window_size):
            words.append(
                self._get_data(name='word_{0}'.format(i),
                               shape=[1],
                               dtype='int64'))

        dict_size = 10000
        label_word = int(window_size // 2) + 1

        embs = []
        for i in range(window_size):
            if i == label_word:
                continue

            emb = layers.embedding(input=words[i],
                                   size=[dict_size, 32],
                                   param_attr='emb.w',
                                   is_sparse=True)

            embs.append(emb)

        embs = layers.concat(input=embs, axis=1)
        loss = layers.nce(input=embs,
                          label=words[label_word],
                          num_total_classes=dict_size,
                          param_attr='nce.w',
                          bias_attr='nce.b')
        avg_loss = paddle.mean(loss)
        return (avg_loss)

    def make_multiplex(self):
        with program_guard(fluid.default_main_program(),
                           fluid.default_startup_program()):
            x1 = self._get_data(name='x1', shape=[4], dtype='float32')
            x2 = self._get_data(name='x2', shape=[4], dtype='float32')
            index = self._get_data(name='index', shape=[1], dtype='int32')
            out = layers.multiplex(inputs=[x1, x2], index=index)
            return (out)

    def make_softmax_with_cross_entropy(self):
        with program_guard(fluid.default_main_program(),
                           fluid.default_startup_program()):
            x = self._get_data(name='x', shape=[16], dtype='float32')
            y = self._get_data(name='label', shape=[1], dtype='int64')
            loss, softmax = layers.softmax_with_cross_entropy(
                x, y, return_softmax=True)
            self.assertIsNotNone(loss)
            self.assertIsNotNone(softmax)

            loss = layers.softmax_with_cross_entropy(x, y)
            self.assertIsNotNone(loss)

            x1 = self._get_data(name='x1', shape=[16, 32, 64], dtype='float32')
            y1 = self._get_data(name='label1', shape=[1, 32, 64], dtype='int64')
            y2 = self._get_data(name='label2', shape=[16, 1, 64], dtype='int64')
            y3 = self._get_data(name='label3', shape=[16, 32, 1], dtype='int64')
            loss1 = layers.softmax_with_cross_entropy(x1, y1, axis=1)
            loss2 = layers.softmax_with_cross_entropy(x1, y2, axis=2)
            loss3 = layers.softmax_with_cross_entropy(x1, y3, axis=3)
            loss4 = layers.softmax_with_cross_entropy(x1, y3, axis=-1)
            self.assertIsNotNone(loss1)
            self.assertIsNotNone(loss2)
            self.assertIsNotNone(loss3)
            self.assertIsNotNone(loss4)
            return (loss4)

    def make_smooth_l1(self):
        with program_guard(fluid.default_main_program(),
                           fluid.default_startup_program()):
            x = self._get_data(name='x', shape=[4], dtype='float32')
            y = self._get_data(name='label', shape=[4], dtype='float32')
            loss = layers.smooth_l1(x, y)
            return (loss)

    def make_scatter(self):
        with program_guard(fluid.default_main_program(),
                           fluid.default_startup_program()):
            x = self._get_data(name='x',
                               shape=[3, 3],
                               append_batch_size=False,
                               dtype='float32')
            idx = self._get_data(name='idx',
                                 shape=[2],
                                 append_batch_size=False,
                                 dtype='int32')
            updates = self._get_data(name='updates',
                                     shape=[2, 3],
                                     append_batch_size=False,
                                     dtype='float32')
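            # scatter overwrites whole rows of x: out[idx[i]] = updates[i];
            # rows not named in idx pass through unchanged.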
            out = layers.scatter(input=x, index=idx, updates=updates)
            return (out)

    def make_one_hot(self):
        with fluid.framework._dygraph_place_guard(place=fluid.CPUPlace()):
            label = self._get_data(name="label", shape=[1], dtype="int32")
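            # one_hot expands each integer id into a depth-10 indicator row,
            # e.g. label 3 becomes [0, 0, 0, 1, 0, 0, 0, 0, 0, 0].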
            one_hot_label = layers.one_hot(input=label, depth=10)
            return (one_hot_label)

    def make_label_smooth(self):
        # TODO(minqiyang): support gpu ut
        self._force_to_use_cpu = True
        with fluid.framework._dygraph_place_guard(place=fluid.CPUPlace()):
            label = self._get_data(name="label", shape=[1], dtype="int32")
            one_hot_label = layers.one_hot(input=label, depth=10)
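            # label_smooth computes (1 - epsilon) * one_hot + epsilon / depth,
            # so epsilon=0.1 with depth 10 gives on/off values of 0.91/0.01.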
            smooth_label = layers.label_smooth(label=one_hot_label,
                                               epsilon=0.1,
                                               dtype="int32")
            return (smooth_label)

    def make_topk(self):
        with program_guard(fluid.default_main_program(),
                           fluid.default_startup_program()):
            data = self._get_data(name="label", shape=[200], dtype="float32")
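            # topk returns the k largest entries along the last axis together
            # with their indices.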
            values, indices = layers.topk(data, k=5)
            return (values)

    def make_resize_bilinear(self):
        with program_guard(fluid.default_main_program(),
                           fluid.default_startup_program()):
            x = self._get_data(name='x', shape=[3, 9, 6], dtype="float32")
            output = layers.resize_bilinear(x, out_shape=[12, 12])
            return (output)

    def make_resize_bilinear_by_scale(self):
        with program_guard(fluid.default_main_program(),
                           fluid.default_startup_program()):
            x = self._get_data(name='x', shape=[3, 9, 6], dtype="float32")
            output = layers.resize_bilinear(x, scale=1.5)
            return (output)

    def make_resize_nearest(self):
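        # The first two blocks deliberately pass an out_shape whose rank does
        # not match the input and expect resize_nearest to raise ValueError.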
        try:
            with program_guard(fluid.default_main_program(),
                               fluid.default_startup_program()):
                x = self._get_data(name='x1', shape=[3, 9, 6], dtype="float32")
                output = layers.resize_nearest(x, out_shape=[12, 12])
        except ValueError:
            pass

        try:
            with program_guard(fluid.default_main_program(),
                               fluid.default_startup_program()):
                x = self._get_data(name='x2',
                                   shape=[3, 9, 6, 7],
                                   dtype="float32")
                output = layers.resize_nearest(x, out_shape=[12, 12, 12])
        except ValueError:
            pass

        with program_guard(fluid.default_main_program(),
                           fluid.default_startup_program()):
            x = self._get_data(name='x', shape=[3, 9, 6], dtype="float32")
            output = layers.resize_nearest(x, out_shape=[12, 12])
            return (output)

    def make_resize_nearest_by_scale(self):
        with program_guard(fluid.default_main_program(),
                           fluid.default_startup_program()):
            x = self._get_data(name='x1', shape=[3, 9, 6], dtype="float32")
            output = layers.resize_nearest(x, scale=1.8)
            return (output)

    def make_resize_trilinear(self):
        try:
            with program_guard(fluid.default_main_program(),
                               fluid.default_startup_program()):
                x = self._get_data(name='x2', shape=[3, 9, 6], dtype="float32")
                output = layers.resize_trilinear(x, out_shape=[12, 12, 12])
        except ValueError:
            pass

        try:
            with program_guard(fluid.default_main_program(),
                               fluid.default_startup_program()):
                x = self._get_data(name='x',
                                   shape=[3, 9, 6, 7],
                                   dtype="float32")
                output = layers.resize_trilinear(x, out_shape=[12, 12])
        except ValueError:
            pass

        with program_guard(fluid.default_main_program(),
                           fluid.default_startup_program()):
            x = self._get_data(name='x', shape=[3, 9, 6, 7], dtype="float32")
            output = layers.resize_trilinear(x, out_shape=[12, 12, 12])
            return (output)

    def make_resize_trilinear_by_scale(self):
        with program_guard(fluid.default_main_program(),
                           fluid.default_startup_program()):
            x = self._get_data(name='x', shape=[3, 9, 6, 7], dtype="float32")
            output = layers.resize_trilinear(x, scale=2.1)
            return (output)

    def make_polygon_box_transform(self):
        with program_guard(fluid.default_main_program(),
                           fluid.default_startup_program()):
            x = self._get_data(name='x', shape=[8, 4, 4], dtype="float32")
            output = layers.polygon_box_transform(input=x)
            return (output)

    def make_l2_normalize(self):
        with program_guard(fluid.default_main_program(),
                           fluid.default_startup_program()):
            x = self._get_data(name='x', shape=[8, 7, 10], dtype="float32")
            output = layers.l2_normalize(x, axis=1)
            return output

    def make_crop(self):
        with program_guard(fluid.default_main_program(),
                           fluid.default_startup_program()):
            x = self._get_data(name='x', shape=[3, 5], dtype="float32")
            y = self._get_data(name='y', shape=[2, 3], dtype="float32")
            output = layers.crop(x, shape=y)
            return (output)

    def make_mean_iou(self):
        with fluid.framework._dygraph_place_guard(place=fluid.CPUPlace()):
            x = self._get_data(name='x', shape=[16], dtype='int32')
            y = self._get_data(name='label', shape=[16], dtype='int32')
            iou = layers.mean_iou(x, y, self._high_data_bound)
            return (iou)

    def make_argsort(self):
        with program_guard(fluid.default_main_program(),
                           fluid.default_startup_program()):
            data = self._get_data(name='x', shape=[2, 3, 3], dtype="float32")
            out, ids = layers.argsort(input=data, axis=1)
            return (out)

    def make_rank_loss(self):
        with program_guard(fluid.default_main_program(),
                           fluid.default_startup_program()):
            label = self._get_data(name='label',
                                   append_batch_size=False,
                                   shape=[16, 1],
                                   dtype="float32")
            left = self._get_data(name='left',
                                  append_batch_size=False,
                                  shape=[16, 1],
                                  dtype="float32")
            right = self._get_data(name='right',
                                   append_batch_size=False,
                                   shape=[16, 1],
                                   dtype="float32")
            out = layers.rank_loss(label, left, right, name="rank_loss")
            return (out)

    def make_shape(self):
        with program_guard(fluid.default_main_program(),
                           fluid.default_startup_program()):
            input = self._get_data(name="input",
                                   shape=[3, 100, 100],
                                   dtype="float32")
            out = layers.shape(input)
            return (out)

    def make_pad2d(self):
        with program_guard(fluid.default_main_program(),
                           fluid.default_startup_program()):
            input = self._get_data(name="input",
                                   shape=[3, 100, 100],
                                   dtype="float32")
            paddings = layers.fill_constant(shape=[4], dtype='int32', value=1)
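            # pad2d paddings are [top, bottom, left, right] for NCHW input,
            # so a 100x100 image padded with [1, 2, 3, 4] becomes 103x107.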
            out = layers.pad2d(input,
                               paddings=[1, 2, 3, 4],
                               mode='reflect',
                               data_format='NCHW',
                               name="shape")
            out_1 = layers.pad2d(input,
                                 paddings=paddings,
                                 mode='reflect',
                                 data_format='NCHW',
                                 name="shape")
            return (out)

    def make_prelu(self):
        with program_guard(fluid.default_main_program(),
                           fluid.default_startup_program()):
            input = self._get_data(name="input",
                                   shape=[5, 200, 100, 100],
                                   dtype="float32")
            mode = 'channel'
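            # 'channel' mode learns one alpha per channel (200 here);
            # prelu(x) = x for x >= 0 and alpha * x otherwise.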
            out = layers.prelu(input,
                               mode,
                               param_attr=ParamAttr(initializer=Constant(1.0)),
                               name='prelu')
            return (out)
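
    # The activation makers below are uniform smoke tests: each builds a
    # single elementwise op over a [16] input and returns its output.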

    def make_soft_relu(self):
        with program_guard(fluid.default_main_program(),
                           fluid.default_startup_program()):
            input = self._get_data(name="input", shape=[16], dtype="float32")
            out = layers.soft_relu(input, threshold=30.0, name='soft_relu')
            return (out)

    def make_sigmoid(self):
        with program_guard(fluid.default_main_program(),
                           fluid.default_startup_program()):
            input = self._get_data(name="input", shape=[16], dtype="float32")
            out = layers.sigmoid(input, name='sigmoid')
            return (out)

    def make_exp(self):
        with program_guard(fluid.default_main_program(),
                           fluid.default_startup_program()):
            input = self._get_data(name="input", shape=[16], dtype="float32")
            out = layers.exp(input, name='exp')
            return (out)

    def make_tanh(self):
        with program_guard(fluid.default_main_program(),
                           fluid.default_startup_program()):
            input = self._get_data(name="input", shape=[16], dtype="float32")
            out = layers.tanh(input, name='tanh')
            return (out)

    def make_tanh_shrink(self):
        with program_guard(fluid.default_main_program(),
                           fluid.default_startup_program()):
            input = self._get_data(name="input", shape=[16], dtype="float32")
            out = layers.tanh_shrink(input, name='tanh_shrink')
            return (out)

    def make_sqrt(self):
        with program_guard(fluid.default_main_program(),
                           fluid.default_startup_program()):
            input = self._get_data(name="input", shape=[16], dtype="float32")
            out = layers.sqrt(input, name='sqrt')
            return (out)

    def make_abs(self):
        with program_guard(fluid.default_main_program(),
                           fluid.default_startup_program()):
            input = self._get_data(name="input", shape=[16], dtype="float32")
            out = layers.abs(input, name='abs')
            return (out)

    def make_ceil(self):
        with program_guard(fluid.default_main_program(),
                           fluid.default_startup_program()):
            input = self._get_data(name="input", shape=[16], dtype="float32")
            out = layers.ceil(input, name='ceil')
            return (out)

    def make_floor(self):
        with program_guard(fluid.default_main_program(),
                           fluid.default_startup_program()):
            input = self._get_data(name="input", shape=[16], dtype="float32")
            out = layers.floor(input, name='floor')
            return (out)

    def make_cos(self):
        with program_guard(fluid.default_main_program(),
                           fluid.default_startup_program()):
            input = self._get_data(name="input", shape=[16], dtype="float32")
            out = layers.cos(input, name='cos')
            return (out)

    def make_sin(self):
        with program_guard(fluid.default_main_program(),
                           fluid.default_startup_program()):
            input = self._get_data(name="input", shape=[16], dtype="float32")
            out = layers.sin(input, name='sin')
            return (out)

    def make_round(self):
        with program_guard(fluid.default_main_program(),
                           fluid.default_startup_program()):
            input = self._get_data(name="input", shape=[16], dtype="float32")
            out = layers.round(input, name='round')
            return (out)

    def make_reciprocal(self):
        with program_guard(fluid.default_main_program(),
                           fluid.default_startup_program()):
            input = self._get_data(name="input", shape=[16], dtype="float32")
            out = layers.reciprocal(input, name='reciprocal')
            return (out)

    def make_square(self):
        with program_guard(fluid.default_main_program(),
                           fluid.default_startup_program()):
            input = self._get_data(name="input", shape=[16], dtype="float32")
            out = layers.square(input, name='square')
            return (out)

    def make_softplus(self):
        with program_guard(fluid.default_main_program(),
                           fluid.default_startup_program()):
            input = self._get_data(name="input", shape=[16], dtype="float32")
            out = layers.softplus(input, name='softplus')
            return (out)

    def make_softsign(self):
        with program_guard(fluid.default_main_program(),
                           fluid.default_startup_program()):
            input = self._get_data(name="input", shape=[16], dtype="float32")
            out = layers.softsign(input, name='softsign')
            return (out)

    def make_mish(self):
        with program_guard(fluid.default_main_program(),
                           fluid.default_startup_program()):
            input = self._get_data(name="input", shape=[16], dtype="float32")
            out = layers.mish(input, name='mish')
            return (out)

    def make_cross_entropy(self):
        with program_guard(fluid.default_main_program(),
                           fluid.default_startup_program()):
            x = self._get_data(name="x", shape=[30, 10], dtype="float32")
            label = self._get_data(name="label", shape=[30, 1], dtype="int64")
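            # positional args below map to soft_label=False and
            # ignore_index=4: labels equal to 4 contribute no loss.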
            out = layers.cross_entropy(x, label, False, 4)
            return (out)

    def make_bpr_loss(self):
        self._force_to_use_cpu = True
        with fluid.framework._dygraph_place_guard(place=fluid.CPUPlace()):
            x = self._get_data(name="x", shape=[30, 10], dtype="float32")
            label = self._get_data(name="label", shape=[30, 1], dtype="int64")
            out = layers.bpr_loss(x, label)
            return (out)

    def make_expand(self):
        with program_guard(fluid.default_main_program(),
                           fluid.default_startup_program()):
            x = self._get_data(name="input", shape=[10], dtype='int32')
            out = layers.expand(x, [1, 2])
            return out

    def make_uniform_random_batch_size_like(self):
        with program_guard(fluid.default_main_program(),
                           fluid.default_startup_program()):
            input = self._get_data(name="input",
                                   shape=[13, 11],
                                   dtype='float32')
            out = layers.uniform_random_batch_size_like(input, [-1, 11])
            return (out)

    def make_gaussian_random(self):
        with program_guard(fluid.default_main_program(),
                           fluid.default_startup_program()):
            out = layers.gaussian_random(shape=[20, 30])
            return (out)

    def make_sampling_id(self):
        with program_guard(fluid.default_main_program(),
                           fluid.default_startup_program()):
            x = self._get_data(name="X",
                               shape=[13, 11],
                               dtype='float32',
                               append_batch_size=False)

            out = layers.sampling_id(x)
            return (out)

    def make_gaussian_random_batch_size_like(self):
        with program_guard(fluid.default_main_program(),
                           fluid.default_startup_program()):
            input = self._get_data(name="input",
                                   shape=[13, 11],
                                   dtype='float32')

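            # the -1 in shape is replaced at run time by input's batch size;
            # samples are drawn from N(mean=1.0, std=2.0).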
            out = layers.gaussian_random_batch_size_like(input,
                                                         shape=[-1, 11],
                                                         mean=1.0,
                                                         std=2.0)
            return (out)

    def make_sum(self):
        with program_guard(fluid.default_main_program(),
                           fluid.default_startup_program()):
            input = self._get_data(name="input",
                                   shape=[13, 11],
                                   dtype='float32')

            out = layers.sum(input)
            return (out)

    def make_slice(self):
        starts = [1, 0, 2]
        ends = [3, 3, 4]
        axes = [0, 1, 2]

        with program_guard(fluid.default_main_program(),
                           fluid.default_startup_program()):
            input = self._get_data(name="input",
                                   shape=[3, 4, 5, 6],
                                   dtype='float32')

            out = layers.slice(input, axes=axes, starts=starts, ends=ends)
            return out

    def make_scale_variable(self):
        with program_guard(fluid.default_main_program(),
                           fluid.default_startup_program()):
            input = self._get_data(name="input",
                                   shape=[3, 4, 5, 6],
                                   dtype='float32')
            scale_var = self._get_data(name="scale",
                                       shape=[1],
                                       dtype='float32',
                                       append_batch_size=False)
            out = layers.scale(input, scale=scale_var)
            return out

    def make_softshrink(self):
        with program_guard(fluid.default_main_program(),
                           fluid.default_startup_program()):
            input = self._get_data(name="input", shape=[16], dtype="float32")
            out = layers.softshrink(input, alpha=0.3)
            return (out)

    def make_iou_similarity(self):
        with program_guard(fluid.default_main_program(),
                           fluid.default_startup_program()):
            x = self._get_data(name="x", shape=[4], dtype="float32")
            y = self._get_data(name="y", shape=[4], dtype="float32")
            out = layers.iou_similarity(x, y, name='iou_similarity')
            return (out)

    def make_grid_sampler(self):
        with program_guard(fluid.default_main_program(),
                           fluid.default_startup_program()):
            x = self._get_data(name='x', shape=[3, 5, 7], dtype='float32')
            grid = self._get_data(name='grid', shape=[5, 7, 2], dtype='float32')
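            # grid holds normalized coordinates in [-1, 1]; grid_sampler
            # bilinearly samples x at those locations.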
            out = layers.grid_sampler(x, grid)
            return (out)

    def make_bilinear_tensor_product_layer(self):
        with program_guard(fluid.default_main_program(),
                           fluid.default_startup_program()):
            data = self._get_data(name='data', shape=[4], dtype="float32")

            theta = self._get_data(name="theta", shape=[5], dtype="float32")
            out = layers.bilinear_tensor_product(data, theta, 6)
            return (out)

    def make_batch_norm(self):
        with program_guard(fluid.default_main_program(),
                           fluid.default_startup_program()):
            data = self._get_data(name='data',
                                  shape=[32, 128, 128],
                                  dtype="float32")
            out = layers.batch_norm(data)
            return (out)

    def make_batch_norm_momentum_variable(self):
        with program_guard(fluid.default_main_program(),
                           fluid.default_startup_program()):
            data = self._get_data(name='data',
                                  shape=[32, 128, 128],
                                  dtype="float32")
            momentum = self._get_data(name='momentum',
                                      shape=[1],
                                      dtype='float32',
                                      append_batch_size=False)
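            # passing momentum as a 1-element tensor lets the running-stat
            # momentum vary at run time instead of being a fixed attribute.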
            out = layers.batch_norm(data, momentum=momentum)
            return (out)

    def make_inplace_abn(self):
        with program_guard(fluid.default_main_program(),
                           fluid.default_startup_program()):
            data = self._get_data(name='data',
                                  shape=[32, 128, 128],
                                  dtype="float32")
            out = layers.inplace_abn(data, act='leaky_relu', act_alpha=0.2)
            return (out)

    def make_inplace_abn_momentum_variable(self):
        with program_guard(fluid.default_main_program(),
                           fluid.default_startup_program()):
            data = self._get_data(name='data',
                                  shape=[32, 128, 128],
                                  dtype="float32")
            momentum = self._get_data(name='momentum',
                                      shape=[1],
                                      dtype='float32',
                                      append_batch_size=False)
            out = layers.inplace_abn(data,
                                     momentum=momentum,
                                     act='elu',
                                     act_alpha=2.0)
            return (out)

    def make_range(self):
        with program_guard(fluid.default_main_program(),
                           fluid.default_startup_program()):
            layers.range(0, 10, 2, 'int32')
            layers.range(0.1, 10.0, 0.2, 'float32')
            layers.range(0.1, 10.0, 0.2, 'float64')
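            # range also accepts 1-element tensors for start/end/step, built
            # below with fill_constant.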
            start = layers.fill_constant(shape=[1], value=0.1, dtype="float32")
            end = layers.fill_constant(shape=[1], value=10.0, dtype="float32")
            step = layers.fill_constant(shape=[1], value=0.2, dtype="float32")
            y = layers.range(start, end, step, 'float64')
            return y

    def make_spectral_norm(self):
        with program_guard(fluid.default_main_program(),
                           fluid.default_startup_program()):
            weight = self._get_data(name='weight',
                                    shape=[2, 3, 32, 32],
                                    dtype="float32",
                                    append_batch_size=False)
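            # spectral_norm divides the weight by its largest singular value,
            # estimated with power_iters rounds of power iteration along dim.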
            out = layers.spectral_norm(weight, dim=1, power_iters=1)
            return (out)

    def make_kldiv_loss(self):
        with program_guard(fluid.default_main_program(),
                           fluid.default_startup_program()):
            x = self._get_data(name='x',
                               shape=[32, 128, 128],
                               dtype="float32",
                               append_batch_size=False)
            target = self._get_data(name='target',
                                    shape=[32, 128, 128],
                                    dtype="float32",
                                    append_batch_size=False)
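            # kldiv_loss expects x as log-probabilities and target as
            # probabilities; 'batchmean' sums target * (log(target) - x)
            # and divides by the batch size.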
            loss = layers.kldiv_loss(x=x, target=target, reduction='batchmean')
            return (loss)

    def make_temporal_shift(self):
        with program_guard(fluid.default_main_program(),
                           fluid.default_startup_program()):
            x = self._get_data(name="X", shape=[16, 4, 4], dtype="float32")
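            # temporal_shift views the batch as [N, seg_num, C, H, W] and
            # shifts a shift_ratio fraction of channels one step along the
            # segment (time) axis in each direction.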
            out = layers.temporal_shift(x, seg_num=2, shift_ratio=0.2)
            return (out)

    def make_shuffle_channel(self):
        with program_guard(fluid.default_main_program(),
                           fluid.default_startup_program()):
            x = self._get_data(name="X", shape=[16, 4, 4], dtype="float32")
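            # shuffle_channel splits the 16 channels into 4 groups and
            # interleaves them, as in ShuffleNet.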
            out = layers.shuffle_channel(x, group=4)
            return (out)

    def make_fsp_matrix(self):
        with program_guard(fluid.default_main_program(),
                           fluid.default_startup_program()):
            x = self._get_data(name="X", shape=[16, 4, 4], dtype="float32")
            y = self._get_data(name="Y", shape=[8, 4, 4], dtype="float32")
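            # fsp_matrix builds the flow-of-solution-procedure matrix, an
            # HW-averaged Gram matrix between the two feature maps; here the
            # result has shape [batch, 16, 8].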
            out = layers.fsp_matrix(x, y)
            return (out)

    def make_pixel_shuffle(self):
        with program_guard(fluid.default_main_program(),
                           fluid.default_startup_program()):
            x = self._get_data(name="X", shape=[9, 4, 4], dtype="float32")
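            # pixel_shuffle rearranges [N, C*r*r, H, W] into [N, C, H*r, W*r];
            # 9 channels with upscale_factor 3 give [N, 1, 12, 12].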
            out = layers.pixel_shuffle(x, upscale_factor=3)
            return (out)

    def make_mse_loss(self):
        with program_guard(fluid.default_main_program(),
                           fluid.default_startup_program()):
            x = self._get_data(name="X", shape=[1], dtype="float32")
            y = self._get_data(name="Y", shape=[1], dtype="float32")
            out = layers.mse_loss(input=x, label=y)
            return (out)

    def make_square_error_cost(self):
        with program_guard(fluid.default_main_program(),
                           fluid.default_startup_program()):
            x = self._get_data(name="X", shape=[1], dtype="float32")
            y = self._get_data(name="Y", shape=[1], dtype="float32")
            out = layers.square_error_cost(input=x, label=y)
            return (out)

    def test_dynamic_lstmp(self):
        # TODO(minqiyang): dygraph does not support LoD yet
        with self.static_graph():
            hidden_dim, proj_dim = 16, 8
            seq_data = layers.data(name='seq_data',
                                   shape=[10, 10],
                                   dtype='float32',
                                   lod_level=1)
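            # dynamic_lstmp needs the gate pre-activations to be 4x the
            # hidden size, hence the fc with size 4 * hidden_dim.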
            fc_out = layers.fc(input=seq_data, size=4 * hidden_dim)
            self.assertIsNotNone(
                layers.dynamic_lstmp(input=fc_out,
                                     size=4 * hidden_dim,
                                     proj_size=proj_dim))

    def test_linear_chain_crf(self):
        with self.static_graph():
            label_dict_len = 10
            feature = layers.data(name='feature', shape=[784], dtype='float32')
            label = layers.data(name='label', shape=[1], dtype='int64')
            emission = layers.fc(input=feature, size=10)
            crf = layers.linear_chain_crf(input=emission,
                                          label=label,
                                          param_attr=ParamAttr(name="crfw"))
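            # crf_decoding below reuses the "crfw" transition weights learned
            # by linear_chain_crf to run Viterbi decoding.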
            crf_decode = layers.crf_decoding(input=emission,
                                             param_attr=ParamAttr(name="crfw"))
            self.assertFalse(crf is None)
            self.assertFalse(crf_decode is None)
            return layers.chunk_eval(input=crf_decode,
                                     label=label,
                                     chunk_scheme="IOB",
                                     num_chunk_types=(label_dict_len - 1) // 2)

    def test_linear_chain_crf_padding(self):
        with self.static_graph():
            label_dict_len, max_len = 10, 20
            feature = layers.data(name='feature',
                                  shape=[max_len, 784],
                                  dtype='float32')
            label = layers.data(name='label', shape=[max_len], dtype='int64')
            length = layers.data(name='length', shape=[1], dtype='int64')
            emission = layers.fc(input=feature, size=10, num_flatten_dims=2)
            crf = layers.linear_chain_crf(input=emission,
                                          label=label,
                                          length=length,
                                          param_attr=ParamAttr(name="crfw"))
            crf_decode = layers.crf_decoding(input=emission,
                                             length=length,
                                             param_attr=ParamAttr(name="crfw"))
            self.assertFalse(crf is None)
            self.assertFalse(crf_decode is None)
            return layers.chunk_eval(input=crf_decode,
                                     label=label,
                                     seq_length=length,
                                     chunk_scheme="IOB",
                                     num_chunk_types=(label_dict_len - 1) // 2)

    def test_im2sequence(self):
        # TODO(minqiyang): dygraph does not support LoD yet
        with self.static_graph():
            x = layers.data(name='x', shape=[3, 128, 128], dtype='float32')
            y = layers.data(name='y', shape=[], dtype='float32')
            output = layers.im2sequence(input=x,
                                        input_image_size=y,
                                        stride=[1, 1],
                                        filter_size=[2, 2],
                                        out_stride=[1, 1])
            return (output)

    def test_lod_reset(self):
        # TODO(minqiyang): dygraph does not support LoD yet
        with self.static_graph():
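            # lod_reset keeps the data of x and only swaps its LoD: taken
            # from another LoDTensor (case 1), from a 1-D tensor (case 2), or
            # from a literal target_lod (case 3).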
            # case 1
            x = layers.data(name='x', shape=[10], dtype='float32')
            y = layers.data(name='y',
                            shape=[10, 20],
                            dtype='float32',
                            lod_level=2)
            z = layers.lod_reset(x=x, y=y)
            self.assertTrue(z.lod_level == 2)
            # case 2
            lod_tensor_in = layers.data(name='lod_in', shape=[1], dtype='int32')
            z = layers.lod_reset(x=x, y=lod_tensor_in)
            self.assertTrue(z.lod_level == 1)
            # case 3
            z = layers.lod_reset(x=x, target_lod=[1, 2, 3])
            self.assertTrue(z.lod_level == 1)
            return z

    def test_affine_grid(self):
        with self.static_graph():
            data = layers.data(name='data', shape=[2, 3, 3], dtype="float32")
            out, ids = layers.argsort(input=data, axis=1)

            theta = layers.data(name="theta", shape=[2, 3], dtype="float32")
            out_shape = layers.data(name="out_shape", shape=[-1], dtype="int32")
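            # affine_grid accepts the output shape either as a tensor
            # (out_shape) or as a plain list ([5, 3, 28, 28]).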
            data_0 = layers.affine_grid(theta, out_shape)
            data_1 = layers.affine_grid(theta, [5, 3, 28, 28])

            self.assertIsNotNone(data_0)
            self.assertIsNotNone(data_1)

    def test_stridedslice(self):
        axes = [0, 1, 2]
        starts = [1, 0, 2]
        ends = [3, 3, 4]
        strides = [1, 1, 1]
        with self.static_graph():
            x = layers.data(name="x", shape=[245, 30, 30], dtype="float32")
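            # strided_slice takes x[starts[i]:ends[i]:strides[i]] along each
            # axis listed in axes.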
            out = layers.strided_slice(x,
                                       axes=axes,
                                       starts=starts,
                                       ends=ends,
                                       strides=strides)
            return out

    def test_fill_constant_batch_size_like(self):
        with self.static_graph():
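            # the op copies dim 0 of `like` into the requested shape, so the
            # result here is [1, 3300] filled with the given int64 value.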
            like = fluid.layers.fill_constant(shape=[1, 200],
                                              value=10,
                                              dtype='int64')
            out = layers.fill_constant_batch_size_like(input=like,
                                                       shape=[2, 3300],
                                                       value=1315454564656,
                                                       dtype='int64')
            return out

    def test_psroi_pool(self):
        # TODO(minqiyang): dygraph does not support LoD yet
        with self.static_graph():
            x = layers.data(name="x", shape=[245, 30, 30], dtype="float32")
            rois = layers.data(name="rois",
                               shape=[4],
                               dtype="float32",
                               lod_level=1)
            output = layers.psroi_pool(x, rois, 5, 0.25, 7, 7)
            return (output)

    def test_sequence_expand(self):
        # TODO(minqiyang): dygraph does not support LoD yet
        with self.static_graph():
            x = layers.data(name='x', shape=[10], dtype='float32')
            y = layers.data(name='y',
                            shape=[10, 20],
                            dtype='float32',
                            lod_level=2)
            return (layers.sequence_expand(x=x, y=y, ref_level=1))

    def test_sequence_reshape(self):
        # TODO(minqiyang): dygraph does not support LoD yet
        with self.static_graph():
            x = layers.data(name='x', shape=[8], dtype='float32', lod_level=1)
            out = layers.sequence_reshape(input=x, new_dim=16)
            return (out)

    def test_sequence_unpad(self):
        # TODO(minqiyang): dygraph does not support LoD yet
        with self.static_graph():
            x = layers.data(name='x', shape=[10, 5], dtype='float32')
            length = layers.data(name='length', shape=[], dtype='int64')
            return (layers.sequence_unpad(x=x, length=length))

    def test_sequence_softmax(self):
        # TODO(minqiyang): dygraph does not support LoD yet
        with self.static_graph():
            seq_data = layers.data(name='seq_data',
                                   shape=[10, 10],
                                   dtype='float32',
                                   lod_level=1)
            seq = layers.fc(input=seq_data, size=20)
            return (layers.sequence_softmax(seq))

    def test_sequence_unsqueeze(self):
        # TODO(minqiyang): dygraph does not support LoD yet
        with self.static_graph():
            x = layers.data(name='x', shape=[8, 2], dtype='float32')
            out = layers.unsqueeze(input=x, axes=[1])
            return (out)

    def test_sequence_scatter(self):
        # TODO(minqiyang): dygraph does not support LoD yet
        with self.static_graph():
            x = layers.data(name='x',
                            shape=[3, 6],
                            append_batch_size=False,
                            dtype='float32')
            idx = layers.data(name='idx',
                              shape=[12, 1],
                              append_batch_size=False,
                              dtype='int32',
                              lod_level=1)
            updates = layers.data(name='updates',
                                  shape=[12, 1],
                                  append_batch_size=False,
                                  dtype='float32',
                                  lod_level=1)
            out = layers.sequence_scatter(input=x, index=idx, updates=updates)
            return (out)

    def test_sequence_slice(self):
        # TODO(minqiyang): dygraph does not support LoD yet
        with self.static_graph():
            import numpy as np
            seqs = layers.data(name='x',
                               shape=[10, 5],
                               dtype='float32',
                               lod_level=1)
            offset = layers.assign(input=np.array([[0, 1]]).astype('int32'))
            length = layers.assign(input=np.array([[2, 1]]).astype('int32'))
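            # per sequence: keep `length` steps starting at `offset`, so
            # sequence 0 keeps steps [0, 2) and sequence 1 keeps step 1.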
            out = layers.sequence_slice(input=seqs,
                                        offset=offset,
                                        length=length)
            return (out)

    def test_filter_by_instag(self):
        # TODO(minqiyang): dygraph does not support LoD yet
        with self.static_graph():
            x1 = layers.data(name='Ins',
                             shape=[32, 1],
                             dtype='float32',
                             lod_level=0)
            x2 = layers.data(name='Ins_tag',
                             shape=[32, 1],
                             dtype='int64',
                             lod_level=0,
                             stop_gradient=True)
            x3 = layers.create_global_var(shape=[1, 1],
                                          value=20,
                                          dtype='int64',
                                          persistable=True,
                                          force_cpu=True,
                                          name='Filter_tag')
            out1, out2 = layers.filter_by_instag(x1, x2, x3, is_lod=True)

    def test_shuffle_batch(self):
        # TODO(minqiyang): dygraph does not support LoD yet
        with self.static_graph():
            x = layers.data(name='X',
                            shape=[4, 50],
                            dtype='float32',
                            lod_level=0)
            out1 = fluid.contrib.layers.shuffle_batch(x)
            default_main_program().random_seed = 1000
            out2 = fluid.contrib.layers.shuffle_batch(x)
            self.assertIsNotNone(out1)
            self.assertIsNotNone(out2)
            return (out1)

    def test_partial_sum(self):
        with self.static_graph():
            x = fluid.data(name="x", shape=[None, 3], dtype="float32")
            y = fluid.data(name="y", shape=[None, 3], dtype="float32")
            sum = fluid.contrib.layers.partial_sum([x, y],
                                                   start_index=0,
                                                   length=2)
            return (sum)

    def test_batch_fc(self):
        with self.static_graph():
            input = fluid.data(name="input", shape=[16, 2, 3], dtype="float32")
            out = fluid.contrib.layers.batch_fc(
                input=input,
                param_size=[16, 3, 10],
                param_attr=fluid.ParamAttr(
                    learning_rate=1.0,
                    name="w_0",
                    initializer=fluid.initializer.Xavier(uniform=False)),
                bias_size=[16, 10],
                bias_attr=fluid.ParamAttr(
                    learning_rate=1.0,
                    name="b_0",
                    initializer=fluid.initializer.Xavier(uniform=False)),
                act="relu")
        return (out)

    def test_rank_attention(self):
        with self.static_graph():
            input = fluid.data(name="input", shape=[None, 2], dtype="float32")
            rank_offset = fluid.data(name="rank_offset",
                                     shape=[None, 7],
                                     dtype="int32")
            out = fluid.contrib.layers.rank_attention(
                input=input,
                rank_offset=rank_offset,
                rank_param_shape=[18, 3],
                rank_param_attr=fluid.ParamAttr(
                    learning_rate=1.0,
                    name="ubm_rank_param.w_0",
                    initializer=fluid.initializer.Xavier(uniform=False)),
                max_rank=3)
            return (out)

    def test_roi_pool(self):
        x_np = np.random.rand(2, 3, 8, 8).astype('float32')
        rois_np = np.random.rand(3, 4).astype('float32')
        rois_num_np = np.array([1, 2]).astype('int32')

        with self.static_graph():
            x = layers.data(name="x", shape=[3, 8, 8], dtype="float32")
            rois = layers.data(name="rois", shape=[4], dtype="float32")
            rois_num = fluid.data(name="rois_num", shape=[None], dtype="int32")
            output = layers.roi_pool(x, rois, 4, 4, 0.5, rois_num=rois_num)
            static_res = self.get_static_graph_result(feed={
                'x': x_np,
                'rois': rois_np,
                'rois_num': rois_num_np
            },
                                                      fetch_list=[output])[0]

        with self.dynamic_graph():
            with _test_eager_guard():
                x_dy = base.to_variable(x_np)
                rois_dy = base.to_variable(rois_np)
                rois_num_dy = base.to_variable(rois_num_np)
                dy_eager_res = layers.roi_pool(x_dy,
                                               rois_dy,
                                               4,
                                               4,
                                               0.5,
                                               rois_num=rois_num_dy)
                dy_eager_res_value = dy_eager_res[0].numpy()

            x_dy = base.to_variable(x_np)
            rois_dy = base.to_variable(rois_np)
            rois_num_dy = base.to_variable(rois_num_np)
            dy_res = layers.roi_pool(x_dy,
                                     rois_dy,
                                     4,
                                     4,
                                     0.5,
                                     rois_num=rois_num_dy)
            dy_res_value = dy_res[0].numpy()
        np.testing.assert_array_equal(static_res, dy_res_value)
        np.testing.assert_array_equal(static_res, dy_eager_res_value)

    def test_sequence_enumerate(self):
        # TODO(minqiyang): dygraph does not support LoD yet
        with self.static_graph():
            x = layers.data(name="input", shape=[1], dtype='int32', lod_level=1)
            out = layers.sequence_enumerate(input=x, win_size=2, pad_value=0)

    def test_roi_align(self):
        x_np = np.random.rand(2, 3, 8, 8).astype('float32')
        rois_np = np.random.rand(3, 4).astype('float32')
        rois_num_np = np.array([1, 2]).astype('int32')

        with self.static_graph():
            x = layers.data(name="x", shape=[3, 8, 8], dtype="float32")
            rois = layers.data(name="rois", shape=[4], dtype="float32")
            rois_num = fluid.data(name="rois_num", shape=[None], dtype="int32")
            output = layers.roi_align(x, rois, 4, 4, 0.5, 2, rois_num=rois_num)
            static_res = self.get_static_graph_result(feed={
                'x': x_np,
                'rois': rois_np,
                'rois_num': rois_num_np
            },
                                                      fetch_list=[output])[0]

        with self.dynamic_graph():
            with _test_eager_guard():
                x_dy = base.to_variable(x_np)
                rois_dy = base.to_variable(rois_np)
                rois_num_dy = base.to_variable(rois_num_np)
                dy_eager_res = layers.roi_align(x_dy,
                                                rois_dy,
                                                4,
                                                4,
                                                0.5,
                                                2,
                                                rois_num=rois_num_dy)
                dy_eager_res_value = dy_eager_res.numpy()

            x_dy = base.to_variable(x_np)
            rois_dy = base.to_variable(rois_np)
            rois_num_dy = base.to_variable(rois_num_np)
            dy_res = layers.roi_align(x_dy,
                                      rois_dy,
                                      4,
                                      4,
                                      0.5,
                                      2,
                                      rois_num=rois_num_dy)
            dy_res_value = dy_res.numpy()
        np.testing.assert_array_equal(static_res, dy_eager_res_value)
        np.testing.assert_array_equal(static_res, dy_res_value)

    def test_dice_loss(self):
        num_classes = 4
        eps = 1e-6
        input_np = np.random.rand(2, 3, num_classes).astype('float32')
        label_np = np.random.randint(0, num_classes, [2, 3, 1], dtype=np.int64)
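        # dice_loss treats the last dim of input as per-class probabilities;
        # eps keeps the denominator of the dice ratio away from zero.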

        with self.static_graph():
            input_ = layers.data(name="input",
                                 shape=[None, 3, num_classes],
                                 dtype="float32")
            label_ = layers.data(name="label",
                                 shape=[None, 3, 1],
                                 dtype="int64")
            output = layers.dice_loss(input_, label_, eps)
            static_res = self.get_static_graph_result(feed={
                'input': input_np,
                'label': label_np
            },
                                                      fetch_list=[output])[0]

        with self.dynamic_graph():
            with _test_eager_guard():
                input_ = base.to_variable(input_np)
                label_ = base.to_variable(label_np)
                dy_eager_res = layers.dice_loss(input_, label_, eps)
                dy_eager_res_value = dy_eager_res.numpy()

            input_ = base.to_variable(input_np)
            label_ = base.to_variable(label_np)
            dy_res = layers.dice_loss(input_, label_, eps)
            dy_res_value = dy_res.numpy()
        np.testing.assert_array_equal(static_res, dy_res_value)
        np.testing.assert_array_equal(static_res, dy_eager_res_value)

    def test_roi_perspective_transform(self):
        # TODO(minqiyang): dygraph does not support LoD yet
        with self.static_graph():
            x = layers.data(name="x", shape=[256, 30, 30], dtype="float32")
            rois = layers.data(name="rois",
                               shape=[8],
                               dtype="float32",
                               lod_level=1)
            output = layers.roi_perspective_transform(x, rois, 7, 7, 0.6)
            return (output)

    def test_row_conv(self):
        # TODO(minqiyang): dygraph does not support LoD yet
        with self.static_graph():
            x = layers.data(name='x', shape=[16], dtype='float32', lod_level=1)
            out = layers.row_conv(input=x, future_context_size=2)
            return (out)

    def test_simple_conv2d(self):
        # TODO(minqiyang): dygraph does not support layers with parameters yet
        with self.static_graph():
            images = layers.data(name='pixel',
                                 shape=[3, 48, 48],
                                 dtype='float32')
            return layers.conv2d(input=images,
                                 num_filters=3,
                                 filter_size=[4, 4])

    def test_squeeze(self):
        # TODO(minqiyang): dygraph does not support layers with parameters yet
        with self.static_graph():
            x = layers.data(name='x', shape=[1, 1, 4], dtype='float32')
            out = layers.squeeze(input=x, axes=[2])
            return (out)

    def test_flatten(self):
        # TODO(minqiyang): dygraph does not support ops without a kernel yet
        with self.static_graph():
            x = layers.data(name='x',
                            append_batch_size=False,
                            shape=[4, 4, 3],
                            dtype="float32")
            out = layers.flatten(x, axis=1, name="flatten")
            return (out)

    def test_linspace(self):
        program = Program()
        with program_guard(program):
            out = layers.linspace(20, 10, 5, 'float64')
            self.assertIsNotNone(out)
        print(str(program))

    def test_deformable_conv(self):
        with self.static_graph():
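            # modulated (v2) deformable conv: for a 3x3 kernel the offset
            # input has 2*3*3 = 18 channels and the mask has 3*3 = 9.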
            input = layers.data(name='input',
                                append_batch_size=False,
                                shape=[2, 3, 32, 32],
                                dtype="float32")
            offset = layers.data(name='offset',
                                 append_batch_size=False,
                                 shape=[2, 18, 32, 32],
                                 dtype="float32")
            mask = layers.data(name='mask',
                               append_batch_size=False,
                               shape=[2, 9, 32, 32],
                               dtype="float32")
            out = layers.deformable_conv(input=input,
                                         offset=offset,
                                         mask=mask,
                                         num_filters=2,
                                         filter_size=3,
                                         padding=1)
            return (out)

    def test_deformable_conv2(self):
        with self.static_graph():
            input = fluid.data(name='input',
                               shape=[None, 3, None, None],
                               dtype="float32")
            offset = fluid.data(name='offset',
                                shape=[None, 18, None, None],
                                dtype="float32")
            mask = fluid.data(name='mask',
                              shape=[None, 9, None, None],
                              dtype="float32")
            out = layers.deformable_conv(input=input,
                                         offset=offset,
                                         mask=mask,
                                         num_filters=2,
                                         filter_size=3,
                                         padding=1)
            return (out)

    def test_unfold(self):
        with self.static_graph():
            x = layers.data(name='x', shape=[3, 20, 20], dtype='float32')
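            # unfold (im2col) with a 3x3 kernel, stride 1, padding 1 and
            # dilation 1 turns [N, 3, 20, 20] into [N, 27, 400].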
            out = layers.unfold(x, [3, 3], 1, 1, 1)
            return (out)

    def test_partial_concat(self):
        with self.static_graph():
            x = fluid.data(name="x", shape=[None, 3], dtype="float32")
            y = fluid.data(name="y", shape=[None, 3], dtype="float32")
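            # partial_concat joins only columns [start_index, start_index +
            # length) of each input; length=-1 means "through the end".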
            concat1 = fluid.contrib.layers.partial_concat([x, y],
                                                          start_index=0,
                                                          length=2)
            concat2 = fluid.contrib.layers.partial_concat(x,
                                                          start_index=0,
                                                          length=-1)
            return concat1, concat2

    def test_deform_roi_pooling(self):
        with program_guard(fluid.default_main_program(),
                           fluid.default_startup_program()):
            input = layers.data(name='input',
                                shape=[2, 3, 32, 32],
                                dtype='float32',
                                append_batch_size=False)
            rois = layers.data(name="rois",
                               shape=[4],
                               dtype='float32',
                               lod_level=1)
            trans = layers.data(name="trans",
                                shape=[2, 3, 32, 32],
                                dtype='float32',
                                append_batch_size=False)
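            # trans holds the learned part offsets; with no_trans=False they
            # shift the 8x8 pooling bins (scaled by trans_std) before sampling.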
            out = layers.deformable_roi_pooling(input=input,
                                                rois=rois,
                                                trans=trans,
                                                no_trans=False,
                                                spatial_scale=1.0,
                                                group_size=(1, 1),
                                                pooled_height=8,
                                                pooled_width=8,
                                                part_size=(8, 8),
                                                sample_per_part=4,
                                                trans_std=0.1)
        return (out)

    def test_deformable_conv_v1(self):
        with program_guard(fluid.default_main_program(),
                           fluid.default_startup_program()):
            input = layers.data(name='input',
                                append_batch_size=False,
                                shape=[2, 3, 32, 32],
                                dtype="float32")
            offset = layers.data(name='offset',
                                 append_batch_size=False,
                                 shape=[2, 18, 32, 32],
                                 dtype="float32")
            out = layers.deformable_conv(input=input,
                                         offset=offset,
                                         mask=None,
                                         num_filters=2,
                                         filter_size=3,
                                         padding=1,
                                         modulated=False)
            return (out)

    def test_retinanet_target_assign(self):
        with program_guard(fluid.default_main_program(),
                           fluid.default_startup_program()):
            bbox_pred = layers.data(name='bbox_pred',
                                    shape=[1, 100, 4],
                                    append_batch_size=False,
                                    dtype='float32')
            cls_logits = layers.data(name='cls_logits',
                                     shape=[1, 100, 10],
                                     append_batch_size=False,
                                     dtype='float32')
            anchor_box = layers.data(name='anchor_box',
                                     shape=[100, 4],
                                     append_batch_size=False,
                                     dtype='float32')
            anchor_var = layers.data(name='anchor_var',
                                     shape=[100, 4],
                                     append_batch_size=False,
                                     dtype='float32')
            gt_boxes = layers.data(name='gt_boxes',
                                   shape=[10, 4],
                                   append_batch_size=False,
                                   dtype='float32')
            gt_labels = layers.data(name='gt_labels',
                                    shape=[10, 1],
                                    append_batch_size=False,
                                    dtype='int32')
            is_crowd = layers.data(name='is_crowd',
                                   shape=[1],
                                   append_batch_size=False,
                                   dtype='int32')
            im_info = layers.data(name='im_info',
                                  shape=[1, 3],
                                  append_batch_size=False,
                                  dtype='float32')
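            # The trailing 10 is num_classes; the op matches anchors against
            # gt_boxes and returns the predictions and targets used by the
            # classification and regression losses.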
            return (layers.retinanet_target_assign(bbox_pred, cls_logits,
                                                   anchor_box, anchor_var,
                                                   gt_boxes, gt_labels,
                                                   is_crowd, im_info, 10))

    def test_sigmoid_focal_loss(self):
        with program_guard(fluid.default_main_program(),
                           fluid.default_startup_program()):
            input = layers.data(name='data',
                                shape=[10, 80],
                                append_batch_size=False,
                                dtype='float32')
            label = layers.data(name='label',
                                shape=[10, 1],
                                append_batch_size=False,
                                dtype='int32')
            fg_num = layers.data(name='fg_num',
                                 shape=[1],
                                 append_batch_size=False,
                                 dtype='int32')
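            # Focal loss with the RetinaNet defaults gamma=2 and alpha=0.25;
            # fg_num is the foreground count used to normalize the loss.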
            out = fluid.layers.sigmoid_focal_loss(x=input,
                                                  label=label,
                                                  fg_num=fg_num,
                                                  gamma=2.,
                                                  alpha=0.25)
            return (out)

    def test_addmm(self):
        with program_guard(fluid.default_main_program(),
                           fluid.default_startup_program()):
            input = layers.data(name='input_data',
                                shape=[3, 3],
                                append_batch_size=False,
                                dtype='float32')
            x = layers.data(name='x',
                            shape=[3, 2],
                            append_batch_size=False,
                            dtype='float32')
            y = layers.data(name='y',
                            shape=[2, 3],
                            append_batch_size=False,
                            dtype='float32')

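            # addmm computes input + x * y (matmul), so the shapes work out as
            # [3, 3] + [3, 2] x [2, 3] -> [3, 3].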
            out = paddle.addmm(input=input, x=x, y=y)
            return (out)

    def test_retinanet_detection_output(self):
        with program_guard(fluid.default_main_program(),
                           fluid.default_startup_program()):
            bboxes = layers.data(name='bboxes',
                                 shape=[1, 21, 4],
                                 append_batch_size=False,
                                 dtype='float32')
            scores = layers.data(name='scores',
                                 shape=[1, 21, 10],
                                 append_batch_size=False,
                                 dtype='float32')
            anchors = layers.data(name='anchors',
                                  shape=[21, 4],
                                  append_batch_size=False,
                                  dtype='float32')
            im_info = layers.data(name="im_info",
                                  shape=[1, 3],
                                  append_batch_size=False,
                                  dtype='float32')
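            # bboxes/scores/anchors are passed per FPN level (two levels here);
            # the op decodes boxes, drops scores below score_threshold and runs
            # NMS per image.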
            nmsed_outs = layers.retinanet_detection_output(
                bboxes=[bboxes, bboxes],
                scores=[scores, scores],
                anchors=[anchors, anchors],
                im_info=im_info,
                score_threshold=0.05,
                nms_top_k=1000,
                keep_top_k=100,
                nms_threshold=0.3,
                nms_eta=1.)
            return (nmsed_outs)

    def test_warpctc_with_padding(self):
        # TODO(minqiyang): dygraph does not support LoD yet
        with self.static_graph():
            input_length = layers.data(name='logits_length',
                                       shape=[11],
                                       dtype='int64')
            label_length = layers.data(name='labels_length',
                                       shape=[12],
                                       dtype='int64')
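            # With input_length/label_length given, warpctc accepts dense
            # padded Tensors instead of LoD tensors.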
            label = layers.data(name='label', shape=[12, 1], dtype='int32')
            predict = layers.data(name='predict',
                                  shape=[4, 4, 8],
                                  dtype='float32')
            output = layers.warpctc(input=predict,
                                    label=label,
                                    input_length=input_length,
                                    label_length=label_length)
            return (output)

    def test_edit_distance(self):
        with self.static_graph():
            predict = layers.data(name='predict',
                                  shape=[-1, 1],
                                  dtype='int64',
                                  lod_level=1)
            label = layers.data(name='label',
                                shape=[-1, 1],
                                dtype='int64',
                                lod_level=1)
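            # lod_level=1 marks predict/label as variable-length sequences; the
            # evaluator accumulates the edit (Levenshtein) distance between
            # them.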
            evaluator = fluid.evaluator.EditDistance(predict, label)
            return evaluator.metrics

    def test_basic_gru(self):
        input_size = 128
        hidden_size = 256
        with self.static_graph():
            input = fluid.data(name="input",
                               shape=[None, None, input_size],
                               dtype='float32')
            pre_hidden = fluid.data(name="pre_hidden",
                                    shape=[None, hidden_size],
                                    dtype='float32')
            sequence_length = fluid.data(name="sequence_length",
                                         shape=[None],
                                         dtype='int32')

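            # Cover all four bidirectional x batch_first combinations;
            # basic_gru returns the per-step output and the last hidden state.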
            for bidirectional in [True, False]:
                for batch_first in [True, False]:
                    rnn_out, last_hidden = fluid.contrib.layers.basic_gru(
                        input,
                        pre_hidden,
                        hidden_size=hidden_size,
                        num_layers=2,
                        sequence_length=sequence_length,
                        dropout_prob=0.5,
                        bidirectional=bidirectional,
                        batch_first=batch_first)


class TestMetricsDetectionMap(unittest.TestCase):

    def test_detection_map(self):
        program = fluid.Program()
        with program_guard(program):
            detect_res = fluid.layers.data(name='detect_res',
                                           shape=[10, 6],
                                           append_batch_size=False,
                                           dtype='float32')
            label = fluid.layers.data(name='label',
                                      shape=[10, 1],
                                      append_batch_size=False,
                                      dtype='float32')
            box = fluid.layers.data(name='bbox',
                                    shape=[10, 4],
                                    append_batch_size=False,
                                    dtype='float32')
            map_eval = fluid.metrics.DetectionMAP(detect_res,
                                                  label,
                                                  box,
                                                  class_num=21)
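            # get_map_var returns the mAP of the current mini-batch and the
            # mAP accumulated across all batches seen so far.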
            cur_map, accm_map = map_eval.get_map_var()
            self.assertIsNotNone(cur_map)
            self.assertIsNotNone(accm_map)
        print(str(program))


class ExampleNet(paddle.nn.Layer):

    def __init__(self):
        super(ExampleNet, self).__init__()
        self.weight = self.create_parameter(
            shape=[1, 1], attr=paddle.ParamAttr(trainable=False))

    def forward(self):
        # only for testing the parameter trainable attr
        pass


class TestLayerParameterTrainableSet(unittest.TestCase):

    def test_layer_parameter_set(self):
        with fluid.dygraph.guard():
            net = ExampleNet()
            self.assertFalse(net.weight.trainable)


class TestLayerTrainingAttribute(unittest.TestCase):

    def test_set_train_eval_in_dynamic_mode(self):
        with fluid.dygraph.guard():
            net = paddle.nn.Dropout()
            net.train()
            self.assertTrue(net.training)
            net.eval()
            self.assertFalse(net.training)

    def test_set_train_eval_in_static_mode(self):
        net = paddle.nn.Dropout()
        net.train()
        self.assertTrue(net.training)
        net.eval()
        self.assertFalse(net.training)


class MyLayer(paddle.nn.Layer):

    def __init__(self):
        super(MyLayer, self).__init__()
        self._linear = paddle.nn.Linear(1, 1)
        self._dropout = paddle.nn.Dropout(p=0.5)

    def forward(self, input):
        temp = self._linear(input)
        temp = self._dropout(temp)
        return temp


class MySuperLayer(paddle.nn.Layer):

    def __init__(self):
        super(MySuperLayer, self).__init__()
        self._mylayer = MyLayer()

    def forward(self, input):
        temp = self._mylayer(input)
        return temp


class TestSubLayerCount(unittest.TestCase):

    def test_sublayer(self):
        with fluid.dygraph.guard():
            mySuperlayer = MySuperLayer()
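            # sublayers() walks everything below mySuperlayer (MyLayer plus its
            # Linear and Dropout), and include_self=True adds mySuperlayer
            # itself, hence 3 and 4.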
            self.assertEqual(len(mySuperlayer.sublayers()), 3)
            self.assertEqual(len(mySuperlayer.sublayers(include_self=True)), 4)


if __name__ == '__main__':
    paddle.enable_static()
    unittest.main()