# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import unittest

import numpy as np
from test_imperative_base import new_program_scope

import paddle
import paddle.fluid as fluid
from paddle.fluid import core
import paddle.fluid.dygraph_utils as dygraph_utils
from paddle.fluid.dygraph.layer_object_helper import LayerObjectHelper
from paddle.fluid.framework import _in_legacy_dygraph, _test_eager_guard
from paddle.fluid.layer_helper import LayerHelper


class MyLayer(fluid.Layer):
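    """A minimal layer used throughout these tests: relu, elementwise
    square, then sum; the relu output is kept for gradient checks."""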
    def __init__(self):
        super().__init__()

    def forward(self, inputs):
        x = fluid.layers.relu(inputs)
        self._x_for_debug = x
        x = fluid.layers.elementwise_mul(x, x)
        x = paddle.sum(x)
        return [x]


class MLP(fluid.Layer):
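    """A two-layer perceptron with constant-initialized weights and biases;
    forward returns the sum of the second linear layer's output."""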
    def __init__(self, input_size):
        super().__init__()
        self._linear1 = paddle.nn.Linear(
            input_size,
            3,
            weight_attr=paddle.ParamAttr(
                initializer=paddle.nn.initializer.Constant(value=0.1)
            ),
            bias_attr=paddle.ParamAttr(
                initializer=paddle.nn.initializer.Constant(value=0.1)
            ),
        )
        self._linear2 = paddle.nn.Linear(
            3,
            4,
            weight_attr=paddle.ParamAttr(
                initializer=paddle.nn.initializer.Constant(value=0.1)
            ),
            bias_attr=paddle.ParamAttr(
                initializer=paddle.nn.initializer.Constant(value=0.1)
            ),
        )

    def forward(self, inputs):
        x = self._linear1(inputs)
        x = self._linear2(x)
        x = paddle.sum(x)
        return x


class SimpleRNNCell(fluid.Layer):
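    """A hand-rolled RNN cell: i2h and h2h projections, a tanh activation,
    an h2o projection, and a softmax whose sum is the step output."""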
    def __init__(self, step_input_size, hidden_size, output_size, param_attr):
        super().__init__()
        self.step_input_size = step_input_size
        self.hidden_size = hidden_size
        self.output_size = output_size
        self._dtype = core.VarDesc.VarType.FP32
        self.param_attr = param_attr

        i2h_param_shape = [self.step_input_size, self.hidden_size]
        h2h_param_shape = [self.hidden_size, self.hidden_size]
        h2o_param_shape = [self.output_size, self.hidden_size]
        self._i2h_w = None
        self._i2h_w = self.create_parameter(
            attr=self.param_attr,
            shape=i2h_param_shape,
            dtype=self._dtype,
            is_bias=False,
        )
        self._h2h_w = self.create_parameter(
            attr=self.param_attr,
            shape=h2h_param_shape,
            dtype=self._dtype,
            is_bias=False,
        )
        self._h2o_w = self.create_parameter(
            attr=self.param_attr,
            shape=h2o_param_shape,
            dtype=self._dtype,
            is_bias=False,
        )

    def forward(self, input, pre_hidden):
        tmp_i2h = paddle.fluid.layers.nn.mul(input, self._i2h_w)
        tmp_h2h = paddle.fluid.layers.nn.mul(pre_hidden, self._h2h_w)
        hidden = paddle.add(tmp_h2h, tmp_i2h)
        hidden = self._helper.append_activation(hidden, act='tanh')
        out = paddle.fluid.layers.nn.mul(hidden, self._h2o_w)
        softmax_out = paddle.nn.functional.softmax(out)
        reduce_out = paddle.sum(softmax_out)
        return reduce_out, hidden


class SimpleRNN(fluid.Layer):
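    """Unrolls SimpleRNNCell over a fixed sequence length of 4 steps,
    starting from a constant-initialized hidden state."""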
    def __init__(self):
        super().__init__()
        self.seq_len = 4
        self._cell = SimpleRNNCell(
            3,
            3,
            3,
            fluid.ParamAttr(initializer=fluid.initializer.Constant(value=0.1)),
        )

    def forward(self, inputs):
        outs = list()
        pre_hiddens = list()

        init_hidden = self.create_parameter(
            attr=fluid.ParamAttr(
                initializer=fluid.initializer.Constant(value=0.1)
            ),
            shape=[1, 3],
            dtype='float32',
            is_bias=False,
        )
        pre_hidden = init_hidden
        for i in range(self.seq_len):
            input = paddle.slice(inputs, axes=[1], starts=[i], ends=[i + 1])
            input = paddle.reshape(input, shape=[1, 3])
            out_softmax, pre_hidden = self._cell(input, pre_hidden)
            outs.append(out_softmax)

        return outs, pre_hiddens


class TestImperative(unittest.TestCase):
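    """Basic imperative (dygraph) coverage: mode switching, gradient
    bookkeeping, and dygraph vs. static-graph parity."""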
    def functional_dygraph_context(self):
        self.assertFalse(fluid.dygraph.enabled())
        fluid.enable_dygraph()
        self.assertTrue(fluid.dygraph.enabled())
        np_inp = np.array([[1.0, 2.0], [3.0, 4.0]], dtype=np.float32)
        var_inp = paddle.to_tensor(np_inp)
        mlp = MLP(input_size=2)
        out = mlp(var_inp)
        dy_out1 = out.numpy()
        out.backward()
        dy_grad1 = mlp._linear1.weight.gradient()
        fluid.disable_dygraph()
        self.assertFalse(fluid.dygraph.enabled())
        with fluid.dygraph.guard():
            self.assertTrue(fluid.dygraph.enabled())
            var_inp = paddle.to_tensor(np_inp)
            mlp = MLP(input_size=2)
            out = mlp(var_inp)
            dy_out2 = out.numpy()
            out.backward()
            dy_grad2 = mlp._linear1.weight.gradient()
        self.assertFalse(fluid.dygraph.enabled())
        np.testing.assert_array_equal(dy_out1, dy_out2)
        np.testing.assert_array_equal(dy_grad1, dy_grad2)

    def test_functional_dygraph_context(self):
        with _test_eager_guard():
            self.functional_dygraph_context()
        self.functional_dygraph_context()

    def functional_paddle_imperative_dygraph_context(self):
        self.assertFalse(paddle.in_dynamic_mode())
        paddle.disable_static()
        self.assertTrue(paddle.in_dynamic_mode())
        np_inp = np.array([[1.0, 2.0], [3.0, 4.0]], dtype=np.float32)
        var_inp = paddle.to_tensor(np_inp)
        mlp = MLP(input_size=2)
        out = mlp(var_inp)
        dy_out1 = out.numpy()
        out.backward()
        dy_grad1 = mlp._linear1.weight.gradient()
        paddle.enable_static()
        self.assertFalse(paddle.in_dynamic_mode())
        paddle.disable_static()
        self.assertTrue(paddle.in_dynamic_mode())
        var_inp = paddle.to_tensor(np_inp)
        mlp = MLP(input_size=2)
        out = mlp(var_inp)
        dy_out2 = out.numpy()
        out.backward()
        dy_grad2 = mlp._linear1.weight.gradient()
        paddle.enable_static()
        self.assertFalse(paddle.in_dynamic_mode())
        np.testing.assert_array_equal(dy_out1, dy_out2)
        np.testing.assert_array_equal(dy_grad1, dy_grad2)

    def test_functional_paddle_imperative_dygraph_context(self):
        with _test_eager_guard():
            self.functional_paddle_imperative_dygraph_context()
        self.functional_paddle_imperative_dygraph_context()

    def func_isinstance(self):
        var = fluid.layers.data(shape=[1], name='x', dtype='float32')
        self.assertTrue(isinstance(var, fluid.Variable))
        with fluid.dygraph.guard():
            if not _in_legacy_dygraph():
                var_base = paddle.to_tensor(np.array([3, 4, 5]))
                self.assertTrue(isinstance(var_base, core.eager.Tensor))
            else:
                var_base = paddle.to_tensor(np.array([3, 4, 5]))
                self.assertTrue(isinstance(var_base, core.VarBase))
                self.assertTrue(isinstance(var_base, fluid.Variable))

    def test_isinstance(self):
        with _test_eager_guard():
            self.func_isinstance()
        self.func_isinstance()

    def func_create_varbase(self):
        x = np.ones([2, 2], np.float32)
        y = np.zeros([3, 3], np.float32)
        t = fluid.Tensor()
        t.set(x, fluid.CPUPlace())
        if not _in_legacy_dygraph():
            egr_tmp = fluid.core.eager.Tensor(
                value=x, place=fluid.core.CPUPlace()
            )
            egr_tmp2 = fluid.core.eager.Tensor(y, fluid.core.CPUPlace())
            egr_tmp3 = paddle.to_tensor(x)
            egr_tmp4 = fluid.core.eager.Tensor(y)
            egr_tmp5 = fluid.core.eager.Tensor(value=x)
            egr_tmp6 = fluid.core.eager.Tensor(t)

            np.testing.assert_array_equal(x, egr_tmp.numpy())
            np.testing.assert_array_equal(y, egr_tmp2.numpy())
            np.testing.assert_array_equal(x, egr_tmp3.numpy())
            np.testing.assert_array_equal(y, egr_tmp4.numpy())
            np.testing.assert_array_equal(x, egr_tmp5.numpy())
            np.testing.assert_array_equal(x, egr_tmp6.numpy())
        else:
            tmp = fluid.core.VarBase(value=x, place=fluid.core.CPUPlace())
            tmp2 = fluid.core.VarBase(y, fluid.core.CPUPlace())
            tmp3 = paddle.to_tensor(x)
            tmp4 = fluid.core.VarBase(y)
            tmp5 = fluid.core.VarBase(value=x)
            tmp6 = fluid.core.VarBase(t)

            np.testing.assert_array_equal(x, tmp.numpy())
            np.testing.assert_array_equal(y, tmp2.numpy())
            np.testing.assert_array_equal(x, tmp3.numpy())
            np.testing.assert_array_equal(y, tmp4.numpy())
            np.testing.assert_array_equal(x, tmp5.numpy())
            np.testing.assert_array_equal(x, tmp6.numpy())

    def test_create_varbase(self):
        with fluid.dygraph.guard():
            with _test_eager_guard():
                self.func_create_varbase()
            self.func_create_varbase()

    def test_no_grad_guard(self):
        data = np.array([[2, 3], [4, 5]]).astype('float32')
        with fluid.dygraph.guard():
            l0 = paddle.nn.Linear(2, 2)
            self.assertIsNone(l0.weight._grad_ivar())
            l1 = paddle.nn.Linear(2, 2)
            with fluid.dygraph.no_grad():
                self.assertTrue(l1.weight.stop_gradient is False)
                tmp = l1.weight * 2
                self.assertTrue(tmp.stop_gradient)
            x = paddle.to_tensor(data)
            y = paddle.add(l0(x), tmp)
            o = l1(y)
            o.backward()

            self.assertIsNone(tmp._grad_ivar())
            self.assertIsNotNone(l0.weight._grad_ivar())

    def test_paddle_imperative_no_grad_guard(self):
        data = np.array([[2, 3], [4, 5]]).astype('float32')
        with fluid.dygraph.guard():
            l0 = paddle.nn.Linear(2, 2)
            self.assertIsNone(l0.weight._grad_ivar())
            l1 = paddle.nn.Linear(2, 2)
            with paddle.no_grad():
                self.assertTrue(l1.weight.stop_gradient is False)
                tmp = l1.weight * 2
                self.assertTrue(tmp.stop_gradient)
            x = paddle.to_tensor(data)
            y = paddle.add(l0(x), tmp)
            o = l1(y)
            o.backward()

            self.assertIsNone(tmp._grad_ivar())
            self.assertIsNotNone(l0.weight._grad_ivar())

    def test_paddle_imperative_set_grad_enabled(self):
        data = np.array([[2, 3], [4, 5]]).astype('float32')
        with fluid.dygraph.guard():
            l0 = paddle.nn.Linear(2, 2)
            self.assertIsNone(l0.weight._grad_ivar())
            l1 = paddle.nn.Linear(2, 2)
            with paddle.set_grad_enabled(False):
                self.assertTrue(l1.weight.stop_gradient is False)
                tmp = l1.weight * 2
                with paddle.set_grad_enabled(True):
                    tmp2 = l1.weight * 2
                self.assertTrue(tmp.stop_gradient)
                self.assertTrue(tmp2.stop_gradient is False)
            x = paddle.to_tensor(data)
            y = paddle.add(l0(x), tmp2)
            o = l1(y)
            o.backward()

            self.assertIsNone(tmp._grad_ivar())
            self.assertIsNotNone(tmp2._grad_ivar())
            self.assertIsNotNone(l0.weight._grad_ivar())

    def test_paddle_imperative_is_grad_enabled(self):
        with fluid.dygraph.guard():
            with paddle.set_grad_enabled(False):
                self.assertTrue(paddle.is_grad_enabled() is False)
                with paddle.set_grad_enabled(True):
                    self.assertTrue(paddle.is_grad_enabled())

    def func_sum_op(self):
        x = np.ones([2, 2], np.float32)
        with fluid.dygraph.guard():
            inputs = []
            for _ in range(10):
                tmp = paddle.to_tensor(x)
                tmp.stop_gradient = False
                inputs.append(tmp)
            ret = paddle.add_n(inputs)
            loss = paddle.sum(ret)
            loss.backward()
        with fluid.dygraph.guard():
            inputs2 = []
            for _ in range(10):
                tmp = paddle.to_tensor(x)
                tmp.stop_gradient = False
                inputs2.append(tmp)
            ret2 = paddle.add_n(inputs2)
            loss2 = paddle.sum(ret2)
            fluid.set_flags({'FLAGS_sort_sum_gradient': True})
            loss2.backward()

            np.testing.assert_allclose(ret.numpy(), x * 10, rtol=1e-05)
            np.testing.assert_allclose(inputs[0].gradient(), x, rtol=1e-05)
            np.testing.assert_allclose(ret2.numpy(), x * 10, rtol=1e-05)
            a = inputs2[0].gradient()
            np.testing.assert_allclose(inputs2[0].gradient(), x, rtol=1e-05)

    def test_sum_op(self):
        with _test_eager_guard():
            self.func_sum_op()
        self.func_sum_op()

    def func_empty_var(self):
        with fluid.dygraph.guard():
            cur_program = fluid.Program()
            cur_block = cur_program.current_block()
            # Normally, creating a tensor with a -1 shape is not allowed in dygraph mode, so this test is not ideal.
            if _in_legacy_dygraph():
                new_variable = cur_block.create_var(
                    name="X", shape=[-1, 23, 48], dtype='float32'
                )
            else:
                new_variable = cur_block.create_var(
                    name="X", shape=[1, 23, 48], dtype='float32'
                )
            try:
                new_variable.numpy()
            except Exception as e:
                assert type(e) == ValueError

            try:
                new_variable.backward()
            except Exception as e:
                assert type(e) == core.EnforceNotMet
            try:
                new_variable.clear_gradient()
            except Exception as e:
                assert type(e) == core.EnforceNotMet

    def test_empty_var(self):
        with _test_eager_guard():
            self.func_empty_var()
        self.func_empty_var()

    def func_empty_grad(self):
        with fluid.dygraph.guard():
            x = np.ones([2, 2], np.float32)
            new_var = paddle.to_tensor(x)
            self.assertIsNone(new_var.gradient())
            try:
                new_var.clear_gradient()
            except Exception as e:
                assert type(e) == core.EnforceNotMet

        with fluid.dygraph.guard():
            cur_program = fluid.Program()
            cur_block = cur_program.current_block()
            # Normally, creating a tensor with a -1 shape is not allowed in dygraph mode, so this test is not ideal.
            if _in_legacy_dygraph():
                new_variable = cur_block.create_var(
                    name="X", shape=[-1, 23, 48], dtype='float32'
                )
            else:
                new_variable = cur_block.create_var(
                    name="X", shape=[1, 23, 48], dtype='float32'
                )
            try:
                new_variable.gradient()
            except Exception as e:
                assert type(e) == ValueError

    def test_empty_grad(self):
        with _test_eager_guard():
            self.func_empty_grad()
        self.func_empty_grad()

    def func_set_persistable(self):
        with fluid.dygraph.guard():
            x = np.ones([2, 2], np.float32)
            new_var = paddle.to_tensor(x)
            self.assertFalse(new_var.persistable)
            new_var.persistable = True
            self.assertTrue(new_var.persistable)

    def test_set_persistable(self):
        with _test_eager_guard():
            self.func_set_persistable()
        self.func_set_persistable()

    def func_layer(self):
        with fluid.dygraph.guard():
            l = fluid.Layer("l")
            self.assertRaises(NotImplementedError, l.forward, [])

    def test_layer(self):
        with _test_eager_guard():
            self.func_layer()
        self.func_layer()

    def func_layer_in_out(self):
        np_inp = np.array([1.0, 2.0, -1.0], dtype=np.float32)
        with fluid.dygraph.guard():
            var_inp = paddle.to_tensor(np_inp)
            var_inp.stop_gradient = False
            l = MyLayer()
            x = l(var_inp)[0]
            self.assertIsNotNone(x)
            dy_out = x.numpy()
            x.backward()
            dy_grad = l._x_for_debug.gradient()

        with fluid.dygraph.guard():
            var_inp2 = paddle.to_tensor(np_inp)
            var_inp2.stop_gradient = False
            l2 = MyLayer()
            x2 = l2(var_inp2)[0]
            self.assertIsNotNone(x2)
            dy_out2 = x2.numpy()
            fluid.set_flags({'FLAGS_sort_sum_gradient': True})
            x2.backward()
            dy_grad2 = l2._x_for_debug.gradient()

        with new_program_scope():
            inp = fluid.layers.data(
                name="inp", shape=[3], append_batch_size=False
            )
            l = MyLayer()
            x = l(inp)[0]
            param_grads = fluid.backward.append_backward(
                x, parameter_list=[l._x_for_debug.name]
            )[0]
            exe = fluid.Executor(
                fluid.CPUPlace()
                if not core.is_compiled_with_cuda()
                else fluid.CUDAPlace(0)
            )

            static_out, static_grad = exe.run(
                feed={inp.name: np_inp},
                fetch_list=[x.name, param_grads[1].name],
            )

        np.testing.assert_array_equal(dy_out, static_out)
        np.testing.assert_array_equal(dy_grad, static_grad)
        np.testing.assert_array_equal(dy_out2, static_out)
        np.testing.assert_array_equal(dy_grad2, static_grad)

    def test_layer_in_out(self):
        fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": True})
        with _test_eager_guard():
            self.func_layer_in_out()
        self.func_layer_in_out()
        fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": False})

    def func_mlp(self):
        np_inp = np.array([[1.0, 2.0], [3.0, 4.0]], dtype=np.float32)
        with fluid.dygraph.guard():
            var_inp = paddle.to_tensor(np_inp)
            mlp = MLP(input_size=2)
            out = mlp(var_inp)
            dy_out = out.numpy()
            out.backward()
            dy_grad = mlp._linear1.weight.gradient()

        with fluid.dygraph.guard():
            var_inp2 = paddle.to_tensor(np_inp)
            mlp2 = MLP(input_size=2)
            out2 = mlp2(var_inp2)
            dy_out2 = out2.numpy()
            fluid.set_flags({'FLAGS_sort_sum_gradient': True})
            out2.backward()
            dy_grad2 = mlp2._linear1.weight.gradient()

        with new_program_scope():
            inp = fluid.layers.data(
                name="inp", shape=[2, 2], append_batch_size=False
            )
            mlp = MLP(input_size=2)
            out = mlp(inp)
            param_grads = fluid.backward.append_backward(
                out, parameter_list=[mlp._linear1.weight.name]
            )[0]
            exe = fluid.Executor(
                fluid.CPUPlace()
                if not core.is_compiled_with_cuda()
                else fluid.CUDAPlace(0)
            )
            exe.run(fluid.default_startup_program())

            static_out, static_grad = exe.run(
                feed={inp.name: np_inp},
                fetch_list=[out.name, param_grads[1].name],
            )

        np.testing.assert_allclose(dy_out, static_out, rtol=1e-05)
        np.testing.assert_allclose(dy_grad, static_grad, rtol=1e-05)
        np.testing.assert_allclose(dy_out2, static_out, rtol=1e-05)
        np.testing.assert_allclose(dy_grad2, static_grad, rtol=1e-05)

        params = mlp.parameters(True)
        self.assertEqual("linear_0.w_0", params[0].name)
        self.assertEqual("linear_0.b_0", params[1].name)
        self.assertEqual("linear_1.w_0", params[2].name)
        self.assertEqual("linear_1.b_0", params[3].name)
        self.assertEqual(len(params), 4)

        sublayers = mlp.sublayers()
        self.assertEqual(mlp._linear1, sublayers[0])
        self.assertEqual(mlp._linear2, sublayers[1])
        self.assertEqual(len(sublayers), 2)

    def test_mlp(self):
        with _test_eager_guard():
            self.func_mlp()
        self.func_mlp()

    def test_gradient_accumulation(self):
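        # Three scenarios: repeated backward on a single tensor, a small
        # graph exercising retain_graph and paddle.grad, and an MLP whose
        # gradients accumulate across batches.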
        def test_single_api(sort_sum_gradient):
            fluid.set_flags({'FLAGS_sort_sum_gradient': sort_sum_gradient})
            x = paddle.to_tensor(5.0, stop_gradient=False)
            for i in range(10):
                y = paddle.pow(x, 4.0)
                y.backward()
                self.assertEqual(x.grad.numpy(), (i + 1) * 500)
            x.clear_gradient()
            self.assertEqual(x.grad.numpy(), 0.0)
            for i in range(10):
                y = paddle.pow(x, 4.0)
                y.backward()
                self.assertEqual(x.grad.numpy(), (i + 1) * 500)
            x.clear_grad()
            self.assertEqual(x.grad.numpy(), 0.0)

        def test_simple_net(sort_sum_gradient):
            fluid.set_flags({'FLAGS_sort_sum_gradient': sort_sum_gradient})
            x = paddle.to_tensor(5.0, stop_gradient=False)
            y = paddle.to_tensor(2.0, stop_gradient=False)
            z = paddle.to_tensor(3.0, stop_gradient=False)

            def fun(x, y, z):
                loss1 = x * x * y
                loss2 = x * z
                loss1.backward(retain_graph=True)
                loss2.backward(retain_graph=True)
                np.testing.assert_array_equal(x.grad.numpy(), [23.0])
                np.testing.assert_array_equal(y.grad.numpy(), [25.0])
                np.testing.assert_array_equal(z.grad.numpy(), [5.0])
                x.clear_grad()
                y.clear_grad()
                z.clear_grad()

                dx = paddle.grad([loss1], x, create_graph=True)[0]
                loss = loss1 + loss2 + dx
                # loss = x*x*y + x*z + 2*x*y
                return loss

            loss = fun(x, y, z)
            loss.backward(retain_graph=True)
            # x.grad = 2*x*y + z + 2*y = 27
            np.testing.assert_array_equal(x.grad.numpy(), [27])

            loss.backward(retain_graph=True)
            np.testing.assert_array_equal(x.grad.numpy(), [54])

            loss.backward()
            np.testing.assert_array_equal(x.grad.numpy(), [81])

            with self.assertRaises(RuntimeError):
                loss.backward()

            loss1 = x * x * y
            loss2 = x * z
            dx = paddle.grad([loss1], x, create_graph=True)[0]
            loss = loss1 + loss2 + dx
            loss.backward()
            np.testing.assert_array_equal(dx.grad.numpy(), [1])
            np.testing.assert_array_equal(x.grad.numpy(), [108])

        def test_mlp(sort_sum_gradient):
            fluid.set_flags({'FLAGS_sort_sum_gradient': sort_sum_gradient})
            input_size = 5
            paddle.seed(1)
            mlp1 = MLP(input_size=input_size)
            # generate the gradient of each step
            mlp2 = MLP(input_size=input_size)

            expected_weight1_grad = 0.0
            expected_bias1_grad = 0.0
            expected_weight2_grad = 0.0
            expected_bias2_grad = 0.0

            for batch_id in range(100):
                x = paddle.uniform([10, input_size])
                detach_x = x.detach()
                clear_loss = mlp2(detach_x)
                clear_loss.backward()
                expected_weight1_grad = (
                    expected_weight1_grad + mlp2._linear1.weight.grad.numpy()
                )
                expected_bias1_grad = (
                    expected_bias1_grad + mlp2._linear1.bias.grad.numpy()
                )
                expected_weight2_grad = (
                    expected_weight2_grad + mlp2._linear2.weight.grad.numpy()
                )
                expected_bias2_grad = (
                    expected_bias2_grad + mlp2._linear2.bias.grad.numpy()
                )

                loss = mlp1(x)
                loss.backward()

                np.testing.assert_array_equal(loss.grad.numpy(), [1])
                np.testing.assert_allclose(
                    mlp1._linear1.weight.grad.numpy(),
                    expected_weight1_grad,
                    rtol=1e-05,
                )
                np.testing.assert_allclose(
                    mlp1._linear1.bias.grad.numpy(),
                    expected_bias1_grad,
                    rtol=1e-05,
                )
                np.testing.assert_allclose(
                    mlp1._linear2.weight.grad.numpy(),
                    expected_weight2_grad,
                    rtol=1e-05,
                )
                np.testing.assert_allclose(
                    mlp1._linear2.bias.grad.numpy(),
                    expected_bias2_grad,
                    rtol=1e-05,
                )

                mlp2.clear_gradients()
                np.testing.assert_array_equal(clear_loss.grad.numpy(), [1])
                if ((batch_id + 1) % 10) % 2 == 0:
                    mlp1.clear_gradients()
                    expected_weight1_grad = 0.0
                    expected_bias1_grad = 0.0
                    expected_weight2_grad = 0.0
                    expected_bias2_grad = 0.0
                elif ((batch_id + 1) % 10) % 2 == 1:
                    mlp1.clear_gradients()
                    mlp1._linear1.weight._set_grad_ivar(
                        paddle.ones([input_size, 3])
                    )
                    mlp1._linear2.weight._set_grad_ivar(paddle.ones([3, 4]))
                    expected_weight1_grad = 1.0
                    expected_bias1_grad = 0.0
                    expected_weight2_grad = 1.0
                    expected_bias2_grad = 0.0

        with fluid.dygraph.guard():
            test_single_api(False)
            test_single_api(True)
            test_simple_net(False)
            test_simple_net(True)
            test_mlp(False)
            test_mlp(True)

    def func_dygraph_vs_static(self):
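        # The dygraph branch picks add or sub with a plain Python if; the
        # static branch expresses the same choice with fluid.layers.IfElse,
        # and the two results should match.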
        np_inp1 = np.random.rand(4, 3, 3)
        np_inp2 = np.random.rand(4, 3, 3)

        # dynamic graph
        with fluid.dygraph.guard():
            inp1 = paddle.to_tensor(np_inp1)
            inp2 = paddle.to_tensor(np_inp2)
            if np.sum(np_inp1) < np.sum(np_inp2):
                x = fluid.layers.elementwise_add(inp1, inp2)
            else:
                x = fluid.layers.elementwise_sub(inp1, inp2)
            dygraph_result = x.numpy()

        # static graph
        with new_program_scope():
            inp_data1 = fluid.layers.data(
                name='inp1', shape=[3, 3], dtype=np.float32
            )
            inp_data2 = fluid.layers.data(
                name='inp2', shape=[3, 3], dtype=np.float32
            )

            a = paddle.expand(
                paddle.reshape(paddle.sum(inp_data1), [1, 1]),
                [4, -1],
            )
            b = paddle.expand(
                paddle.reshape(paddle.sum(inp_data2), [1, 1]),
                [4, -1],
            )
            cond = fluid.layers.less_than(x=a, y=b)

            ie = fluid.layers.IfElse(cond)
            with ie.true_block():
                d1 = ie.input(inp_data1)
                d2 = ie.input(inp_data2)
                d3 = fluid.layers.elementwise_add(d1, d2)
                ie.output(d3)

            with ie.false_block():
                d1 = ie.input(inp_data1)
                d2 = ie.input(inp_data2)
                d3 = fluid.layers.elementwise_sub(d1, d2)
                ie.output(d3)
            out = ie()

            exe = fluid.Executor(
                fluid.CPUPlace()
                if not core.is_compiled_with_cuda()
                else fluid.CUDAPlace(0)
            )
            static_result = exe.run(
                fluid.default_main_program(),
                feed={'inp1': np_inp1, 'inp2': np_inp2},
                fetch_list=out,
            )[0]
        np.testing.assert_allclose(dygraph_result, static_result, rtol=1e-05)

    def test_dygraph_vs_static(self):
        with _test_eager_guard():
            self.func_dygraph_vs_static()
        self.func_dygraph_vs_static()

    def func_rnn(self):
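        # Run SimpleRNN in two dygraph configurations and in static-graph
        # mode, then compare outputs and parameter gradients.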
        np_inp = np.array(
            [
                [1.0, 2.0, 3.0],
                [4.0, 5.0, 6.0],
                [7.0, 8.0, 9.0],
                [10.0, 11.0, 12.0],
            ]
        )
        np_inp = np_inp.reshape((1, 4, 3))
        np_inp = np_inp.astype(np.float32)
        with fluid.dygraph.guard():
            var_inp = paddle.to_tensor(np_inp)
            var_inp = paddle.reshape(var_inp, shape=[1, 4, 3])
            simple_rnn = SimpleRNN()
            outs, pre_hiddens = simple_rnn.forward(var_inp)
            dy_out = outs[3].numpy()
            outs[3].backward()
            dy_grad_h2o = simple_rnn._cell._h2o_w.gradient()
            dy_grad_h2h = simple_rnn._cell._h2h_w.gradient()
            dy_grad_i2h = simple_rnn._cell._i2h_w.gradient()

        with fluid.dygraph.guard():
            var_inp2 = paddle.to_tensor(np_inp)
            var_inp2 = paddle.reshape(var_inp2, shape=[1, 4, 3])
            simple_rnn2 = SimpleRNN()
            outs2, pre_hiddens2 = simple_rnn2.forward(var_inp2)
            dy_out2 = outs2[3].numpy()
            fluid.set_flags({'FLAGS_sort_sum_gradient': True})
            outs2[3].backward()
            dy_grad_h2o2 = simple_rnn2._cell._h2o_w.gradient()
            dy_grad_h2h2 = simple_rnn2._cell._h2h_w.gradient()
            dy_grad_i2h2 = simple_rnn2._cell._i2h_w.gradient()

        with new_program_scope():
            inp = fluid.layers.data(
                name="inp", shape=[1, 4, 3], append_batch_size=False
            )
            simple_rnn = SimpleRNN()
            outs, pre_hiddens = simple_rnn(inp)
            param_grads = fluid.backward.append_backward(outs[3])
            exe = fluid.Executor(fluid.CPUPlace())
            exe.run(fluid.default_startup_program())
            (
                static_out,
                static_grad_h2o,
                static_grad_h2h,
                static_grad_i2h,
            ) = exe.run(
                feed={inp.name: np_inp},
                fetch_list=[
                    outs[3].name,
                    param_grads[0][1].name,
                    param_grads[1][1].name,
                    param_grads[2][1].name,
                ],
            )

        np.testing.assert_array_equal(dy_out, static_out)
        np.testing.assert_array_equal(dy_grad_h2o, static_grad_h2o)
        np.testing.assert_array_equal(dy_grad_h2h, static_grad_h2h)
        np.testing.assert_array_equal(dy_grad_i2h, static_grad_i2h)
        np.testing.assert_array_equal(dy_out2, static_out)
        np.testing.assert_array_equal(dy_grad_h2o2, static_grad_h2o)
        np.testing.assert_array_equal(dy_grad_h2h2, static_grad_h2h)
        np.testing.assert_array_equal(dy_grad_i2h2, static_grad_i2h)

    def test_rnn(self):
        with _test_eager_guard():
            self.func_rnn()
        self.func_rnn()

    def func_layer_attrs(self):
        layer = fluid.dygraph.Layer("test")
        layer.test_attr = 1
        self.assertFalse(hasattr(layer, "whatever"))
        self.assertTrue(hasattr(layer, "test_attr"))
        self.assertEqual(layer.test_attr, 1)

        my_layer = MyLayer()
        my_layer.w1 = my_layer.create_parameter([3, 3])
        my_layer.add_parameter('w2', None)
        self.assertEqual(len(my_layer.parameters()), 1)
        self.assertRaises(TypeError, my_layer.__setattr__, 'w1', 'str')
        my_layer.w1 = None
        self.assertEqual(len(my_layer.parameters()), 0)
        my_layer.l1 = paddle.nn.Linear(3, 3)
        self.assertEqual(len(my_layer.sublayers()), 1)
        self.assertRaises(TypeError, my_layer.__setattr__, 'l1', 'str')
        my_layer.l1 = None
        self.assertEqual(len(my_layer.sublayers()), 0)

    def test_layer_attrs(self):
        with _test_eager_guard():
            self.func_layer_attrs()
        self.func_layer_attrs()


class TestDygraphUtils(unittest.TestCase):
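    """Covers the dygraph helpers that append activations and bias."""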
    def func_append_activation_in_dygraph_exception(self):
        with new_program_scope():
            np_inp = np.random.random(size=(10, 20, 30)).astype(np.float32)
            a = fluid.layers.data("a", [10, 20])
            func = dygraph_utils._append_activation_in_dygraph
            self.assertRaises(AssertionError, func, a, act="sigmoid")

    def test_append_activation_in_dygraph_exception(self):
        with _test_eager_guard():
            self.func_append_activation_in_dygraph_exception()
        self.func_append_activation_in_dygraph_exception()

    def func_append_activation_in_dygraph1(self):
        a_np = np.random.random(size=(10, 20, 30)).astype(np.float32)
        func = dygraph_utils._append_activation_in_dygraph
        with fluid.dygraph.guard():
            a = paddle.to_tensor(a_np)
            res1 = func(a, act="hard_sigmoid")
            res2 = paddle.nn.functional.hardsigmoid(a, slope=0.2)
            np.testing.assert_array_equal(res1.numpy(), res2.numpy())

    def test_append_activation_in_dygraph1(self):
        with _test_eager_guard():
            self.func_append_activation_in_dygraph1()
        self.func_append_activation_in_dygraph1()

    def func_append_activation_in_dygraph2(self):
        a_np = np.random.random(size=(10, 20, 30)).astype(np.float32)
        func = dygraph_utils._append_activation_in_dygraph
        with fluid.dygraph.guard():
            a = paddle.to_tensor(a_np)
            res1 = func(a, act="sigmoid", use_mkldnn=True, use_cudnn=True)
            res2 = paddle.nn.functional.sigmoid(a)
            np.testing.assert_allclose(res1.numpy(), res2.numpy(), rtol=1e-05)

    def test_append_activation_in_dygraph2(self):
        with _test_eager_guard():
            self.func_append_activation_in_dygraph2()
        self.func_append_activation_in_dygraph2()

    def func_append_activation_in_dygraph3(self):
        a_np = np.random.random(size=(10, 20, 30)).astype(np.float32)
        helper = LayerObjectHelper(fluid.unique_name.generate("test"))
        func = helper.append_activation
        with fluid.dygraph.guard():
            a = paddle.to_tensor(a_np)
            res1 = func(a, act="sigmoid", use_cudnn=True)
            res2 = paddle.nn.functional.sigmoid(a)
            np.testing.assert_array_equal(res1.numpy(), res2.numpy())

    def test_append_activation_in_dygraph3(self):
        with _test_eager_guard():
            self.func_append_activation_in_dygraph3()
        self.func_append_activation_in_dygraph3()

    def func_append_activation_in_dygraph_use_mkldnn(self):
        a_np = np.random.uniform(-2, 2, (10, 20, 30)).astype(np.float32)
        helper = LayerHelper(
            fluid.unique_name.generate("test"), act="relu", use_mkldnn=True
        )
        func = helper.append_activation
        with fluid.dygraph.guard():
            a = paddle.to_tensor(a_np)
            res1 = func(a)
            res2 = fluid.layers.relu(a)
            np.testing.assert_array_equal(res1.numpy(), res2.numpy())

    def test_append_activation_in_dygraph_use_mkldnn(self):
        with _test_eager_guard():
            self.func_append_activation_in_dygraph_use_mkldnn()
        self.func_append_activation_in_dygraph_use_mkldnn()

    def func_append_activation_in_dygraph_global_use_mkldnn(self):
        a_np = np.random.uniform(-2, 2, (10, 20, 30)).astype(np.float32)
        helper = LayerHelper(fluid.unique_name.generate("test"), act="relu")
        func = helper.append_activation
        with fluid.dygraph.guard(fluid.core.CPUPlace()):
            a = paddle.to_tensor(a_np)
            fluid.set_flags({'FLAGS_use_mkldnn': True})
            try:
                res1 = func(a)
            finally:
                fluid.set_flags({'FLAGS_use_mkldnn': False})
            res2 = fluid.layers.relu(a)
        np.testing.assert_array_equal(res1.numpy(), res2.numpy())

    def test_append_activation_in_dygraph_global_use_mkldnn(self):
        with _test_eager_guard():
            self.func_append_activation_in_dygraph_global_use_mkldnn()
        self.func_append_activation_in_dygraph_global_use_mkldnn()

    def func_append_bias_in_dygraph_exception(self):
971 972 973 974 975 976
        with new_program_scope():
            np_inp = np.random.random(size=(10, 20, 30)).astype(np.float32)
            a = fluid.layers.data("a", [10, 20])
            func = dygraph_utils._append_bias_in_dygraph
            self.assertRaises(AssertionError, func, a)

    def test_append_bias_in_dygraph_exception(self):
        with _test_eager_guard():
            self.func_append_bias_in_dygraph_exception()
        self.func_append_bias_in_dygraph_exception()

    def func_append_bias_in_dygraph(self):
983 984 985
        a_np = np.random.random(size=(10, 20, 30)).astype(np.float32)
        func = dygraph_utils._append_bias_in_dygraph
        with fluid.dygraph.guard():
            a = paddle.to_tensor(a_np)
            res1 = func(a, bias=a)
            res2 = paddle.add(a, a)
            np.testing.assert_array_equal(res1.numpy(), res2.numpy())

    def test_append_bias_in_dygraph(self):
        with _test_eager_guard():
            self.func_append_bias_in_dygraph()
        self.func_append_bias_in_dygraph()


class TestDygraphGuardWithError(unittest.TestCase):
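    """Using a dygraph Tensor outside the dygraph guard should raise a
    TypeError pointing the user back to fluid.dygraph.guard()."""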
    def func_without_guard(self):
        with fluid.dygraph.guard():
            x = paddle.to_tensor(np.zeros([10, 10]))
        with self.assertRaisesRegexp(
            TypeError, "Please use `with fluid.dygraph.guard()"
        ):
            y = fluid.layers.matmul(x, x)

    def test_without_guard(self):
        with _test_eager_guard():
            self.func_without_guard()
        self.func_without_guard()


class TestMetaclass(unittest.TestCase):
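    """The Layer metaclass should be plain `type`; only the legacy VarBase
    is expected to be a pybind11 type."""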
    def func_metaclass(self):
        self.assertEqual(type(MyLayer).__name__, 'type')
        self.assertNotEqual(type(MyLayer).__name__, 'pybind11_type')
        if not _in_legacy_dygraph():
            self.assertEqual(
                type(paddle.fluid.core.eager.Tensor).__name__, 'type'
            )
        else:
            self.assertEqual(
1022 1023
                type(paddle.fluid.core.VarBase).__name__, 'pybind11_type'
            )
1024 1025 1026 1027 1028

    def test_metaclass(self):
        with _test_eager_guard():
            self.func_metaclass()
        self.func_metaclass()


if __name__ == '__main__':
    paddle.enable_static()
    unittest.main()