# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import contextlib
import unittest
import numpy as np

import paddle.fluid as fluid
from paddle.fluid import core
from paddle.fluid import Linear
from test_imperative_base import new_program_scope
import paddle.fluid.dygraph_utils as dygraph_utils


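# A minimal layer used by the tests below: it applies relu, stashes the
# intermediate result on the layer for gradient checks, then squares and
# sums it.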
class MyLayer(fluid.Layer):
    def __init__(self):
        super(MyLayer, self).__init__()

    def forward(self, inputs):
        x = fluid.layers.relu(inputs)
        self._x_for_debug = x
        x = fluid.layers.elementwise_mul(x, x)
        x = fluid.layers.reduce_sum(x)
        return [x]


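# A small two-Linear MLP with constant-initialized weights and biases so
# that dygraph and static-graph runs produce identical, comparable values.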
class MLP(fluid.Layer):
    def __init__(self, input_size):
        super(MLP, self).__init__()
        self._linear1 = None
        self._linear1 = Linear(
            input_size,
            3,
            param_attr=fluid.ParamAttr(
                initializer=fluid.initializer.Constant(value=0.1)),
            bias_attr=fluid.ParamAttr(
                initializer=fluid.initializer.Constant(value=0.1)))
        self._linear2 = Linear(
            3,
            4,
            param_attr=fluid.ParamAttr(
                initializer=fluid.initializer.Constant(value=0.1)),
            bias_attr=fluid.ParamAttr(
                initializer=fluid.initializer.Constant(value=0.1)))

    def forward(self, inputs):
        x = self._linear1(inputs)
        x = self._linear2(x)
        x = fluid.layers.reduce_sum(x)
        return x


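# A hand-rolled RNN cell that builds its ops directly through the layer
# helper (mul, elementwise_add, tanh, mul, softmax, reduce_sum) rather than
# through the high-level layer APIs.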
class SimpleRNNCell(fluid.Layer):
    def __init__(self, step_input_size, hidden_size, output_size, param_attr):
        super(SimpleRNNCell, self).__init__()
        self.step_input_size = step_input_size
        self.hidden_size = hidden_size
        self.output_size = output_size
        self._dtype = core.VarDesc.VarType.FP32
        self.param_attr = param_attr

        i2h_param_shape = [self.step_input_size, self.hidden_size]
        h2h_param_shape = [self.hidden_size, self.hidden_size]
        h2o_param_shape = [self.output_size, self.hidden_size]
        self._i2h_w = None
        self._i2h_w = self.create_parameter(
            attr=self.param_attr,
            shape=i2h_param_shape,
            dtype=self._dtype,
            is_bias=False)
        self._h2h_w = self.create_parameter(
            attr=self.param_attr,
            shape=h2h_param_shape,
            dtype=self._dtype,
            is_bias=False)
        self._h2o_w = self.create_parameter(
            attr=self.param_attr,
            shape=h2o_param_shape,
            dtype=self._dtype,
            is_bias=False)

    def forward(self, input, pre_hidden):
        tmp_i2h = self.create_variable(dtype=self._dtype)
        tmp_h2h = self.create_variable(dtype=self._dtype)
        hidden = self.create_variable(dtype=self._dtype)
        out = self.create_variable(dtype=self._dtype)
        softmax_out = self.create_variable(dtype=self._dtype)
        reduce_out = self.create_variable(dtype=self._dtype)
        self._helper.append_op(
            type="mul",
            inputs={"X": input,
                    "Y": self._i2h_w},
            outputs={"Out": tmp_i2h},
            attrs={"x_num_col_dims": 1,
                   "y_num_col_dims": 1})

        self._helper.append_op(
            type="mul",
            inputs={"X": pre_hidden,
                    "Y": self._h2h_w},
            outputs={"Out": tmp_h2h},
            attrs={"x_num_col_dims": 1,
                   "y_num_col_dims": 1})

        self._helper.append_op(
            type="elementwise_add",
            inputs={'X': tmp_h2h,
                    'Y': tmp_i2h},
            outputs={'Out': hidden},
            attrs={'axis': -1,
                   'use_mkldnn': False})
        hidden = self._helper.append_activation(hidden, act='tanh')

        self._helper.append_op(
            type="mul",
            inputs={"X": hidden,
                    "Y": self._h2o_w},
            outputs={"Out": out},
            attrs={"x_num_col_dims": 1,
                   "y_num_col_dims": 1})

        self._helper.append_op(
            type="softmax",
            inputs={"X": out},
            outputs={"Out": softmax_out},
            attrs={"use_cudnn": False})

        self._helper.append_op(
            type='reduce_sum',
            inputs={'X': softmax_out},
            outputs={'Out': reduce_out},
            attrs={'keep_dim': False,
                   'reduce_all': True})

        return reduce_out, hidden


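# Unrolls SimpleRNNCell over a fixed sequence length of 4, feeding one
# time step at a time and collecting the per-step outputs.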
class SimpleRNN(fluid.Layer):
    def __init__(self):
        super(SimpleRNN, self).__init__()
        self.seq_len = 4
        self._cell = SimpleRNNCell(
            3,
            3,
            3,
            fluid.ParamAttr(initializer=fluid.initializer.Constant(value=0.1)))

    def forward(self, inputs):
        outs = list()
        pre_hiddens = list()

        init_hidden = self.create_parameter(
            attr=fluid.ParamAttr(
                initializer=fluid.initializer.Constant(value=0.1)),
            shape=[1, 3],
            dtype='float32',
            is_bias=False)
        pre_hidden = init_hidden
        for i in range(self.seq_len):
            input = fluid.layers.slice(
                inputs, axes=[1], starts=[i], ends=[i + 1])
            input = fluid.layers.reshape(input, shape=[1, 3])
            out_softmax, pre_hidden = self._cell(input, pre_hidden)
            outs.append(out_softmax)

        return outs, pre_hiddens


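# Tests for basic imperative (dygraph) behaviour, including comparisons of
# dygraph outputs and gradients against equivalent static graph programs.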
class TestImperative(unittest.TestCase):
    def test_functional_dygraph_context(self):
        self.assertFalse(fluid.dygraph.enabled())
        fluid.enable_dygraph()
        self.assertTrue(fluid.dygraph.enabled())
        np_inp = np.array([[1.0, 2.0], [3.0, 4.0]], dtype=np.float32)
        var_inp = fluid.dygraph.base.to_variable(np_inp)
        mlp = MLP(input_size=2)
        out = mlp(var_inp)
        dy_out1 = out.numpy()
        out.backward()
        dy_grad1 = mlp._linear1.weight.gradient()
        fluid.disable_dygraph()
        self.assertFalse(fluid.dygraph.enabled())
        with fluid.dygraph.guard():
            self.assertTrue(fluid.dygraph.enabled())
            var_inp = fluid.dygraph.base.to_variable(np_inp)
            mlp = MLP(input_size=2)
            out = mlp(var_inp)
            dy_out2 = out.numpy()
            out.backward()
            dy_grad2 = mlp._linear1.weight.gradient()
        self.assertFalse(fluid.dygraph.enabled())
        self.assertTrue(np.array_equal(dy_out1, dy_out2))
        self.assertTrue(np.array_equal(dy_grad1, dy_grad2))

    def test_isinstance(self):
        var = fluid.layers.data(shape=[1], name='x', dtype='float32')
        self.assertTrue(isinstance(var, fluid.Variable))
        with fluid.dygraph.guard():
            var_base = fluid.dygraph.base.to_variable(np.array([3, 4, 5]))
            self.assertTrue(isinstance(var_base, core.VarBase))
            self.assertTrue(isinstance(var_base, fluid.Variable))

    def test_create_VarBase(self):
        x = np.ones([2, 2], np.float32)
        y = np.zeros([3, 3], np.float32)
        with fluid.dygraph.guard():
            tmp = fluid.core.VarBase(value=x, place=fluid.core.CPUPlace())
            tmp2 = fluid.core.VarBase(y, fluid.core.CPUPlace())
            tmp3 = fluid.dygraph.base.to_variable(x)
            tmp4 = fluid.core.VarBase(y)
            tmp5 = fluid.core.VarBase(value=x)

            self.assertTrue(np.array_equal(x, tmp.numpy()))
            self.assertTrue(np.array_equal(y, tmp2.numpy()))
            self.assertTrue(np.array_equal(x, tmp3.numpy()))
            self.assertTrue(np.array_equal(y, tmp4.numpy()))
            self.assertTrue(np.array_equal(x, tmp5.numpy()))

    def test_no_grad_guard(self):
        data = np.array([[2, 3], [4, 5]]).astype('float32')
        with fluid.dygraph.guard():
            l0 = fluid.Linear(2, 2)
            self.assertTrue(l0.weight._grad_ivar() is None)
            l1 = fluid.Linear(2, 2)
            with fluid.dygraph.no_grad():
                self.assertTrue(l1.weight.stop_gradient is False)
                tmp = l1.weight * 2
                self.assertTrue(tmp.stop_gradient)
            x = fluid.dygraph.to_variable(data)
            y = l0(x) + tmp
            o = l1(y)
            o.backward()

            self.assertTrue(tmp._grad_ivar() is None)
            self.assertTrue(l0.weight._grad_ivar() is not None)

    def test_sum_op(self):
        x = np.ones([2, 2], np.float32)
        with fluid.dygraph.guard():
            inputs = []
            for _ in range(10):
                tmp = fluid.dygraph.base.to_variable(x)
                tmp.stop_gradient = False
                inputs.append(tmp)
            ret = fluid.layers.sums(inputs)
            loss = fluid.layers.reduce_sum(ret)
            loss.backward()
        with fluid.dygraph.guard():
            inputs2 = []
            for _ in range(10):
                tmp = fluid.dygraph.base.to_variable(x)
                tmp.stop_gradient = False
                inputs2.append(tmp)
            ret2 = fluid.layers.sums(inputs2)
            loss2 = fluid.layers.reduce_sum(ret2)
            backward_strategy = fluid.dygraph.BackwardStrategy()
            backward_strategy.sort_sum_gradient = True
            loss2.backward(backward_strategy)

            self.assertTrue(np.allclose(ret.numpy(), x * 10))
            self.assertTrue(np.allclose(inputs[0].gradient(), x))
            self.assertTrue(np.allclose(ret2.numpy(), x * 10))
            a = inputs2[0].gradient()
            self.assertTrue(np.allclose(inputs2[0].gradient(), x))

    def test_empty_var(self):
        with fluid.dygraph.guard():
            cur_program = fluid.Program()
            cur_block = cur_program.current_block()
            new_variable = cur_block.create_var(
                name="X", shape=[-1, 23, 48], dtype='float32')
            try:
                new_variable.numpy()
            except Exception as e:
                assert type(e) == core.EnforceNotMet

            try:
                new_variable.backward()
            except Exception as e:
                assert type(e) == core.EnforceNotMet

            try:
                new_variable.clear_gradient()
            except Exception as e:
                assert type(e) == core.EnforceNotMet

    def test_empty_grad(self):
        with fluid.dygraph.guard():
            x = np.ones([2, 2], np.float32)
            new_var = fluid.dygraph.base.to_variable(x)
            try:
                new_var.gradient()
            except Exception as e:
                assert type(e) == ValueError

            try:
                new_var.clear_gradient()
            except Exception as e:
                assert type(e) == core.EnforceNotMet

        with fluid.dygraph.guard():
            cur_program = fluid.Program()
            cur_block = cur_program.current_block()
            new_variable = cur_block.create_var(
                name="X", shape=[-1, 23, 48], dtype='float32')
            try:
                new_variable.gradient()
            except Exception as e:
                assert type(e) == ValueError

    def test_set_persistable(self):
        with fluid.dygraph.guard():
            x = np.ones([2, 2], np.float32)
            new_var = fluid.dygraph.base.to_variable(x)
            self.assertFalse(new_var.persistable)
            new_var.persistable = True
            self.assertTrue(new_var.persistable)

    def test_layer(self):
        with fluid.dygraph.guard():
            cl = core.Layer()
            cl.forward([])
            l = fluid.Layer("l")
            self.assertRaises(NotImplementedError, l.forward, [])

    def test_layer_in_out(self):
        np_inp = np.array([1.0, 2.0, -1.0], dtype=np.float32)
        with fluid.dygraph.guard():
            var_inp = fluid.dygraph.base.to_variable(np_inp)
            var_inp.stop_gradient = False
            l = MyLayer()
            print(var_inp)
            x = l(var_inp)[0]
            self.assertIsNotNone(x)
            dy_out = x.numpy()
            x.backward()
            dy_grad = l._x_for_debug.gradient()

        with fluid.dygraph.guard():
            var_inp2 = fluid.dygraph.base.to_variable(np_inp)
            var_inp2.stop_gradient = False
            l2 = MyLayer()
            x2 = l2(var_inp2)[0]
            self.assertIsNotNone(x2)
            dy_out2 = x2.numpy()
            backward_strategy = fluid.dygraph.BackwardStrategy()
            backward_strategy.sort_sum_gradient = True
            x2.backward(backward_strategy)
            dy_grad2 = l2._x_for_debug.gradient()

        with new_program_scope():
            inp = fluid.layers.data(
                name="inp", shape=[3], append_batch_size=False)
            l = MyLayer()
            x = l(inp)[0]
            param_grads = fluid.backward.append_backward(
                x, parameter_list=[l._x_for_debug.name])[0]
            exe = fluid.Executor(fluid.CPUPlace(
            ) if not core.is_compiled_with_cuda() else fluid.CUDAPlace(0))

            static_out, static_grad = exe.run(
                feed={inp.name: np_inp},
                fetch_list=[x.name, param_grads[1].name])

        self.assertTrue(np.allclose(dy_out, static_out))
        self.assertTrue(np.allclose(dy_grad, static_grad))
        self.assertTrue(np.allclose(dy_out2, static_out))
        self.assertTrue(np.allclose(dy_grad2, static_grad))

    def test_mlp(self):
        np_inp = np.array([[1.0, 2.0], [3.0, 4.0]], dtype=np.float32)
        with fluid.dygraph.guard():
            var_inp = fluid.dygraph.base.to_variable(np_inp)
            mlp = MLP(input_size=2)
            out = mlp(var_inp)
            dy_out = out.numpy()
            out.backward()
            dy_grad = mlp._linear1.weight.gradient()

        with fluid.dygraph.guard():
            var_inp2 = fluid.dygraph.base.to_variable(np_inp)
            mlp2 = MLP(input_size=2)
            out2 = mlp2(var_inp2)
            dy_out2 = out2.numpy()
            backward_strategy = fluid.dygraph.BackwardStrategy()
            backward_strategy.sort_sum_gradient = True
            out2.backward(backward_strategy)
            dy_grad2 = mlp2._linear1.weight.gradient()

        with new_program_scope():
            inp = fluid.layers.data(
                name="inp", shape=[2, 2], append_batch_size=False)
            mlp = MLP(input_size=2)
            out = mlp(inp)
            param_grads = fluid.backward.append_backward(
                out, parameter_list=[mlp._linear1.weight.name])[0]
            exe = fluid.Executor(fluid.CPUPlace(
            ) if not core.is_compiled_with_cuda() else fluid.CUDAPlace(0))
            exe.run(fluid.default_startup_program())

            static_out, static_grad = exe.run(
                feed={inp.name: np_inp},
                fetch_list=[out.name, param_grads[1].name])

        self.assertTrue(np.allclose(dy_out, static_out))
        self.assertTrue(np.allclose(dy_grad, static_grad))
        self.assertTrue(np.allclose(dy_out2, static_out))
        self.assertTrue(np.allclose(dy_grad2, static_grad))

        params = mlp.parameters(True)
        self.assertEqual("linear_0.w_0", params[0].name)
        self.assertEqual("linear_0.b_0", params[1].name)
        self.assertEqual("linear_1.w_0", params[2].name)
        self.assertEqual("linear_1.b_0", params[3].name)
        self.assertEqual(len(params), 4)

        sublayers = mlp.sublayers(True)
        self.assertEqual(mlp._linear1, sublayers[0])
        self.assertEqual(mlp._linear2, sublayers[1])
        self.assertEqual(len(sublayers), 2)

    def test_dygraph_vs_static(self):
        np_inp1 = np.random.rand(4, 3, 3)
        np_inp2 = np.random.rand(4, 3, 3)

        # dynamic graph
        with fluid.dygraph.guard():
            inp1 = fluid.dygraph.to_variable(np_inp1)
            inp2 = fluid.dygraph.to_variable(np_inp2)
            if np.sum(np_inp1) < np.sum(np_inp2):
                x = fluid.layers.elementwise_add(inp1, inp2)
            else:
                x = fluid.layers.elementwise_sub(inp1, inp2)
            dygraph_result = x.numpy()

        # static graph
        with new_program_scope():
            inp_data1 = fluid.layers.data(
                name='inp1', shape=[3, 3], dtype=np.float32)
            inp_data2 = fluid.layers.data(
                name='inp2', shape=[3, 3], dtype=np.float32)

            a = fluid.layers.expand(
                fluid.layers.reshape(
                    fluid.layers.reduce_sum(inp_data1), [1, 1]), [4, 1])
            b = fluid.layers.expand(
                fluid.layers.reshape(
                    fluid.layers.reduce_sum(inp_data2), [1, 1]), [4, 1])
            cond = fluid.layers.less_than(x=a, y=b)

            ie = fluid.layers.IfElse(cond)
            with ie.true_block():
                d1 = ie.input(inp_data1)
                d2 = ie.input(inp_data2)
                d3 = fluid.layers.elementwise_add(d1, d2)
                ie.output(d3)

            with ie.false_block():
                d1 = ie.input(inp_data1)
                d2 = ie.input(inp_data2)
                d3 = fluid.layers.elementwise_sub(d1, d2)
                ie.output(d3)
            out = ie()

            exe = fluid.Executor(fluid.CPUPlace(
            ) if not core.is_compiled_with_cuda() else fluid.CUDAPlace(0))
            static_result = exe.run(fluid.default_main_program(),
                                    feed={'inp1': np_inp1,
                                          'inp2': np_inp2},
                                    fetch_list=out)[0]
        self.assertTrue(np.allclose(dygraph_result, static_result))

    def test_rnn(self):
        np_inp = np.array([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [7.0, 8.0, 9.0],
                           [10.0, 11.0, 12.0]])
        np_inp = np_inp.reshape((1, 4, 3))
        np_inp = np_inp.astype(np.float32)
        with fluid.dygraph.guard():
            var_inp = fluid.dygraph.base.to_variable(np_inp)
            var_inp = fluid.layers.reshape(var_inp, shape=[1, 4, 3])
            simple_rnn = SimpleRNN()
            outs, pre_hiddens = simple_rnn.forward(var_inp)
            dy_out = outs[3].numpy()
            outs[3].backward()
            dy_grad_h2o = simple_rnn._cell._h2o_w.gradient()
            dy_grad_h2h = simple_rnn._cell._h2h_w.gradient()
            dy_grad_i2h = simple_rnn._cell._i2h_w.gradient()

        with fluid.dygraph.guard():
            var_inp2 = fluid.dygraph.base.to_variable(np_inp)
            var_inp2 = fluid.layers.reshape(var_inp2, shape=[1, 4, 3])
            simple_rnn2 = SimpleRNN()
            outs2, pre_hiddens2 = simple_rnn2.forward(var_inp2)
            dy_out2 = outs2[3].numpy()
            backward_strategy = fluid.dygraph.BackwardStrategy()
            backward_strategy.sort_sum_gradient = True
            outs2[3].backward(backward_strategy)
            dy_grad_h2o2 = simple_rnn2._cell._h2o_w.gradient()
            dy_grad_h2h2 = simple_rnn2._cell._h2h_w.gradient()
            dy_grad_i2h2 = simple_rnn2._cell._i2h_w.gradient()

        with new_program_scope():
            inp = fluid.layers.data(
                name="inp", shape=[1, 4, 3], append_batch_size=False)
            simple_rnn = SimpleRNN()
            outs, pre_hiddens = simple_rnn(inp)
            param_grads = fluid.backward.append_backward(outs[3])
            exe = fluid.Executor(fluid.CPUPlace())
            exe.run(fluid.default_startup_program())
            static_out, static_grad_h2o, static_grad_h2h, static_grad_i2h = exe.run(
                feed={inp.name: np_inp},
                fetch_list=[
                    outs[3].name, param_grads[0][1].name,
                    param_grads[1][1].name, param_grads[2][1].name
                ])

        self.assertTrue(np.allclose(dy_out, static_out))
        self.assertTrue(np.allclose(dy_grad_h2o, static_grad_h2o))
        self.assertTrue(np.allclose(dy_grad_h2h, static_grad_h2h))
        self.assertTrue(np.allclose(dy_grad_i2h, static_grad_i2h))
        self.assertTrue(np.allclose(dy_out2, static_out))
        self.assertTrue(np.allclose(dy_grad_h2o2, static_grad_h2o))
        self.assertTrue(np.allclose(dy_grad_h2h2, static_grad_h2h))
        self.assertTrue(np.allclose(dy_grad_i2h2, static_grad_i2h))

    def test_layer_attrs(self):
        layer = fluid.dygraph.Layer("test")
        layer.test_attr = 1
        self.assertFalse(hasattr(layer, "whatever"))
        self.assertTrue(hasattr(layer, "test_attr"))
        self.assertEqual(layer.test_attr, 1)

        my_layer = MyLayer()
        my_layer.w1 = my_layer.create_parameter([3, 3])
        my_layer.add_parameter('w2', None)
        self.assertEqual(len(my_layer.parameters()), 1)
        self.assertRaises(TypeError, my_layer.__setattr__, 'w1', 'str')
        my_layer.w1 = None
        self.assertEqual(len(my_layer.parameters()), 0)
        my_layer.l1 = fluid.dygraph.Linear(3, 3)
        self.assertEqual(len(my_layer.sublayers()), 1)
        self.assertRaises(TypeError, my_layer.__setattr__, 'l1', 'str')
        my_layer.l1 = None
        self.assertEqual(len(my_layer.sublayers()), 0)


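# Tests for the internal dygraph_utils helpers that append an activation or
# a bias to a variable while running in dygraph mode.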
class TestDygraphUtils(unittest.TestCase):
    def test_append_activation_in_dygraph_exception(self):
        with new_program_scope():
            np_inp = np.random.random(size=(10, 20, 30)).astype(np.float32)
            a = fluid.layers.data("a", [10, 20])
            func = dygraph_utils._append_activation_in_dygraph
            self.assertRaises(AssertionError, func, a, act="sigmoid")

    def test_append_activation_in_dygraph1(self):
        a_np = np.random.random(size=(10, 20, 30)).astype(np.float32)
        func = dygraph_utils._append_activation_in_dygraph
        with fluid.dygraph.guard():
            a = fluid.dygraph.to_variable(a_np)
            res1 = func(a, act="hard_sigmoid")
            res2 = fluid.layers.hard_sigmoid(a)
            self.assertTrue(np.array_equal(res1.numpy(), res2.numpy()))

    def test_append_activation_in_dygraph2(self):
        a_np = np.random.random(size=(10, 20, 30)).astype(np.float32)
        func = dygraph_utils._append_activation_in_dygraph
        with fluid.dygraph.guard():
            a = fluid.dygraph.to_variable(a_np)
            res1 = func(a, act="sigmoid", use_mkldnn=True, use_cudnn=True)
            res2 = fluid.layers.sigmoid(a)
            self.assertTrue(np.array_equal(res1.numpy(), res2.numpy()))

    def test_append_bias_in_dygraph_exception(self):
        with new_program_scope():
            np_inp = np.random.random(size=(10, 20, 30)).astype(np.float32)
            a = fluid.layers.data("a", [10, 20])
            func = dygraph_utils._append_bias_in_dygraph
            self.assertRaises(AssertionError, func, a)

    def test_append_bias_in_dygraph(self):
        a_np = np.random.random(size=(10, 20, 30)).astype(np.float32)
        func = dygraph_utils._append_bias_in_dygraph
        with fluid.dygraph.guard():
            a = fluid.dygraph.to_variable(a_np)
            res1 = func(a, bias=a)
            res2 = a + a
            self.assertTrue(np.array_equal(res1.numpy(), res2.numpy()))


if __name__ == '__main__':
    unittest.main()