# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import unittest
import numpy as np

import paddle.fluid as fluid
from paddle.fluid import core
from paddle.fluid import Linear
from test_imperative_base import new_program_scope


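# A toy layer for the tests below: relu, square via elementwise_mul, then
# reduce_sum to a scalar. The relu output is kept around for gradient checks.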
class MyLayer(fluid.Layer):
    def __init__(self, name_scope):
        super(MyLayer, self).__init__(name_scope)

    def forward(self, inputs):
        x = fluid.layers.relu(inputs)
        self._x_for_debug = x
        x = fluid.layers.elementwise_mul(x, x)
        x = fluid.layers.reduce_sum(x)
        return [x]


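# A two-layer perceptron. All weights and biases are constant-initialized to
# 0.1 so that the dygraph and static-graph runs in test_mlp start from
# identical parameters.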
class MLP(fluid.Layer):
    def __init__(self, input_size):
        super(MLP, self).__init__()
        self._linear1 = Linear(
            input_size,
            3,
            param_attr=fluid.ParamAttr(
                initializer=fluid.initializer.Constant(value=0.1)),
            bias_attr=fluid.ParamAttr(
                initializer=fluid.initializer.Constant(value=0.1)))
        self._linear2 = Linear(
            3,
            4,
            param_attr=fluid.ParamAttr(
                initializer=fluid.initializer.Constant(value=0.1)),
            bias_attr=fluid.ParamAttr(
                initializer=fluid.initializer.Constant(value=0.1)))

    def forward(self, inputs):
        x = self._linear1(inputs)
        x = self._linear2(x)
        x = fluid.layers.reduce_sum(x)
        return x


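# A hand-rolled RNN cell. Parameters are created lazily in _build_once, and
# the computation is wired op by op through the layer helper (mul,
# elementwise_add, tanh, mul, softmax, reduce_sum) rather than via
# fluid.layers calls.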
class SimpleRNNCell(fluid.Layer):
    def __init__(self, name_scope, step_input_size, hidden_size, output_size,
                 param_attr):
        super(SimpleRNNCell, self).__init__(name_scope)
        self.step_input_size = step_input_size
        self.hidden_size = hidden_size
        self.output_size = output_size
        self._dtype = core.VarDesc.VarType.FP32
        self.param_attr = param_attr

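    # Runs once, on the first forward call, when input shapes are known.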
    def _build_once(self, inputs, pre_hidden):
        i2h_param_shape = [self.step_input_size, self.hidden_size]
        h2h_param_shape = [self.hidden_size, self.hidden_size]
        h2o_param_shape = [self.output_size, self.hidden_size]
        self._i2h_w = self.create_parameter(
            attr=self.param_attr,
            shape=i2h_param_shape,
            dtype=self._dtype,
            is_bias=False)
        self._h2h_w = self.create_parameter(
            attr=self.param_attr,
            shape=h2h_param_shape,
            dtype=self._dtype,
            is_bias=False)
        self._h2o_w = self.create_parameter(
            attr=self.param_attr,
            shape=h2o_param_shape,
            dtype=self._dtype,
            is_bias=False)

    def forward(self, input, pre_hidden):
        tmp_i2h = self.create_variable(dtype=self._dtype)
        tmp_h2h = self.create_variable(dtype=self._dtype)
        hidden = self.create_variable(dtype=self._dtype)
        out = self.create_variable(dtype=self._dtype)
        softmax_out = self.create_variable(dtype=self._dtype)
        reduce_out = self.create_variable(dtype=self._dtype)
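        # Input-to-hidden projection: tmp_i2h = input x W_i2h.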
        self._helper.append_op(
            type="mul",
            inputs={"X": input,
                    "Y": self._i2h_w},
            outputs={"Out": tmp_i2h},
            attrs={"x_num_col_dims": 1,
                   "y_num_col_dims": 1})

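        # Hidden-to-hidden projection: tmp_h2h = pre_hidden x W_h2h.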
        self._helper.append_op(
            type="mul",
            inputs={"X": pre_hidden,
                    "Y": self._h2h_w},
            outputs={"Out": tmp_h2h},
            attrs={"x_num_col_dims": 1,
                   "y_num_col_dims": 1})

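        # New hidden state: hidden = tanh(tmp_h2h + tmp_i2h).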
        self._helper.append_op(
            type="elementwise_add",
            inputs={'X': tmp_h2h,
                    'Y': tmp_i2h},
            outputs={'Out': hidden},
            attrs={'axis': -1,
                   'use_mkldnn': False})
        hidden = self._helper.append_activation(hidden, act='tanh')

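        # Hidden-to-output projection, softmax, and a scalar reduce_sum that
        # serves as the loss.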
        self._helper.append_op(
            type="mul",
            inputs={"X": hidden,
                    "Y": self._h2o_w},
            outputs={"Out": out},
            attrs={"x_num_col_dims": 1,
                   "y_num_col_dims": 1})

        self._helper.append_op(
            type="softmax",
            inputs={"X": out},
            outputs={"Out": softmax_out},
            attrs={"use_cudnn": False})

        self._helper.append_op(
            type='reduce_sum',
            inputs={'X': softmax_out},
            outputs={'Out': reduce_out},
            attrs={'keep_dim': False,
                   'reduce_all': True})

        return reduce_out, hidden


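# Unrolls SimpleRNNCell over a fixed sequence length of 4, one 1x3 time step
# at a time, starting from a learnable initial hidden state.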
class SimpleRNN(fluid.Layer):
    def __init__(self, name_scope):
        super(SimpleRNN, self).__init__(name_scope)
        self.seq_len = 4
        self._cell = SimpleRNNCell(
            self.full_name(),
            3,
            3,
            3,
            fluid.ParamAttr(initializer=fluid.initializer.Constant(value=0.1)))

    def forward(self, inputs):
        outs = list()
        pre_hiddens = list()

        init_hidden = self.create_parameter(
            attr=fluid.ParamAttr(
                initializer=fluid.initializer.Constant(value=0.1)),
            shape=[1, 3],
            dtype='float32',
            is_bias=False)
        pre_hidden = init_hidden
        for i in range(self.seq_len):
            input = fluid.layers.slice(
                inputs, axes=[1], starts=[i], ends=[i + 1])
            input = fluid.layers.reshape(input, shape=[1, 3])
            out_softmax, pre_hidden = self._cell(input, pre_hidden)
            outs.append(out_softmax)

        return outs, pre_hiddens


class TestImperative(unittest.TestCase):
    def test_isinstance(self):
        var = fluid.layers.data(shape=[1], name='x', dtype='float32')
        self.assertTrue(isinstance(var, fluid.Variable))
        with fluid.dygraph.guard():
            var_base = fluid.dygraph.base.to_variable(np.array([3, 4, 5]))
            self.assertTrue(isinstance(var_base, core.VarBase))
            self.assertTrue(isinstance(var_base, fluid.Variable))

    def test_create_VarBase(self):
        x = np.ones([2, 2], np.float32)
        y = np.zeros([3, 3], np.float32)
        with fluid.dygraph.guard():
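            # VarBase should accept positional or keyword value/place
            # arguments, an omitted place, or construction via to_variable.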
            tmp = fluid.core.VarBase(value=x, place=fluid.core.CPUPlace())
            tmp2 = fluid.core.VarBase(y, fluid.core.CPUPlace())
            tmp3 = fluid.dygraph.base.to_variable(x)
            tmp4 = fluid.core.VarBase(y)
            tmp5 = fluid.core.VarBase(value=x)

            self.assertTrue(np.array_equal(x, tmp.numpy()))
            self.assertTrue(np.array_equal(y, tmp2.numpy()))
            self.assertTrue(np.array_equal(x, tmp3.numpy()))
            self.assertTrue(np.array_equal(y, tmp4.numpy()))
            self.assertTrue(np.array_equal(x, tmp5.numpy()))

    def test_sum_op(self):
        x = np.ones([2, 2], np.float32)
        with fluid.dygraph.guard():
            inputs = []
            for _ in range(10):
                tmp = fluid.dygraph.base.to_variable(x)
                tmp.stop_gradient = False
                inputs.append(tmp)
            ret = fluid.layers.sums(inputs)
            loss = fluid.layers.reduce_sum(ret)
            loss.backward()
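        # Repeat the same computation with sort_sum_gradient enabled; outputs
        # and gradients must match the default strategy.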
        with fluid.dygraph.guard():
            inputs2 = []
            for _ in range(10):
                tmp = fluid.dygraph.base.to_variable(x)
                tmp.stop_gradient = False
                inputs2.append(tmp)
            ret2 = fluid.layers.sums(inputs2)
            loss2 = fluid.layers.reduce_sum(ret2)
            backward_strategy = fluid.dygraph.BackwardStrategy()
            backward_strategy.sort_sum_gradient = True
            loss2.backward(backward_strategy)

            self.assertTrue(np.allclose(ret.numpy(), x * 10))
            self.assertTrue(np.allclose(inputs[0].gradient(), x))
            self.assertTrue(np.allclose(ret2.numpy(), x * 10))
            self.assertTrue(np.allclose(inputs2[0].gradient(), x))

    def test_empty_var(self):
        with fluid.dygraph.guard():
            cur_program = fluid.Program()
            cur_block = cur_program.current_block()
            new_variable = cur_block.create_var(
                name="X", shape=[-1, 23, 48], dtype='float32')
            # Each of these must raise on an uninitialized variable, not pass
            # silently.
            with self.assertRaises(core.EnforceNotMet):
                new_variable.numpy()

            with self.assertRaises(core.EnforceNotMet):
                new_variable.backward()

            with self.assertRaises(core.EnforceNotMet):
                new_variable.clear_gradient()
255 256 257 258 259 260 261 262 263 264 265 266 267

    def test_empty_grad(self):
        with fluid.dygraph.guard():
            x = np.ones([2, 2], np.float32)
            new_var = fluid.dygraph.base.to_variable(x)
            with self.assertRaises(ValueError):
                new_var.gradient()

            with self.assertRaises(core.EnforceNotMet):
                new_var.clear_gradient()

        with fluid.dygraph.guard():
            cur_program = fluid.Program()
            cur_block = cur_program.current_block()
            new_variable = cur_block.create_var(
                name="X", shape=[-1, 23, 48], dtype='float32')
            with self.assertRaises(ValueError):
                new_variable.gradient()

    def test_set_persistable(self):
        with fluid.dygraph.guard():
            x = np.ones([2, 2], np.float32)
            new_var = fluid.dygraph.base.to_variable(x)
            self.assertFalse(new_var.persistable)
            new_var.persistable = True
            self.assertTrue(new_var.persistable)

    def test_layer(self):
        with fluid.dygraph.guard():
            cl = core.Layer()
            cl.forward([])
            l = fluid.Layer("l")
            self.assertRaises(NotImplementedError, l.forward, [])

    def test_layer_in_out(self):
        np_inp = np.array([1.0, 2.0, -1.0], dtype=np.float32)
        with fluid.dygraph.guard():
            var_inp = fluid.dygraph.base.to_variable(np_inp)
            var_inp.stop_gradient = False
            l = MyLayer("my_layer")
            x = l(var_inp)[0]
            self.assertIsNotNone(x)
            dy_out = x.numpy()
            x.backward()
            dy_grad = l._x_for_debug.gradient()

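        # Second run of the same layer, backward with sort_sum_gradient.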
        with fluid.dygraph.guard():
            var_inp2 = fluid.dygraph.base.to_variable(np_inp)
            var_inp2.stop_gradient = False
            l2 = MyLayer("my_layer")
            x2 = l2(var_inp2)[0]
            self.assertIsNotNone(x2)
            dy_out2 = x2.numpy()
            backward_strategy = fluid.dygraph.BackwardStrategy()
            backward_strategy.sort_sum_gradient = True
            x2.backward(backward_strategy)
            dy_grad2 = l2._x_for_debug.gradient()

        with new_program_scope():
            inp = fluid.layers.data(
                name="inp", shape=[3], append_batch_size=False)
            l = MyLayer("my_layer")
            x = l(inp)[0]
            param_grads = fluid.backward.append_backward(
                x, parameter_list=[l._x_for_debug.name])[0]
            place = (fluid.CUDAPlace(0)
                     if core.is_compiled_with_cuda() else fluid.CPUPlace())
            exe = fluid.Executor(place)

            static_out, static_grad = exe.run(
                feed={inp.name: np_inp},
                fetch_list=[x.name, param_grads[1].name])

        self.assertTrue(np.allclose(dy_out, static_out))
        self.assertTrue(np.allclose(dy_grad, static_grad))
        self.assertTrue(np.allclose(dy_out2, static_out))
        self.assertTrue(np.allclose(dy_grad2, static_grad))

    def test_mlp(self):
        np_inp = np.array([[1.0, 2.0], [3.0, 4.0]], dtype=np.float32)
        with fluid.dygraph.guard():
            var_inp = fluid.dygraph.base.to_variable(np_inp)
            mlp = MLP(input_size=2)
            out = mlp(var_inp)
            dy_out = out.numpy()
            out.backward()
            dy_grad = mlp._linear1.weight.gradient()

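        # Same MLP again, backward with sort_sum_gradient.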
        with fluid.dygraph.guard():
            var_inp2 = fluid.dygraph.base.to_variable(np_inp)
            mlp2 = MLP(input_size=2)
            out2 = mlp2(var_inp2)
            dy_out2 = out2.numpy()
            backward_strategy = fluid.dygraph.BackwardStrategy()
            backward_strategy.sort_sum_gradient = True
            out2.backward(backward_strategy)
            dy_grad2 = mlp2._linear1.weight.gradient()

        with new_program_scope():
            inp = fluid.layers.data(
                name="inp", shape=[2, 2], append_batch_size=False)
            mlp = MLP(input_size=2)
            out = mlp(inp)
            param_grads = fluid.backward.append_backward(
                out, parameter_list=[mlp._linear1.weight.name])[0]
            place = (fluid.CUDAPlace(0)
                     if core.is_compiled_with_cuda() else fluid.CPUPlace())
            exe = fluid.Executor(place)
            exe.run(fluid.default_startup_program())

            static_out, static_grad = exe.run(
                feed={inp.name: np_inp},
                fetch_list=[out.name, param_grads[1].name])

        self.assertTrue(np.allclose(dy_out, static_out))
        self.assertTrue(np.allclose(dy_grad, static_grad))
        self.assertTrue(np.allclose(dy_out2, static_out))
        self.assertTrue(np.allclose(dy_grad2, static_grad))

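        # parameters() and sublayers() enumerate in creation order.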
        params = mlp.parameters(True)
        self.assertEqual("linear_0.w_0", params[0].name)
        self.assertEqual("linear_0.b_0", params[1].name)
        self.assertEqual("linear_1.w_0", params[2].name)
        self.assertEqual("linear_1.b_0", params[3].name)
        self.assertEqual(len(params), 4)

        sublayers = mlp.sublayers(True)
        self.assertEqual(mlp._linear1, sublayers[0])
        self.assertEqual(mlp._linear2, sublayers[1])
        self.assertEqual(len(sublayers), 2)

    def test_dygraph_vs_static(self):
        np_inp1 = np.random.rand(4, 3, 3)
        np_inp2 = np.random.rand(4, 3, 3)

        # dynamic graph
        with fluid.dygraph.guard():
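            # Dygraph can branch on runtime values with plain Python control
            # flow.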
            inp1 = fluid.dygraph.to_variable(np_inp1)
            inp2 = fluid.dygraph.to_variable(np_inp2)
            if np.sum(np_inp1) < np.sum(np_inp2):
                x = fluid.layers.elementwise_add(inp1, inp2)
            else:
                x = fluid.layers.elementwise_sub(inp1, inp2)
            dygraph_result = x.numpy()

        # static graph
        with new_program_scope():
            inp_data1 = fluid.layers.data(
                name='inp1', shape=[3, 3], dtype=np.float32)
            inp_data2 = fluid.layers.data(
                name='inp2', shape=[3, 3], dtype=np.float32)

            a = fluid.layers.expand(
                fluid.layers.reshape(
                    fluid.layers.reduce_sum(inp_data1), [1, 1]), [4, 1])
            b = fluid.layers.expand(
                fluid.layers.reshape(
                    fluid.layers.reduce_sum(inp_data2), [1, 1]), [4, 1])
            cond = fluid.layers.less_than(x=a, y=b)

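            # The static graph needs the same branch declared up front via
            # IfElse.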
            ie = fluid.layers.IfElse(cond)
            with ie.true_block():
                d1 = ie.input(inp_data1)
                d2 = ie.input(inp_data2)
                d3 = fluid.layers.elementwise_add(d1, d2)
                ie.output(d3)

            with ie.false_block():
                d1 = ie.input(inp_data1)
                d2 = ie.input(inp_data2)
                d3 = fluid.layers.elementwise_sub(d1, d2)
                ie.output(d3)
            out = ie()

            place = (fluid.CUDAPlace(0)
                     if core.is_compiled_with_cuda() else fluid.CPUPlace())
            exe = fluid.Executor(place)
            static_result = exe.run(fluid.default_main_program(),
                                    feed={'inp1': np_inp1,
                                          'inp2': np_inp2},
                                    fetch_list=out)[0]
        self.assertTrue(np.allclose(dygraph_result, static_result))

    def test_rnn(self):
        np_inp = np.array([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [7.0, 8.0, 9.0],
                           [10.0, 11.0, 12.0]])
        np_inp = np_inp.reshape((1, 4, 3))
        np_inp = np_inp.astype(np.float32)
        with fluid.dygraph.guard():
            var_inp = fluid.dygraph.base.to_variable(np_inp)
            var_inp = fluid.layers.reshape(var_inp, shape=[1, 4, 3])
            simple_rnn = SimpleRNN("simple_rnn")
            outs, pre_hiddens = simple_rnn.forward(var_inp)
            dy_out = outs[3].numpy()
            outs[3].backward()
            dy_grad_h2o = simple_rnn._cell._h2o_w.gradient()
            dy_grad_h2h = simple_rnn._cell._h2h_w.gradient()
            dy_grad_i2h = simple_rnn._cell._i2h_w.gradient()

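        # Same RNN, backward with sort_sum_gradient.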
        with fluid.dygraph.guard():
            var_inp2 = fluid.dygraph.base.to_variable(np_inp)
            var_inp2 = fluid.layers.reshape(var_inp2, shape=[1, 4, 3])
            simple_rnn2 = SimpleRNN("simple_rnn")
            outs2, pre_hiddens2 = simple_rnn2.forward(var_inp2)
            dy_out2 = outs2[3].numpy()
            backward_strategy = fluid.dygraph.BackwardStrategy()
            backward_strategy.sort_sum_gradient = True
            outs2[3].backward(backward_strategy)
            dy_grad_h2o2 = simple_rnn2._cell._h2o_w.gradient()
            dy_grad_h2h2 = simple_rnn2._cell._h2h_w.gradient()
            dy_grad_i2h2 = simple_rnn2._cell._i2h_w.gradient()

        with new_program_scope():
            inp = fluid.layers.data(
                name="inp", shape=[1, 4, 3], append_batch_size=False)
            simple_rnn = SimpleRNN("simple_rnn")
            outs, pre_hiddens = simple_rnn(inp)
            param_grads = fluid.backward.append_backward(outs[3])
            exe = fluid.Executor(fluid.CPUPlace())
            exe.run(fluid.default_startup_program())
            static_out, static_grad_h2o, static_grad_h2h, static_grad_i2h = exe.run(
                feed={inp.name: np_inp},
                fetch_list=[
                    outs[3].name, param_grads[0][1].name,
                    param_grads[1][1].name, param_grads[2][1].name
                ])

        self.assertTrue(np.allclose(dy_out, static_out))
        self.assertTrue(np.allclose(dy_grad_h2o, static_grad_h2o))
        self.assertTrue(np.allclose(dy_grad_h2h, static_grad_h2h))
        self.assertTrue(np.allclose(dy_grad_i2h, static_grad_i2h))
        self.assertTrue(np.allclose(dy_out2, static_out))
        self.assertTrue(np.allclose(dy_grad_h2o2, static_grad_h2o))
        self.assertTrue(np.allclose(dy_grad_h2h2, static_grad_h2h))
        self.assertTrue(np.allclose(dy_grad_i2h2, static_grad_i2h))

    def test_layer_attrs(self):
        layer = fluid.dygraph.Layer("test")
        layer.test_attr = 1
        self.assertFalse(hasattr(layer, "whatever"))
        self.assertTrue(hasattr(layer, "test_attr"))
        self.assertEqual(layer.test_attr, 1)


if __name__ == '__main__':
    unittest.main()