# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import sys
import unittest

import numpy as np

import paddle.fluid as fluid
from paddle.fluid import core
from paddle.fluid.imperative.nn import FC

from test_imperative_base import new_program_scope


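# A Layer whose forward keeps the intermediate relu output as
# self._x_for_debug so tests can read its gradient after backward.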
class MyLayer(fluid.imperative.Layer):
27 28 29 30
    def __init__(self):
        super(MyLayer, self).__init__()

    def forward(self, inputs):
        x = fluid.layers.relu(inputs)
        self._x_for_debug = x
        x = fluid.layers.elementwise_mul(x, x)
        x = fluid.layers.reduce_sum(x)
        return [x]


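# A PyLayer implemented in numpy: forward computes tanh, backward
# applies the tanh derivative by hand.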
class MyPyLayer(fluid.imperative.PyLayer):
    def __init__(self):
        super(MyPyLayer, self).__init__()

    @staticmethod
    def forward(inputs):
        sys.stderr.write('before forward\n')
        ret = np.tanh(inputs[0])
        sys.stderr.write('after forward: %s\n' % ret)
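        # Copy the numpy result into a LoDTensor so the imperative
        # engine can consume it.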
        tensor = core.LoDTensor()
        tensor.set(ret, core.CPUPlace())
        return tuple([tensor])

    @staticmethod
    def backward(douts, outs):
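        # dL/dx = dL/dy * (1 - tanh(x)^2); outs caches the forward
        # output, so tanh need not be recomputed.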
        return np.array(douts[0]) * (1 - np.square(np.array(outs[0])))


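# Two constant-initialized FC layers followed by a reduce_sum; the
# fixed initialization makes dynamic and static runs comparable.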
class MLP(fluid.imperative.Layer):
    def __init__(self):
        super(MLP, self).__init__()
        self._fc1 = FC(3,
                       fluid.ParamAttr(
                           initializer=fluid.initializer.Constant(value=0.1)))
        self._fc2 = FC(4,
                       fluid.ParamAttr(
                           initializer=fluid.initializer.Constant(value=0.1)))

    def forward(self, inputs):
        x = self._fc1(inputs)
        x = self._fc2(x)
        x = fluid.layers.reduce_sum(x)
        return x


class TestImperative(unittest.TestCase):
    def test_layer(self):
        with fluid.imperative.guard():
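            # core.Layer is callable directly, while the Python Layer base
            # class requires subclasses to implement forward.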
            cl = core.Layer()
            cl.forward([])
            l = fluid.imperative.Layer()
            self.assertRaises(NotImplementedError, l.forward, [])

    def test_pylayer(self):
        with fluid.imperative.guard():
            my_py_layer = MyPyLayer()
            outs = my_py_layer([np.ones([2, 2], np.float32)])
            sys.stderr.write('%s\n' % outs[0]._numpy())
            # out.backward()

    def test_layer_in_out(self):
        np_inp = np.array([1.0, 2.0, -1.0], dtype=np.float32)
        with fluid.imperative.guard():
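            # Dynamic run: forward, backward, then read the gradient of
            # the cached relu output.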
            var_inp = fluid.imperative.base.to_variable(np_inp)
            l = MyLayer()
            x = l(var_inp)[0]
            self.assertIsNotNone(x)
            dy_out = x._numpy()
            x._backward()
            dy_grad = l._x_for_debug._gradient()

        with new_program_scope():
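            # Build the same network as a static graph and fetch the
            # matching output and gradient.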
            inp = fluid.layers.data(
                name="inp", shape=[3], append_batch_size=False)
            l = MyLayer()
            x = l(inp)[0]
            param_grads = fluid.backward.append_backward(
                x, parameter_list=[l._x_for_debug.name])[0]
            exe = fluid.Executor(fluid.CPUPlace())

            static_out, static_grad = exe.run(
                feed={inp.name: np_inp},
                fetch_list=[x.name, param_grads[1].name])

        self.assertTrue(np.allclose(dy_out, static_out))
        self.assertTrue(np.allclose(dy_grad, static_grad))

    def test_mlp(self):
        np_inp = np.array([[1.0, 2.0], [3.0, 4.0]], dtype=np.float32)
        with fluid.imperative.guard():
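            # Dynamic run: record the MLP output and the gradient of the
            # first FC layer's weight.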
            var_inp = fluid.imperative.base.to_variable(np_inp)
            mlp = MLP()
            out = mlp(var_inp)
            dy_out = out._numpy()
            out._backward()
            dy_grad = mlp._fc1._w._gradient()

        with new_program_scope():
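            # Static-graph run: parameters need explicit initialization via
            # the startup program before output and gradient are fetched.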
            inp = fluid.layers.data(
                name="inp", shape=[2, 2], append_batch_size=False)
            mlp = MLP()
            out = mlp(inp)
            param_grads = fluid.backward.append_backward(
                out, parameter_list=[mlp._fc1._w.name])[0]
            exe = fluid.Executor(fluid.CPUPlace())
            exe.run(fluid.default_startup_program())

            static_out, static_grad = exe.run(
                feed={inp.name: np_inp},
                fetch_list=[out.name, param_grads[1].name])

        self.assertTrue(np.allclose(dy_out, static_out))
        self.assertTrue(np.allclose(dy_grad, static_grad))


if __name__ == '__main__':
    unittest.main()