# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import contextlib
import unittest
import numpy as np

import paddle.fluid as fluid
from paddle.fluid import core
from paddle.fluid.optimizer import SGDOptimizer
from paddle.fluid.imperative.nn import Conv2D, Pool2D, FC
from paddle.fluid.imperative.base import to_variable


class SimpleImgConvPool(fluid.imperative.PyLayer):
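    """One Conv2D followed by one Pool2D: the conv-pool building block that
    the MNIST model below stacks twice."""
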
    def __init__(self,
                 num_channels,
                 filter_size,
                 num_filters,
                 pool_size,
                 pool_stride,
                 pool_padding=0,
                 pool_type='max',
                 global_pooling=False,
                 conv_stride=1,
                 conv_padding=0,
                 conv_dilation=1,
                 conv_groups=1,
                 act=None,
                 use_cudnn=False,
                 param_attr=None,
                 bias_attr=None):
        super(SimpleImgConvPool, self).__init__()

        self._conv2d = Conv2D(
            num_channels=num_channels,
            num_filters=num_filters,
            filter_size=filter_size,
            stride=conv_stride,
            padding=conv_padding,
            dilation=conv_dilation,
            groups=conv_groups,
            param_attr=None,
            bias_attr=None,
            use_cudnn=use_cudnn,
            # Assumption: Conv2D accepts an `act` argument (as the dygraph
            # Conv2D later did); without forwarding it, the `act` passed to
            # this wrapper was silently ignored.
            act=act)

        self._pool2d = Pool2D(
            pool_size=pool_size,
            pool_type=pool_type,
            pool_stride=pool_stride,
            pool_padding=pool_padding,
            global_pooling=global_pooling,
            use_cudnn=use_cudnn)

    def forward(self, inputs):
        x = self._conv2d(inputs)
        x = self._pool2d(x)
        return x


class MNIST(fluid.imperative.PyLayer):
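    """MNIST classifier: two conv-pool blocks followed by a fully connected
    layer that produces 10 class scores.

    Shape sketch (assuming N x 1 x 28 x 28 inputs):
        (N, 1, 28, 28) -> conv 5x5 -> (N, 20, 24, 24) -> pool 2x2/2 -> (N, 20, 12, 12)
                       -> conv 5x5 -> (N, 50, 8, 8)   -> pool 2x2/2 -> (N, 50, 4, 4)
                       -> FC       -> (N, 10)
    """
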
    def __init__(self, param_attr=None, bias_attr=None):
        super(MNIST, self).__init__(param_attr=param_attr, bias_attr=bias_attr)

        self._simple_img_conv_pool_1 = SimpleImgConvPool(
            1, 5, 20, 2, 2, act="relu")

        self._simple_img_conv_pool_2 = SimpleImgConvPool(
            20, 5, 50, 2, 2, act="relu")

        pool_2_shape = 50 * 8 * 8
        SIZE = 10
        scale = (2.0 / (pool_2_shape**2 * SIZE))**0.5
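        # NOTE: pool_2_shape is only used in the weight-init scale computed
        # above; for 28x28 inputs the flattened pool-2 output is actually
        # 50 * 4 * 4 per sample.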
        self._fc = FC(-1,
                      10,
                      param_attr=fluid.param_attr.ParamAttr(
                          initializer=fluid.initializer.NormalInitializer(
                              loc=0.0, scale=scale)))

    def forward(self, inputs):
        x = self._simple_img_conv_pool_1(inputs)
        x = self._simple_img_conv_pool_2(x)
        x = self._fc(x)
        return x


class TestImperativeMnist(unittest.TestCase):
    def test_mnist_cpu_float32(self):
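        # fluid.imperative.guard() runs the enclosed code eagerly, op by op,
        # instead of building a static program first.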
        with fluid.imperative.guard():
            mnist = MNIST()

            x_data = np.random.rand(128, 1, 28, 28).astype('float32')
            img = to_variable(x_data)
            # np.random.rand() gives floats in [0, 1), so casting to int64
            # would produce all-zero labels; draw random class ids instead.
            y_data = np.random.randint(0, 10, size=(128, 1)).astype('int64')
            label = to_variable(y_data)
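            # Labels are input data, not parameters; stop_gradient keeps the
            # backward pass from propagating into them.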
            label._stop_gradient = True

            predict = mnist(img)
            # cross_entropy expects per-sample probability distributions, so
            # normalize the raw FC output with a softmax first.
            predict = fluid.layers.softmax(predict)
            loss = fluid.layers.cross_entropy(predict, label)
            # Reduce the per-sample losses to a scalar before backprop.
            avg_loss = fluid.layers.mean(loss)
            avg_loss._backward()
            # _gradient() reads the accumulated gradient back as a numpy array.
            filter_grad = mnist._simple_img_conv_pool_1._conv2d._filter_param._gradient()
            #  print(filter_grad)

            sgd = SGDOptimizer(learning_rate=1e-3)
            sgd.minimize(avg_loss)

        # Disabled draft (MLP and new_program_scope are not defined in this
        # file): compares imperative gradients against the static-graph result.
        #  np_inp = np.array([[1.0, 2.0], [3.0, 4.0]], dtype=np.float32)
        #  with fluid.imperative.guard():
        #      mlp = MLP()
        #      out = mlp(np_inp)
        #      dy_out = out._numpy()
        #      out._backward()
        #      dy_grad = mlp._fc1._w._gradient()

        #  with new_program_scope():
        #      inp = fluid.layers.data(
        #          name="inp", shape=[2, 2], append_batch_size=False)
        #      mlp = MLP()
        #      out = mlp(inp)
        #      param_grads = fluid.backward.append_backward(
        #          out, parameter_list=[mlp._fc1._w.name])[0]
        #      exe = fluid.Executor(fluid.CPUPlace())
        #      exe.run(fluid.default_startup_program())

        #      static_out, static_grad = exe.run(
        #          feed={inp.name: np_inp},
        #          fetch_list=[out.name, param_grads[1].name])

        #      self.assertTrue(np.allclose(dy_out, static_out))
        #      self.assertTrue(np.allclose(dy_grad, static_grad))


if __name__ == '__main__':
    unittest.main()