#   Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import print_function

import unittest

import paddle
import paddle.nn as nn
import paddle.fluid as fluid

import numpy as np

from paddle.fluid.framework import _test_eager_guard


class LeNetDygraph(fluid.dygraph.Layer):
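    """LeNet-style dygraph network used as the fixture for the Layer.apply test."""
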
    def __init__(self, num_classes=10, classifier_activation='softmax'):
        super(LeNetDygraph, self).__init__()
        self.num_classes = num_classes
        self.features = nn.Sequential(
            nn.Conv2D(
                1, 6, 3, stride=1, padding=1),
            nn.ReLU(),
            paddle.fluid.dygraph.Pool2D(2, 'max', 2),
            nn.Conv2D(
                6, 16, 5, stride=1, padding=0),
            nn.ReLU(),
            paddle.fluid.dygraph.Pool2D(2, 'max', 2))

        if num_classes > 0:
            self.fc = nn.Sequential(
                nn.Linear(400, 120),
                nn.Linear(120, 84), nn.Linear(84, 10),
                nn.Softmax())  # TODO: accept any activation

    def forward(self, inputs):
        x = self.features(inputs)

        if self.num_classes > 0:
            x = fluid.layers.flatten(x, 1)
            x = self.fc(x)
        return x


def init_weights(layer):
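    """Initializer hook passed to Layer.apply: fill Linear and Conv2D
    parameters with fixed constants so the test can check that every
    sublayer was visited."""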
    if type(layer) == nn.Linear:
        new_weight = paddle.fluid.layers.fill_constant(
            layer.weight.shape, layer.weight.dtype, value=0.9)
        layer.weight.set_value(new_weight)
        new_bias = paddle.fluid.layers.fill_constant(
            layer.bias.shape, layer.bias.dtype, value=-0.1)
        layer.bias.set_value(new_bias)
    elif type(layer) == nn.Conv2D:
        new_weight = paddle.fluid.layers.fill_constant(
            layer.weight.shape, layer.weight.dtype, value=0.7)
        layer.weight.set_value(new_weight)
        new_bias = paddle.fluid.layers.fill_constant(
            layer.bias.shape, layer.bias.dtype, value=-0.2)
        layer.bias.set_value(new_bias)


class TestLayerApply(unittest.TestCase):
    def func_apply_init_weight(self):
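        """apply() should run init_weights on every sublayer of the network."""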
        with fluid.dygraph.guard():
            net = LeNetDygraph()

            net.apply(init_weights)

            for layer in net.sublayers():
                if type(layer) == nn.Linear:
                    np.testing.assert_allclose(layer.weight.numpy(), 0.9)
                    np.testing.assert_allclose(layer.bias.numpy(), -0.1)
                elif type(layer) == nn.Conv2D:
                    np.testing.assert_allclose(layer.weight.numpy(), 0.7)
                    np.testing.assert_allclose(layer.bias.numpy(), -0.2)

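    # Run the check once under the eager-mode guard and once in legacy dygraph mode.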
    def test_apply_init_weight(self):
        with _test_eager_guard():
            self.func_apply_init_weight()
        self.func_apply_init_weight()


if __name__ == '__main__':
    unittest.main()