# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
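
# This test checks that a small two-layer graph convolutional network (GCN)
# produces the same loss and first-layer weights after one Adam step in
# static-graph mode and in two independent dygraph runs.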

import sys
import unittest

import numpy as np
from test_imperative_base import new_program_scope

import paddle
import paddle.fluid as fluid
import paddle.fluid.core as core
import paddle.nn.functional as F
from paddle.fluid.dygraph.base import to_variable
from paddle.fluid.optimizer import AdamOptimizer


def gen_data():
    # Unused placeholder; the test builds its inputs inline with np.ones.
    pass


class GraphConv(fluid.Layer):
    """A dense graph-convolution layer: forward(X, A) computes A @ (X @ W) + b."""

    def __init__(self, name_scope, in_features, out_features):
        super().__init__(name_scope)

        self._in_features = in_features
        self._out_features = out_features
        self.weight = self.create_parameter(
            attr=None,
            dtype='float32',
            shape=[self._in_features, self._out_features],
        )
        self.bias = self.create_parameter(
            attr=None, dtype='float32', shape=[self._out_features]
        )

    def forward(self, features, adj):
        support = paddle.matmul(features, self.weight)
        # TODO(panyx0718): sparse matmul?
        return paddle.matmul(adj, support) + self.bias


class GCN(fluid.Layer):
    """Two GraphConv layers with a ReLU in between: num_hidden -> 32 -> 10."""

    def __init__(self, name_scope, num_hidden):
        super().__init__(name_scope)
        self.gc = GraphConv(self.full_name(), num_hidden, 32)
        self.gc2 = GraphConv(self.full_name(), 32, 10)

    def forward(self, x, adj):
        x = F.relu(self.gc(x, adj))
        return self.gc2(x, adj)


class TestDygraphGNN(unittest.TestCase):
    def test_gnn_float32(self):
        paddle.seed(90)
        paddle.framework.random._manual_program_seed(90)
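        # Static-graph baseline: run one training step with the executor and
        # record the resulting loss and the first GraphConv layer's weights.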
        startup = fluid.Program()
        main = fluid.Program()

        scope = fluid.core.Scope()
        with new_program_scope(main=main, startup=startup, scope=scope):
            features = paddle.static.data(
                name='features', shape=[1, 100, 50], dtype='float32'
            )
            # Use selected rows when it's supported.
            adj = paddle.static.data(
                name='adj', shape=[1, 100, 100], dtype='float32'
            )
            labels = paddle.static.data(
                name='labels', shape=[100, 1], dtype='int64'
            )

            model = GCN('test_gcn', 50)
            logits = model(features, adj)
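            # Drop the leading batch dimension of size 1 so the logits line up
            # with the [100, 1] labels.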
            logits = paddle.reshape(logits, logits.shape[1:])
            # In other examples this is NLL with log_softmax; however, Paddle's
            # log_loss only supports binary classification for now.
            loss = paddle.nn.functional.softmax_with_cross_entropy(
                logits, labels
            )
            loss = paddle.sum(loss)

            adam = AdamOptimizer(learning_rate=1e-3)
            adam.minimize(loss)
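            # Run on the GPU when Paddle is built with CUDA, otherwise on CPU.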
            exe = fluid.Executor(
                fluid.CPUPlace()
                if not core.is_compiled_with_cuda()
                else fluid.CUDAPlace(0)
            )
            exe.run(startup)
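            # Feed all-ones inputs so every run in this test sees identical data.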
            static_loss = exe.run(
                feed={
                    'features': np.ones([1, 100, 50], dtype=np.float32),
                    'adj': np.ones([1, 100, 100], dtype=np.float32),
                    'labels': np.ones([100, 1], dtype=np.int64),
                },
                fetch_list=[loss],
            )[0]

            static_weight = np.array(
                scope.find_var(model.gc.weight.name).get_tensor()
            )
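
        # First dygraph run: repeat the same single training step eagerly and
        # record the loss and weights.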

        with fluid.dygraph.guard():
            paddle.seed(90)
            paddle.framework.random._manual_program_seed(90)

            features = np.ones([1, 100, 50], dtype=np.float32)
            # Use selected rows when it's supported.
            adj = np.ones([1, 100, 100], dtype=np.float32)
            labels = np.ones([100, 1], dtype=np.int64)

            model = GCN('test_gcn', 50)
            logits = model(to_variable(features), to_variable(adj))
            logits = paddle.reshape(logits, logits.shape[1:])
            # In other examples this is NLL with log_softmax; however, Paddle's
            # log_loss only supports binary classification for now.
            loss = paddle.nn.functional.softmax_with_cross_entropy(
                logits, to_variable(labels)
            )
            loss = paddle.sum(loss)
            loss.backward()
            adam = AdamOptimizer(
                learning_rate=1e-3, parameter_list=model.parameters()
            )

            adam.minimize(loss)
            model.clear_gradients()
            loss_value = loss.numpy()
            model_gc_weight_value = model.gc.weight.numpy()
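
        # Second dygraph run with a freshly built model, to confirm the result
        # is reproducible and still matches the static-graph baseline.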

        with fluid.dygraph.guard():
            paddle.seed(90)
            paddle.framework.random._manual_program_seed(90)

            features2 = np.ones([1, 100, 50], dtype=np.float32)
            # Use selected rows when it's supported.
            adj2 = np.ones([1, 100, 100], dtype=np.float32)
            labels2 = np.ones([100, 1], dtype=np.int64)

            model2 = GCN('test_gcn', 50)
            logits2 = model2(to_variable(features2), to_variable(adj2))
            logits2 = paddle.reshape(logits2, logits2.shape[1:])
            # In other examples this is NLL with log_softmax; however, Paddle's
            # log_loss only supports binary classification for now.
            loss2 = paddle.nn.functional.softmax_with_cross_entropy(
                logits2, to_variable(labels2)
            )
            loss2 = paddle.sum(loss2)
            loss2.backward()
            adam2 = AdamOptimizer(
                learning_rate=1e-3, parameter_list=model2.parameters()
            )
            adam2.minimize(loss2)
            model2.clear_gradients()
            loss2_value = loss2.numpy()
            model2_gc_weight_value = model2.gc.weight.numpy()
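
        # The static-graph baseline and both dygraph runs must agree on the
        # loss and on the first GraphConv layer's weights.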

        self.assertEqual(static_loss, loss_value)
        np.testing.assert_allclose(
            static_weight, model_gc_weight_value, rtol=1e-05
        )
        self.assertEqual(static_loss, loss2_value)
        np.testing.assert_allclose(
            static_weight, model2_gc_weight_value, rtol=1e-05
        )
        sys.stderr.write('%s %s\n' % (static_loss, loss_value))


if __name__ == '__main__':
    paddle.enable_static()
    unittest.main()