# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import sys
import unittest

import numpy as np
from test_imperative_base import new_program_scope

import paddle
import paddle.fluid as fluid
import paddle.fluid.core as core
from paddle.fluid.dygraph.base import to_variable
from paddle.fluid.framework import _test_eager_guard
from paddle.fluid.optimizer import AdamOptimizer


def gen_data():
    # Placeholder; the test feeds fixed all-ones arrays rather than generated data.
    pass


class GraphConv(fluid.Layer):
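    """Dense graph convolution layer: out = adj @ (features @ weight) + bias."""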
    def __init__(self, name_scope, in_features, out_features):
        super().__init__(name_scope)

        self._in_features = in_features
        self._out_features = out_features
        self.weight = self.create_parameter(
            attr=None,
            dtype='float32',
            shape=[self._in_features, self._out_features],
        )
        self.bias = self.create_parameter(
            attr=None, dtype='float32', shape=[self._out_features]
        )

    def forward(self, features, adj):
        support = fluid.layers.matmul(features, self.weight)
        # TODO(panyx0718): sparse matmul?
        return fluid.layers.matmul(adj, support) + self.bias


class GCN(fluid.Layer):
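    """Two-layer GCN: GraphConv(num_hidden -> 32) + ReLU, then GraphConv(32 -> 10)."""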
    def __init__(self, name_scope, num_hidden):
        super().__init__(name_scope)
        self.gc = GraphConv(self.full_name(), num_hidden, 32)
        self.gc2 = GraphConv(self.full_name(), 32, 10)

    def forward(self, x, adj):
        x = fluid.layers.relu(self.gc(x, adj))
        return self.gc2(x, adj)


class TestDygraphGNN(unittest.TestCase):
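    """Checks that static-graph and dygraph GCN training produce identical results."""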
    def func_gnn_float32(self):
        paddle.seed(90)
        paddle.framework.random._manual_program_seed(90)
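        # Static-graph baseline: run one training step and record the loss and
        # the first GraphConv's weights for later comparison.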
        startup = fluid.Program()
        main = fluid.Program()

        scope = fluid.core.Scope()
        with new_program_scope(main=main, startup=startup, scope=scope):
            features = fluid.layers.data(
                name='features',
                shape=[1, 100, 50],
                dtype='float32',
                append_batch_size=False,
            )
            # Use selected rows when it's supported.
            adj = fluid.layers.data(
                name='adj',
                shape=[1, 100, 100],
                dtype='float32',
                append_batch_size=False,
            )
            labels = fluid.layers.data(
                name='labels',
                shape=[100, 1],
                dtype='int64',
                append_batch_size=False,
            )

            model = GCN('test_gcn', 50)
            logits = model(features, adj)
            logits = paddle.reshape(logits, logits.shape[1:])
            # Other examples use NLL with log_softmax; however, Paddle's
            # log_loss only supports binary classification for now.
            loss = paddle.nn.functional.softmax_with_cross_entropy(
                logits, labels
            )
            loss = paddle.sum(loss)

            adam = AdamOptimizer(learning_rate=1e-3)
            adam.minimize(loss)
            exe = fluid.Executor(
                fluid.CPUPlace()
                if not core.is_compiled_with_cuda()
                else fluid.CUDAPlace(0)
            )
            exe.run(startup)
            static_loss = exe.run(
                feed={
                    'features': np.ones([1, 100, 50], dtype=np.float32),
                    'adj': np.ones([1, 100, 100], dtype=np.float32),
                    'labels': np.ones([100, 1], dtype=np.int64),
                },
                fetch_list=[loss],
            )[0]

            static_weight = np.array(
                scope.find_var(model.gc.weight.name).get_tensor()
            )

        with fluid.dygraph.guard():
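            # Dygraph run with the same seeds, data, and model configuration;
            # it should reproduce the static-graph results.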
            paddle.seed(90)
            paddle.framework.random._manual_program_seed(90)

            features = np.ones([1, 100, 50], dtype=np.float32)
            # Use selected rows when it's supported.
            adj = np.ones([1, 100, 100], dtype=np.float32)
            labels = np.ones([100, 1], dtype=np.int64)

            model = GCN('test_gcn', 50)
            logits = model(to_variable(features), to_variable(adj))
            logits = paddle.reshape(logits, logits.shape[1:])
            # Other examples use NLL with log_softmax; however, Paddle's
            # log_loss only supports binary classification for now.
            loss = paddle.nn.functional.softmax_with_cross_entropy(
                logits, to_variable(labels)
            )
            loss = paddle.sum(loss)
            loss.backward()
            adam = AdamOptimizer(
                learning_rate=1e-3, parameter_list=model.parameters()
            )

            adam.minimize(loss)
            model.clear_gradients()
            loss_value = loss.numpy()
            model_gc_weight_value = model.gc.weight.numpy()

        with fluid.dygraph.guard():
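            # Second, independent dygraph run to confirm the result is reproducible.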
            paddle.seed(90)
            paddle.framework.random._manual_program_seed(90)

            features2 = np.ones([1, 100, 50], dtype=np.float32)
            # Use selected rows when it's supported.
            adj2 = np.ones([1, 100, 100], dtype=np.float32)
            labels2 = np.ones([100, 1], dtype=np.int64)

            model2 = GCN('test_gcn', 50)
            logits2 = model2(to_variable(features2), to_variable(adj2))
            logits2 = paddle.reshape(logits2, logits2.shape[1:])
            # Other examples use NLL with log_softmax; however, Paddle's
            # log_loss only supports binary classification for now.
            loss2 = paddle.nn.functional.softmax_with_cross_entropy(
                logits2, to_variable(labels2)
            )
            loss2 = paddle.sum(loss2)
            loss2.backward()
            adam2 = AdamOptimizer(
                learning_rate=1e-3, parameter_list=model2.parameters()
            )
            adam2.minimize(loss2)
            model2.clear_gradients()
            loss2_value = loss2.numpy()
            model2_gc_weight_value = model2.gc.weight.numpy()

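        # Every run must produce the same loss; the weights are compared with a
        # small relative tolerance.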
        self.assertEqual(static_loss, loss_value)
        np.testing.assert_allclose(
            static_weight, model_gc_weight_value, rtol=1e-05
        )
        self.assertEqual(static_loss, loss2_value)
        np.testing.assert_allclose(
            static_weight, model2_gc_weight_value, rtol=1e-05
        )
        sys.stderr.write('%s %s\n' % (static_loss, loss_value))

    def test_gnn_float32(self):
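        # Run once under the eager-mode guard and once in legacy dygraph mode.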
        with _test_eager_guard():
            self.func_gnn_float32()
        self.func_gnn_float32()


if __name__ == '__main__':
    paddle.enable_static()
    unittest.main()