# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import unittest
import numpy as np
import sys

import paddle
import paddle.fluid as fluid
import paddle.fluid.core as core
from paddle.fluid.optimizer import AdamOptimizer
from test_imperative_base import new_program_scope
from paddle.fluid.dygraph.base import to_variable
from paddle.fluid.framework import _test_eager_guard


def gen_data():
    # Placeholder data generator; not used by this test.
    pass


class GraphConv(fluid.Layer):

    def __init__(self, name_scope, in_features, out_features):
        super(GraphConv, self).__init__(name_scope)

        self._in_features = in_features
        self._out_features = out_features
        self.weight = self.create_parameter(
            attr=None,
            dtype='float32',
            shape=[self._in_features, self._out_features])
        self.bias = self.create_parameter(
            attr=None,
            dtype='float32',
            shape=[self._out_features])

    def forward(self, features, adj):
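        # One graph-convolution step: out = adj @ (features @ weight) + bias.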
        support = fluid.layers.matmul(features, self.weight)
        # TODO(panyx0718): sparse matmul?
        return fluid.layers.matmul(adj, support) + self.bias


class GCN(fluid.Layer):

    def __init__(self, name_scope, num_hidden):
        super(GCN, self).__init__(name_scope)
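        # Two stacked GraphConv layers (num_hidden -> 32 -> 10) with a ReLU in
        # between, applied in forward().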
        self.gc = GraphConv(self.full_name(), num_hidden, 32)
        self.gc2 = GraphConv(self.full_name(), 32, 10)

    def forward(self, x, adj):
        x = fluid.layers.relu(self.gc(x, adj))
        return self.gc2(x, adj)


class TestDygraphGNN(unittest.TestCase):

    def func_gnn_float32(self):
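        # Build the same GCN once as a static-graph program and twice in
        # dygraph mode, then check that losses and first-layer weights match.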
        paddle.seed(90)
        paddle.framework.random._manual_program_seed(90)
        startup = fluid.Program()
        main = fluid.Program()

        scope = fluid.core.Scope()
        with new_program_scope(main=main, startup=startup, scope=scope):
            features = fluid.layers.data(name='features',
                                         shape=[1, 100, 50],
                                         dtype='float32',
                                         append_batch_size=False)
            # Use selected rows when it's supported.
            adj = fluid.layers.data(name='adj',
                                    shape=[1, 100, 100],
                                    dtype='float32',
                                    append_batch_size=False)
            labels = fluid.layers.data(name='labels',
                                       shape=[100, 1],
                                       dtype='int64',
                                       append_batch_size=False)

            model = GCN('test_gcn', 50)
            logits = model(features, adj)
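            # Drop the leading batch dimension so logits have shape [100, 10].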
            logits = fluid.layers.reshape(logits, logits.shape[1:])
            # In other examples this is NLL with log_softmax; however, Paddle's
            # log_loss only supports binary classification for now.
            loss = fluid.layers.softmax_with_cross_entropy(logits, labels)
            loss = fluid.layers.reduce_sum(loss)

            adam = AdamOptimizer(learning_rate=1e-3)
            adam.minimize(loss)
            place = (fluid.CUDAPlace(0) if core.is_compiled_with_cuda() else
                     fluid.CPUPlace())
            exe = fluid.Executor(place)
            exe.run(startup)
            static_loss = exe.run(
                feed={
                    'features': np.ones([1, 100, 50], dtype=np.float32),
                    'adj': np.ones([1, 100, 100], dtype=np.float32),
                    'labels': np.ones([100, 1], dtype=np.int64)
                },
                fetch_list=[loss])[0]

            static_weight = np.array(
                scope.find_var(model.gc.weight.name).get_tensor())

        with fluid.dygraph.guard():
            paddle.seed(90)
            paddle.framework.random._manual_program_seed(90)

            features = np.ones([1, 100, 50], dtype=np.float32)
            # Use selected rows when it's supported.
            adj = np.ones([1, 100, 100], dtype=np.float32)
            labels = np.ones([100, 1], dtype=np.int64)

            model = GCN('test_gcn', 50)
            logits = model(to_variable(features), to_variable(adj))
            logits = fluid.layers.reshape(logits, logits.shape[1:])
            # In other examples this is NLL with log_softmax; however, Paddle's
            # log_loss only supports binary classification for now.
            loss = fluid.layers.softmax_with_cross_entropy(
                logits, to_variable(labels))
            loss = fluid.layers.reduce_sum(loss)
            loss.backward()
            adam = AdamOptimizer(learning_rate=1e-3,
                                 parameter_list=model.parameters())

            adam.minimize(loss)
            model.clear_gradients()
            loss_value = loss.numpy()
            model_gc_weight_value = model.gc.weight.numpy()

        with fluid.dygraph.guard():
            paddle.seed(90)
            paddle.framework.random._manual_program_seed(90)

            features2 = np.ones([1, 100, 50], dtype=np.float32)
            # Use selected rows when it's supported.
            adj2 = np.ones([1, 100, 100], dtype=np.float32)
            labels2 = np.ones([100, 1], dtype=np.int64)

            model2 = GCN('test_gcn', 50)
            logits2 = model2(to_variable(features2), to_variable(adj2))
            logits2 = fluid.layers.reshape(logits2, logits2.shape[1:])
            # In other examples this is NLL with log_softmax; however, Paddle's
            # log_loss only supports binary classification for now.
            loss2 = fluid.layers.softmax_with_cross_entropy(
                logits2, to_variable(labels2))
            loss2 = fluid.layers.reduce_sum(loss2)
            loss2.backward()
            adam2 = AdamOptimizer(learning_rate=1e-3,
                                  parameter_list=model2.parameters())
            adam2.minimize(loss2)
            model2.clear_gradients()
            loss2_value = loss2.numpy()
            model2_gc_weight_value = model2.gc.weight.numpy()

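        # The static-graph run and both dygraph runs should produce identical
        # losses and first-layer weights.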
        self.assertEqual(static_loss, loss_value)
        np.testing.assert_allclose(static_weight,
                                   model_gc_weight_value,
                                   rtol=1e-05)
        self.assertEqual(static_loss, loss2_value)
        np.testing.assert_allclose(static_weight,
                                   model2_gc_weight_value,
                                   rtol=1e-05)
        sys.stderr.write('%s %s\n' % (static_loss, loss_value))

    def test_gnn_float32(self):
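        # Run once under the eager-mode guard and once in legacy dygraph mode.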
        with _test_eager_guard():
            self.func_gnn_float32()
        self.func_gnn_float32()


if __name__ == '__main__':
    paddle.enable_static()
    unittest.main()