# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
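"""Checks that a GCN trained in imperative (dygraph) mode produces the same
loss and weights as the equivalent static-graph program."""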

import contextlib
import unittest
import numpy as np
import sys

import paddle
import paddle.fluid as fluid
import paddle.fluid.core as core
from paddle.fluid.optimizer import AdamOptimizer
from test_imperative_base import new_program_scope
from paddle.fluid.dygraph.base import to_variable


def gen_data():
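    # Unused placeholder; the test constructs its inputs inline below.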
    pass


class GraphConv(fluid.Layer):
    def __init__(self, name_scope, in_features, out_features):
        super(GraphConv, self).__init__(name_scope)

        self._in_features = in_features
        self._out_features = out_features
        self.weight = self.create_parameter(
            attr=None,
            dtype='float32',
            shape=[self._in_features, self._out_features])
        self.bias = self.create_parameter(
            attr=None, dtype='float32', shape=[self._out_features])

    def forward(self, features, adj):
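        # Dense graph convolution: A @ (X @ W) + b, where A is the adjacency
        # matrix, X the node features, and W, b the learned parameters.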
        support = fluid.layers.matmul(features, self.weight)
        # TODO(panyx0718): sparse matmul?
        return fluid.layers.matmul(adj, support) + self.bias


class GCN(fluid.Layer):
    def __init__(self, name_scope, num_hidden):
        super(GCN, self).__init__(name_scope)
        self.gc = GraphConv(self.full_name(), num_hidden, 32)
        self.gc2 = GraphConv(self.full_name(), 32, 10)

    def forward(self, x, adj):
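        # Two-layer GCN: ReLU after the first graph convolution; the second
        # layer returns raw logits (softmax is applied inside the loss).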
        x = fluid.layers.relu(self.gc(x, adj))
        return self.gc2(x, adj)


class TestDygraphGNN(unittest.TestCase):
    def test_gnn_float32(self):
        paddle.manual_seed(90)
        paddle.framework.random._manual_program_seed(90)
        startup = fluid.Program()
        main = fluid.Program()

        scope = fluid.core.Scope()
        with new_program_scope(main=main, startup=startup, scope=scope):
            features = fluid.layers.data(
                name='features',
                shape=[1, 100, 50],
                dtype='float32',
                append_batch_size=False)
            # Use selected rows when it's supported.
            adj = fluid.layers.data(
                name='adj',
                shape=[1, 100, 100],
                dtype='float32',
                append_batch_size=False)
            labels = fluid.layers.data(
                name='labels',
                shape=[100, 1],
                dtype='int64',
                append_batch_size=False)

            model = GCN('test_gcn', 50)
            logits = model(features, adj)
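            # Drop the leading batch dimension of 1 so logits line up with
            # the [100, 1] labels.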
            logits = fluid.layers.reshape(logits, logits.shape[1:])
            # The reference example uses NLL with log_softmax, but Paddle's
            # log_loss only supports binary classification for now, so use
            # softmax_with_cross_entropy instead.
            loss = fluid.layers.softmax_with_cross_entropy(logits, labels)
            loss = fluid.layers.reduce_sum(loss)

            adam = AdamOptimizer(learning_rate=1e-3)
            adam.minimize(loss)
            place = (fluid.CUDAPlace(0)
                     if core.is_compiled_with_cuda() else fluid.CPUPlace())
            exe = fluid.Executor(place)
            exe.run(startup)
            static_loss = exe.run(
                feed={
                    'features': np.ones([1, 100, 50], dtype=np.float32),
                    'adj': np.ones([1, 100, 100], dtype=np.float32),
                    'labels': np.ones([100, 1], dtype=np.int64)
                },
                fetch_list=[loss])[0]

            static_weight = np.array(
                scope.find_var(model.gc.weight.name).get_tensor())

        with fluid.dygraph.guard():
            paddle.manual_seed(90)
            paddle.framework.random._manual_program_seed(90)

            features = np.ones([1, 100, 50], dtype=np.float32)
            # Use selected rows when it's supported.
            adj = np.ones([1, 100, 100], dtype=np.float32)
            labels = np.ones([100, 1], dtype=np.int64)

            model = GCN('test_gcn', 50)
            logits = model(to_variable(features), to_variable(adj))
            logits = fluid.layers.reshape(logits, logits.shape[1:])
            # As above: softmax_with_cross_entropy stands in for
            # NLL + log_softmax.
            loss = fluid.layers.softmax_with_cross_entropy(logits,
                                                           to_variable(labels))
            loss = fluid.layers.reduce_sum(loss)
            loss.backward()
            adam = AdamOptimizer(
                learning_rate=1e-3, parameter_list=model.parameters())

            adam.minimize(loss)
            model.clear_gradients()
            loss_value = loss.numpy()
            model_gc_weight_value = model.gc.weight.numpy()

        with fluid.dygraph.guard():
            paddle.manual_seed(90)
            paddle.framework.random._manual_program_seed(90)

            features2 = np.ones([1, 100, 50], dtype=np.float32)
            # Use selected rows when it's supported.
            adj2 = np.ones([1, 100, 100], dtype=np.float32)
            labels2 = np.ones([100, 1], dtype=np.int64)

            model2 = GCN('test_gcn', 50)
            logits2 = model2(to_variable(features2), to_variable(adj2))
            logits2 = fluid.layers.reshape(logits2, logits2.shape[1:])
            # As above: softmax_with_cross_entropy stands in for
            # NLL + log_softmax.
            loss2 = fluid.layers.softmax_with_cross_entropy(
                logits2, to_variable(labels2))
            loss2 = fluid.layers.reduce_sum(loss2)
            loss2.backward()
            adam2 = AdamOptimizer(
                learning_rate=1e-3, parameter_list=model2.parameters())
            adam2.minimize(loss2)
            model2.clear_gradients()
            loss2_value = loss2.numpy()
            model2_gc_weight_value = model2.gc.weight.numpy()

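        # All three runs share the same seed and all-ones inputs, so the
        # static-graph results and both dygraph runs must agree.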
        self.assertEqual(static_loss, loss_value)
        self.assertTrue(np.allclose(static_weight, model_gc_weight_value))
        self.assertEqual(static_loss, loss2_value)
        self.assertTrue(np.allclose(static_weight, model2_gc_weight_value))
        sys.stderr.write('%s %s\n' % (static_loss, loss_value))


if __name__ == '__main__':
    unittest.main()