Commit 02582bcc authored by fengshikun01

Merge branch 'PaddlePaddle-main'

```diff
@@ -124,8 +124,7 @@ def main(args, train_dataset, val_dataset, test_dataset):
             break
     correct = 0.
-    new_test_program = fluid.Program()
-    fluid.load(new_test_program, "./save/%s/%s" \
+    fluid.load(test_program, "./save/%s/%s" \
         % (args.dataset_name, args.save_model), exe)
     for feed_dict in test_loader:
         correct_ = exe.run(test_program,
...
```
# Easy Paper Reproduction for Citation Network (Cora/Pubmed/Citeseer)
This page reproduces the main **Graph Neural Network** papers on the citation networks (Cora/Pubmed/Citeseer), the **hello world** datasets (**small** and **fast**) of graph neural networks, on which it is nevertheless hard to reach very high accuracy.
All datasets use the public split of the **semi-supervised** setting, and we report the average accuracy over 10 runs.
# Experiment Results
| Model | Cora | Pubmed | Citeseer | Remarks |
| ------------------------------------------------------------ | ------------ | ------------ | ------------ | --------------------------------------------------------- |
| [Vanilla GCN (Kipf 2017)](https://openreview.net/pdf?id=SJU4ayYgl) | 0.807(0.010) | 0.794(0.003) | 0.710(0.007) | |
| [GAT (Veličković 2017)](https://arxiv.org/pdf/1710.10903.pdf) | 0.834(0.004) | 0.772(0.004) | 0.700(0.006) | |
| [SGC(Wu 2019)](https://arxiv.org/pdf/1902.07153.pdf) | 0.818(0.000) | 0.782(0.000) | 0.708(0.000) | |
| [APPNP (Klicpera 2018)](https://arxiv.org/abs/1810.05997) | 0.846(0.003) | 0.803(0.002) | 0.719(0.003) | Almost the same as the results reported in Appendix E. |
| [GCNII (64 Layers, 1500 Epochs, Chen 2020)](https://arxiv.org/pdf/2007.02133.pdf) | 0.846(0.003) | 0.798(0.003) | 0.724(0.006) | |
# How to run the experiments
```shell
# Choose the GPU device
export CUDA_VISIBLE_DEVICES=0
# GCN
python train.py --conf config/gcn.yaml --use_cuda --dataset cora
python train.py --conf config/gcn.yaml --use_cuda --dataset pubmed
python train.py --conf config/gcn.yaml --use_cuda --dataset citeseer
# GAT
python train.py --conf config/gat.yaml --use_cuda --dataset cora
python train.py --conf config/gat.yaml --use_cuda --dataset pubmed
python train.py --conf config/gat.yaml --use_cuda --dataset citeseer
# SGC (Slow version)
python train.py --conf config/sgc.yaml --use_cuda --dataset cora
python train.py --conf config/sgc.yaml --use_cuda --dataset pubmed
python train.py --conf config/sgc.yaml --use_cuda --dataset citeseer
# APPNP
python train.py --conf config/appnp.yaml --use_cuda --dataset cora
python train.py --conf config/appnp.yaml --use_cuda --dataset pubmed
python train.py --conf config/appnp.yaml --use_cuda --dataset citeseer
# GCNII (the original code uses 1500 epochs)
python train.py --conf config/gcnii.yaml --use_cuda --dataset cora --epoch 1500
python train.py --conf config/gcnii.yaml --use_cuda --dataset pubmed --epoch 1500
python train.py --conf config/gcnii.yaml --use_cuda --dataset citeseer --epoch 1500
```
**build_model.py**

```python
import pgl
import model
from pgl import data_loader
import paddle.fluid as fluid
import numpy as np
import time

def build_model(dataset, config, phase, main_prog):
    gw = pgl.graph_wrapper.GraphWrapper(
        name="graph",
        node_feat=dataset.graph.node_feat_info())

    GraphModel = getattr(model, config.model_name)
    m = GraphModel(config=config, num_class=dataset.num_classes)
    logits = m.forward(gw, gw.node_feat["words"], phase)

    # Gather predictions on the labeled subset of nodes
    node_index = fluid.layers.data(
        "node_index",
        shape=[None, 1],
        dtype="int64",
        append_batch_size=False)
    node_label = fluid.layers.data(
        "node_label",
        shape=[None, 1],
        dtype="int64",
        append_batch_size=False)

    pred = fluid.layers.gather(logits, node_index)
    loss, pred = fluid.layers.softmax_with_cross_entropy(
        logits=pred, label=node_label, return_softmax=True)
    acc = fluid.layers.accuracy(input=pred, label=node_label, k=1)
    loss = fluid.layers.mean(loss)

    if phase == "train":
        adam = fluid.optimizer.Adam(
            learning_rate=config.learning_rate,
            regularization=fluid.regularizer.L2DecayRegularizer(
                regularization_coeff=config.weight_decay))
        adam.minimize(loss)
    return gw, loss, acc
```
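For context, `train.py` (further below) calls `build_model` once per program under program and name guards; a minimal sketch of the call pattern, mirroring `main()` in `train.py`:

```python
# Sketch (mirrors main() in train.py): build the train program;
# the test program is built the same way with phase="test".
import paddle.fluid as fluid

train_program = fluid.default_main_program()
startup_program = fluid.default_startup_program()
with fluid.program_guard(train_program, startup_program):
    with fluid.unique_name.guard():
        gw, loss, acc = build_model(dataset, config=config,
                                    phase="train", main_prog=train_program)
```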
**config/appnp.yaml**

```yaml
model_name: APPNP
k_hop: 10
alpha: 0.1
num_layers: 1
learning_rate: 0.01
dropout: 0.5
hidden_size: 64
weight_decay: 0.0005
edge_dropout: 0.0
```

**config/gat.yaml**

```yaml
model_name: GAT
learning_rate: 0.005
weight_decay: 0.0005
num_layers: 1
feat_drop: 0.6
attn_drop: 0.6
num_heads: 8
hidden_size: 8
edge_dropout: 0.0
```

**config/gcn.yaml**

```yaml
model_name: GCN
num_layers: 1
dropout: 0.5
hidden_size: 16
learning_rate: 0.01
weight_decay: 0.0005
edge_dropout: 0.0
```

**config/gcnii.yaml**

```yaml
model_name: GCNII
k_hop: 64
alpha: 0.1
num_layers: 1
learning_rate: 0.01
dropout: 0.6
hidden_size: 64
weight_decay: 0.0005
edge_dropout: 0.0
```

**config/sgc.yaml**

```yaml
model_name: SGC
num_layers: 2
learning_rate: 0.2
weight_decay: 0.000005
feature_pre_normalize: False
```
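These files are loaded in `train.py` via `EasyDict`, so every key becomes an attribute on `config`:

```python
# Sketch: how a YAML config above becomes the `config` object the models read.
import yaml
from easydict import EasyDict as edict

config = edict(yaml.load(open("config/gcn.yaml"), Loader=yaml.FullLoader))
print(config.model_name, config.hidden_size)  # GCN 16
```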
**model.py**

```python
import pgl
import paddle.fluid.layers as L
import pgl.layers.conv as conv


def get_norm(indegree):
    """Return per-node normalization d_i^{-1/2}, with degrees clipped at 1."""
    float_degree = L.cast(indegree, dtype="float32")
    float_degree = L.clamp(float_degree, min=1.0)
    norm = L.pow(float_degree, factor=-0.5)
    return norm
```
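For reference, `get_norm` produces the per-node factors $d_i^{-1/2}$ (degrees clipped at 1 to avoid division by zero) of the symmetric normalization in GCN (Kipf 2017), whose layer rule is $H^{(l+1)} = \sigma(D^{-1/2} A D^{-1/2} H^{(l)} W^{(l)})$.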
```python
class GCN(object):
    """Implementation of GCN"""

    def __init__(self, config, num_class):
        self.num_class = num_class
        self.num_layers = config.get("num_layers", 1)
        self.hidden_size = config.get("hidden_size", 64)
        self.dropout = config.get("dropout", 0.5)
        self.edge_dropout = config.get("edge_dropout", 0.0)

    def forward(self, graph_wrapper, feature, phase):
        for i in range(self.num_layers):
            if phase == "train":
                ngw = pgl.sample.edge_drop(graph_wrapper, self.edge_dropout)
                norm = get_norm(ngw.indegree())
            else:
                ngw = graph_wrapper
                norm = graph_wrapper.node_feat["norm"]

            feature = pgl.layers.gcn(ngw,
                                     feature,
                                     self.hidden_size,
                                     activation="relu",
                                     norm=norm,
                                     name="layer_%s" % i)
            feature = L.dropout(
                feature,
                self.dropout,
                dropout_implementation='upscale_in_train')

        if phase == "train":
            ngw = pgl.sample.edge_drop(graph_wrapper, self.edge_dropout)
            norm = get_norm(ngw.indegree())
        else:
            ngw = graph_wrapper
            norm = graph_wrapper.node_feat["norm"]

        feature = conv.gcn(ngw,
                           feature,
                           self.num_class,
                           activation=None,
                           norm=norm,
                           name="output")
        return feature
```
```python
class GAT(object):
    """Implementation of GAT"""

    def __init__(self, config, num_class):
        self.num_class = num_class
        self.num_layers = config.get("num_layers", 1)
        self.num_heads = config.get("num_heads", 8)
        self.hidden_size = config.get("hidden_size", 8)
        self.feat_dropout = config.get("feat_drop", 0.6)
        self.attn_dropout = config.get("attn_drop", 0.6)
        self.edge_dropout = config.get("edge_dropout", 0.0)

    def forward(self, graph_wrapper, feature, phase):
        if phase == "train":
            edge_dropout = self.edge_dropout
        else:
            edge_dropout = 0

        for i in range(self.num_layers):
            ngw = pgl.sample.edge_drop(graph_wrapper, edge_dropout)
            feature = conv.gat(ngw,
                               feature,
                               self.hidden_size,
                               activation="elu",
                               name="gat_layer_%s" % i,
                               num_heads=self.num_heads,
                               feat_drop=self.feat_dropout,
                               attn_drop=self.attn_dropout)

        ngw = pgl.sample.edge_drop(graph_wrapper, edge_dropout)
        feature = conv.gat(ngw,
                           feature,
                           self.num_class,
                           num_heads=1,
                           activation=None,
                           feat_drop=self.feat_dropout,
                           attn_drop=self.attn_dropout,
                           name="output")
        return feature
```
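For reference, each GAT layer aggregates neighbors with learned attention: $\alpha_{ij} = \mathrm{softmax}_{j \in \mathcal{N}(i)}\big(\mathrm{LeakyReLU}(a^{\top}[W h_i \| W h_j])\big)$ and $h_i' = \sigma\big(\sum_{j \in \mathcal{N}(i)} \alpha_{ij} W h_j\big)$; the hidden layers concatenate 8 heads and the output layer uses a single head with no activation, matching `config/gat.yaml` above.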
```python
class APPNP(object):
    """Implementation of APPNP"""

    def __init__(self, config, num_class):
        self.num_class = num_class
        self.num_layers = config.get("num_layers", 1)
        self.hidden_size = config.get("hidden_size", 64)
        self.dropout = config.get("dropout", 0.5)
        self.alpha = config.get("alpha", 0.1)
        self.k_hop = config.get("k_hop", 10)
        self.edge_dropout = config.get("edge_dropout", 0.0)

    def forward(self, graph_wrapper, feature, phase):
        if phase == "train":
            edge_dropout = self.edge_dropout
        else:
            edge_dropout = 0

        for i in range(self.num_layers):
            feature = L.dropout(
                feature,
                self.dropout,
                dropout_implementation='upscale_in_train')
            feature = L.fc(feature, self.hidden_size, act="relu", name="lin%s" % i)

        feature = L.dropout(
            feature,
            self.dropout,
            dropout_implementation='upscale_in_train')
        feature = L.fc(feature, self.num_class, act=None, name="output")

        feature = conv.appnp(graph_wrapper,
                             feature=feature,
                             edge_dropout=edge_dropout,
                             alpha=self.alpha,
                             k_hop=self.k_hop)
        return feature
```
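For reference, APPNP decouples prediction from propagation: the `L.fc` layers compute $Z^{(0)} = f_\theta(X)$, then `conv.appnp` iterates the personalized-PageRank update $Z^{(k+1)} = (1-\alpha)\hat{A}Z^{(k)} + \alpha Z^{(0)}$ for $k = 0,\dots,K-1$, with the normalized adjacency $\hat{A}$, $\alpha = 0.1$ and $K = 10$ from `config/appnp.yaml`.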
```python
class SGC(object):
    """Implementation of SGC"""

    def __init__(self, config, num_class):
        self.num_class = num_class
        self.num_layers = config.get("num_layers", 1)

    def forward(self, graph_wrapper, feature, phase):
        feature = conv.appnp(graph_wrapper,
                             feature=feature,
                             edge_dropout=0,
                             alpha=0,
                             k_hop=self.num_layers)
        feature.stop_gradient = True
        feature = L.fc(feature, self.num_class, act=None, bias_attr=False, name="output")
        return feature
```
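For reference, SGC removes all intermediate nonlinearities, collapsing a $K$-layer GCN to $\hat{Y} = \mathrm{softmax}(\hat{A}^{K} X W)$; the code reuses `conv.appnp` with `alpha=0` as a pure $K$-hop propagation, and `stop_gradient` turns the propagated features into a fixed preprocessing step so only the final linear layer is trained.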
```python
class GCNII(object):
    """Implementation of GCNII"""

    def __init__(self, config, num_class):
        self.num_class = num_class
        self.num_layers = config.get("num_layers", 1)
        self.hidden_size = config.get("hidden_size", 64)
        self.dropout = config.get("dropout", 0.6)
        self.alpha = config.get("alpha", 0.1)
        self.lambda_l = config.get("lambda_l", 0.5)
        self.k_hop = config.get("k_hop", 64)
        self.edge_dropout = config.get("edge_dropout", 0.0)

    def forward(self, graph_wrapper, feature, phase):
        if phase == "train":
            edge_dropout = self.edge_dropout
        else:
            edge_dropout = 0

        for i in range(self.num_layers):
            feature = L.fc(feature, self.hidden_size, act="relu", name="lin%s" % i)
            feature = L.dropout(
                feature,
                self.dropout,
                dropout_implementation='upscale_in_train')

        feature = conv.gcnii(graph_wrapper,
                             feature=feature,
                             name="gcnii",
                             activation="relu",
                             lambda_l=self.lambda_l,
                             alpha=self.alpha,
                             dropout=self.dropout,
                             k_hop=self.k_hop)

        feature = L.fc(feature, self.num_class, act=None, name="output")
        return feature
```
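For reference, a GCNII layer combines an initial residual connection with identity mapping: $H^{(l+1)} = \sigma\big(((1-\alpha)\hat{A}H^{(l)} + \alpha H^{(0)})((1-\beta_l)I + \beta_l W^{(l)})\big)$ with $\beta_l = \ln(\lambda/l + 1)$; here $\alpha = 0.1$, $\lambda$ is `lambda_l` (default 0.5), and `conv.gcnii` runs `k_hop` = 64 propagation steps.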
**train.py**

```python
# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pgl
import model
from pgl import data_loader
from pgl.utils.logger import log
import paddle.fluid as fluid
import numpy as np
import time
import argparse
from build_model import build_model
import yaml
from easydict import EasyDict as edict
import tqdm


def normalize(feat):
    """Row-normalize a feature matrix; all-zero rows are left unchanged."""
    return feat / np.maximum(np.sum(feat, -1, keepdims=True), 1)
```
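A quick sanity check of `normalize` (all-zero rows pass through unchanged because of the `np.maximum(..., 1)` guard):

```python
import numpy as np

feat = np.array([[1., 3.],
                 [0., 0.]])
print(feat / np.maximum(np.sum(feat, -1, keepdims=True), 1))
# [[0.25 0.75]
#  [0.   0.  ]]
```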
```python
def load(name, normalized_feature=True):
    if name == 'cora':
        dataset = data_loader.CoraDataset()
    elif name == "pubmed":
        dataset = data_loader.CitationDataset("pubmed", symmetry_edges=True)
    elif name == "citeseer":
        dataset = data_loader.CitationDataset("citeseer", symmetry_edges=True)
    else:
        raise ValueError(name + " dataset doesn't exist")

    indegree = dataset.graph.indegree()
    norm = np.maximum(indegree.astype("float32"), 1)
    norm = np.power(norm, -0.5)
    dataset.graph.node_feat["norm"] = np.expand_dims(norm, -1)
    if normalized_feature:
        dataset.graph.node_feat["words"] = normalize(dataset.graph.node_feat["words"])
    return dataset
```
```python
def main(args, config):
    dataset = load(args.dataset, args.feature_pre_normalize)

    place = fluid.CUDAPlace(0) if args.use_cuda else fluid.CPUPlace()
    train_program = fluid.default_main_program()
    startup_program = fluid.default_startup_program()
    with fluid.program_guard(train_program, startup_program):
        with fluid.unique_name.guard():
            gw, loss, acc = build_model(dataset,
                                        config=config,
                                        phase="train",
                                        main_prog=train_program)

    test_program = fluid.Program()
    with fluid.program_guard(test_program, startup_program):
        with fluid.unique_name.guard():
            _gw, v_loss, v_acc = build_model(dataset,
                                             config=config,
                                             phase="test",
                                             main_prog=test_program)

    test_program = test_program.clone(for_test=True)

    exe = fluid.Executor(place)

    train_index = dataset.train_index
    train_label = np.expand_dims(dataset.y[train_index], -1)
    train_index = np.expand_dims(train_index, -1)

    val_index = dataset.val_index
    val_label = np.expand_dims(dataset.y[val_index], -1)
    val_index = np.expand_dims(val_index, -1)

    test_index = dataset.test_index
    test_label = np.expand_dims(dataset.y[test_index], -1)
    test_index = np.expand_dims(test_index, -1)

    dur = []

    # Feed data
    feed_dict = gw.to_feed(dataset.graph)

    best_test = []
    for run in range(args.runs):
        exe.run(startup_program)
        cal_val_acc = []
        cal_test_acc = []
        cal_val_loss = []
        cal_test_loss = []
        for epoch in tqdm.tqdm(range(args.epoch)):
            feed_dict["node_index"] = np.array(train_index, dtype="int64")
            feed_dict["node_label"] = np.array(train_label, dtype="int64")
            train_loss, train_acc = exe.run(train_program,
                                            feed=feed_dict,
                                            fetch_list=[loss, acc],
                                            return_numpy=True)

            feed_dict["node_index"] = np.array(val_index, dtype="int64")
            feed_dict["node_label"] = np.array(val_label, dtype="int64")
            val_loss, val_acc = exe.run(test_program,
                                        feed=feed_dict,
                                        fetch_list=[v_loss, v_acc],
                                        return_numpy=True)
            cal_val_acc.append(val_acc[0])
            cal_val_loss.append(val_loss[0])

            feed_dict["node_index"] = np.array(test_index, dtype="int64")
            feed_dict["node_label"] = np.array(test_label, dtype="int64")
            test_loss, test_acc = exe.run(test_program,
                                          feed=feed_dict,
                                          fetch_list=[v_loss, v_acc],
                                          return_numpy=True)
            cal_test_acc.append(test_acc[0])
            cal_test_loss.append(test_loss[0])

        log.info("Runs %s: Model: %s Best Test Accuracy: %f" %
                 (run, config.model_name, cal_test_acc[np.argmin(cal_val_loss)]))
        best_test.append(cal_test_acc[np.argmin(cal_val_loss)])

    log.info("Dataset: %s Best Test Accuracy: %f ( stddev: %f )" %
             (args.dataset, np.mean(best_test), np.std(best_test)))
    print("Dataset: %s Best Test Accuracy: %f ( stddev: %f )" %
          (args.dataset, np.mean(best_test), np.std(best_test)))
```
```python
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Benchmarking Citation Network')
    parser.add_argument(
        "--dataset", type=str, default="cora", help="dataset (cora, pubmed, citeseer)")
    parser.add_argument("--use_cuda", action='store_true', help="use_cuda")
    parser.add_argument("--conf", type=str, help="config file for models")
    parser.add_argument("--epoch", type=int, default=200, help="Epoch")
    parser.add_argument("--runs", type=int, default=10, help="runs")
    parser.add_argument("--feature_pre_normalize", type=bool, default=True,
                        help="pre-normalize features")
    args = parser.parse_args()
    config = edict(yaml.load(open(args.conf), Loader=yaml.FullLoader))
    log.info(args)
    main(args, config)
```
````diff
@@ -49,6 +49,8 @@ sh local_run.sh config/enriesage_v1_gpu.yaml
 sh local_run.sh config/enriesage_v1_cpu.yaml
 ```
 
+**NOTE**: To help users better understand the ERNIESage Model, we provide a running example in Baidu AIStudio. Please visit here: https://aistudio.baidu.com/aistudio/projectdetail/667443.
+
 ## Hyperparamters
 - learner_type: `gpu` or `cpu`; gpu use fleet Collective mode, cpu use fleet Transpiler mode.
...
````

````diff
@@ -50,6 +50,8 @@ sh local_run.sh config/erniesage_v2_gpu.yaml
 sh local_run.sh config/erniesage_v2_cpu.yaml
 ```
 
+**NOTE**: To make ERNIESage easier to learn and use, we provide a ready-to-run ERNIESage example on Baidu AIStudio; see: https://aistudio.baidu.com/aistudio/projectdetail/667443.
+
 ## Hyperparamters
 - learner_type: `gpu` or `cpu`; gpu uses fleet Collective mode, cpu uses fleet Transpiler mode.
...
````
```diff
 # Global Environment Settings
 #
 # trainer config ------
+task: "link_predict"
 learner_type: "gpu"
 optimizer_type: "adam"
 lr: 0.00005
 batch_size: 32
 CPU_NUM: 10
-epoch: 20
+epoch: 3
 log_per_step: 1
-save_per_step: 100
+save_per_step: 1000
 output_path: "./output"
 ckpt_path: "./ernie_base_ckpt"
 
 # data config ------
-input_data: "./data.txt"
-graph_path: "./workdir"
+graph_data: "./example_data/link_predict/graph_data.txt"
+train_data: "./example_data/link_predict/train_data.txt"
+graph_work_path: "./workdir"
 sample_workers: 1
 use_pyreader: true
 input_type: "text"
 
 # model config ------
 samples: [10]
-model_type: "ErnieSageModelV1"
+model_type: "ERNIESageV2"
 layer_type: "graphsage_sum"
 max_seqlen: 40
@@ -30,8 +33,9 @@ num_layers: 1
 hidden_size: 128
 final_fc: true
 final_l2_norm: true
-loss_type: "hinge"
-margin: 0.3
+loss_type: "global_hinge"
+margin: 0.1
+neg_type: "batch_neg"
 
 # infer config ------
 infer_model: "./output/last"
...
```
```diff
 # Global Environment Settings
 #
 # trainer config ------
+task: "node_classification"
 learner_type: "gpu"
 optimizer_type: "adam"
 lr: 0.00005
 batch_size: 32
 CPU_NUM: 10
 epoch: 3
-log_per_step: 10
+log_per_step: 1
 save_per_step: 1000
 output_path: "./output"
 ckpt_path: "./ernie_base_ckpt"
 
 # data config ------
-input_data: "./data.txt"
-graph_path: "./workdir"
+graph_data: "./example_data/node_classification/graph_data.txt"
+train_data: "./example_data/node_classification/train_data.txt"
+graph_work_path: "./workdir"
 sample_workers: 1
 use_pyreader: true
 input_type: "text"
 
 # model config ------
+num_label: 10
 samples: [10]
-model_type: "ErnieSageModelV2"
+model_type: "ERNIESageV2"
+layer_type: "graphsage_sum"
 max_seqlen: 40
@@ -29,8 +34,8 @@ num_layers: 1
 hidden_size: 128
 final_fc: true
 final_l2_norm: true
-loss_type: "hinge"
-margin: 0.3
+loss_type: "softmax_with_cross_entropy"
+margin: 0.1
 neg_type: "batch_neg"
 
 # infer config ------
@@ -49,7 +54,7 @@ ernie_config:
 max_position_embeddings: 513
 num_attention_heads: 12
 num_hidden_layers: 12
-sent_type_vocab_size: 2
+sent_type_vocab_size: 4
 task_type_vocab_size: 3
 vocab_size: 18000
 use_task_id: false
...
```
**ERNIESage V1 config (CPU):**

```yaml
# Global Environment Settings
#
# trainer config ------
learner_type: "cpu"
optimizer_type: "adam"
lr: 0.00005
batch_size: 2
CPU_NUM: 10
epoch: 20
log_per_step: 1
save_per_step: 100
output_path: "./output"
ckpt_path: "./ernie_base_ckpt"

# data config ------
input_data: "./data.txt"
graph_path: "./workdir"
sample_workers: 1
use_pyreader: true
input_type: "text"

# model config ------
samples: [10]
model_type: "ErnieSageModelV1"
layer_type: "graphsage_sum"
max_seqlen: 40
num_layers: 1
hidden_size: 128
final_fc: true
final_l2_norm: true
loss_type: "hinge"
margin: 0.3

# infer config ------
infer_model: "./output/last"
infer_batch_size: 128

# ernie config ------
encoding: "utf8"
ernie_vocab_file: "./vocab.txt"
ernie_config:
    attention_probs_dropout_prob: 0.1
    hidden_act: "relu"
    hidden_dropout_prob: 0.1
    hidden_size: 768
    initializer_range: 0.02
    max_position_embeddings: 513
    num_attention_heads: 12
    num_hidden_layers: 12
    sent_type_vocab_size: 4
    task_type_vocab_size: 3
    vocab_size: 18000
    use_task_id: false
    use_fp16: false
```
**ERNIESage V2 config (CPU):**

```yaml
# Global Environment Settings
#
# trainer config ------
learner_type: "cpu"
optimizer_type: "adam"
lr: 0.00005
batch_size: 4
CPU_NUM: 16
epoch: 3
log_per_step: 1
save_per_step: 100
output_path: "./output"
ckpt_path: "./ernie_base_ckpt"

# data config ------
input_data: "./data.txt"
graph_path: "./workdir"
sample_workers: 1
use_pyreader: true
input_type: "text"

# model config ------
samples: [10]
model_type: "ErnieSageModelV2"
max_seqlen: 40
num_layers: 1
hidden_size: 128
final_fc: true
final_l2_norm: true
loss_type: "hinge"
margin: 0.3
neg_type: "batch_neg"

# infer config ------
infer_model: "./output/last"
infer_batch_size: 128

# ernie config ------
encoding: "utf8"
ernie_vocab_file: "./vocab.txt"
ernie_config:
    attention_probs_dropout_prob: 0.1
    hidden_act: "relu"
    hidden_dropout_prob: 0.1
    hidden_size: 768
    initializer_range: 0.02
    max_position_embeddings: 513
    num_attention_heads: 12
    num_hidden_layers: 12
    sent_type_vocab_size: 2
    task_type_vocab_size: 3
    vocab_size: 18000
    use_task_id: false
    use_fp16: false
```
**ERNIESage V3 config (CPU):**

```yaml
# Global Environment Settings
#
# trainer config ------
learner_type: "cpu"
optimizer_type: "adam"
lr: 0.00005
batch_size: 2
CPU_NUM: 10
epoch: 20
log_per_step: 1
save_per_step: 100
output_path: "./output"
ckpt_path: "./ernie_base_ckpt"

# data config ------
input_data: "./data.txt"
graph_path: "./workdir"
sample_workers: 1
use_pyreader: true
input_type: "text"

# model config ------
samples: [10]
model_type: "ErnieSageModelV3"
max_seqlen: 40
num_layers: 1
hidden_size: 128
final_fc: true
final_l2_norm: true
loss_type: "hinge"
margin: 0.3

# infer config ------
infer_model: "./output/last"
infer_batch_size: 128

# ernie config ------
encoding: "utf8"
ernie_vocab_file: "./vocab.txt"
ernie_config:
    attention_probs_dropout_prob: 0.1
    hidden_act: "relu"
    hidden_dropout_prob: 0.1
    hidden_size: 768
    initializer_range: 0.02
    max_position_embeddings: 513
    num_attention_heads: 12
    num_hidden_layers: 12
    sent_type_vocab_size: 4
    task_type_vocab_size: 3
    vocab_size: 18000
    use_task_id: false
    use_fp16: false
```
**ERNIESage V3 config (GPU):**

```yaml
# Global Environment Settings
#
# trainer config ------
learner_type: "gpu"
optimizer_type: "adam"
lr: 0.00005
batch_size: 32
CPU_NUM: 10
epoch: 20
log_per_step: 1
save_per_step: 100
output_path: "./output"
ckpt_path: "./ernie_base_ckpt"

# data config ------
input_data: "./data.txt"
graph_path: "./workdir"
sample_workers: 1
use_pyreader: true
input_type: "text"

# model config ------
samples: [10]
model_type: "ErnieSageModelV3"
max_seqlen: 40
num_layers: 1
hidden_size: 128
final_fc: true
final_l2_norm: true
loss_type: "hinge"
margin: 0.3

# infer config ------
infer_model: "./output/last"
infer_batch_size: 128

# ernie config ------
encoding: "utf8"
ernie_vocab_file: "./vocab.txt"
ernie_config:
    attention_probs_dropout_prob: 0.1
    hidden_act: "relu"
    hidden_dropout_prob: 0.1
    hidden_size: 768
    initializer_range: 0.02
    max_position_embeddings: 513
    num_attention_heads: 12
    num_hidden_layers: 12
    sent_type_vocab_size: 4
    task_type_vocab_size: 3
    vocab_size: 18000
    use_task_id: false
    use_fp16: false
```
```diff
@@ -74,17 +74,15 @@ class GraphGenerator(BaseDataGenerator):
         batch_dst = np.array(batch_dst, dtype="int64")
 
         if self.neg_type == "batch_neg":
-            neg_shape = [1]
+            batch_neg = batch_dst
         else:
+            # TODO user define shape of neg_sample
             neg_shape = batch_dst.shape
-        sampled_batch_neg = alias_sample(neg_shape, self.alias, self.events)
-
-        if len(batch_neg) > 0:
-            batch_neg = np.concatenate([batch_neg, sampled_batch_neg], 0)
-        else:
-            batch_neg = sampled_batch_neg
+            sampled_batch_neg = alias_sample(neg_shape, self.alias, self.events)
+            batch_neg = np.concatenate([batch_neg, sampled_batch_neg], 0)
 
         if self.phase == "train":
+            # TODO user define ignore edges or not
             #ignore_edges = np.concatenate([np.stack([batch_src, batch_dst], 1), np.stack([batch_dst, batch_src], 1)], 0)
             ignore_edges = set()
         else:
@@ -92,7 +90,8 @@ class GraphGenerator(BaseDataGenerator):
         nodes = np.unique(np.concatenate([batch_src, batch_dst, batch_neg], 0))
         subgraphs = graphsage_sample(self.graph, nodes, self.samples, ignore_edges=ignore_edges)
-        #subgraphs[0].reindex_to_parrent_nodes(subgraphs[0].nodes)
+        subgraphs[0].node_feat["index"] = subgraphs[0].reindex_to_parrent_nodes(subgraphs[0].nodes).astype(np.int64)
+        subgraphs[0].node_feat["term_ids"] = self.term_ids[subgraphs[0].node_feat["index"]].astype(np.int64)
         feed_dict = {}
         for i in range(self.num_layers):
             feed_dict.update(self.graph_wrappers[i].to_feed(subgraphs[i]))
@@ -103,9 +102,11 @@ class GraphGenerator(BaseDataGenerator):
         sub_neg_idx = subgraphs[0].reindex_from_parrent_nodes(batch_neg)
 
         feed_dict["user_index"] = np.array(sub_src_idx, dtype="int64")
-        feed_dict["item_index"] = np.array(sub_dst_idx, dtype="int64")
+        feed_dict["pos_item_index"] = np.array(sub_dst_idx, dtype="int64")
         feed_dict["neg_item_index"] = np.array(sub_neg_idx, dtype="int64")
-        feed_dict["term_ids"] = self.term_ids[subgraphs[0].node_feat["index"]].astype(np.int64)
+        feed_dict["user_real_index"] = np.array(batch_src, dtype="int64")
+        feed_dict["pos_item_real_index"] = np.array(batch_dst, dtype="int64")
         return feed_dict
 
     def __call__(self):
@@ -124,3 +125,37 @@ class GraphGenerator(BaseDataGenerator):
+
+
+class NodeClassificationGenerator(GraphGenerator):
+    def batch_fn(self, batch_ex):
+        # batch_ex = [
+        #     (node, label),
+        #     (node, label),
+        # ]
+        batch_node = []
+        batch_label = []
+        for batch in batch_ex:
+            batch_node.append(batch[0])
+            batch_label.append(batch[1])
+
+        if len(batch_node) != self.batch_size:
+            if self.phase == "train":
+                return None  # Skip
+
+        batch_node = np.array(batch_node, dtype="int64")
+        batch_label = np.array(batch_label, dtype="int64")
+
+        subgraphs = graphsage_sample(self.graph, batch_node, self.samples)
+        subgraphs[0].node_feat["index"] = subgraphs[0].reindex_to_parrent_nodes(subgraphs[0].nodes).astype(np.int64)
+        subgraphs[0].node_feat["term_ids"] = self.term_ids[subgraphs[0].node_feat["index"]].astype(np.int64)
+
+        feed_dict = {}
+        for i in range(self.num_layers):
+            feed_dict.update(self.graph_wrappers[i].to_feed(subgraphs[i]))
+
+        # only reindex from first subgraph
+        sub_node_idx = subgraphs[0].reindex_from_parrent_nodes(batch_node)
+
+        feed_dict["node_index"] = np.array(sub_node_idx, dtype="int64")
+        feed_dict["node_real_index"] = np.array(batch_node, dtype="int64")
+        feed_dict["label"] = np.array(batch_label, dtype="int64")
+        return feed_dict
```
```diff
@@ -34,7 +34,7 @@ from pgl.utils import paddle_helper
 import paddle
 import paddle.fluid as F
 
-from models.model_factory import Model
+from models.model import LinkPredictModel
 from dataset.graph_reader import GraphGenerator
 
@@ -59,7 +59,7 @@ def run_predict(py_reader,
                 log_per_step=1,
                 args=None):
-    id2str = io.open(os.path.join(args.graph_path, "terms.txt"), encoding=args.encoding).readlines()
+    id2str = io.open(os.path.join(args.graph_work_path, "terms.txt"), encoding=args.encoding).readlines()
     trainer_id = int(os.getenv("PADDLE_TRAINER_ID", "0"))
     trainer_count = int(os.getenv("PADDLE_TRAINERS_NUM", "1"))
 
@@ -71,7 +71,7 @@ def run_predict(py_reader,
     for batch_feed_dict in py_reader():
         batch += 1
-        batch_usr_feat, batch_ad_feat, _, batch_src_real_index = exe.run(
+        batch_usr_feat, _, _, batch_src_real_index, _ = exe.run(
             program,
             feed=batch_feed_dict,
             fetch_list=model_dict.outputs)
 
@@ -79,7 +79,7 @@ def run_predict(py_reader,
         if batch % log_per_step == 0:
             log.info("Predict %s finished" % batch)
-        for ufs, _, sri in zip(batch_usr_feat, batch_ad_feat, batch_src_real_index):
+        for ufs, sri in zip(batch_usr_feat, batch_src_real_index):
             if args.input_type == "text":
                 sri = id2str[int(sri)].strip("\n")
             line = "{}\t{}\n".format(sri, tostr(ufs))
 
@@ -108,7 +108,7 @@ def _warmstart(exe, program, path='params'):
     )
 
 def main(config):
-    model = Model.factory(config)
+    model = LinkPredictModel(config)
 
     if config.learner_type == "cpu":
         place = F.CPUPlace()
 
@@ -143,7 +143,7 @@ def main(config):
         build_strategy=build_strategy,
         exec_strategy=exec_strategy)
 
-    num_nodes = int(np.load(os.path.join(config.graph_path, "num_nodes.npy")))
+    num_nodes = int(np.load(os.path.join(config.graph_work_path, "num_nodes.npy")))
 
     predict_data = PredictData(num_nodes)
 
@@ -156,7 +156,7 @@ def main(config):
         feed_name_list=[var.name for var in model.feed_list],
         use_pyreader=config.use_pyreader,
         phase="predict",
-        graph_data_path=config.graph_path,
+        graph_data_path=config.graph_work_path,
         shuffle=False)
 
     if config.learner_type == "cpu":
...
```
```diff
@@ -22,27 +22,26 @@ from pgl.utils.logger import log
 from pgl.utils import paddle_helper
 from learner import Learner
-from models.model_factory import Model
+from models.model import LinkPredictModel
 from dataset.graph_reader import GraphGenerator
 
 
 class TrainData(object):
-    def __init__(self, graph_path):
+    def __init__(self, graph_work_path):
         trainer_id = int(os.getenv("PADDLE_TRAINER_ID", "0"))
         trainer_count = int(os.getenv("PADDLE_TRAINERS_NUM", "1"))
         log.info("trainer_id: %s, trainer_count: %s." % (trainer_id, trainer_count))
 
-        bidirectional_edges = np.load(os.path.join(graph_path, "edges.npy"), allow_pickle=True)
+        edges = np.load(os.path.join(graph_work_path, "train_data.npy"), allow_pickle=True)
         # edges is bidirectional.
-        edges = bidirectional_edges[0::2]
         train_usr = edges[trainer_id::trainer_count, 0]
         train_ad = edges[trainer_id::trainer_count, 1]
         returns = {
             "train_data": [train_usr, train_ad]
         }
 
-        if os.path.exists(os.path.join(graph_path, "neg_samples.npy")):
-            neg_samples = np.load(os.path.join(graph_path, "neg_samples.npy"), allow_pickle=True)
+        if os.path.exists(os.path.join(graph_work_path, "neg_samples.npy")):
+            neg_samples = np.load(os.path.join(graph_work_path, "neg_samples.npy"), allow_pickle=True)
             if neg_samples.size != 0:
                 train_negs = neg_samples[trainer_id::trainer_count]
                 returns["train_data"].append(train_negs)
@@ -50,7 +49,7 @@ class TrainData(object):
         self.data = returns
 
     def __getitem__(self, index):
-        return [ data[index] for data in self.data["train_data"]]
+        return [data[index] for data in self.data["train_data"]]
 
     def __len__(self):
         return len(self.data["train_data"][0])
@@ -58,10 +57,10 @@ class TrainData(object):
 
 def main(config):
     # Select Model
-    model = Model.factory(config)
+    model = LinkPredictModel(config)
 
     # Build Train Edges
-    data = TrainData(config.graph_path)
+    data = TrainData(config.graph_work_path)
 
     # Build Train Data
     train_iter = GraphGenerator(
@@ -73,7 +72,7 @@ def main(config):
         feed_name_list=[var.name for var in model.feed_list],
         use_pyreader=config.use_pyreader,
         phase="train",
-        graph_data_path=config.graph_path,
+        graph_data_path=config.graph_work_path,
         shuffle=True,
         neg_type=config.neg_type)
...
```
```diff
+import numpy as np
 import pgl
 import paddle.fluid as F
 import paddle.fluid.layers as L
-from models.base import BaseNet, BaseGNNModel
 from models.ernie_model.ernie import ErnieModel
+from models.ernie_model.ernie import ErnieGraphModel
+from models.ernie_model.ernie import ErnieConfig
+from models import message_passing
+from models.message_passing import copy_send
 
 
-class ErnieSageV2(BaseNet):
-
-    def build_inputs(self):
-        inputs = super(ErnieSageV2, self).build_inputs()
-        term_ids = L.data(
-            "term_ids", shape=[None, self.config.max_seqlen], dtype="int64", append_batch_size=False)
-        return inputs + [term_ids]
+def get_layer(layer_type, gw, feature, hidden_size, act, initializer, learning_rate, name, is_test=False):
+    return getattr(message_passing, layer_type)(gw, feature, hidden_size, act, initializer, learning_rate, name)
+
+
+class Encoder(object):
+
+    def __init__(self, config):
+        self.config = config
+
+    @classmethod
+    def factory(cls, config):
+        model_type = config.model_type
+        if model_type == "ERNIESageV1":
+            return ERNIESageV1Encoder(config)
+        elif model_type == "ERNIESageV2":
+            return ERNIESageV2Encoder(config)
+        elif model_type == "ERNIESageV3":
+            return ERNIESageV3Encoder(config)
+        elif model_type == "ERNIESageV4":
+            return ERNIESageV4Encoder(config)
+        else:
+            raise ValueError
+
+    def __call__(self, graph_wrappers, inputs):
+        raise NotImplementedError
+
+
+class ERNIESageV1Encoder(Encoder):
+
+    def __call__(self, graph_wrappers, inputs):
+        feature = self.build_embedding(graph_wrappers[0].node_feat["term_ids"])
+
+        initializer = None
+        fc_lr = self.config.lr / 0.001
+        for i in range(self.config.num_layers):
+            if i == self.config.num_layers - 1:
+                act = None
+            else:
+                act = "leaky_relu"
+            feature = get_layer(
+                self.config.layer_type,
+                graph_wrappers[i],
+                feature,
+                self.config.hidden_size,
+                act,
+                initializer,
+                learning_rate=fc_lr,
+                name="%s_%s" % (self.config.layer_type, i))
+
+        final_feats = [self.take_final_feature(feature, i, "final_fc") for i in inputs]
+        return final_feats
+
+    def build_embedding(self, term_ids):
+        term_ids = L.unsqueeze(term_ids, [-1])
+        ernie_config = self.config.ernie_config
+        ernie = ErnieModel(
+            src_ids=term_ids,
+            sentence_ids=L.zeros_like(term_ids),
+            task_ids=None,
+            config=ernie_config,
+            use_fp16=False,
+            name="")
+        feature = ernie.get_pooled_output()
+        return feature
+
+    def take_final_feature(self, feature, index, name):
+        """take final feature"""
+        feat = L.gather(feature, index, overwrite=False)
+        if self.config.final_fc:
+            feat = L.fc(feat,
+                        self.config.hidden_size,
+                        param_attr=F.ParamAttr(name=name + '_w'),
+                        bias_attr=F.ParamAttr(name=name + '_b'))
+        if self.config.final_l2_norm:
+            feat = L.l2_normalize(feat, axis=1)
+        return feat
+
+
+class ERNIESageV2Encoder(Encoder):
+
+    def __call__(self, graph_wrappers, inputs):
+        feature = graph_wrappers[0].node_feat["term_ids"]
+        feature = self.gnn_layer(graph_wrappers[0], feature, self.config.hidden_size,
+                                 'leaky_relu', None, 1., "erniesage_v2_0")
+
+        initializer = None
+        fc_lr = self.config.lr / 0.001
+        for i in range(1, self.config.num_layers):
+            if i == self.config.num_layers - 1:
+                act = None
+            else:
+                act = "leaky_relu"
+            feature = get_layer(
+                self.config.layer_type,
+                graph_wrappers[i],
+                feature,
+                self.config.hidden_size,
+                act,
+                initializer,
+                learning_rate=fc_lr,
+                name="%s_%s" % (self.config.layer_type, i))
+
+        final_feats = [self.take_final_feature(feature, i, "final_fc") for i in inputs]
+        return final_feats
+
+    def take_final_feature(self, feature, index, name):
+        """take final feature"""
+        feat = L.gather(feature, index, overwrite=False)
+        if self.config.final_fc:
+            feat = L.fc(feat,
+                        self.config.hidden_size,
+                        param_attr=F.ParamAttr(name=name + '_w'),
+                        bias_attr=F.ParamAttr(name=name + '_b'))
+        if self.config.final_l2_norm:
+            feat = L.l2_normalize(feat, axis=1)
+        return feat
 
     def gnn_layer(self, gw, feature, hidden_size, act, initializer, learning_rate, name):
         def build_position_ids(src_ids, dst_ids):
@@ -97,6 +211,36 @@
             return output
         return erniesage_v2_aggregator(gw, feature, hidden_size, act, initializer, learning_rate, name)
 
+
+class ERNIESageV3Encoder(Encoder):
+
+    def __call__(self, graph_wrappers, inputs):
+        feature = graph_wrappers[0].node_feat["term_ids"]
+        feature = self.gnn_layer(graph_wrappers[0], feature, self.config.hidden_size,
+                                 'leaky_relu', None, 1., "erniesage_v3_0")
+        final_feats = [self.take_final_feature(feature, i, "final_fc") for i in inputs]
+        return final_feats
+
+    def gnn_layer(self, gw, feature, hidden_size, act, initializer, learning_rate, name):
+        def ernie_recv(feat):
+            """doc"""
+            num_neighbor = self.config.samples[0]
+            pad_value = L.zeros([1], "int64")
+            out, _ = L.sequence_pad(feat, pad_value=pad_value, maxlen=num_neighbor)
+            out = L.reshape(out, [0, self.config.max_seqlen * num_neighbor])
+            return out
+
+        def erniesage_v3_aggregator(gw, feature, hidden_size, act, initializer, learning_rate, name):
+            msg = gw.send(copy_send, nfeat_list=[("h", feature)])
+            neigh_feature = gw.recv(msg, ernie_recv)
+            neigh_feature = L.cast(L.unsqueeze(neigh_feature, [-1]), "int64")
+
+            feature = L.unsqueeze(feature, [-1])
+            cls = L.fill_constant_batch_size_like(feature, [-1, 1, 1], "int64", 1)
+            term_ids = L.concat([cls, feature[:, :-1], neigh_feature], 1)
+            term_ids.stop_gradient = True
+            return term_ids
+
+        return erniesage_v3_aggregator(gw, feature, hidden_size, act, initializer, learning_rate, name)
+
     def gnn_layers(self, graph_wrappers, feature):
         features = [feature]
@@ -116,20 +260,28 @@
                 act,
                 initializer,
                 learning_rate=fc_lr,
-                name="%s_%s" % ("erniesage_v2", i))
+                name="%s_%s" % ("erniesage_v3", i))
             features.append(feature)
         return features
 
-    def __call__(self, graph_wrappers):
-        inputs = self.build_inputs()
-        feature = inputs[-1]
-        features = self.gnn_layers(graph_wrappers, feature)
-        outputs = [self.take_final_feature(features[-1], i, "final_fc") for i in inputs[:-1]]
-        src_real_index = L.gather(graph_wrappers[0].node_feat['index'], inputs[0])
-        outputs.append(src_real_index)
-        return inputs, outputs
-
-
-class ErnieSageModelV2(BaseGNNModel):
-    def gen_net_fn(self, config):
-        return ErnieSageV2(config)
+    def take_final_feature(self, feature, index, name):
+        """take final feature"""
+        feat = L.gather(feature, index, overwrite=False)
+
+        ernie_config = self.config.ernie_config
+        ernie = ErnieGraphModel(
+            src_ids=feat,
+            config=ernie_config,
+            slot_seqlen=self.config.max_seqlen)
+        feat = ernie.get_pooled_output()
+        fc_lr = self.config.lr / 0.001
+        if self.config.final_fc:
+            feat = L.fc(feat,
+                        self.config.hidden_size,
+                        param_attr=F.ParamAttr(name=name + '_w'),
+                        bias_attr=F.ParamAttr(name=name + '_b'))
+        if self.config.final_l2_norm:
+            feat = L.l2_normalize(feat, axis=1)
+        return feat
```
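The new `Encoder.factory` dispatches on `config.model_type`; a minimal usage sketch, where `config`, `graph_wrappers`, and the index variables are assumed to come from the surrounding training code:

```python
# Hypothetical driver: choose an encoder by config.model_type and embed the
# user / positive-item / negative-item node indices with it.
encoder = Encoder.factory(config)  # e.g. config.model_type == "ERNIESageV2"
user_feat, pos_feat, neg_feat = encoder(
    graph_wrappers, [user_index, pos_item_index, neg_item_index])
```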
````diff
@@ -23,7 +23,7 @@ You can make your customized dataset by the following format:
 For examples, use gpu to train STGCN on your dataset.
 ```
-python main.py --use_cuda --input_file dataset/input_csv --label_file dataset/output.csv --adj_mat_file dataset/W.csv --city_file dataset/city.csv
+python main.py --use_cuda --input_file dataset/input.csv --label_file dataset/output.csv --adj_mat_file dataset/W.csv --city_file dataset/city.csv
 ```
 
 #### Hyperparameters
...
````
```diff
@@ -167,9 +167,6 @@ def data_gen_mydata(input_file, label_file, n, n_his, n_pred, n_config):
     x = x.drop(columns=['date'])
     y = y.drop(columns=['date'])
 
-    x = x.drop(columns=['武汉'])
-    y = y.drop(columns=['武汉'])
-
     # param
     n_val, n_test = n_config
     n_train = len(y) - n_val - n_test - 2
...
```
**dataset/city.csv** (example):

```csv
num,city
0,A
1,B
2,C
3,D
4,E
```
```diff
@@ -22,3 +22,4 @@ from pgl import heter_graph
 from pgl import heter_graph_wrapper
 from pgl import contrib
 from pgl import message_passing
+from pgl import sample
```
```diff
@@ -18,10 +18,10 @@ from pgl.layers import conv
 from pgl.layers.conv import *
 from pgl.layers import set2set
 from pgl.layers.set2set import *
-from pgl.layers import graph_pool
-from pgl.layers.graph_pool import *
+from pgl.layers import graph_op
+from pgl.layers.graph_op import *
 
 __all__ = []
 __all__ += conv.__all__
 __all__ += set2set.__all__
-__all__ += graph_pool.__all__
+__all__ += graph_op.__all__
```
```python
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""graph saint sample test
"""
from __future__ import division
from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
import unittest

import numpy as np

import pgl
import paddle.fluid as fluid
from pgl.sample import graph_saint_random_walk_sample


class GraphSaintSampleTest(unittest.TestCase):
    """GraphSaintSampleTest"""

    def test_randomwalk_sampler(self):
        """test_randomwalk_sampler"""
        g = pgl.graph.Graph(
            num_nodes=8,
            edges=[(1, 2), (2, 3), (0, 2), (0, 1), (6, 7), (4, 5), (6, 4),
                   (7, 4), (3, 4)])
        subgraph = graph_saint_random_walk_sample(g, [6, 7], 2)
        print('reindex', subgraph._from_reindex)
        print('subedges', subgraph.edges)
        assert len(subgraph.nodes) == 4
        assert len(subgraph.edges) == 4
        true_edges = np.array([[0, 1], [2, 3], [2, 0], [3, 0]])
        assert "{}".format(subgraph.edges) == "{}".format(true_edges)


if __name__ == '__main__':
    unittest.main()
```
```diff
@@ -68,3 +68,18 @@ def read_rows(data, index):
         return new_data
     else:
         return paddle_helper.gather(data, index)
+
+
+class RowReader(object):
+    """Memory Efficient RowReader
+    """
+
+    def __init__(self, nfeat, index):
+        self.nfeat = nfeat
+        self.loaded_nfeat = {}
+        self.index = index
+
+    def __getitem__(self, key):
+        if key not in self.loaded_nfeat:
+            self.loaded_nfeat[key] = read_rows(self.nfeat[key], self.index)
+        return self.loaded_nfeat[key]
```
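A minimal sketch of how `RowReader` might be used; the `graph_wrapper` and `node_index` variables are hypothetical stand-ins for the caller's graph features and gathered indices:

```python
# Hypothetical usage: wrap a node-feature dict so that each feature tensor is
# gathered for `node_index` at most once and then served from the cache.
feat = RowReader(graph_wrapper.node_feat, node_index)
words = feat["words"]        # gathered via read_rows on first access
words_again = feat["words"]  # returned from the cache, no second gather
```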