# build_model.py
import pgl
import model
from pgl import data_loader
import paddle.fluid as fluid
import numpy as np
import time
from optimization import AdamW 

def build_model(dataset, config, phase, main_prog):
    """Build the node-classification program (forward + loss + optimizer).

    Args:
        dataset: object exposing ``graph`` (a PGL graph with a "words"
            node feature) and ``num_classes``.
        config: config object providing ``model_name``, ``learning_rate``
            and ``weight_decay``.
        phase: ``"train"`` attaches the AdamW optimizer to ``main_prog``;
            any other value builds a forward-only (eval/test) program.
        main_prog: the fluid ``Program`` that optimizer ops are added to.

    Returns:
        Tuple ``(gw, loss, acc)``: the ``GraphWrapper`` used to feed graph
        data at run time, the mean softmax cross-entropy loss, and the
        top-1 accuracy on the selected nodes.
    """
    # Placeholder that receives the graph structure and node features
    # when the program is executed.
    gw = pgl.graph_wrapper.GraphWrapper(
            name="graph",
            node_feat=dataset.graph.node_feat_info())

    # Resolve the model class by name so the config selects the architecture.
    GraphModel = getattr(model, config.model_name)
    m = GraphModel(config=config, num_class=dataset.num_classes)
    logits = m.forward(gw, gw.node_feat["words"])

    # Indices of the nodes in this phase's split, and their labels.
    node_index = fluid.layers.data(
            "node_index",
            shape=[None, 1],
            dtype="int64",
            append_batch_size=False)
    node_label = fluid.layers.data(
            "node_label",
            shape=[None, 1],
            dtype="int64",
            append_batch_size=False)

    # Score/label only the selected nodes (e.g. the train/val/test mask).
    pred = fluid.layers.gather(logits, node_index)
    loss, pred = fluid.layers.softmax_with_cross_entropy(
        logits=pred, label=node_label, return_softmax=True)
    acc = fluid.layers.accuracy(input=pred, label=node_label, k=1)
    loss = fluid.layers.mean(loss)

    if phase == "train":
        # AdamW applies decoupled weight decay, replacing the earlier
        # Adam + L2-regularizer setup.
        AdamW(loss=loss,
              learning_rate=config.learning_rate,
              weight_decay=config.weight_decay,
              train_program=main_prog)
    return gw, loss, acc