models.py
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import paddle.fluid as fluid
from paddle.fluid.dygraph.nn import Linear, Embedding
from paddle.fluid.dygraph.base import to_variable
import numpy as np
from paddle.incubate.hapi.model import Model
from paddle.incubate.hapi.text.text import GRUEncoderLayer as BiGRUEncoder
from paddle.incubate.hapi.text.test import BOWEncoder, CNNEncoder, GRUEncoder


class CNN(Model):
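    """Text CNN classifier: a CNNEncoder convolves over the embedded word ids,
    and the pooled features pass through two fully-connected layers to produce
    the class probabilities."""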
    def __init__(self, dict_dim, batch_size, seq_len):
        super(CNN, self).__init__()
        self.dict_dim = dict_dim
        self.emb_dim = 128
        self.hid_dim = 128
        self.fc_hid_dim = 96
        self.class_dim = 2
        self.channels = 1
        self.win_size = [3, self.hid_dim]
        self.batch_size = batch_size
        self.seq_len = seq_len
        self._encoder = CNNEncoder(
            dict_size=self.dict_dim + 1,
            emb_dim=self.emb_dim,
            seq_len=self.seq_len,
            filter_size=self.win_size,
            num_filters=self.hid_dim,
            hidden_dim=self.hid_dim,
            padding_idx=None,
            act='tanh')
        self._fc1 = Linear(
            input_dim=self.hid_dim * self.seq_len,
            output_dim=self.fc_hid_dim,
            act="softmax")
        self._fc_prediction = Linear(
            input_dim=self.fc_hid_dim,
            output_dim=self.class_dim,
            act="softmax")

    def forward(self, inputs):
        conv_3 = self._encoder(inputs)
        fc_1 = self._fc1(conv_3)
        prediction = self._fc_prediction(fc_1)
        return prediction


class BOW(Model):
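    """Bag-of-words classifier: a BOWEncoder pools the word embeddings, and the
    pooled vector passes through a tanh non-linearity, two tanh fully-connected
    layers, and a softmax output layer."""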
    def __init__(self, dict_dim, batch_size, seq_len):
        super(BOW, self).__init__()
        self.dict_dim = dict_dim
        self.emb_dim = 128
        self.hid_dim = 128
        self.fc_hid_dim = 96
        self.class_dim = 2
        self.batch_size = batch_size
        self.seq_len = seq_len
        self._encoder = BOWEncoder(
            dict_size=self.dict_dim + 1,
            emb_dim=self.emb_dim,
            padding_idx=None,
            bow_dim=self.hid_dim,
            seq_len=self.seq_len)
        self._fc1 = Linear(
            input_dim=self.hid_dim, output_dim=self.hid_dim, act="tanh")
        self._fc2 = Linear(
            input_dim=self.hid_dim, output_dim=self.fc_hid_dim, act="tanh")
        self._fc_prediction = Linear(
            input_dim=self.fc_hid_dim,
            output_dim=self.class_dim,
            act="softmax")

    def forward(self, inputs):
        bow_1 = self._encoder(inputs)
        bow_1 = fluid.layers.tanh(bow_1)
        fc_1 = self._fc1(bow_1)
        fc_2 = self._fc2(fc_1)
        prediction = self._fc_prediction(fc_2)
        return prediction


class GRU(Model):
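    """GRU classifier: a GRUEncoder encodes the embedded word ids, followed by
    a tanh fully-connected layer and a softmax output layer."""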
    def __init__(self, dict_dim, batch_size, seq_len):
        super(GRU, self).__init__()
        self.dict_dim = dict_dim
        self.emb_dim = 128
        self.hid_dim = 128
        self.fc_hid_dim = 96
        self.class_dim = 2
        self.batch_size = batch_size
        self.seq_len = seq_len
        self._fc1 = Linear(
            input_dim=self.hid_dim, output_dim=self.fc_hid_dim, act="tanh")
        self._fc_prediction = Linear(
            input_dim=self.fc_hid_dim,
            output_dim=self.class_dim,
            act="softmax")
        self._encoder = GRUEncoder(
            dict_size=self.dict_dim + 1,
            emb_dim=self.emb_dim,
            gru_dim=self.hid_dim,
            hidden_dim=self.hid_dim,
            padding_idx=None,
            seq_len=self.seq_len)

    def forward(self, inputs):
        emb = self._encoder(inputs)
        fc_1 = self._fc1(emb)
        prediction = self._fc_prediction(fc_1)
        return prediction


class BiGRU(Model):
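    """Bidirectional GRU classifier: word embeddings are projected to
    3 * hid_dim (the width expected by the GRU gates), encoded by a
    bidirectional GRU, max-pooled over time, and classified by a tanh
    hidden layer and a softmax output layer."""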
    def __init__(self, dict_dim, batch_size, seq_len):
        super(BiGRU, self).__init__()
        self.dict_dim = dict_dim
        self.emb_dim = 128
        self.hid_dim = 128
        self.fc_hid_dim = 96
        self.class_dim = 2
        self.batch_size = batch_size
        self.seq_len = seq_len
        self.embedding = Embedding(
            size=[self.dict_dim + 1, self.emb_dim],
            dtype='float32',
            param_attr=fluid.ParamAttr(learning_rate=30),
            is_sparse=False)
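        # Initial hidden state for the GRU, one row per example in the batch.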
        h_0 = np.zeros((self.batch_size, self.hid_dim), dtype="float32")
        h_0 = to_variable(h_0)
        self._fc1 = Linear(input_dim=self.hid_dim, output_dim=self.hid_dim * 3)
        self._fc2 = Linear(
            input_dim=self.hid_dim * 2, output_dim=self.fc_hid_dim, act="tanh")
        self._fc_prediction = Linear(
            input_dim=self.fc_hid_dim,
            output_dim=self.class_dim,
            act="softmax")
        self._encoder = BiGRUEncoder(
            grnn_hidden_dim=self.hid_dim,
            input_dim=self.hid_dim * 3,
            h_0=h_0,
            init_bound=0.1,
            is_bidirection=True)

    def forward(self, inputs):
        emb = self.embedding(inputs)
        emb = fluid.layers.reshape(
            emb, shape=[self.batch_size, -1, self.hid_dim])
        fc_1 = self._fc1(emb)
        encoded_vector = self._encoder(fc_1)
        encoded_vector = fluid.layers.tanh(encoded_vector)
        encoded_vector = fluid.layers.reduce_max(encoded_vector, dim=1)
        fc_2 = self._fc2(encoded_vector)
        prediction = self._fc_prediction(fc_2)
        return prediction
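

# Illustrative usage sketch (not part of the original file): exercises one of
# the models above directly in dygraph mode on dummy data. The vocabulary size,
# batch size, and sequence length are made-up placeholder values, and the input
# is assumed to be padded int64 word ids of shape [batch_size, seq_len].
if __name__ == '__main__':
    with fluid.dygraph.guard():
        dict_dim, batch_size, seq_len = 10000, 4, 128
        model = BOW(dict_dim, batch_size, seq_len)
        # Random word ids standing in for a padded mini-batch of documents.
        fake_ids = np.random.randint(
            0, dict_dim, size=(batch_size, seq_len)).astype('int64')
        # Call forward() directly for a plain dygraph forward pass.
        prediction = model.forward(to_variable(fake_ids))
        print(prediction.shape)  # expected: [batch_size, class_dim]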