#   Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
gru class
"""

import paddle_layers as layers
from paddle.fluid.dygraph.base import to_variable
from paddle.fluid.dygraph.nn import Linear
from paddle.fluid.dygraph import Layer
from paddle import fluid
import numpy as np

class GRU(Layer):
    """
    GRU
    """

    def __init__(self, conf_dict):
        """
        initialize
        """
        super(GRU, self).__init__()
        self.dict_size = conf_dict["dict_size"]
        self.task_mode = conf_dict["task_mode"]
        self.emb_dim = conf_dict["net"]["emb_dim"]
        self.gru_dim = conf_dict["net"]["gru_dim"]
        self.hidden_dim = conf_dict["net"]["hidden_dim"]
        self.emb_layer = layers.EmbeddingLayer(self.dict_size, self.emb_dim,
                                               "emb").ops()
        self.gru_layer = layers.DynamicGRULayer(self.gru_dim, "gru").ops()
        self.fc_layer = layers.FCLayer(self.hidden_dim, None, "fc").ops()
        # project embeddings to the width the GRU consumes per step
        # (3 * gru_dim: update gate, reset gate, and candidate state)
        self.proj_layer = Linear(input_dim=self.emb_dim,
                                 output_dim=self.gru_dim * 3)
        self.softmax_layer = layers.FCLayer(2, "softmax", "cos_sim").ops()
        # hard-coded padded sequence length; token ids must be padded or
        # truncated to this many positions
        self.seq_len = 5

    def forward(self, left, right):
        """
        Forward network
        """
        # embedding lookup for both sequences
        left_emb = self.emb_layer(left)
        right_emb = self.emb_layer(right)
        # project embeddings to the GRU input width (3 * gru_dim)
        left_emb = self.proj_layer(left_emb)
        right_emb = self.proj_layer(right_emb)

        # zero initial hidden state, sized to the GRU state width
        h_0 = np.zeros((left_emb.shape[0], self.gru_dim), dtype="float32")
        h_0 = to_variable(h_0)
        left_gru = self.gru_layer(left_emb, h_0=h_0)
        right_gru = self.gru_layer(right_emb, h_0=h_0)
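        # pool: max over the step axis, regroup the flat batch into
        # [batch, seq_len, hidden_dim], then sum over the sequence axis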
        left_emb = fluid.layers.reduce_max(left_gru, dim=1)
        right_emb = fluid.layers.reduce_max(right_gru, dim=1)
        left_emb = fluid.layers.reshape(
            left_emb, shape=[-1, self.seq_len, self.hidden_dim])
        right_emb = fluid.layers.reshape(
            right_emb, shape=[-1, self.seq_len, self.hidden_dim])
        left_emb = fluid.layers.reduce_sum(left_emb, dim=1)
        right_emb = fluid.layers.reduce_sum(right_emb, dim=1)

        # squash the pooled representations through tanh
        left_last = fluid.layers.tanh(left_emb)
        right_last = fluid.layers.tanh(right_emb)
        
        if self.task_mode == "pairwise":
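            # pairwise mode: project each side and score the pair by
            # cosine similarity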
            left_fc = self.fc_layer(left_last)
            right_fc = self.fc_layer(right_last)
            cos_sim_layer = layers.CosSimLayer()
            pred = cos_sim_layer.ops(left_fc, right_fc)
            return left_fc, pred
        else:
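            # pointwise mode: concatenate both representations and classify
            # with a two-way softmax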
            concat_layer = layers.ConcatLayer(1)
            concat = concat_layer.ops([left_last, right_last])
            concat_fc = self.fc_layer(concat)
            pred = self.softmax_layer(concat_fc)
            return left_last, pred
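

# --- Usage sketch (illustrative; not part of the original module) ---
# A minimal way to drive this class under PaddlePaddle 1.x dygraph, assuming
# the config layout read in __init__ and the local paddle_layers module are
# available. All sizes are hypothetical; token ids are flattened to
# [batch * seq_len, 1], the layout the reshape in forward() implies.
if __name__ == "__main__":
    conf_dict = {
        "dict_size": 10000,  # hypothetical vocabulary size
        "task_mode": "pairwise",  # or "pointwise"
        "net": {"emb_dim": 128, "gru_dim": 128, "hidden_dim": 128},
    }
    batch_size, seq_len = 4, 5  # seq_len must match GRU.seq_len
    with fluid.dygraph.guard():
        model = GRU(conf_dict)
        left_ids = to_variable(np.random.randint(
            0, conf_dict["dict_size"],
            (batch_size * seq_len, 1)).astype("int64"))
        right_ids = to_variable(np.random.randint(
            0, conf_dict["dict_size"],
            (batch_size * seq_len, 1)).astype("int64"))
        # in pairwise mode pred is the cosine similarity per example
        left_repr, pred = model(left_ids, right_ids)
        print(left_repr.shape, pred.shape)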