# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
neural network definitions for the skip-gram word2vec model
"""

from __future__ import print_function

import math
import numpy as np

import paddle.fluid as fluid


def skip_gram_word2vec(dict_size,
                       word_frequencys,
                       embedding_size,
                       max_code_length=None,
                       with_hsigmoid=False,
                       with_nce=True,
                       is_sparse=False):
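    """Build the skip-gram word2vec network.

    The network reads (input_word, predict_word) pairs (plus Huffman path
    inputs when hierarchical sigmoid is enabled) through a py_reader, looks
    up the input word embedding, and computes an NCE loss, a hierarchical
    sigmoid loss, or the sum of both, depending on the flags.

    Returns the average cost variable and the py_reader.
    """
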
    def nce_layer(input, label, embedding_size, num_total_classes,
                  num_neg_samples, sampler, word_frequencys, sample_weight):
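        # NCE head: the weight/bias parameters are given fixed names
        # ("nce_w"/"nce_b") so they can be located by name later, e.g. when
        # saving or inspecting the trained program.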

        w_param_name = "nce_w"
        b_param_name = "nce_b"
        w_param = fluid.default_main_program().global_block().create_parameter(
            shape=[num_total_classes, embedding_size],
            dtype='float32',
            name=w_param_name)
        b_param = fluid.default_main_program().global_block().create_parameter(
            shape=[num_total_classes, 1], dtype='float32', name=b_param_name)

        cost = fluid.layers.nce(input=input,
                                label=label,
                                num_total_classes=num_total_classes,
                                sampler=sampler,
                                custom_dist=word_frequencys,
                                sample_weight=sample_weight,
                                param_attr=fluid.ParamAttr(name=w_param_name),
                                bias_attr=fluid.ParamAttr(name=b_param_name),
                                num_neg_samples=num_neg_samples,
                                is_sparse=is_sparse)

        return cost

    def hsigmoid_layer(input, label, ptable, pcode, non_leaf_num):
        if non_leaf_num is None:
            non_leaf_num = dict_size

        # NOTE: the keyword names below (non_leaf_num/ptable/pcode/is_costum)
        # match the fluid version this model targets and may differ in other
        # releases.
        cost = fluid.layers.hsigmoid(
            input=input,
            label=label,
            non_leaf_num=non_leaf_num,
            ptable=ptable,
            pcode=pcode,
            is_costum=True)

        return cost

    def get_loss(loss1, loss2, margin=0.1):
        # Pairwise hinge loss helper: mean(max(0, margin - loss1 + loss2)).
        # Not used by the network below; `margin` is taken as an argument so
        # the function does not rely on undefined outer-scope variables.
        loss_op1 = fluid.layers.elementwise_sub(
            fluid.layers.fill_constant_batch_size_like(
                input=loss1, shape=[-1, 1], value=margin, dtype='float32'),
            loss1)
        loss_op2 = fluid.layers.elementwise_add(loss_op1, loss2)
        loss_op3 = fluid.layers.elementwise_max(
            fluid.layers.fill_constant_batch_size_like(
                input=loss_op2, shape=[-1, 1], value=0.0, dtype='float32'),
            loss_op2)
        avg_cost = fluid.layers.mean(loss_op3)
        return avg_cost

    datas = []

    input_word = fluid.layers.data(
        name="input_word", shape=[1], dtype='int64')
    predict_word = fluid.layers.data(
        name='predict_word', shape=[1], dtype='int64')
    datas.append(input_word)
    datas.append(predict_word)

    # For hierarchical sigmoid, each sample also carries its Huffman-tree
    # path: `ptable` holds the node ids along the path and `pcode` the binary
    # codes, padded to max_code_length (40 when unspecified).
    if with_hsigmoid:
        if max_code_length:
            ptable = fluid.layers.data(
                name='ptable', shape=[max_code_length], dtype='int64')
            pcode = fluid.layers.data(
                name='pcode', shape=[max_code_length], dtype='int64')
        else:
            ptable = fluid.layers.data(name='ptable', shape=[40], dtype='int64')
            pcode = fluid.layers.data(name='pcode', shape=[40], dtype='int64')
        datas.append(ptable)
        datas.append(pcode)

    py_reader = fluid.layers.create_py_reader_by_data(capacity=64,
                                                      feed_list=datas,
                                                      name='py_reader',
                                                      use_double_buffer=True)

    words = fluid.layers.read_file(py_reader)

    emb = fluid.layers.embedding(
        input=words[0],
        is_sparse=is_sparse,
        size=[dict_size, embedding_size],
        param_attr=fluid.ParamAttr(initializer=fluid.initializer.Normal(
            scale=1 / math.sqrt(dict_size))))

    cost, cost_nce, cost_hs = None, None, None

    if with_nce:
        cost_nce = nce_layer(emb, words[1], embedding_size, dict_size, 5,
                             "uniform", word_frequencys, None)
        cost = cost_nce
    if with_hsigmoid:
        cost_hs = hsigmoid_layer(emb, words[1], words[2], words[3], dict_size)
        cost = cost_hs
    if with_nce and with_hsigmoid:
        cost = fluid.layers.elementwise_add(cost_nce, cost)

    avg_cost = fluid.layers.reduce_mean(cost)

    return avg_cost, py_reader
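

if __name__ == '__main__':
    # Illustrative sketch only (not part of the original module): builds the
    # skip-gram program with made-up placeholder values for the dictionary
    # size, word frequency table, and embedding width, then attaches a plain
    # SGD optimizer. It only constructs the program; feeding the py_reader
    # and running an executor are left to the training script.
    example_dict_size = 10000
    example_frequencys = [1.0 / example_dict_size] * example_dict_size
    loss, reader = skip_gram_word2vec(
        example_dict_size,
        example_frequencys,
        embedding_size=64,
        with_nce=True,
        is_sparse=False)
    fluid.optimizer.SGD(learning_rate=1e-3).minimize(loss)
    print("built skip-gram word2vec program, loss var: %s" % loss.name)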