# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
neural network for word2vec
"""
from __future__ import print_function
import math
import numpy as np
import paddle.fluid as fluid


def skip_gram_word2vec(dict_size, embedding_size, is_sparse=False, neg_num=5):
    """Build the skip-gram word2vec training network with negative sampling.

    Returns the average loss and the py_reader that feeds the three inputs.
    """
    datas = []

    # Center word, its positive context word, and neg_num sampled negatives.
    input_word = fluid.data(name="input_word", shape=[None, 1], dtype='int64')
    true_word = fluid.data(name='true_label', shape=[None, 1], dtype='int64')
    neg_word = fluid.data(
        name="neg_label", shape=[None, neg_num], dtype='int64')

    datas.append(input_word)
    datas.append(true_word)
    datas.append(neg_word)

    # Feed all three inputs through one double-buffered py_reader.
    py_reader = fluid.layers.create_py_reader_by_data(
        capacity=64, feed_list=datas, name='py_reader', use_double_buffer=True)

    words = fluid.layers.read_file(py_reader)
    # Scale of the uniform initializer for the input embedding table.
    init_width = 0.5 / embedding_size
    # Input-side embedding lookup for the center word.
    input_emb = fluid.embedding(
        input=words[0],
        is_sparse=is_sparse,
        size=[dict_size, embedding_size],
        param_attr=fluid.ParamAttr(
            name='emb',
            initializer=fluid.initializer.Uniform(-init_width, init_width)))

    # Output-side weight embedding for the positive context word.
    true_emb_w = fluid.embedding(
        input=words[1],
        is_sparse=is_sparse,
        size=[dict_size, embedding_size],
        param_attr=fluid.ParamAttr(
            name='emb_w', initializer=fluid.initializer.Constant(value=0.0)))

    # Output-side bias embedding for the positive context word.
    true_emb_b = fluid.embedding(
        input=words[1],
        is_sparse=is_sparse,
        size=[dict_size, 1],
        param_attr=fluid.ParamAttr(
            name='emb_b', initializer=fluid.initializer.Constant(value=0.0)))
    # Drop the length-1 axis: [batch, 1, dim] -> [batch, dim].
    input_emb = fluid.layers.squeeze(input=input_emb, axes=[1])
    true_emb_w = fluid.layers.squeeze(input=true_emb_w, axes=[1])
    true_emb_b = fluid.layers.squeeze(input=true_emb_b, axes=[1])

    # Negative samples share the 'emb_w' / 'emb_b' parameters with the
    # positive path.
    neg_emb_w = fluid.embedding(
        input=words[2],
        is_sparse=is_sparse,
        size=[dict_size, embedding_size],
        param_attr=fluid.ParamAttr(
            name='emb_w', learning_rate=1.0))
    neg_emb_b = fluid.embedding(
        input=words[2],
        is_sparse=is_sparse,
        size=[dict_size, 1],
        param_attr=fluid.ParamAttr(
            name='emb_b', learning_rate=1.0))

    neg_emb_b_vec = fluid.layers.reshape(neg_emb_b, shape=[-1, neg_num])
    # Positive logit per example: dot(input_emb, true_emb_w) + true_emb_b.
    true_logits = fluid.layers.elementwise_add(
        fluid.layers.reduce_sum(
            fluid.layers.elementwise_mul(input_emb, true_emb_w),
            dim=1,
            keep_dim=True),
        true_emb_b)
    # Negative logits via a batched matmul:
    # [batch, 1, dim] x [batch, neg_num, dim]^T -> [batch, 1, neg_num].
    input_emb_re = fluid.layers.reshape(
        input_emb, shape=[-1, 1, embedding_size])
    neg_matmul = fluid.layers.matmul(
        input_emb_re, neg_emb_w, transpose_y=True)
    neg_matmul_re = fluid.layers.reshape(neg_matmul, shape=[-1, neg_num])
    neg_logits = fluid.layers.elementwise_add(neg_matmul_re, neg_emb_b_vec)
    # NCE-style loss: positives labeled 1, sampled negatives labeled 0.

    label_ones = fluid.layers.fill_constant_batch_size_like(
        true_logits, shape=[-1, 1], value=1.0, dtype='float32')
    label_zeros = fluid.layers.fill_constant_batch_size_like(
        true_logits, shape=[-1, neg_num], value=0.0, dtype='float32')

    true_xent = fluid.layers.sigmoid_cross_entropy_with_logits(true_logits,
                                                               label_ones)
    neg_xent = fluid.layers.sigmoid_cross_entropy_with_logits(neg_logits,
                                                              label_zeros)
    cost = fluid.layers.elementwise_add(
        fluid.layers.reduce_sum(
            true_xent, dim=1),
        fluid.layers.reduce_sum(
            neg_xent, dim=1))
    avg_cost = fluid.layers.reduce_mean(cost)
    return avg_cost, py_reader
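

# A minimal assembly sketch (an illustration, not part of the original file):
# it shows how the loss and py_reader returned above are typically wired into
# a fluid Program. The sizes and the SGD optimizer are assumptions, not values
# taken from this repository's training script.
def _build_train_program_sketch(dict_size=1000, embedding_size=64):
    main_prog, startup_prog = fluid.Program(), fluid.Program()
    with fluid.program_guard(main_prog, startup_prog):
        avg_cost, py_reader = skip_gram_word2vec(
            dict_size, embedding_size, is_sparse=False, neg_num=5)
        # Any fluid optimizer works; SGD is a placeholder choice here.
        fluid.optimizer.SGD(learning_rate=0.01).minimize(avg_cost)
    return main_prog, startup_prog, avg_cost, py_reader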


def infer_network(vocab_size, emb_size):
    """Build the analogy inference network: score emb_b - emb_a + emb_c
    against the L2-normalized embedding of every word in the vocabulary."""
    analogy_a = fluid.data(name="analogy_a", shape=[None], dtype='int64')
    analogy_b = fluid.data(name="analogy_b", shape=[None], dtype='int64')
    analogy_c = fluid.data(name="analogy_c", shape=[None], dtype='int64')
    all_label = fluid.data(
        name="all_label",
        shape=[vocab_size],
        dtype='int64')
    emb_all_label = fluid.embedding(
        input=all_label, size=[vocab_size, emb_size], param_attr="emb")

    # Look up the three query words in the shared 'emb' table.
    emb_a = fluid.embedding(
        input=analogy_a, size=[vocab_size, emb_size], param_attr="emb")
    emb_b = fluid.embedding(
        input=analogy_b, size=[vocab_size, emb_size], param_attr="emb")
    emb_c = fluid.embedding(
        input=analogy_c, size=[vocab_size, emb_size], param_attr="emb")
    # Analogy target vector: emb_b - emb_a + emb_c.
    target = fluid.layers.elementwise_add(
        fluid.layers.elementwise_sub(emb_b, emb_a), emb_c)
    # Score against the normalized vocabulary embeddings; keep the top 4.
    emb_all_label_l2 = fluid.layers.l2_normalize(x=emb_all_label, axis=1)
    dist = fluid.layers.matmul(x=target, y=emb_all_label_l2, transpose_y=True)
    values, pred_idx = fluid.layers.topk(input=dist, k=4)
    return values, pred_idx
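

# A runnable usage sketch for infer_network (an illustration, not part of the
# original file): it builds the graph in a fresh Program, initializes the
# 'emb' table randomly, and runs one analogy query. The word ids and sizes
# below are arbitrary; real inference would load trained 'emb' weights.
def _infer_sketch(vocab_size=1000, emb_size=64):
    main_prog, startup_prog = fluid.Program(), fluid.Program()
    with fluid.program_guard(main_prog, startup_prog):
        values, pred_idx = infer_network(vocab_size, emb_size)
    exe = fluid.Executor(fluid.CPUPlace())
    exe.run(startup_prog)  # random init; load a checkpoint in real use
    feed = {
        "analogy_a": np.array([1], dtype='int64'),
        "analogy_b": np.array([2], dtype='int64'),
        "analogy_c": np.array([3], dtype='int64'),
        "all_label": np.arange(vocab_size, dtype='int64'),
    }
    # Returns the top-4 scores and candidate word ids.
    return exe.run(main_prog, feed=feed, fetch_list=[values, pred_idx])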