Commit 3bbe91dc authored by wangmeng28

Merge remote-tracking branch 'upstream/develop' into chinese_poetry

...@@ -33,11 +33,3 @@
    entry: bash .clang_format.hook -i
    language: system
    files: \.(c|cc|cxx|cpp|cu|h|hpp|hxx|cuh|proto)$
- repo: local
hooks:
- id: convert-markdown-into-html
name: convert-markdown-into-html
description: Convert README.md into index.html
entry: python .pre-commit-hooks/convert_markdown_into_html.py
language: system
files: .+README\.md$
import argparse
import re
import sys
HEAD = """
<html>
<head>
<script type="text/x-mathjax-config">
MathJax.Hub.Config({
extensions: ["tex2jax.js", "TeX/AMSsymbols.js", "TeX/AMSmath.js"],
jax: ["input/TeX", "output/HTML-CSS"],
tex2jax: {
inlineMath: [ ['$','$'] ],
displayMath: [ ['$$','$$'] ],
processEscapes: true
},
"HTML-CSS": { availableFonts: ["TeX"] }
});
</script>
<script src="https://cdnjs.cloudflare.com/ajax/libs/mathjax/2.7.0/MathJax.js" async></script>
<script type="text/javascript" src="../.tools/theme/marked.js">
</script>
<link href="http://cdn.bootcss.com/highlight.js/9.9.0/styles/darcula.min.css" rel="stylesheet">
<script src="http://cdn.bootcss.com/highlight.js/9.9.0/highlight.min.js"></script>
<link href="http://cdn.bootcss.com/bootstrap/4.0.0-alpha.6/css/bootstrap.min.css" rel="stylesheet">
<link href="https://cdn.jsdelivr.net/perfect-scrollbar/0.6.14/css/perfect-scrollbar.min.css" rel="stylesheet">
<link href="../.tools/theme/github-markdown.css" rel='stylesheet'>
</head>
<style type="text/css" >
.markdown-body {
box-sizing: border-box;
min-width: 200px;
max-width: 980px;
margin: 0 auto;
padding: 45px;
}
</style>
<body>
<div id="context" class="container-fluid markdown-body">
</div>
<!-- This block will be replaced by each markdown file content. Please do not change lines below.-->
<div id="markdown" style='display:none'>
"""
TAIL = """
</div>
<!-- You can change the lines below now. -->
<script type="text/javascript">
marked.setOptions({
renderer: new marked.Renderer(),
gfm: true,
breaks: false,
smartypants: true,
highlight: function(code, lang) {
code = code.replace(/&amp;/g, "&")
code = code.replace(/&gt;/g, ">")
code = code.replace(/&lt;/g, "<")
code = code.replace(/&nbsp;/g, " ")
return hljs.highlightAuto(code, [lang]).value;
}
});
document.getElementById("context").innerHTML = marked(
document.getElementById("markdown").innerHTML)
</script>
</body>
"""
def convert_markdown_into_html(argv=None):
parser = argparse.ArgumentParser()
parser.add_argument('filenames', nargs='*', help='Filenames to fix')
args = parser.parse_args(argv)
retv = 0
for filename in args.filenames:
with open(
re.sub(r"README", "index", re.sub(r"\.md$", ".html", filename)),
"w") as output:
output.write(HEAD)
with open(filename) as input:
for line in input:
output.write(line)
output.write(TAIL)
return retv
if __name__ == '__main__':
sys.exit(convert_markdown_into_html())
...@@ -17,20 +17,26 @@ addons:
      - python-pip
      - python2.7-dev
  ssh_known_hosts: 52.76.173.135
before_install:
  - sudo pip install -U virtualenv pre-commit pip
  - docker pull paddlepaddle/paddle:latest
script:
-  - .travis/precommit.sh
-  - docker run -i --rm -v "$PWD:/py_unittest" paddlepaddle/paddle:latest /bin/bash -c
-    'cd /py_unittest; sh .travis/unittest.sh'
+  - exit_code=0
+  - .travis/precommit.sh || exit_code=$(( exit_code | $? ))
+  - docker run -i --rm -v "$PWD:/py_unittest" paddlepaddle/paddle:latest /bin/bash -c
+    'cd /py_unittest; sh .travis/unittest.sh' || exit_code=$(( exit_code | $? ))
  - |
-    if [[ "$TRAVIS_PULL_REQUEST" != "false" ]]; then exit 0; fi;
-    if [[ "$TRAVIS_BRANCH" != "develop" && ! "$TRAVIS_BRANCH" =~ ^v[[:digit:]]+\.[[:digit:]]+(\.[[:digit:]]+)?(-\S*)?$ ]]; then echo "not develop branch, no deploy"; exit 0; fi;
+    if [[ "$TRAVIS_PULL_REQUEST" != "false" ]]; then exit $exit_code; fi;
+    if [[ "$TRAVIS_BRANCH" != "develop" && ! "$TRAVIS_BRANCH" =~ ^v[[:digit:]]+\.[[:digit:]]+(\.[[:digit:]]+)?(-\S*)?$ ]]; then echo "not develop branch, no deploy"; exit $exit_code; fi;
    export DEPLOY_DOCS_SH=https://raw.githubusercontent.com/PaddlePaddle/PaddlePaddle.org/master/scripts/deploy/deploy_docs.sh
    export MODELS_DIR=`pwd`
    cd ..
    curl $DEPLOY_DOCS_SH | bash -s $CONTENT_DEC_PASSWD $TRAVIS_BRANCH $MODELS_DIR
+    exit_code=$(( exit_code | $? ))
+    exit $exit_code
notifications:
  email:
    on_success: change
...
...@@ -13,7 +13,7 @@ The word embedding expresses words with a real vector. Each dimension of the vec
In the example of word vectors, we show how to use Hierarchical-Sigmoid and Noise Contrastive Estimation (NCE) to accelerate word-vector learning.
- 1.1 [Hsigmoid Accelerated Word Vector Training](https://github.com/PaddlePaddle/models/tree/develop/hsigmoid)
-- 1.2 [Noise Contrast Estimation Accelerated Word Vector Training](https://github.com/PaddlePaddle/models/tree/develop/nce_cost)
+- 1.2 [Noise Contrastive Estimation Accelerated Word Vector Training](https://github.com/PaddlePaddle/models/tree/develop/nce_cost)
## 2. RNN language model
...
...@@ -3,47 +3,55 @@ This model implements the work in the following paper:
Jonas Gehring, Michael Auli, David Grangier, et al. Convolutional Sequence to Sequence Learning. Association for Computational Linguistics (ACL), 2017
# Data Preparation
- In this tutorial, each line in a data file contains one sample, and each sample consists of a source sentence and a target sentence separated by '\t'. To use your own data, organize it as follows:
```
<source sentence>\t<target sentence>
```
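For example, a data file with two samples might look like the following (the sentence pairs are hypothetical, and `\t` denotes a literal tab character):
```
how are you ?\tcomment allez vous ?
thank you very much .\tmerci beaucoup .
```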
# Training a Model
- Modify the following script if needed and then run:
```bash
python train.py \
--train_data_path ./data/train_data \
--test_data_path ./data/test_data \
--src_dict_path ./data/src_dict \
--trg_dict_path ./data/trg_dict \
--enc_blocks "[(256, 3)] * 5" \
--dec_blocks "[(256, 3)] * 3" \
--emb_size 256 \
--pos_size 200 \
--drop_rate 0.1 \
--use_gpu False \
--trainer_count 1 \
--batch_size 32 \
--num_passes 20 \
>train.log 2>&1
```
# Inferring by a Trained Model
- Infer by a trained model by running:
```bash
python infer.py \
--infer_data_path ./data/infer_data \
--src_dict_path ./data/src_dict \
--trg_dict_path ./data/trg_dict \
--enc_blocks "[(256, 3)] * 5" \
--dec_blocks "[(256, 3)] * 3" \
--emb_size 256 \
--pos_size 200 \
--drop_rate 0.1 \
--use_gpu False \
--trainer_count 1 \
--max_len 100 \
--beam_size 1 \
--model_path ./params.pass-0.tar.gz \
1>infer_result 2>infer.log
```
# Notes
...
...@@ -147,7 +147,8 @@ def encoder(token_emb,
    encoded_sum = paddle.layer.addto(input=[encoded_vec, embedding])
    # halve the variance of the sum
-    encoded_sum = paddle.layer.slope_intercept(input=encoded_sum, slope=math.sqrt(0.5))
+    encoded_sum = paddle.layer.slope_intercept(
+        input=encoded_sum, slope=math.sqrt(0.5))
    return encoded_vec, encoded_sum
...
# Convolutional Sequence to Sequence Learning
This model implements the work in the following paper:
Jonas Gehring, Michael Auli, David Grangier, et al. Convolutional Sequence to Sequence Learning. Association for Computational Linguistics (ACL), 2017
# Training a Model
- Modify the following script if needed and then run:
```bash
python train.py \
--train_data_path ./data/train_data \
--test_data_path ./data/test_data \
--src_dict_path ./data/src_dict \
--trg_dict_path ./data/trg_dict \
--enc_blocks "[(256, 3)] * 5" \
--dec_blocks "[(256, 3)] * 3" \
--emb_size 256 \
--pos_size 200 \
--drop_rate 0.1 \
--use_gpu False \
--trainer_count 1 \
--batch_size 32 \
--num_passes 20 \
>train.log 2>&1
```
# Inferring by a Trained Model
- Infer by a trained model by running:
```bash
python infer.py \
--infer_data_path ./data/infer_data \
--src_dict_path ./data/src_dict \
--trg_dict_path ./data/trg_dict \
--enc_blocks "[(256, 3)] * 5" \
--dec_blocks "[(256, 3)] * 3" \
--emb_size 256 \
--pos_size 200 \
--drop_rate 0.1 \
--use_gpu False \
--trainer_count 1 \
--max_len 100 \
--beam_size 1 \
--model_path ./params.pass-0.tar.gz \
1>infer_result 2>infer.log
```
# Notes
Currently, beam search forwards the encoder multiple times when predicting each target word, which requires extra computation. This will be fixed in a later update.
#coding=utf-8
import sys
import time
import numpy as np
class BeamSearch(object):
"""
Generate sequence by beam search
NOTE: this class only implements generating one sentence at a time.
"""
def __init__(self,
inferer,
trg_dict,
pos_size,
padding_num,
beam_size=1,
max_len=100):
self.inferer = inferer
self.trg_dict = trg_dict
self.word_padding = trg_dict.__len__()
self.pos_size = pos_size
self.pos_padding = pos_size
self.padding_num = padding_num
self.win_len = padding_num + 1
self.max_len = max_len
self.beam_size = beam_size
def get_beam_input(self, pre_beam_list, infer_data):
"""
Get input for generation at the current iteration.
"""
beam_input = []
if len(pre_beam_list) == 0:
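# first decoding step: the context window holds only padding tokens followed by the start-of-sequence token <s>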
cur_trg = [self.word_padding
] * self.padding_num + [self.trg_dict['<s>']]
cur_trg_pos = [self.pos_padding] * self.padding_num + [0]
beam_input.append(infer_data + [cur_trg] + [cur_trg_pos])
else:
for seq in pre_beam_list:
if len(seq) < self.win_len:
cur_trg = [self.word_padding] * (
self.win_len - len(seq) - 1
) + [self.trg_dict['<s>']] + seq
cur_trg_pos = [self.pos_padding] * (
self.win_len - len(seq) - 1) + [0] + range(1,
len(seq) + 1)
else:
cur_trg = seq[-self.win_len:]
cur_trg_pos = range(
len(seq) + 1 - self.win_len, len(seq) + 1)
beam_input.append(infer_data + [cur_trg] + [cur_trg_pos])
return beam_input
def get_prob(self, beam_input):
"""
Get the probabilities of all possible tokens.
"""
row_list = [j * self.win_len for j in range(len(beam_input))]
prob = self.inferer.infer(beam_input, field='value')[row_list, :]
return prob
def get_candidate(self, pre_beam_list, pre_beam_score, prob):
"""
Get top beam_size tokens and their scores for each beam.
"""
if prob.ndim == 1:
candidate_id = prob.argsort()[-self.beam_size:][::-1]
candidate_log_prob = np.log(prob[candidate_id])
else:
candidate_id = prob.argsort()[:, -self.beam_size:][:, ::-1]
candidate_log_prob = np.zeros_like(candidate_id).astype('float32')
for j in range(len(pre_beam_list)):
candidate_log_prob[j, :] = np.log(prob[j, candidate_id[j, :]])
if pre_beam_score.size > 0:
candidate_score = candidate_log_prob + pre_beam_score.reshape(
(pre_beam_score.size, 1))
else:
candidate_score = candidate_log_prob
return candidate_id, candidate_score
def prune(self, candidate_id, candidate_score, pre_beam_list,
completed_seq_list, completed_seq_score, completed_seq_min_score):
"""
Pruning process of the beam search. During the process, beam_size most possible sequences
are selected for the beam in the next iteration. Besides, their scores and the minimum score
of the completed sequences are updated.
"""
candidate_id = candidate_id.flatten()
candidate_score = candidate_score.flatten()
topk_idx = candidate_score.argsort()[-self.beam_size:][::-1].tolist()
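# integer division maps each flattened candidate index back to the beam (row) it came from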
topk_seq_idx = [idx / self.beam_size for idx in topk_idx]
next_beam = []
beam_score = []
for j in range(len(topk_idx)):
if candidate_id[topk_idx[j]] == self.trg_dict['<e>']:
if len(
completed_seq_list
) < self.beam_size or completed_seq_min_score <= candidate_score[
topk_idx[j]]:
completed_seq_list.append(pre_beam_list[topk_seq_idx[j]])
completed_seq_score.append(candidate_score[topk_idx[j]])
if completed_seq_min_score is None or (
completed_seq_min_score >=
candidate_score[topk_idx[j]] and
len(completed_seq_list) < self.beam_size):
completed_seq_min_score = candidate_score[topk_idx[j]]
else:
seq = pre_beam_list[topk_seq_idx[
j]] + [candidate_id[topk_idx[j]]]
score = candidate_score[topk_idx[j]]
next_beam.append(seq)
beam_score.append(score)
beam_score = np.array(beam_score)
return next_beam, beam_score, completed_seq_min_score
def search_one_sample(self, infer_data):
"""
Beam search process for one sample.
"""
completed_seq_list = []
completed_seq_score = []
completed_seq_min_score = None
uncompleted_seq_list = [[]]
uncompleted_seq_score = np.zeros(0)
for i in xrange(self.max_len):
beam_input = self.get_beam_input(uncompleted_seq_list, infer_data)
prob = self.get_prob(beam_input)
candidate_id, candidate_score = self.get_candidate(
uncompleted_seq_list, uncompleted_seq_score, prob)
uncompleted_seq_list, uncompleted_seq_score, completed_seq_min_score = self.prune(
candidate_id, candidate_score, uncompleted_seq_list,
completed_seq_list, completed_seq_score,
completed_seq_min_score)
if len(uncompleted_seq_list) == 0:
break
if len(completed_seq_list) >= self.beam_size:
seq_max_score = uncompleted_seq_score.max()
if seq_max_score < completed_seq_min_score:
uncompleted_seq_list = []
break
final_seq_list = completed_seq_list + uncompleted_seq_list
final_score = np.concatenate(
(np.array(completed_seq_score), uncompleted_seq_score))
max_id = final_score.argmax()
top_seq = final_seq_list[max_id]
return top_seq
#coding=utf-8
import sys
import argparse
import distutils.util
import gzip
import paddle.v2 as paddle
from model import conv_seq2seq
from beamsearch import BeamSearch
import reader
def parse_args():
parser = argparse.ArgumentParser(
description="PaddlePaddle Convolutional Seq2Seq")
parser.add_argument(
'--infer_data_path',
type=str,
required=True,
help="Path of the dataset for inference")
parser.add_argument(
'--src_dict_path',
type=str,
required=True,
help='Path of the source dictionary')
parser.add_argument(
'--trg_dict_path',
type=str,
required=True,
help='path of the target dictionary')
parser.add_argument(
'--enc_blocks', type=str, help='Convolution blocks of the encoder')
parser.add_argument(
'--dec_blocks', type=str, help='Convolution blocks of the decoder')
parser.add_argument(
'--emb_size',
type=int,
default=512,
help='Dimension of word embedding. (default: %(default)s)')
parser.add_argument(
'--pos_size',
type=int,
default=200,
help='Total number of the position indexes. (default: %(default)s)')
parser.add_argument(
'--drop_rate',
type=float,
default=0.,
help='Dropout rate. (default: %(default)s)')
parser.add_argument(
"--use_gpu",
default=False,
type=distutils.util.strtobool,
help="Use gpu or not. (default: %(default)s)")
parser.add_argument(
"--trainer_count",
default=1,
type=int,
help="Trainer number. (default: %(default)s)")
parser.add_argument(
'--max_len',
type=int,
default=100,
help="The maximum length of the sentence to be generated. (default: %(default)s)"
)
parser.add_argument(
"--beam_size",
default=1,
type=int,
help="The width of beam expasion. (default: %(default)s)")
parser.add_argument(
"--model_path",
type=str,
required=True,
help="The path of trained model. (default: %(default)s)")
return parser.parse_args()
def to_sentence(seq, dictionary):
raw_sentence = [dictionary[id] for id in seq]
sentence = " ".join(raw_sentence)
return sentence
def infer(infer_data_path,
src_dict_path,
trg_dict_path,
model_path,
enc_conv_blocks,
dec_conv_blocks,
emb_dim=512,
pos_size=200,
drop_rate=0.,
max_len=100,
beam_size=1):
"""
Inference.
:param infer_data_path: The path of the data for inference.
:type infer_data_path: str
:param src_dict_path: The path of the source dictionary.
:type src_dict_path: str
:param trg_dict_path: The path of the target dictionary.
:type trg_dict_path: str
:param model_path: The path of a trained model.
:type model_path: str
:param enc_conv_blocks: The scale list of the encoder's convolution blocks. And each element of
the list contains output dimension and context length of the corresponding
convolution block.
:type enc_conv_blocks: list of tuple
:param dec_conv_blocks: The scale list of the decoder's convolution blocks. And each element of
the list contains output dimension and context length of the corresponding
convolution block.
:type dec_conv_blocks: list of tuple
:param emb_dim: The dimension of the embedding vector.
:type emb_dim: int
:param pos_size: The total number of the position indexes, which means
the maximum value of the index is pos_size - 1.
:type pos_size: int
:param drop_rate: Dropout rate.
:type drop_rate: float
:param max_len: The maximum length of the sentence to be generated.
:type max_len: int
:param beam_size: The width of beam expansion.
:type beam_size: int
"""
# load dict
src_dict = reader.load_dict(src_dict_path)
trg_dict = reader.load_dict(trg_dict_path)
src_dict_size = src_dict.__len__()
trg_dict_size = trg_dict.__len__()
prob = conv_seq2seq(
src_dict_size=src_dict_size,
trg_dict_size=trg_dict_size,
pos_size=pos_size,
emb_dim=emb_dim,
enc_conv_blocks=enc_conv_blocks,
dec_conv_blocks=dec_conv_blocks,
drop_rate=drop_rate,
is_infer=True)
# load parameters
parameters = paddle.parameters.Parameters.from_tar(gzip.open(model_path))
padding_list = [context_len - 1 for (size, context_len) in dec_conv_blocks]
padding_num = reduce(lambda x, y: x + y, padding_list)
infer_reader = reader.data_reader(
data_file=infer_data_path,
src_dict=src_dict,
trg_dict=trg_dict,
pos_size=pos_size,
padding_num=padding_num)
inferer = paddle.inference.Inference(
output_layer=prob, parameters=parameters)
searcher = BeamSearch(
inferer=inferer,
trg_dict=trg_dict,
pos_size=pos_size,
padding_num=padding_num,
max_len=max_len,
beam_size=beam_size)
reverse_trg_dict = reader.get_reverse_dict(trg_dict)
for i, raw_data in enumerate(infer_reader()):
infer_data = [raw_data[0], raw_data[1]]
result = searcher.search_one_sample(infer_data)
sentence = to_sentence(result, reverse_trg_dict)
print sentence
sys.stdout.flush()
return
def main():
args = parse_args()
enc_conv_blocks = eval(args.enc_blocks)
dec_conv_blocks = eval(args.dec_blocks)
paddle.init(use_gpu=args.use_gpu, trainer_count=args.trainer_count)
infer(
infer_data_path=args.infer_data_path,
src_dict_path=args.src_dict_path,
trg_dict_path=args.trg_dict_path,
model_path=args.model_path,
enc_conv_blocks=enc_conv_blocks,
dec_conv_blocks=dec_conv_blocks,
emb_dim=args.emb_size,
pos_size=args.pos_size,
drop_rate=args.drop_rate,
max_len=args.max_len,
beam_size=args.beam_size)
if __name__ == '__main__':
main()
#coding=utf-8
import math
import paddle.v2 as paddle
__all__ = ["conv_seq2seq"]
def gated_conv_with_batchnorm(input,
size,
context_len,
context_start=None,
learning_rate=1.0,
drop_rate=0.):
"""
Definition of the convolution block.
:param input: The input of this block.
:type input: LayerOutput
:param size: The dimension of the block's output.
:type size: int
:param context_len: The context length of the convolution.
:type context_len: int
:param context_start: The start position of the context.
:type context_start: int
:param learning_rate: The learning rate factor of the parameters in the block.
The actual learning rate is the product of the global
learning rate and this factor.
:type learning_rate: float
:param drop_rate: Dropout rate.
:type drop_rate: float
:return: The output of the convolution block.
:rtype: LayerOutput
"""
input = paddle.layer.dropout(input=input, dropout_rate=drop_rate)
context = paddle.layer.mixed(
size=input.size * context_len,
input=paddle.layer.context_projection(
input=input, context_len=context_len, context_start=context_start))
raw_conv = paddle.layer.fc(
input=context,
size=size * 2,
act=paddle.activation.Linear(),
param_attr=paddle.attr.Param(
initial_mean=0.,
initial_std=math.sqrt(4.0 * (1.0 - drop_rate) / context.size),
learning_rate=learning_rate),
bias_attr=False)
batch_norm_conv = paddle.layer.batch_norm(
input=raw_conv,
act=paddle.activation.Linear(),
param_attr=paddle.attr.Param(learning_rate=learning_rate))
with paddle.layer.mixed(size=size) as conv:
conv += paddle.layer.identity_projection(
batch_norm_conv, size=size, offset=0)
with paddle.layer.mixed(size=size, act=paddle.activation.Sigmoid()) as gate:
gate += paddle.layer.identity_projection(
batch_norm_conv, size=size, offset=size)
with paddle.layer.mixed(size=size) as gated_conv:
gated_conv += paddle.layer.dotmul_operator(conv, gate)
return gated_conv
def encoder(token_emb,
pos_emb,
conv_blocks=[(256, 3)] * 5,
num_attention=3,
drop_rate=0.1):
"""
Definition of the encoder.
:param token_emb: The embedding vector of the input token.
:type token_emb: LayerOutput
:param pos_emb: The embedding vector of the input token's position.
:type pos_emb: LayerOutput
:param conv_blocks: The scale list of the convolution blocks. Each element of
the list contains output dimension and context length of
the corresponding convolution block.
:type conv_blocks: list of tuple
:param num_attention: The total number of the attention modules used in the decoder.
:type num_attention: int
:param drop_rate: Dropout rate.
:type drop_rate: float
:return: The input token encoding.
:rtype: LayerOutput
"""
embedding = paddle.layer.addto(
input=[token_emb, pos_emb],
layer_attr=paddle.attr.Extra(drop_rate=drop_rate))
proj_size = conv_blocks[0][0]
block_input = paddle.layer.fc(
input=embedding,
size=proj_size,
act=paddle.activation.Linear(),
param_attr=paddle.attr.Param(
initial_mean=0.,
initial_std=math.sqrt((1.0 - drop_rate) / embedding.size),
learning_rate=1.0 / (2.0 * num_attention)),
bias_attr=True, )
for (size, context_len) in conv_blocks:
if block_input.size == size:
residual = block_input
else:
residual = paddle.layer.fc(
input=block_input,
size=size,
act=paddle.activation.Linear(),
param_attr=paddle.attr.Param(learning_rate=1.0 /
(2.0 * num_attention)),
bias_attr=True)
gated_conv = gated_conv_with_batchnorm(
input=block_input,
size=size,
context_len=context_len,
learning_rate=1.0 / (2.0 * num_attention),
drop_rate=drop_rate)
with paddle.layer.mixed(size=size) as block_output:
block_output += paddle.layer.identity_projection(residual)
block_output += paddle.layer.identity_projection(gated_conv)
# halve the variance of the sum
block_output = paddle.layer.slope_intercept(
input=block_output, slope=math.sqrt(0.5))
block_input = block_output
emb_dim = embedding.size
encoded_vec = paddle.layer.fc(
input=block_output,
size=emb_dim,
act=paddle.activation.Linear(),
param_attr=paddle.attr.Param(learning_rate=1.0 / (2.0 * num_attention)),
bias_attr=True)
encoded_sum = paddle.layer.addto(input=[encoded_vec, embedding])
# halve the variance of the sum
encoded_sum = paddle.layer.slope_intercept(input=encoded_sum, slope=math.sqrt(0.5))
return encoded_vec, encoded_sum
def attention(decoder_state, cur_embedding, encoded_vec, encoded_sum):
"""
Definition of the attention.
:param decoder_state: The hidden state of the decoder.
:type decoder_state: LayerOutput
:param cur_embedding: The embedding vector of the current token.
:type cur_embedding: LayerOutput
:param encoded_vec: The source token encoding.
:type encoded_vec: LayerOutput
:param encoded_sum: The sum of the source token's encoding and embedding.
:type encoded_sum: LayerOutput
:return: A context vector.
:rtype: LayerOutput
"""
residual = decoder_state
state_size = decoder_state.size
emb_dim = cur_embedding.size
with paddle.layer.mixed(size=emb_dim, bias_attr=True) as state_summary:
state_summary += paddle.layer.full_matrix_projection(decoder_state)
state_summary += paddle.layer.identity_projection(cur_embedding)
# halve the variance of the sum
state_summary = paddle.layer.slope_intercept(
input=state_summary, slope=math.sqrt(0.5))
expanded = paddle.layer.expand(input=state_summary, expand_as=encoded_vec)
m = paddle.layer.linear_comb(weights=expanded, vectors=encoded_vec)
attention_weight = paddle.layer.fc(
input=m,
size=1,
act=paddle.activation.SequenceSoftmax(),
bias_attr=False)
scaled = paddle.layer.scaling(weight=attention_weight, input=encoded_sum)
attended = paddle.layer.pooling(
input=scaled, pooling_type=paddle.pooling.Sum())
attended_proj = paddle.layer.fc(
input=attended,
size=state_size,
act=paddle.activation.Linear(),
bias_attr=True)
attention_result = paddle.layer.addto(input=[attended_proj, residual])
# halve the variance of the sum
attention_result = paddle.layer.slope_intercept(
input=attention_result, slope=math.sqrt(0.5))
return attention_result
def decoder(token_emb,
pos_emb,
encoded_vec,
encoded_sum,
dict_size,
conv_blocks=[(256, 3)] * 3,
drop_rate=0.1):
"""
Definition of the decoder.
:param token_emb: The embedding vector of the input token.
:type token_emb: LayerOutput
:param pos_emb: The embedding vector of the input token's position.
:type pos_emb: LayerOutput
:param encoded_vec: The source token encoding.
:type encoded_vec: LayerOutput
:param encoded_sum: The sum of the source token's encoding and embedding.
:type encoded_sum: LayerOutput
:param dict_size: The size of the target dictionary.
:type dict_size: int
:param conv_blocks: The scale list of the convolution blocks. Each element
of the list contains output dimension and context length
of the corresponding convolution block.
:type conv_blocks: list of tuple
:param drop_rate: Dropout rate.
:type drop_rate: float
:return: The probability of the predicted token.
:rtype: LayerOutput
"""
def attention_step(decoder_state, cur_embedding, encoded_vec, encoded_sum):
conditional = attention(
decoder_state=decoder_state,
cur_embedding=cur_embedding,
encoded_vec=encoded_vec,
encoded_sum=encoded_sum)
return conditional
embedding = paddle.layer.addto(
input=[token_emb, pos_emb],
layer_attr=paddle.attr.Extra(drop_rate=drop_rate))
proj_size = conv_blocks[0][0]
block_input = paddle.layer.fc(
input=embedding,
size=proj_size,
act=paddle.activation.Linear(),
param_attr=paddle.attr.Param(
initial_mean=0.,
initial_std=math.sqrt((1.0 - drop_rate) / embedding.size)),
bias_attr=True, )
for (size, context_len) in conv_blocks:
if block_input.size == size:
residual = block_input
else:
residual = paddle.layer.fc(
input=block_input,
size=size,
act=paddle.activation.Linear(),
bias_attr=True)
decoder_state = gated_conv_with_batchnorm(
input=block_input,
size=size,
context_len=context_len,
context_start=0,
drop_rate=drop_rate)
group_inputs = [
decoder_state,
embedding,
paddle.layer.StaticInput(input=encoded_vec),
paddle.layer.StaticInput(input=encoded_sum),
]
conditional = paddle.layer.recurrent_group(
step=attention_step, input=group_inputs)
block_output = paddle.layer.addto(input=[conditional, residual])
# halve the variance of the sum
block_output = paddle.layer.slope_intercept(
input=block_output, slope=math.sqrt(0.5))
block_input = block_output
out_emb_dim = embedding.size
block_output = paddle.layer.fc(
input=block_output,
size=out_emb_dim,
act=paddle.activation.Linear(),
layer_attr=paddle.attr.Extra(drop_rate=drop_rate))
decoder_out = paddle.layer.fc(
input=block_output,
size=dict_size,
act=paddle.activation.Softmax(),
param_attr=paddle.attr.Param(
initial_mean=0.,
initial_std=math.sqrt((1.0 - drop_rate) / block_output.size)),
bias_attr=True)
return decoder_out
def conv_seq2seq(src_dict_size,
trg_dict_size,
pos_size,
emb_dim,
enc_conv_blocks=[(256, 3)] * 5,
dec_conv_blocks=[(256, 3)] * 3,
drop_rate=0.1,
is_infer=False):
"""
Definition of convolutional sequence-to-sequence network.
:param src_dict_size: The size of the source dictionary.
:type src_dict_size: int
:param trg_dict_size: The size of the target dictionary.
:type trg_dict_size: int
:param pos_size: The total number of the position indexes, which means
the maximum value of the index is pos_size - 1.
:type pos_size: int
:param emb_dim: The dimension of the embedding vector.
:type emb_dim: int
:param enc_conv_blocks: The scale list of the encoder's convolution blocks. Each element
of the list contains output dimension and context length of the
corresponding convolution block.
:type enc_conv_blocks: list of tuple
:param dec_conv_blocks: The scale list of the decoder's convolution blocks. Each element
of the list contains output dimension and context length of the
corresponding convolution block.
:type dec_conv_blocks: list of tuple
:param drop_rate: Dropout rate.
:type drop_rate: float
:param is_infer: Whether infer or not.
:type is_infer: bool
:return: Cost or output layer.
:rtype: LayerOutput
"""
src = paddle.layer.data(
name='src_word',
type=paddle.data_type.integer_value_sequence(src_dict_size))
src_pos = paddle.layer.data(
name='src_word_pos',
type=paddle.data_type.integer_value_sequence(pos_size +
1)) # one for padding
src_emb = paddle.layer.embedding(
input=src,
size=emb_dim,
name='src_word_emb',
param_attr=paddle.attr.Param(initial_mean=0., initial_std=0.1))
src_pos_emb = paddle.layer.embedding(
input=src_pos,
size=emb_dim,
name='src_pos_emb',
param_attr=paddle.attr.Param(initial_mean=0., initial_std=0.1))
num_attention = len(dec_conv_blocks)
encoded_vec, encoded_sum = encoder(
token_emb=src_emb,
pos_emb=src_pos_emb,
conv_blocks=enc_conv_blocks,
num_attention=num_attention,
drop_rate=drop_rate)
trg = paddle.layer.data(
name='trg_word',
type=paddle.data_type.integer_value_sequence(trg_dict_size +
1)) # one for padding
trg_pos = paddle.layer.data(
name='trg_word_pos',
type=paddle.data_type.integer_value_sequence(pos_size +
1)) # one for padding
trg_emb = paddle.layer.embedding(
input=trg,
size=emb_dim,
name='trg_word_emb',
param_attr=paddle.attr.Param(initial_mean=0., initial_std=0.1))
trg_pos_emb = paddle.layer.embedding(
input=trg_pos,
size=emb_dim,
name='trg_pos_emb',
param_attr=paddle.attr.Param(initial_mean=0., initial_std=0.1))
decoder_out = decoder(
token_emb=trg_emb,
pos_emb=trg_pos_emb,
encoded_vec=encoded_vec,
encoded_sum=encoded_sum,
dict_size=trg_dict_size,
conv_blocks=dec_conv_blocks,
drop_rate=drop_rate)
if is_infer:
return decoder_out
trg_next_word = paddle.layer.data(
name='trg_next_word',
type=paddle.data_type.integer_value_sequence(trg_dict_size))
cost = paddle.layer.classification_cost(
input=decoder_out, label=trg_next_word)
return cost
#coding=utf-8
import random
def load_dict(dict_file):
word_dict = dict()
with open(dict_file, 'r') as f:
for i, line in enumerate(f):
w = line.strip().split()[0]
word_dict[w] = i
return word_dict
def get_reverse_dict(dictionary):
reverse_dict = {dictionary[k]: k for k in dictionary.keys()}
return reverse_dict
def load_data(data_file, src_dict, trg_dict):
UNK_IDX = src_dict['<unk>']
with open(data_file, 'r') as f:
for line in f:
line_split = line.strip().split('\t')
if len(line_split) < 2:
continue
src, trg = line_split
src_words = src.strip().split()
trg_words = trg.strip().split()
src_seq = [src_dict.get(w, UNK_IDX) for w in src_words]
trg_seq = [trg_dict.get(w, UNK_IDX) for w in trg_words]
yield src_seq, trg_seq
def data_reader(data_file, src_dict, trg_dict, pos_size, padding_num):
def reader():
UNK_IDX = src_dict['<unk>']
word_padding = trg_dict.__len__()
pos_padding = pos_size
def _get_pos(pos_list, pos_size, pos_padding):
return [pos if pos < pos_size else pos_padding for pos in pos_list]
with open(data_file, 'r') as f:
for line in f:
line_split = line.strip().split('\t')
if len(line_split) != 2:
continue
src, trg = line_split
src = src.strip().split()
src_word = [src_dict.get(w, UNK_IDX) for w in src]
src_word_pos = range(len(src_word))
src_word_pos = _get_pos(src_word_pos, pos_size, pos_padding)
trg = trg.strip().split()
trg_word = [trg_dict['<s>']
] + [trg_dict.get(w, UNK_IDX) for w in trg]
trg_word_pos = range(len(trg_word))
trg_word_pos = _get_pos(trg_word_pos, pos_size, pos_padding)
trg_next_word = trg_word[1:] + [trg_dict['<e>']]
trg_word = [word_padding] * padding_num + trg_word
trg_word_pos = [pos_padding] * padding_num + trg_word_pos
trg_next_word = trg_next_word + [trg_dict['<e>']] * padding_num
yield src_word, src_word_pos, trg_word, trg_word_pos, trg_next_word
return reader
#coding=utf-8
import os
import sys
import time
import argparse
import distutils.util
import gzip
import numpy as np
import paddle.v2 as paddle
from model import conv_seq2seq
import reader
def parse_args():
parser = argparse.ArgumentParser(
description="PaddlePaddle Convolutional Seq2Seq")
parser.add_argument(
'--train_data_path',
type=str,
required=True,
help="Path of the training set")
parser.add_argument(
'--test_data_path', type=str, help='Path of the test set')
parser.add_argument(
'--src_dict_path',
type=str,
required=True,
help='Path of source dictionary')
parser.add_argument(
'--trg_dict_path',
type=str,
required=True,
help='Path of target dictionary')
parser.add_argument(
'--enc_blocks', type=str, help='Convolution blocks of the encoder')
parser.add_argument(
'--dec_blocks', type=str, help='Convolution blocks of the decoder')
parser.add_argument(
'--emb_size',
type=int,
default=512,
help='Dimension of word embedding. (default: %(default)s)')
parser.add_argument(
'--pos_size',
type=int,
default=200,
help='Total number of the position indexes. (default: %(default)s)')
parser.add_argument(
'--drop_rate',
type=float,
default=0.,
help='Dropout rate. (default: %(default)s)')
parser.add_argument(
"--use_gpu",
default=False,
type=distutils.util.strtobool,
help="Use gpu or not. (default: %(default)s)")
parser.add_argument(
"--trainer_count",
default=1,
type=int,
help="Trainer number. (default: %(default)s)")
parser.add_argument(
'--batch_size',
type=int,
default=32,
help="Size of a mini-batch. (default: %(default)s)")
parser.add_argument(
'--num_passes',
type=int,
default=15,
help="Number of passes to train. (default: %(default)s)")
return parser.parse_args()
def create_reader(padding_num,
train_data_path,
test_data_path=None,
src_dict=None,
trg_dict=None,
pos_size=200,
batch_size=32):
train_reader = paddle.batch(
reader=paddle.reader.shuffle(
reader=reader.data_reader(
data_file=train_data_path,
src_dict=src_dict,
trg_dict=trg_dict,
pos_size=pos_size,
padding_num=padding_num),
buf_size=10240),
batch_size=batch_size)
test_reader = None
if test_data_path:
test_reader = paddle.batch(
reader=paddle.reader.shuffle(
reader=reader.data_reader(
data_file=test_data_path,
src_dict=src_dict,
trg_dict=trg_dict,
pos_size=pos_size,
padding_num=padding_num),
buf_size=10240),
batch_size=batch_size)
return train_reader, test_reader
def train(train_data_path,
test_data_path,
src_dict_path,
trg_dict_path,
enc_conv_blocks,
dec_conv_blocks,
emb_dim=512,
pos_size=200,
drop_rate=0.,
batch_size=32,
num_passes=15):
"""
Train the convolution sequence-to-sequence model.
:param train_data_path: The path of the training set.
:type train_data_path: str
:param test_data_path: The path of the test set.
:type test_data_path: str
:param src_dict_path: The path of the source dictionary.
:type src_dict_path: str
:param trg_dict_path: The path of the target dictionary.
:type trg_dict_path: str
:param enc_conv_blocks: The scale list of the encoder's convolution blocks. And each element of
the list contains output dimension and context length of the corresponding
convolution block.
:type enc_conv_blocks: list of tuple
:param dec_conv_blocks: The scale list of the decoder's convolution blocks. And each element of
the list contains output dimension and context length of the corresponding
convolution block.
:type dec_conv_blocks: list of tuple
:param emb_dim: The dimension of the embedding vector.
:type emb_dim: int
:param pos_size: The total number of the position indexes, which means
the maximum value of the index is pos_size - 1.
:type pos_size: int
:param drop_rate: Dropout rate.
:type drop_rate: float
:param batch_size: The size of a mini-batch.
:type batch_size: int
:param num_passes: The total number of the passes to train.
:type num_passes: int
"""
# load dict
src_dict = reader.load_dict(src_dict_path)
trg_dict = reader.load_dict(trg_dict_path)
src_dict_size = src_dict.__len__()
trg_dict_size = trg_dict.__len__()
optimizer = paddle.optimizer.Adam(
learning_rate=1e-3, )
cost = conv_seq2seq(
src_dict_size=src_dict_size,
trg_dict_size=trg_dict_size,
pos_size=pos_size,
emb_dim=emb_dim,
enc_conv_blocks=enc_conv_blocks,
dec_conv_blocks=dec_conv_blocks,
drop_rate=drop_rate,
is_infer=False)
# create parameters and trainer
parameters = paddle.parameters.create(cost)
trainer = paddle.trainer.SGD(
cost=cost, parameters=parameters, update_equation=optimizer)
padding_list = [context_len - 1 for (size, context_len) in dec_conv_blocks]
padding_num = reduce(lambda x, y: x + y, padding_list)
train_reader, test_reader = create_reader(
padding_num=padding_num,
train_data_path=train_data_path,
test_data_path=test_data_path,
src_dict=src_dict,
trg_dict=trg_dict,
pos_size=pos_size,
batch_size=batch_size)
feeding = {
'src_word': 0,
'src_word_pos': 1,
'trg_word': 2,
'trg_word_pos': 3,
'trg_next_word': 4
}
# create event handler
def event_handler(event):
if isinstance(event, paddle.event.EndIteration):
if event.batch_id % 20 == 0:
cur_time = time.strftime('%Y.%m.%d %H:%M:%S', time.localtime())
print "[%s]: Pass: %d, Batch: %d, TrainCost: %f, %s" % (
cur_time, event.pass_id, event.batch_id, event.cost,
event.metrics)
else:
sys.stdout.flush()
if isinstance(event, paddle.event.EndPass):
if test_reader is not None:
cur_time = time.strftime('%Y.%m.%d %H:%M:%S', time.localtime())
result = trainer.test(reader=test_reader, feeding=feeding)
print "[%s]: Pass: %d, TestCost: %f, %s" % (
cur_time, event.pass_id, result.cost, result.metrics)
sys.stdout.flush()
with gzip.open("output/params.pass-%d.tar.gz" % event.pass_id,
'w') as f:
trainer.save_parameter_to_tar(f)
if not os.path.exists('output'):
os.mkdir('output')
trainer.train(
reader=train_reader,
event_handler=event_handler,
num_passes=num_passes,
feeding=feeding)
def main():
args = parse_args()
enc_conv_blocks = eval(args.enc_blocks)
dec_conv_blocks = eval(args.dec_blocks)
paddle.init(use_gpu=args.use_gpu, trainer_count=args.trainer_count)
train(
train_data_path=args.train_data_path,
test_data_path=args.test_data_path,
src_dict_path=args.src_dict_path,
trg_dict_path=args.trg_dict_path,
enc_conv_blocks=enc_conv_blocks,
dec_conv_blocks=dec_conv_blocks,
emb_dim=args.emb_size,
pos_size=args.pos_size,
drop_rate=args.drop_rate,
batch_size=args.batch_size,
num_passes=args.num_passes)
if __name__ == '__main__':
main()
...@@ -2,6 +2,7 @@ import sys
import csv
import cPickle
import argparse
import os
import numpy as np
from utils import logger, TaskMode
...
<html>
<head>
<script type="text/x-mathjax-config">
MathJax.Hub.Config({
extensions: ["tex2jax.js", "TeX/AMSsymbols.js", "TeX/AMSmath.js"],
jax: ["input/TeX", "output/HTML-CSS"],
tex2jax: {
inlineMath: [ ['$','$'] ],
displayMath: [ ['$$','$$'] ],
processEscapes: true
},
"HTML-CSS": { availableFonts: ["TeX"] }
});
</script>
<script src="https://cdnjs.cloudflare.com/ajax/libs/mathjax/2.7.0/MathJax.js" async></script>
<script type="text/javascript" src="../.tools/theme/marked.js">
</script>
<link href="http://cdn.bootcss.com/highlight.js/9.9.0/styles/darcula.min.css" rel="stylesheet">
<script src="http://cdn.bootcss.com/highlight.js/9.9.0/highlight.min.js"></script>
<link href="http://cdn.bootcss.com/bootstrap/4.0.0-alpha.6/css/bootstrap.min.css" rel="stylesheet">
<link href="https://cdn.jsdelivr.net/perfect-scrollbar/0.6.14/css/perfect-scrollbar.min.css" rel="stylesheet">
<link href="../.tools/theme/github-markdown.css" rel='stylesheet'>
</head>
<style type="text/css" >
.markdown-body {
box-sizing: border-box;
min-width: 200px;
max-width: 980px;
margin: 0 auto;
padding: 45px;
}
</style>
<body>
<div id="context" class="container-fluid markdown-body">
</div>
<!-- This block will be replaced by each markdown file content. Please do not change lines below.-->
<div id="markdown" style='display:none'>
# Click-Through Rate Prediction
## Introduction
CTR (Click-Through Rate)\[[1](https://en.wikipedia.org/wiki/Click-through_rate)\]
is the predicted probability that a user clicks on an advertisement. CTR models are widely used in the advertising industry. Accurate click-through rate estimates are important for maximizing online advertising revenue.
When there are multiple ad slots, CTR estimates are generally used as a baseline for ranking. For example, in a search engine's ad system, when the user enters a query, the system typically performs the following steps to show relevant ads.
1. Get the ad collection associated with the user's search term.
2. Business rules and relevance filtering.
3. Rank by auction mechanism and CTR.
4. Show ads.
Here, CTR plays a crucial role.
### Brief history
Historically, the CTR prediction model has been evolving as follows.
- Logistic Regression (LR) / Gradient Boosting Decision Trees (GBDT) + feature engineering
- LR + Deep Neural Network (DNN)
- DNN + feature engineering
In the early stages of development, LR dominated; in recent years, DNN-based models have become the mainstream.
### LR vs DNN
The following figure shows the structures of the LR and DNN models:
<p align="center">
<img src="images/lr_vs_dnn.jpg" width="620" hspace='10'/> <br/>
Figure 1. LR and DNN model structure comparison
</p>
We can see that LR and DNN share some common structure. However, by adding activation units and more layers, a DNN can model non-linear relations between inputs and outputs, which enables it to achieve better results in CTR estimation.
In the following, we demonstrate how to use PaddlePaddle to learn to predict CTR.
## Data and Model formation
Here `click` is the learning objective. There are several ways to learn this objective.
1. Learn the click directly, as a binary (0/1) classification problem.
2. Learning to rank, with a pairwise or listwise ranking loss.
3. Estimate the click rate of each ad, then rank the ads by the estimated click rate.
In this example, we use the first method.
We use the Kaggle `Click-through rate prediction` task \[[2](https://www.kaggle.com/c/avazu-ctr-prediction/data)\].
Please see the [data process](./dataset.md) for pre-processing data.
The input data format for the demo model in this tutorial is as follows:
```
# <dnn input ids> \t <lr input sparse values> \t click
1 23 190 \t 230:0.12 3421:0.9 23451:0.12 \t 0
23 231 \t 1230:0.12 13421:0.9 \t 1
```
Description:
- `dnn input ids`: one-hot encoded ids.
- `lr input sparse values`: given as `ID:VALUE` pairs; values are preferably scaled to the range `[-1, 1]`.
In addition, a file describing the input dimensions of the dnn and lr submodels must be provided at training time. Its format is as follows:
```
dnn_input_dim: <int>
lr_input_dim: <int>
```
Here `<int>` represents an integer value.
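For example, the meta file generated for a preprocessed dataset might look like this (the dimensions below are hypothetical and depend on your data):
```
dnn_input_dim: 885353
lr_input_dim: 10040001
```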
`avazu_data_processer.py` can be used to download the dataset \[[2](#References)\] and pre-process the data.
```
usage: avazu_data_processer.py [-h] --data_path DATA_PATH --output_dir
OUTPUT_DIR
[--num_lines_to_detect NUM_LINES_TO_DETECT]
[--test_set_size TEST_SET_SIZE]
[--train_size TRAIN_SIZE]
PaddlePaddle CTR example
optional arguments:
-h, --help show this help message and exit
--data_path DATA_PATH
path of the Avazu dataset
--output_dir OUTPUT_DIR
directory to output
--num_lines_to_detect NUM_LINES_TO_DETECT
number of records to detect dataset's meta info
--test_set_size TEST_SET_SIZE
size of the validation dataset(default: 10000)
--train_size TRAIN_SIZE
size of the trainset (default: 100000)
```
- `data_path`: path of the data to be processed
- `output_dir`: output directory for the processed data
- `num_lines_to_detect`: number of lines scanned to generate the IDs
- `test_set_size`: number of rows in the test set
- `train_size`: number of rows in the training set
## Wide & Deep Learning Model
Google proposed the Wide & Deep Learning framework to integrate the advantages of DNNs, which are good at learning abstract features, and LR models, which are suitable for large-scale sparse features.
### Introduction to the model
The Wide & Deep Learning Model\[[3](#References)\] is a relatively mature model, and it is still widely used for the CTR prediction task. Here we demonstrate how to use this model to complete the CTR prediction task.
The model structure is as follows:
<p align="center">
<img src="images/wide_deep.png" width="820" hspace='10'/> <br/>
Figure 2. Wide & Deep Model
</p>
The wide part on the left side of the model can accommodate large-scale sparse features and memorize specific information (such as IDs), while the deep part on the right side can learn implicit relationships between features.
### Model Input
The model has three inputs as follows.
- `dnn_input`: the input of the deep part
- `lr_input`: the input of the wide part
- `click`: whether the ad was clicked or not
```python
dnn_merged_input = layer.data(
name='dnn_input',
type=paddle.data_type.sparse_binary_vector(self.dnn_input_dim))
lr_merged_input = layer.data(
name='lr_input',
type=paddle.data_type.sparse_vector(self.lr_input_dim))
click = paddle.layer.data(name='click', type=dtype.dense_vector(1))
```
### Wide part
The wide part uses the LR model, but the activation function is changed to `RELU` for speed.
```python
def build_lr_submodel():
fc = layer.fc(
input=lr_merged_input, size=1, name='lr', act=paddle.activation.Relu())
return fc
```
### Deep part
The Deep part uses a standard multi-layer DNN.
```python
def build_dnn_submodel(dnn_layer_dims):
dnn_embedding = layer.fc(input=dnn_merged_input, size=dnn_layer_dims[0])
_input_layer = dnn_embedding
for i, dim in enumerate(dnn_layer_dims[1:]):
fc = layer.fc(
input=_input_layer,
size=dim,
act=paddle.activation.Relu(),
name='dnn-fc-%d' % i)
_input_layer = fc
return _input_layer
```
### Combine
The output section uses the `sigmoid` function to produce a prediction value in (0, 1).
```python
# combine DNN and LR submodels
def combine_submodels(dnn, lr):
merge_layer = layer.concat(input=[dnn, lr])
fc = layer.fc(
input=merge_layer,
size=1,
name='output',
# use sigmoid function to approximate ctr, which is a float value between 0 and 1.
act=paddle.activation.Sigmoid())
return fc
```
### Training
```python
dnn = build_dnn_submodel(dnn_layer_dims)
lr = build_lr_submodel()
output = combine_submodels(dnn, lr)
# ==============================================================================
# cost and train period
# ==============================================================================
classification_cost = paddle.layer.multi_binary_label_cross_entropy_cost(
input=output, label=click)
paddle.init(use_gpu=False, trainer_count=11)
params = paddle.parameters.create(classification_cost)
optimizer = paddle.optimizer.Momentum(momentum=0)
trainer = paddle.trainer.SGD(
cost=classification_cost, parameters=params, update_equation=optimizer)
dataset = AvazuDataset(train_data_path, n_records_as_test=test_set_size)
def event_handler(event):
if isinstance(event, paddle.event.EndIteration):
if event.batch_id % 100 == 0:
logging.warning("Pass %d, Samples %d, Cost %f" % (
event.pass_id, event.batch_id * batch_size, event.cost))
if event.batch_id % 1000 == 0:
result = trainer.test(
reader=paddle.batch(dataset.test, batch_size=1000),
feeding=field_index)
logging.warning("Test %d-%d, Cost %f" % (event.pass_id, event.batch_id,
result.cost))
trainer.train(
reader=paddle.batch(
paddle.reader.shuffle(dataset.train, buf_size=500),
batch_size=batch_size),
feeding=field_index,
event_handler=event_handler,
num_passes=100)
```
## Run training and testing
The model goes through the following steps:
1. Prepare training data
1. Download train.gz from [Kaggle CTR](https://www.kaggle.com/c/avazu-ctr-prediction/data) .
2. Unzip train.gz to get train.txt
3. `mkdir -p output; python avazu_data_processer.py --data_path train.txt --output_dir output --num_lines_to_detect 1000 --test_set_size 100` to generate the demo data.
2. Execute `python train.py --train_data_path ./output/train.txt --test_data_path ./output/test.txt --data_meta_file ./output/data.meta.txt --model_type=0` to start training.
The argument options for `train.py` are as follows.
```
usage: train.py [-h] --train_data_path TRAIN_DATA_PATH
[--test_data_path TEST_DATA_PATH] [--batch_size BATCH_SIZE]
[--num_passes NUM_PASSES]
[--model_output_prefix MODEL_OUTPUT_PREFIX] --data_meta_file
DATA_META_FILE --model_type MODEL_TYPE
PaddlePaddle CTR example
optional arguments:
-h, --help show this help message and exit
--train_data_path TRAIN_DATA_PATH
path of training dataset
--test_data_path TEST_DATA_PATH
path of testing dataset
--batch_size BATCH_SIZE
size of mini-batch (default:10000)
--num_passes NUM_PASSES
number of passes to train
--model_output_prefix MODEL_OUTPUT_PREFIX
prefix of path for model to store (default:
./ctr_models)
--data_meta_file DATA_META_FILE
path of data meta info file
--model_type MODEL_TYPE
model type, classification: 0, regression 1 (default
classification)
```
- `train_data_path` : The path of the training set
- `test_data_path` : The path of the testing set
- `num_passes`: number of rounds of model training
- `data_meta_file`: Please refer to the description in [Data and Task Abstraction](### 数据和任务抽象).
- `model_type`: whether the model is for classification or regression
## Use the trained model for prediction
The trained model can be used to predict new data. The format of the prediction data is as follows.
```
# <dnn input ids> \t <lr input sparse values>
1 23 190 \t 230:0.12 3421:0.9 23451:0.12
23 231 \t 1230:0.12 13421:0.9
```
The only difference from the training data is that there is no label (i.e., no `click` value).
We can now use `infer.py` to perform inference.
```
usage: infer.py [-h] --model_gz_path MODEL_GZ_PATH --data_path DATA_PATH
--prediction_output_path PREDICTION_OUTPUT_PATH
[--data_meta_path DATA_META_PATH] --model_type MODEL_TYPE
PaddlePaddle CTR example
optional arguments:
-h, --help show this help message and exit
--model_gz_path MODEL_GZ_PATH
path of model parameters gz file
--data_path DATA_PATH
path of the dataset to infer
--prediction_output_path PREDICTION_OUTPUT_PATH
path to output the prediction
--data_meta_path DATA_META_PATH
path of trainset's meta info, default is ./data.meta
--model_type MODEL_TYPE
model type, classification: 0, regression 1 (default
classification)
```
- `model_gz_path`: path of the `gz`-compressed model parameters file
- `data_path`: path of the dataset to infer
- `prediction_output_path`: path to output the predictions
- `data_meta_path`: please refer to [Data and Task Abstraction](### 数据和任务抽象)
- `model_type`: classification or regression
The sample data can be predicted with the following command:
```
python infer.py --model_gz_path <model_path> --data_path output/infer.txt --prediction_output_path predictions.txt --data_meta_path data.meta.txt
```
The final predictions are written to `predictions.txt`.
## References
1. <https://en.wikipedia.org/wiki/Click-through_rate>
2. <https://www.kaggle.com/c/avazu-ctr-prediction/data>
3. Cheng H T, Koc L, Harmsen J, et al. [Wide & deep learning for recommender systems](https://arxiv.org/pdf/1606.07792.pdf)[C]//Proceedings of the 1st Workshop on Deep Learning for Recommender Systems. ACM, 2016: 7-10.
</div>
<!-- You can change the lines below now. -->
<script type="text/javascript">
marked.setOptions({
renderer: new marked.Renderer(),
gfm: true,
breaks: false,
smartypants: true,
highlight: function(code, lang) {
code = code.replace(/&amp;/g, "&")
code = code.replace(/&gt;/g, ">")
code = code.replace(/&lt;/g, "<")
code = code.replace(/&nbsp;/g, " ")
return hljs.highlightAuto(code, [lang]).value;
}
});
document.getElementById("context").innerHTML = marked(
document.getElementById("markdown").innerHTML)
</script>
</body>
...@@ -50,7 +50,7 @@ class CTRmodel(object):
        self.lr_merged_input = layer.data(
            name='lr_input',
-            type=paddle.data_type.sparse_vector(self.lr_input_dim))
+            type=paddle.data_type.sparse_float_vector(self.lr_input_dim))
        if not self.is_infer:
            self.click = paddle.layer.data(
...
Deprecated: please check out the new repository [DeepSpeech](https://github.com/PaddlePaddle/DeepSpeech).
# DeepSpeech2 on PaddlePaddle
*DeepSpeech2 on PaddlePaddle* is an open-source implementation of end-to-end Automatic Speech Recognition (ASR) engine, based on [Baidu's Deep Speech 2 paper](http://proceedings.mlr.press/v48/amodei16.pdf), with [PaddlePaddle](https://github.com/PaddlePaddle/Paddle) platform. Our vision is to empower both industrial application and academic research on speech recognition, via an easy-to-use, efficient and scalable implementation, including training, inference & testing module, distributed [PaddleCloud](https://github.com/PaddlePaddle/cloud) training, and demo deployment. Besides, several pre-trained models for both English and Mandarin are also released.
...@@ -187,7 +189,7 @@ Six optional augmentation components are provided to be selected, configured and
- Noise Perturbation (need background noise audio files)
- Impulse Response (need impulse audio files)
-In order to inform the trainer of what augmentation components are needed and what their processing orders are, it is required to prepare in advance a *augmentation configuration file* in [JSON](http://www.json.org/) format. For example:
+In order to inform the trainer of what augmentation components are needed and what their processing orders are, it is required to prepare in advance an *augmentation configuration file* in [JSON](http://www.json.org/) format. For example:
```
[{
...@@ -226,7 +228,7 @@ If you wish to train your own better language model, please refer to [KenLM](htt
#### English LM
-The English corpus is from the [Common Crawl Repository](http://commoncrawl.org) and you can download it from [statmt](http://data.statmt.org/ngrams/deduped_en). We use part en.00 to train our English languge model. There are some preprocessing steps before training:
+The English corpus is from the [Common Crawl Repository](http://commoncrawl.org) and you can download it from [statmt](http://data.statmt.org/ngrams/deduped_en). We use part en.00 to train our English language model. There are some preprocessing steps before training:
* Characters not in \[A-Za-z0-9\s'\] (\s represents whitespace characters) are removed and Arabic numbers are converted to English numbers like 1000 to one thousand.
* Repeated whitespace characters are squeezed to one and the beginning whitespace characters are removed. Notice that all transcriptions are lowercase, so all characters are converted to lowercase.
...
<html>
<head>
<script type="text/x-mathjax-config">
MathJax.Hub.Config({
extensions: ["tex2jax.js", "TeX/AMSsymbols.js", "TeX/AMSmath.js"],
jax: ["input/TeX", "output/HTML-CSS"],
tex2jax: {
inlineMath: [ ['$','$'] ],
displayMath: [ ['$$','$$'] ],
processEscapes: true
},
"HTML-CSS": { availableFonts: ["TeX"] }
});
</script>
<script src="https://cdnjs.cloudflare.com/ajax/libs/mathjax/2.7.0/MathJax.js" async></script>
<script type="text/javascript" src="../.tools/theme/marked.js">
</script>
<link href="http://cdn.bootcss.com/highlight.js/9.9.0/styles/darcula.min.css" rel="stylesheet">
<script src="http://cdn.bootcss.com/highlight.js/9.9.0/highlight.min.js"></script>
<link href="http://cdn.bootcss.com/bootstrap/4.0.0-alpha.6/css/bootstrap.min.css" rel="stylesheet">
<link href="https://cdn.jsdelivr.net/perfect-scrollbar/0.6.14/css/perfect-scrollbar.min.css" rel="stylesheet">
<link href="../.tools/theme/github-markdown.css" rel='stylesheet'>
</head>
<style type="text/css" >
.markdown-body {
box-sizing: border-box;
min-width: 200px;
max-width: 980px;
margin: 0 auto;
padding: 45px;
}
</style>
<body>
<div id="context" class="container-fluid markdown-body">
</div>
<!-- This block will be replaced by each markdown file content. Please do not change lines below.-->
<div id="markdown" style='display:none'>
# Deep Structured Semantic Models (DSSM)
Deep Structured Semantic Models (DSSM) is a simple but powerful DNN-based model for matching web search queries against URL-based documents. This example demonstrates how to use PaddlePaddle to implement a generic DSSM model for modeling the semantic similarity between two strings.
## Background Introduction
DSSM \[[1](#References)\] is a classic semantic model proposed by Microsoft Research. It is used to measure the semantic distance between two texts. Typical applications of DSSM include the following.
1. CTR prediction, which measures the degree of association between a user search query and a candidate web page.
2. Text relevance, which measures the degree of semantic correlation between two strings.
3. Automatic recommendation, which measures the degree of association between a user and a recommended item.
## Model Architecture
In the original paper \[[1](#References)\], the DSSM model uses the implicit semantic relation between the user search query and the document as its metric. The model structure is as follows:
<p align="center">
<img src="./images/dssm.png"/><br/><br/>
Figure 1. DSSM In the original paper
</p>
With subsequent optimizations that simplify the structure \[[3](#References)\], the model becomes:
<p align="center">
<img src="./images/dssm2.png" width="600"/><br/><br/>
Figure 2. DSSM generic structure
</p>
The blank box in the figure can be replaced by any model, such as a fully connected layer (FC), a convolutional neural network (CNN), or an RNN. The structure is designed to measure the semantic distance between two elements (such as strings).
In practice, the DSSM model serves as a basic building block, combined with different loss functions to achieve specific goals, such as:
- In a ranking system, a pairwise rank loss function is used.
- In CTR estimation, the click is modeled as a binary classification problem with a cross-entropy loss.
- In a regression model, the cosine similarity between the two representations is used as the prediction.
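To make these options concrete, the sketch below shows, in plain numpy, how the cosine similarity between the two semantic vectors can feed a margin-based pairwise rank loss and a regression prediction. The margin-based hinge form is an illustrative assumption here, not the repository's exact formulation.
```python
import numpy as np

def cosine(u, v):
    return np.dot(u, v) / (np.linalg.norm(u) * np.linalg.norm(v))

# Pairwise rank: the positive target should score higher than the negative one.
def pairwise_rank_loss(query, pos_doc, neg_doc, margin=1.0):
    return max(0.0, margin - cosine(query, pos_doc) + cosine(query, neg_doc))

# Regression: the cosine similarity itself is the predicted relevance.
def regression_prediction(query, doc):
    return cosine(query, doc)

q, d_pos, d_neg = np.random.rand(3, 32)
print(pairwise_rank_loss(q, d_pos, d_neg), regression_prediction(q, d_pos))
```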
## Model Implementation
At a high level, the DSSM model is composed of three components: the left DNN, the right DNN, and a loss function on top of them. In complex tasks, the structures of the left and right DNNs can differ. In this example, we keep the two DNN structures the same, and the DNN architecture can be any of FC, CNN, or RNN.
In PaddlePaddle, loss functions for classification, regression, and ranking are all supported. The distance between the left and right DNN outputs is calculated by cosine similarity. In the classification task, the predicted distribution is calculated by softmax.
For further reading:
- How CNN and FC layers extract text information is described in [text classification](https://github.com/PaddlePaddle/models/blob/develop/text_classification/README.md#模型详解)
- Details of RNN / GRU can be found in [Machine Translation](https://github.com/PaddlePaddle/book/blob/develop/08.machine_translation/README.md#gated-recurrent-unit-gru)
- For pairwise rank learning, please refer to [learn to rank](https://github.com/PaddlePaddle/models/blob/develop/ltr/README.md)
Figure 3 shows the general architecture for both regression and classification models.
<p align="center">
<img src="./images/dssm3.jpg"/><br/><br/>
Figure 3. DSSM for REGRESSION or CLASSIFICATION
</p>
The structure of the Pairwise Rank is more complex, as shown in Figure 4.
<p align="center">
<img src="./images/dssm2.jpg"/><br/><br/>
Figure 4. DSSM for Pairwise Rank
</p>
Below, we describe how to train the DSSM model in PaddlePaddle. All the code is included in `./network_conf.py`.
### Create a word vector table for the text
```python
def create_embedding(self, input, prefix=''):
"""
Create word embedding. The `prefix` is added in front of the name of
embedding's learnable parameter.
"""
logger.info("Create embedding table [%s] whose dimension is %d" %
(prefix, self.dnn_dims[0]))
emb = paddle.layer.embedding(
input=input,
size=self.dnn_dims[0],
param_attr=ParamAttr(name='%s_emb.w' % prefix))
return emb
```
Since the input (embedding table) is a list of the IDs of the words corresponding to a sentence, the word vector table outputs the sequence of word vectors.
### CNN implementation
```python
def create_cnn(self, emb, prefix=''):
"""
A multi-layer CNN.
:param emb: The word embedding.
:type emb: paddle.layer
:param prefix: The prefix will be added to the layers' names.
:type prefix: str
"""
def create_conv(context_len, hidden_size, prefix):
key = "%s_%d_%d" % (prefix, context_len, hidden_size)
conv = paddle.networks.sequence_conv_pool(
input=emb,
context_len=context_len,
hidden_size=hidden_size,
# set parameter attr for parameter sharing
context_proj_param_attr=ParamAttr(name=key + "contex_proj.w"),
fc_param_attr=ParamAttr(name=key + "_fc.w"),
fc_bias_attr=ParamAttr(name=key + "_fc.b"),
pool_bias_attr=ParamAttr(name=key + "_pool.b"))
return conv
conv_3 = create_conv(3, self.dnn_dims[1], "cnn")
conv_4 = create_conv(4, self.dnn_dims[1], "cnn")
return conv_3, conv_4
```
The CNN accepts the word vector sequence from the embedding table, processes the data by convolution and pooling, and finally outputs a semantic vector.
### RNN implementation
RNN is suitable for learning from sequences of variable length.
```python
def create_rnn(self, emb, prefix=''):
"""
A GRU sentence vector learner.
"""
gru = paddle.networks.simple_gru(
input=emb,
size=self.dnn_dims[1],
mixed_param_attr=ParamAttr(name='%s_gru_mixed.w' % prefix),
mixed_bias_param_attr=ParamAttr(name="%s_gru_mixed.b" % prefix),
gru_param_attr=ParamAttr(name='%s_gru.w' % prefix),
gru_bias_attr=ParamAttr(name="%s_gru.b" % prefix))
sent_vec = paddle.layer.last_seq(gru)
return sent_vec
```
### FC implementation
```python
def create_fc(self, emb, prefix=''):
"""
A multi-layer fully connected neural network.
:param emb: The output of the embedding layer
:type emb: paddle.layer
:param prefix: A prefix will be added to the layers' names.
:type prefix: str
"""
_input_layer = paddle.layer.pooling(
input=emb, pooling_type=paddle.pooling.Max())
fc = paddle.layer.fc(
input=_input_layer,
size=self.dnn_dims[1],
param_attr=ParamAttr(name='%s_fc.w' % prefix),
bias_attr=ParamAttr(name="%s_fc.b" % prefix))
return fc
```
In the construction of FC, we use `paddle.layer.pooling` for the maximum pooling operation on the word vector sequence. Then we transform the sequence into a fixed dimensional vector.
### Multi-layer DNN implementation
```python
def create_dnn(self, sent_vec, prefix):
if len(self.dnn_dims) > 1:
_input_layer = sent_vec
for id, dim in enumerate(self.dnn_dims[1:]):
name = "%s_fc_%d_%d" % (prefix, id, dim)
fc = paddle.layer.fc(
input=_input_layer,
size=dim,
act=paddle.activation.Tanh(),
param_attr=ParamAttr(name='%s.w' % name),
bias_attr=ParamAttr(name='%s.b' % name),
)
_input_layer = fc
return _input_layer
```
### Classification / Regression
The structures for classification and regression are similar. The function below can be used for both tasks.
Please check the function `_build_classification_or_regression_model` in [network_conf.py]( https://github.com/PaddlePaddle/models/blob/develop/dssm/network_conf.py) for the detailed implementation.
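As a rough orientation before reading that function, the following condensed sketch shows how the two semantic vectors are typically combined. The names `left_emb`/`right_emb` and the 2-class softmax head are assumptions for illustration, not the exact code of that function.
```python
# Illustrative sketch: build the two towers with the helpers defined above.
left_vec = self.create_dnn(self.create_fc(left_emb, prefix="left"), "left")
right_vec = self.create_dnn(self.create_fc(right_emb, prefix="right"), "right")

# cosine similarity measures the semantic distance between the two vectors
sim = paddle.layer.cos_sim(a=left_vec, b=right_vec)

# classification head: feed both semantic vectors into a softmax classifier
prediction = paddle.layer.fc(
    input=[left_vec, right_vec], size=2, act=paddle.activation.Softmax())
label = paddle.layer.data(
    name="label", type=paddle.data_type.integer_value(2))
cost = paddle.layer.classification_cost(input=prediction, label=label)
# for regression, `sim` itself would be fitted to the real-valued target instead
```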
### Pairwise Rank
Please check the function `_build_rank_model` in [network_conf.py]( https://github.com/PaddlePaddle/models/blob/develop/dssm/network_conf.py) for implementation.
## Data Format
Below is a simple example for the data in `./data`
### Regression data format
```
# 3 fields each line:
# - source's word ids
# - target's word ids
# - target
<ids> \t <ids> \t <float>
```
The example of this format is as follows.
```
3 6 10 \t 6 8 33 \t 0.7
6 0 \t 6 9 330 \t 0.03
```
### Classification data format
```
# 3 fields each line:
# - source's word ids
# - target's word ids
# - target
<ids> \t <ids> \t <label>
```
The example of this format is as follows.
```
3 6 10 \t 6 8 33 \t 0
6 10 \t 8 3 1 \t 1
```
### Ranking data format
```
# 4 fields each line:
# - source's word ids
# - target1's word ids
# - target2's word ids
# - label
<ids> \t <ids> \t <ids> \t <label>
```
The example of this format is as follows.
```
7 2 4 \t 2 10 12 \t 9 2 7 10 23 \t 0
7 2 4 \t 10 12 \t 9 2 21 23 \t 1
```
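For illustration, a hypothetical reader for the ranking format could look like the following. The actual parsing lives in `reader.py`; this sketch only assumes the tab-separated layout shown above.
```python
# Hypothetical reader for the ranking data format (illustrative only).
def rank_reader(path):
    def reader():
        with open(path) as f:
            for line in f:
                source, target1, target2, label = line.rstrip("\n").split("\t")
                yield ([int(i) for i in source.split()],
                       [int(i) for i in target1.split()],
                       [int(i) for i in target2.split()],
                       int(label))
    return reader
```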
## Training
We use `python train.py -y 0 --model_arch 0` with the data in `./data/classification` to train a DSSM model for classification. The parameters accepted by the script `train.py` can be listed by executing `python train.py --help`. Some important parameters are:
- `train_data_path` Training data path
- `test_data_path` Test data path, optional
- `source_dic_path` Source dictionary path
- `target_dic_path` Target dictionary path
- `model_type` The type of loss function of the model: 0 for classification, 1 for ranking, 2 for regression
- `model_arch` Model architecture: 0 for FC, 1 for CNN, 2 for RNN
- `dnn_dims` The dimensions of each layer of the model; the default is `256,128,64,32`, i.e. 4 layers.
## To predict using the trained model
The parameters accepted by the script `infer.py` can be listed by executing `python infer.py --help`. Some important parameters are:
- `data_path` Path for the data to predict
- `prediction_output_path` Prediction output path
## References
1. Huang P S, He X, Gao J, et al. Learning deep structured semantic models for web search using clickthrough data[C]//Proceedings of the 22nd ACM international conference on Conference on information & knowledge management. ACM, 2013: 2333-2338.
2. [Microsoft Learning to Rank Datasets](https://www.microsoft.com/en-us/research/project/mslr/)
3. [Gao J, He X, Deng L. Deep Learning for Web Search and Natural Language Processing[J]. Microsoft Research Technical Report, 2015.](https://www.microsoft.com/en-us/research/wp-content/uploads/2016/02/wsdm2015.v3.pdf)
</div>
<!-- You can change the lines below now. -->
<script type="text/javascript">
marked.setOptions({
renderer: new marked.Renderer(),
gfm: true,
breaks: false,
smartypants: true,
highlight: function(code, lang) {
code = code.replace(/&amp;/g, "&")
code = code.replace(/&gt;/g, ">")
code = code.replace(/&lt;/g, "<")
code = code.replace(/&nbsp;/g, " ")
return hljs.highlightAuto(code, [lang]).value;
}
});
document.getElementById("context").innerHTML = marked(
document.getElementById("markdown").innerHTML)
</script>
</body>
<html>
<head>
<script type="text/x-mathjax-config">
MathJax.Hub.Config({
extensions: ["tex2jax.js", "TeX/AMSsymbols.js", "TeX/AMSmath.js"],
jax: ["input/TeX", "output/HTML-CSS"],
tex2jax: {
inlineMath: [ ['$','$'] ],
displayMath: [ ['$$','$$'] ],
processEscapes: true
},
"HTML-CSS": { availableFonts: ["TeX"] }
});
</script>
<script src="https://cdnjs.cloudflare.com/ajax/libs/mathjax/2.7.0/MathJax.js" async></script>
<script type="text/javascript" src="../.tools/theme/marked.js">
</script>
<link href="http://cdn.bootcss.com/highlight.js/9.9.0/styles/darcula.min.css" rel="stylesheet">
<script src="http://cdn.bootcss.com/highlight.js/9.9.0/highlight.min.js"></script>
<link href="http://cdn.bootcss.com/bootstrap/4.0.0-alpha.6/css/bootstrap.min.css" rel="stylesheet">
<link href="https://cdn.jsdelivr.net/perfect-scrollbar/0.6.14/css/perfect-scrollbar.min.css" rel="stylesheet">
<link href="../.tools/theme/github-markdown.css" rel='stylesheet'>
</head>
<style type="text/css" >
.markdown-body {
box-sizing: border-box;
min-width: 200px;
max-width: 980px;
margin: 0 auto;
padding: 45px;
}
</style>
<body>
<div id="context" class="container-fluid markdown-body">
</div>
<!-- This block will be replaced by each markdown file content. Please do not change lines below.-->
<div id="markdown" style='display:none'>
# Chinese Classical Poetry Generation
## Introduction
Based on an encoder-decoder neural network model, this example trains on the Complete Tang Poems in a line-to-line (sequence to sequence) fashion, so that given a line of a poem, the model generates the next line.
Both the encoder and the decoder use stacked bi-directional LSTMs, 3 layers deep by default, with an attention unit.
The brief directory structure of this example is as follows:
```text
.
├── data # training data and dictionary
│ ├── download.sh # download the raw data
├── README.md # documentation
├── index.html # documentation (HTML format)
├── preprocess.py # preprocess the raw data
├── generate.py # script for generating poem lines
├── network_conf.py # model definition
├── reader.py # data reading interface
├── train.py # training script
└── utils.py # utility functions
```
## Data Processing
### Source of the Raw Data
This example uses the Complete Tang Poems collected in the [chinese-poetry database](https://github.com/chinese-poetry/chinese-poetry) as training data, about 54,000 Tang poems in total.
### Downloading the Raw Data
```bash
cd data && ./download.sh && cd ..
```
### Data Preprocessing
```bash
python preprocess.py --datadir data/raw --outfile data/poems.txt --dictfile data/dict.txt
```
After the script finishes, it produces the processed training data poems.txt and the dictionary dict.txt. The dictionary is built at the character level, using characters that appear at least 10 times.
Each line of poems.txt describes one Tang poem in three columns: title, author, and poem content. Within the poem content, lines of the poem are separated by `.`.
Training data example:
```text
登鸛雀樓 王之渙 白日依山盡.黃河入海流.欲窮千里目.更上一層樓
觀獵 李白 太守耀清威.乘閑弄晚暉.江沙橫獵騎.山火遶行圍.箭逐雲鴻落.鷹隨月兔飛.不知白日暮.歡賞夜方歸
晦日重宴 陳嘉言 高門引冠蓋.下客抱支離.綺席珍羞滿.文場翰藻摛.蓂華彫上月.柳色藹春池.日斜歸戚里.連騎勒金羈
```
During training, each line of a poem is used as the model input and the next line as the prediction target.
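As an illustration of this pairing, a minimal sketch (not the repository's `reader.py`, and assuming the whitespace-separated three-column layout shown above) could look like:
```python
def poem_line_pairs(poems_path):
    """Yield (input_line, target_line) pairs from the preprocessed poems.txt."""
    with open(poems_path) as f:
        for record in f:
            fields = record.strip().split()
            if len(fields) < 3:
                continue
            lines = fields[2].split(".")  # poem lines are separated by '.'
            for cur, nxt in zip(lines, lines[1:]):
                yield cur, nxt
```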
## Model Training
The command-line arguments of the training script [train.py](./train.py) can be listed with `python train.py --help`. The main arguments are:
- `num_passes`: number of training passes
- `batch_size`: batch size
- `use_gpu`: whether to use GPU
- `trainer_count`: number of trainers, 1 by default
- `save_dir_path`: directory for saving models, the `models` directory under the current directory by default
- `encoder_depth`: depth of the encoder LSTM, 3 by default
- `decoder_depth`: depth of the decoder LSTM, 3 by default
- `train_data_path`: path of the training data
- `word_dict_path`: path of the data dictionary
- `init_model_path`: path of the initial model; not required when training from scratch
### Running the Training
```bash
python train.py \
--num_passes 50 \
--batch_size 256 \
--use_gpu True \
--trainer_count 1 \
--save_dir_path models \
--train_data_path data/poems.txt \
--word_dict_path data/dict.txt \
2>&1 | tee train.log
```
After each pass, the model parameters are saved under the models directory. The training log is saved in train.log.
### Optimal Model Parameters
Find the pass with the smallest cost and use the model parameters of that pass for subsequent prediction.
```bash
python -c 'import utils; utils.find_optiaml_pass("./train.log")'
```
## Generating Poem Lines
Use the [generate.py](./generate.py) script to generate the next line for input poem lines; the command-line arguments can be listed with `python generate.py --help`.
The main arguments are:
- `model_path`: file of the trained model parameters
- `word_dict_path`: path of the data dictionary
- `test_data_path`: path of the input data
- `batch_size`: batch size, 1 by default
- `beam_size`: beam width in beam search, 5 by default
- `save_file`: path for saving the output
- `use_gpu`: whether to use GPU
### Running the Generation
For example, save the poem line `孤帆遠影碧空盡` in the file `input.txt` as the input for predicting the next line, and run:
```bash
python generate.py \
--model_path models/pass_00049.tar.gz \
--word_dict_path data/dict.txt \
--test_data_path input.txt \
--save_file output.txt
```
The generated results are saved in the file `output.txt`. For the example input above, the generated lines are:
```text
-9.6987 萬 壑 清 風 黃 葉 多
-10.0737 萬 里 遠 山 紅 葉 深
-10.4233 萬 壑 清 波 紅 一 流
-10.4802 萬 壑 清 風 黃 葉 深
-10.9060 萬 壑 清 風 紅 葉 多
```
</div>
<!-- You can change the lines below now. -->
<script type="text/javascript">
marked.setOptions({
renderer: new marked.Renderer(),
gfm: true,
breaks: false,
smartypants: true,
highlight: function(code, lang) {
code = code.replace(/&amp;/g, "&")
code = code.replace(/&gt;/g, ">")
code = code.replace(/&lt;/g, "<")
code = code.replace(/&nbsp;/g, " ")
return hljs.highlightAuto(code, [lang]).value;
}
});
document.getElementById("context").innerHTML = marked(
document.getElementById("markdown").innerHTML)
</script>
</body>
<html>
<head>
<script type="text/x-mathjax-config">
MathJax.Hub.Config({
extensions: ["tex2jax.js", "TeX/AMSsymbols.js", "TeX/AMSmath.js"],
jax: ["input/TeX", "output/HTML-CSS"],
tex2jax: {
inlineMath: [ ['$','$'] ],
displayMath: [ ['$$','$$'] ],
processEscapes: true
},
"HTML-CSS": { availableFonts: ["TeX"] }
});
</script>
<script src="https://cdnjs.cloudflare.com/ajax/libs/mathjax/2.7.0/MathJax.js" async></script>
<script type="text/javascript" src="../.tools/theme/marked.js">
</script>
<link href="http://cdn.bootcss.com/highlight.js/9.9.0/styles/darcula.min.css" rel="stylesheet">
<script src="http://cdn.bootcss.com/highlight.js/9.9.0/highlight.min.js"></script>
<link href="http://cdn.bootcss.com/bootstrap/4.0.0-alpha.6/css/bootstrap.min.css" rel="stylesheet">
<link href="https://cdn.jsdelivr.net/perfect-scrollbar/0.6.14/css/perfect-scrollbar.min.css" rel="stylesheet">
<link href="../.tools/theme/github-markdown.css" rel='stylesheet'>
</head>
<style type="text/css" >
.markdown-body {
box-sizing: border-box;
min-width: 200px;
max-width: 980px;
margin: 0 auto;
padding: 45px;
}
</style>
<body>
<div id="context" class="container-fluid markdown-body">
</div>
<!-- This block will be replaced by each markdown file content. Please do not change lines below.-->
<div id="markdown" style='display:none'>
# Generating Text with a Recurrent Neural Network Language Model
A language model is a probability distribution model; simply put, it computes the probability of a sentence. It can be used to decide which word sequence is more likely, or, given several words, to predict the most probable next word. The language model is an important and fundamental model in natural language processing.
## Application Scenarios
**Language models are used in many areas**, for example:
* **Automatic writing**: a language model can generate the next word from the preceding text; applied recursively, it can generate whole sentences, paragraphs, and articles.
* **QA**: a language model can generate an Answer given a Question.
* **Machine translation**: most mainstream machine translation models follow the Encoder-Decoder scheme, in which the Decoder is a conditional language model used to generate the target language.
* **Spelling check**: a language model computes the probability of a word sequence; the probability usually drops sharply around a spelling error, so it can be used to detect spelling errors and provide correction candidates.
* **Part-of-speech tagging, syntactic parsing, speech recognition...**
## About This Example
This example implements an RNN-based language model and uses it to generate text. The directory structure of this example is as follows:
```text
.
├── data
│ └── train_data_examples.txt # sample data; follow its format to provide your own data
├── config.py # configuration file, including data, train and infer settings
├── generate.py # prediction script, i.e. text generation
├── beam_search.py # beam search implementation
├── network_conf.py # all network structures of this example are defined here; modify this file to change the model structure
├── reader.py # data reading interface
├── README.md
├── train.py # training script
└── utils.py # common utility functions, e.g. building and loading the dictionary
```
## RNN Language Model
### Introduction
RNN is a sequence model. The basic idea is: at time $t$, the hidden-layer output of the previous time step $t-1$ and the word vector of time $t$ are fed into the hidden layer together to obtain the feature representation of time $t$; this representation is then used to produce the prediction at time $t$, and the process recurses along the time dimension. RNNs are good at exploiting context and history, i.e. they have a "memory". In theory an RNN can capture long-range dependencies (using knowledge from far in the past), but in practice the results are often unsatisfactory; variants such as LSTM and GRU introduce gating mechanisms that improve the memory cell of the vanilla RNN and alleviate its difficulties in learning long sequences. The model in this example uses LSTM or GRU, selectable via configuration. The figure below illustrates the "recurrent" idea of an RNN (in the broad sense, including LSTM, GRU, etc.) language model:
<p align=center><img src='images/rnn.png' width='500px'/></p>
### Model Implementation
The RNN language model in this example is implemented as follows:
- **Define model parameters**: the model's parameter variables are defined in `config.py`.
- **Define the model structure**: the **structure** of the model is defined in the `rnn_lm` **function** in `network_conf.py`, as follows:
- Input layer: maps the input word (or character) sequence to vectors, i.e. the word embedding layer `embedding`.
- Middle layer: implements the RNN layer according to the configuration, taking the `embedding` vector sequence from the previous step as input.
- Output layer: uses `softmax` to normalize and compute the probability of each word.
- loss: multi-class cross entropy is used as the loss function of the model.
- **Train the model**: the `main` method in `train.py` implements training with the following flow:
- Prepare the input data: build and save the dictionary, and construct the readers for the train and test data.
- Initialize the model: including the model's structure and parameters.
- Build the trainer: the demo uses the Adam optimization algorithm.
- Define the callback: build an `event_handler` to track the loss during training and save the model parameters at the end of each pass.
- Train: train the model with the trainer.
- **Generate text**: `generate.py` implements text generation with the following flow:
- Load the trained model and the dictionary file.
- Read the `gen_file` file, in which each line is a sentence prefix, and use [beam search](https://github.com/PaddlePaddle/book/blob/develop/08.machine_translation/README.cn.md#柱搜索算法) to generate text from each prefix.
- Save the generated text together with its prefix to the file `gen_result`.
## Usage
Run this example as follows:
* 1. Run `python train.py` to start training the model (RNN by default) and wait for training to finish.
* 2. Run `python generate.py` to generate text. (The input text defaults to `data/train_data_examples.txt`, and the generated text is saved to `data/gen_result.txt` by default.)
**To use your own corpus or customize the model, modify the configuration in `config.py`; the details and adaptation steps are as follows:**
### Corpus Adaptation
* Clean the corpus: remove spaces, tabs, and garbled characters from the raw text, and optionally remove digits, punctuation, and special symbols.
* Content format: one sentence per line; words within a line are separated by a single space.
* Configure the following parameters in `config.py` as needed:
```python
train_file = "data/train_data_examples.txt"
test_file = ""
vocab_file = "data/word_vocab.txt"
model_save_dir = "models"
```
1. `train_file`: path of the training data, **which must be pre-tokenized**.
2. `test_file`: path of the test data; if it is not empty, the model is evaluated on this test data at the end of every `pass`.
3. `vocab_file`: path of the dictionary; if the dictionary file does not exist, word frequencies are counted on the training corpus and a dictionary is built automatically.
4. `model_save_dir`: directory for saving models; if it does not exist, it is created automatically.
### Dictionary-Building Strategy
- When the specified dictionary file does not exist, word frequencies are counted on the training data and a dictionary is built automatically. The following two parameters in `config.py` are related to dictionary building:
```python
max_word_num = 51200 - 2
cutoff_word_fre = 0
```
1. `max_word_num`: how many words the dictionary contains.
2. `cutoff_word_fre`: the minimum frequency in the training corpus for a word to enter the dictionary.
- For example, if `max_word_num = 5000` and `cutoff_word_fre = 10` are specified, but the frequency count finds only 3000 words appearing more than 10 times in the training corpus, then the final dictionary contains those 3000 words.
- Two special symbols are automatically added when the dictionary is built:
1. `<unk>`: words that do not appear in the dictionary
2. `<e>`: the end-of-sentence marker
*Note: a larger dictionary produces richer generated text but takes longer to train. After Chinese word segmentation, a corpus can easily contain tens or hundreds of thousands of distinct words; if `max_word_num` is too small, the proportion of `<unk>` becomes too high, while a very large value severely slows down training (and also affects accuracy). An alternative is to train "by character": treat every Chinese character as a word. There are only a few thousand common characters, so the dictionary stays small and little information is lost, but the same character can have very different meanings in different words, which sometimes hurts model quality. It is recommended to experiment and choose between "by word" and "by character" training based on your data.*
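The following minimal sketch (an illustration, not the repository's `utils.py`) shows how a dictionary respecting `max_word_num` and `cutoff_word_fre` could be built:
```python
import collections

def build_dict(corpus_path, max_word_num, cutoff_word_fre):
    """Keep at most `max_word_num` words whose frequency exceeds
    `cutoff_word_fre`, then append the two special symbols <unk> and <e>."""
    counter = collections.Counter()
    with open(corpus_path) as f:
        for line in f:
            counter.update(line.strip().split())
    kept = [w for w, c in counter.most_common(max_word_num)
            if c > cutoff_word_fre]
    word_dict = {w: i for i, w in enumerate(kept)}
    word_dict["<unk>"] = len(word_dict)
    word_dict["<e>"] = len(word_dict)
    return word_dict
```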
### Model Adaptation and Training
* Adjust the following settings in `config.py` as needed to modify the network structure of the RNN language model:
```python
rnn_type = "lstm" # "gru" or "lstm"
emb_dim = 256
hidden_size = 256
stacked_rnn_num = 2
```
1. `rnn_type`: supports "gru" or "lstm", selecting which RNN unit to use.
2. `emb_dim`: dimension of the word vectors.
3. `hidden_size`: hidden size of the RNN unit.
4. `stacked_rnn_num`: number of stacked RNN units, forming a deeper model.
* Run `python train.py` to train the model; the model is saved to the directory specified by `model_save_dir`.
### Generating Text on Demand
* Adjust the following variables in `config.py` as needed; their meanings are:
```python
gen_file = "data/train_data_examples.txt"
gen_result = "data/gen_result.txt"
max_gen_len = 25 # the max number of words to generate
beam_size = 5
model_path = "models/rnn_lm_pass_00000.tar.gz"
```
1. `gen_file`: the input data file, with one sentence prefix per line, **which must be pre-tokenized**.
2. `gen_result`: the output file path; generation results are written to this file.
3. `max_gen_len`: the maximum length of each generated sentence; if the model fails to generate `<e>`, generation terminates automatically after `max_gen_len` words.
4. `beam_size`: the expansion width of each step of the Beam Search algorithm.
5. `model_path`: path of the trained model.
The `gen_file` holds the text prefixes to generate from, one prefix per line, for example:
```text
若隐若现 地像 幽灵 , 像 死神
```
Save the prefixes you want to generate from in this format;
* Run `python generate.py` to run the beam search algorithm and generate text for the input prefixes. Below is an example of the model's output:
```text
81 若隐若现 地像 幽灵 , 像 死神
-12.2542 一样 。 他 是 个 怪物 <e>
-12.6889 一样 。 他 是 个 英雄 <e>
-13.9877 一样 。 他 是 我 的 敌人 <e>
-14.2741 一样 。 他 是 我 的 <e>
-14.6250 一样 。 他 是 我 的 朋友 <e>
```
In this output:
1. The first line `81 若隐若现 地像 幽灵 , 像 死神` is separated by `\t` into two columns:
- The first column is the index of the input prefix in the training sample set.
- The second column is the input prefix.
2. Lines 2 through `beam_size + 1` are the generation results, likewise separated by `\t` into two columns:
- The first column is the log probability of the generated sequence.
- The second column is the generated text sequence; a normal result ends with the symbol `<e>`, and if it does not, the maximum sequence length was exceeded and generation was forcibly terminated.
</div>
<!-- You can change the lines below now. -->
<script type="text/javascript">
marked.setOptions({
renderer: new marked.Renderer(),
gfm: true,
breaks: false,
smartypants: true,
highlight: function(code, lang) {
code = code.replace(/&amp;/g, "&")
code = code.replace(/&gt;/g, ">")
code = code.replace(/&lt;/g, "<")
code = code.replace(/&nbsp;/g, " ")
return hljs.highlightAuto(code, [lang]).value;
}
});
document.getElementById("context").innerHTML = marked(
document.getElementById("markdown").innerHTML)
</script>
</body>
<html>
<head>
<script type="text/x-mathjax-config">
MathJax.Hub.Config({
extensions: ["tex2jax.js", "TeX/AMSsymbols.js", "TeX/AMSmath.js"],
jax: ["input/TeX", "output/HTML-CSS"],
tex2jax: {
inlineMath: [ ['$','$'] ],
displayMath: [ ['$$','$$'] ],
processEscapes: true
},
"HTML-CSS": { availableFonts: ["TeX"] }
});
</script>
<script src="https://cdnjs.cloudflare.com/ajax/libs/mathjax/2.7.0/MathJax.js" async></script>
<script type="text/javascript" src="../.tools/theme/marked.js">
</script>
<link href="http://cdn.bootcss.com/highlight.js/9.9.0/styles/darcula.min.css" rel="stylesheet">
<script src="http://cdn.bootcss.com/highlight.js/9.9.0/highlight.min.js"></script>
<link href="http://cdn.bootcss.com/bootstrap/4.0.0-alpha.6/css/bootstrap.min.css" rel="stylesheet">
<link href="https://cdn.jsdelivr.net/perfect-scrollbar/0.6.14/css/perfect-scrollbar.min.css" rel="stylesheet">
<link href="../.tools/theme/github-markdown.css" rel='stylesheet'>
</head>
<style type="text/css" >
.markdown-body {
box-sizing: border-box;
min-width: 200px;
max-width: 980px;
margin: 0 auto;
padding: 45px;
}
</style>
<body>
<div id="context" class="container-fluid markdown-body">
</div>
<!-- This block will be replaced by each markdown file content. Please do not change lines below.-->
<div id="markdown" style='display:none'>
# Globally Normalized Reader
This model implements the work in the following paper:
Jonathan Raiman and John Miller. Globally Normalized Reader. Empirical Methods in Natural Language Processing (EMNLP), 2017.
If you use the dataset/code in your research, please cite the above paper:
```text
@inproceedings{raiman2015gnr,
author={Raiman, Jonathan and Miller, John},
booktitle={Empirical Methods in Natural Language Processing (EMNLP)},
title={Globally Normalized Reader},
year={2017},
}
```
You can also visit https://github.com/baidu-research/GloballyNormalizedReader to get more information.
# Installation
1. Please use [docker image](http://doc.paddlepaddle.org/develop/doc/getstarted/build_and_install/docker_install_en.html) to install the latest PaddlePaddle, by running:
```bash
docker pull paddledev/paddle
```
2. Download all necessary data by running:
```bash
cd data && ./download.sh && cd ..
```
3. Preprocess and featurize the data:
```bash
python featurize.py --datadir data --outdir data/featurized --glove-path data/glove.840B.300d.txt
```
# Training a Model
- Configure the model by modifying `config.py` if needed, and then run:
```bash
python train.py 2>&1 | tee train.log
```
# Inferring by a Trained Model
- Run inference with a trained model by executing:
```bash
python infer.py \
--model_path models/pass_00000.tar.gz \
--data_dir data/featurized/ \
--batch_size 2 \
--use_gpu 0 \
--trainer_count 1 \
2>&1 | tee infer.log
```
</div>
<!-- You can change the lines below now. -->
<script type="text/javascript">
marked.setOptions({
renderer: new marked.Renderer(),
gfm: true,
breaks: false,
smartypants: true,
highlight: function(code, lang) {
code = code.replace(/&amp;/g, "&")
code = code.replace(/&gt;/g, ">")
code = code.replace(/&lt;/g, "<")
code = code.replace(/&nbsp;/g, " ")
return hljs.highlightAuto(code, [lang]).value;
}
});
document.getElementById("context").innerHTML = marked(
document.getElementById("markdown").innerHTML)
</script>
</body>
<html>
<head>
<script type="text/x-mathjax-config">
MathJax.Hub.Config({
extensions: ["tex2jax.js", "TeX/AMSsymbols.js", "TeX/AMSmath.js"],
jax: ["input/TeX", "output/HTML-CSS"],
tex2jax: {
inlineMath: [ ['$','$'] ],
displayMath: [ ['$$','$$'] ],
processEscapes: true
},
"HTML-CSS": { availableFonts: ["TeX"] }
});
</script>
<script src="https://cdnjs.cloudflare.com/ajax/libs/mathjax/2.7.0/MathJax.js" async></script>
<script type="text/javascript" src="../.tools/theme/marked.js">
</script>
<link href="http://cdn.bootcss.com/highlight.js/9.9.0/styles/darcula.min.css" rel="stylesheet">
<script src="http://cdn.bootcss.com/highlight.js/9.9.0/highlight.min.js"></script>
<link href="http://cdn.bootcss.com/bootstrap/4.0.0-alpha.6/css/bootstrap.min.css" rel="stylesheet">
<link href="https://cdn.jsdelivr.net/perfect-scrollbar/0.6.14/css/perfect-scrollbar.min.css" rel="stylesheet">
<link href="../.tools/theme/github-markdown.css" rel='stylesheet'>
</head>
<style type="text/css" >
.markdown-body {
box-sizing: border-box;
min-width: 200px;
max-width: 980px;
margin: 0 auto;
padding: 45px;
}
</style>
<body>
<div id="context" class="container-fluid markdown-body">
</div>
<!-- This block will be replaced by each markdown file content. Please do not change lines below.-->
<div id="markdown" style='display:none'>
# Accelerating Word Embedding Training with Hsigmoid
## Background
In natural language processing, the traditional approach represents words with one-hot vectors. For example, with the dictionary ['我', '你', '喜欢'], the three vectors [1,0,0], [0,1,0] and [0,0,1] represent '我', '你' and '喜欢' respectively. This representation is simple, but with a large vocabulary it suffers from dimension explosion, and any two word vectors are orthogonal, so they carry limited information. To avoid or alleviate these drawbacks, word embeddings are now commonly used instead of one-hot vectors: a low-dimensional dense real vector replaces the high-dimensional sparse one-hot vector. There are many ways to train word embeddings; neural network models such as CBOW and Skip-gram are among them. These models are essentially classifiers, and when the vocabulary (i.e. the number of classes) is large, the traditional softmax becomes very time-consuming. PaddlePaddle provides the Hsigmoid Layer and the NCE Layer to speed up training. This article focuses on how to use the Hsigmoid Layer; for background on word embeddings, please see the [word2vec chapter](https://github.com/PaddlePaddle/book/tree/develop/04.word2vec) of the PaddlePaddle Book.
## Hsigmoid Layer
The Hsigmoid Layer comes from the paper \[[1](#References)\]. Hsigmoid stands for Hierarchical-sigmoid; the idea is to reduce computational complexity by building a classification binary tree, in which every leaf node represents a class and every non-leaf node is a binary classifier. For example, with four classes 0, 1, 2 and 3, softmax computes a score for each of the four classes and normalizes them into probabilities; when the number of classes is large, computing every class probability is very expensive. The Hsigmoid Layer instead builds a balanced binary tree over the classes, as follows:
<p align="center">
<img src="images/binary_tree.png" width="220" hspace='10'/> <img src="images/path_to_1.png" width="220" hspace='10'/> <br/>
Figure 1. (a) a balanced binary tree; (b) the path from the root to class 1
</p>
Every non-leaf node of the binary tree is a binary classifier (sigmoid). If its output is class 0, we descend to the left child and continue classifying, otherwise to the right child, until a leaf is reached. In this way every class corresponds to one path; for example, the path from the root to class 1 is encoded as 0, 1. During training, we follow the path of the true class, compute the loss of each classifier along the path, and combine them into the final loss. During prediction, the model outputs the probability of every non-leaf classifier; from these probabilities we obtain the path encoding, and walking the path gives the final predicted class. The computational complexity of traditional softmax is N (N being the dictionary size), while Hsigmoid reduces it to log(N); for the theoretical details see the paper \[[1](#References)\].
## Data Preparation
### PTB Data
This example uses the Penn Treebank (PTB) dataset ([Tomas Mikolov's preprocessed version](http://www.fit.vutbr.cz/~imikolov/rnnlm/simple-examples.tgz)), which contains three files: train, valid and test. We use train as training data and valid as test data. A 5-gram model is trained, i.e. the first 4 words of each sample are used to predict the 5th word. PaddlePaddle provides the python package [paddle.dataset.imikolov](https://github.com/PaddlePaddle/Paddle/blob/develop/python/paddle/v2/dataset/imikolov.py) for the PTB dataset, which downloads and preprocesses the data automatically. Preprocessing adds the begin symbol \<s> and the end symbol \<e> to every sentence, and then slides a window of the configured size (5 here) from left to right, emitting one sample per position. For example, "I have a dream that one day" produces \<s> I have a dream, I have a dream that, have a dream that one, a dream that one day, dream that one day \<e>. PaddlePaddle converts the words to id data as the preprocessing output.
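As an illustration of the windowing described above (a sketch only, not `paddle.dataset.imikolov` itself):
```python
def ngram_samples(sentence, n=5):
    """Pad the sentence with <s> and <e>, then slide an n-word window over it."""
    words = ["<s>"] + sentence.split() + ["<e>"]
    for i in range(len(words) - n + 1):
        yield words[i:i + n]

# Example: list(ngram_samples("I have a dream that one day")) yields
# ['<s>', 'I', 'have', 'a', 'dream'], ['I', 'have', 'a', 'dream', 'that'], ...
```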
### Custom Data
You can train the model with your own dataset. The key point is to implement the reader interface for data processing: the reader should produce an iterator that parses each line of the file and returns a python list such as [1, 2, 3, 4, 5], containing the dictionary ids of the words in one sample. PaddlePaddle further converts the list into `paddle.data_type.integer_value` as the input of the data layer. A wrapper example is as follows:
```python
def reader_creator(filename, word_dict, n):
def reader():
with open(filename) as f:
UNK = word_dict['<unk>']
for l in f:
l = ['<s>'] + l.strip().split() + ['<e>']
if len(l) >= n:
l = [word_dict.get(w, UNK) for w in l]
for i in range(n, len(l) + 1):
yield tuple(l[i - n:i])
return reader
def train_data(filename, word_dict, n):
"""
Reader interface for training data.
It returns a reader creator, each sample in the reader is a word ID tuple.
:param filename: path of data file
:type filename: str
:param word_dict: word dictionary
:type word_dict: dict
:param n: sliding window size
:type n: int
"""
return reader_creator(filename, word_dict, n)
```
## Network Structure
Word embeddings are obtained by training an N-gram language model; concretely, the previous 4 words are used to predict the current word. The network input is the word ids in the dictionary; the embedding table is looked up to get the word vectors, the 4 word vectors are concatenated, fed into a fully connected hidden layer, and finally into the `Hsigmoid` layer. The detailed network structure is shown in Figure 2:
<p align="center">
<img src="images/network_conf.png" width = "70%" align="center"/><br/>
Figure 2. Network configuration
</p>
The code is as follows:
```python
def ngram_lm(hidden_size, embed_size, dict_size, gram_num=4, is_train=True):
emb_layers = []
embed_param_attr = paddle.attr.Param(
name="_proj", initial_std=0.001, learning_rate=1, l2_rate=0)
for i in range(gram_num):
word = paddle.layer.data(
name="__word%02d__" % (i),
type=paddle.data_type.integer_value(dict_size))
emb_layers.append(
paddle.layer.embedding(
input=word, size=embed_size, param_attr=embed_param_attr))
target_word = paddle.layer.data(
name="__target_word__", type=paddle.data_type.integer_value(dict_size))
embed_context = paddle.layer.concat(input=emb_layers)
hidden_layer = paddle.layer.fc(
input=embed_context,
size=hidden_size,
act=paddle.activation.Sigmoid(),
layer_attr=paddle.attr.Extra(drop_rate=0.5),
bias_attr=paddle.attr.Param(learning_rate=2),
param_attr=paddle.attr.Param(
initial_std=1. / math.sqrt(embed_size * 8), learning_rate=1))
return paddle.layer.hsigmoid(
input=hidden_layer,
label=target_word,
param_attr=paddle.attr.Param(name="sigmoid_w"),
bias_attr=paddle.attr.Param(name="sigmoid_b"))
```
Note that in PaddlePaddle, the hsigmoid layer stores its learnable parameters as a matrix of size `[number of classes - 1 × hidden vector width]`. At prediction time, the hsigmoid layer must be replaced by a fully connected operation **with `sigmoid` fixed as the activation**. Prediction outputs a matrix of size `[batch_size x number of classes - 1]` (which degenerates to a vector when `batch_size = 1`). Each element of a row vector is the probability that the input vector belongs to the right child of one internal node. **When the fully connected operation loads the parameter matrix learned by the hsigmoid layer, the matrix must be transposed once**. The code snippet is as follows:
```python
return paddle.layer.mixed(
size=dict_size - 1,
input=paddle.layer.trans_full_matrix_projection(
hidden_layer, param_attr=paddle.attr.Param(name="sigmoid_w")),
act=paddle.activation.Sigmoid(),
bias_attr=paddle.attr.Param(name="sigmoid_b"))
```
`paddle.layer.mixed` in the snippet above must take PaddlePaddle `paddle.layer.×_projection` layers as input. `paddle.layer.mixed` sums the results of its `projection` inputs (there can be several) as its output. `paddle.layer.trans_full_matrix_projection` transposes the parameter $W$ when computing the matrix multiplication.
## Training
Training is simple: just run ``` python train.py ```. On the first run, the program checks whether the imikolov dataset is in the user's cache directory and downloads it automatically if not. During training, model information (mainly training loss and test loss) is printed every 100 iterations, and the model is saved once per pass.
## Prediction
Run on the command line:
```bash
python infer.py \
--model_path "models/XX" \
--batch_size 1 \
--use_gpu false \
--trainer_count 1
```
The arguments are:
- `model_path`: path of the trained model. Required.
- `batch_size`: number of samples predicted in parallel at a time. Optional, default `1`.
- `use_gpu`: whether to use GPU for prediction. Optional, default `False`.
- `trainer_count`: number of threads used for prediction. Optional, default `1`. **Note: the number of threads used for prediction must be greater than the number of samples predicted in parallel at a time**.
At prediction time, the path encoding is derived from the binary classification probabilities, and walking the path yields the final predicted class. The logic is as follows:
```python
def decode_res(infer_res, dict_size):
"""
Inferred probabilities are organized as a complete binary tree.
The actual labels are leaves (indices are counted from class number).
This function travels paths decoded from inferring results.
If the probability >0.5 then go to right child, otherwise go to left child.
param infer_res: inferring result
param dict_size: class number
return predict_lbls: actual class
"""
predict_lbls = []
infer_res = infer_res > 0.5
for i, probs in enumerate(infer_res):
idx = 0
result = 1
while idx < len(probs):
result <<= 1
if probs[idx]:
result |= 1
if probs[idx]:
idx = idx * 2 + 2 # right child
else:
idx = idx * 2 + 1 # left child
predict_lbl = result - dict_size
predict_lbls.append(predict_lbl)
return predict_lbls
```
The input data format of the prediction program is the same as in training, e.g. `have a dream that one`. The program generates a set of probabilities from `have a dream that` and decodes them into the predicted word, while `one` serves as the ground-truth word for evaluation. The decoding function takes the predicted probabilities of one batch of samples and the vocabulary size as inputs; its loop decodes the output probabilities of each sample, following the rule "left = 0, right = 1" and walking the path until a leaf node is reached.
## References
1. Morin, F., & Bengio, Y. (2005, January). [Hierarchical Probabilistic Neural Network Language Model](http://www.iro.umontreal.ca/~lisa/pointeurs/hierarchical-nnlm-aistats05.pdf). In Aistats (Vol. 5, pp. 246-252).
</div>
<!-- You can change the lines below now. -->
<script type="text/javascript">
marked.setOptions({
renderer: new marked.Renderer(),
gfm: true,
breaks: false,
smartypants: true,
highlight: function(code, lang) {
code = code.replace(/&amp;/g, "&")
code = code.replace(/&gt;/g, ">")
code = code.replace(/&lt;/g, "<")
code = code.replace(/&nbsp;/g, " ")
return hljs.highlightAuto(code, [lang]).value;
}
});
document.getElementById("context").innerHTML = marked(
document.getElementById("markdown").innerHTML)
</script>
</body>
<html>
<head>
<script type="text/x-mathjax-config">
MathJax.Hub.Config({
extensions: ["tex2jax.js", "TeX/AMSsymbols.js", "TeX/AMSmath.js"],
jax: ["input/TeX", "output/HTML-CSS"],
tex2jax: {
inlineMath: [ ['$','$'] ],
displayMath: [ ['$$','$$'] ],
processEscapes: true
},
"HTML-CSS": { availableFonts: ["TeX"] }
});
</script>
<script src="https://cdnjs.cloudflare.com/ajax/libs/mathjax/2.7.0/MathJax.js" async></script>
<script type="text/javascript" src="../.tools/theme/marked.js">
</script>
<link href="http://cdn.bootcss.com/highlight.js/9.9.0/styles/darcula.min.css" rel="stylesheet">
<script src="http://cdn.bootcss.com/highlight.js/9.9.0/highlight.min.js"></script>
<link href="http://cdn.bootcss.com/bootstrap/4.0.0-alpha.6/css/bootstrap.min.css" rel="stylesheet">
<link href="https://cdn.jsdelivr.net/perfect-scrollbar/0.6.14/css/perfect-scrollbar.min.css" rel="stylesheet">
<link href="../.tools/theme/github-markdown.css" rel='stylesheet'>
</head>
<style type="text/css" >
.markdown-body {
box-sizing: border-box;
min-width: 200px;
max-width: 980px;
margin: 0 auto;
padding: 45px;
}
</style>
<body>
<div id="context" class="container-fluid markdown-body">
</div>
<!-- This block will be replaced by each markdown file content. Please do not change lines below.-->
<div id="markdown" style='display:none'>
Image Classification
=======================
This section introduces how to perform image classification in PaddlePaddle with the AlexNet, VGG, GoogLeNet and ResNet models. For a description of the image classification problem and an introduction to these four models, please refer to the [PaddlePaddle book](https://github.com/PaddlePaddle/book/tree/develop/03.image_classification).
## Training the Model
### Initialization
In the initialization stage, import the required packages and initialize PaddlePaddle.
```python
import gzip
import paddle.v2.dataset.flowers as flowers
import paddle.v2 as paddle
import reader
import vgg
import resnet
import alexnet
import googlenet
# PaddlePaddle init
paddle.init(use_gpu=False, trainer_count=1)
```
### Defining Parameters and Inputs
Set the algorithm parameters (such as data dimension, number of classes and batch size), and define the data input layer `image` and the class label `lbl`.
```python
DATA_DIM = 3 * 224 * 224
CLASS_DIM = 102
BATCH_SIZE = 128
image = paddle.layer.data(
name="image", type=paddle.data_type.dense_vector(DATA_DIM))
lbl = paddle.layer.data(
name="label", type=paddle.data_type.integer_value(CLASS_DIM))
```
### Obtaining the Model
Any one of the AlexNet, VGG, GoogLeNet and ResNet models can be chosen for image classification. The final Softmax layer of the network is obtained by calling the corresponding method.
1. Using the AlexNet model
After specifying the input layer `image` and the number of classes `CLASS_DIM`, the Softmax layer of AlexNet is obtained with the following code.
```python
out = alexnet.alexnet(image, class_dim=CLASS_DIM)
```
2. Using the VGG model
Depending on the number of layers, VGG comes as VGG13, VGG16 and VGG19. The code for the VGG16 model is as follows:
```python
out = vgg.vgg16(image, class_dim=CLASS_DIM)
```
Similarly, VGG13 and VGG19 can be obtained with the `vgg.vgg13` and `vgg.vgg19` methods.
3. Using the GoogLeNet model
During training GoogLeNet uses two auxiliary classifiers to strengthen the gradient signal and add extra regularization, so `googlenet.googlenet` returns three Softmax layers, as shown in the following code:
```python
out, out1, out2 = googlenet.googlenet(image, class_dim=CLASS_DIM)
loss1 = paddle.layer.cross_entropy_cost(
input=out1, label=lbl, coeff=0.3)
paddle.evaluator.classification_error(input=out1, label=lbl)
loss2 = paddle.layer.cross_entropy_cost(
input=out2, label=lbl, coeff=0.3)
paddle.evaluator.classification_error(input=out2, label=lbl)
extra_layers = [loss1, loss2]
```
For the two auxiliary outputs, a loss is computed and the classification error evaluated for each; the losses are later passed to SGD as extra_layers.
4. Using the ResNet model
The ResNet model can be obtained with the following code:
```python
out = resnet.resnet_imagenet(image, class_dim=CLASS_DIM)
```
### Defining the Loss Function
```python
cost = paddle.layer.classification_cost(input=out, label=lbl)
```
### Creating Parameters and the Optimizer
```python
# Create parameters
parameters = paddle.parameters.create(cost)
# Create optimizer
optimizer = paddle.optimizer.Momentum(
momentum=0.9,
regularization=paddle.optimizer.L2Regularization(rate=0.0005 *
BATCH_SIZE),
learning_rate=0.001 / BATCH_SIZE,
learning_rate_decay_a=0.1,
learning_rate_decay_b=128000 * 35,
learning_rate_schedule="discexp", )
```
The learning-rate schedule is specified through `learning_rate_decay_a` (abbreviated $a$), `learning_rate_decay_b` (abbreviated $b$) and `learning_rate_schedule`. Here a discrete exponential ("discexp") schedule is used, computed by the formula below, where $n$ is the cumulative number of samples processed so far and $lr_{0}$ is the `learning_rate` set in the parameters.
$$ lr = lr_{0} * a^{\lfloor \frac{n}{b} \rfloor} $$
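For a quick sanity check of this schedule (a standalone snippet, not part of the training code), the decayed learning rate can be computed directly:
```python
# With a = 0.1 and b = 128000 * 35, the learning rate drops by 10x every
# b processed samples.
def discexp_lr(lr0, a, b, n_samples):
    return lr0 * a ** (n_samples // b)

base_lr = 0.001 / 128  # learning_rate as configured above
print(discexp_lr(base_lr, 0.1, 128000 * 35, 0))             # first stage
print(discexp_lr(base_lr, 0.1, 128000 * 35, 128000 * 35))   # after one decay step
```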
### Defining the Data Readers
We first use the [flowers dataset](http://www.robots.ox.ac.uk/~vgg/data/flowers/102/index.html) as an example of how to define the input. The following code defines the input for the flowers training set and validation set:
```python
train_reader = paddle.batch(
paddle.reader.shuffle(
flowers.train(),
buf_size=1000),
batch_size=BATCH_SIZE)
test_reader = paddle.batch(
flowers.valid(),
batch_size=BATCH_SIZE)
```
To use other data, an image list file has to be created first. `reader.py` defines how such a file is read: it parses the image path and the class label from each line of the image list file.
The image list file is a text file in which every line consists of an image path and a class label separated by a tab. Class labels are integers starting from 0. Below is a fragment of an image list file:
```
dataset_100/train_images/n03982430_23191.jpeg 1
dataset_100/train_images/n04461696_23653.jpeg 7
dataset_100/train_images/n02441942_3170.jpeg 8
dataset_100/train_images/n03733281_31716.jpeg 2
dataset_100/train_images/n03424325_240.jpeg 0
dataset_100/train_images/n02643566_75.jpeg 8
```
For training, the image list files of the training set and the validation set must be specified separately. Assuming these two files are `train.list` and `val.list`, the data is read as follows:
```python
train_reader = paddle.batch(
paddle.reader.shuffle(
reader.train_reader('train.list'),
buf_size=1000),
batch_size=BATCH_SIZE)
test_reader = paddle.batch(
reader.test_reader('val.list'),
batch_size=BATCH_SIZE)
```
### Defining the Event Handler
```python
# End batch and end pass event handler
def event_handler(event):
if isinstance(event, paddle.event.EndIteration):
if event.batch_id % 1 == 0:
print "\nPass %d, Batch %d, Cost %f, %s" % (
event.pass_id, event.batch_id, event.cost, event.metrics)
if isinstance(event, paddle.event.EndPass):
with gzip.open('params_pass_%d.tar.gz' % event.pass_id, 'w') as f:
parameters.to_tar(f)
result = trainer.test(reader=test_reader)
print "\nTest with Pass %d, %s" % (event.pass_id, result.metrics)
```
### Defining the Trainer
For AlexNet, VGG and ResNet, the trainer can be defined with the following code:
```python
# Create trainer
trainer = paddle.trainer.SGD(
cost=cost,
parameters=parameters,
update_equation=optimizer)
```
GoogLeNet has two extra output layers, so `extra_layers` must be specified, as shown below:
```python
# Create trainer
trainer = paddle.trainer.SGD(
cost=cost,
parameters=parameters,
update_equation=optimizer,
extra_layers=extra_layers)
```
### Starting the Training
```python
trainer.train(
reader=train_reader, num_passes=200, event_handler=event_handler)
```
## Applying the Model
After the model is trained, the following code can be used to predict the class of a given image.
```python
# load parameters
with gzip.open('params_pass_10.tar.gz', 'r') as f:
parameters = paddle.parameters.Parameters.from_tar(f)
file_list = [line.strip() for line in open(image_list_file)]
test_data = [(paddle.image.load_and_transform(image_file, 256, 224, False)
.flatten().astype('float32'), )
for image_file in file_list]
probs = paddle.infer(
output_layer=out, parameters=parameters, input=test_data)
lab = np.argsort(-probs)
for file_name, result in zip(file_list, lab):
print "Label of %s is: %d" % (file_name, result[0])
```
The trained model is first loaded from file (the code uses the result of pass 10 as an example), and the images in `image_list_file` are read. `image_list_file` is a text file with one image path per line. The code uses `paddle.infer` to determine the class of each image in `image_list_file` and prints the result.
## Using Pretrained Models
To make testing and fine-tuning easier, we provide pretrained models that correspond to the model configurations in this example, currently ResNet50, ResNet101 and Vgg16 trained on the ImageNet 1000 classes. Please download them with the script `model_download.sh` in the `models` directory; for example, to download ResNet50, enter the `models` directory and run "`sh model_download.sh ResNet50`". When it finishes, `Paddle_ResNet50.tar.gz` in the same directory is the trained model, which can be loaded in code in either of the following two ways:
```python
parameters = paddle.parameters.Parameters.from_tar(gzip.open('Paddle_ResNet50.tar.gz', 'r'))
```
```python
parameters = paddle.parameters.create(cost)
parameters.init_from_tar(gzip.open('Paddle_ResNet50.tar.gz', 'r'))
```
### Notes
The file names inside the model archive correspond one-to-one to the parameter names in the model configuration, and this correspondence is what parameter loading relies on. The pretrained models we provide all use the configurations in the example code; if you modify the network configuration, take care to keep the parameter names in the configuration consistent with the file names in the archive.
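To check this correspondence, the archive members can simply be listed (a small sketch, assuming the pretrained archive is a plain tar.gz of parameter files as described above):
```python
import tarfile

# List the parameter files bundled in the pretrained archive so their names can
# be compared against the parameter names in the network configuration.
with tarfile.open("Paddle_ResNet50.tar.gz", "r:gz") as archive:
    for name in archive.getnames():
        print(name)
```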
</div>
<!-- You can change the lines below now. -->
<script type="text/javascript">
marked.setOptions({
renderer: new marked.Renderer(),
gfm: true,
breaks: false,
smartypants: true,
highlight: function(code, lang) {
code = code.replace(/&amp;/g, "&")
code = code.replace(/&gt;/g, ">")
code = code.replace(/&lt;/g, "<")
code = code.replace(/&nbsp;/g, " ")
return hljs.highlightAuto(code, [lang]).value;
}
});
document.getElementById("context").innerHTML = marked(
document.getElementById("markdown").innerHTML)
</script>
</body>
<html>
<head>
<script type="text/x-mathjax-config">
MathJax.Hub.Config({
extensions: ["tex2jax.js", "TeX/AMSsymbols.js", "TeX/AMSmath.js"],
jax: ["input/TeX", "output/HTML-CSS"],
tex2jax: {
inlineMath: [ ['$','$'] ],
displayMath: [ ['$$','$$'] ],
processEscapes: true
},
"HTML-CSS": { availableFonts: ["TeX"] }
});
</script>
<script src="https://cdnjs.cloudflare.com/ajax/libs/mathjax/2.7.0/MathJax.js" async></script>
<script type="text/javascript" src="../.tools/theme/marked.js">
</script>
<link href="http://cdn.bootcss.com/highlight.js/9.9.0/styles/darcula.min.css" rel="stylesheet">
<script src="http://cdn.bootcss.com/highlight.js/9.9.0/highlight.min.js"></script>
<link href="http://cdn.bootcss.com/bootstrap/4.0.0-alpha.6/css/bootstrap.min.css" rel="stylesheet">
<link href="https://cdn.jsdelivr.net/perfect-scrollbar/0.6.14/css/perfect-scrollbar.min.css" rel="stylesheet">
<link href="../.tools/theme/github-markdown.css" rel='stylesheet'>
</head>
<style type="text/css" >
.markdown-body {
box-sizing: border-box;
min-width: 200px;
max-width: 980px;
margin: 0 auto;
padding: 45px;
}
</style>
<body>
<div id="context" class="container-fluid markdown-body">
</div>
<!-- This block will be replaced by each markdown file content. Please do not change lines below.-->
<div id="markdown" style='display:none'>
# Accelerating Language Model Training with Noise-Contrastive Estimation
## Why Noise-Contrastive Estimation Is Needed
Language models are the foundation of many natural language processing tasks and an effective way to obtain word vector representations. A Neural Probabilistic Language Model (NPLM) models the probability $P(\omega_1^T)$ that a word sequence $\omega_1,...,\omega_T$ belongs to some fixed language:
$$P(\omega_1^T)= \prod_{t=1}^{T}P(\omega_t|\omega_1^{t-1})$$
To make modeling and optimization tractable, a conditional independence assumption is usually introduced: the probability of word $\omega_t$ depends only on the previous $n-1$ words, which gives:
$$ P(\omega_1^T) \approx \prod P(\omega_t|\omega_{t-n+1}^{t-1}) \tag{1}$$
Equation ($1$) shows that the probability of the whole sequence $\omega_1,...,\omega_T$ can be computed by modeling the conditional probability $P(\omega_t|\omega_{t-n+1},...,\omega_{t-1})$. Thus the task of a language model can be summarized simply as:
**Given the vector representation $h$ of a word sequence, called the context, the model predicts the probability of the next target word $\omega$.**
In an [$n$-gram language model](https://github.com/PaddlePaddle/book/tree/develop/04.word2vec) the context is a fixed window of $n-1$ words, while an [RNN language model](https://github.com/PaddlePaddle/models/tree/develop/generate_sequence_by_rnn_lm) can handle contexts of arbitrary length.
Given a context $h$, an NPLM learns a scoring function $s_\theta(\omega, h)$ that measures the similarity between the context vector $h$ and the vector representation $\omega'$ of every possible next word; the scores are then normalized over the whole vocabulary (divided by the normalizing factor $Z$) to obtain the probability distribution of the target word $\omega$, where $\theta$ are the learnable parameters. This process is expressed in equation ($2$), which is exactly the `Softmax` computation.
$$P_\theta^h(\omega) = \frac{\exp{s_\theta(\omega, h)}}{Z},\quad Z=\sum_{\omega'} \exp{s_\theta(\omega', h)}\tag{2}$$
Maximum Likelihood Estimation (MLE) is the most common learning criterion for probability ($2$). However, both estimating the probability $P_\theta^h(\omega)$ and computing the gradient of the likelihood require the normalizing factor $Z$, whose cost grows linearly with the dictionary size. When training a large-scale language model, e.g. with a vocabulary of millions of words or more, training becomes extremely slow. We therefore **need alternative learning criteria whose optimization is computationally lighter and tractable.**
Another article in models introduces [accelerating word embedding training with Hsigmoid](https://github.com/PaddlePaddle/models/tree/develop/hsigmoid); here we introduce another sampling-based method for speeding up language model training: Noise-Contrastive Estimation (NCE) \[[1](#References)\].
## What Is Noise-Contrastive Estimation
Noise-contrastive estimation is a sampling-based criterion for probability density estimation, designed for the special class of probability functions that consist of an unnormalized scoring function and a normalizing factor \[[1](#References)\]. NCE avoids computing the normalizing factor $Z$ over the whole dictionary, and thus reduces the computational cost, by constructing the following auxiliary problem:
Given a context $h$ and any known noise distribution $P_n$, learn a binary classifier that models the probability that a target $\omega$ comes from the true distribution $P_\theta$ ($D = 1$) rather than from the noise distribution $P_n$ ($D = 0$). Assuming the number of negative samples drawn from the noise distribution is $k$ times the number of target samples, we have:
$$P(D=1|h,\omega) = \frac{P_\theta(h, \omega)}{P_\theta (h, \omega) + kP_n} \tag{3}$$
We model the binary classification probability in ($3$) directly with a `Sigmoid` function:
$$P(D=1|h,\omega) = \sigma (\Delta s_\theta(w,h)) \tag{4}$$
With this problem setup, maximum likelihood estimation can be carried out on the binary classification task: increase the probability of positive samples while decreasing that of negative samples [[2, 3](#References)], i.e. maximize the following objective:
$$
J^h(\theta )=E_{ P_d^h }\left[ \log { P^h(D=1|w,\theta ) } \right] +kE_{ P_n }\left[ \log P^h (D=0|w,\theta ) \right]$$
$$
\\\\\qquad =E_{ P_d^h }\left[ \log { \sigma (\Delta s_\theta(w,h)) } \right] +kE_{ P_n }\left[ \log (1-\sigma (\Delta s_\theta(w,h))) \right] \tag{5}$$
Equation ($5$) is the NCE objective defined from noise-contrastive estimation (its negative is the NCE loss used in training). Two questions remain:
1. What is $s_\theta(w,h)$ in equation ($5$)?
- In the neural network implementation, $s_\theta(h,\omega)$ is the unnormalized score.
- The learnable parameter $W$ of the NCE cost layer is a matrix of size $|V| \times d$, where $|V|$ is the dictionary size and $d$ is the dimension of the context vector $h$;
- During training, the true class $t$ of the next word is the positive class, and $k$ negative samples are drawn from the specified noise distribution, with classes denoted $\{n_1, ..., n_k\}$;
- The rows $\{t, n_1, ..., n_k\}$ of $W$ ($k + 1$ rows in total) are extracted and each is scored against $h$ to obtain $s_\theta(w,h)$, and the final loss is then computed via equation ($5$);
2. How is the noise distribution chosen?
- In practice, any suitable noise distribution can be used (the noise distribution implies a certain prior).
- The most common choices are a `unigram` distribution over the whole dictionary (word frequency counts) or an unbiased uniform distribution.
- In PaddlePaddle, if the user does not specify a noise distribution, the uniform distribution is used by default.
When training with the NCE criterion, the computational cost of the last layer is linear only in the number of negative samples. As the number of negative samples grows, the NCE criterion converges to maximum likelihood estimation, so the quality of the approximation to the normalized probability distribution can be controlled via the number of negative samples.
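As an illustration of equation ($5$) for a single (context, word) pair, a plain numpy sketch could look like the following; the correction $\Delta s = s - \log(k P_n(w))$ is an assumption here, since the text does not expand $\Delta$.
```python
import numpy as np

def sigmoid(x):
    return 1.0 / (1.0 + np.exp(-x))

def nce_loss(pos_score, neg_scores, k, p_n_pos, p_n_neg):
    """Negative of the objective in equation (5) for one (context, word) pair.
    pos_score / neg_scores are the unnormalized scores s_theta(w, h)."""
    delta_pos = pos_score - np.log(k * p_n_pos)
    delta_neg = neg_scores - np.log(k * p_n_neg)
    return -(np.log(sigmoid(delta_pos)) +
             np.sum(np.log(1.0 - sigmoid(delta_neg))))

# toy example: 1 positive word and k = 3 uniformly sampled noise words
print(nce_loss(2.0, np.array([0.1, -0.3, 0.5]), k=3,
               p_n_pos=1.0 / 10000, p_n_neg=np.full(3, 1.0 / 10000)))
```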
## Experimental Data
This example uses the Penn Treebank (PTB) dataset ([Tomas Mikolov's preprocessed version](http://www.fit.vutbr.cz/~imikolov/rnnlm/simple-examples.tgz)) to train a 5-gram language model. PaddlePaddle provides the [paddle.dataset.imikolov](https://github.com/PaddlePaddle/Paddle/blob/develop/python/paddle/v2/dataset/imikolov.py) interface for convenient access to the PTB data. If no downloaded copy is found, the script downloads it automatically and verifies the file integrity. The corpus is in English, with 42068 training sentences and 3761 test sentences.
## Network Structure
The detailed network structure of the 5-gram neural probabilistic language model is shown in Figure 1:
<p align="center">
<img src="images/network_conf.png" width = "70%" align="center"/><br/>
Figure 1. 5-gram network configuration
</p>
The model consists of the following parts:
1. **Input layer**: the input samples consist of raw English words, and each word is first converted into its id in the dictionary.
2. **Word embedding layer**: the ids are mapped by the embedding layer to continuous word vector representations, which better capture semantic relations between words. After training, semantic similarity between words can be measured by the distance between their vectors: the more similar the meaning, the closer the vectors.
3. **Word vector concatenation layer**: the word vectors are concatenated end to end into one long vector, which is convenient for the following fully connected layer.
4. **Fully connected hidden layer**: the long vector from the previous layer is fed into a one-layer hidden network, which outputs a feature vector. The fully connected hidden layer strengthens the network's learning capacity.
5. **NCE layer**: during training, the `paddle.layer.nce` layer provided by PaddlePaddle can be used directly as the loss function.
## Training
Run ``` python train.py ``` in a terminal to start the training task directly.
- On the first run, the program checks whether the ptb dataset is in the user's cache directory and downloads it automatically if not.
- During training, the cost on the training set is printed every 10 batches.
- At the end of each pass, the loss on the test set is computed and the latest model snapshot is saved.
The NCE call in the model file `network_conf.py` is as follows:
```python
return paddle.layer.nce(
input=hidden_layer,
label=next_word,
num_classes=dict_size,
param_attr=paddle.attr.Param(name="nce_w"),
bias_attr=paddle.attr.Param(name="nce_b"),
num_neg_samples=25,
neg_distribution=None)
```
Some important parameters of the NCE layer are explained below:
| Parameter | Role | Notes |
|:------ |:-------| :--------|
| param\_attr / bias\_attr | Set the parameter names | Makes it easy to load the parameters at prediction time, as described in the Prediction section. |
| num\_neg\_samples | Number of negative samples | Controls the positive/negative sample ratio; the valid range is [1, dictionary size - 1]. More negative samples slow down training but increase model accuracy. |
| neg\_distribution | Distribution used to sample negative labels, uniform by default | Lets you control the sampling weight of each class for negative samples. For example, if for the positive label "sunny" you want the negative label "flood" to be distinguished more strongly during training, you can increase the sampling weight of the class "flood". |
| act | Activation function to use | According to the NCE principle, sigmoid should be used here. |
## Prediction
1. Run on the command line:
```bash
python infer.py \
--model_path "models/XX" \
--batch_size 1 \
--use_gpu false \
--trainer_count 1
```
The arguments are:
- `model_path`: path of the trained model. Required.
- `batch_size`: number of samples predicted in parallel at a time. Optional, default `1`.
- `use_gpu`: whether to use GPU for prediction. Optional, default `False`.
- `trainer_count`: number of threads used for prediction. Optional, default `1`. **Note: the number of threads used for prediction must be greater than the number of samples predicted in parallel at a time**.
2. Note that **the computation at prediction time differs from that at training time**. Prediction uses a fully connected matrix multiplication followed by a `softmax` activation to output a probability distribution over the classes, replacing the `paddle.layer.nce` layer used during training. In PaddlePaddle, the NCE layer stores its learnable parameters as a matrix of size `[number of classes × width of the previous layer's output]`, so at prediction time **the fully connected operation must transpose the parameters learned by the NCE layer when loading them**, as in the following code:
```python
return paddle.layer.mixed(
size=dict_size,
input=paddle.layer.trans_full_matrix_projection(
hidden_layer, param_attr=paddle.attr.Param(name="nce_w")),
act=paddle.activation.Sigmoid(),
bias_attr=paddle.attr.Param(name="nce_b"))
```
`paddle.layer.mixed` in the snippet above must take PaddlePaddle `paddle.layer.×_projection` layers as input. `paddle.layer.mixed` sums the results of its `projection` inputs (there can be several) as its output. `paddle.layer.trans_full_matrix_projection` transposes the parameter $W$ when computing the matrix multiplication.
3. The prediction output format is as follows:
```text
0.6734 their may want to move
```
Each line is one prediction result, separated internally by "\t" into 3 columns:
- Column 1: the probability of the next word.
- Column 2: the next word predicted by the model.
- Column 3: the $n$ input words, separated by spaces.
## References
1. Gutmann M, Hyvärinen A. [Noise-contrastive estimation: A new estimation principle for unnormalized statistical models](http://proceedings.mlr.press/v9/gutmann10a/gutmann10a.pdf)[C]//Proceedings of the Thirteenth International Conference on Artificial Intelligence and Statistics. 2010: 297-304.
1. Mnih A, Kavukcuoglu K. [Learning word embeddings efficiently with noise-contrastive estimation](https://papers.nips.cc/paper/5165-learning-word-embeddings-efficiently-with-noise-contrastive-estimation.pdf)[C]//Advances in neural information processing systems. 2013: 2265-2273.
1. Mnih A, Teh Y W. [A Fast and Simple Algorithm for Training Neural Probabilistic Language Models](http://xueshu.baidu.com/s?wd=paperuri%3A%280735b97df93976efb333ac8c266a1eb2%29&filter=sc_long_sign&tn=SE_xueshusource_2kduw22v&sc_vurl=http%3A%2F%2Farxiv.org%2Fabs%2F1206.6426&ie=utf-8&sc_us=5770715420073315630)[J]. Computer Science, 2012:1751-1758.
</div>
<!-- You can change the lines below now. -->
<script type="text/javascript">
marked.setOptions({
renderer: new marked.Renderer(),
gfm: true,
breaks: false,
smartypants: true,
highlight: function(code, lang) {
code = code.replace(/&amp;/g, "&")
code = code.replace(/&gt;/g, ">")
code = code.replace(/&lt;/g, "<")
code = code.replace(/&nbsp;/g, " ")
return hljs.highlightAuto(code, [lang]).value;
}
});
document.getElementById("context").innerHTML = marked(
document.getElementById("markdown").innerHTML)
</script>
</body>
<html>
<head>
<script type="text/x-mathjax-config">
MathJax.Hub.Config({
extensions: ["tex2jax.js", "TeX/AMSsymbols.js", "TeX/AMSmath.js"],
jax: ["input/TeX", "output/HTML-CSS"],
tex2jax: {
inlineMath: [ ['$','$'] ],
displayMath: [ ['$$','$$'] ],
processEscapes: true
},
"HTML-CSS": { availableFonts: ["TeX"] }
});
</script>
<script src="https://cdnjs.cloudflare.com/ajax/libs/mathjax/2.7.0/MathJax.js" async></script>
<script type="text/javascript" src="../.tools/theme/marked.js">
</script>
<link href="http://cdn.bootcss.com/highlight.js/9.9.0/styles/darcula.min.css" rel="stylesheet">
<script src="http://cdn.bootcss.com/highlight.js/9.9.0/highlight.min.js"></script>
<link href="http://cdn.bootcss.com/bootstrap/4.0.0-alpha.6/css/bootstrap.min.css" rel="stylesheet">
<link href="https://cdn.jsdelivr.net/perfect-scrollbar/0.6.14/css/perfect-scrollbar.min.css" rel="stylesheet">
<link href="../.tools/theme/github-markdown.css" rel='stylesheet'>
</head>
<style type="text/css" >
.markdown-body {
box-sizing: border-box;
min-width: 200px;
max-width: 980px;
margin: 0 auto;
padding: 45px;
}
</style>
<body>
<div id="context" class="container-fluid markdown-body">
</div>
<!-- This block will be replaced by each markdown file content. Please do not change lines below.-->
<div id="markdown" style='display:none'>
## Introduction
Sequences are a common type of input data in many machine learning and data mining tasks. Taking natural language processing as an example: sentences are composed of words, and several sentences in turn form a paragraph. A paragraph can therefore be viewed as a nested sequence (also called a double-layer sequence), each element of which is itself a sequence.
The double-layer sequence is a very flexible data organization supported by PaddlePaddle that helps us describe more complex data such as paragraphs and multi-turn dialogues. With double-layer sequences as input, we can design hierarchical networks to better solve complex tasks.
This unit introduces how to use double-layer sequences in PaddlePaddle.
- [Text classification based on double-layer sequences](https://github.com/PaddlePaddle/models/tree/develop/nested_sequence/text_classification)
</div>
<!-- You can change the lines below now. -->
<script type="text/javascript">
marked.setOptions({
renderer: new marked.Renderer(),
gfm: true,
breaks: false,
smartypants: true,
highlight: function(code, lang) {
code = code.replace(/&amp;/g, "&")
code = code.replace(/&gt;/g, ">")
code = code.replace(/&lt;/g, "<")
code = code.replace(/&nbsp;/g, " ")
return hljs.highlightAuto(code, [lang]).value;
}
});
document.getElementById("context").innerHTML = marked(
document.getElementById("markdown").innerHTML)
</script>
</body>
<html>
<head>
<script type="text/x-mathjax-config">
MathJax.Hub.Config({
extensions: ["tex2jax.js", "TeX/AMSsymbols.js", "TeX/AMSmath.js"],
jax: ["input/TeX", "output/HTML-CSS"],
tex2jax: {
inlineMath: [ ['$','$'] ],
displayMath: [ ['$$','$$'] ],
processEscapes: true
},
"HTML-CSS": { availableFonts: ["TeX"] }
});
</script>
<script src="https://cdnjs.cloudflare.com/ajax/libs/mathjax/2.7.0/MathJax.js" async></script>
<script type="text/javascript" src="../.tools/theme/marked.js">
</script>
<link href="http://cdn.bootcss.com/highlight.js/9.9.0/styles/darcula.min.css" rel="stylesheet">
<script src="http://cdn.bootcss.com/highlight.js/9.9.0/highlight.min.js"></script>
<link href="http://cdn.bootcss.com/bootstrap/4.0.0-alpha.6/css/bootstrap.min.css" rel="stylesheet">
<link href="https://cdn.jsdelivr.net/perfect-scrollbar/0.6.14/css/perfect-scrollbar.min.css" rel="stylesheet">
<link href="../.tools/theme/github-markdown.css" rel='stylesheet'>
</head>
<style type="text/css" >
.markdown-body {
box-sizing: border-box;
min-width: 200px;
max-width: 980px;
margin: 0 auto;
padding: 45px;
}
</style>
<body>
<div id="context" class="container-fluid markdown-body">
</div>
<!-- This block will be replaced by each markdown file content. Please do not change lines below.-->
<div id="markdown" style='display:none'>
# Text Classification Based on Double-layer Sequences
## Introduction
This example demonstrates how to organize long text input (usually at the paragraph or document level) as a double-layer sequence in PaddlePaddle and perform a classification task on the long text.
## Model Introduction
We regard a piece of text as a sequence of sentences, and each sentence as a sequence of words.
We first encode every sentence of the paragraph with a convolutional neural network; then the representation vector of each sentence is passed through a pooling layer to obtain the encoding vector of the paragraph; finally, the paragraph encoding vector is fed into the classifier (a fully connected layer with softmax activation) to produce the final classification result.
**The model structure is shown in the figure below**
<p align="center">
<img src="images/model.jpg" width = "60%" align="center"/><br/>
Figure 1. Text classification model based on double-layer sequences
</p>
The PaddlePaddle implementation of this network structure can be found in `network_conf.py`.
To process a double-layer time series, it first has to be transformed into single-layer time series, and each single-layer series is then processed. In PaddlePaddle, `recurrent_group` is the main tool for building hierarchical models that handle double-layer sequences. Here we use two nested `recurrent_group`s: the outer `recurrent_group` breaks the paragraph into sentences, so the input received in its `step` function is a sequence of sentences; the inner `recurrent_group` breaks a sentence into words, so the input received in its `step` function is non-sequential words.
At the word level, a CNN takes word vectors as input and outputs the learned sentence representation; at the paragraph level, the representation of each sentence is pooled to obtain the paragraph representation.
``` python
nest_group = paddle.layer.recurrent_group(input=[paddle.layer.SubsequenceInput(emb),
hidden_size],
step=cnn_cov_group)
```
The decomposed single-layer sequence data passes through a CNN that learns the corresponding vector representation. The CNN consists of the following parts:
- **Convolution layer**: the convolution in text classification is performed along the time dimension; the kernel width matches the matrix produced by the word embedding layer, and the convolution produces a "feature map". Using several kernels of different heights gives several feature maps. This example uses kernels of size 3 (red box in Figure 1) and 4 (blue box in Figure 1) by default.
- **Max pooling layer**: max pooling is applied to each feature map separately. Since a feature map is already a vector, max pooling simply selects the largest element of each vector; all the maxima are then concatenated into a new vector.
- **Linear projection layer**: the results of the different convolutions, concatenated after max pooling into one long vector, are linearly projected to obtain the representation vector of the single-layer sequence.
The CNN is implemented as follows:
```python
def cnn_cov_group(group_input, hidden_size):
"""
Convolution group definition.
:param group_input: The input of this layer.
:type group_input: LayerOutput
:params hidden_size: The size of the fully connected layer.
:type hidden_size: int
"""
conv3 = paddle.networks.sequence_conv_pool(
input=group_input, context_len=3, hidden_size=hidden_size)
conv4 = paddle.networks.sequence_conv_pool(
input=group_input, context_len=4, hidden_size=hidden_size)
linear_proj = paddle.layer.fc(input=[conv3, conv4],
size=hidden_size,
param_attr=paddle.attr.ParamAttr(name='_cov_value_weight'),
bias_attr=paddle.attr.ParamAttr(name='_cov_value_bias'),
act=paddle.activation.Linear())
return linear_proj
```
PaddlePaddle already provides a text sequence convolution module with pooling, `paddle.networks.sequence_conv_pool`, which can be called directly.
After the representation vector of each sentence is obtained, all the sentence vectors are passed through an average pooling layer to obtain the vector representation of one sample, which then goes through a fully connected layer to produce the final prediction. The code is as follows:
```python
avg_pool = paddle.layer.pooling(input=nest_group,
pooling_type=paddle.pooling.Avg(),
agg_level=paddle.layer.AggregateLevel.TO_NO_SEQUENCE)
prob = paddle.layer.mixed(size=class_num,
input=[paddle.layer.full_matrix_projection(input=avg_pool)],
act=paddle.activation.Softmax())
```
## Installing Dependencies
```bash
pip install -r requirements.txt
```
## Specifying Training Configuration Parameters
Training and model configuration parameters are modified through the `config.py` script, which contains detailed explanations of the configurable parameters. An example is as follows:
```python
class TrainerConfig(object):
# whether to use GPU for training
use_gpu = False
# the number of threads used in one machine
trainer_count = 1
# train batch size
batch_size = 32
...
class ModelConfig(object):
# embedding vector dimension
emb_size = 28
...
```
Adjust parameters by modifying `config.py`. For example, modify the `use_gpu` parameter to specify whether to train on GPU.
## Running with PaddlePaddle Built-in Data
### Training
Run in a terminal:
```bash
python train.py
```
This runs the example on `imdb`, the sentiment classification dataset built into PaddlePaddle.
### Prediction
After training, the models are stored in the specified directory (the models directory by default). Run in a terminal:
```bash
python infer.py --model_path 'models/params_pass_00000.tar.gz'
```
By default, the prediction script loads the model trained for one pass and tests it on the `imdb test set`.
## Training and Prediction with Custom Data
### Training
1. Data organization
The input data format is as follows: each line is one sample, separated by `\t`; the first column is the class label, and the second column is the input text. Two example lines:
```
positive This movie is very good. The actor is so handsome.
negative What a terrible movie. I waste so much time.
```
2. Writing the data reading interface

A custom data reading interface only requires a Python generator that implements the logic of **parsing one training sample from the raw input text**. The following code reads the raw data and returns samples of types `paddle.data_type.integer_value_sub_sequence` and `paddle.data_type.integer_value`:
```python
def train_reader(data_dir, word_dict, label_dict):
    """
    Reader interface for training data

    :param data_dir: data directory
    :type data_dir: str
    :param word_dict: the word dictionary,
        which must contain an "<unk>" entry.
    :type word_dict: Python dict
    :param label_dict: the label dictionary.
    :type label_dict: Python dict
    """

    def reader():
        UNK_ID = word_dict['<unk>']
        word_col = 1
        lbl_col = 0

        for file_name in os.listdir(data_dir):
            file_path = os.path.join(data_dir, file_name)
            if not os.path.isfile(file_path):
                continue
            with open(file_path, "r") as f:
                for line in f:
                    line_split = line.strip().split("\t")
                    doc = line_split[word_col]
                    doc_ids = []
                    for sent in doc.strip().split("."):
                        sent_ids = [
                            word_dict.get(w, UNK_ID)
                            for w in sent.split()]
                        if sent_ids:
                            doc_ids.append(sent_ids)

                    yield doc_ids, label_dict[line_split[lbl_col]]

    return reader
```
Note that this example uses the English period `'.'` as the delimiter that splits a piece of text into sentences, and each sentence is represented as an array of word-dictionary indices (`sent_ids`). Because the representation of a sample (`doc_ids`) contains all the sentences of that text, its type is `paddle.data_type.integer_value_sub_sequence`.
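The following minimal, self-contained sketch shows this conversion on one line of input, using hypothetical toy dictionaries (the dictionary contents and the sample line are made up for illustration; the logic mirrors the reader above):

```python
# Hypothetical toy dictionaries, for illustration only.
word_dict = {'<unk>': 0, 'movie': 1, 'is': 2, 'very': 3, 'good': 4}
label_dict = {'positive': 0, 'negative': 1}
UNK_ID = word_dict['<unk>']

line = "positive\tThis movie is very good. The actor is so handsome."
label, doc = line.strip().split("\t")

# One sub-sequence (a list of word ids) per sentence, exactly as in the reader.
doc_ids = []
for sent in doc.strip().split("."):
    sent_ids = [word_dict.get(w, UNK_ID) for w in sent.split()]
    if sent_ids:
        doc_ids.append(sent_ids)

print(doc_ids)            # [[0, 1, 2, 3, 4], [0, 0, 2, 0, 0]]
print(label_dict[label])  # 0
```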
3. Training with command-line arguments

The `train.py` training script accepts the following arguments:
```
Options:
  --train_data_dir TEXT   The path of training dataset (default: None). If
                          this parameter is not set, imdb dataset will be
                          used.
  --test_data_dir TEXT    The path of testing dataset (default: None). If this
                          parameter is not set, imdb dataset will be used.
  --word_dict_path TEXT   The path of word dictionary (default: None). If this
                          parameter is not set, imdb dataset will be used. If
                          this parameter is set, but the file does not exist,
                          word dictionary will be built from the training data
                          automatically.
  --label_dict_path TEXT  The path of label dictionary (default: None). If this
                          parameter is not set, imdb dataset will be used. If
                          this parameter is set, but the file does not exist,
                          label dictionary will be built from the training data
                          automatically.
  --model_save_dir TEXT   The path to save the trained models (default:
                          'models').
  --help                  Show this message and exit.
```
The example can be run directly by adjusting the launch arguments of `train.py`. Taking the sample data under the `data` directory as an example, run in a terminal:
```bash
python train.py \
--train_data_dir 'data/train_data' \
--test_data_dir 'data/test_data' \
--word_dict_path 'word_dict.txt' \
--label_dict_path 'label_dict.txt'
```
to train on the sample data.

### Prediction

1. Specifying command-line arguments

The `infer.py` prediction script accepts the following arguments:
```
Options:
  --data_path TEXT        The path of data for inference (default: None). If
                          this parameter is not set, imdb test dataset will be
                          used.
  --model_path TEXT       The path of saved model.  [required]
  --word_dict_path TEXT   The path of word dictionary (default: None). If this
                          parameter is not set, imdb dataset will be used.
  --label_dict_path TEXT  The path of label dictionary (default: None). If this
                          parameter is not set, imdb dataset will be used.
  --batch_size INTEGER    The number of examples in one batch (default: 32).
  --help                  Show this message and exit.
```
2. Taking the sample data under the `data` directory as an example, run in a terminal:
```bash
python infer.py \
--data_path 'data/infer.txt' \
--word_dict_path 'word_dict.txt' \
--label_dict_path 'label_dict.txt' \
--model_path 'models/params_pass_00000.tar.gz'
```
to run prediction on the sample data.
</div>
<!-- You can change the lines below now. -->
<script type="text/javascript">
marked.setOptions({
renderer: new marked.Renderer(),
gfm: true,
breaks: false,
smartypants: true,
highlight: function(code, lang) {
code = code.replace(/&amp;/g, "&")
code = code.replace(/&gt;/g, ">")
code = code.replace(/&lt;/g, "<")
code = code.replace(/&nbsp;/g, " ")
return hljs.highlightAuto(code, [lang]).value;
}
});
document.getElementById("context").innerHTML = marked(
document.getElementById("markdown").innerHTML)
</script>
</body>
<html>
<head>
<script type="text/x-mathjax-config">
MathJax.Hub.Config({
extensions: ["tex2jax.js", "TeX/AMSsymbols.js", "TeX/AMSmath.js"],
jax: ["input/TeX", "output/HTML-CSS"],
tex2jax: {
inlineMath: [ ['$','$'] ],
displayMath: [ ['$$','$$'] ],
processEscapes: true
},
"HTML-CSS": { availableFonts: ["TeX"] }
});
</script>
<script src="https://cdnjs.cloudflare.com/ajax/libs/mathjax/2.7.0/MathJax.js" async></script>
<script type="text/javascript" src="../.tools/theme/marked.js">
</script>
<link href="http://cdn.bootcss.com/highlight.js/9.9.0/styles/darcula.min.css" rel="stylesheet">
<script src="http://cdn.bootcss.com/highlight.js/9.9.0/highlight.min.js"></script>
<link href="http://cdn.bootcss.com/bootstrap/4.0.0-alpha.6/css/bootstrap.min.css" rel="stylesheet">
<link href="https://cdn.jsdelivr.net/perfect-scrollbar/0.6.14/css/perfect-scrollbar.min.css" rel="stylesheet">
<link href="../.tools/theme/github-markdown.css" rel='stylesheet'>
</head>
<style type="text/css" >
.markdown-body {
box-sizing: border-box;
min-width: 200px;
max-width: 980px;
margin: 0 auto;
padding: 45px;
}
</style>
<body>
<div id="context" class="container-fluid markdown-body">
</div>
<!-- This block will be replaced by each markdown file content. Please do not change lines below.-->
<div id="markdown" style='display:none'>
# Neural Recurrent Sequence Labeling Model for Open-Domain Factoid Question Answering
This model implements the work in the following paper:
Peng Li, Wei Li, Zhengyan He, Xuguang Wang, Ying Cao, Jie Zhou, and Wei Xu. Dataset and Neural Recurrent Sequence Labeling Model for Open-Domain Factoid Question Answering. [arXiv:1607.06275](https://arxiv.org/abs/1607.06275).
If you use the dataset/code in your research, please cite the above paper:
```text
@article{li:2016:arxiv,
author = {Li, Peng and Li, Wei and He, Zhengyan and Wang, Xuguang and Cao, Ying and Zhou, Jie and Xu, Wei},
title = {Dataset and Neural Recurrent Sequence Labeling Model for Open-Domain Factoid Question Answering},
journal = {arXiv:1607.06275v2},
year = {2016},
url = {https://arxiv.org/abs/1607.06275v2},
}
```
## Installation
1. Install PaddlePaddle v0.10.5 with one of the following commands. Note that v0.10.0 is not supported.
```bash
# either one is OK
# CPU
pip install paddlepaddle
# GPU
pip install paddlepaddle-gpu
```
2. Download the [WebQA](http://idl.baidu.com/WebQA.html) dataset by running
```bash
cd data && ./download.sh && cd ..
```
## Hyperparameters
All the hyperparameters are defined in `config.py`. The default values are aligned with the paper.
## Training
Training can be launched using the following command:
```bash
PYTHONPATH=data/evaluation:$PYTHONPATH python train.py 2>&1 | tee train.log
```
## Validation and Test
WebQA provides two versions of validation and test sets. Automatic validation and test can be launched by
```bash
PYTHONPATH=data/evaluation:$PYTHONPATH python val_and_test.py models [ann|ir]
```
where
* `models`: the directory where model files are stored. You can use `models` if `config.py` is not changed.
* `ann`: using the validation and test sets with annotated evidence.
* `ir`: using the validation and test sets with retrieved evidence.
Note that validation and test can run simultaneously with training; `val_and_test.py` will handle the related synchronization issues.
Intermediate results are stored in the directory `tmp`. You can delete them safely after validation and test.
The results should be comparable with those shown in Table 3 in the paper.
## Inferring using a Trained Model
Infer using a trained model by running:
```bash
PYTHONPATH=data/evaluation:$PYTHONPATH python infer.py \
MODEL_FILE \
INPUT_DATA \
OUTPUT_FILE \
2>&1 | tee infer.log
```
where
* `MODEL_FILE`: a trained model produced by `train.py`.
* `INPUT_DATA`: input data in the same format as the validation/test sets of the WebQA dataset.
* `OUTPUT_FILE`: results in the format specified in the WebQA dataset for the evaluation scripts.
## Pre-trained Models
We provide two pre-trained models, one for the validation and test sets with annotated evidence, and one for those with retrieved evidence. Each model was selected according to its performance on the corresponding version of the validation set, consistent with the paper.
The models can be downloaded with
```bash
cd pre-trained-models && ./download-models.sh && cd ..
```
The evaluation result on the test set with annotated evidence can be reproduced by
```bash
PYTHONPATH=data/evaluation:$PYTHONPATH python infer.py \
pre-trained-models/params_pass_00010.tar.gz \
data/data/test.ann.json.gz \
test.ann.output.txt.gz
PYTHONPATH=data/evaluation:$PYTHONPATH \
python data/evaluation/evaluate-tagging-result.py \
test.ann.output.txt.gz \
data/data/test.ann.json.gz \
--fuzzy --schema BIO2
# The result should be
# chunk_f1=0.739091 chunk_precision=0.686119 chunk_recall=0.800926 true_chunks=3024 result_chunks=3530 correct_chunks=2422
```
And the evaluation result on the test set with retrieved evidence can be reproduced by
```bash
PYTHONPATH=data/evaluation:$PYTHONPATH python infer.py \
pre-trained-models/params_pass_00021.tar.gz \
data/data/test.ir.json.gz \
test.ir.output.txt.gz
PYTHONPATH=data/evaluation:$PYTHONPATH \
python data/evaluation/evaluate-voting-result.py \
test.ir.output.txt.gz \
data/data/test.ir.json.gz \
--fuzzy --schema BIO2
# The result should be
# chunk_f1=0.749358 chunk_precision=0.727868 chunk_recall=0.772156 true_chunks=3024 result_chunks=3208 correct_chunks=2335
```
</div>
<!-- You can change the lines below now. -->
<script type="text/javascript">
marked.setOptions({
renderer: new marked.Renderer(),
gfm: true,
breaks: false,
smartypants: true,
highlight: function(code, lang) {
code = code.replace(/&amp;/g, "&")
code = code.replace(/&gt;/g, ">")
code = code.replace(/&lt;/g, "<")
code = code.replace(/&nbsp;/g, " ")
return hljs.highlightAuto(code, [lang]).value;
}
});
document.getElementById("context").innerHTML = marked(
document.getElementById("markdown").innerHTML)
</script>
</body>
@@ -11,16 +11,16 @@ def ner_net(word_dict_len, label_dict_len, stack_num=2, is_train=True):
     hidden_dim = 128

     word = paddle.layer.data(
-        name='word',
+        name="word",
         type=paddle.data_type.integer_value_sequence(word_dict_len))
     word_embedding = paddle.layer.embedding(
         input=word,
         size=word_dim,
         param_attr=paddle.attr.Param(
-            name='emb', initial_std=math.sqrt(1. / word_dim), is_static=True))
+            name="emb", initial_std=math.sqrt(1. / word_dim), is_static=True))

     mark = paddle.layer.data(
-        name='mark',
+        name="mark",
         type=paddle.data_type.integer_value_sequence(mark_dict_len))
     mark_embedding = paddle.layer.embedding(
         input=mark,
@@ -35,7 +35,8 @@ def ner_net(word_dict_len, label_dict_len, stack_num=2, is_train=True):
     hidden_para_attr = paddle.attr.Param(
         initial_std=1 / math.sqrt(hidden_dim), learning_rate=mix_hidden_lr)

-    # the first rnn layer shares the input-to-hidden mappings.
+    # the first forward and backward rnn layers share the
+    # input-to-hidden mappings.
     hidden = paddle.layer.fc(
         name="__hidden00__",
         size=hidden_dim,
@@ -72,32 +73,40 @@ def ner_net(word_dict_len, label_dict_len, stack_num=2, is_train=True):
         input=fea,
         param_attr=[hidden_para_attr, rnn_para_attr] * 2)

+    # NOTE: This fully connected layer computes the emission features for
+    # the CRF layer. Because paddle.layer.crf performs global normalization
+    # over all possible sequences internally, it expects UNSCALED emission
+    # feature weights.
+    # Please do not add any nonlinear activation to this fully connected
+    # layer. The default activation of paddle.layer.fc is tanh, so it has to
+    # be set to linear explicitly.
     emission = paddle.layer.fc(
         size=label_dict_len,
         bias_attr=False,
         input=rnn_fea,
+        act=paddle.activation.Linear(),
         param_attr=rnn_para_attr)

     if is_train:
         target = paddle.layer.data(
-            name='target',
+            name="target",
             type=paddle.data_type.integer_value_sequence(label_dict_len))

         crf = paddle.layer.crf(
             size=label_dict_len,
             input=emission,
             label=target,
-            param_attr=paddle.attr.Param(name='crfw', initial_std=1e-3))
+            param_attr=paddle.attr.Param(name="crfw", initial_std=1e-3))

         crf_dec = paddle.layer.crf_decoding(
             size=label_dict_len,
             input=emission,
             label=target,
-            param_attr=paddle.attr.Param(name='crfw'))
+            param_attr=paddle.attr.Param(name="crfw"))
         return crf, crf_dec, target
     else:
         predict = paddle.layer.crf_decoding(
             size=label_dict_len,
             input=emission,
-            param_attr=paddle.attr.Param(name='crfw'))
+            param_attr=paddle.attr.Param(name="crfw"))
         return predict
+import os
 import gzip
 import numpy as np
...
@@ -46,10 +46,10 @@ def train(topology,
         word_dict = paddle.dataset.imdb.word_dict()
         train_reader = paddle.batch(
             paddle.reader.shuffle(
-                lambda: paddle.dataset.imdb.train(word_dict), buf_size=1000),
+                lambda: paddle.dataset.imdb.train(word_dict)(), buf_size=1000),
             batch_size=100)
         test_reader = paddle.batch(
-            lambda: paddle.dataset.imdb.test(word_dict), batch_size=100)
+            lambda: paddle.dataset.imdb.test(word_dict)(), batch_size=100)
         class_num = 2
     else:
...