From 7c4ea4b35fa34321b6b64cf40ee0786c39f50c73 Mon Sep 17 00:00:00 2001
From: Travis CI
Date: Tue, 23 Jan 2018 14:57:32 +0000
Subject: [PATCH] Deploy to GitHub Pages: e7d44a2034ddafabc53446221b3f37c433edf375

---
 develop/doc/_sources/design/ops/sequence_decoder.md.txt    | 4 ++--
 develop/doc/design/ops/sequence_decoder.html               | 4 ++--
 develop/doc_cn/_sources/design/ops/sequence_decoder.md.txt | 4 ++--
 develop/doc_cn/design/ops/sequence_decoder.html            | 4 ++--
 4 files changed, 8 insertions(+), 8 deletions(-)

diff --git a/develop/doc/_sources/design/ops/sequence_decoder.md.txt b/develop/doc/_sources/design/ops/sequence_decoder.md.txt
index 9db5fb8e9a9..c4a9bbeeefc 100644
--- a/develop/doc/_sources/design/ops/sequence_decoder.md.txt
+++ b/develop/doc/_sources/design/ops/sequence_decoder.md.txt
@@ -22,7 +22,7 @@ The current `LoDTensor` is designed to store levels of variable-length sequences
 
 The integers in each level represent the begin and end (not inclusive) offset of a sequence **in the underlying tensor**, let's call this format the **absolute-offset LoD** for clarity.
 
-The relative-offset LoD can retrieve any sequence very quickly but fails to represent empty sequences, for example, a two-level LoD is as follows
+The absolute-offset LoD can retrieve any sequence very quickly but fails to represent empty sequences, for example, a two-level LoD is as follows
 ```python
 [[0, 3, 9]
  [0, 2, 3, 3, 3, 9]]
@@ -119,7 +119,7 @@ def generate():
         encoder_ctx_expanded = pd.lod_expand(encoder_ctx, target_word)
         decoder_input = pd.fc(
             act=pd.activation.Linear(),
-            input=[target_word, encoder_ctx],
+            input=[target_word, encoder_ctx_expanded],
             size=3 * decoder_dim)
         gru_out, cur_mem = pd.gru_step(
             decoder_input, mem=decoder_mem, size=decoder_dim)

diff --git a/develop/doc/design/ops/sequence_decoder.html b/develop/doc/design/ops/sequence_decoder.html
index c3be3d4aea5..1cf28a854ea 100644
--- a/develop/doc/design/ops/sequence_decoder.html
+++ b/develop/doc/design/ops/sequence_decoder.html
@@ -228,7 +228,7 @@ the selected candidate’s IDs in each time step can be stored in a
 The current LoDTensor is designed to store levels of variable-length sequences. It stores several arrays of integers where each represents a level.
 The integers in each level represent the begin and end (not inclusive) offset of a sequence in the underlying tensor, let’s call this format the absolute-offset LoD for clarity.
 
-The relative-offset LoD can retrieve any sequence very quickly but fails to represent empty sequences, for example, a two-level LoD is as follows
+The absolute-offset LoD can retrieve any sequence very quickly but fails to represent empty sequences, for example, a two-level LoD is as follows
 [[0, 3, 9]
  [0, 2, 3, 3, 3, 9]]
 
@@ -315,7 +315,7 @@ It is easy to find out the second sequence in the first-level LoD has two empty
         encoder_ctx_expanded = pd.lod_expand(encoder_ctx, target_word)
         decoder_input = pd.fc(
             act=pd.activation.Linear(),
-            input=[target_word, encoder_ctx],
+            input=[target_word, encoder_ctx_expanded],
             size=3 * decoder_dim)
         gru_out, cur_mem = pd.gru_step(
             decoder_input, mem=decoder_mem, size=decoder_dim)

diff --git a/develop/doc_cn/_sources/design/ops/sequence_decoder.md.txt b/develop/doc_cn/_sources/design/ops/sequence_decoder.md.txt
index 9db5fb8e9a9..c4a9bbeeefc 100644
--- a/develop/doc_cn/_sources/design/ops/sequence_decoder.md.txt
+++ b/develop/doc_cn/_sources/design/ops/sequence_decoder.md.txt
@@ -22,7 +22,7 @@ The current `LoDTensor` is designed to store levels of variable-length sequences
 
 The integers in each level represent the begin and end (not inclusive) offset of a sequence **in the underlying tensor**, let's call this format the **absolute-offset LoD** for clarity.
 
-The relative-offset LoD can retrieve any sequence very quickly but fails to represent empty sequences, for example, a two-level LoD is as follows
+The absolute-offset LoD can retrieve any sequence very quickly but fails to represent empty sequences, for example, a two-level LoD is as follows
 ```python
 [[0, 3, 9]
  [0, 2, 3, 3, 3, 9]]
@@ -119,7 +119,7 @@ def generate():
         encoder_ctx_expanded = pd.lod_expand(encoder_ctx, target_word)
         decoder_input = pd.fc(
             act=pd.activation.Linear(),
-            input=[target_word, encoder_ctx],
+            input=[target_word, encoder_ctx_expanded],
             size=3 * decoder_dim)
         gru_out, cur_mem = pd.gru_step(
             decoder_input, mem=decoder_mem, size=decoder_dim)

diff --git a/develop/doc_cn/design/ops/sequence_decoder.html b/develop/doc_cn/design/ops/sequence_decoder.html
index 6a3419fb5cc..e516c928fc0 100644
--- a/develop/doc_cn/design/ops/sequence_decoder.html
+++ b/develop/doc_cn/design/ops/sequence_decoder.html
@@ -247,7 +247,7 @@ the selected candidate’s IDs in each time step can be stored in a
 The current LoDTensor is designed to store levels of variable-length sequences. It stores several arrays of integers where each represents a level.
 The integers in each level represent the begin and end (not inclusive) offset of a sequence in the underlying tensor, let’s call this format the absolute-offset LoD for clarity.
 
-The relative-offset LoD can retrieve any sequence very quickly but fails to represent empty sequences, for example, a two-level LoD is as follows
+The absolute-offset LoD can retrieve any sequence very quickly but fails to represent empty sequences, for example, a two-level LoD is as follows
 [[0, 3, 9]
  [0, 2, 3, 3, 3, 9]]
 
@@ -334,7 +334,7 @@ It is easy to find out the second sequence in the first-level LoD has two empty
         encoder_ctx_expanded = pd.lod_expand(encoder_ctx, target_word)
         decoder_input = pd.fc(
             act=pd.activation.Linear(),
-            input=[target_word, encoder_ctx],
+            input=[target_word, encoder_ctx_expanded],
             size=3 * decoder_dim)
         gru_out, cur_mem = pd.gru_step(
             decoder_input, mem=decoder_mem, size=decoder_dim)
--
GitLab
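
Note on the first hunk in each file: the corrected sentence names the absolute-offset LoD format, in which every level stores begin/end offsets directly into the underlying tensor, so retrieving any sequence is a single slice, while empty sequences lose their place in the hierarchy. A minimal plain-Python sketch of that trade-off (no PaddlePaddle dependency; `lod`, `data`, and `get_seq` are illustrative names, not library API):

```python
# Absolute-offset LoD from the patched example: both levels hold
# begin/end offsets into the same underlying tensor.
lod = [[0, 3, 9],
       [0, 2, 3, 3, 3, 9]]
data = list(range(9))  # stand-in for the rows of the underlying tensor

def get_seq(level, i):
    """Retrieve the i-th sequence on a level with a single slice."""
    begin, end = lod[level][i], lod[level][i + 1]
    return data[begin:end]

print(get_seq(0, 1))                 # [3, 4, 5, 6, 7, 8]: one-lookup retrieval
print(get_seq(1, 2), get_seq(1, 3))  # [] []: the two empty sequences

# The weakness the sentence points out: both empty sequences sit at
# offset (3, 3). Because every level points into the underlying tensor
# rather than into the level below, nothing records whether they belong
# to the first top-level sequence (ending at 3) or the second
# (starting at 3).
```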
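The second hunk in each file fixes a real bug in the decoder snippet: `pd.lod_expand` replicates `encoder_ctx` so that its rows line up one per candidate in `target_word`, and `pd.fc` must consume that expanded tensor, otherwise its two inputs disagree on the number of rows. A hedged NumPy sketch of the shape argument, assuming `lod_expand` behaves like a per-sequence row repeat (the shapes and counts below are invented for illustration, and plain concatenation stands in for the fc layer's multi-input projection):

```python
import numpy as np

encoder_ctx = np.random.rand(2, 4)  # one context vector per source sentence
target_word = np.random.rand(5, 8)  # 5 live candidates at this decode step
# Hypothetical per-sentence candidate counts, as target_word's LoD encodes:
counts = [2, 3]  # sentence 0 -> 2 candidates, sentence 1 -> 3 candidates

def lod_expand(ctx, counts):
    """Repeat each context row once per candidate derived from it."""
    return np.repeat(ctx, counts, axis=0)

encoder_ctx_expanded = lod_expand(encoder_ctx, counts)  # shape (5, 4)

# Rows now align one-to-one with target_word, so the inputs can be
# combined per candidate; the unexpanded (2, 4) context could not be.
decoder_input = np.concatenate([target_word, encoder_ctx_expanded], axis=1)
print(decoder_input.shape)  # (5, 12)
```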