diff --git a/paddle/fluid/operators/sequence_expand_op.cc b/paddle/fluid/operators/sequence_expand_op.cc
index d4bf6034ed56f922f70ba90dfa3ea1a9ec91d319..786fe63e7580ce16b946d5049a490eed2c3c6ced 100644
--- a/paddle/fluid/operators/sequence_expand_op.cc
+++ b/paddle/fluid/operators/sequence_expand_op.cc
@@ -145,7 +145,7 @@ and input(Y)
              [0, 3, 6, 6, 8]]
 ref_level: 0
 then we get 1-level LoDTensor
-    Out.lod = [[0, 2, 5, 8]]
+    Out.lod = [[0, 1, 2, 5, 8]]
     Out.data = [[a], [a], [b], [c], [d], [b], [c], [d]]
     Out.dims = [8, 1]
 
@@ -157,7 +157,7 @@ Given a common Tensor input(X)
 and input(Y)
     Y.lod = [[0, 2, 3, 6]]
 ref_level: -1
-then we a common Tensor
+then we get a common Tensor
     Out.data = [[a], [a], [b], [c], [c], [c]]
     Out.dims = [6, 1]
 
diff --git a/paddle/fluid/operators/sequence_expand_op.h b/paddle/fluid/operators/sequence_expand_op.h
index eea3cf0440fdde99d3086c06bcba9111666de0be..db7d8bd6821fabd9714a160970558291ec47197f 100644
--- a/paddle/fluid/operators/sequence_expand_op.h
+++ b/paddle/fluid/operators/sequence_expand_op.h
@@ -37,13 +37,6 @@ class SequenceExpandKernel : public framework::OpKernel<T> {
     int ref_level = context.Attr<int>("ref_level");
     auto& x_lod = x->lod();
     auto& y_lod = y->lod();
-    PADDLE_ENFORCE_GT(y_lod.size(), 0,
-                      "Level number of `Y`'s lod should be greater than 0.");
-    PADDLE_ENFORCE(
-        ref_level == -1 || (ref_level >= 0 && ref_level < y_lod.size()),
-        "Invlid `ref_level`, which should be either equal to -1 "
-        "or in [0, %d)",
-        y_lod.size());
 
     if (ref_level == -1) ref_level = y_lod.size() - 1;
 
diff --git a/python/paddle/fluid/tests/book/test_machine_translation.py b/python/paddle/fluid/tests/book/test_machine_translation.py
index fa38bd3762423497b82c3b421b3a1db4cd87525b..3a1a0859ecfd4ac5337e2112f8b22e32d8474f22 100644
--- a/python/paddle/fluid/tests/book/test_machine_translation.py
+++ b/python/paddle/fluid/tests/book/test_machine_translation.py
@@ -118,12 +118,12 @@ def decoder_decode(context, is_sparse):
             is_sparse=is_sparse)
 
         # use rnn unit to update rnn
-        current_state = pd.fc(input=[pre_ids_emb, pre_state_expanded],
+        current_state = pd.fc(input=[pre_state_expanded, pre_ids_emb],
                               size=decoder_size,
                               act='tanh')
-
+        current_state_with_lod = pd.lod_reset(x=current_state, y=pre_score)
         # use score to do beam search
-        current_score = pd.fc(input=current_state,
+        current_score = pd.fc(input=current_state_with_lod,
                               size=target_dict_dim,
                               act='softmax')
         topk_scores, topk_indices = pd.topk(current_score, k=50)
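
For context, the expansion rule that the corrected doc lines describe can be checked with a small pure-Python sketch. This is only an illustration of the documented LoD arithmetic, not the operator's implementation; the helper name `sequence_expand` is ours, and the values X.lod = [0, 1, 4] and Y's level-0 offsets [0, 2, 4] used in the first check are inferred from the doc example, since the hunk context does not show them.

```python
# Illustrative sketch of the sequence_expand doc examples (not the kernel code).
def sequence_expand(x_data, x_lod, y_ref_lod):
    """Expand x according to the offsets of Y's LoD at ref_level.

    x_data:    flat list of rows, e.g. [['a'], ['b'], ['c'], ['d']]
    x_lod:     offsets of X's last LoD level, or None for a common Tensor
    y_ref_lod: offsets of Y's LoD at ref_level, e.g. [0, 2, 4]
    Returns (out_data, out_lod); out_lod is None for a common Tensor input.
    """
    if x_lod is None:
        # Common Tensor: row i is repeated (y_ref_lod[i+1] - y_ref_lod[i]) times.
        out = []
        for i, row in enumerate(x_data):
            out.extend([row] * (y_ref_lod[i + 1] - y_ref_lod[i]))
        return out, None

    # LoDTensor: sequence i of X is copied as a whole, and every copy
    # becomes its own sequence in the output LoD.
    out, out_lod = [], [0]
    for i in range(len(x_lod) - 1):
        seq = x_data[x_lod[i]:x_lod[i + 1]]
        repeats = y_ref_lod[i + 1] - y_ref_lod[i]
        for _ in range(repeats):
            out.extend(seq)
            out_lod.append(out_lod[-1] + len(seq))
    return out, out_lod


# ref_level = 0 example: X holds sequences [a] and [b, c, d]; Y's level-0
# offsets [0, 2, 4] mean each sequence is copied twice.
data, lod = sequence_expand([['a'], ['b'], ['c'], ['d']], [0, 1, 4], [0, 2, 4])
assert data == [['a'], ['a'], ['b'], ['c'], ['d'], ['b'], ['c'], ['d']]
assert lod == [0, 1, 2, 5, 8]   # matches the corrected Out.lod

# ref_level = -1 example: common Tensor X with Y.lod = [[0, 2, 3, 6]].
data, lod = sequence_expand([['a'], ['b'], ['c']], None, [0, 2, 3, 6])
assert data == [['a'], ['a'], ['b'], ['c'], ['c'], ['c']]
assert lod is None
```

With these inputs the output LoD comes out as [0, 1, 2, 5, 8], i.e. each copy of an expanded sequence gets its own entry, which is exactly the correction made in the first hunk.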