提交 a281e101 编写于 作者: G guosheng

Make cc_test of beam_search_op and beam_search_decode_op run correctly

上级 741046e8
...@@ -288,8 +288,8 @@ set(GLOB_OP_LIB ${OP_LIBRARY} CACHE INTERNAL "Global OP library") ...@@ -288,8 +288,8 @@ set(GLOB_OP_LIB ${OP_LIBRARY} CACHE INTERNAL "Global OP library")
cc_test(gather_test SRCS gather_test.cc DEPS tensor) cc_test(gather_test SRCS gather_test.cc DEPS tensor)
cc_test(scatter_test SRCS scatter_test.cc DEPS tensor) cc_test(scatter_test SRCS scatter_test.cc DEPS tensor)
# cc_test(beam_search_decode_op_test SRCS beam_search_decode_op_test.cc DEPS lod_tensor) cc_test(beam_search_decode_op_test SRCS beam_search_decode_op_test.cc DEPS lod_tensor)
# cc_test(beam_search_op_test SRCS beam_search_op_test.cc DEPS lod_tensor beam_search_op) cc_test(beam_search_op_test SRCS beam_search_op_test.cc DEPS lod_tensor beam_search_op)
cc_test(strided_memcpy_test SRCS strided_memcpy_test.cc DEPS tensor memory) cc_test(strided_memcpy_test SRCS strided_memcpy_test.cc DEPS tensor memory)
cc_test(save_load_op_test SRCS save_load_op_test.cc DEPS save_op load_op) cc_test(save_load_op_test SRCS save_load_op_test.cc DEPS save_op load_op)
cc_test(save_load_combine_op_test SRCS save_load_combine_op_test.cc DEPS save_combine_op load_combine_op) cc_test(save_load_combine_op_test SRCS save_load_combine_op_test.cc DEPS save_combine_op load_combine_op)
......
...@@ -87,13 +87,9 @@ void BeamSearchDecodeFunctor::operator()() const { ...@@ -87,13 +87,9 @@ void BeamSearchDecodeFunctor::operator()() const {
BeamSearchDecoder<T> beam_search_decoder(beam_size_, end_id_); BeamSearchDecoder<T> beam_search_decoder(beam_size_, end_id_);
// Check if the tensor is on GPU. If so, use the CPU copy instead // Check if the tensor is on GPU. If so, use the CPU copy instead
if (tensor_on_gpu_) { if (tensor_on_gpu_) {
// beam_search_decoder.PackAllSteps(step_ids_, step_scores_, id_tensor_,
// score_tensor_);
beam_search_decoder.Backtrace(step_ids_, step_scores_, id_tensor_, beam_search_decoder.Backtrace(step_ids_, step_scores_, id_tensor_,
score_tensor_); score_tensor_);
} else { } else {
// beam_search_decoder.PackAllSteps(step_ids_origin_, step_scores_origin_,
// id_tensor_, score_tensor_);
beam_search_decoder.Backtrace(step_ids_origin_, step_scores_origin_, beam_search_decoder.Backtrace(step_ids_origin_, step_scores_origin_,
id_tensor_, score_tensor_); id_tensor_, score_tensor_);
} }
......
...@@ -28,41 +28,11 @@ using LoDTensorArray = framework::LoDTensorArray; ...@@ -28,41 +28,11 @@ using LoDTensorArray = framework::LoDTensorArray;
// all the lod have 2 levels. // all the lod have 2 levels.
// The First is source level, the second is sentence level. // The First is source level, the second is sentence level.
// source level describe how many candidate words for this source. // source level describe how many prefixes (branchs) for each source sentece
// sentence level describe these candidates belong to which prefix // (beam). sentence level describe how these candidates belong to the prefixes.
const size_t kSourceLevel = 0; const size_t kSourceLevel = 0;
const size_t kSentenceLevel = 1; const size_t kSentenceLevel = 1;
template <typename T>
struct BeamNode {
BeamNode(int64_t word_id, T score) : word_id_(word_id), score_(score) {}
~BeamNode() {
if (parent_) {
parent_->DropKid(this);
if (parent_->kids_.size() == 0UL) {
delete parent_;
}
}
VLOG(3) << "Delete BeamNode root with word_id:" << this->word_id_;
}
void AppendTo(BeamNode* parent) {
parent_ = parent;
parent->kids_.insert(this);
}
void DropKid(BeamNode* kid) { kids_.erase(kid); }
BeamNode* parent_ = nullptr;
std::unordered_set<BeamNode*> kids_;
int64_t word_id_;
T score_;
};
template <typename T>
using BeamNodeVector = std::vector<std::unique_ptr<BeamNode<T>>>;
template <typename T> template <typename T>
struct Sentence { struct Sentence {
std::vector<int64_t> word_ids; std::vector<int64_t> word_ids;
...@@ -77,25 +47,6 @@ struct BeamSearchDecoder { ...@@ -77,25 +47,6 @@ struct BeamSearchDecoder {
BeamSearchDecoder(size_t beam_size, int end_id) BeamSearchDecoder(size_t beam_size, int end_id)
: beam_size_(beam_size), end_id_(end_id) {} : beam_size_(beam_size), end_id_(end_id) {}
/**
* make a BeamNode and all it's related prefix BeanNode into a Sentence.
*/
Sentence<T> MakeSentence(const BeamNode<T>* node) const;
/**
* Param:
* cur_ids: LoDTensor of One step for word ID
* cur_scores: LoDTensor of One Step for word score
* prefixes_list: prefixes for each source sentence.
* sentence_vector_list: result sentence_vector for each source sentence.
* Return:
* a new prefixes list for each source of current step
*/
std::vector<BeamNodeVector<T>> PackTwoSteps(
const LoDTensor& cur_ids, const LoDTensor& cur_scores,
std::vector<BeamNodeVector<T>>* prefixes_list,
std::vector<SentenceVector<T>>* sentence_vector_list) const;
/** /**
* convert the result sentence_vector for each source sentence into two * convert the result sentence_vector for each source sentence into two
* LodTensor. * LodTensor.
...@@ -105,29 +56,18 @@ struct BeamSearchDecoder { ...@@ -105,29 +56,18 @@ struct BeamSearchDecoder {
* sentence_vector_list: sentence_vector for each source sentence. * sentence_vector_list: sentence_vector for each source sentence.
* id_tensor: result LoDTensor for sentences of id. * id_tensor: result LoDTensor for sentences of id.
* score_tensor: result LoDTensor for sentences of score. * score_tensor: result LoDTensor for sentences of score.
* reverse: whether ids of sentence in sentence_vector_list is reversed
* sort_by_score: whether to sort hypotheses of each sentence by scores.
*/ */
void ConvertSentenceVectorToLodTensor( void ConvertSentenceVectorToLodTensor(
std::vector<SentenceVector<T>> sentence_vector_list, LoDTensor* id_tensor, std::vector<SentenceVector<T>> sentence_vector_list, LoDTensor* id_tensor,
LoDTensor* score_tensor, bool reverse = false, LoDTensor* score_tensor, bool reverse = true,
bool sort_by_score = true) const; bool sort_by_score = true) const;
/** /**
* Pack all steps of id/score LodTensor into sentence LoDTensor * Gather the hypotheses for each source sentence by backtrace though the
* it's main logic is: * LoDTensorArray step_ids whose lods reserve the path in the tree.
* ```python
* prefix
* result_sentence
* result_lod_tensor
*
* for (step in steps):
* prefix = PackTwoSteps(prefix, step, &result_sentence)
* ConvertSentenceVector<T>ToLodTensor(result_sentence, &result_lod_tensor)
* ```
*/ */
void PackAllSteps(const LoDTensorArray& step_ids,
const LoDTensorArray& step_scores, LoDTensor* id_tensor,
LoDTensor* score_tensor) const;
void Backtrace(const LoDTensorArray& step_ids, void Backtrace(const LoDTensorArray& step_ids,
const LoDTensorArray& step_scores, LoDTensor* id_tensor, const LoDTensorArray& step_scores, LoDTensor* id_tensor,
LoDTensor* score_tensor) const; LoDTensor* score_tensor) const;
...@@ -136,80 +76,6 @@ struct BeamSearchDecoder { ...@@ -136,80 +76,6 @@ struct BeamSearchDecoder {
int end_id_; int end_id_;
}; };
template <typename T>
Sentence<T> BeamSearchDecoder<T>::MakeSentence(const BeamNode<T>* node) const {
Sentence<T> sentence;
while (node != nullptr) {
sentence.word_ids.emplace_back(node->word_id_);
sentence.scores.emplace_back(node->score_);
node = node->parent_;
}
std::reverse(std::begin(sentence.word_ids), std::end(sentence.word_ids));
std::reverse(std::begin(sentence.scores), std::end(sentence.scores));
return sentence;
}
template <typename T>
std::vector<BeamNodeVector<T>> BeamSearchDecoder<T>::PackTwoSteps(
const LoDTensor& cur_ids, const LoDTensor& cur_scores,
std::vector<BeamNodeVector<T>>* prefixes_list,
std::vector<SentenceVector<T>>* sentence_vector_list) const {
std::vector<BeamNodeVector<T>> result;
for (size_t src_idx = 0; src_idx < cur_ids.lod()[kSourceLevel].size() - 1;
++src_idx) {
size_t src_start = cur_ids.lod().at(kSourceLevel)[src_idx];
size_t src_end = cur_ids.lod().at(kSourceLevel)[src_idx + 1];
BeamNodeVector<T> beam_nodes;
// if prefixes size is 0, it means this is the first step. In this step,
// all candidate id is the start of candidate sentences.
if (prefixes_list->empty()) {
PADDLE_ENFORCE_EQ(cur_ids.lod().at(kSourceLevel).back(),
cur_ids.lod().at(kSentenceLevel).back(),
"in the first step");
for (size_t id_idx = src_start; id_idx < src_end; ++id_idx) {
beam_nodes.push_back(std::unique_ptr<BeamNode<T>>(new BeamNode<T>(
cur_ids.data<int64_t>()[id_idx], cur_scores.data<T>()[id_idx])));
}
} else {
BeamNodeVector<T>& prefixes = prefixes_list->at(src_idx);
SentenceVector<T>& sentence_vector = (*sentence_vector_list)[src_idx];
PADDLE_ENFORCE_EQ(src_end - src_start, prefixes.size(),
"prefix and candidate set number should be the same");
auto candidate_offset = cur_ids.lod()[kSentenceLevel];
for (size_t prefix_idx = 0; prefix_idx < prefixes.size(); ++prefix_idx) {
std::unique_ptr<BeamNode<T>>& prefix = prefixes[prefix_idx];
size_t candidate_start = candidate_offset[src_start + prefix_idx];
size_t candidate_end = candidate_offset[src_start + prefix_idx + 1];
if (candidate_start == candidate_end) {
VLOG(3) << "this sentence has no more candidate, "
"add to result sentence and rm it from beam tree";
sentence_vector.push_back(MakeSentence(prefix.get()));
prefix.reset();
} else {
for (size_t candidate_idx = candidate_start;
candidate_idx < candidate_end; ++candidate_idx) {
auto* candidate =
new BeamNode<T>(cur_ids.data<int64_t>()[candidate_idx],
cur_scores.data<T>()[candidate_idx]);
candidate->AppendTo(prefix.get());
beam_nodes.push_back(std::unique_ptr<BeamNode<T>>(candidate));
}
prefix.release();
}
}
}
result.push_back(std::move(beam_nodes));
}
return result;
}
template <typename T> template <typename T>
void BeamSearchDecoder<T>::ConvertSentenceVectorToLodTensor( void BeamSearchDecoder<T>::ConvertSentenceVectorToLodTensor(
std::vector<SentenceVector<T>> sentence_vector_list, LoDTensor* id_tensor, std::vector<SentenceVector<T>> sentence_vector_list, LoDTensor* id_tensor,
...@@ -273,42 +139,6 @@ void BeamSearchDecoder<T>::ConvertSentenceVectorToLodTensor( ...@@ -273,42 +139,6 @@ void BeamSearchDecoder<T>::ConvertSentenceVectorToLodTensor(
framework::TensorFromVector<T>(score_data, cpu_ctx, score_tensor); framework::TensorFromVector<T>(score_data, cpu_ctx, score_tensor);
} }
template <typename T>
void BeamSearchDecoder<T>::PackAllSteps(const LoDTensorArray& step_ids,
const LoDTensorArray& step_scores,
LoDTensor* id_tensor,
LoDTensor* score_tensor) const {
PADDLE_ENFORCE(!step_ids.empty(), "step num should be larger than 0");
PADDLE_ENFORCE_EQ(step_ids.size(), step_scores.size(),
"step_ids and step_scores should be the same");
const size_t step_num = step_ids.size();
const size_t src_num = step_ids.at(0).lod().at(kSourceLevel).size() - 1;
PADDLE_ENFORCE_GT(src_num, 0UL, "source num should be larger than 0");
// previous prefixes for each step,
// the init length is 0, means this is the first step.
std::vector<BeamNodeVector<T>> beamnode_vector_list(0);
std::vector<SentenceVector<T>> sentence_vector_list(src_num);
// pack all steps for one batch first, then another batch
for (size_t step_id = 0; step_id < step_num; ++step_id) {
beamnode_vector_list =
PackTwoSteps(step_ids.at(step_id), step_scores.at(step_id),
&beamnode_vector_list, &sentence_vector_list);
}
// append last beam_node to result
for (size_t src_idx = 0; src_idx < src_num; ++src_idx) {
for (auto& beam_node : beamnode_vector_list.at(src_idx)) {
sentence_vector_list[src_idx].push_back(MakeSentence(beam_node.get()));
beam_node.reset();
}
}
ConvertSentenceVectorToLodTensor(sentence_vector_list, id_tensor,
score_tensor);
}
template <typename T> template <typename T>
void BeamSearchDecoder<T>::Backtrace(const LoDTensorArray& step_ids, void BeamSearchDecoder<T>::Backtrace(const LoDTensorArray& step_ids,
const LoDTensorArray& step_scores, const LoDTensorArray& step_scores,
......
...@@ -20,15 +20,11 @@ using LoD = paddle::framework::LoD; ...@@ -20,15 +20,11 @@ using LoD = paddle::framework::LoD;
using LoDTensor = paddle::framework::LoDTensor; using LoDTensor = paddle::framework::LoDTensor;
using LoDTensorArray = paddle::framework::LoDTensorArray; using LoDTensorArray = paddle::framework::LoDTensorArray;
template <typename T>
using BeamNode = paddle::operators::BeamNode<T>;
template <typename T> template <typename T>
using BeamSearchDecoder = paddle::operators::BeamSearchDecoder<T>; using BeamSearchDecoder = paddle::operators::BeamSearchDecoder<T>;
template <typename T> template <typename T>
using Sentence = paddle::operators::Sentence<T>; using Sentence = paddle::operators::Sentence<T>;
template <typename T> template <typename T>
using BeamNodeVector = paddle::operators::BeamNodeVector<T>;
template <typename T>
using SentenceVector = paddle::operators::SentenceVector<T>; using SentenceVector = paddle::operators::SentenceVector<T>;
namespace paddle { namespace paddle {
...@@ -77,138 +73,50 @@ void GenerateExample(const std::vector<size_t>& level_0, ...@@ -77,138 +73,50 @@ void GenerateExample(const std::vector<size_t>& level_0,
} // namespace test } // namespace test
} // namespace paddle } // namespace paddle
TEST(BeamSearchDecodeOp, DeleteBeamNode) { TEST(BeamSearchDecodeOp, Backtrace) {
auto* root = new BeamNode<float>(0, 0);
auto* b1 = new BeamNode<float>(1, 1);
auto* b2 = new BeamNode<float>(2, 2);
auto* b3 = new BeamNode<float>(3, 3);
b1->AppendTo(root);
b2->AppendTo(root);
b3->AppendTo(b1);
delete b3;
delete b2;
}
TEST(BeamSearchDecodeOp, MakeSentence) {
auto* root = new BeamNode<float>(0, 0);
auto* b1 = new BeamNode<float>(1, 1);
auto* end = new BeamNode<float>(2, 2);
b1->AppendTo(root);
end->AppendTo(b1);
BeamSearchDecoder<float> helper;
Sentence<float> sentence = helper.MakeSentence(end);
delete end;
std::vector<int64_t> expect_ids = {0, 1, 2};
ASSERT_EQ(sentence.word_ids, expect_ids);
std::vector<float> expect_scores = {0, 1, 2};
ASSERT_EQ(sentence.scores, expect_scores);
}
TEST(BeamSearchDecodeOp, PackTwoStepsFistStep) {
CPUPlace place;
LoDTensorArray ids;
LoDTensorArray scores;
paddle::test::GenerateExample(
std::vector<size_t>{0, 2, 6}, std::vector<size_t>{0, 1, 2, 3, 4, 5, 6},
std::vector<int>{1, 2, 3, 4, 5, 6}, &ids, &scores);
std::vector<BeamNodeVector<float>> beamnode_vector_list;
std::vector<SentenceVector<float>> sentence_vector_list(
2, SentenceVector<float>());
BeamSearchDecoder<float> helper;
beamnode_vector_list = helper.PackTwoSteps(
ids[0], scores[0], &beamnode_vector_list, &sentence_vector_list);
ASSERT_EQ(beamnode_vector_list.size(), 2UL);
ASSERT_EQ(beamnode_vector_list[0].size(), 2UL);
ASSERT_EQ(beamnode_vector_list[1].size(), 4UL);
}
TEST(BeamSearchDecodeOp, PackTwoSteps) {
CPUPlace place;
// first source has three prefix
BeamNodeVector<float> source0_prefixes;
source0_prefixes.push_back(
std::unique_ptr<BeamNode<float>>(new BeamNode<float>(1, 1)));
source0_prefixes.push_back(
std::unique_ptr<BeamNode<float>>(new BeamNode<float>(0, 0)));
source0_prefixes.push_back(
std::unique_ptr<BeamNode<float>>(new BeamNode<float>(3, 3)));
// second source has two prefix
BeamNodeVector<float> source1_prefixes;
source1_prefixes.push_back(
std::unique_ptr<BeamNode<float>>(new BeamNode<float>(4, 4)));
source1_prefixes.push_back(
std::unique_ptr<BeamNode<float>>(new BeamNode<float>(5, 5)));
std::vector<BeamNodeVector<float>> beamnode_vector_list;
std::vector<SentenceVector<float>> sentence_vector_list(
2, SentenceVector<float>());
beamnode_vector_list.push_back(std::move(source0_prefixes));
beamnode_vector_list.push_back(std::move(source1_prefixes));
// generate data for one step
LoDTensorArray ids;
LoDTensorArray scores;
paddle::test::GenerateExample(std::vector<size_t>{0, 3, 5},
std::vector<size_t>{0, 1, 1, 3, 4, 5},
std::vector<int>{0, 1, 2, 3, 4}, &ids, &scores);
BeamSearchDecoder<float> helper1;
beamnode_vector_list = helper1.PackTwoSteps(
ids[0], scores[0], &beamnode_vector_list, &sentence_vector_list);
ASSERT_EQ(sentence_vector_list[0].size(), 1UL);
ASSERT_EQ(sentence_vector_list[1].size(), 0UL);
ASSERT_EQ(beamnode_vector_list[0].size(), 3UL);
ASSERT_EQ(beamnode_vector_list[1].size(), 2UL);
}
TEST(BeamSearchDecodeOp, PackAllSteps) {
CPUPlace place; CPUPlace place;
// we will constuct a sample data with 3 steps and 2 source sentences // we will constuct a sample data with 4 steps and 2 source sentences
// beam_size = 2, start_id = 0, end_id = 1
LoDTensorArray ids; LoDTensorArray ids;
LoDTensorArray scores; LoDTensorArray scores;
paddle::test::GenerateExample( paddle::test::GenerateExample(
std::vector<size_t>{0, 3, 6}, std::vector<size_t>{0, 1, 2, 3, 4, 5, 6}, std::vector<size_t>{0, 1, 2}, std::vector<size_t>{0, 1, 2},
std::vector<int>{1, 2, 3, 4, 5, 6}, &ids, &scores); std::vector<int>{0, 0}, &ids, &scores); // start with start_id
paddle::test::GenerateExample(std::vector<size_t>{0, 1, 2},
std::vector<size_t>{0, 2, 4},
std::vector<int>{2, 3, 4, 5}, &ids, &scores);
paddle::test::GenerateExample(std::vector<size_t>{0, 2, 4},
std::vector<size_t>{0, 2, 2, 4, 4},
std::vector<int>{3, 1, 5, 4}, &ids, &scores);
paddle::test::GenerateExample(std::vector<size_t>{0, 2, 4},
std::vector<size_t>{0, 1, 2, 3, 4},
std::vector<int>{1, 1, 3, 5}, &ids, &scores);
paddle::test::GenerateExample( paddle::test::GenerateExample(
std::vector<size_t>{0, 3, 6}, std::vector<size_t>{0, 1, 1, 3, 5, 5, 6}, std::vector<size_t>{0, 2, 4},
std::vector<int>{0, 1, 2, 3, 4, 5}, &ids, &scores); std::vector<size_t>{0, 0, 0, 2,
paddle::test::GenerateExample(std::vector<size_t>{0, 3, 6}, 2}, // the branchs of the first source sentence
std::vector<size_t>{0, 0, 1, 2, 3, 4, 5}, // are pruned since finished
std::vector<int>{0, 1, 2, 3, 4}, &ids, &scores); std::vector<int>{5, 1},
&ids, &scores);
ASSERT_EQ(ids.size(), 3UL); ASSERT_EQ(ids.size(), 5UL);
ASSERT_EQ(scores.size(), 3UL); ASSERT_EQ(scores.size(), 5UL);
BeamSearchDecoder<float> helper; BeamSearchDecoder<float> helper(2, 1); // beam_size = 2, end_id = 1
LoDTensor id_tensor; LoDTensor id_tensor;
LoDTensor score_tensor; LoDTensor score_tensor;
helper.PackAllSteps(ids, scores, &id_tensor, &score_tensor); helper.Backtrace(ids, scores, &id_tensor, &score_tensor);
LoD lod = id_tensor.lod(); LoD lod = id_tensor.lod();
std::vector<size_t> expect_source_lod = {0, 4, 8}; std::vector<size_t> expect_source_lod = {0, 2, 4};
EXPECT_EQ(lod[0], expect_source_lod); EXPECT_EQ(lod[0], expect_source_lod);
std::vector<size_t> expect_sentence_lod = {0, 1, 3, 6, 9, 10, 13, 16, 19}; std::vector<size_t> expect_sentence_lod = {0, 4, 7, 12, 17};
EXPECT_EQ(lod[1], expect_sentence_lod); EXPECT_EQ(lod[1], expect_sentence_lod);
// 2| 1, 0| 3, 1, 0| 3, 2, 1| 5| 4, 3, 2| 4, 4, 3| 6, 5, 4 std::vector<int> expect_data = {0, 2, 3, 1, 0, 2, 1, 0, 4,
std::vector<int> expect_data = {2, 1, 0, 3, 1, 0, 3, 2, 1, 5, 5, 3, 5, 0, 4, 5, 3, 1};
4, 3, 2, 4, 4, 3, 6, 5, 4};
ASSERT_EQ(id_tensor.dims()[0], static_cast<int64_t>(expect_data.size())); ASSERT_EQ(id_tensor.dims()[0], static_cast<int64_t>(expect_data.size()));
for (size_t i = 0; i < expect_data.size(); ++i) { for (size_t i = 0; i < expect_data.size(); ++i) {
ASSERT_EQ(id_tensor.data<int64_t>()[i], ASSERT_EQ(id_tensor.data<int64_t>()[i],
......
...@@ -13,7 +13,6 @@ See the License for the specific language governing permissions and ...@@ -13,7 +13,6 @@ See the License for the specific language governing permissions and
limitations under the License. */ limitations under the License. */
#include <algorithm> #include <algorithm>
#include <limits>
#include <map> #include <map>
#include <string> #include <string>
#include <vector> #include <vector>
...@@ -110,23 +109,6 @@ void BeamSearch::PruneEndBeams(const framework::LoDTensor &pre_ids, ...@@ -110,23 +109,6 @@ void BeamSearch::PruneEndBeams(const framework::LoDTensor &pre_ids,
} }
} }
int BeamSearch::PruneEndidCandidates(const framework::LoDTensor &pre_ids,
std::vector<std::vector<Item>> *items) {
auto *pre_ids_data = pre_ids.data<int64_t>();
int res = 0;
for (size_t offset = 0; offset < items->size(); offset++) {
auto prefix_id = pre_ids_data[offset];
if (prefix_id == end_id_) {
items->at(offset).clear();
} else {
res++;
}
}
return res;
}
std::vector<std::vector<BeamSearch::Item>> BeamSearch::ToMap( std::vector<std::vector<BeamSearch::Item>> BeamSearch::ToMap(
const std::vector<std::vector<Item>> &items, size_t element_num) { const std::vector<std::vector<Item>> &items, size_t element_num) {
std::vector<std::vector<Item>> result; std::vector<std::vector<Item>> result;
...@@ -201,8 +183,7 @@ bool BeamSearch::NextItemSet(const framework::LoDTensor &pre_ids, ...@@ -201,8 +183,7 @@ bool BeamSearch::NextItemSet(const framework::LoDTensor &pre_ids,
auto pre_score = pre_scores_data[offset]; auto pre_score = pre_scores_data[offset];
if (pre_id == end_id_) { if (pre_id == end_id_) {
// Allocate all probability mass to eos_id for finished branchs and the // Allocate all probability mass to eos_id for finished branchs and the
// other // other candidate ids can be ignored.
// candidate ids can be ignored.
items->emplace_back(offset, end_id_, pre_score); items->emplace_back(offset, end_id_, pre_score);
} else { } else {
for (size_t d = 0; d < instance_dim; d++) { for (size_t d = 0; d < instance_dim; d++) {
......
...@@ -155,17 +155,11 @@ class BeamSearch { ...@@ -155,17 +155,11 @@ class BeamSearch {
protected: protected:
/* /*
* Prune the source sentences all branchs finished, and it is optional. * Prune the source sentences all branchs finished, and it is optional.
* Pruning must one step later than finishing, since the end tokens * Pruning must one step later than finishing (thus pre_ids is needed here),
* must be writed out. Also the finished branchs with top 1 score can * since the end tokens must be writed out.
* be pruned.
*/ */
void PruneEndBeams(const framework::LoDTensor& pre_ids, void PruneEndBeams(const framework::LoDTensor& pre_ids,
std::vector<std::vector<Item>>* items); std::vector<std::vector<Item>>* items);
/*
* Delete all the records that follows the end token.
*/
int PruneEndidCandidates(const framework::LoDTensor& pre_ids,
std::vector<std::vector<Item>>* items);
/* /*
* Transform the items into a map whose key is offset, value is the items. * Transform the items into a map whose key is offset, value is the items.
......
...@@ -30,7 +30,7 @@ using std::endl; ...@@ -30,7 +30,7 @@ using std::endl;
void CreateInput(LoDTensor* ids, LoDTensor* scores) { void CreateInput(LoDTensor* ids, LoDTensor* scores) {
LoD lod; LoD lod;
vector<size_t> level0({0, 1, 4}); vector<size_t> level0({0, 2, 4});
vector<size_t> level1({0, 1, 2, 3, 4}); vector<size_t> level1({0, 1, 2, 3, 4});
lod.push_back(level0); lod.push_back(level0);
lod.push_back(level1); lod.push_back(level1);
...@@ -64,17 +64,22 @@ TEST(beam_search_op, run) { ...@@ -64,17 +64,22 @@ TEST(beam_search_op, run) {
for (int i = 0; i < 4; i++) { for (int i = 0; i < 4; i++) {
pre_ids.mutable_data<int64_t>(place)[i] = i + 1; pre_ids.mutable_data<int64_t>(place)[i] = i + 1;
} }
LoDTensor pre_scores;
pre_scores.Resize(framework::make_ddim(vector<int64_t>(4, 1)));
for (int i = 0; i < 4; i++) {
pre_scores.mutable_data<float>(place)[i] = 0.1;
}
BeamSearch beamsearch(ids, scores, (int64_t)0, (int64_t)2, 0); BeamSearch beamsearch(ids, scores, (size_t)0, (size_t)2, 0);
LoDTensor sids, sscores; LoDTensor sids, sscores;
beamsearch(pre_ids, &sids, &sscores); beamsearch(pre_ids, pre_scores, &sids, &sscores);
LOG(INFO) << "score: " << sscores << endl; LOG(INFO) << "score: " << sscores << endl;
ASSERT_EQ(sids.lod(), sscores.lod()); ASSERT_EQ(sids.lod(), sscores.lod());
vector<int> tids({2, 4, 3, 8}); vector<int> tids({4, 2, 3, 8});
vector<float> tscores({0.3, 0.5, 0.9, 0.7}); vector<float> tscores({0.5, 0.6, 0.9, 0.7});
for (int i = 0; i < 4; i++) { for (int i = 0; i < 4; i++) {
ASSERT_EQ(tids[i], sids.data<int64_t>()[i]); ASSERT_EQ(tids[i], sids.data<int64_t>()[i]);
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册