Unverified commit 3deab77f, authored by mengqingchun02, committed by GitHub

Make up beam_search_decode operator test cases on xpu and cpu environment (#45264)

* support beam_search operator on xpu. test=kunlun

* support beam_search operator on xpu. test=kunlun

* support beam_search operator on xpu. test=kunlun

* support beam_search operator on xpu. test=kunlun

* support beam_search operator on xpu. test=kunlun

* make up beam_search_decode operator test cases on xpu and cpu environment. test=kunlun
Parent b68f1120
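Editor's note: the pattern this commit applies to both test files is the same — the body of the old single-dtype `TEST` is lifted into a `template <typename T>` test frame inside `namespace paddle::test`, and one thin `TEST` per dtype instantiates it. A minimal standalone sketch of that structure (GoogleTest only; `CheckRoundTrip` and its body are placeholders for illustration, not Paddle code):

```cpp
#include <cstdint>
#include "gtest/gtest.h"

// Placeholder frame: the real frames in this diff build LoDTensorArrays and
// run BeamSearchDecoder<T>; only the template-per-dtype shape is shown here.
template <typename T>
void CheckRoundTrip() {
  T value = static_cast<T>(3);
  // Each dtype must survive the id/score round trip the tests assert on.
  ASSERT_EQ(value, static_cast<T>(static_cast<int64_t>(value)));
}

// One TEST per dtype, mirroring Backtrace_CPU_Float / _Double / _Int / _Int64.
TEST(RoundTrip, Float) { CheckRoundTrip<float>(); }
TEST(RoundTrip, Double) { CheckRoundTrip<double>(); }
TEST(RoundTrip, Int) { CheckRoundTrip<int>(); }
TEST(RoundTrip, Int64) { CheckRoundTrip<int64_t>(); }
```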
@@ -31,6 +31,7 @@ using SentenceVector = paddle::operators::SentenceVector<T>;
namespace paddle {
namespace test {
+template <typename T>
void GenerateExample(const std::vector<size_t>& level_0,
const std::vector<size_t>& level_1,
const std::vector<int>& data,
@@ -72,19 +73,17 @@ void GenerateExample(const std::vector<size_t>& level_0,
tensor_score.set_lod(lod);
tensor_score.Resize({static_cast<int64_t>(data.size())});
// malloc memory
-  float* score_ptr = tensor_score.mutable_data<float>(place);
+  T* score_ptr = tensor_score.mutable_data<T>(place);
for (size_t i = 0; i < data.size(); ++i) {
-    score_ptr[i] = static_cast<float>(data.at(i));
+    score_ptr[i] = static_cast<T>(data.at(i));
}
ids->push_back(tensor_id);
scores->push_back(tensor_score);
}
-}  // namespace test
-}  // namespace paddle
-TEST(BeamSearchDecodeOp, Backtrace) {
+template <typename T>
+void BeamSearchDecodeTestFrame() {
CPUPlace place;
// Construct sample data with 5 steps and 2 source sentences
@@ -92,27 +91,27 @@ TEST(BeamSearchDecodeOp, Backtrace) {
LoDTensorArray ids;
LoDTensorArray scores;
-  paddle::test::GenerateExample(std::vector<size_t>{0, 1, 2},
-                                std::vector<size_t>{0, 1, 2},
-                                std::vector<int>{0, 0},
-                                &ids,
-                                &scores);  // start with start_id
-  paddle::test::GenerateExample(std::vector<size_t>{0, 1, 2},
-                                std::vector<size_t>{0, 2, 4},
-                                std::vector<int>{2, 3, 4, 5},
-                                &ids,
-                                &scores);
-  paddle::test::GenerateExample(std::vector<size_t>{0, 2, 4},
-                                std::vector<size_t>{0, 2, 2, 4, 4},
-                                std::vector<int>{3, 1, 5, 4},
-                                &ids,
-                                &scores);
-  paddle::test::GenerateExample(std::vector<size_t>{0, 2, 4},
-                                std::vector<size_t>{0, 1, 2, 3, 4},
-                                std::vector<int>{1, 1, 3, 5},
-                                &ids,
-                                &scores);
-  paddle::test::GenerateExample(
+  GenerateExample<T>(std::vector<size_t>{0, 1, 2},
+                     std::vector<size_t>{0, 1, 2},
+                     std::vector<int>{0, 0},
+                     &ids,
+                     &scores);  // start with start_id
+  GenerateExample<T>(std::vector<size_t>{0, 1, 2},
+                     std::vector<size_t>{0, 2, 4},
+                     std::vector<int>{2, 3, 4, 5},
+                     &ids,
+                     &scores);
+  GenerateExample<T>(std::vector<size_t>{0, 2, 4},
+                     std::vector<size_t>{0, 2, 2, 4, 4},
+                     std::vector<int>{3, 1, 5, 4},
+                     &ids,
+                     &scores);
+  GenerateExample<T>(std::vector<size_t>{0, 2, 4},
+                     std::vector<size_t>{0, 1, 2, 3, 4},
+                     std::vector<int>{1, 1, 3, 5},
+                     &ids,
+                     &scores);
+  GenerateExample<T>(
std::vector<size_t>{0, 2, 4},
std::vector<size_t>{0, 0, 0, 2, 2},  // the branches of the first source
// sentence are pruned since finished
@@ -123,7 +122,7 @@ TEST(BeamSearchDecodeOp, Backtrace) {
ASSERT_EQ(ids.size(), 5UL);
ASSERT_EQ(scores.size(), 5UL);
-  BeamSearchDecoder<float> helper(2, 1);  // beam_size = 2, end_id = 1
+  BeamSearchDecoder<T> helper(2, 1);  // beam_size = 2, end_id = 1
LoDTensor id_tensor;
LoDTensor score_tensor;
@@ -142,7 +141,30 @@ TEST(BeamSearchDecodeOp, Backtrace) {
static_cast<int64_t>(expect_data[i]));
}
for (int64_t i = 0; i < id_tensor.dims()[0]; ++i) {
-    ASSERT_EQ(score_tensor.data<float>()[i],
-              static_cast<float>(id_tensor.data<int64_t>()[i]));
+    ASSERT_EQ(score_tensor.data<T>()[i],
+              static_cast<T>(id_tensor.data<int64_t>()[i]));
}
}
+}  // namespace test
+}  // namespace paddle
+TEST(BeamSearchDecodeOp, Backtrace_CPU_Float) {
+  paddle::test::BeamSearchDecodeTestFrame<float>();
+}
+TEST(BeamSearchDecodeOp, Backtrace_CPU_Float16) {
+  paddle::test::BeamSearchDecodeTestFrame<paddle::platform::float16>();
+}
+TEST(BeamSearchDecodeOp, Backtrace_CPU_Double) {
+  paddle::test::BeamSearchDecodeTestFrame<double>();
+}
+TEST(BeamSearchDecodeOp, Backtrace_CPU_Int) {
+  paddle::test::BeamSearchDecodeTestFrame<int>();
+}
+TEST(BeamSearchDecodeOp, Backtrace_CPU_Int64) {
+  paddle::test::BeamSearchDecodeTestFrame<int64_t>();
+}
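Editor's note on the sample data both `GenerateExample` and the XPU helper below construct: as I read the offsets, `level_0` is the source-level LoD (each source sentence owns a half-open range of beam branches) and `level_1` is the branch-level LoD (each branch owns a range of the flat `data` vector of candidate ids). A worked consistency check for step 3 of the sequence above (plain C++ for illustration, not Paddle API; the invariants are my reading, see Paddle's LoD docs for the authoritative semantics):

```cpp
#include <cassert>
#include <vector>

int main() {
  // Step 3: two source sentences, two live branches each (level_0),
  // one candidate id per branch (level_1).
  std::vector<size_t> level_0 = {0, 2, 4};        // source i owns branches [l0[i], l0[i+1])
  std::vector<size_t> level_1 = {0, 1, 2, 3, 4};  // branch j owns ids [l1[j], l1[j+1])
  std::vector<int> data = {1, 1, 3, 5};           // flat candidate ids

  assert(level_0.back() + 1 == level_1.size());   // every branch has an offset pair
  assert(level_1.back() == data.size());          // offsets cover exactly the data
  return 0;
}
```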
@@ -32,6 +32,7 @@ using SentenceVector = paddle::operators::SentenceVector<T>;
namespace paddle {
namespace test {
+template <typename T>
void GenerateXPUExample(const std::vector<size_t>& level_0,
const std::vector<size_t>& level_1,
const std::vector<int>& data,
@@ -93,60 +94,80 @@ void GenerateXPUExample(const std::vector<size_t>& level_0,
tensor_score_cpu.set_lod(lod);
tensor_score_cpu.Resize({static_cast<int64_t>(data.size())});
// malloc memory
-  float* score_cpu_ptr = tensor_score_cpu.mutable_data<float>(place);
+  T* score_cpu_ptr = tensor_score_cpu.mutable_data<T>(place);
for (size_t i = 0; i < data.size(); ++i) {
-    score_cpu_ptr[i] = static_cast<float>(data.at(i));
+    score_cpu_ptr[i] = static_cast<T>(data.at(i));
}
LoDTensor tensor_score;
-  const phi::DenseTensorMeta meta_data_score(
-      paddle::experimental::DataType::FLOAT32, tensor_score_cpu.dims());
-  tensor_score.set_meta(meta_data_score);
+  if (std::is_same<float, T>::value) {
+    const phi::DenseTensorMeta meta_data_score(
+        paddle::experimental::DataType::FLOAT32, tensor_score_cpu.dims());
+    tensor_score.set_meta(meta_data_score);
+  } else if (std::is_same<double, T>::value) {
+    const phi::DenseTensorMeta meta_data_score(
+        paddle::experimental::DataType::FLOAT64, tensor_score_cpu.dims());
+    tensor_score.set_meta(meta_data_score);
+  } else if (std::is_same<paddle::platform::float16, T>::value) {
+    const phi::DenseTensorMeta meta_data_score(
+        paddle::experimental::DataType::FLOAT16, tensor_score_cpu.dims());
+    tensor_score.set_meta(meta_data_score);
+  } else if (std::is_same<int, T>::value) {
+    const phi::DenseTensorMeta meta_data_score(
+        paddle::experimental::DataType::INT32, tensor_score_cpu.dims());
+    tensor_score.set_meta(meta_data_score);
+  } else if (std::is_same<int64_t, T>::value) {
+    const phi::DenseTensorMeta meta_data_score(
+        paddle::experimental::DataType::INT64, tensor_score_cpu.dims());
+    tensor_score.set_meta(meta_data_score);
+  }
tensor_score.set_lod(lod);
-  float* score_ptr = tensor_score.mutable_data<float>(xpu_place);
+  T* score_ptr = tensor_score.mutable_data<T>(xpu_place);
paddle::memory::Copy(paddle::platform::XPUPlace(XPU_PlaceNo),
score_ptr,
paddle::platform::CPUPlace(),
score_cpu_ptr,
-                       tensor_score_cpu.numel() * sizeof(float));
+                       tensor_score_cpu.numel() * sizeof(T));
ids->push_back(tensor_id);
scores->push_back(tensor_score);
}
-}  // namespace test
-}  // namespace paddle
-TEST(BeamSearchDecodeOpXPU, Backtrace) {
+template <typename T>
+void BeamSearchDecodeTestByXPUFrame() {
CPUPlace place;
// Construct sample data with 5 steps and 2 source sentences
// beam_size = 2, start_id = 0, end_id = 1
LoDTensorArray ids;
LoDTensorArray scores;
-  paddle::test::GenerateXPUExample(std::vector<size_t>{0, 1, 2},
-                                   std::vector<size_t>{0, 1, 2},
-                                   std::vector<int>{0, 0},
-                                   &ids,
-                                   &scores);  // start with start_id
-  paddle::test::GenerateXPUExample(std::vector<size_t>{0, 1, 2},
-                                   std::vector<size_t>{0, 2, 4},
-                                   std::vector<int>{2, 3, 4, 5},
-                                   &ids,
-                                   &scores);
-  paddle::test::GenerateXPUExample(std::vector<size_t>{0, 2, 4},
-                                   std::vector<size_t>{0, 2, 2, 4, 4},
-                                   std::vector<int>{3, 1, 5, 4},
-                                   &ids,
-                                   &scores);
-  paddle::test::GenerateXPUExample(std::vector<size_t>{0, 2, 4},
-                                   std::vector<size_t>{0, 1, 2, 3, 4},
-                                   std::vector<int>{1, 1, 3, 5},
-                                   &ids,
-                                   &scores);
-  paddle::test::GenerateXPUExample(
+  GenerateXPUExample<T>(std::vector<size_t>{0, 1, 2},
+                        std::vector<size_t>{0, 1, 2},
+                        std::vector<int>{0, 0},
+                        &ids,
+                        &scores);  // start with start_id
+  GenerateXPUExample<T>(std::vector<size_t>{0, 1, 2},
+                        std::vector<size_t>{0, 2, 4},
+                        std::vector<int>{2, 3, 4, 5},
+                        &ids,
+                        &scores);
+  GenerateXPUExample<T>(std::vector<size_t>{0, 2, 4},
+                        std::vector<size_t>{0, 2, 2, 4, 4},
+                        std::vector<int>{3, 1, 5, 4},
+                        &ids,
+                        &scores);
+  GenerateXPUExample<T>(std::vector<size_t>{0, 2, 4},
+                        std::vector<size_t>{0, 1, 2, 3, 4},
+                        std::vector<int>{1, 1, 3, 5},
+                        &ids,
+                        &scores);
+  GenerateXPUExample<T>(
std::vector<size_t>{0, 2, 4},
std::vector<size_t>{0, 0, 0, 2, 2},  // the branches of the first source
// sentence are pruned since finished
@@ -162,7 +183,7 @@ TEST(BeamSearchDecodeOpXPU, Backtrace) {
paddle::operators::BeamSearchDecodeXPUFunctor bs_xpu(
ids, scores, &id_tensor_cpu, &score_tensor_cpu, 2, 1);
-  bs_xpu.apply_xpu<float>();
+  bs_xpu.apply_xpu<T>();
LoD lod = id_tensor_cpu.lod();
std::vector<size_t> expect_source_lod = {0, 2, 4};
@@ -181,7 +202,30 @@ TEST(BeamSearchDecodeOpXPU, Backtrace) {
}
for (int64_t i = 0; i < id_tensor_cpu.dims()[0]; ++i) {
-    ASSERT_EQ(score_tensor_cpu.data<float>()[i],
-              static_cast<float>(id_tensor_cpu.data<int64_t>()[i]));
+    ASSERT_EQ(score_tensor_cpu.data<T>()[i],
+              static_cast<T>(id_tensor_cpu.data<int64_t>()[i]));
}
}
+}  // namespace test
+}  // namespace paddle
+TEST(BeamSearchDecodeOpXPU, Backtrace_XPU_Float) {
+  paddle::test::BeamSearchDecodeTestByXPUFrame<float>();
+}
+TEST(BeamSearchDecodeOpXPU, Backtrace_XPU_Float16) {
+  paddle::test::BeamSearchDecodeTestByXPUFrame<paddle::platform::float16>();
+}
+TEST(BeamSearchDecodeOpXPU, Backtrace_XPU_Int) {
+  paddle::test::BeamSearchDecodeTestByXPUFrame<int>();
+}
+TEST(BeamSearchDecodeOpXPU, Backtrace_XPU_Int64) {
+  paddle::test::BeamSearchDecodeTestByXPUFrame<int64_t>();
+}
+TEST(BeamSearchDecodeOpXPU, Backtrace_XPU_Double) {
+  paddle::test::BeamSearchDecodeTestByXPUFrame<double>();
+}
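Editor's note: two implementation details in the XPU file above are worth a closer look. First, the dtype dispatch: the if/else chain over `std::is_same` repeats the `DenseTensorMeta`/`set_meta` pair once per dtype. A compile-time trait would collapse it to one line; a hand-written illustration follows (the `DataType` enum and `float16` struct here are local stand-ins, not the phi definitions, and Paddle may already ship an equivalent type-to-DataType mapping that would be preferable if available):

```cpp
#include <cstdint>

// Local stand-ins for illustration only; not the phi definitions.
enum class DataType { FLOAT32, FLOAT64, FLOAT16, INT32, INT64 };
struct float16 { uint16_t bits; };

template <typename T> struct DataTypeOf;
template <> struct DataTypeOf<float>   { static constexpr DataType value = DataType::FLOAT32; };
template <> struct DataTypeOf<double>  { static constexpr DataType value = DataType::FLOAT64; };
template <> struct DataTypeOf<float16> { static constexpr DataType value = DataType::FLOAT16; };
template <> struct DataTypeOf<int>     { static constexpr DataType value = DataType::INT32; };
template <> struct DataTypeOf<int64_t> { static constexpr DataType value = DataType::INT64; };

// The whole chain would then reduce to two lines:
//   const phi::DenseTensorMeta meta(DataTypeOf<T>::value, tensor_score_cpu.dims());
//   tensor_score.set_meta(meta);
```

Second, the host-to-device copy: switching the byte count to `numel() * sizeof(T)` is essential, since the previously hard-coded `sizeof(float)` would transfer the wrong number of bytes for `double` (too few) and `float16` (too many). A minimal host-side illustration of the staging pattern, with `std::memcpy` standing in for `paddle::memory::Copy`:

```cpp
#include <cassert>
#include <cstring>
#include <vector>

template <typename T>
void StageAndCopy(const std::vector<int>& data, std::vector<T>* device_like) {
  // Stage on the host first, as the test does with tensor_score_cpu.
  std::vector<T> cpu(data.size());
  for (size_t i = 0; i < data.size(); ++i) cpu[i] = static_cast<T>(data[i]);

  // The byte count must use sizeof(T): sizeof(float) would under-copy
  // an 8-byte double and over-copy a 2-byte float16.
  device_like->resize(data.size());
  std::memcpy(device_like->data(), cpu.data(), cpu.size() * sizeof(T));
}

int main() {
  std::vector<double> out;
  StageAndCopy<double>({2, 3, 4, 5}, &out);
  assert(out[3] == 5.0);
  return 0;
}
```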