diff --git a/paddle/fluid/operators/beam_search_decode_op_test.cc b/paddle/fluid/operators/beam_search_decode_op_test.cc
index 1dc5a20986cfd396d3e2c4e1500feb3e11ef3c0f..a8147ace4ce21006ecd4248215182674fa76d454 100644
--- a/paddle/fluid/operators/beam_search_decode_op_test.cc
+++ b/paddle/fluid/operators/beam_search_decode_op_test.cc
@@ -31,6 +31,7 @@ using SentenceVector = paddle::operators::SentenceVector<T>;
 
 namespace paddle {
 namespace test {
+template <typename T>
 void GenerateExample(const std::vector<size_t>& level_0,
                      const std::vector<size_t>& level_1,
                      const std::vector<int>& data,
@@ -72,19 +73,17 @@ void GenerateExample(const std::vector<size_t>& level_0,
   tensor_score.set_lod(lod);
   tensor_score.Resize({static_cast<int64_t>(data.size())});
   // malloc memory
-  float* score_ptr = tensor_score.mutable_data<float>(place);
+  T* score_ptr = tensor_score.mutable_data<T>(place);
   for (size_t i = 0; i < data.size(); ++i) {
-    score_ptr[i] = static_cast<float>(data.at(i));
+    score_ptr[i] = static_cast<T>(data.at(i));
   }
 
   ids->push_back(tensor_id);
   scores->push_back(tensor_score);
 }
 
-}  // namespace test
-}  // namespace paddle
-
-TEST(BeamSearchDecodeOp, Backtrace) {
+template <typename T>
+void BeamSearchDecodeTestFrame() {
   CPUPlace place;
 
   // Construct sample data with 5 steps and 2 source sentences
@@ -92,27 +91,27 @@ TEST(BeamSearchDecodeOp, Backtrace) {
   LoDTensorArray ids;
   LoDTensorArray scores;
 
-  paddle::test::GenerateExample(std::vector<size_t>{0, 1, 2},
-                                std::vector<size_t>{0, 1, 2},
-                                std::vector<int>{0, 0},
-                                &ids,
-                                &scores);  // start with start_id
-  paddle::test::GenerateExample(std::vector<size_t>{0, 1, 2},
-                                std::vector<size_t>{0, 2, 4},
-                                std::vector<int>{2, 3, 4, 5},
-                                &ids,
-                                &scores);
-  paddle::test::GenerateExample(std::vector<size_t>{0, 2, 4},
-                                std::vector<size_t>{0, 2, 2, 4, 4},
-                                std::vector<int>{3, 1, 5, 4},
-                                &ids,
-                                &scores);
-  paddle::test::GenerateExample(std::vector<size_t>{0, 2, 4},
-                                std::vector<size_t>{0, 1, 2, 3, 4},
-                                std::vector<int>{1, 1, 3, 5},
-                                &ids,
-                                &scores);
-  paddle::test::GenerateExample(
+  GenerateExample<T>(std::vector<size_t>{0, 1, 2},
+                     std::vector<size_t>{0, 1, 2},
+                     std::vector<int>{0, 0},
+                     &ids,
+                     &scores);  // start with start_id
+  GenerateExample<T>(std::vector<size_t>{0, 1, 2},
+                     std::vector<size_t>{0, 2, 4},
+                     std::vector<int>{2, 3, 4, 5},
+                     &ids,
+                     &scores);
+  GenerateExample<T>(std::vector<size_t>{0, 2, 4},
+                     std::vector<size_t>{0, 2, 2, 4, 4},
+                     std::vector<int>{3, 1, 5, 4},
+                     &ids,
+                     &scores);
+  GenerateExample<T>(std::vector<size_t>{0, 2, 4},
+                     std::vector<size_t>{0, 1, 2, 3, 4},
+                     std::vector<int>{1, 1, 3, 5},
+                     &ids,
+                     &scores);
+  GenerateExample<T>(
       std::vector<size_t>{0, 2, 4},
       std::vector<size_t>{0, 0, 0, 2, 2},  // the branchs of the first source
                                            // sentence are pruned since finished
@@ -123,7 +122,7 @@ TEST(BeamSearchDecodeOp, Backtrace) {
   ASSERT_EQ(ids.size(), 5UL);
   ASSERT_EQ(scores.size(), 5UL);
 
-  BeamSearchDecoder<float> helper(2, 1);  // beam_size = 2, end_id = 1
+  BeamSearchDecoder<T> helper(2, 1);  // beam_size = 2, end_id = 1
 
   LoDTensor id_tensor;
   LoDTensor score_tensor;
@@ -142,7 +141,30 @@ TEST(BeamSearchDecodeOp, Backtrace) {
               static_cast<int64_t>(expect_data[i]));
   }
   for (int64_t i = 0; i < id_tensor.dims()[0]; ++i) {
-    ASSERT_EQ(score_tensor.data<float>()[i],
-              static_cast<float>(id_tensor.data<int64_t>()[i]));
+    ASSERT_EQ(score_tensor.data<T>()[i],
+              static_cast<T>(id_tensor.data<int64_t>()[i]));
   }
 }
+
+}  // namespace test
+}  // namespace paddle
+
+TEST(BeamSearchDecodeOp, Backtrace_CPU_Float) {
+  paddle::test::BeamSearchDecodeTestFrame<float>();
+}
+
+TEST(BeamSearchDecodeOp, Backtrace_CPU_Float16) {
+  paddle::test::BeamSearchDecodeTestFrame<paddle::platform::float16>();
+}
+
+TEST(BeamSearchDecodeOp, Backtrace_CPU_Double) {
+  paddle::test::BeamSearchDecodeTestFrame<double>();
+}
+
+TEST(BeamSearchDecodeOp, Backtrace_CPU_Int) {
+  paddle::test::BeamSearchDecodeTestFrame<int>();
+}
+
+TEST(BeamSearchDecodeOp, Backtrace_CPU_Int64) {
+  paddle::test::BeamSearchDecodeTestFrame<int64_t>();
+}
diff --git a/paddle/fluid/operators/beam_search_decode_op_xpu_test.cc b/paddle/fluid/operators/beam_search_decode_op_xpu_test.cc
index 7dd45371f6bb21826ef3a68068598811491fdcf0..07a7b9674fa20924f6ef2178ce1d09be1aff4c37 100644
--- a/paddle/fluid/operators/beam_search_decode_op_xpu_test.cc
+++ b/paddle/fluid/operators/beam_search_decode_op_xpu_test.cc
@@ -32,6 +32,7 @@ using SentenceVector = paddle::operators::SentenceVector<T>;
 
 namespace paddle {
 namespace test {
+template <typename T>
 void GenerateXPUExample(const std::vector<size_t>& level_0,
                         const std::vector<size_t>& level_1,
                         const std::vector<int>& data,
@@ -93,60 +94,80 @@ void GenerateXPUExample(const std::vector<size_t>& level_0,
   tensor_score_cpu.set_lod(lod);
   tensor_score_cpu.Resize({static_cast<int64_t>(data.size())});
   // malloc memory
-  float* score_cpu_ptr = tensor_score_cpu.mutable_data<float>(place);
+  T* score_cpu_ptr = tensor_score_cpu.mutable_data<T>(place);
   for (size_t i = 0; i < data.size(); ++i) {
-    score_cpu_ptr[i] = static_cast<float>(data.at(i));
+    score_cpu_ptr[i] = static_cast<T>(data.at(i));
   }
 
   LoDTensor tensor_score;
-  const phi::DenseTensorMeta meta_data_score(
-      paddle::experimental::DataType::FLOAT32, tensor_score_cpu.dims());
-  tensor_score.set_meta(meta_data_score);
+
+  if (std::is_same<T, float>::value) {
+    const phi::DenseTensorMeta meta_data_score(
+        paddle::experimental::DataType::FLOAT32, tensor_score_cpu.dims());
+    tensor_score.set_meta(meta_data_score);
+  } else if (std::is_same<T, double>::value) {
+    const phi::DenseTensorMeta meta_data_score(
+        paddle::experimental::DataType::FLOAT64, tensor_score_cpu.dims());
+    tensor_score.set_meta(meta_data_score);
+  } else if (std::is_same<T, paddle::platform::float16>::value) {
+    const phi::DenseTensorMeta meta_data_score(
+        paddle::experimental::DataType::FLOAT16, tensor_score_cpu.dims());
+    tensor_score.set_meta(meta_data_score);
+  } else if (std::is_same<T, int>::value) {
+    const phi::DenseTensorMeta meta_data_score(
+        paddle::experimental::DataType::INT32, tensor_score_cpu.dims());
+    tensor_score.set_meta(meta_data_score);
+  } else if (std::is_same<T, int64_t>::value) {
+    const phi::DenseTensorMeta meta_data_score(
+        paddle::experimental::DataType::INT64, tensor_score_cpu.dims());
+    tensor_score.set_meta(meta_data_score);
+  }
+
   tensor_score.set_lod(lod);
-  float* score_ptr = tensor_score.mutable_data<float>(xpu_place);
+  T* score_ptr = tensor_score.mutable_data<T>(xpu_place);
+
   paddle::memory::Copy(paddle::platform::XPUPlace(XPU_PlaceNo),
                        score_ptr,
                        paddle::platform::CPUPlace(),
                        score_cpu_ptr,
-                       tensor_score_cpu.numel() * sizeof(float));
+                       tensor_score_cpu.numel() * sizeof(T));
 
   ids->push_back(tensor_id);
   scores->push_back(tensor_score);
 }
 
-}  // namespace test
-}  // namespace paddle
-
-TEST(BeamSearchDecodeOpXPU, Backtrace) {
+template <typename T>
+void BeamSearchDecodeTestByXPUFrame() {
   CPUPlace place;
 
   // Construct sample data with 5 steps and 2 source sentences
   // beam_size = 2, start_id = 0, end_id = 1
+
   LoDTensorArray ids;
   LoDTensorArray scores;
 
-  paddle::test::GenerateXPUExample(std::vector<size_t>{0, 1, 2},
-                                   std::vector<size_t>{0, 1, 2},
-                                   std::vector<int>{0, 0},
-                                   &ids,
-                                   &scores);  // start with start_id
-  paddle::test::GenerateXPUExample(std::vector<size_t>{0, 1, 2},
-                                   std::vector<size_t>{0, 2, 4},
-                                   std::vector<int>{2, 3, 4, 5},
-                                   &ids,
-                                   &scores);
-  paddle::test::GenerateXPUExample(std::vector<size_t>{0, 2, 4},
-                                   std::vector<size_t>{0, 2, 2, 4, 4},
-                                   std::vector<int>{3, 1, 5, 4},
-                                   &ids,
-                                   &scores);
-  paddle::test::GenerateXPUExample(std::vector<size_t>{0, 2, 4},
-                                   std::vector<size_t>{0, 1, 2, 3, 4},
-                                   std::vector<int>{1, 1, 3, 5},
-                                   &ids,
-                                   &scores);
-  paddle::test::GenerateXPUExample(
+  GenerateXPUExample<T>(std::vector<size_t>{0, 1, 2},
+                        std::vector<size_t>{0, 1, 2},
+                        std::vector<int>{0, 0},
+                        &ids,
+                        &scores);  // start with start_id
+  GenerateXPUExample<T>(std::vector<size_t>{0, 1, 2},
+                        std::vector<size_t>{0, 2, 4},
+                        std::vector<int>{2, 3, 4, 5},
+                        &ids,
+                        &scores);
+  GenerateXPUExample<T>(std::vector<size_t>{0, 2, 4},
+                        std::vector<size_t>{0, 2, 2, 4, 4},
+                        std::vector<int>{3, 1, 5, 4},
+                        &ids,
+                        &scores);
+  GenerateXPUExample<T>(std::vector<size_t>{0, 2, 4},
+                        std::vector<size_t>{0, 1, 2, 3, 4},
+                        std::vector<int>{1, 1, 3, 5},
+                        &ids,
+                        &scores);
+  GenerateXPUExample<T>(
       std::vector<size_t>{0, 2, 4},
       std::vector<size_t>{0, 0, 0, 2, 2},  // the branchs of the first source
                                            // sentence are pruned since finished
@@ -162,7 +183,7 @@ TEST(BeamSearchDecodeOpXPU, Backtrace) {
   paddle::operators::BeamSearchDecodeXPUFunctor bs_xpu(
       ids, scores, &id_tensor_cpu, &score_tensor_cpu, 2, 1);
-  bs_xpu.apply_xpu<float>();
+  bs_xpu.apply_xpu<T>();
 
   LoD lod = id_tensor_cpu.lod();
   std::vector<size_t> expect_source_lod = {0, 2, 4};
@@ -181,7 +202,30 @@ TEST(BeamSearchDecodeOpXPU, Backtrace) {
   }
 
   for (int64_t i = 0; i < id_tensor_cpu.dims()[0]; ++i) {
-    ASSERT_EQ(score_tensor_cpu.data<float>()[i],
-              static_cast<float>(id_tensor_cpu.data<int64_t>()[i]));
+    ASSERT_EQ(score_tensor_cpu.data<T>()[i],
+              static_cast<T>(id_tensor_cpu.data<int64_t>()[i]));
   }
 }
+
+}  // namespace test
+}  // namespace paddle
+
+TEST(BeamSearchDecodeOpXPU, Backtrace_XPU_Float) {
+  paddle::test::BeamSearchDecodeTestByXPUFrame<float>();
+}
+
+TEST(BeamSearchDecodeOpXPU, Backtrace_XPU_Float16) {
+  paddle::test::BeamSearchDecodeTestByXPUFrame<paddle::platform::float16>();
+}
+
+TEST(BeamSearchDecodeOpXPU, Backtrace_XPU_Int) {
+  paddle::test::BeamSearchDecodeTestByXPUFrame<int>();
+}
+
+TEST(BeamSearchDecodeOpXPU, Backtrace_XPU_Int64) {
+  paddle::test::BeamSearchDecodeTestByXPUFrame<int64_t>();
+}
+
+TEST(BeamSearchDecodeOpXPU, Backtrace_XPU_Double) {
+  paddle::test::BeamSearchDecodeTestByXPUFrame<double>();
+}