diff --git a/lite/backends/xpu/target_wrapper.cc b/lite/backends/xpu/target_wrapper.cc
index a3d8729410299170964e3ce3b59feb4b970a121b..5f5eae4703a0a0c5db3f026dabaea76d3371b03a 100644
--- a/lite/backends/xpu/target_wrapper.cc
+++ b/lite/backends/xpu/target_wrapper.cc
@@ -18,6 +18,27 @@
 namespace paddle {
 namespace lite {
 
+void XPUScratchPad::Reserve(size_t new_size) {
+  if (new_size <= size_) {
+    return;
+  }
+
+  if (!is_l3_) {
+    TargetWrapperXPU::Free(addr_);
+    addr_ = TargetWrapperXPU::Malloc(new_size);
+    size_ = new_size;
+  } else {
+    CHECK(false) << "Not supported if is_l3_ == true";
+  }
+}
+
+void XPUScratchPadDeleter::operator()(XPUScratchPad* sp) const {
+  if (!sp->is_l3_) {
+    TargetWrapperXPU::Free(sp->addr_);
+  }
+  delete sp;
+}
+
 void* TargetWrapperXPU::Malloc(size_t size) {
   void* ptr{nullptr};
   XPU_CALL(xpu_malloc(&ptr, size));
@@ -51,7 +72,7 @@ XPUScratchPadGuard TargetWrapperXPU::MallocScratchPad(size_t size,
     ptr = TargetWrapperXPU::Malloc(size);
   }
   CHECK(ptr != nullptr) << "size = " << size << ", use_l3 = " << use_l3;
-  return XPUScratchPadGuard(new XPUScratchPad(ptr, use_l3));
+  return XPUScratchPadGuard(new XPUScratchPad(ptr, size, use_l3));
 }
 
 std::string TargetWrapperXPU::multi_encoder_precision;  // NOLINT
diff --git a/lite/backends/xpu/target_wrapper.h b/lite/backends/xpu/target_wrapper.h
index 1a888b126a43783ddae5654de38f5b2e201eaa5e..8151d733ba4b506d3d24fd7e7c150c5f12f1e691 100644
--- a/lite/backends/xpu/target_wrapper.h
+++ b/lite/backends/xpu/target_wrapper.h
@@ -37,19 +37,19 @@ const int XPU_MAX_LOD_SEQ_LEN = 512;
 using TargetWrapperXPU = TargetWrapper<TARGET(kXPU)>;
 
 struct XPUScratchPad {
-  XPUScratchPad(void* addr, bool is_l3) : addr_(addr), is_l3_(is_l3) {}
+  XPUScratchPad(void* addr, size_t size, bool is_l3)
+      : addr_(addr), size_(size), is_l3_(is_l3) {}
+
+  // XXX(miaotianxiang): |size_| increases monotonically
+  void Reserve(size_t new_size);
 
   void* addr_{nullptr};
+  size_t size_{0};
   bool is_l3_{false};
 };
 
 struct XPUScratchPadDeleter {
-  void operator()(XPUScratchPad* sp) const {
-    if (!sp->is_l3_) {
-      XPU_CALL(xpu_free(sp->addr_));
-    }
-    delete sp;
-  }
+  void operator()(XPUScratchPad* sp) const;
 };
 
 using XPUScratchPadGuard = std::unique_ptr<XPUScratchPad, XPUScratchPadDeleter>;
diff --git a/lite/kernels/x86/sequence_unpad_compute.h b/lite/kernels/x86/sequence_unpad_compute.h
index 5b4e3f6c1638975ec042598942363f516ddf3bb9..b8bdfe08e82629a839663efc63d8aee131dadea8 100644
--- a/lite/kernels/x86/sequence_unpad_compute.h
+++ b/lite/kernels/x86/sequence_unpad_compute.h
@@ -13,6 +13,7 @@
 // limitations under the License.
 
 #pragma once
+#include <vector>
 #include "lite/backends/x86/math/sequence_padding.h"
 #include "lite/core/kernel.h"
 #include "lite/core/op_registry.h"
@@ -34,6 +35,30 @@ class SequenceUnpadCompute
     auto& param = this->template Param<param_t>();
     auto& ctx = this->ctx_->template As<X86Context>();
 
+    auto x_dims = param.X->dims();
+    auto len_dims = param.Length->dims();
+
+    auto* seq_len_ptr = param.Length->template data<int64_t>();
+    int64_t batch_size = len_dims[0];
+    std::vector<uint64_t> out_lod0(batch_size + 1, 0);
+    for (int64_t i = 0; i < batch_size; ++i) {
+      out_lod0[i + 1] = out_lod0[i] + seq_len_ptr[i];
+    }
+    paddle::lite::LoD out_lod;
+    out_lod.push_back(out_lod0);
+
+    int64_t out_dim0 = out_lod0.back();
+    std::vector<int64_t> out_dims{out_dim0};
+    if (x_dims.size() == 2) {
+      out_dims.push_back(1);
+    } else {
+      for (size_t i = 2; i < x_dims.size(); ++i) {
+        out_dims.push_back(x_dims[i]);
+      }
+    }
+    param.Out->Resize(out_dims);
+    param.Out->set_lod(out_lod);
+    param.Out->template mutable_data<T>();
 
     int64_t padded_length = param.X->dims()[1];
     math::UnpaddingLoDTensorFunctor<lite::TargetType::kX86, T>()(
diff --git a/lite/kernels/xpu/CMakeLists.txt b/lite/kernels/xpu/CMakeLists.txt
index 798d707dd7021ccc26b5330619cd9ab9e0a229aa..cc691205570e7a640e22568e053c01daa36aa370 100644
--- a/lite/kernels/xpu/CMakeLists.txt
+++ b/lite/kernels/xpu/CMakeLists.txt
@@ -38,6 +38,7 @@ else()
   add_kernel(match_matrix_tensor_compute_xpu XPU extra SRCS match_matrix_tensor_compute.cc DEPS ${lite_kernel_deps})
   add_kernel(var_conv_2d_compute_xpu XPU extra SRCS var_conv_2d_compute.cc DEPS ${lite_kernel_deps})
   add_kernel(search_grnn_compute_xpu XPU extra SRCS search_grnn_compute.cc DEPS ${lite_kernel_deps})
+  add_kernel(sequence_unpad_compute_xpu XPU extra SRCS sequence_unpad_compute.cc DEPS ${lite_kernel_deps})
 
   # extra(fused kernel)
   add_kernel(__xpu__resnet50_compute_xpu XPU extra SRCS __xpu__resnet50_compute.cc DEPS ${lite_kernel_deps})
diff --git a/lite/kernels/xpu/sequence_pool_compute.cc b/lite/kernels/xpu/sequence_pool_compute.cc
index f8e71639b7f4c67f7e60103a42766a4d32026bc1..35412cf49c5b41adb2664180bd703d8475463669 100644
--- a/lite/kernels/xpu/sequence_pool_compute.cc
+++ b/lite/kernels/xpu/sequence_pool_compute.cc
@@ -42,6 +42,8 @@ void XPUSequencePoolCompute::Run() {
   xdnn::Pooling_t pool_type = xdnn::Pooling_t::MAX_WITHOUT_INDEX;
 
   if (pool_type_str == "MAX") {
+  } else if (pool_type_str == "SUM") {
+    pool_type = xdnn::Pooling_t::SUM;
   } else if (pool_type_str == "LAST") {
     pool_type = xdnn::Pooling_t::LAST;
   } else {
diff --git a/lite/kernels/xpu/sequence_unpad_compute.cc b/lite/kernels/xpu/sequence_unpad_compute.cc
new file mode 100644
index 0000000000000000000000000000000000000000..2ce296ca2163748abb8702460a0dea84c659c1d8
--- /dev/null
+++ b/lite/kernels/xpu/sequence_unpad_compute.cc
@@ -0,0 +1,98 @@
+// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "lite/kernels/xpu/sequence_unpad_compute.h"
+#include "lite/backends/xpu/xpu_header_sitter.h"
+#include "lite/core/op_registry.h"
+
+namespace paddle {
+namespace lite {
+namespace kernels {
+namespace xpu {
+
+void SequenceUnpadCompute::PrepareForRun() {
+  lod_xpu_guard_ = TargetWrapperXPU::MallocScratchPad(
+      XPU_MAX_LOD_SIZE * sizeof(int), false /* use_l3 */);
+  lod_cpu_.reserve(XPU_MAX_LOD_SIZE);
+}
+
+void SequenceUnpadCompute::Run() {
+  auto& param = this->template Param<param_t>();
+  auto& ctx = this->ctx_->template As<XPUContext>();
+
+  auto x_dims = param.X->dims();
+  auto len_dims = param.Length->dims();
+
+  // XXX(miaotianxiang): Target of tensor |Length| is |kHost|.
+  auto* seq_len_ptr = param.Length->template data<int64_t>();
+  int64_t batch_size = len_dims[0];
+  std::vector<uint64_t> out_lod0(batch_size + 1, 0);
+  for (int64_t i = 0; i < batch_size; ++i) {
+    out_lod0[i + 1] = out_lod0[i] + seq_len_ptr[i];
+  }
+  paddle::lite::LoD out_lod;
+  out_lod.push_back(out_lod0);
+
+  int64_t out_dim0 = out_lod0.back();
+  std::vector<int64_t> out_dims{out_dim0};
+  if (x_dims.size() == 2) {
+    out_dims.push_back(1);
+  } else {
+    for (size_t i = 2; i < x_dims.size(); ++i) {
+      out_dims.push_back(x_dims[i]);
+    }
+  }
+  param.Out->Resize(out_dims);
+  param.Out->set_lod(out_lod);
+
+  lod_cpu_ = {0};
+  for (int64_t i = 0; i < batch_size; ++i) {
+    int offset =
+        lod_cpu_.back() + static_cast<int>(param.Length->data<int64_t>()[i]);
+    lod_cpu_.push_back(offset);
+  }
+  lod_xpu_guard_->Reserve((batch_size + 1) * sizeof(int));
+  TargetWrapperXPU::MemcpySync(lod_xpu_guard_->addr_,
+                               lod_cpu_.data(),
+                               (batch_size + 1) * sizeof(int),
+                               IoDirection::HtoD);
+
+  int dim = param.Out->numel() / out_dim0;
+  int r = xdnn::sequence_unpad<float, int>(
+      ctx.GetRawContext(),                            /* ctx */
+      param.X->data<float>(),                         /* pad_data */
+      param.Out->mutable_data<float>(TARGET(kXPU)),   /* seq_data */
+      reinterpret_cast<int*>(lod_xpu_guard_->addr_),  /* sequence */
+      param.X->dims()[1],                             /* pad_seq_len */
+      batch_size,                                     /* batch_size */
+      dim /* dim */);
+  CHECK_EQ(r, 0);
+}
+
+}  // namespace xpu
+}  // namespace kernels
+}  // namespace lite
+}  // namespace paddle
+
+REGISTER_LITE_KERNEL(sequence_unpad,
+                     kXPU,
+                     kFloat,
+                     kNCHW,
+                     paddle::lite::kernels::xpu::SequenceUnpadCompute,
+                     def)
+    .BindInput("X", {LiteType::GetTensorTy(TARGET(kXPU))})
+    .BindInput("Length",
+               {LiteType::GetTensorTy(TARGET(kHost), PRECISION(kInt64))})
+    .BindOutput("Out", {LiteType::GetTensorTy(TARGET(kXPU))})
+    .Finalize();
diff --git a/lite/kernels/xpu/sequence_unpad_compute.h b/lite/kernels/xpu/sequence_unpad_compute.h
new file mode 100644
index 0000000000000000000000000000000000000000..8e038383e6ff52e552fc6d53fc74216e02d3caf1
--- /dev/null
+++ b/lite/kernels/xpu/sequence_unpad_compute.h
@@ -0,0 +1,44 @@
+// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#pragma once
+
+#include <memory>
+#include <vector>
+#include "lite/backends/xpu/target_wrapper.h"  // XPUScratchPadGuard
+#include "lite/core/kernel.h"
+
+namespace paddle {
+namespace lite {
+namespace kernels {
+namespace xpu {
+
+class SequenceUnpadCompute
+    : public KernelLite<TARGET(kXPU), PRECISION(kFloat)> {
+ public:
+  using param_t = operators::SequenceUnpadParam;
+
+  void PrepareForRun() override;
+
+  void Run() override;
+
+ private:
+  XPUScratchPadGuard lod_xpu_guard_;
+  std::vector<int> lod_cpu_;
+};
+
+}  // namespace xpu
+}  // namespace kernels
+}  // namespace lite
+}  // namespace paddle
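
---
Note (not part of the patch): a minimal sketch of the scratchpad pattern this change introduces, mirroring how sequence_unpad_compute.cc uses it. XPUScratchPadGuard, MallocScratchPad, Reserve, MemcpySync and IoDirection all come from the diff above; the buffer sizes and LoD values below are made up for illustration.

  // Allocate a small global-memory scratchpad once (e.g. in PrepareForRun()).
  XPUScratchPadGuard guard =
      TargetWrapperXPU::MallocScratchPad(64 * sizeof(int), false /* use_l3 */);

  // Grow on demand in Run(). Reserve() returns early when the current
  // capacity already suffices, so steady-state runs never reallocate.
  std::vector<int> host_lod = {0, 4, 9};  // hypothetical LoD offsets
  guard->Reserve(host_lod.size() * sizeof(int));

  // Reserve() frees and reallocates without preserving contents, so the
  // host-to-device copy must happen after the reserve, as the kernel does.
  TargetWrapperXPU::MemcpySync(guard->addr_,
                               host_lod.data(),
                               host_lod.size() * sizeof(int),
                               IoDirection::HtoD);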