未验证 提交 b6cb22bf 编写于 作者: C Cwndmiao 提交者: GitHub

[LITE][XPU] 1. Add sequence_unpad kernel for XPU; 2. Bugfix in sequence_unpad kernel for x86; 3. Refine TargetWrapperXPU

[LITE][XPU] 1. Add sequence_unpad kernel for XPU; 2. Bugfix in sequence_unpad kernel for x86, as InferShapeImpl() is now empty in lite/operators/sequence_unpad_op.cc; 3. Refine TargetWrapperXPU; (#4237)
上级 784f75fa
......@@ -18,6 +18,27 @@
namespace paddle {
namespace lite {
// Grows the scratch pad to at least |new_size| bytes.
// |size_| only ever increases; shrink requests are ignored. The old
// contents are NOT preserved across a reallocation (scratch semantics).
void XPUScratchPad::Reserve(size_t new_size) {
  // Already large enough: nothing to do.
  if (new_size <= size_) {
    return;
  }
  if (is_l3_) {
    // L3 (on-chip) buffers cannot be reallocated here.
    CHECK(false) << "Not supported if is_l3_ == true";
  } else {
    // Drop the old device buffer and allocate a larger one.
    TargetWrapperXPU::Free(addr_);
    addr_ = TargetWrapperXPU::Malloc(new_size);
    size_ = new_size;
  }
}
void XPUScratchPadDeleter::operator()(XPUScratchPad* sp) const {
if (!sp->is_l3_) {
TargetWrapperXPU::Free(sp->addr_);
}
delete sp;
}
void* TargetWrapperXPU::Malloc(size_t size) {
void* ptr{nullptr};
XPU_CALL(xpu_malloc(&ptr, size));
......@@ -51,7 +72,7 @@ XPUScratchPadGuard TargetWrapperXPU::MallocScratchPad(size_t size,
ptr = TargetWrapperXPU::Malloc(size);
}
CHECK(ptr != nullptr) << "size = " << size << ", use_l3 = " << use_l3;
return XPUScratchPadGuard(new XPUScratchPad(ptr, use_l3));
return XPUScratchPadGuard(new XPUScratchPad(ptr, size, use_l3));
}
std::string TargetWrapperXPU::multi_encoder_precision; // NOLINT
......
......@@ -37,19 +37,19 @@ const int XPU_MAX_LOD_SEQ_LEN = 512;
using TargetWrapperXPU = TargetWrapper<TARGET(kXPU)>;
// A reusable chunk of XPU device (or L3) memory.
// Managed through XPUScratchPadGuard; see XPUScratchPadDeleter.
struct XPUScratchPad {
  // |size| must be the byte size of the buffer at |addr| so that
  // Reserve() can decide whether a reallocation is needed.
  // NOTE: the old two-argument constructor (addr, is_l3) is removed —
  // it left |size_| at 0, which broke Reserve()'s size tracking.
  XPUScratchPad(void* addr, size_t size, bool is_l3)
      : addr_(addr), size_(size), is_l3_(is_l3) {}

  // XXX(miaotianxiang): |size_| increases monotonically
  void Reserve(size_t new_size);

  void* addr_{nullptr};
  size_t size_{0};
  bool is_l3_{false};
};
// Custom deleter for XPUScratchPadGuard: frees the underlying device
// buffer (unless it lives in L3) and then the pad object itself.
// Defined out-of-line in target_wrapper.cc; the stale inline definition
// is removed — keeping both the in-class body and a re-declaration of
// the same member is ill-formed and duplicated the .cc implementation.
struct XPUScratchPadDeleter {
  void operator()(XPUScratchPad* sp) const;
};
using XPUScratchPadGuard = std::unique_ptr<XPUScratchPad, XPUScratchPadDeleter>;
......
......@@ -13,6 +13,7 @@
// limitations under the License.
#pragma once
#include <vector>
#include "lite/backends/x86/math/sequence_padding.h"
#include "lite/core/kernel.h"
#include "lite/core/op_registry.h"
......@@ -34,6 +35,30 @@ class SequenceUnpadCompute
auto& param = this->template Param<param_t>();
auto& ctx = this->ctx_->template As<X86Context>();
auto x_dims = param.X->dims();
auto len_dims = param.Length->dims();
auto* seq_len_ptr = param.Length->template data<int64_t>();
int64_t batch_size = len_dims[0];
std::vector<uint64_t> out_lod0(batch_size + 1, 0);
for (int64_t i = 0; i < batch_size; ++i) {
out_lod0[i + 1] = out_lod0[i] + seq_len_ptr[i];
}
paddle::lite::LoD out_lod;
out_lod.push_back(out_lod0);
int64_t out_dim0 = out_lod0.back();
std::vector<int64_t> out_dims{out_dim0};
if (x_dims.size() == 2) {
out_dims.push_back(1);
} else {
for (size_t i = 2; i < x_dims.size(); ++i) {
out_dims.push_back(x_dims[i]);
}
}
param.Out->Resize(out_dims);
param.Out->set_lod(out_lod);
param.Out->template mutable_data<T>();
int64_t padded_length = param.X->dims()[1];
math::UnpaddingLoDTensorFunctor<lite::TargetType::kX86, T>()(
......
......@@ -38,6 +38,7 @@ else()
# Extra (non-basic) XPU kernels; each links against the common kernel deps.
add_kernel(match_matrix_tensor_compute_xpu XPU extra SRCS match_matrix_tensor_compute.cc DEPS ${lite_kernel_deps})
add_kernel(var_conv_2d_compute_xpu XPU extra SRCS var_conv_2d_compute.cc DEPS ${lite_kernel_deps})
add_kernel(search_grnn_compute_xpu XPU extra SRCS search_grnn_compute.cc DEPS ${lite_kernel_deps})
# New in this change: sequence_unpad kernel for XPU.
add_kernel(sequence_unpad_compute_xpu XPU extra SRCS sequence_unpad_compute.cc DEPS ${lite_kernel_deps})
# extra(fused kernel)
add_kernel(__xpu__resnet50_compute_xpu XPU extra SRCS __xpu__resnet50_compute.cc DEPS ${lite_kernel_deps})
......
......@@ -42,6 +42,8 @@ void XPUSequencePoolCompute::Run() {
xdnn::Pooling_t pool_type = xdnn::Pooling_t::MAX_WITHOUT_INDEX;
if (pool_type_str == "MAX") {
} else if (pool_type_str == "SUM") {
pool_type = xdnn::Pooling_t::SUM;
} else if (pool_type_str == "LAST") {
pool_type = xdnn::Pooling_t::LAST;
} else {
......
// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "lite/kernels/xpu/sequence_unpad_compute.h"
#include "lite/backends/xpu/xpu_header_sitter.h"
#include "lite/core/op_registry.h"
namespace paddle {
namespace lite {
namespace kernels {
namespace xpu {
// One-time setup: pre-allocate a device-side buffer big enough for the
// largest LoD we expect, and reserve matching host-side staging storage
// so Run() normally performs no allocations.
void SequenceUnpadCompute::PrepareForRun() {
  const size_t lod_bytes = XPU_MAX_LOD_SIZE * sizeof(int);
  lod_xpu_guard_ =
      TargetWrapperXPU::MallocScratchPad(lod_bytes, false /* use_l3 */);
  lod_cpu_.reserve(XPU_MAX_LOD_SIZE);
}
void SequenceUnpadCompute::Run() {
auto& param = this->template Param<param_t>();
auto& ctx = this->ctx_->template As<XPUContext>();
auto x_dims = param.X->dims();
auto len_dims = param.Length->dims();
// XXX(miaotianxiang): Target of tensor |Length| is |kHost|.
auto* seq_len_ptr = param.Length->template data<int64_t>();
int64_t batch_size = len_dims[0];
std::vector<uint64_t> out_lod0(batch_size + 1, 0);
for (int64_t i = 0; i < batch_size; ++i) {
out_lod0[i + 1] = out_lod0[i] + seq_len_ptr[i];
}
paddle::lite::LoD out_lod;
out_lod.push_back(out_lod0);
int64_t out_dim0 = out_lod0.back();
std::vector<int64_t> out_dims{out_dim0};
if (x_dims.size() == 2) {
out_dims.push_back(1);
} else {
for (size_t i = 2; i < x_dims.size(); ++i) {
out_dims.push_back(x_dims[i]);
}
}
param.Out->Resize(out_dims);
param.Out->set_lod(out_lod);
lod_cpu_ = {0};
for (int64_t i = 0; i < batch_size; ++i) {
int offset =
lod_cpu_.back() + static_cast<int>(param.Length->data<int64_t>()[i]);
lod_cpu_.push_back(offset);
}
lod_xpu_guard_->Reserve((batch_size + 1) * sizeof(int));
TargetWrapperXPU::MemcpySync(lod_xpu_guard_->addr_,
lod_cpu_.data(),
(batch_size + 1) * sizeof(int),
IoDirection::HtoD);
int dim = param.Out->numel() / out_dim0;
int r = xdnn::sequence_unpad(
ctx.GetRawContext(), /* ctx */
param.X->data<float>(), /* pad_data */
param.Out->mutable_data<float>(TARGET(kXPU)), /* seq_data */
reinterpret_cast<int*>(lod_xpu_guard_->addr_), /* sequence */
param.X->dims()[1], /* pad_seq_len */
batch_size, /* batch_size */
dim /* dim */);
CHECK_EQ(r, 0);
}
} // namespace xpu
} // namespace kernels
} // namespace lite
} // namespace paddle
// Register the float/NCHW XPU kernel for the sequence_unpad op.
// |X| and |Out| live on the XPU device; |Length| is an int64 tensor kept
// on the host (see the note in Run()), so the kernel can read it directly.
REGISTER_LITE_KERNEL(sequence_unpad,
                     kXPU,
                     kFloat,
                     kNCHW,
                     paddle::lite::kernels::xpu::SequenceUnpadCompute,
                     def)
    .BindInput("X", {LiteType::GetTensorTy(TARGET(kXPU))})
    .BindInput("Length",
               {LiteType::GetTensorTy(TARGET(kHost), PRECISION(kInt64))})
    .BindOutput("Out", {LiteType::GetTensorTy(TARGET(kXPU))})
    .Finalize();
// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#pragma once
#include <memory>
#include <vector>
#include "lite/backends/xpu/target_wrapper.h" // XPUScratchPadGuard
#include "lite/core/kernel.h"
namespace paddle {
namespace lite {
namespace kernels {
namespace xpu {
// XPU implementation of the sequence_unpad op: strips padding from the
// padded input |X| according to the per-sequence lengths in |Length| and
// writes the result as a LoD tensor |Out|.
class SequenceUnpadCompute
    : public KernelLite<TARGET(kXPU), PRECISION(kFloat)> {
 public:
  using param_t = operators::SequenceUnpadParam;

  // Pre-allocates the device-side LoD buffer and host staging storage.
  void PrepareForRun() override;

  void Run() override;

 private:
  // Device-side LoD offsets, passed to the xdnn kernel.
  XPUScratchPadGuard lod_xpu_guard_;
  // Host-side staging area for LoD offsets before the HtoD copy.
  std::vector<int> lod_cpu_;
};
} // namespace xpu
} // namespace kernels
} // namespace lite
} // namespace paddle
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册