Commit 91838c32 authored by xiaolil1, committed by ceci3

Optimize Quantize Op with primitive reuse. (#15929)

test=develop
Parent 1c58eee9
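Note on the pattern this commit introduces: instead of rebuilding the MKL-DNN reorder primitive on every kernel invocation, the kernel builds a string key from the source dims, scale, signedness, and output name, stores the reorder primitive and its source/destination memory objects in the device context with SetBlob, and fetches them with GetBlob on later iterations. The standalone sketch below is only an illustration of that create-on-miss / reuse-on-hit idea under simplified assumptions: a plain std::unordered_map stands in for Paddle's MKLDNNDeviceContext blob map, and the FakeReorder struct stands in for the real mkldnn::reorder primitive, so every name in it is hypothetical rather than a Paddle or MKL-DNN API.

```cpp
// Minimal sketch of key-based primitive reuse. FakeReorder and blob_cache are
// hypothetical stand-ins; the real kernel caches mkldnn::reorder and
// mkldnn::memory objects inside MKLDNNDeviceContext via SetBlob/GetBlob.
#include <iostream>
#include <memory>
#include <sstream>
#include <string>
#include <unordered_map>
#include <vector>

// Stand-in for an expensive-to-create MKL-DNN primitive.
struct FakeReorder {
  explicit FakeReorder(float scale) : scale(scale) {}
  float scale;
};

// Stand-in for the device context's string-keyed blob cache.
std::unordered_map<std::string, std::shared_ptr<void>> blob_cache;

// Mirrors CreateKey in the diff: the key encodes everything that would require
// a different primitive (shape, scale, signedness, output variable name).
std::string CreateKey(const std::vector<int>& src_tz, float scale_data,
                      bool is_negative, const std::string& output_name) {
  std::ostringstream key;
  for (int d : src_tz) key << d << "x";
  key << scale_data << "#" << is_negative << "#" << output_name;
  return key.str();
}

std::shared_ptr<FakeReorder> GetOrCreateReorder(const std::string& key,
                                                float scale_data) {
  const std::string key_prim = key + "@reorder_p";
  auto it = blob_cache.find(key_prim);
  if (it != blob_cache.end()) {
    // Cache hit: reuse the primitive; only its data handles would be rebound.
    return std::static_pointer_cast<FakeReorder>(it->second);
  }
  // Cache miss: build the primitive once and store it for later iterations.
  auto reorder_p = std::make_shared<FakeReorder>(scale_data);
  blob_cache[key_prim] = reorder_p;
  return reorder_p;
}

int main() {
  std::string key = CreateKey({1, 3, 224, 224}, 0.5f, true, "Output");
  auto first = GetOrCreateReorder(key, 0.5f);   // created on first call
  auto second = GetOrCreateReorder(key, 0.5f);  // reused on second call
  std::cout << "same primitive reused: " << (first == second) << "\n";
  return 0;
}
```

Keying on the output variable name keeps cache entries separate for each quantize op in the graph, while including shape, scale, and signedness in the key means a change in any of those falls back to creating a fresh primitive instead of reusing a stale one.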
...
@@ -30,6 +30,18 @@
 using framework::DataLayout;
 using mkldnn::stream;
 using platform::GetMKLDNNFormat;
+
+std::string CreateKey(const paddle::framework::ExecutionContext& ctx,
+                      const std::vector<int>& src_tz, const float scale_data,
+                      const bool is_negative) {
+  std::string key;
+  key.reserve(platform::MKLDNNHandler::MaxKeyLength);
+  platform::MKLDNNHandler::AppendKeyDims(&key, src_tz);
+  platform::MKLDNNHandler::AppendKey(&key, std::to_string(scale_data));
+  platform::MKLDNNHandler::AppendKey(&key, std::to_string(is_negative));
+  platform::MKLDNNHandler::AppendKey(&key, ctx.op().Output("Output"));
+  return key;
+}
 
 template <typename T>
 class QuantOpKernel : public framework::OpKernel<T> {
  public:
@@ -47,32 +59,61 @@ class QuantOpKernel : public framework::OpKernel<T> {
     const T* input_data = input->data<T>();
 
-    mkldnn::primitive_attr attri;
-    int mask = 0;
-    attri.set_output_scales(mask, {scale_data});
-
-    auto src_md = platform::MKLDNNMemDesc({src_tz}, memory::data_type::f32,
-                                          input->format());
-    auto src_pd = mkldnn::memory::primitive_desc(src_md, engine);
-    auto src_memory =
-        std::make_shared<mkldnn::memory>(src_pd, to_void_cast<T>(input_data));
-    std::shared_ptr<primitive::at> src_memory_p =
-        std::shared_ptr<primitive::at>(new primitive::at(*src_memory));
-
     bool is_negative = ctx.Attr<bool>("is_negative_input");
-    std::shared_ptr<mkldnn::memory::primitive_desc> dst_pd;
+    std::string key = CreateKey(ctx, src_tz, scale_data, is_negative);
+    const std::string key_prim = key + "@reorder_p";
+    const std::string key_src_mem = key + "@src_mem";
+    const std::string key_dst_mem = key + "@dst_mem";
+
+    std::shared_ptr<mkldnn::memory> src_memory;
     std::shared_ptr<mkldnn::memory> dst_memory;
-    if (is_negative) {
-      platform::ConvMKLDNNHandler::SetDstMemory<int8_t>(
-          ctx, output, dst_tz, engine, dst_pd, dst_memory);
+    std::shared_ptr<reorder> reorder_p;
+    reorder_p = std::static_pointer_cast<reorder>(dev_ctx.GetBlob(key_prim));
+
+    if (reorder_p == nullptr) {
+      mkldnn::primitive_attr attri;
+      int mask = 0;
+      attri.set_output_scales(mask, {scale_data});
+
+      auto src_md = platform::MKLDNNMemDesc({src_tz}, memory::data_type::f32,
+                                            input->format());
+      auto src_pd = mkldnn::memory::primitive_desc(src_md, engine);
+      src_memory =
+          std::make_shared<mkldnn::memory>(src_pd, to_void_cast<T>(input_data));
+      std::shared_ptr<primitive::at> src_memory_p =
+          std::shared_ptr<primitive::at>(new primitive::at(*src_memory));
+
+      std::shared_ptr<mkldnn::memory::primitive_desc> dst_pd;
+      if (is_negative) {
+        platform::ConvMKLDNNHandler::SetDstMemory<int8_t>(
+            ctx, output, dst_tz, engine, dst_pd, dst_memory);
+      } else {
+        platform::ConvMKLDNNHandler::SetDstMemory<uint8_t>(
+            ctx, output, dst_tz, engine, dst_pd, dst_memory);
+      }
+
+      auto reorder_pd = std::shared_ptr<reorder::primitive_desc>(
+          new reorder::primitive_desc(src_pd, *dst_pd, attri));
+      reorder_p = std::shared_ptr<reorder>(
+          new reorder(*reorder_pd, *src_memory_p, *dst_memory));
+
+      dev_ctx.SetBlob(key_prim, reorder_p);
+      dev_ctx.SetBlob(key_src_mem, src_memory);
+      dev_ctx.SetBlob(key_dst_mem, dst_memory);
     } else {
-      platform::ConvMKLDNNHandler::SetDstMemory<uint8_t>(
-          ctx, output, dst_tz, engine, dst_pd, dst_memory);
+      src_memory = std::static_pointer_cast<mkldnn::memory>(
+          dev_ctx.GetBlob(key_src_mem));
+      src_memory->set_data_handle(to_void_cast<T>(input_data));
+
+      dst_memory = std::static_pointer_cast<mkldnn::memory>(
+          dev_ctx.GetBlob(key_dst_mem));
+      auto place = ctx.GetPlace();
+      if (is_negative) {
+        dst_memory->set_data_handle(output->mutable_data<int8_t>(place));
+      } else {
+        dst_memory->set_data_handle(output->mutable_data<uint8_t>(place));
+      }
     }
 
-    auto reorder_pd = std::shared_ptr<reorder::primitive_desc>(
-        new reorder::primitive_desc(src_pd, *dst_pd, attri));
-    auto reorder_p = std::shared_ptr<reorder>(
-        new reorder(*reorder_pd, *src_memory_p, *dst_memory));
     pipeline.push_back(*reorder_p);
     stream(stream::kind::eager).submit(pipeline).wait();
 
     output->set_layout(DataLayout::kMKLDNN);
...
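On a cache hit, the kernel rebuilds nothing; it only rebinds the cached source and destination memory objects to the current iteration's buffers with set_data_handle before resubmitting the same reorder primitive. The toy snippet below uses a hypothetical FakeMemory stand-in rather than the real mkldnn::memory class, and is meant only to show what that rebinding amounts to: the wrapper object survives across calls while the raw pointer it exposes is swapped.

```cpp
// Sketch of the cache-hit path with a stand-in type: cached "memory" objects
// are kept across iterations and only their data handles are swapped, mirroring
// src_memory->set_data_handle(...) / dst_memory->set_data_handle(...) above.
#include <cstdint>
#include <iostream>
#include <memory>
#include <vector>

// Stand-in for mkldnn::memory: a wrapper that points at external data.
struct FakeMemory {
  void* handle = nullptr;
  void set_data_handle(void* p) { handle = p; }
};

int main() {
  auto src_memory = std::make_shared<FakeMemory>();
  auto dst_memory = std::make_shared<FakeMemory>();

  std::vector<float> batch0(8, 1.0f), batch1(8, 2.0f);
  std::vector<int8_t> out0(8), out1(8);

  // Iteration 0: bind the first batch's buffers; the real kernel would now
  // submit the cached reorder primitive to the stream.
  src_memory->set_data_handle(batch0.data());
  dst_memory->set_data_handle(out0.data());

  // Iteration 1: the same memory objects are reused; only the handles change.
  src_memory->set_data_handle(batch1.data());
  dst_memory->set_data_handle(out1.data());

  std::cout << "src now points at batch1: "
            << (src_memory->handle == batch1.data()) << "\n";
  return 0;
}
```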