Commit d98e78af authored by xiaolil1

clear debug log to prepare for PR

Parent 7c9aabd1
@@ -499,17 +499,14 @@ class ConvMKLDNNOpKernel : public paddle::framework::OpKernel<T> {
std::vector<std::vector<float>> none_scale = {{0.0f}};
std::vector<std::vector<float>> scale_datas(7,{1.0f});
//scale_in_data 0, scale_in_eltwise_data 1, scale_weights_data 2, scale_bias_data 3, scale_out_data 4, output_shift_scale 5, sum_scale 6
if (is_INT8 && GetScaleMap(scale_map, key) == none_scale){
scale_reuse = false;
} else{
scale_datas = GetScaleMap(scale_map, key);
}
//std::cout<<"scale_reuse = "<<scale_reuse<<std::endl;
if(is_INT8){
if(!scale_reuse){
//std::cout<<"load scale!!!!!!!!"<<std::endl;
int count = is_multi_channel? (g>1? weights_tz[1]*weights_tz[0] : weights_tz[0]) : 1;
scale_in_data = {*(scale_in->data<float>())};
scale_weights_data.resize(count);
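For readability, the reuse check above can be read against this condensed sketch; the map type and the initial `scale_reuse = true` are assumptions inferred from the surrounding code, not taken verbatim from it.

```cpp
// Sketch of the scale cache: one vector of seven scale vectors per key.
// Slots: 0 scale_in, 1 scale_in_eltwise, 2 scale_weights, 3 scale_bias,
//        4 scale_out, 5 output_shift_scale, 6 sum_scale.
std::unordered_map<std::string, std::vector<std::vector<float>>> scale_map;

bool scale_reuse = true;  // assumed initial value
std::vector<std::vector<float>> scale_datas(7, {1.0f});
const std::vector<std::vector<float>> none_scale = {{0.0f}};  // miss sentinel
if (is_INT8 && GetScaleMap(scale_map, key) == none_scale) {
  scale_reuse = false;  // first run for this key: compute and cache the scales
} else {
  scale_datas = GetScaleMap(scale_map, key);  // later runs: reuse all seven slots
}
```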
@@ -531,7 +528,6 @@ class ConvMKLDNNOpKernel : public paddle::framework::OpKernel<T> {
if(fuse_residual_conn){
scale_in_eltwise_data = {*(scale_in_eltwise->data<float>())};
sum_scale[0] = scale_out_data[0] / scale_in_eltwise_data[0];
//SetScaleMap(scale_map, scale_in_eltwise_key, scale_in_eltwise_data);
}
//scale reuse
@@ -541,11 +537,6 @@ class ConvMKLDNNOpKernel : public paddle::framework::OpKernel<T> {
scale_datas[4] = scale_out_data;
scale_datas[5] = output_shift_scale;
scale_datas[6] = sum_scale;
//SetScaleMap(scale_map, key, scale_datas);
//SetScaleMap(scale_map, scale_weights_key, scale_weights_data);
//SetScaleMap(scale_map, scale_out_key, scale_out_data);
//SetScaleMap(scale_map, output_shift_scale_key, output_shift_scale);
//SetScaleMap(scale_map, sum_scale_key, sum_scale);
} else{
scale_in_data = scale_datas[0];
            scale_out_data = scale_datas[4];
@@ -555,37 +546,19 @@ class ConvMKLDNNOpKernel : public paddle::framework::OpKernel<T> {
}
output_shift_scale = scale_datas[5];
sum_scale = scale_datas[6];
//printf("pause!!!");
}
}
//static std::unordered_map<std::string, std::shared_ptr<mkldnn::memory::desc>> md_map;
//bool md_reuse = true;
//auto user_src_md_key = key + "@user_src_md";
//if (GetMdMap(md_map, user_src_md_key) == nullptr){
// md_reuse = false; //we suppose all mds are reused if the first md is in the map.
//}
//auto user_weights_md_key = key + "@user_weights_md";
std::shared_ptr<mkldnn::memory::desc> user_src_md;
std::shared_ptr<mkldnn::memory::desc> user_weights_md;
std::vector<primitive> pipeline;
//std::cout<<"md_reuse = "<<md_reuse<<std::endl;
// if(!md_reuse){
//std::cout<<"create md.......... "<<std::endl;
user_src_md.reset(new mkldnn::memory::desc(platform::MKLDNNMemDesc(
{src_tz}, paddle::framework::ToMKLDNNDataType(input->type()), input->format())));
user_weights_md.reset(new mkldnn::memory::desc(platform::MKLDNNMemDesc(
{weights_tz}, platform::MKLDNNGetDataType<float>(),
(g == 1) ? mkldnn::memory::format::oihw : mkldnn::memory::format::goihw)));
// SetMdMap(md_map, user_src_md_key, user_src_md);
// SetMdMap(md_map, user_weights_md_key, user_weights_md);
// } else{
// user_src_md = GetMdMap(md_map, user_src_md_key);
// user_weights_md = GetMdMap(md_map, user_weights_md_key);
// }
/* create memory descriptor for convolution without specified format
* ('any') which lets a primitive (convolution in this case) choose
* the memory format preferred for best performance
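The comment above refers to MKL-DNN's `any` placeholder format. A minimal, self-contained sketch of the idea using the mkldnn 0.x API (dimensions are illustrative, not taken from this kernel):

```cpp
#include "mkldnn.hpp"
using namespace mkldnn;

// Sketch: descriptors created with format 'any' let the convolution
// primitive descriptor choose the memory layout it prefers.
void any_format_demo() {
  engine eng(engine::cpu, 0);
  memory::dims src_tz = {1, 3, 224, 224}, w_tz = {8, 3, 3, 3},
               dst_tz = {1, 8, 222, 222};
  auto src_md = memory::desc(src_tz, memory::data_type::f32, memory::format::any);
  auto w_md   = memory::desc(w_tz,   memory::data_type::f32, memory::format::any);
  auto dst_md = memory::desc(dst_tz, memory::data_type::f32, memory::format::any);
  convolution_forward::desc desc(
      prop_kind::forward, algorithm::convolution_direct, src_md, w_md, dst_md,
      /*strides=*/{1, 1}, /*padding_l=*/{0, 0}, /*padding_r=*/{0, 0},
      padding_kind::zero);
  auto pd = convolution_forward::primitive_desc(desc, eng);
  // pd.src_primitive_desc() etc. now report the layouts actually chosen.
}
```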
@@ -597,16 +570,11 @@ class ConvMKLDNNOpKernel : public paddle::framework::OpKernel<T> {
std::shared_ptr<mkldnn::convolution_forward::primitive_desc> conv_pd;
auto bias_tz = paddle::framework::vectorize2int(bias->dims());
//auto src_md_key = key + "@src_md";
//auto weights_md_key = key + "@weights_md_key";
//auto dst_md_key = key + "@dst_md_key";
//auto bias_md_key = key + "@bias_md_key";
std::shared_ptr<mkldnn::memory::desc> src_md;
std::shared_ptr<mkldnn::memory::desc> weights_md;
std::shared_ptr<mkldnn::memory::desc> dst_md;
if(is_INT8){
//if(!md_reuse){
src_md.reset(new mkldnn::memory::desc(platform::MKLDNNMemDesc(
src_tz, memory::data_type::u8, chosen_memory_format)));
weights_md.reset(new mkldnn::memory::desc(platform::MKLDNNMemDesc(
@@ -621,25 +589,12 @@ class ConvMKLDNNOpKernel : public paddle::framework::OpKernel<T> {
if(force_fp32_output)
dst_dt = paddle::framework::ToMKLDNNDataType(std::type_index(typeid(float)));
dst_md.reset(new mkldnn::memory::desc(platform::MKLDNNMemDesc(dst_tz, dst_dt, chosen_memory_format)));
//SetMdMap(md_map, src_md_key, src_md);
//SetMdMap(md_map, weights_md_key, weights_md);
//SetMdMap(md_map, dst_md_key, dst_md);
//} else{
// src_md = GetMdMap(md_map, src_md_key);
// weights_md = GetMdMap(md_map, weights_md_key);
// dst_md = GetMdMap(md_map, dst_md_key);
//}
// create a conv primitive descriptor and save it for usage in backward
if (bias) {
std::shared_ptr<mkldnn::memory::desc> bias_md;
//if(!md_reuse){
bias_md.reset(new mkldnn::memory::desc(platform::MKLDNNMemDesc(
bias_tz, memory::data_type::s32, memory::format::x)));
// SetMdMap(md_map, bias_md_key, bias_md);
//} else{
// bias_md = GetMdMap(md_map, bias_md_key);
//}
conv_pd = ConvFwdPrimitiveDesc(*src_md, *weights_md, *bias_md, *dst_md,
strides, paddings, mkldnn_engine,
@@ -652,31 +607,16 @@ class ConvMKLDNNOpKernel : public paddle::framework::OpKernel<T> {
output_shift_scale, sum_scale[0], is_test);
}
} else{
//if(!md_reuse){
src_md.reset(new mkldnn::memory::desc(platform::MKLDNNMemDesc(
src_tz, platform::MKLDNNGetDataType<float>(), chosen_memory_format)));
weights_md.reset(new mkldnn::memory::desc(platform::MKLDNNMemDesc(
weights_tz, platform::MKLDNNGetDataType<float>(), chosen_memory_format)));
dst_md.reset(new mkldnn::memory::desc(platform::MKLDNNMemDesc(
dst_tz, platform::MKLDNNGetDataType<float>(), chosen_memory_format)));
// SetMdMap(md_map, src_md_key, src_md);
// SetMdMap(md_map, weights_md_key, weights_md);
// SetMdMap(md_map, dst_md_key, dst_md);
//} else{
// src_md = GetMdMap(md_map, src_md_key);
// weights_md = GetMdMap(md_map, weights_md_key);
// dst_md = GetMdMap(md_map, dst_md_key);
//}
// create a conv primitive descriptor and save it for usage in backward
if (bias) {
std::shared_ptr<mkldnn::memory::desc> bias_md;
//if(!md_reuse){
bias_md.reset(new mkldnn::memory::desc(platform::MKLDNNMemDesc(
bias_tz, platform::MKLDNNGetDataType<float>(), memory::format::x)));
// SetMdMap(md_map, bias_md_key, bias_md);
//} else{
// bias_md = GetMdMap(md_map, bias_md_key);
//}
conv_pd = ConvFwdPrimitiveDesc(*src_md, *weights_md, *bias_md, *dst_md,
strides, paddings, mkldnn_engine,
fuse_relu, fuse_residual_conn, is_test);
@@ -692,7 +632,6 @@ class ConvMKLDNNOpKernel : public paddle::framework::OpKernel<T> {
ConvMKLDNNHandler handler(conv_pd, dev_ctx, mkldnn_engine, key);
handler.key_suffix_map_ = key_suffix_map;
// create mkldnn memory from input tensors (data/weights)
auto user_src_memory_p =
handler.AcquireSrcMemory(*user_src_md, to_void_cast<T>(input_data));
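The handler's `Acquire*` calls here follow a get-or-create pattern keyed by the op key plus a suffix. A hedged sketch of that pattern, with a plain map standing in for the device-context blob store (an assumption about the handler's internals, not Paddle's verbatim code):

```cpp
#include <memory>
#include <string>
#include <unordered_map>
#include "mkldnn.hpp"

// Sketch: return the cached memory for key+suffix, creating it on first use.
std::shared_ptr<mkldnn::memory> AcquireMemory(
    std::unordered_map<std::string, std::shared_ptr<mkldnn::memory>>& blob_map,
    const std::string& key, const std::string& suffix,
    const mkldnn::memory::desc& md, const mkldnn::engine& eng, void* ptr) {
  const std::string local_key = key + suffix;  // e.g. key + "@user_src_mem_p"
  auto it = blob_map.find(local_key);
  if (it != blob_map.end()) {
    it->second->set_data_handle(ptr);  // reuse the primitive, repoint the data
    return it->second;
  }
  auto mem = std::make_shared<mkldnn::memory>(
      mkldnn::memory::primitive_desc(md, eng), ptr);
  blob_map.emplace(local_key, mem);
  return mem;
}
```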
auto user_weights_memory_p = handler.AcquireWeightsMemory(
@@ -714,7 +653,6 @@ class ConvMKLDNNOpKernel : public paddle::framework::OpKernel<T> {
std::shared_ptr<mkldnn::memory> dst_memory_p;
bool need_s8_to_u8 = false;
//auto user_residual_md_key = key + "@user_residual_md";
if(fuse_residual_conn) {
auto residual_param = ctx.Input<Tensor>("ResidualData");
PADDLE_ENFORCE_EQ(output->dims(), residual_param->dims(),
@@ -723,17 +661,12 @@ class ConvMKLDNNOpKernel : public paddle::framework::OpKernel<T> {
auto residual_dt = paddle::framework::ToMKLDNNDataType(residual_param->type());
if(residual_param->format() != handler.GetDstFormat()) {
std::shared_ptr<mkldnn::memory::desc> user_residual_md;
//if(!md_reuse){
auto residual_data_tz =
paddle::framework::vectorize2int(residual_param->dims());
auto residual_data_type =
paddle::framework::ToMKLDNNDataType(residual_param->type());
user_residual_md.reset(new mkldnn::memory::desc(platform::MKLDNNMemDesc(
residual_data_tz, residual_data_type, residual_param->format())));
//SetMdMap(md_map, user_residual_md_key, user_residual_md);
//} else{
// user_residual_md = GetMdMap(md_map, user_residual_md_key);
//}
if(is_INT8){
PADDLE_ENFORCE(
force_fp32_output == false,
@@ -817,18 +750,11 @@ class ConvMKLDNNOpKernel : public paddle::framework::OpKernel<T> {
// create convolution op primitive
std::shared_ptr<mkldnn::convolution_forward> conv_p;
//auto scale_bias_key = key + "@scale_bias";
//auto user_bias_md_key = key + "@user_bias_md";
if (bias) {
const float* bias_data = bias->data<float>();
std::shared_ptr<mkldnn::memory::desc> user_bias_md;
//if(!md_reuse){
user_bias_md.reset(new mkldnn::memory::desc(platform::MKLDNNMemDesc(
{bias_tz}, platform::MKLDNNGetDataType<float>(), memory::format::x)));
// SetMdMap(md_map, user_bias_md_key, user_bias_md);
//} else{
// user_bias_md = GetMdMap(md_map, user_bias_md_key);
//}
auto user_bias_memory_p =
handler.AcquireBiasMemory(*user_bias_md, to_void_cast<float>(bias_data));
std::shared_ptr<mkldnn::memory> bias_memory_p;
@@ -845,7 +771,6 @@ class ConvMKLDNNOpKernel : public paddle::framework::OpKernel<T> {
scale_bias_data[i] = scale_in_data[0] * scale_weights_data[i];
}
scale_datas[3] = scale_bias_data;
//SetScaleMap(scale_map, scale_bias_key, scale_bias_data);
} else{
scale_bias_data = scale_datas[3];
}
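For context, the loop above implements the usual INT8 bias convention: bias is quantized to s32 with the product of the input scale and the per-channel weight scales. A small self-contained sketch:

```cpp
#include <vector>

// Sketch: scale_bias[i] = scale_in * scale_weights[i]; an f32 bias value b
// then quantizes to round(b * scale_bias[i]) in the s32 bias tensor.
std::vector<float> BiasScales(float scale_in,
                              const std::vector<float>& scale_weights) {
  std::vector<float> scale_bias(scale_weights.size());
  for (size_t i = 0; i < scale_weights.size(); ++i) {
    scale_bias[i] = scale_in * scale_weights[i];
  }
  return scale_bias;
}
```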
@@ -898,26 +823,6 @@ class ConvMKLDNNOpKernel : public paddle::framework::OpKernel<T> {
return {{0.0f}};
}
//void SetMdMap(std::unordered_map<std::string, std::shared_ptr<mkldnn::memory::desc>> &md_map,
// const std::string& name, std::shared_ptr<mkldnn::memory::desc> mds) const {
// auto it = md_map.find(name);
// if (it == md_map.end()) {
// md_map[name] = mds; // create new blob
// } else {
// (*it).second = mds; // set data to existing blob
// }
// return;
//}
//std::shared_ptr<mkldnn::memory::desc> GetMdMap(std::unordered_map<std::string, std::shared_ptr<mkldnn::memory::desc>> md_map,
// const std::string& name) const {
// auto it = md_map.find(name);
// if (it != md_map.end()) {
// return (*it).second;
// }
// return nullptr;
//}
mkldnn::primitive_attr CreatePostOps(bool fuse_relu, bool fuse_residual_conn,
const std::vector<float> output_shift_scale, float sum_scale) const {
mkldnn::primitive_attr conv_attr;
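Although only the commented-out md_map helpers are deleted by this commit, the kernel keeps the analogous scale cache. A sketch of what `SetScaleMap`/`GetScaleMap` plausibly look like, reconstructed from the removed `SetMdMap`/`GetMdMap` bodies and the `{{0.0f}}` miss sentinel visible above (an assumption, not the verbatim implementation):

```cpp
#include <string>
#include <unordered_map>
#include <vector>

using ScaleMap = std::unordered_map<std::string, std::vector<std::vector<float>>>;

// Sketch: insert or overwrite the cached scales for a key.
void SetScaleMap(ScaleMap& scale_map, const std::string& name,
                 const std::vector<std::vector<float>>& scales) {
  auto it = scale_map.find(name);
  if (it == scale_map.end()) {
    scale_map[name] = scales;  // create new entry
  } else {
    it->second = scales;       // overwrite existing entry
  }
}

// Sketch: fetch cached scales; {{0.0f}} is the miss sentinel the caller
// compares against none_scale.
std::vector<std::vector<float>> GetScaleMap(const ScaleMap& scale_map,
                                            const std::string& name) {
  auto it = scale_map.find(name);
  if (it != scale_map.end()) return it->second;
  return {{0.0f}};
}
```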