未验证 提交 78a303c8 编写于 作者: Y ysh329 提交者: GitHub

[CORE][PROFILE] Write output tensor to file for each OP when precision profiler enabled (#4255)

* [PROFILE] Write output tensor to file for each OP when precision profiler enabled. test=develop

* create output tensor files dir. test=develop
上级 b6cb22bf
......@@ -169,8 +169,10 @@ class Optimizer {
"runtime_context_assign_pass",
"argument_type_display_pass",
"lite_reshape_fuse_pass",
"memory_optimize_pass"}};
#ifndef LITE_WITH_PRECISION_PROFILE
"memory_optimize_pass"
#endif
}};
if (passes.size() == 1) {
// multi_stream_analysis_pass must be in the front of
......
......@@ -18,10 +18,16 @@
* of each kernel.
*/
#pragma once
#include <sys/time.h>
#include <time.h>
#include <cmath>
#include <memory>
#include <string>
#include <vector>
#include "lite/core/program.h"
#include "lite/utils/io.h"
#ifdef LITE_WITH_X86
#include "lite/fluid/float16.h"
#endif
......@@ -40,14 +46,50 @@ namespace paddle {
namespace lite {
namespace profile {
// Returns a timestamp string "YYYYMMDD_HHMMSS_mmm" (local time, millisecond
// suffix) used to name the per-run output directory of the precision profiler.
//
// Fix: the previous version concatenated each field via std::to_string with no
// zero padding, so single-digit fields were ambiguous and distinct instants
// could collide (e.g. 1:23 and 12:3 both produced "123"); padded fields also
// make directory names sort lexicographically by time.
static const std::string get_date_str() {
  struct tm tm_time;
  time_t timestamp = time(NULL);
  // localtime_r: thread-safe conversion to local calendar time.
  localtime_r(&timestamp, &tm_time);
  struct timeval tv;
  gettimeofday(&tv, NULL);  // sub-second precision for the millisecond suffix
  char buf[32];
  snprintf(buf,
           sizeof(buf),
           "%04d%02d%02d_%02d%02d%02d_%03d",
           1900 + tm_time.tm_year,
           1 + tm_time.tm_mon,  // tm_mon is 0-based
           tm_time.tm_mday,
           tm_time.tm_hour,
           tm_time.tm_min,
           tm_time.tm_sec,
           static_cast<int>(tv.tv_usec / 1000));
  return std::string(buf);
}
// Sanitizes a tensor name so it can be used as a file name: every '/' is
// replaced with '_' (tensor names may contain scope separators that would
// otherwise be interpreted as path components). All other characters are
// kept unchanged; an empty name yields an empty result.
inline std::string generate_valid_tensor_name(const std::string& name) {
  std::string sanitized(name);
  for (auto& ch : sanitized) {
    if (ch == '/') {
      ch = '_';
    }
  }
  return sanitized;
}
template <typename dtype>
static bool write_tensorfile(const Tensor* tensor, const std::string& locate) {
if (locate.find('/') != std::string::npos) {
return false;
static bool write_tensorfile(
const Tensor* tensor,
const std::string& tensor_name,
const std::string prefix_path = "/storage/emulated/0/") {
std::string new_tensor_name = generate_valid_tensor_name(tensor_name);
if (tensor_name.find('/') != std::string::npos) {
LOG(ERROR) << "--> tensor name is abnormal with '\\':" << tensor_name
<< " !!!, replace with '_'," << new_tensor_name
<< new_tensor_name;
}
FILE* fp = fopen(locate.c_str(), "w");
std::string tensor_save_path = prefix_path + new_tensor_name + ".txt";
FILE* fp = fopen(tensor_save_path.c_str(), "w");
if (fp == nullptr) {
LOG(ERROR) << "file open field " << locate;
LOG(ERROR) << "failed open file " << tensor_save_path;
return false;
} else {
const dtype* data = tensor->data<dtype>();
......@@ -56,19 +98,23 @@ static bool write_tensorfile(const Tensor* tensor, const std::string& locate) {
}
}
fclose(fp);
LOG(INFO) << "write tensor " << tensor_name
<< " to file:" << tensor_save_path;
return true;
}
static bool write_precision_summary_tofile(const std::string& string,
const std::string& log_dir = "") {
if (log_dir == "") {
LOG(INFO) << "The `log_dir` of precision summary file is not set. log_dir:"
<< log_dir;
static bool write_precision_summary_tofile(
const std::string& string, const std::string& summary_log_dir = "") {
if (summary_log_dir == "") {
LOG(INFO) << "The `summary_log_dir` of precision summary file is not set. "
"summary_log_dir:"
<< summary_log_dir;
return false;
}
FILE* fp = fopen(log_dir.c_str(), "a");
FILE* fp = fopen(summary_log_dir.c_str(), "a");
if (fp == nullptr) {
LOG(INFO) << "Open precision summary file:" << log_dir << "failed.";
LOG(INFO) << "Open precision summary file:" << summary_log_dir << "failed.";
return false;
} else {
fprintf(fp, "%s\n", string.c_str());
......@@ -85,7 +131,7 @@ class PrecisionProfiler {
std::string inst_precison_str = GetInstPrecision(inst);
}
PrecisionProfiler() {}
PrecisionProfiler() { MkDirRecur(log_dir_); }
std::string GetSummaryHeader() {
using std::setw;
......@@ -102,9 +148,9 @@ class PrecisionProfiler {
<< " " << setw(15) << left << "std_deviation"
<< " " << setw(15) << left << "ave_grow_rate*" << std::endl;
// write to file with path: `log_dir`
if (log_dir_ != "") {
FILE* fp = fopen(log_dir_.c_str(), "a");
// write to file with path: `summary_log_dir`
if (summary_log_dir_ != "") {
FILE* fp = fopen(summary_log_dir_.c_str(), "a");
std::string header_str{ss.str()};
fprintf(fp, "%s\n", header_str.c_str());
fclose(fp);
......@@ -180,7 +226,7 @@ class PrecisionProfiler {
*std_dev =
compute_standard_deviation<float>(ptr, in->numel(), true, *mean);
*ave_grow_rate = compute_average_grow_rate<float>(ptr, in->numel());
write_result_to_file&& write_tensorfile<float>(in, name);
write_result_to_file&& write_tensorfile<float>(in, name, log_dir_);
return;
}
case PRECISION(kAny): {
......@@ -189,7 +235,7 @@ class PrecisionProfiler {
*std_dev =
compute_standard_deviation<float>(ptr, in->numel(), true, *mean);
*ave_grow_rate = compute_average_grow_rate<float>(ptr, in->numel());
write_result_to_file&& write_tensorfile<float>(in, name);
write_result_to_file&& write_tensorfile<float>(in, name, log_dir_);
return;
}
case PRECISION(kInt8): {
......@@ -198,7 +244,7 @@ class PrecisionProfiler {
*std_dev =
compute_standard_deviation<int8_t>(ptr, in->numel(), true, *mean);
*ave_grow_rate = compute_average_grow_rate<int8_t>(ptr, in->numel());
write_result_to_file&& write_tensorfile<int8_t>(in, name);
write_result_to_file&& write_tensorfile<int8_t>(in, name, log_dir_);
return;
}
case PRECISION(kInt32): {
......@@ -207,7 +253,7 @@ class PrecisionProfiler {
*std_dev = compute_standard_deviation<int32_t>(
ptr, in->numel(), true, *mean);
*ave_grow_rate = compute_average_grow_rate<int32_t>(ptr, in->numel());
write_result_to_file&& write_tensorfile<int32_t>(in, name);
write_result_to_file&& write_tensorfile<int32_t>(in, name, log_dir_);
return;
}
case PRECISION(kInt64): {
......@@ -254,7 +300,14 @@ class PrecisionProfiler {
real_out_v.data(), in->numel(), true, *mean);
*ave_grow_rate = compute_average_grow_rate<float>(real_out_v.data(),
real_out_v.size());
write_result_to_file&& write_tensorfile<float>(in, name);
std::shared_ptr<lite::Tensor> real_out_t(new lite::Tensor);
real_out_t->Resize(in->dims());
float* real_out_data = real_out_t->mutable_data<float>();
memcpy(real_out_data,
real_out_v.data(),
real_out_v.size() * sizeof(float));
write_result_to_file&& write_tensorfile<float>(
real_out_t.get(), name, log_dir_);
return;
}
case DATALAYOUT(kNCHW): {
......@@ -269,7 +322,14 @@ class PrecisionProfiler {
in_data_v.data(), in->numel(), true, *mean);
*ave_grow_rate =
compute_average_grow_rate<float>(in_data_v.data(), in->numel());
write_result_to_file&& write_tensorfile<float>(in, name);
std::shared_ptr<lite::Tensor> real_out_t(new lite::Tensor);
real_out_t->Resize(in->dims());
float* real_out_data = real_out_t->mutable_data<float>();
memcpy(real_out_data,
in_data_v.data(),
in_data_v.size() * sizeof(float));
write_result_to_file&& write_tensorfile<float>(
real_out_t.get(), name, log_dir_);
return;
}
default:
......@@ -296,7 +356,7 @@ class PrecisionProfiler {
in_data_v.data(), in->numel(), true, *mean);
*ave_grow_rate =
compute_average_grow_rate<float>(in_data_v.data(), in->numel());
write_result_to_file&& write_tensorfile<float>(in, name);
write_result_to_file&& write_tensorfile<float>(in, name, log_dir_);
return;
}
case PRECISION(kInt32): {
......@@ -311,7 +371,7 @@ class PrecisionProfiler {
in_data_v.data(), in->numel(), true, *mean);
*ave_grow_rate =
compute_average_grow_rate<int>(in_data_v.data(), in->numel());
write_result_to_file&& write_tensorfile<float>(in, name);
write_result_to_file&& write_tensorfile<float>(in, name, log_dir_);
return;
}
case PRECISION(kInt64): {
......@@ -326,7 +386,7 @@ class PrecisionProfiler {
in_data_v.data(), in->numel(), true, *mean);
*ave_grow_rate =
compute_average_grow_rate<int64_t>(in_data_v.data(), in->numel());
write_result_to_file&& write_tensorfile<float>(in, name);
write_result_to_file&& write_tensorfile<float>(in, name, log_dir_);
return;
}
case PRECISION(kFP16): {
......@@ -347,7 +407,7 @@ class PrecisionProfiler {
in_data_v.data(), in->numel(), true, *mean);
*ave_grow_rate =
compute_average_grow_rate<float>(in_data_v.data(), in->numel());
write_result_to_file&& write_tensorfile<float>(in, name);
write_result_to_file&& write_tensorfile<float>(in, name, log_dir_);
return;
}
default:
......@@ -372,12 +432,13 @@ class PrecisionProfiler {
using std::left;
using std::fixed;
STL::stringstream ss;
bool write_result_to_file = false;
bool write_result_to_file = true;
VLOG(1) << ">> Running kernel: " << inst->op()->op_info()->Repr()
<< " registered on " << TargetToStr(inst->kernel()->target()) << "/"
<< PrecisionToStr(inst->kernel()->precision()) << "/"
<< DataLayoutToStr(inst->kernel()->layout());
<< DataLayoutToStr(inst->kernel()->layout())
<< ", write_result_to_file:" << write_result_to_file;
std::string kernel_repr = inst->op()->op_info()->Repr();
std::string kernel_place = TargetToStr(inst->kernel()->target()) + "/" +
......@@ -471,12 +532,14 @@ class PrecisionProfiler {
}
}
}
write_precision_summary_tofile(ss.str(), log_dir_);
write_precision_summary_tofile(ss.str(), summary_log_dir_);
return ss.str();
}
private:
std::string log_dir_{"/storage/emulated/0/precision.log"};
std::string log_dir_{"/storage/emulated/0/PaddleLite_" + get_date_str() +
"/"};
std::string summary_log_dir_{log_dir_ + "precision_summary.log"};
};
} // namespace profile
......
......@@ -35,7 +35,6 @@ void gen_log(STL::ostream& log_stream_,
const int kMaxLen) {
const int len = strlen(file);
std::string time_str;
struct tm tm_time; // Time of creation of LogMessage
time_t timestamp = time(NULL);
#if defined(_WIN32)
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册