diff --git a/lite/core/optimizer.h b/lite/core/optimizer.h index 2dfc444a26ffe013ad05c81a003dd073cc133177..8d924d068fb193f8f3a980cfc1639412bdbd5c0f 100644 --- a/lite/core/optimizer.h +++ b/lite/core/optimizer.h @@ -169,8 +169,10 @@ class Optimizer { "runtime_context_assign_pass", "argument_type_display_pass", "lite_reshape_fuse_pass", - - "memory_optimize_pass"}}; +#ifndef LITE_WITH_PRECISION_PROFILE + "memory_optimize_pass" +#endif + }}; if (passes.size() == 1) { // multi_stream_analysis_pass must be in the front of diff --git a/lite/core/profile/precision_profiler.h b/lite/core/profile/precision_profiler.h index fda2b74f8f37f4705382f768b353150fa0bda3d7..6ef19b2b0685a50c32ba5bc8cad9eb109dc0e401 100644 --- a/lite/core/profile/precision_profiler.h +++ b/lite/core/profile/precision_profiler.h @@ -18,10 +18,16 @@ * of each kernel. */ #pragma once + +#include <sys/time.h> +#include <time.h> + #include <cmath> +#include <memory> #include <string> #include <vector> #include "lite/core/program.h" +#include "lite/utils/io.h" #ifdef LITE_WITH_X86 #include "lite/fluid/float16.h" #endif @@ -40,14 +46,50 @@ namespace paddle { namespace lite { namespace profile { +static const std::string get_date_str() { + struct tm tm_time; + time_t timestamp = time(NULL); + localtime_r(&timestamp, &tm_time); + struct timeval tv; + gettimeofday(&tv, NULL); + + // print date / time + std::string date_str = + std::to_string(1900 + tm_time.tm_year) + + std::to_string(1 + tm_time.tm_mon) + std::to_string(tm_time.tm_mday) + + '_' + std::to_string(tm_time.tm_hour) + std::to_string(tm_time.tm_min) + + std::to_string(tm_time.tm_sec) + '_' + std::to_string(tv.tv_usec / 1000); + return date_str; +} + +inline std::string generate_valid_tensor_name(const std::string& name) { + std::string new_name(""); + for (size_t i = 0; i < name.length(); ++i) { + if (name[i] != '/') { + new_name += name[i]; + } else { + new_name += "_"; + } + } + return new_name; +} + template <typename dtype> -static bool write_tensorfile(const Tensor* tensor, const std::string& locate) { - if 
(locate.find('/') != std::string::npos) { - return false; +static bool write_tensorfile( + const Tensor* tensor, + const std::string& tensor_name, + const std::string prefix_path = "/storage/emulated/0/") { + std::string new_tensor_name = generate_valid_tensor_name(tensor_name); + if (tensor_name.find('/') != std::string::npos) { + LOG(ERROR) << "--> tensor name is abnormal with '\\':" << tensor_name + << " !!!, replace with '_'," << new_tensor_name + << new_tensor_name; } - FILE* fp = fopen(locate.c_str(), "w"); + + std::string tensor_save_path = prefix_path + new_tensor_name + ".txt"; + FILE* fp = fopen(tensor_save_path.c_str(), "w"); if (fp == nullptr) { - LOG(ERROR) << "file open field " << locate; + LOG(ERROR) << "failed open file " << tensor_save_path; return false; } else { const dtype* data = tensor->data<dtype>(); @@ -56,19 +98,23 @@ static bool write_tensorfile(const Tensor* tensor, const std::string& locate) { } } fclose(fp); + LOG(INFO) << "write tensor " << tensor_name + << " to file:" << tensor_save_path; return true; } -static bool write_precision_summary_tofile(const std::string& string, - const std::string& log_dir = "") { - if (log_dir == "") { - LOG(INFO) << "The `log_dir` of precision summary file is not set. log_dir:" - << log_dir; +static bool write_precision_summary_tofile( + const std::string& string, const std::string& summary_log_dir = "") { + if (summary_log_dir == "") { + LOG(INFO) << "The `summary_log_dir` of precision summary file is not set. 
" + "summary_log_dir:" + << summary_log_dir; return false; } - FILE* fp = fopen(log_dir.c_str(), "a"); + + FILE* fp = fopen(summary_log_dir.c_str(), "a"); if (fp == nullptr) { - LOG(INFO) << "Open precision summary file:" << log_dir << "failed."; + LOG(INFO) << "Open precision summary file:" << summary_log_dir << "failed."; return false; } else { fprintf(fp, "%s\n", string.c_str()); @@ -85,7 +131,7 @@ class PrecisionProfiler { std::string inst_precison_str = GetInstPrecision(inst); } - PrecisionProfiler() {} + PrecisionProfiler() { MkDirRecur(log_dir_); } std::string GetSummaryHeader() { using std::setw; @@ -102,9 +148,9 @@ class PrecisionProfiler { << " " << setw(15) << left << "std_deviation" << " " << setw(15) << left << "ave_grow_rate*" << std::endl; - // write to file with path: `log_dir` - if (log_dir_ != "") { - FILE* fp = fopen(log_dir_.c_str(), "a"); + // write to file with path: `summary_log_dir` + if (summary_log_dir_ != "") { + FILE* fp = fopen(summary_log_dir_.c_str(), "a"); std::string header_str{ss.str()}; fprintf(fp, "%s\n", header_str.c_str()); fclose(fp); @@ -180,7 +226,7 @@ class PrecisionProfiler { *std_dev = compute_standard_deviation(ptr, in->numel(), true, *mean); *ave_grow_rate = compute_average_grow_rate(ptr, in->numel()); - write_result_to_file&& write_tensorfile(in, name); + write_result_to_file&& write_tensorfile(in, name, log_dir_); return; } case PRECISION(kAny): { @@ -189,7 +235,7 @@ class PrecisionProfiler { *std_dev = compute_standard_deviation(ptr, in->numel(), true, *mean); *ave_grow_rate = compute_average_grow_rate(ptr, in->numel()); - write_result_to_file&& write_tensorfile(in, name); + write_result_to_file&& write_tensorfile(in, name, log_dir_); return; } case PRECISION(kInt8): { @@ -198,7 +244,7 @@ class PrecisionProfiler { *std_dev = compute_standard_deviation(ptr, in->numel(), true, *mean); *ave_grow_rate = compute_average_grow_rate(ptr, in->numel()); - write_result_to_file&& write_tensorfile(in, name); + 
write_result_to_file&& write_tensorfile(in, name, log_dir_); return; } case PRECISION(kInt32): { @@ -207,7 +253,7 @@ class PrecisionProfiler { *std_dev = compute_standard_deviation( ptr, in->numel(), true, *mean); *ave_grow_rate = compute_average_grow_rate(ptr, in->numel()); - write_result_to_file&& write_tensorfile(in, name); + write_result_to_file&& write_tensorfile(in, name, log_dir_); return; } case PRECISION(kInt64): { @@ -254,7 +300,14 @@ class PrecisionProfiler { real_out_v.data(), in->numel(), true, *mean); *ave_grow_rate = compute_average_grow_rate(real_out_v.data(), real_out_v.size()); - write_result_to_file&& write_tensorfile(in, name); + std::shared_ptr<lite::Tensor> real_out_t(new lite::Tensor); + real_out_t->Resize(in->dims()); + float* real_out_data = real_out_t->mutable_data<float>(); + memcpy(real_out_data, + real_out_v.data(), + real_out_v.size() * sizeof(float)); + write_result_to_file&& write_tensorfile( + real_out_t.get(), name, log_dir_); return; } case DATALAYOUT(kNCHW): { @@ -269,7 +322,14 @@ class PrecisionProfiler { in_data_v.data(), in->numel(), true, *mean); *ave_grow_rate = compute_average_grow_rate(in_data_v.data(), in->numel()); - write_result_to_file&& write_tensorfile(in, name); + std::shared_ptr<lite::Tensor> real_out_t(new lite::Tensor); + real_out_t->Resize(in->dims()); + float* real_out_data = real_out_t->mutable_data<float>(); + memcpy(real_out_data, + in_data_v.data(), + in_data_v.size() * sizeof(float)); + write_result_to_file&& write_tensorfile( + real_out_t.get(), name, log_dir_); return; } default: @@ -296,7 +356,7 @@ class PrecisionProfiler { in_data_v.data(), in->numel(), true, *mean); *ave_grow_rate = compute_average_grow_rate(in_data_v.data(), in->numel()); - write_result_to_file&& write_tensorfile(in, name); + write_result_to_file&& write_tensorfile(in, name, log_dir_); return; } case PRECISION(kInt32): { @@ -311,7 +371,7 @@ class PrecisionProfiler { in_data_v.data(), in->numel(), true, *mean); *ave_grow_rate = compute_average_grow_rate(in_data_v.data(), 
in->numel()); - write_result_to_file&& write_tensorfile(in, name); + write_result_to_file&& write_tensorfile(in, name, log_dir_); return; } case PRECISION(kInt64): { @@ -326,7 +386,7 @@ class PrecisionProfiler { in_data_v.data(), in->numel(), true, *mean); *ave_grow_rate = compute_average_grow_rate(in_data_v.data(), in->numel()); - write_result_to_file&& write_tensorfile(in, name); + write_result_to_file&& write_tensorfile(in, name, log_dir_); return; } case PRECISION(kFP16): { @@ -347,7 +407,7 @@ class PrecisionProfiler { in_data_v.data(), in->numel(), true, *mean); *ave_grow_rate = compute_average_grow_rate(in_data_v.data(), in->numel()); - write_result_to_file&& write_tensorfile(in, name); + write_result_to_file&& write_tensorfile(in, name, log_dir_); return; } default: @@ -372,12 +432,13 @@ class PrecisionProfiler { using std::left; using std::fixed; STL::stringstream ss; - bool write_result_to_file = false; + bool write_result_to_file = true; VLOG(1) << ">> Running kernel: " << inst->op()->op_info()->Repr() << " registered on " << TargetToStr(inst->kernel()->target()) << "/" << PrecisionToStr(inst->kernel()->precision()) << "/" - << DataLayoutToStr(inst->kernel()->layout()); + << DataLayoutToStr(inst->kernel()->layout()) + << ", write_result_to_file:" << write_result_to_file; std::string kernel_repr = inst->op()->op_info()->Repr(); std::string kernel_place = TargetToStr(inst->kernel()->target()) + "/" + @@ -471,12 +532,14 @@ class PrecisionProfiler { } } } - write_precision_summary_tofile(ss.str(), log_dir_); + write_precision_summary_tofile(ss.str(), summary_log_dir_); return ss.str(); } private: - std::string log_dir_{"/storage/emulated/0/precision.log"}; + std::string log_dir_{"/storage/emulated/0/PaddleLite_" + get_date_str() + + "/"}; + std::string summary_log_dir_{log_dir_ + "precision_summary.log"}; }; } // namespace profile diff --git a/lite/utils/logging.cc b/lite/utils/logging.cc index 
cc5a5b408a9517cd657c8129cbe69b5e439a194f..768d4e0972c07f950a482aeecf1aa09c41b9b409 100644 --- a/lite/utils/logging.cc +++ b/lite/utils/logging.cc @@ -35,7 +35,6 @@ void gen_log(STL::ostream& log_stream_, const int kMaxLen) { const int len = strlen(file); - std::string time_str; struct tm tm_time; // Time of creation of LogMessage time_t timestamp = time(NULL); #if defined(_WIN32)