From a989a4e7c20e4ab82646a8c4c20b2ebcfb24afde Mon Sep 17 00:00:00 2001
From: luotao1
Date: Sat, 29 Sep 2018 12:46:21 +0800
Subject: [PATCH] refine paddle_inference_helper.h

---
 cmake/inference_lib.cmake | 6 +-
 paddle/fluid/framework/ir/CMakeLists.txt | 1 +
 .../fluid/inference/api/analysis_predictor.cc | 3 +-
 paddle/fluid/inference/api/api_impl.cc | 3 +-
 paddle/fluid/inference/api/helper.cc | 2 +-
 .../{helper.h => paddle_inference_helper.h} | 145 +++---------------
 .../inference/tests/api/anakin_rnn1_tester.cc | 3 +-
 .../tests/api/analyzer_rnn1_tester.cc | 1 -
 .../fluid/inference/tests/api/tester_helper.h | 125 ++++++++++++++-
 9 files changed, 152 insertions(+), 137 deletions(-)
 rename paddle/fluid/inference/api/{helper.h => paddle_inference_helper.h} (55%)

diff --git a/cmake/inference_lib.cmake b/cmake/inference_lib.cmake
index 077072f6ea..840aa06c22 100644
--- a/cmake/inference_lib.cmake
+++ b/cmake/inference_lib.cmake
@@ -157,9 +157,11 @@ endif()
 set(module "inference")
 copy(inference_lib DEPS ${inference_deps}
   SRCS ${src_dir}/${module}/*.h ${PADDLE_BINARY_DIR}/paddle/fluid/inference/libpaddle_fluid.*
-       ${src_dir}/${module}/api/paddle_inference_api.h ${src_dir}/${module}/api/demo_ci
+       ${src_dir}/${module}/api/paddle_inference_api.h ${src_dir}/${module}/api/paddle_inference_helper.h
+       ${src_dir}/${module}/api/demo_ci
        ${PADDLE_BINARY_DIR}/paddle/fluid/inference/api/paddle_inference_pass.h
-  DSTS ${dst_dir}/${module} ${dst_dir}/${module} ${dst_dir}/${module} ${dst_dir}/${module} ${dst_dir}/${module}
+  DSTS ${dst_dir}/${module} ${dst_dir}/${module} ${dst_dir}/${module} ${dst_dir}/${module}
+       ${dst_dir}/${module} ${dst_dir}/${module}
 )

 set(module "platform")
diff --git a/paddle/fluid/framework/ir/CMakeLists.txt b/paddle/fluid/framework/ir/CMakeLists.txt
index a0bf1afd40..510c3b992c 100644
--- a/paddle/fluid/framework/ir/CMakeLists.txt
+++ b/paddle/fluid/framework/ir/CMakeLists.txt
@@ -1,5 +1,6 @@
 set(pass_file ${PADDLE_BINARY_DIR}/paddle/fluid/inference/api/paddle_inference_pass.h)
 file(WRITE ${pass_file} "// Generated by the paddle/fluid/framework/ir/CMakeLists.txt.  DO NOT EDIT!\n\n")
+file(APPEND ${pass_file} "\#pragma once\n")
 file(APPEND ${pass_file} "\#include \"paddle/fluid/framework/ir/pass.h\"\n")
diff --git a/paddle/fluid/inference/api/analysis_predictor.cc b/paddle/fluid/inference/api/analysis_predictor.cc
index 0c11694d5a..cd2e544433 100644
--- a/paddle/fluid/inference/api/analysis_predictor.cc
+++ b/paddle/fluid/inference/api/analysis_predictor.cc
@@ -21,10 +21,9 @@
 #include "paddle/fluid/framework/ir/pass.h"
 #include "paddle/fluid/framework/naive_executor.h"
 #include "paddle/fluid/framework/scope.h"
-#include "paddle/fluid/inference/api/helper.h"
 #include "paddle/fluid/inference/api/paddle_inference_api.h"
+#include "paddle/fluid/inference/api/paddle_inference_helper.h"
 #include "paddle/fluid/inference/api/paddle_inference_pass.h"
-#include "paddle/fluid/inference/api/timer.h"
 #include "paddle/fluid/inference/utils/singleton.h"
 #include "paddle/fluid/platform/profiler.h"
diff --git a/paddle/fluid/inference/api/api_impl.cc b/paddle/fluid/inference/api/api_impl.cc
index 53740899cd..ff4224c997 100644
--- a/paddle/fluid/inference/api/api_impl.cc
+++ b/paddle/fluid/inference/api/api_impl.cc
@@ -22,8 +22,7 @@ limitations under the License. */
 #include "paddle/fluid/framework/feed_fetch_method.h"
 #include "paddle/fluid/inference/api/api_impl.h"
-#include "paddle/fluid/inference/api/helper.h"
-#include "paddle/fluid/inference/api/timer.h"
+#include "paddle/fluid/inference/api/paddle_inference_helper.h"
 #include "paddle/fluid/platform/profiler.h"

 DEFINE_bool(profile, false, "Turn on profiler for fluid");
diff --git a/paddle/fluid/inference/api/helper.cc b/paddle/fluid/inference/api/helper.cc
index 9cc491e10d..f982d9e4ef 100644
--- a/paddle/fluid/inference/api/helper.cc
+++ b/paddle/fluid/inference/api/helper.cc
@@ -12,7 +12,7 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.

-#include "paddle/fluid/inference/api/helper.h"
+#include "paddle/fluid/inference/api/paddle_inference_helper.h"

 namespace paddle {
 namespace inference {
diff --git a/paddle/fluid/inference/api/helper.h b/paddle/fluid/inference/api/paddle_inference_helper.h
similarity index 55%
rename from paddle/fluid/inference/api/helper.h
rename to paddle/fluid/inference/api/paddle_inference_helper.h
index dbbd3f6a67..24f59cf43a 100644
--- a/paddle/fluid/inference/api/helper.h
+++ b/paddle/fluid/inference/api/paddle_inference_helper.h
@@ -16,19 +16,34 @@
 #include
 #include
-#include
+#include <chrono>  // NOLINT
 #include
 #include
 #include
 #include
-#include "paddle/fluid/framework/lod_tensor.h"
-#include "paddle/fluid/inference/api/paddle_inference_api.h"
-#include "paddle/fluid/inference/api/timer.h"
 #include "paddle/fluid/string/printf.h"
+#include "paddle_inference_api.h"

 namespace paddle {
 namespace inference {

+// Timer for timer
+class Timer {
+ public:
+  std::chrono::high_resolution_clock::time_point start;
+  std::chrono::high_resolution_clock::time_point startu;
+
+  void tic() { start = std::chrono::high_resolution_clock::now(); }
+  double toc() {
+    startu = std::chrono::high_resolution_clock::now();
+    std::chrono::duration<double> time_span =
+        std::chrono::duration_cast<std::chrono::duration<double>>(startu -
+                                                                  start);
+    double used_time_ms = static_cast<double>(time_span.count()) * 1000.0;
+    return used_time_ms;
+  }
+};
+
 static void split(const std::string &str, char sep,
                   std::vector<std::string> *pieces) {
   pieces->clear();
@@ -154,127 +169,5 @@ static void PrintTime(int batch_size, int repeat, int num_threads, int tid,
   }
 }

-template <typename T>
-std::string LoDTensorSummary(const framework::LoDTensor &tensor) {
-  std::stringstream ss;
-  ss << "\n---- tensor ---" << '\n';
-  ss << "lod: [";
-  for (const auto &level : tensor.lod()) {
-    ss << "[ ";
-    for (auto i : level) {
-      ss << i << ", ";
-    }
-    ss << "]";
-  }
-  ss << "]\n";
-
-  ss << "shape: [";
-  int size = 1;
-  for (int i = 0; i < tensor.dims().size(); i++) {
-    int dim = tensor.dims()[i];
-    ss << dim << ", ";
-    size *= dim;
-  }
-  ss << "]\n";
-
-  ss << "data: ";
-  for (int i = 0; i < std::min(20, size); i++) {
-    ss << tensor.data<T>()[i] << " ";
-  }
-  ss << "\n";
-
-  return ss.str();
-}
-
-static bool CompareLoD(const framework::LoD &a, const framework::LoD &b) {
-  if (a.size() != b.size()) {
-    LOG(ERROR) << string::Sprintf("lod size not match %d != %d", a.size(),
-                                  b.size());
-    return false;
-  }
-  for (size_t i = 0; i < a.size(); i++) {
-    auto &al = a[i];
-    auto &bl = b[i];
-    if (al.size() != bl.size()) {
-      LOG(ERROR) << string::Sprintf("level size %d != %d", al.size(),
-                                    bl.size());
-      return false;
-    }
-  }
-  return true;
-}
-
-static bool CompareShape(const std::vector<int64_t> &a,
-                         const std::vector<int64_t> &b) {
-  if (a.size() != b.size()) {
-    LOG(ERROR) << string::Sprintf("shape size not match %d != %d", a.size(),
-                                  b.size());
-    return false;
-  }
-  for (size_t i = 0; i < a.size(); i++) {
-    if (a[i] != b[i]) {
-      LOG(ERROR) << string::Sprintf("shape %d-th element not match %d != %d", i,
-                                    a[i], b[i]);
-      return false;
-    }
-  }
-  return true;
-}
-
-static bool CompareTensorData(const framework::LoDTensor &a,
-                              const framework::LoDTensor &b) {
-  auto a_shape = framework::vectorize(a.dims());
-  auto b_shape = framework::vectorize(b.dims());
-  size_t a_size = std::accumulate(a_shape.begin(), a_shape.end(), 1,
-                                  [](int a, int b) { return a * b; });
-  size_t b_size = std::accumulate(b_shape.begin(), b_shape.end(), 1,
-                                  [](int a, int b) { return a * b; });
-  if (a_size != b_size) {
-    LOG(ERROR) << string::Sprintf("tensor data size not match, %d != %d",
-                                  a_size, b_size);
-  }
-
-  for (size_t i = 0; i < a_size; i++) {
-    if (a.type() == typeid(float)) {
-      const auto *a_data = a.data<float>();
-      const auto *b_data = b.data<float>();
-      if (std::abs(a_data[i] - b_data[i]) > 1e-3) {
-        LOG(ERROR) << string::Sprintf(
-            "tensor data %d-th element not match, %f != %f", i, a_data[i],
-            b_data[i]);
-        return false;
-      }
-    } else if (a.type() == typeid(int64_t)) {
-      const auto *a_data = a.data<int64_t>();
-      const auto *b_data = b.data<int64_t>();
-      if (std::abs(a_data[i] - b_data[i]) > 1e-3) {
-        LOG(ERROR) << string::Sprintf(
-            "tensor data %d-th element not match, %f != %f", i, a_data[i],
-            b_data[i]);
-        return false;
-      }
-    }
-  }
-
-  return true;
-}
-
-static bool CompareTensor(const framework::LoDTensor &a,
-                          const framework::LoDTensor &b) {
-  if (!CompareLoD(a.lod(), b.lod())) {
-    return false;
-  }
-  if (!CompareShape(framework::vectorize(a.dims()),
-                    framework::vectorize(b.dims()))) {
-    return false;
-  }
-
-  if (!CompareTensorData(a, b)) {
-    return false;
-  }
-
-  return true;
-}
-
 }  // namespace inference
 }  // namespace paddle
diff --git a/paddle/fluid/inference/tests/api/anakin_rnn1_tester.cc b/paddle/fluid/inference/tests/api/anakin_rnn1_tester.cc
index 82bc83988d..2bc8b61ef7 100644
--- a/paddle/fluid/inference/tests/api/anakin_rnn1_tester.cc
+++ b/paddle/fluid/inference/tests/api/anakin_rnn1_tester.cc
@@ -20,9 +20,8 @@ limitations under the License. */
 #include
 #include  // NOLINT
 #include
-#include "paddle/fluid/inference/api/helper.h"
 #include "paddle/fluid/inference/api/paddle_inference_api.h"
-#include "paddle/fluid/inference/api/timer.h"
+#include "paddle/fluid/inference/api/paddle_inference_helper.h"
 #include "utils/logger/logger.h"

 DEFINE_string(model, "", "Directory of the inference model.");
diff --git a/paddle/fluid/inference/tests/api/analyzer_rnn1_tester.cc b/paddle/fluid/inference/tests/api/analyzer_rnn1_tester.cc
index d2e344111b..5a68b0b25d 100644
--- a/paddle/fluid/inference/tests/api/analyzer_rnn1_tester.cc
+++ b/paddle/fluid/inference/tests/api/analyzer_rnn1_tester.cc
@@ -12,7 +12,6 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
-#include "paddle/fluid/inference/api/analysis_predictor.h" #include "paddle/fluid/inference/tests/api/tester_helper.h" DEFINE_bool(with_precision_check, true, "turn on test"); diff --git a/paddle/fluid/inference/tests/api/tester_helper.h b/paddle/fluid/inference/tests/api/tester_helper.h index cb36ddc8c8..d87b35da24 100644 --- a/paddle/fluid/inference/tests/api/tester_helper.h +++ b/paddle/fluid/inference/tests/api/tester_helper.h @@ -15,6 +15,7 @@ #pragma once #include +#include #include #include // NOLINT #include @@ -22,7 +23,7 @@ #include "paddle/fluid/inference/analysis/analyzer.h" #include "paddle/fluid/inference/analysis/ut_helper.h" #include "paddle/fluid/inference/api/analysis_predictor.h" -#include "paddle/fluid/inference/api/helper.h" +#include "paddle/fluid/inference/api/paddle_inference_helper.h" #include "paddle/fluid/inference/api/paddle_inference_pass.h" #include "paddle/fluid/platform/profiler.h" @@ -182,5 +183,127 @@ void CompareNativeAndAnalysis( CompareResult(analysis_outputs, native_outputs); } +template +std::string LoDTensorSummary(const framework::LoDTensor &tensor) { + std::stringstream ss; + ss << "\n---- tensor ---" << '\n'; + ss << "lod: ["; + for (const auto &level : tensor.lod()) { + ss << "[ "; + for (auto i : level) { + ss << i << ", "; + } + ss << "]"; + } + ss << "]\n"; + + ss << "shape: ["; + int size = 1; + for (int i = 0; i < tensor.dims().size(); i++) { + int dim = tensor.dims()[i]; + ss << dim << ", "; + size *= dim; + } + ss << "]\n"; + + ss << "data: "; + for (int i = 0; i < std::min(20, size); i++) { + ss << tensor.data()[i] << " "; + } + ss << "\n"; + + return ss.str(); +} + +static bool CompareLoD(const framework::LoD &a, const framework::LoD &b) { + if (a.size() != b.size()) { + LOG(ERROR) << string::Sprintf("lod size not match %d != %d", a.size(), + b.size()); + return false; + } + for (size_t i = 0; i < a.size(); i++) { + auto &al = a[i]; + auto &bl = b[i]; + if (al.size() != bl.size()) { + LOG(ERROR) << string::Sprintf("level size %d != %d", al.size(), + bl.size()); + return false; + } + } + return true; +} + +static bool CompareShape(const std::vector &a, + const std::vector &b) { + if (a.size() != b.size()) { + LOG(ERROR) << string::Sprintf("shape size not match %d != %d", a.size(), + b.size()); + return false; + } + for (size_t i = 0; i < a.size(); i++) { + if (a[i] != b[i]) { + LOG(ERROR) << string::Sprintf("shape %d-th element not match %d != %d", i, + a[i], b[i]); + return false; + } + } + return true; +} + +static bool CompareTensorData(const framework::LoDTensor &a, + const framework::LoDTensor &b) { + auto a_shape = framework::vectorize(a.dims()); + auto b_shape = framework::vectorize(b.dims()); + size_t a_size = std::accumulate(a_shape.begin(), a_shape.end(), 1, + [](int a, int b) { return a * b; }); + size_t b_size = std::accumulate(b_shape.begin(), b_shape.end(), 1, + [](int a, int b) { return a * b; }); + if (a_size != b_size) { + LOG(ERROR) << string::Sprintf("tensor data size not match, %d != %d", + a_size, b_size); + } + + for (size_t i = 0; i < a_size; i++) { + if (a.type() == typeid(float)) { + const auto *a_data = a.data(); + const auto *b_data = b.data(); + if (std::abs(a_data[i] - b_data[i]) > 1e-3) { + LOG(ERROR) << string::Sprintf( + "tensor data %d-th element not match, %f != %f", i, a_data[i], + b_data[i]); + return false; + } + } else if (a.type() == typeid(int64_t)) { + const auto *a_data = a.data(); + const auto *b_data = b.data(); + if (std::abs(a_data[i] - b_data[i]) > 1e-3) { + LOG(ERROR) << string::Sprintf( + 
"tensor data %d-th element not match, %f != %f", i, a_data[i], + b_data[i]); + return false; + } + } + } + + return true; +} + +static bool CompareTensor(const framework::LoDTensor &a, + const framework::LoDTensor &b) { + if (!CompareLoD(a.lod(), b.lod())) { + return false; + } + if (!CompareShape(framework::vectorize(a.dims()), + framework::vectorize(b.dims()))) { + return false; + } + + if (!CompareTensorData(a, b)) { + return false; + } + + return true; +} + } // namespace inference } // namespace paddle -- GitLab