From e1da1af95983216fcb57f9eb27baecf34227ebce Mon Sep 17 00:00:00 2001
From: Wilber
Date: Thu, 16 Jan 2020 10:09:45 +0800
Subject: [PATCH] model_test support print intermediate tensor. test=develop
 (#2776)

test_model_bin support print intermediate tensor.
---
 lite/api/model_test.cc | 25 ++++++++++++++++++++++++-
 1 file changed, 24 insertions(+), 1 deletion(-)

diff --git a/lite/api/model_test.cc b/lite/api/model_test.cc
index 5b063a8ef1..cf646d823d 100644
--- a/lite/api/model_test.cc
+++ b/lite/api/model_test.cc
@@ -13,6 +13,7 @@
 // limitations under the License.
 
 #include <gflags/gflags.h>
+#include <fstream>
 #include <string>
 #include <vector>
 #include "lite/api/paddle_api.h"
@@ -33,10 +34,10 @@ using paddle::lite::profile::Timer;
 DEFINE_string(input_shape,
               "1,3,224,224",
               "input shapes, separated by colon and comma");
-
 DEFINE_bool(use_optimize_nb,
             false,
             "optimized & naive buffer model for mobile devices");
+DEFINE_string(arg_name, "", "the arg name");
 
 namespace paddle {
 namespace lite_api {
@@ -123,6 +124,28 @@ void Run(const std::vector<std::vector<int64_t>>& input_shapes,
     output_num *= output_shape[i];
   }
   LOG(INFO) << "output_num: " << output_num;
+
+  // please turn off memory_optimize_pass to use this feature.
+  if (FLAGS_arg_name != "") {
+    auto arg_tensor = predictor->GetTensor(FLAGS_arg_name);
+    auto arg_shape = arg_tensor->shape();
+    int arg_num = 1;
+    std::ostringstream os;
+    os << "{";
+    for (int i = 0; i < arg_shape.size(); ++i) {
+      arg_num *= arg_shape[i];
+      os << arg_shape[i] << ",";
+    }
+    os << "}";
+    float sum = 0.;
+    std::ofstream out(FLAGS_arg_name + ".txt");
+    for (size_t i = 0; i < arg_num; ++i) {
+      sum += arg_tensor->data<float>()[i];
+      out << std::to_string(arg_tensor->data<float>()[i]) << "\n";
+    }
+    LOG(INFO) << FLAGS_arg_name << " shape is " << os.str()
+              << ", mean value is " << sum * 1. / arg_num;
+  }
 }
 #endif
 
--
GitLab
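
Usage sketch (not part of the commit): only --input_shape, --use_optimize_nb, and --arg_name are defined in this diff; the binary name test_model_bin comes from the commit message, while --model_dir and the tensor name conv2d_0.tmp_0 are illustrative assumptions.

    ./test_model_bin --model_dir=<model_dir> \
        --input_shape=1,3,224,224 \
        --arg_name=conv2d_0.tmp_0

As the comment added in the last hunk notes, memory_optimize_pass should be turned off so the named intermediate tensor is still available; the run then writes that tensor's values to conv2d_0.tmp_0.txt and logs its shape and mean value.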