未验证 提交 27e90303 编写于 作者: Y Yuan Shuai 提交者: GitHub

[LITE][DEMO] Enhance mobile_light demo. test=develop (#3171)

* [LITE][DEMO] Enhance mobile_light demo. test=develop

* fix print. test=develop
上级 a8e62ae8
...@@ -12,8 +12,12 @@ ...@@ -12,8 +12,12 @@
// See the License for the specific language governing permissions and // See the License for the specific language governing permissions and
// limitations under the License. // limitations under the License.
#include <sys/time.h>
#include <time.h>
#include <iostream> #include <iostream>
#include <string>
#include <vector> #include <vector>
#include "paddle_api.h" // NOLINT #include "paddle_api.h" // NOLINT
using namespace paddle::lite_api; // NOLINT using namespace paddle::lite_api; // NOLINT
...@@ -24,7 +28,25 @@ int64_t ShapeProduction(const shape_t& shape) { ...@@ -24,7 +28,25 @@ int64_t ShapeProduction(const shape_t& shape) {
return res; return res;
} }
void RunModel(std::string model_dir) { std::string ShapePrint(const shape_t& shape) {
std::string shape_str{""};
for (auto i : shape) {
shape_str = shape_str + std::to_string(i) + " ";
}
return shape_str;
}
// Returns the current wall-clock time in microseconds (gettimeofday-based).
inline double GetCurrentUS() {
  struct timeval tv;
  gettimeofday(&tv, NULL);
  return static_cast<double>(tv.tv_sec) * 1e+6 + tv.tv_usec;
}
void RunModel(std::string model_dir,
const shape_t& input_shape,
int repeats,
int warmup,
int print_output_elem) {
// 1. Set MobileConfig // 1. Set MobileConfig
MobileConfig config; MobileConfig config;
config.set_model_from_file(model_dir); config.set_model_from_file(model_dir);
...@@ -38,31 +60,108 @@ void RunModel(std::string model_dir) { ...@@ -38,31 +60,108 @@ void RunModel(std::string model_dir) {
// 3. Prepare input data // 3. Prepare input data
std::unique_ptr<Tensor> input_tensor(std::move(predictor->GetInput(0))); std::unique_ptr<Tensor> input_tensor(std::move(predictor->GetInput(0)));
input_tensor->Resize({1, 3, 224, 224}); input_tensor->Resize(
{input_shape[0], input_shape[1], input_shape[2], input_shape[3]});
auto* data = input_tensor->mutable_data<float>(); auto* data = input_tensor->mutable_data<float>();
for (int i = 0; i < ShapeProduction(input_tensor->shape()); ++i) { for (int i = 0; i < ShapeProduction(input_tensor->shape()); ++i) {
data[i] = 1; data[i] = 1;
} }
// 4. Run predictor // 4. Run predictor
predictor->Run(); for (size_t widx = 0; widx < warmup; ++widx) {
predictor->Run();
}
double sum_duration = 0.0; // millisecond;
double max_duration = 1e-5;
double min_duration = 1e5;
double avg_duration = -1;
for (size_t ridx = 0; ridx < repeats; ++ridx) {
auto start = GetCurrentUS();
predictor->Run();
auto duration = (GetCurrentUS() - start) / 1000.0;
sum_duration += duration;
max_duration = duration > max_duration ? duration : max_duration;
min_duration = duration < min_duration ? duration : min_duration;
std::cout << "run_idx:" << ridx + 1 << " / " << repeats << ": " << duration
<< " ms" << std::endl;
}
avg_duration = sum_duration / static_cast<float>(repeats);
std::cout << "\n======= benchmark summary =======\n"
<< "input_shape(NCHW):" << ShapePrint(input_shape) << "\n"
<< "model_dir:" << model_dir << "\n"
<< "warmup:" << warmup << "\n"
<< "repeats:" << repeats << "\n"
<< "max_duration:" << max_duration << "\n"
<< "min_duration:" << min_duration << "\n"
<< "avg_duration:" << avg_duration << "\n";
// 5. Get output // 5. Get output
std::unique_ptr<const Tensor> output_tensor( std::cout << "\n====== output summary ====== " << std::endl;
std::move(predictor->GetOutput(0))); size_t output_tensor_num = predictor->GetOutputNames().size();
std::cout << "Output shape " << output_tensor->shape()[1] << std::endl; std::cout << "output tesnor num:" << output_tensor_num << std::endl;
for (int i = 0; i < ShapeProduction(output_tensor->shape()); i += 100) {
std::cout << "Output[" << i << "]: " << output_tensor->data<float>()[i] for (size_t tidx = 0; tidx < output_tensor_num; ++tidx) {
std::unique_ptr<const paddle::lite_api::Tensor> output_tensor =
predictor->GetOutput(tidx);
std::cout << "\n--- output tensor " << tidx << " ---" << std::endl;
auto out_shape = output_tensor->shape();
std::cout << "out_shape(NCHW):" << ShapePrint(out_shape) << std::endl;
float sum = 0.f;
for (int i = 0; i < ShapeProduction(out_shape); ++i) {
sum += output_tensor->data<float>()[i];
}
std::cout << "output tensor " << tidx
<< " elem num:" << ShapeProduction(out_shape) << std::endl;
std::cout << "output tensor " << tidx << " sum value:" << sum << std::endl;
std::cout << "output tensor " << tidx
<< " mean value:" << sum / ShapeProduction(out_shape)
<< std::endl; << std::endl;
// print output
if (print_output_elem) {
for (int i = 0; i < ShapeProduction(out_shape); ++i) {
std::cout << "out[" << tidx << "][" << i
<< "]:" << output_tensor->data<float>()[i] << std::endl;
}
}
} }
} }
int main(int argc, char** argv) { int main(int argc, char** argv) {
if (argc < 2) { shape_t input_shape{1, 3, 224, 224}; // shape_t ==> std::vector<int64_t>
std::cerr << "[ERROR] usage: ./" << argv[0] << " naive_buffer_model_dir\n"; int repeats = 10;
exit(1); int warmup = 10;
int print_output_elem = 0;
if (argc > 2 && argc < 9) {
std::cerr << "usage: ./" << argv[0] << "\n"
<< " <naive_buffer_model_dir>\n"
<< " <input_n>\n"
<< " <input_c>\n"
<< " <input_h>\n"
<< " <input_w>\n"
<< " <repeats>\n"
<< " <warmup>\n"
<< " <print_output>" << std::endl;
return 0;
} }
std::string model_dir = argv[1]; std::string model_dir = argv[1];
RunModel(model_dir); if (argc >= 9) {
input_shape[0] = atoi(argv[2]);
input_shape[1] = atoi(argv[4]);
input_shape[2] = atoi(argv[5]);
input_shape[3] = atoi(argv[6]);
repeats = atoi(argv[6]);
warmup = atoi(argv[7]);
print_output_elem = atoi(argv[8]);
}
RunModel(model_dir, input_shape, repeats, warmup, print_output_elem);
return 0; return 0;
} }
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册