diff --git a/paddle/fluid/lite/api/inceptionv4_test.cc b/paddle/fluid/lite/api/inceptionv4_test.cc index 7908a8110045c53ad6f0b4f33702dccf58e5b1b5..977aa04abc6c9d990ae17d73750fc1f2af373920 100644 --- a/paddle/fluid/lite/api/inceptionv4_test.cc +++ b/paddle/fluid/lite/api/inceptionv4_test.cc @@ -16,20 +16,19 @@ #include <gtest/gtest.h> #include <vector> #include "paddle/fluid/lite/api/cxx_api.h" +#include "paddle/fluid/lite/api/test_helper.h" #include "paddle/fluid/lite/core/mir/use_passes.h" #include "paddle/fluid/lite/core/op_registry.h" #include "paddle/fluid/lite/kernels/use_kernels.h" #include "paddle/fluid/lite/operators/use_ops.h" -// for eval -DEFINE_string(model_dir, "", ""); - namespace paddle { namespace lite { #ifdef LITE_WITH_ARM TEST(InceptionV4, test) { DeviceInfo::Init(); + DeviceInfo::Global().SetRunMode(LITE_POWER_HIGH, FLAGS_threads); lite::Predictor predictor; std::vector<Place> valid_places({Place{TARGET(kHost), PRECISION(kFloat)}, Place{TARGET(kARM), PRECISION(kFloat)}}); @@ -44,7 +43,20 @@ TEST(InceptionV4, test) { data[i] = 1; } - predictor.Run(); + for (int i = 0; i < FLAGS_warmup; ++i) { + predictor.Run(); + } + + auto start = GetCurrentUS(); + for (int i = 0; i < FLAGS_repeats; ++i) { + predictor.Run(); + } + + LOG(INFO) << "================== Speed Report ==================="; + LOG(INFO) << "Model: " << FLAGS_model_dir << ", threads num " << FLAGS_threads + << ", warmup: " << FLAGS_warmup << ", repeats: " << FLAGS_repeats + << ", spend " << (GetCurrentUS() - start) / FLAGS_repeats / 1000.0 + << " ms in average."; auto* out = predictor.GetOutput(0); std::vector<float> results({0.00078033, 0.00083865, 0.00060029, 0.00057083, diff --git a/paddle/fluid/lite/api/mobilenetv1_test.cc b/paddle/fluid/lite/api/mobilenetv1_test.cc index 94935e8699643577b309fb294a18ea848a5ad567..9b7d6dc40b881c3145ded02c0d065e66ea0a5afc 100644 --- a/paddle/fluid/lite/api/mobilenetv1_test.cc +++ b/paddle/fluid/lite/api/mobilenetv1_test.cc @@ -16,20 +16,19 @@ #include <gtest/gtest.h> #include <vector> #include 
"paddle/fluid/lite/api/cxx_api.h" +#include "paddle/fluid/lite/api/test_helper.h" #include "paddle/fluid/lite/core/mir/use_passes.h" #include "paddle/fluid/lite/core/op_registry.h" #include "paddle/fluid/lite/kernels/use_kernels.h" #include "paddle/fluid/lite/operators/use_ops.h" -// for eval -DEFINE_string(model_dir, "", ""); - namespace paddle { namespace lite { #ifdef LITE_WITH_ARM TEST(MobileNetV1, test) { DeviceInfo::Init(); + DeviceInfo::Global().SetRunMode(LITE_POWER_HIGH, FLAGS_threads); lite::Predictor predictor; std::vector<Place> valid_places({Place{TARGET(kHost), PRECISION(kFloat)}, Place{TARGET(kARM), PRECISION(kFloat)}}); @@ -44,7 +43,20 @@ TEST(MobileNetV1, test) { data[i] = 1; } - predictor.Run(); + for (int i = 0; i < FLAGS_warmup; ++i) { + predictor.Run(); + } + + auto start = GetCurrentUS(); + for (int i = 0; i < FLAGS_repeats; ++i) { + predictor.Run(); + } + + LOG(INFO) << "================== Speed Report ==================="; + LOG(INFO) << "Model: " << FLAGS_model_dir << ", threads num " << FLAGS_threads + << ", warmup: " << FLAGS_warmup << ", repeats: " << FLAGS_repeats + << ", spend " << (GetCurrentUS() - start) / FLAGS_repeats / 1000.0 + << " ms in average."; auto* out = predictor.GetOutput(0); std::vector<float> results({1.91308980e-04, 5.92055148e-04, 1.12303176e-04, diff --git a/paddle/fluid/lite/api/mobilenetv2_test.cc b/paddle/fluid/lite/api/mobilenetv2_test.cc index 0d615f61f267a612a32e5a0535d6272f2c867769..e50ac212c10a462180786d2bb9887881957bb0f7 100644 --- a/paddle/fluid/lite/api/mobilenetv2_test.cc +++ b/paddle/fluid/lite/api/mobilenetv2_test.cc @@ -16,20 +16,19 @@ #include <gtest/gtest.h> #include <vector> #include "paddle/fluid/lite/api/cxx_api.h" +#include "paddle/fluid/lite/api/test_helper.h" #include "paddle/fluid/lite/core/mir/use_passes.h" #include "paddle/fluid/lite/core/op_registry.h" #include "paddle/fluid/lite/kernels/use_kernels.h" #include "paddle/fluid/lite/operators/use_ops.h" -// for eval -DEFINE_string(model_dir, "", ""); - namespace paddle { namespace 
lite { #ifdef LITE_WITH_ARM TEST(MobileNetV2, test) { DeviceInfo::Init(); + DeviceInfo::Global().SetRunMode(LITE_POWER_HIGH, FLAGS_threads); lite::Predictor predictor; std::vector<Place> valid_places({Place{TARGET(kHost), PRECISION(kFloat)}, Place{TARGET(kARM), PRECISION(kFloat)}}); @@ -44,7 +43,20 @@ TEST(MobileNetV2, test) { data[i] = 1; } - predictor.Run(); + for (int i = 0; i < FLAGS_warmup; ++i) { + predictor.Run(); + } + + auto start = GetCurrentUS(); + for (int i = 0; i < FLAGS_repeats; ++i) { + predictor.Run(); + } + + LOG(INFO) << "================== Speed Report ==================="; + LOG(INFO) << "Model: " << FLAGS_model_dir << ", threads num " << FLAGS_threads + << ", warmup: " << FLAGS_warmup << ", repeats: " << FLAGS_repeats + << ", spend " << (GetCurrentUS() - start) / FLAGS_repeats / 1000.0 + << " ms in average."; auto* out = predictor.GetOutput(0); std::vector<float> results({0.00097802, 0.00099822, 0.00103093, 0.00100121, diff --git a/paddle/fluid/lite/api/resnet50_test.cc b/paddle/fluid/lite/api/resnet50_test.cc index cb63ad83cab40842fe799496ff8881f51ea953ae..a1e57bf32c583d74800cd36194ccd9b5171a8366 100644 --- a/paddle/fluid/lite/api/resnet50_test.cc +++ b/paddle/fluid/lite/api/resnet50_test.cc @@ -16,20 +16,19 @@ #include <gtest/gtest.h> #include <vector> #include "paddle/fluid/lite/api/cxx_api.h" +#include "paddle/fluid/lite/api/test_helper.h" #include "paddle/fluid/lite/core/mir/use_passes.h" #include "paddle/fluid/lite/core/op_registry.h" #include "paddle/fluid/lite/kernels/use_kernels.h" #include "paddle/fluid/lite/operators/use_ops.h" -// for eval -DEFINE_string(model_dir, "", ""); - namespace paddle { namespace lite { #ifdef LITE_WITH_ARM TEST(ResNet50, test) { DeviceInfo::Init(); + DeviceInfo::Global().SetRunMode(LITE_POWER_HIGH, FLAGS_threads); lite::Predictor predictor; std::vector<Place> valid_places({Place{TARGET(kHost), PRECISION(kFloat)}, Place{TARGET(kARM), PRECISION(kFloat)}}); @@ -44,7 +43,20 @@ TEST(ResNet50, test) { data[i] = 1; } - predictor.Run(); + for (int i = 0; i < 
FLAGS_warmup; ++i) { + predictor.Run(); + } + + auto start = GetCurrentUS(); + for (int i = 0; i < FLAGS_repeats; ++i) { + predictor.Run(); + } + + LOG(INFO) << "================== Speed Report ==================="; + LOG(INFO) << "Model: " << FLAGS_model_dir << ", threads num " << FLAGS_threads + << ", warmup: " << FLAGS_warmup << ", repeats: " << FLAGS_repeats + << ", spend " << (GetCurrentUS() - start) / FLAGS_repeats / 1000.0 + << " ms in average."; auto* out = predictor.GetOutput(0); std::vector<float> results({2.41399175e-04, 4.13724629e-04, 2.64324830e-04, diff --git a/paddle/fluid/lite/api/test_helper.h b/paddle/fluid/lite/api/test_helper.h new file mode 100644 index 0000000000000000000000000000000000000000..4d184eeb169c4f1c7f1de968e373137c4e9ffcc6 --- /dev/null +++ b/paddle/fluid/lite/api/test_helper.h @@ -0,0 +1,36 @@ +// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#pragma once + +#include <gflags/gflags.h> +#include <sys/time.h> + +// for eval +DEFINE_string(model_dir, "", "model dir"); +DEFINE_int32(warmup, 0, "warmup times"); +DEFINE_int32(repeats, 1, "repeats times"); +DEFINE_int32(threads, 1, "threads num"); + +namespace paddle { +namespace lite { + +inline double GetCurrentUS() { + struct timeval time; + gettimeofday(&time, NULL); + return 1e+6 * time.tv_sec + time.tv_usec; +} + +} // namespace lite +} // namespace paddle