From 6b72a3e10b2a0d4ae0739ec5b52eac3822aaf191 Mon Sep 17 00:00:00 2001 From: tensor-tang Date: Sat, 22 Jun 2019 15:46:35 +0000 Subject: [PATCH] enable 4 models speed test and threads test --- paddle/fluid/lite/api/inceptionv4_test.cc | 20 ++++++++++--- paddle/fluid/lite/api/mobilenetv1_test.cc | 20 ++++++++++--- paddle/fluid/lite/api/mobilenetv2_test.cc | 20 ++++++++++--- paddle/fluid/lite/api/resnet50_test.cc | 20 ++++++++++--- paddle/fluid/lite/api/test_helper.h | 36 +++++++++++++++++++++++ 5 files changed, 100 insertions(+), 16 deletions(-) create mode 100644 paddle/fluid/lite/api/test_helper.h diff --git a/paddle/fluid/lite/api/inceptionv4_test.cc b/paddle/fluid/lite/api/inceptionv4_test.cc index 7908a81100..977aa04abc 100644 --- a/paddle/fluid/lite/api/inceptionv4_test.cc +++ b/paddle/fluid/lite/api/inceptionv4_test.cc @@ -16,20 +16,19 @@ #include #include #include "paddle/fluid/lite/api/cxx_api.h" +#include "paddle/fluid/lite/api/test_helper.h" #include "paddle/fluid/lite/core/mir/use_passes.h" #include "paddle/fluid/lite/core/op_registry.h" #include "paddle/fluid/lite/kernels/use_kernels.h" #include "paddle/fluid/lite/operators/use_ops.h" -// for eval -DEFINE_string(model_dir, "", ""); - namespace paddle { namespace lite { #ifdef LITE_WITH_ARM TEST(InceptionV4, test) { DeviceInfo::Init(); + DeviceInfo::Global().SetRunMode(LITE_POWER_HIGH, FLAGS_threads); lite::Predictor predictor; std::vector valid_places({Place{TARGET(kHost), PRECISION(kFloat)}, Place{TARGET(kARM), PRECISION(kFloat)}}); @@ -44,7 +43,20 @@ TEST(InceptionV4, test) { data[i] = 1; } - predictor.Run(); + for (int i = 0; i < FLAGS_warmup; ++i) { + predictor.Run(); + } + + auto start = GetCurrentUS(); + for (int i = 0; i < FLAGS_repeats; ++i) { + predictor.Run(); + } + + LOG(INFO) << "================== Speed Report ==================="; + LOG(INFO) << "Model: " << FLAGS_model_dir << ", threads num " << FLAGS_threads + << ", warmup: " << FLAGS_warmup << ", repeats: " << FLAGS_repeats + << ", spend 
" << (GetCurrentUS() - start) / FLAGS_repeats / 1000.0 + << " ms in average."; auto* out = predictor.GetOutput(0); std::vector results({0.00078033, 0.00083865, 0.00060029, 0.00057083, diff --git a/paddle/fluid/lite/api/mobilenetv1_test.cc b/paddle/fluid/lite/api/mobilenetv1_test.cc index 94935e8699..9b7d6dc40b 100644 --- a/paddle/fluid/lite/api/mobilenetv1_test.cc +++ b/paddle/fluid/lite/api/mobilenetv1_test.cc @@ -16,20 +16,19 @@ #include #include #include "paddle/fluid/lite/api/cxx_api.h" +#include "paddle/fluid/lite/api/test_helper.h" #include "paddle/fluid/lite/core/mir/use_passes.h" #include "paddle/fluid/lite/core/op_registry.h" #include "paddle/fluid/lite/kernels/use_kernels.h" #include "paddle/fluid/lite/operators/use_ops.h" -// for eval -DEFINE_string(model_dir, "", ""); - namespace paddle { namespace lite { #ifdef LITE_WITH_ARM TEST(MobileNetV1, test) { DeviceInfo::Init(); + DeviceInfo::Global().SetRunMode(LITE_POWER_HIGH, FLAGS_threads); lite::Predictor predictor; std::vector valid_places({Place{TARGET(kHost), PRECISION(kFloat)}, Place{TARGET(kARM), PRECISION(kFloat)}}); @@ -44,7 +43,20 @@ TEST(MobileNetV1, test) { data[i] = 1; } - predictor.Run(); + for (int i = 0; i < FLAGS_warmup; ++i) { + predictor.Run(); + } + + auto start = GetCurrentUS(); + for (int i = 0; i < FLAGS_repeats; ++i) { + predictor.Run(); + } + + LOG(INFO) << "================== Speed Report ==================="; + LOG(INFO) << "Model: " << FLAGS_model_dir << ", threads num " << FLAGS_threads + << ", warmup: " << FLAGS_warmup << ", repeats: " << FLAGS_repeats + << ", spend " << (GetCurrentUS() - start) / FLAGS_repeats / 1000.0 + << " ms in average."; auto* out = predictor.GetOutput(0); std::vector results({1.91308980e-04, 5.92055148e-04, 1.12303176e-04, diff --git a/paddle/fluid/lite/api/mobilenetv2_test.cc b/paddle/fluid/lite/api/mobilenetv2_test.cc index 0d615f61f2..e50ac212c1 100644 --- a/paddle/fluid/lite/api/mobilenetv2_test.cc +++ b/paddle/fluid/lite/api/mobilenetv2_test.cc @@ 
-16,20 +16,19 @@ #include #include #include "paddle/fluid/lite/api/cxx_api.h" +#include "paddle/fluid/lite/api/test_helper.h" #include "paddle/fluid/lite/core/mir/use_passes.h" #include "paddle/fluid/lite/core/op_registry.h" #include "paddle/fluid/lite/kernels/use_kernels.h" #include "paddle/fluid/lite/operators/use_ops.h" -// for eval -DEFINE_string(model_dir, "", ""); - namespace paddle { namespace lite { #ifdef LITE_WITH_ARM TEST(MobileNetV2, test) { DeviceInfo::Init(); + DeviceInfo::Global().SetRunMode(LITE_POWER_HIGH, FLAGS_threads); lite::Predictor predictor; std::vector valid_places({Place{TARGET(kHost), PRECISION(kFloat)}, Place{TARGET(kARM), PRECISION(kFloat)}}); @@ -44,7 +43,20 @@ TEST(MobileNetV2, test) { data[i] = 1; } - predictor.Run(); + for (int i = 0; i < FLAGS_warmup; ++i) { + predictor.Run(); + } + + auto start = GetCurrentUS(); + for (int i = 0; i < FLAGS_repeats; ++i) { + predictor.Run(); + } + + LOG(INFO) << "================== Speed Report ==================="; + LOG(INFO) << "Model: " << FLAGS_model_dir << ", threads num " << FLAGS_threads + << ", warmup: " << FLAGS_warmup << ", repeats: " << FLAGS_repeats + << ", spend " << (GetCurrentUS() - start) / FLAGS_repeats / 1000.0 + << " ms in average."; auto* out = predictor.GetOutput(0); std::vector results({0.00097802, 0.00099822, 0.00103093, 0.00100121, diff --git a/paddle/fluid/lite/api/resnet50_test.cc b/paddle/fluid/lite/api/resnet50_test.cc index cb63ad83ca..a1e57bf32c 100644 --- a/paddle/fluid/lite/api/resnet50_test.cc +++ b/paddle/fluid/lite/api/resnet50_test.cc @@ -16,20 +16,19 @@ #include #include #include "paddle/fluid/lite/api/cxx_api.h" +#include "paddle/fluid/lite/api/test_helper.h" #include "paddle/fluid/lite/core/mir/use_passes.h" #include "paddle/fluid/lite/core/op_registry.h" #include "paddle/fluid/lite/kernels/use_kernels.h" #include "paddle/fluid/lite/operators/use_ops.h" -// for eval -DEFINE_string(model_dir, "", ""); - namespace paddle { namespace lite { #ifdef LITE_WITH_ARM 
TEST(ResNet50, test) { DeviceInfo::Init(); + DeviceInfo::Global().SetRunMode(LITE_POWER_HIGH, FLAGS_threads); lite::Predictor predictor; std::vector valid_places({Place{TARGET(kHost), PRECISION(kFloat)}, Place{TARGET(kARM), PRECISION(kFloat)}}); @@ -44,7 +43,20 @@ TEST(ResNet50, test) { data[i] = 1; } - predictor.Run(); + for (int i = 0; i < FLAGS_warmup; ++i) { + predictor.Run(); + } + + auto start = GetCurrentUS(); + for (int i = 0; i < FLAGS_repeats; ++i) { + predictor.Run(); + } + + LOG(INFO) << "================== Speed Report ==================="; + LOG(INFO) << "Model: " << FLAGS_model_dir << ", threads num " << FLAGS_threads + << ", warmup: " << FLAGS_warmup << ", repeats: " << FLAGS_repeats + << ", spend " << (GetCurrentUS() - start) / FLAGS_repeats / 1000.0 + << " ms in average."; auto* out = predictor.GetOutput(0); std::vector results({2.41399175e-04, 4.13724629e-04, 2.64324830e-04, diff --git a/paddle/fluid/lite/api/test_helper.h b/paddle/fluid/lite/api/test_helper.h new file mode 100644 index 0000000000..4d184eeb16 --- /dev/null +++ b/paddle/fluid/lite/api/test_helper.h @@ -0,0 +1,36 @@ +// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +#pragma once + +#include <sys/time.h> +#include <time.h> + +// for eval +DEFINE_string(model_dir, "", "model dir"); +DEFINE_int32(warmup, 0, "warmup times"); +DEFINE_int32(repeats, 1, "repeats times"); +DEFINE_int32(threads, 1, "threads num"); + +namespace paddle { +namespace lite { + +inline double GetCurrentUS() { + struct timeval time; + gettimeofday(&time, NULL); + return 1e+6 * time.tv_sec + time.tv_usec; +} + +} // namespace lite +} // namespace paddle -- GitLab