From d666c8eb1de356903bf91c69df4a4045dabbd933 Mon Sep 17 00:00:00 2001 From: nhzlx Date: Wed, 28 Nov 2018 11:25:52 +0000 Subject: [PATCH] fix benchmark --- paddle/fluid/inference/utils/benchmark.cc | 2 +- paddle/fluid/inference/utils/benchmark.h | 8 +++++--- 2 files changed, 6 insertions(+), 4 deletions(-) diff --git a/paddle/fluid/inference/utils/benchmark.cc b/paddle/fluid/inference/utils/benchmark.cc index 021edc2de5..d03aa11b75 100644 --- a/paddle/fluid/inference/utils/benchmark.cc +++ b/paddle/fluid/inference/utils/benchmark.cc @@ -33,7 +33,7 @@ std::string Benchmark::SerializeToString() const { ss << batch_size_ << "\t"; ss << num_threads_ << "\t"; ss << latency_ << "\t"; - ss << 1000 / latency_; + ss << 1000.0 / latency_; ss << '\n'; return ss.str(); } diff --git a/paddle/fluid/inference/utils/benchmark.h b/paddle/fluid/inference/utils/benchmark.h index 80e8f77adb..76a3dd2c29 100644 --- a/paddle/fluid/inference/utils/benchmark.h +++ b/paddle/fluid/inference/utils/benchmark.h @@ -11,9 +11,11 @@ // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. +#pragma once #include <fstream> #include <iostream> +#include <string> namespace paddle { namespace inference { @@ -31,8 +33,8 @@ struct Benchmark { bool use_gpu() const { return use_gpu_; } void SetUseGpu() { use_gpu_ = true; } - int latency() const { return latency_; } - void SetLatency(int x) { latency_ = x; } + float latency() const { return latency_; } + void SetLatency(float x) { latency_ = x; } const std::string& name() const { return name_; } void SetName(const std::string& name) { name_ = name; } @@ -43,7 +45,7 @@ struct Benchmark { private: bool use_gpu_{false}; int batch_size_{0}; - int latency_; + float latency_; int num_threads_{1}; std::string name_; }; -- GitLab