weixin_41840029 / PaddleOCR (forked from PaddlePaddle / PaddleOCR)
Commit cc132dec
Authored on Aug 24, 2021 by MissPenguin

rm autolog h file

Parent: 45d4afec
Changes: 2 changed files with 0 additions and 84 deletions

deploy/cpp_infer/include/autolog.h  +0 -83
deploy/cpp_infer/src/main.cpp       +0 -1
deploy/cpp_infer/include/autolog.h (deleted, 100644 → 0)
// Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#pragma once
#include <chrono>
#include <iomanip>
#include <iostream>
#include <ostream>
#include <stdlib.h>
#include <vector>
#include <glog/logging.h>
// Collects inference configuration and per-stage timing statistics and
// reports them through glog.
class AutoLogger {
public:
    AutoLogger(std::string model_name,
               bool use_gpu,
               bool enable_tensorrt,
               bool enable_mkldnn,
               int cpu_threads,
               int batch_size,
               std::string input_shape,
               std::string model_precision,
               std::vector<double> time_info,
               int img_num) {
        this->model_name_ = model_name;
        this->use_gpu_ = use_gpu;
        this->enable_tensorrt_ = enable_tensorrt;
        this->enable_mkldnn_ = enable_mkldnn;
        this->cpu_threads_ = cpu_threads;
        this->batch_size_ = batch_size;
        this->input_shape_ = input_shape;
        this->model_precision_ = model_precision;
        this->time_info_ = time_info;
        this->img_num_ = img_num;
    }

    // Prints Config/Data/Model/Perf summaries via LOG(INFO).
    void report() {
        LOG(INFO) << "----------------------- Config info -----------------------";
        LOG(INFO) << "runtime_device: " << (this->use_gpu_ ? "gpu" : "cpu");
        LOG(INFO) << "ir_optim: " << "True";
        LOG(INFO) << "enable_memory_optim: " << "True";
        LOG(INFO) << "enable_tensorrt: " << this->enable_tensorrt_;
        LOG(INFO) << "enable_mkldnn: " << (this->enable_mkldnn_ ? "True" : "False");
        LOG(INFO) << "cpu_math_library_num_threads: " << this->cpu_threads_;
        LOG(INFO) << "----------------------- Data info -----------------------";
        LOG(INFO) << "batch_size: " << this->batch_size_;
        LOG(INFO) << "input_shape: " << this->input_shape_;
        LOG(INFO) << "data_num: " << this->img_num_;
        LOG(INFO) << "----------------------- Model info -----------------------";
        LOG(INFO) << "model_name: " << this->model_name_;
        LOG(INFO) << "precision: " << this->model_precision_;
        LOG(INFO) << "----------------------- Perf info ------------------------";
        LOG(INFO) << "Total time spent(ms): "
                  << std::accumulate(this->time_info_.begin(), this->time_info_.end(), 0);
        LOG(INFO) << "preprocess_time(ms): " << this->time_info_[0] / this->img_num_
                  << ", inference_time(ms): " << this->time_info_[1] / this->img_num_
                  << ", postprocess_time(ms): " << this->time_info_[2] / this->img_num_;
    }

private:
    std::string model_name_;
    bool use_gpu_ = false;
    bool enable_tensorrt_ = false;
    bool enable_mkldnn_ = true;
    int cpu_threads_ = 10;
    int batch_size_ = 1;
    std::string input_shape_ = "dynamic";
    std::string model_precision_ = "fp32";
    std::vector<double> time_info_;
    int img_num_;
};
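For context, the deleted header defined the benchmarking helper that was included from deploy/cpp_infer/src/main.cpp until this commit: callers accumulate per-stage timings (preprocess, inference, postprocess) in a std::vector<double> and call report() to print a summary through glog. Below is a minimal usage sketch of the removed class; the model name and timing values are illustrative, not taken from this commit, and compiling it would require glog plus the autolog.h deleted here.

```cpp
#include <string>
#include <vector>
// #include <include/autolog.h>  // the header removed by this commit

int main() {
    // Illustrative per-stage totals in milliseconds (preprocess, inference,
    // postprocess) accumulated over all processed images.
    std::vector<double> time_info = {120.0, 950.0, 80.0};
    int img_num = 10;

    AutoLogger autolog("ocr_det",                   // model_name (illustrative)
                       /*use_gpu=*/false,
                       /*enable_tensorrt=*/false,
                       /*enable_mkldnn=*/true,
                       /*cpu_threads=*/10,
                       /*batch_size=*/1,
                       /*input_shape=*/"dynamic",
                       /*model_precision=*/"fp32",
                       time_info,
                       img_num);
    autolog.report();  // prints Config/Data/Model/Perf info via LOG(INFO)
    return 0;
}
```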
deploy/cpp_infer/src/main.cpp

@@ -35,7 +35,6 @@
 #include <sys/stat.h>
 #include <gflags/gflags.h>
-#include <include/autolog.h>
 
 DEFINE_bool(use_gpu, false, "Infering with GPU or CPU.");
 DEFINE_int32(gpu_id, 0, "Device id of GPU to execute.");
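The remaining context lines declare command-line options with gflags. As a side note, a minimal, self-contained sketch of how flags declared this way are parsed and read (independent of this commit; the binary name and printed output are illustrative, and in some gflags versions the namespace is gflags:: rather than google::) might look like:

```cpp
#include <iostream>
#include <gflags/gflags.h>

// Same style of flag declarations as in deploy/cpp_infer/src/main.cpp.
DEFINE_bool(use_gpu, false, "Infering with GPU or CPU.");
DEFINE_int32(gpu_id, 0, "Device id of GPU to execute.");

int main(int argc, char **argv) {
    // Parse flags from the command line, e.g. ./demo --use_gpu=true --gpu_id=1
    google::ParseCommandLineFlags(&argc, &argv, true);

    std::cout << "use_gpu=" << FLAGS_use_gpu
              << ", gpu_id=" << FLAGS_gpu_id << std::endl;
    return 0;
}
```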