Commit 4dbd85b8 (PaddlePaddle/PaddleClas)
Author: dongshuilong
Date:   Nov 29, 2021
Parent: 9176a01a

    update cls_cpp, use yaml as input

Showing 9 changed files with 166 additions and 137 deletions:
deploy/cpp/CMakeLists.txt                   +9   -3
deploy/cpp/external-cmake/yaml-cpp.cmake    +30  -0
deploy/cpp/include/cls.h                    +19  -21
deploy/cpp/include/cls_config.h             +65  -44
deploy/cpp/include/preprocess_op.h          +4   -4
deploy/cpp/src/cls.cpp                      +2   -3
deploy/cpp/src/cls_config.cpp               +12  -41
deploy/cpp/src/main.cpp                     +18  -12
deploy/cpp/src/preprocess_op.cpp            +7   -9
deploy/cpp/CMakeLists.txt  (+9 -3)

@@ -14,6 +14,11 @@ SET(TENSORRT_DIR "" CACHE PATH "Compile demo with TensorRT")
 set(DEMO_NAME "clas_system")
 
+include(external-cmake/yaml-cpp.cmake)
+include_directories("${CMAKE_SOURCE_DIR}/")
+include_directories("${CMAKE_CURRENT_BINARY_DIR}/ext/yaml-cpp/src/ext-yaml-cpp/include")
+link_directories("${CMAKE_CURRENT_BINARY_DIR}/ext/yaml-cpp/lib")
+
 macro(safe_set_static_flag)
     foreach(flag_var
         CMAKE_CXX_FLAGS CMAKE_CXX_FLAGS_DEBUG CMAKE_CXX_FLAGS_RELEASE
@@ -61,7 +66,7 @@ if (WIN32)
         add_definitions(-DSTATIC_LIB)
     endif()
 else()
-    set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -g -o3 -std=c++11")
+    set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -g -O3 -std=c++11")
     set(CMAKE_STATIC_LIBRARY_PREFIX "")
 endif()
 message("flags" ${CMAKE_CXX_FLAGS})
@@ -153,7 +158,7 @@ endif(WITH_STATIC_LIB)
 if (NOT WIN32)
     set(DEPS ${DEPS}
         ${MATH_LIB} ${MKLDNN_LIB}
-        glog gflags protobuf z xxhash)
+        glog gflags protobuf z xxhash yaml-cpp)
     if(EXISTS "${PADDLE_LIB}/third_party/install/snappystream/lib")
         set(DEPS ${DEPS} snappystream)
@@ -164,7 +169,7 @@ if (NOT WIN32)
 else()
     set(DEPS ${DEPS}
         ${MATH_LIB} ${MKLDNN_LIB}
-        glog gflags_static libprotobuf xxhash)
+        glog gflags_static libprotobuf xxhash libyaml-cppmt)
     set(DEPS ${DEPS} libcmt shlwapi)
     if (EXISTS "${PADDLE_LIB}/third_party/install/snappy/lib")
         set(DEPS ${DEPS} snappy)
@@ -204,6 +209,7 @@ include_directories(${FETCHCONTENT_BASE_DIR}/extern_autolog-src)
 AUX_SOURCE_DIRECTORY(./src SRCS)
 add_executable(${DEMO_NAME} ${SRCS})
+ADD_DEPENDENCIES(${DEMO_NAME} ext-yaml-cpp)
 target_link_libraries(${DEMO_NAME} ${DEPS})
deploy/cpp/external-cmake/yaml-cpp.cmake  (new file, +30 -0)

find_package(Git REQUIRED)
include(ExternalProject)

message("${CMAKE_BUILD_TYPE}")

ExternalProject_Add(
        ext-yaml-cpp
        URL https://bj.bcebos.com/paddlex/deploy/deps/yaml-cpp.zip
        URL_MD5 9542d6de397d1fbd649ed468cb5850e6
        CMAKE_ARGS
        -DYAML_CPP_BUILD_TESTS=OFF
        -DYAML_CPP_BUILD_TOOLS=OFF
        -DYAML_CPP_INSTALL=OFF
        -DYAML_CPP_BUILD_CONTRIB=OFF
        -DMSVC_SHARED_RT=OFF
        -DBUILD_SHARED_LIBS=OFF
        -DCMAKE_BUILD_TYPE=${CMAKE_BUILD_TYPE}
        -DCMAKE_CXX_FLAGS=${CMAKE_CXX_FLAGS}
        -DCMAKE_CXX_FLAGS_DEBUG=${CMAKE_CXX_FLAGS_DEBUG}
        -DCMAKE_CXX_FLAGS_RELEASE=${CMAKE_CXX_FLAGS_RELEASE}
        -DCMAKE_LIBRARY_OUTPUT_DIRECTORY=${CMAKE_BINARY_DIR}/ext/yaml-cpp/lib
        -DCMAKE_ARCHIVE_OUTPUT_DIRECTORY=${CMAKE_BINARY_DIR}/ext/yaml-cpp/lib
        PREFIX "${CMAKE_BINARY_DIR}/ext/yaml-cpp"
        # Disable install step
        INSTALL_COMMAND ""
        LOG_DOWNLOAD ON
        LOG_BUILD 1
)
deploy/cpp/include/cls.h  (+19 -21)

@@ -28,6 +28,7 @@
 #include <fstream>
 #include <numeric>
 
+#include "include/cls_config.h"
 #include <include/preprocess_op.h>
 
 using namespace paddle_infer;
@@ -36,25 +37,21 @@ namespace PaddleClas {
 class Classifier {
 public:
-  explicit Classifier(const std::string &model_path,
-                      const std::string &params_path, const bool &use_gpu,
-                      const int &gpu_id, const int &gpu_mem,
-                      const int &cpu_math_library_num_threads,
-                      const bool &use_mkldnn, const bool &use_tensorrt,
-                      const bool &use_fp16, const int &resize_short_size,
-                      const int &crop_size) {
-    this->use_gpu_ = use_gpu;
-    this->gpu_id_ = gpu_id;
-    this->gpu_mem_ = gpu_mem;
-    this->cpu_math_library_num_threads_ = cpu_math_library_num_threads;
-    this->use_mkldnn_ = use_mkldnn;
-    this->use_tensorrt_ = use_tensorrt;
-    this->use_fp16_ = use_fp16;
-    this->resize_short_size_ = resize_short_size;
-    this->crop_size_ = crop_size;
-    LoadModel(model_path, params_path);
+  explicit Classifier(const ClsConfig &config) {
+    this->use_gpu_ = config.use_gpu;
+    this->gpu_id_ = config.gpu_id;
+    this->gpu_mem_ = config.gpu_mem;
+    this->cpu_math_library_num_threads_ = config.cpu_threads;
+    this->use_fp16_ = config.use_fp16;
+    this->use_mkldnn_ = config.use_mkldnn;
+    this->use_tensorrt_ = config.use_tensorrt;
+    this->mean_ = config.mean;
+    this->std_ = config.std;
+    this->resize_short_size_ = config.resize_short_size;
+    this->scale_ = config.scale;
+    this->crop_size_ = config.crop_size;
+    this->ir_optim_ = config.ir_optim;
+    LoadModel(config.cls_model_path, config.cls_params_path);
   }
 
   // Load Paddle inference model
@@ -73,10 +70,11 @@ private:
   bool use_mkldnn_ = false;
   bool use_tensorrt_ = false;
   bool use_fp16_ = false;
+  bool ir_optim_ = true;
   std::vector<float> mean_ = {0.485f, 0.456f, 0.406f};
-  std::vector<float> scale_ = {1 / 0.229f, 1 / 0.224f, 1 / 0.225f};
-  bool is_scale_ = true;
+  std::vector<float> std_ = {0.229f, 0.224f, 0.225f};
+  float scale_ = 0.00392157;
   int resize_short_size_ = 256;
   int crop_size_ = 224;
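With this change the classifier is configured from a single ClsConfig object instead of eleven positional arguments. A minimal sketch of the new call site, mirroring what the updated main.cpp below does; the YAML path is a placeholder, and the file must be compiled inside this deploy/cpp project so the project headers resolve:

#include <include/cls.h>
#include <include/cls_config.h>

int main() {
  // Parse every inference setting from one YAML file (placeholder path).
  PaddleClas::ClsConfig config("inference_cls.yaml");
  config.PrintConfigInfo();

  // The constructor copies the settings and calls LoadModel() with the
  // model/params paths derived from Global.inference_model_dir.
  PaddleClas::Classifier classifier(config);
  return 0;
}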
deploy/cpp/include/cls_config.h  (+65 -44)

@@ -14,6 +14,14 @@
 #pragma once
 
+#ifdef WIN32
+#define OS_PATH_SEP "\\"
+#else
+#define OS_PATH_SEP "/"
+#endif
+
+#include "include/utility.h"
+#include "yaml-cpp/yaml.h"
 #include <iomanip>
 #include <iostream>
 #include <map>
@@ -21,70 +29,83 @@
 #include <string>
 #include <vector>
 
-#include "include/utility.h"
-
 namespace PaddleClas {
 
 class ClsConfig {
 public:
-  explicit ClsConfig(const std::string &config_file) {
-    config_map_ = LoadConfig(config_file);
-    this->use_gpu = bool(stoi(config_map_["use_gpu"]));
-    this->gpu_id = stoi(config_map_["gpu_id"]);
-    this->gpu_mem = stoi(config_map_["gpu_mem"]);
-    this->cpu_threads = stoi(config_map_["cpu_threads"]);
-    this->use_mkldnn = bool(stoi(config_map_["use_mkldnn"]));
-    this->use_tensorrt = bool(stoi(config_map_["use_tensorrt"]));
-    this->use_fp16 = bool(stoi(config_map_["use_fp16"]));
-    this->cls_model_path.assign(config_map_["cls_model_path"]);
-    this->cls_params_path.assign(config_map_["cls_params_path"]);
-    this->resize_short_size = stoi(config_map_["resize_short_size"]);
-    this->crop_size = stoi(config_map_["crop_size"]);
-    this->benchmark = bool(stoi(config_map_["benchmark"]));
+  explicit ClsConfig(const std::string &path) {
+    ReadYamlConfig(path);
+    this->infer_imgs =
+        this->config_file["Global"]["infer_imgs"].as<std::string>();
+    this->batch_size = this->config_file["Global"]["batch_size"].as<int>();
+    this->use_gpu = this->config_file["Global"]["use_gpu"].as<bool>();
+    if (this->config_file["Global"]["gpu_id"].IsDefined())
+      this->gpu_id = this->config_file["Global"]["gpu_id"].as<int>();
+    else
+      this->gpu_id = 0;
+    this->gpu_mem = this->config_file["Global"]["gpu_mem"].as<int>();
+    this->cpu_threads =
+        this->config_file["Global"]["cpu_num_threads"].as<int>();
+    this->use_mkldnn = this->config_file["Global"]["enable_mkldnn"].as<bool>();
+    this->use_tensorrt = this->config_file["Global"]["use_tensorrt"].as<bool>();
+    this->use_fp16 = this->config_file["Global"]["use_fp16"].as<bool>();
+    this->enable_benchmark =
+        this->config_file["Global"]["enable_benchmark"].as<bool>();
+    this->ir_optim = this->config_file["Global"]["ir_optim"].as<bool>();
+    this->enable_profile =
+        this->config_file["Global"]["enable_profile"].as<bool>();
+    this->cls_model_path =
+        this->config_file["Global"]["inference_model_dir"].as<std::string>() +
+        OS_PATH_SEP + "inference.pdmodel";
+    this->cls_params_path =
+        this->config_file["Global"]["inference_model_dir"].as<std::string>() +
+        OS_PATH_SEP + "inference.pdiparams";
+    this->resize_short_size =
+        this->config_file["PreProcess"]["transform_ops"][0]["ResizeImage"]
+                         ["resize_short"]
+            .as<int>();
+    this->crop_size =
+        this->config_file["PreProcess"]["transform_ops"][1]["CropImage"]["size"]
+            .as<int>();
+    this->scale = this->config_file["PreProcess"]["transform_ops"][2]
+                                   ["NormalizeImage"]["scale"]
+                      .as<float>();
+    this->mean = this->config_file["PreProcess"]["transform_ops"][2]
+                                  ["NormalizeImage"]["mean"]
+                     .as<std::vector<float>>();
+    this->std = this->config_file["PreProcess"]["transform_ops"][2]
+                                 ["NormalizeImage"]["std"]
+                    .as<std::vector<float>>();
+    if (this->config_file["Global"]["benchmark"].IsDefined())
+      this->benchmark = this->config_file["Global"]["benchmark"].as<bool>();
+    else
+      this->benchmark = false;
   }
 
+  YAML::Node config_file;
   bool use_gpu = false;
   int gpu_id = 0;
   int gpu_mem = 4000;
   int cpu_threads = 1;
   bool use_mkldnn = false;
   bool use_tensorrt = false;
   bool use_fp16 = false;
   bool benchmark = false;
+  int batch_size = 1;
+  bool enable_benchmark = false;
+  bool ir_optim = true;
+  bool enable_profile = false;
   std::string cls_model_path;
   std::string cls_params_path;
+  std::string infer_imgs;
   int resize_short_size = 256;
   int crop_size = 224;
+  float scale = 0.00392157;
+  std::vector<float> mean = {0.485, 0.456, 0.406};
+  std::vector<float> std = {0.229, 0.224, 0.225};
 
   void PrintConfigInfo();
-
-private:
-  // Load configuration
-  std::map<std::string, std::string> LoadConfig(const std::string &config_file);
-
-  std::vector<std::string> split(const std::string &str,
-                                 const std::string &delim);
-
-  std::map<std::string, std::string> config_map_;
+  void ReadYamlConfig(const std::string &path);
 };
 } // namespace PaddleClas
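ClsConfig now pulls every setting from nested YAML keys: runtime options from Global.* and preprocessing parameters from PreProcess.transform_ops. A self-contained sketch of that yaml-cpp access pattern; the inline YAML document here is an illustrative assumption modeled on the keys the constructor reads, not a copy of the repository's shipped config file:

#include <iostream>
#include <string>
#include <vector>

#include "yaml-cpp/yaml.h"

int main() {
  // Illustrative document with the keys ClsConfig expects; values are made up.
  const char *doc = R"(
Global:
  infer_imgs: ./images/demo.jpg
  inference_model_dir: ./models
  batch_size: 1
  use_gpu: false
  enable_mkldnn: true
  cpu_num_threads: 10
PreProcess:
  transform_ops:
    - ResizeImage: {resize_short: 256}
    - CropImage: {size: 224}
    - NormalizeImage:
        scale: 0.00392157
        mean: [0.485, 0.456, 0.406]
        std: [0.229, 0.224, 0.225]
)";
  YAML::Node config = YAML::Load(doc);

  // Required keys: nested operator[] plus as<T>(), the same pattern ClsConfig uses.
  std::string infer_imgs = config["Global"]["infer_imgs"].as<std::string>();
  int resize_short = config["PreProcess"]["transform_ops"][0]["ResizeImage"]
                           ["resize_short"]
                         .as<int>();
  std::vector<float> mean = config["PreProcess"]["transform_ops"][2]
                                  ["NormalizeImage"]["mean"]
                                .as<std::vector<float>>();

  // Optional keys are guarded with IsDefined() before they are read.
  int gpu_id = config["Global"]["gpu_id"].IsDefined()
                   ? config["Global"]["gpu_id"].as<int>()
                   : 0;

  std::cout << infer_imgs << " " << resize_short << " " << mean[0] << " "
            << gpu_id << std::endl;
  return 0;
}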
deploy/cpp/include/preprocess_op.h  (+4 -4)

 // Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
 //
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
 // You may obtain a copy of the License at
 //
 // http://www.apache.org/licenses/LICENSE-2.0
 
@@ -34,7 +34,7 @@ namespace PaddleClas {
 class Normalize {
 public:
   virtual void Run(cv::Mat *im, const std::vector<float> &mean,
-                   const std::vector<float> &scale, const bool is_scale = true);
+                   const std::vector<float> &std, float &scale);
 };
 
 // RGB -> CHW
@@ -53,4 +53,4 @@ public:
   virtual void Run(const cv::Mat &img, cv::Mat &resize_img, int max_size_len);
 };
 
 } // namespace PaddleClas
\ No newline at end of file
deploy/cpp/src/cls.cpp  (+2 -3)

@@ -44,7 +44,7 @@ void Classifier::LoadModel(const std::string &model_path,
 
   // true for multiple input
   config.SwitchSpecifyInputNames(true);
-  config.SwitchIrOptim(true);
+  config.SwitchIrOptim(this->ir_optim_);
 
   config.EnableMemoryOptim();
   config.DisableGlogInfo();
@@ -62,8 +62,7 @@ double Classifier::Run(cv::Mat &img, std::vector<double> *times) {
   this->crop_op_.Run(resize_img, this->crop_size_);
-  this->normalize_op_.Run(&resize_img, this->mean_, this->scale_,
-                          this->is_scale_);
+  this->normalize_op_.Run(&resize_img, this->mean_, this->std_, this->scale_);
 
   std::vector<float> input(1 * 3 * resize_img.rows * resize_img.cols, 0.0f);
   this->permute_op_.Run(&resize_img, input.data());
deploy/cpp/src/cls_config.cpp  (+12 -41)

@@ -16,49 +16,20 @@
 namespace PaddleClas {
 
-std::vector<std::string> ClsConfig::split(const std::string &str,
-                                          const std::string &delim) {
-  std::vector<std::string> res;
-  if ("" == str)
-    return res;
-  char *strs = new char[str.length() + 1];
-  std::strcpy(strs, str.c_str());
-
-  char *d = new char[delim.length() + 1];
-  std::strcpy(d, delim.c_str());
-
-  char *p = std::strtok(strs, d);
-  while (p) {
-    std::string s = p;
-    res.push_back(s);
-    p = std::strtok(NULL, d);
-  }
-
-  return res;
-}
-
-std::map<std::string, std::string>
-ClsConfig::LoadConfig(const std::string &config_path) {
-  auto config = Utility::ReadDict(config_path);
-
-  std::map<std::string, std::string> dict;
-  for (int i = 0; i < config.size(); i++) {
-    // pass for empty line or comment
-    if (config[i].size() <= 1 || config[i][0] == '#') {
-      continue;
-    }
-    std::vector<std::string> res = split(config[i], " ");
-    dict[res[0]] = res[1];
-  }
-  return dict;
-}
-
 void ClsConfig::PrintConfigInfo() {
   std::cout << "=======Paddle Class inference config======" << std::endl;
-  for (auto iter = config_map_.begin(); iter != config_map_.end(); iter++) {
-    std::cout << iter->first << " : " << iter->second << std::endl;
-  }
+  std::cout << this->config_file << std::endl;
   std::cout << "=======End of Paddle Class inference config======" << std::endl;
 }
 
-} // namespace PaddleClas
\ No newline at end of file
+void ClsConfig::ReadYamlConfig(const std::string &path) {
+  try {
+    this->config_file = YAML::LoadFile(path);
+  } catch (YAML::BadFile &e) {
+    std::cout << "Something wrong in yaml file, please check yaml file"
+              << std::endl;
+    exit(1);
+  }
+}
+}; // namespace PaddleClas
deploy/cpp/src/main.cpp  (+18 -12)

@@ -27,6 +27,7 @@
 #include <numeric>
 
 #include <auto_log/autolog.h>
+#include <gflags/gflags.h>
 #include <include/cls.h>
 #include <include/cls_config.h>
 
@@ -34,18 +35,27 @@ using namespace std;
 using namespace cv;
 using namespace PaddleClas;
 
+DEFINE_string(config, "", "Path of yaml file");
+DEFINE_string(c, "", "Path of yaml file");
+
 int main(int argc, char **argv) {
-  if (argc < 3) {
-    std::cerr << "[ERROR] usage: " << argv[0]
-              << " configure_filepath image_path\n";
+  google::ParseCommandLineFlags(&argc, &argv, true);
+  std::string yaml_path = "";
+  if (FLAGS_config == "" && FLAGS_c == "") {
+    std::cerr << "[ERROR] usage: " << std::endl
+              << argv[0] << " -c $yaml_path" << std::endl
+              << "or:" << std::endl
+              << argv[0] << " -config $yaml_path" << std::endl;
     exit(1);
+  } else if (FLAGS_config != "") {
+    yaml_path = FLAGS_config;
+  } else {
+    yaml_path = FLAGS_c;
   }
 
-  ClsConfig config(argv[1]);
+  ClsConfig config(yaml_path);
   config.PrintConfigInfo();
 
-  std::string path(argv[2]);
+  std::string path(config.infer_imgs);
 
   std::vector<std::string> img_files_list;
   if (cv::utils::fs::isDirectory(path)) {
@@ -60,11 +70,7 @@ int main(int argc, char **argv) {
   std::cout << "img_file_list length: " << img_files_list.size() << std::endl;
 
-  Classifier classifier(config.cls_model_path, config.cls_params_path,
-                        config.use_gpu, config.gpu_id, config.gpu_mem,
-                        config.cpu_threads, config.use_mkldnn,
-                        config.use_tensorrt, config.use_fp16,
-                        config.resize_short_size, config.crop_size);
+  Classifier classifier(config);
 
   double elapsed_time = 0.0;
   std::vector<double> cls_times;
deploy/cpp/src/preprocess_op.cpp  (+7 -9)

@@ -42,20 +42,18 @@ void Permute::Run(const cv::Mat *im, float *data) {
 }
 
 void Normalize::Run(cv::Mat *im, const std::vector<float> &mean,
-                    const std::vector<float> &scale, const bool is_scale) {
-  double e = 1.0;
-  if (is_scale) {
-    e /= 255.0;
+                    const std::vector<float> &std, float &scale) {
+  if (scale) {
+    (*im).convertTo(*im, CV_32FC3, scale);
   }
-  (*im).convertTo(*im, CV_32FC3, e);
   for (int h = 0; h < im->rows; h++) {
     for (int w = 0; w < im->cols; w++) {
       im->at<cv::Vec3f>(h, w)[0] =
-          (im->at<cv::Vec3f>(h, w)[0] - mean[0]) * scale[0];
+          (im->at<cv::Vec3f>(h, w)[0] - mean[0]) / std[0];
       im->at<cv::Vec3f>(h, w)[1] =
-          (im->at<cv::Vec3f>(h, w)[1] - mean[1]) * scale[1];
+          (im->at<cv::Vec3f>(h, w)[1] - mean[1]) / std[1];
       im->at<cv::Vec3f>(h, w)[2] =
-          (im->at<cv::Vec3f>(h, w)[2] - mean[2]) * scale[2];
+          (im->at<cv::Vec3f>(h, w)[2] - mean[2]) / std[2];
     }
   }
 }
@@ -87,4 +85,4 @@ void ResizeImg::Run(const cv::Mat &img, cv::Mat &resize_img,
   cv::resize(img, resize_img, cv::Size(resize_w, resize_h));
 }
 
 } // namespace PaddleClas
\ No newline at end of file
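The old Normalize multiplied by a per-channel reciprocal-std vector after an optional fixed 1/255 prescale; the new one multiplies by the scale value read from the YAML file and then divides by the per-channel std. A minimal standalone check of the new arithmetic on a single pixel, using only OpenCV; the 1x1 image and pixel value 128 are arbitrary test inputs, while scale/mean/std are the defaults from cls_config.h:

#include <iostream>
#include <vector>
#include <opencv2/core.hpp>

int main() {
  // Defaults from cls_config.h: scale = 1/255, ImageNet mean/std.
  float scale = 0.00392157f;
  std::vector<float> mean = {0.485f, 0.456f, 0.406f};
  std::vector<float> std_ = {0.229f, 0.224f, 0.225f};

  // A 1x1 three-channel image with value 128 in every channel.
  cv::Mat im(1, 1, CV_8UC3, cv::Scalar(128, 128, 128));

  // Same steps as the new Normalize::Run: rescale to [0, 1], then subtract
  // the per-channel mean and divide by the per-channel std.
  im.convertTo(im, CV_32FC3, scale);
  for (int c = 0; c < 3; ++c) {
    im.at<cv::Vec3f>(0, 0)[c] = (im.at<cv::Vec3f>(0, 0)[c] - mean[c]) / std_[c];
  }
  std::cout << im.at<cv::Vec3f>(0, 0) << std::endl; // roughly [0.07, 0.21, 0.43]
  return 0;
}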