PaddlePaddle / PaddleSeg

Commit adac158d
Authored Nov 04, 2019 by joey12300
change utils.h, seg_conf_parser.h to google code style.
Parent: 0fc9b582
Showing 2 changed files with 258 additions and 254 deletions
inference/utils/seg_conf_parser.h   +165 -166
inference/utils/utils.h             +93 -88
inference/utils/seg_conf_parser.h
@@ -4,7 +4,7 @@
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
@@ -12,168 +12,167 @@
// See the License for the specific language governing permissions and
// limitations under the License.

#pragma once
#include <yaml-cpp/yaml.h>
#include <iostream>
#include <vector>
#include <string>

namespace PaddleSolution {

class PaddleSegModelConfigPaser {
 public:
    PaddleSegModelConfigPaser()
        : _class_num(0),
          _channels(0),
          _use_gpu(0),
          _batch_size(1),
          _model_file_name("__model__"),
          _param_file_name("__params__") {
    }
    ~PaddleSegModelConfigPaser() {
    }

    void reset() {
        _resize.clear();
        _mean.clear();
        _std.clear();
        _img_type.clear();
        _class_num = 0;
        _channels = 0;
        _use_gpu = 0;
        _batch_size = 1;
        _model_file_name.clear();
        _model_path.clear();
        _param_file_name.clear();
    }

    std::string process_parenthesis(const std::string& str) {
        if (str.size() < 2) {
            return str;
        }
        std::string nstr(str);
        if (str[0] == '(' && str.back() == ')') {
            nstr[0] = '[';
            nstr[str.size() - 1] = ']';
        }
        return nstr;
    }

    template <typename T>
    std::vector<T> parse_str_to_vec(const std::string& str) {
        std::vector<T> data;
        auto node = YAML::Load(str);
        for (const auto& item : node) {
            data.push_back(item.as<T>());
        }
        return data;
    }

    bool load_config(const std::string& conf_file) {
        reset();
        YAML::Node config = YAML::LoadFile(conf_file);
        // 1. get resize
        auto str = config["DEPLOY"]["EVAL_CROP_SIZE"].as<std::string>();
        _resize = parse_str_to_vec<int>(process_parenthesis(str));
        // 2. get mean
        for (const auto& item : config["DEPLOY"]["MEAN"]) {
            _mean.push_back(item.as<float>());
        }
        // 3. get std
        for (const auto& item : config["DEPLOY"]["STD"]) {
            _std.push_back(item.as<float>());
        }
        // 4. get image type
        _img_type = config["DEPLOY"]["IMAGE_TYPE"].as<std::string>();
        // 5. get class number
        _class_num = config["DEPLOY"]["NUM_CLASSES"].as<int>();
        // 7. set model path
        _model_path = config["DEPLOY"]["MODEL_PATH"].as<std::string>();
        // 8. get model file_name
        _model_file_name = config["DEPLOY"]["MODEL_FILENAME"].as<std::string>();
        // 9. get model param file name
        _param_file_name =
            config["DEPLOY"]["PARAMS_FILENAME"].as<std::string>();
        // 10. get pre_processor
        _pre_processor = config["DEPLOY"]["PRE_PROCESSOR"].as<std::string>();
        // 11. use_gpu
        _use_gpu = config["DEPLOY"]["USE_GPU"].as<int>();
        // 12. predictor_mode
        _predictor_mode = config["DEPLOY"]["PREDICTOR_MODE"].as<std::string>();
        // 13. batch_size
        _batch_size = config["DEPLOY"]["BATCH_SIZE"].as<int>();
        // 14. channels
        _channels = config["DEPLOY"]["CHANNELS"].as<int>();
        return true;
    }

    void debug() const {
        std::cout << "EVAL_CROP_SIZE: (" << _resize[0] << ", " << _resize[1]
                  << ")" << std::endl;
        std::cout << "MEAN: [";
        for (int i = 0; i < _mean.size(); ++i) {
            if (i != _mean.size() - 1) {
                std::cout << _mean[i] << ", ";
            } else {
                std::cout << _mean[i];
            }
        }
        std::cout << "]" << std::endl;
        std::cout << "STD: [";
        for (int i = 0; i < _std.size(); ++i) {
            if (i != _std.size() - 1) {
                std::cout << _std[i] << ", ";
            } else {
                std::cout << _std[i];
            }
        }
        std::cout << "]" << std::endl;
        std::cout << "DEPLOY.IMAGE_TYPE: " << _img_type << std::endl;
        std::cout << "DEPLOY.NUM_CLASSES: " << _class_num << std::endl;
        std::cout << "DEPLOY.CHANNELS: " << _channels << std::endl;
        std::cout << "DEPLOY.MODEL_PATH: " << _model_path << std::endl;
        std::cout << "DEPLOY.MODEL_FILENAME: " << _model_file_name << std::endl;
        std::cout << "DEPLOY.PARAMS_FILENAME: " << _param_file_name
                  << std::endl;
        std::cout << "DEPLOY.PRE_PROCESSOR: " << _pre_processor << std::endl;
        std::cout << "DEPLOY.USE_GPU: " << _use_gpu << std::endl;
        std::cout << "DEPLOY.PREDICTOR_MODE: " << _predictor_mode << std::endl;
        std::cout << "DEPLOY.BATCH_SIZE: " << _batch_size << std::endl;
    }

    // DEPLOY.EVAL_CROP_SIZE
    std::vector<int> _resize;
    // DEPLOY.MEAN
    std::vector<float> _mean;
    // DEPLOY.STD
    std::vector<float> _std;
    // DEPLOY.IMAGE_TYPE
    std::string _img_type;
    // DEPLOY.NUM_CLASSES
    int _class_num;
    // DEPLOY.CHANNELS
    int _channels;
    // DEPLOY.MODEL_PATH
    std::string _model_path;
    // DEPLOY.MODEL_FILENAME
    std::string _model_file_name;
    // DEPLOY.PARAMS_FILENAME
    std::string _param_file_name;
    // DEPLOY.PRE_PROCESSOR
    std::string _pre_processor;
    // DEPLOY.USE_GPU
    int _use_gpu;
    // DEPLOY.PREDICTOR_MODE
    std::string _predictor_mode;
    // DEPLOY.BATCH_SIZE
    int _batch_size;
};

}  // namespace PaddleSolution
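For context, the parser above pulls every field from the DEPLOY section of a YAML deployment config. The snippet below is a minimal usage sketch, not part of this commit: the config values, the file name demo_deploy.yaml, and the include path are illustrative assumptions.

// Hypothetical usage sketch for PaddleSegModelConfigPaser (not part of this commit).
// It writes a small DEPLOY config to disk, loads it, and dumps the parsed fields.
#include <fstream>
#include "inference/utils/seg_conf_parser.h"  // assumed include path

int main() {
    const char* conf_path = "demo_deploy.yaml";  // illustrative file name
    std::ofstream ofs(conf_path);
    // Illustrative values; real configs are produced by the PaddleSeg export tools.
    ofs << "DEPLOY:\n"
           "  EVAL_CROP_SIZE: (513, 513)\n"
           "  MEAN: [0.5, 0.5, 0.5]\n"
           "  STD: [0.5, 0.5, 0.5]\n"
           "  IMAGE_TYPE: rgb\n"
           "  NUM_CLASSES: 2\n"
           "  CHANNELS: 3\n"
           "  MODEL_PATH: ./freeze_model\n"
           "  MODEL_FILENAME: __model__\n"
           "  PARAMS_FILENAME: __params__\n"
           "  PRE_PROCESSOR: SegPreProcessor\n"
           "  USE_GPU: 0\n"
           "  PREDICTOR_MODE: NATIVE\n"
           "  BATCH_SIZE: 1\n";
    ofs.close();

    PaddleSolution::PaddleSegModelConfigPaser parser;
    if (!parser.load_config(conf_path)) {
        return 1;
    }
    parser.debug();  // prints every parsed DEPLOY.* value
    return 0;
}

Note that EVAL_CROP_SIZE is stored with parentheses; load_config routes it through process_parenthesis and parse_str_to_vec<int> to turn "(513, 513)" into a YAML flow sequence of ints.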
inference/utils/utils.h
@@ -4,7 +4,7 @@
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
@@ -30,105 +30,110 @@
#endif

namespace PaddleSolution {
namespace utils {
    inline std::string path_join(const std::string& dir,
                                 const std::string& path) {
        std::string seperator = "/";
#ifdef _WIN32
        seperator = "\\";
#endif
        return dir + seperator + path;
    }

#ifndef _WIN32
    // scan a directory and get all files with input extensions
    inline std::vector<std::string> get_directory_images(
        const std::string& path, const std::string& exts) {
        std::vector<std::string> imgs;
        struct dirent* entry;
        DIR* dir = opendir(path.c_str());
        if (dir == NULL) {
            closedir(dir);
            return imgs;
        }
        while ((entry = readdir(dir)) != NULL) {
            std::string item = entry->d_name;
            auto ext = strrchr(entry->d_name, '.');
            if (!ext || std::string(ext) == "." || std::string(ext) == "..") {
                continue;
            }
            if (exts.find(ext) != std::string::npos) {
                imgs.push_back(path_join(path, entry->d_name));
            }
        }
        return imgs;
    }
#else
    // scan a directory and get all files with input extensions
    inline std::vector<std::string> get_directory_images(
        const std::string& path, const std::string& exts) {
        std::vector<std::string> imgs;
        for (const auto& item :
             std::experimental::filesystem::directory_iterator(path)) {
            auto suffix = item.path().extension().string();
            if (exts.find(suffix) != std::string::npos && suffix.size() > 0) {
                auto fullname = path_join(path,
                                          item.path().filename().string());
                imgs.push_back(item.path().string());
            }
        }
        return imgs;
    }
#endif

    // normalize and HWC_BGR -> CHW_RGB
    inline void normalize(cv::Mat& im, float* data, std::vector<float>& fmean,
                          std::vector<float>& fstd) {
        int rh = im.rows;
        int rw = im.cols;
        int rc = im.channels();
        double normf = static_cast<double>(1.0) / 255.0;
        #pragma omp parallel for
        for (int h = 0; h < rh; ++h) {
            const uchar* ptr = im.ptr<uchar>(h);
            int im_index = 0;
            for (int w = 0; w < rw; ++w) {
                for (int c = 0; c < rc; ++c) {
                    int top_index = (c * rh + h) * rw + w;
                    float pixel = static_cast<float>(ptr[im_index++]);
                    pixel = (pixel * normf - fmean[c]) / fstd[c];
                    data[top_index] = pixel;
                }
            }
        }
    }

    // argmax
    inline void argmax(float* out, std::vector<int>& shape,
                       std::vector<uchar>& mask, std::vector<uchar>& scoremap) {
        int out_img_len = shape[1] * shape[2];
        int blob_out_len = out_img_len * shape[0];
        /*
        Eigen::TensorMap<Eigen::Tensor<float, 3>> out_3d(out, shape[0], shape[1], shape[2]);
        Eigen::Tensor<Eigen::DenseIndex, 2> argmax = out_3d.argmax(0);
        */
        float max_value = -1;
        int label = 0;
        #pragma omp parallel private(label)
        for (int i = 0; i < out_img_len; ++i) {
            max_value = -1;
            label = 0;
            #pragma omp for reduction(max : max_value)
            for (int j = 0; j < shape[0]; ++j) {
                int index = i + j * out_img_len;
                if (index >= blob_out_len) {
                    continue;
                }
                float value = out[index];
                if (value > max_value) {
                    max_value = value;
                    label = j;
                }
            }
            if (label == 0) max_value = 0;
            mask[i] = uchar(label);
            scoremap[i] = uchar(max_value * 255);
        }
    }
}  // namespace utils
}  // namespace PaddleSolution
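Taken together, these helpers cover the image listing, input normalization into a CHW float buffer, and reduction of the network output to a per-pixel label mask plus score map. A minimal usage sketch follows, assuming OpenCV is available; the ./images directory, the 513x513 crop size, the extension list, and the mean/std values are illustrative assumptions, not taken from the commit.

// Hypothetical usage sketch for the utils above (not part of this commit).
#include <iostream>
#include <vector>
#include <opencv2/opencv.hpp>
#include "inference/utils/utils.h"  // assumed include path

int main() {
    // Illustrative normalization constants and image folder.
    std::vector<float> fmean = {0.5f, 0.5f, 0.5f};
    std::vector<float> fstd = {0.5f, 0.5f, 0.5f};
    auto imgs = PaddleSolution::utils::get_directory_images(
        "./images", ".jpeg|.jpg|.png");

    for (const auto& file : imgs) {
        cv::Mat im = cv::imread(file, cv::IMREAD_COLOR);
        if (im.empty()) continue;
        cv::resize(im, im, cv::Size(513, 513));  // e.g. DEPLOY.EVAL_CROP_SIZE

        // normalize() fills a CHW float buffer of size channels * rows * cols.
        std::vector<float> input(im.channels() * im.rows * im.cols);
        PaddleSolution::utils::normalize(im, input.data(), fmean, fstd);

        // argmax() collapses a [classes, H, W] score blob into a per-pixel
        // label mask and a 0-255 score map; zero-filled scores stand in for
        // a real predictor output here.
        std::vector<int> shape = {2, 513, 513};  // NUM_CLASSES, H, W
        std::vector<float> out(shape[0] * shape[1] * shape[2], 0.0f);
        std::vector<uchar> mask(shape[1] * shape[2]);
        std::vector<uchar> scoremap(shape[1] * shape[2]);
        PaddleSolution::utils::argmax(out.data(), shape, mask, scoremap);

        std::cout << file << ": " << mask.size() << " pixels labeled"
                  << std::endl;
    }
    return 0;
}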