PaddlePaddle / PaddleX — commit bc8745f8 (unverified)

Authored by Jason on May 19, 2020; committed via GitHub on May 19, 2020.

Merge pull request #69 from PaddlePaddle/devleop_deploy

add deploy code

Parents: d1e8bb23, b885a6d1
Showing 6 changed files with 329 additions and 32 deletions (+329 / -32):

  paddlex/__init__.py                             +4    -3
  paddlex/deploy.py                               +276  -0
  paddlex/interpret/as_data_reader/__init__.py    +13   -0
  paddlex/interpret/core/__init__.py              +13   -0
  setup.py                                        +1    -1
  tutorials/interpret/interpret.py                +22   -28
paddlex/__init__.py

...
@@ -30,6 +30,7 @@ from . import slim
 from . import convertor
 from . import tools
 from . import interpret
+from . import deploy
 
 try:
     import pycocotools
...
@@ -41,9 +42,9 @@ except:
         "[WARNING] pycocotools install: https://github.com/PaddlePaddle/PaddleX/blob/develop/docs/install.md"
     )
 
-import paddlehub as hub
-if hub.version.hub_version < '1.6.2':
-    raise Exception("[ERROR] paddlehub >= 1.6.2 is required")
+# import paddlehub as hub
+# if hub.version.hub_version < '1.6.2':
+#     raise Exception("[ERROR] paddlehub >= 1.6.2 is required")
 
 env_info = get_environ_info()
 load_model = cv.models.load_model
...
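With `from . import deploy` now executed at package import time, the Predictor added in paddlex/deploy.py (shown below) is reachable directly from the top-level package, and the paddlehub version check no longer runs on import. A minimal sketch of the resulting import surface; it assumes nothing beyond what this diff adds:

import paddlex as pdx

# After this commit the deploy submodule is part of the package's import
# surface, so the deployment Predictor can be reached either way.
from paddlex.deploy import Predictor

assert Predictor is pdx.deploy.Predictor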
paddlex/deploy.py  (new file, mode 100644)

# copyright (c) 2020 PaddlePaddle Authors. All Rights Reserve.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import os
import os.path as osp
import cv2
import numpy as np
import yaml
import paddlex
import paddle.fluid as fluid


class Predictor:
    def __init__(self,
                 model_dir,
                 use_gpu=True,
                 gpu_id=0,
                 use_mkl=False,
                 use_trt=False,
                 use_glog=False,
                 memory_optimize=True):
        """ Create a Paddle Predictor.

        Args:
            model_dir: model directory (must be an exported inference model or a quantized model)
            use_gpu: whether to run on the GPU, default True
            gpu_id: id of the GPU to use, default 0
            use_mkl: whether to use the MKL-DNN library (CPU only), default False
            use_trt: whether to use TensorRT, default False
            use_glog: whether to enable glog logging, default False
            memory_optimize: whether to enable memory optimization, default True
        """
        if not osp.isdir(model_dir):
            raise Exception("[ERROR] Path {} not exist.".format(model_dir))
        if not osp.exists(osp.join(model_dir, "model.yml")):
            raise Exception("There's not model.yml in {}".format(model_dir))
        with open(osp.join(model_dir, "model.yml")) as f:
            self.info = yaml.load(f.read(), Loader=yaml.Loader)

        self.status = self.info['status']
        if self.status != "Quant" and self.status != "Infer":
            raise Exception("[ERROR] Only quantized model or exported "
                            "inference model is supported.")

        self.model_dir = model_dir
        self.model_type = self.info['_Attributes']['model_type']
        self.model_name = self.info['Model']
        self.num_classes = self.info['_Attributes']['num_classes']
        self.labels = self.info['_Attributes']['labels']
        if self.info['Model'] == 'MaskRCNN':
            if self.info['_init_params']['with_fpn']:
                self.mask_head_resolution = 28
            else:
                self.mask_head_resolution = 14
        transforms_mode = self.info.get('TransformsMode', 'RGB')
        if transforms_mode == 'RGB':
            to_rgb = True
        else:
            to_rgb = False
        self.transforms = self.build_transforms(self.info['Transforms'],
                                                to_rgb)
        self.predictor = self.create_predictor(use_gpu, gpu_id, use_mkl,
                                               use_trt, use_glog,
                                               memory_optimize)

    def create_predictor(self,
                         use_gpu=True,
                         gpu_id=0,
                         use_mkl=False,
                         use_trt=False,
                         use_glog=False,
                         memory_optimize=True):
        config = fluid.core.AnalysisConfig(
            os.path.join(self.model_dir, '__model__'),
            os.path.join(self.model_dir, '__params__'))

        if use_gpu:
            # Set the initial GPU memory pool (in MB) and the device id.
            config.enable_use_gpu(100, gpu_id)
        else:
            config.disable_gpu()
        if use_mkl:
            config.enable_mkldnn()
        if use_glog:
            config.enable_glog_info()
        else:
            config.disable_glog_info()
        if memory_optimize:
            config.enable_memory_optim()
        else:
            config.diable_memory_optim()
        # Enable IR graph optimizations such as operator fusion.
        config.switch_ir_optim(True)
        # Disable the feed/fetch ops; required when using the ZeroCopy API.
        config.switch_use_feed_fetch_ops(False)
        predictor = fluid.core.create_paddle_predictor(config)
        return predictor

    def build_transforms(self, transforms_info, to_rgb=True):
        if self.model_type == "classifier":
            from paddlex.cls import transforms
        elif self.model_type == "detector":
            from paddlex.det import transforms
        elif self.model_type == "segmenter":
            from paddlex.seg import transforms
        op_list = list()
        for op_info in transforms_info:
            op_name = list(op_info.keys())[0]
            op_attr = op_info[op_name]
            if not hasattr(transforms, op_name):
                raise Exception(
                    "There's no operator named '{}' in transforms of {}".format(
                        op_name, self.model_type))
            op_list.append(getattr(transforms, op_name)(**op_attr))
        eval_transforms = transforms.Compose(op_list)
        if hasattr(eval_transforms, 'to_rgb'):
            eval_transforms.to_rgb = to_rgb
        self.arrange_transforms(eval_transforms)
        return eval_transforms

    def arrange_transforms(self, transforms):
        if self.model_type == 'classifier':
            arrange_transform = paddlex.cls.transforms.ArrangeClassifier
        elif self.model_type == 'segmenter':
            arrange_transform = paddlex.seg.transforms.ArrangeSegmenter
        elif self.model_type == 'detector':
            arrange_name = 'Arrange{}'.format(self.model_name)
            arrange_transform = getattr(paddlex.det.transforms, arrange_name)
        else:
            raise Exception("Unrecognized model type: {}".format(
                self.model_type))
        if type(transforms.transforms[-1]).__name__.startswith('Arrange'):
            transforms.transforms[-1] = arrange_transform(mode='test')
        else:
            transforms.transforms.append(arrange_transform(mode='test'))

    def preprocess(self, image):
        """ Preprocess an image.

        Args:
            image(str|np.ndarray): image path or np.ndarray; an ndarray is expected to be in BGR format
        """
        res = dict()
        if self.model_type == "classifier":
            im, = self.transforms(image)
            im = np.expand_dims(im, axis=0).copy()
            res['image'] = im
        elif self.model_type == "detector":
            if self.model_name == "YOLOv3":
                im, im_shape = self.transforms(image)
                im = np.expand_dims(im, axis=0).copy()
                im_shape = np.expand_dims(im_shape, axis=0).copy()
                res['image'] = im
                res['im_size'] = im_shape
            if self.model_name.count('RCNN') > 0:
                im, im_resize_info, im_shape = self.transforms(image)
                im = np.expand_dims(im, axis=0).copy()
                im_resize_info = np.expand_dims(im_resize_info, axis=0).copy()
                im_shape = np.expand_dims(im_shape, axis=0).copy()
                res['image'] = im
                res['im_info'] = im_resize_info
                res['im_shape'] = im_shape
        elif self.model_type == "segmenter":
            im, im_info = self.transforms(image)
            im = np.expand_dims(im, axis=0).copy()
            res['image'] = im
            res['im_info'] = im_info
        return res

    def raw_predict(self, inputs):
        """ Run inference on preprocessed data.

        Args:
            inputs(tuple): the preprocessed data
        """
        for k, v in inputs.items():
            try:
                tensor = self.predictor.get_input_tensor(k)
            except:
                continue
            tensor.copy_from_cpu(v)
        self.predictor.zero_copy_run()
        output_names = self.predictor.get_output_names()
        output_results = list()
        for name in output_names:
            output_tensor = self.predictor.get_output_tensor(name)
            output_results.append(output_tensor.copy_to_cpu())
        return output_results

    def classifier_postprocess(self, preds, topk=1):
        """ Postprocess the outputs of a classification model.
        """
        true_topk = min(self.num_classes, topk)
        pred_label = np.argsort(preds[0][0])[::-1][:true_topk]
        result = [{
            'category_id': l,
            'category': self.labels[l],
            'score': preds[0][0, l],
        } for l in pred_label]
        return result

    def segmenter_postprocess(self, preds, preprocessed_inputs):
        """ Postprocess the outputs of a semantic segmentation model.
        """
        label_map = np.squeeze(preds[0]).astype('uint8')
        score_map = np.squeeze(preds[1])
        score_map = np.transpose(score_map, (1, 2, 0))
        im_info = preprocessed_inputs['im_info']
        for info in im_info[::-1]:
            if info[0] == 'resize':
                w, h = info[1][1], info[1][0]
                label_map = cv2.resize(label_map, (w, h), cv2.INTER_NEAREST)
                score_map = cv2.resize(score_map, (w, h), cv2.INTER_LINEAR)
            elif info[0] == 'padding':
                w, h = info[1][1], info[1][0]
                label_map = label_map[0:h, 0:w]
                score_map = score_map[0:h, 0:w, :]
            else:
                raise Exception("Unexpected info '{}' in im_info".format(
                    info[0]))
        return {'label_map': label_map, 'score_map': score_map}

    def detector_postprocess(self, preds, preprocessed_inputs):
        """ Postprocess the outputs of an object detection or instance segmentation model.
        """
        bboxes = {'bbox': (np.array(preds[0]), [[len(preds[0])]])}
        bboxes['im_id'] = (np.array([[0]]).astype('int32'), [])
        clsid2catid = dict({i: i for i in range(self.num_classes)})
        xywh_results = paddlex.cv.models.utils.detection_eval.bbox2out(
            [bboxes], clsid2catid)
        results = list()
        for xywh_res in xywh_results:
            del xywh_res['image_id']
            xywh_res['category'] = self.labels[xywh_res['category_id']]
            results.append(xywh_res)
        if len(preds) > 1:
            im_shape = preprocessed_inputs['im_shape']
            bboxes['im_shape'] = (im_shape, [])
            bboxes['mask'] = (np.array(preds[1]), [[len(preds[1])]])
            segm_results = paddlex.cv.models.utils.detection_eval.mask2out(
                [bboxes], clsid2catid, self.mask_head_resolution)
            import pycocotools.mask as mask_util
            for i in range(len(results)):
                results[i]['mask'] = mask_util.decode(
                    segm_results[i]['segmentation'])
        return results

    def predict(self, image, topk=1, threshold=0.5):
        """ Predict on a single image.

        Args:
            image(str|np.ndarray): image path or np.ndarray; an ndarray is expected to be in BGR format
            topk(int): used by classification models, return the top-k predictions
        """
        preprocessed_input = self.preprocess(image)
        model_pred = self.raw_predict(preprocessed_input)
        if self.model_type == "classifier":
            results = self.classifier_postprocess(model_pred, topk)
        elif self.model_type == "detector":
            results = self.detector_postprocess(model_pred, preprocessed_input)
        elif self.model_type == "segmenter":
            results = self.segmenter_postprocess(model_pred,
                                                 preprocessed_input)
        return results
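The new Predictor wires model.yml parsing, the exported eval transforms, a zero-copy AnalysisConfig predictor, and per-task postprocessing behind a single predict() call. A minimal usage sketch follows; the model directory and image path are placeholders and must point at a model produced by PaddleX export or quantization (a directory containing model.yml, __model__ and __params__):

import paddlex as pdx

# Placeholder paths: substitute a real exported model directory and test image.
model_dir = './exported_inference_model'
image_path = './test.jpg'

# CPU inference; pass use_gpu=True (and optionally gpu_id, use_mkl, use_trt)
# for other setups.
predictor = pdx.deploy.Predictor(model_dir, use_gpu=False)

# For a classifier this returns a list of {'category_id', 'category', 'score'}
# dicts; detectors return bbox (and mask) results, segmenters return
# {'label_map', 'score_map'}.
results = predictor.predict(image_path, topk=3)
print(results)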
paddlex/interpret/as_data_reader/__init__.py  (new file, mode 100644)
# copyright (c) 2020 PaddlePaddle Authors. All Rights Reserve.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
paddlex/interpret/core/__init__.py  (new file, mode 100644)
# copyright (c) 2020 PaddlePaddle Authors. All Rights Reserve.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
setup.py

...
@@ -30,7 +30,7 @@ setuptools.setup(
     setup_requires=['cython', 'numpy'],
     install_requires=[
         "pycocotools;platform_system!='Windows'", 'pyyaml', 'colorama', 'tqdm',
-        'paddleslim==1.0.1', 'visualdl==2.0.0a2'
+        'paddleslim==1.0.1', 'visualdl>=2.0.0a2'
     ],
     classifiers=[
         "Programming Language :: Python :: 3",
...
tutorials/interpret/interpret.py

...
@@ -4,44 +4,38 @@ os.environ['CUDA_VISIBLE_DEVICES'] = '0'
 import os.path as osp
 import paddlex as pdx
-from paddlex.cls import transforms
 
 # Download and extract the mini ImageNet vegetable classification dataset
 veg_dataset = 'https://bj.bcebos.com/paddlex/interpret/mini_imagenet_veg.tar.gz'
 pdx.utils.download_and_decompress(veg_dataset, path='./')
 
-# Define the transforms for the test set
-test_transforms = transforms.Compose([
-    transforms.ResizeByShort(short_size=256),
-    transforms.CenterCrop(crop_size=224),
-    transforms.Normalize()
-])
+# Download and extract the trained MobileNetV2 model
+model_file = 'https://bj.bcebos.com/paddlex/interpret/mini_imagenet_veg_mobilenetv2.tar.gz'
+pdx.utils.download_and_decompress(model_file, path='./')
+
+# Load the model
+model = pdx.load_model('mini_imagenet_veg_mobilenetv2')
 
 # Define the dataset used for testing
 test_dataset = pdx.datasets.ImageNet(
     data_dir='mini_imagenet_veg',
     file_list=osp.join('mini_imagenet_veg', 'test_list.txt'),
     label_list=osp.join('mini_imagenet_veg', 'labels.txt'),
-    transforms=test_transforms)
-
-# Download and extract the trained MobileNetV2 model
-model_file = 'https://bj.bcebos.com/paddlex/interpret/mini_imagenet_veg_mobilenetv2.tar.gz'
-pdx.utils.download_and_decompress(model_file, path='./')
-
-# Load the model
-model = pdx.load_model('mini_imagenet_veg_mobilenetv2')
+    transforms=model.test_transforms)
 
 # Interpretability visualization
+save_dir = 'interpret_results'
+if not osp.exists(save_dir):
+    os.makedirs(save_dir)
+
+# LIME algorithm
 pdx.interpret.visualize(
     'mini_imagenet_veg/mushroom/n07734744_1106.JPEG',
     model,
     test_dataset,
     algo='lime',
-    save_dir='./')
+    save_dir=save_dir)
 
+# NormLIME algorithm
 pdx.interpret.visualize(
     'mini_imagenet_veg/mushroom/n07734744_1106.JPEG',
     model,
     test_dataset,
     algo='normlime',
-    save_dir='./')
\ No newline at end of file
+    save_dir=save_dir)