Unverified commit 4d6efec9, authored by SunAhong1993 and committed by GitHub

Merge pull request #7 from PaddlePaddle/develop

 # Changelog
+- 2020.05.20
+> - Released the official version v1.0
+> - Added C++ and Python deployment code for models
+> - Added a model encryption deployment solution
+> - Added an OpenVINO deployment solution for classification models
+> - Added model interpretability APIs
 - 2020.05.17
 > - Released the v0.1.8 pip update
 > - Fixed several code bugs
 > - Added support for the EasyData platform data annotation format
 > - Added support for pixel-level operators from the imgaug data augmentation library

@@ -53,4 +53,4 @@ log_level = 2
 from . import interpret
-__version__ = '1.0.1.github'
+__version__ = '1.0.2.github'

@@ -259,8 +259,8 @@ class ResizeByShort(ClsTransform):
         im_short_size = min(im.shape[0], im.shape[1])
         im_long_size = max(im.shape[0], im.shape[1])
         scale = float(self.short_size) / im_short_size
-        if self.max_size > 0 and np.round(
-                scale * im_long_size) > self.max_size:
+        if self.max_size > 0 and np.round(scale *
+                                          im_long_size) > self.max_size:
             scale = float(self.max_size) / float(im_long_size)
         resized_width = int(round(im.shape[1] * scale))
         resized_height = int(round(im.shape[0] * scale))
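The hunk above only re-wraps the `if` condition; the scaling arithmetic itself is unchanged. For reference, here is a minimal standalone sketch of the same resize-by-short-side computation (the function name and the use of cv2 for the final resize are illustrative assumptions, not the PaddleX implementation):

```python
import cv2
import numpy as np

def resize_by_short(im, short_size=256, max_size=-1):
    """Scale an image so its short side equals short_size, capping the long side at max_size."""
    im_short_size = min(im.shape[0], im.shape[1])
    im_long_size = max(im.shape[0], im.shape[1])
    scale = float(short_size) / im_short_size
    # If scaling by the short side would push the long side past max_size,
    # rescale by the long side instead.
    if max_size > 0 and np.round(scale * im_long_size) > max_size:
        scale = float(max_size) / float(im_long_size)
    resized_width = int(round(im.shape[1] * scale))
    resized_height = int(round(im.shape[0] * scale))
    return cv2.resize(im, (resized_width, resized_height),
                      interpolation=cv2.INTER_LINEAR)
```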
@@ -455,7 +455,7 @@ class ArrangeClassifier(ClsTransform):
             tuple: when mode is 'train' or 'eval', returns (im, label), i.e. the image np.ndarray data
                 and the image class id; when mode is 'test' or 'quant', returns (im, ), i.e. the image np.ndarray data.
         """
-        im = permute(im, False)
+        im = permute(im, False).astype('float32')
         if self.mode == 'train' or self.mode == 'eval':
             outputs = (im, label)
         else:
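The only functional change in this hunk is the explicit `astype('float32')` after `permute`, so integer-typed image arrays do not reach the network with the wrong dtype. Assuming `permute` performs the usual HWC-to-CHW transpose, the step roughly amounts to the following sketch (the helper name is made up for illustration):

```python
import numpy as np

def permute_and_cast(im):
    """HWC -> CHW transpose followed by an explicit float32 cast (illustrative sketch)."""
    im = np.transpose(im, (2, 0, 1))  # channels-first layout expected by the classifier
    return im.astype('float32')       # guard against uint8 images reaching the executor
```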
@@ -16,4 +16,4 @@ from __future__ import absolute_import
 from . import visualize
 lime = visualize.lime
-normlime = visualize.normlime
\ No newline at end of file
+normlime = visualize.normlime

@@ -28,17 +28,6 @@ def gen_user_home():
     return os.path.expanduser('~')

-root_path = gen_user_home()
-root_path = osp.join(root_path, '.paddlex')
-h_pre_models = osp.join(root_path, "pre_models")
-if not osp.exists(h_pre_models):
-    if not osp.exists(root_path):
-        os.makedirs(root_path)
-    url = "https://bj.bcebos.com/paddlex/interpret/pre_models.tar.gz"
-    pdx.utils.download_and_decompress(url, path=root_path)
-h_pre_models_kmeans = osp.join(h_pre_models, "kmeans_model.pkl")

 def paddle_get_fc_weights(var_name="fc_0.w_0"):
     fc_weights = fluid.global_scope().find_var(var_name).get_tensor()
     return np.array(fc_weights)
@@ -50,6 +39,14 @@ def paddle_resize(extracted_features, outsize):

 def compute_features_for_kmeans(data_content):
+    root_path = gen_user_home()
+    root_path = osp.join(root_path, '.paddlex')
+    h_pre_models = osp.join(root_path, "pre_models")
+    if not osp.exists(h_pre_models):
+        if not osp.exists(root_path):
+            os.makedirs(root_path)
+        url = "https://bj.bcebos.com/paddlex/interpret/pre_models.tar.gz"
+        pdx.utils.download_and_decompress(url, path=root_path)
     def conv_bn_layer(input,
                       num_filters,
                       filter_size,
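The two hunks above remove the module-level `pre_models` download and re-create it inside `compute_features_for_kmeans`; the same block reappears in the `NormLIME.__init__` and `precompute_lime_weights` hunks below. The net effect is that importing `paddlex.interpret` no longer downloads anything; the archive is fetched lazily on first use. A condensed sketch of that pattern (the wrapper name `ensure_pre_models` is hypothetical; `pdx.utils.download_and_decompress` is the call used in the diff itself):

```python
import os
import os.path as osp
import paddlex as pdx

def ensure_pre_models():
    """Fetch the interpretability pre_models archive on first use and return the kmeans model path."""
    root_path = osp.join(os.path.expanduser('~'), '.paddlex')
    h_pre_models = osp.join(root_path, "pre_models")
    if not osp.exists(h_pre_models):
        if not osp.exists(root_path):
            os.makedirs(root_path)
        url = "https://bj.bcebos.com/paddlex/interpret/pre_models.tar.gz"
        # Download and unpack the archive under ~/.paddlex, as the hunks above and below do inline.
        pdx.utils.download_and_decompress(url, path=root_path)
    return osp.join(h_pre_models, "kmeans_model.pkl")
```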
@@ -13,11 +13,12 @@
 #limitations under the License.

 import os
+import os.path as osp
 import numpy as np
 import time

 from . import lime_base
-from ._session_preparation import paddle_get_fc_weights, compute_features_for_kmeans, h_pre_models_kmeans
+from ._session_preparation import paddle_get_fc_weights, compute_features_for_kmeans, gen_user_home
 from .normlime_base import combine_normlime_and_lime, get_feature_for_kmeans, load_kmeans_model
 from paddlex.interpret.as_data_reader.readers import read_image
@@ -215,6 +216,15 @@ class LIME(object):

 class NormLIME(object):
     def __init__(self, predict_fn, label_names, num_samples=3000, batch_size=50,
                  kmeans_model_for_normlime=None, normlime_weights=None):
+        root_path = gen_user_home()
+        root_path = osp.join(root_path, '.paddlex')
+        h_pre_models = osp.join(root_path, "pre_models")
+        if not osp.exists(h_pre_models):
+            if not osp.exists(root_path):
+                os.makedirs(root_path)
+            url = "https://bj.bcebos.com/paddlex/interpret/pre_models.tar.gz"
+            pdx.utils.download_and_decompress(url, path=root_path)
+        h_pre_models_kmeans = osp.join(h_pre_models, "kmeans_model.pkl")
         if kmeans_model_for_normlime is None:
             try:
                 self.kmeans_model = load_kmeans_model(h_pre_models_kmeans)
@@ -13,13 +13,14 @@
 #limitations under the License.

 import os
+import os.path as osp
 import numpy as np
 import glob

 from paddlex.interpret.as_data_reader.readers import read_image
 import paddlex.utils.logging as logging
 from . import lime_base
-from ._session_preparation import compute_features_for_kmeans, h_pre_models_kmeans
+from ._session_preparation import compute_features_for_kmeans, gen_user_home


 def load_kmeans_model(fname):
@@ -103,6 +104,15 @@ def save_one_lime_predict_and_kmean_labels(lime_all_weights, image_pred_labels,

 def precompute_lime_weights(list_data_, predict_fn, num_samples, batch_size, save_dir):
+    root_path = gen_user_home()
+    root_path = osp.join(root_path, '.paddlex')
+    h_pre_models = osp.join(root_path, "pre_models")
+    if not osp.exists(h_pre_models):
+        if not osp.exists(root_path):
+            os.makedirs(root_path)
+        url = "https://bj.bcebos.com/paddlex/interpret/pre_models.tar.gz"
+        pdx.utils.download_and_decompress(url, path=root_path)
+    h_pre_models_kmeans = osp.join(h_pre_models, "kmeans_model.pkl")
     kmeans_model = load_kmeans_model(h_pre_models_kmeans)

     for data_index, each_data_ in enumerate(list_data_):
@@ -19,7 +19,7 @@ long_description = "PaddleX. A end-to-end deeplearning model development toolkit
 setuptools.setup(
     name="paddlex",
-    version='1.0.1',
+    version='1.0.2',
     author="paddlex",
     author_email="paddlex@baidu.com",
     description=long_description,