Commit 62520440 authored by Steffy-zxf

Merge branch 'release/v1.7' of https://github.com/PaddlePaddle/PaddleHub into release/v1.7

@@ -192,7 +192,7 @@ def predict(self, data, return_result=False, accelerate_mode=True):
         prediction = []
         for batch_result in results:
             # get predict index
-            batch_result = np.argmax(batch_result, axis=2)[0]
+            batch_result = np.argmax(batch_result[0], axis=1)
             batch_result = batch_result.tolist()
             prediction += batch_result
         return prediction
......
## Command-line prediction
```
hub run efficientnetb0_imagenet --input_path "/PATH/TO/IMAGE"
```
## API
```python
def get_expected_image_width()
```
Returns the image width expected by preprocessing, i.e. 224.
```python
def get_expected_image_height()
```
Returns the image height expected by preprocessing, i.e. 224.
```python
def get_pretrained_images_mean()
```
Returns the image mean used in preprocessing, i.e. \[0.485, 0.456, 0.406\].
```python
def get_pretrained_images_std()
```
Returns the image standard deviation used in preprocessing, i.e. \[0.229, 0.224, 0.225\].
```python
def context(trainable=True, pretrained=True)
```
**Parameters**
* trainable (bool): whether the parameters in the program are trainable;
* pretrained (bool): whether to load the default pretrained model.

**Returns**
* inputs (dict): inputs of the program; the key is 'image' and the value is the image tensor;
* outputs (dict): outputs of the program, with keys 'classification' and 'feature_map', whose values are:
    * classification (paddle.fluid.framework.Variable): the classification result, i.e. the output of the fully connected layer;
    * feature\_map (paddle.fluid.framework.Variable): the feature map, i.e. the tensor immediately before the fully connected layer.
* context\_prog (fluid.Program): the program (computation graph), used for transfer learning; a hedged usage sketch follows below.
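
The snippet below is a minimal transfer-learning sketch built on `context()`; attaching a new 10-class head with `fluid.layers.fc` and the loss definition are illustrative assumptions, not part of this module's documented API.
```python
import paddle.fluid as fluid
import paddlehub as hub

module = hub.Module(name="efficientnetb0_imagenet")
inputs, outputs, program = module.context(trainable=True, pretrained=True)

with fluid.program_guard(program):
    # 'feature_map' is the tensor right before the original fully connected layer;
    # a new head for a hypothetical 10-class task is attached on top of it.
    logits = fluid.layers.fc(input=outputs['feature_map'], size=10, act='softmax')
    label = fluid.layers.data(name='label', shape=[1], dtype='int64')
    loss = fluid.layers.mean(fluid.layers.cross_entropy(input=logits, label=label))
```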
```python
def classify(images=None,
paths=None,
batch_size=1,
use_gpu=False,
top_k=1):
```
**Parameters**
* images (list\[numpy.ndarray\]): image data; each image is an ndarray of shape \[H, W, C\] in BGR color space;
* paths (list\[str\]): paths to the images;
* batch\_size (int): batch size;
* use\_gpu (bool): whether to run prediction on the GPU;
* top\_k (int): return the top k predictions.

**Returns**
res (list\[dict\]): classification results; each element of the list is a dict whose keys are the predicted category labels and whose values are the confidences. A minimal example of consuming the result follows below.
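
As a rough illustration of the return value (the labels below are made up), a single-image call with `top_k=3` can be consumed like this:
```python
import paddlehub as hub
import cv2

classifier = hub.Module(name="efficientnetb0_imagenet")
res = classifier.classify(images=[cv2.imread('/PATH/TO/IMAGE')], top_k=3)
# res looks like [{'tabby': 0.52, 'tiger cat': 0.21, 'Egyptian cat': 0.11}]
print(max(res[0], key=res[0].get))  # highest-confidence label for the first image
```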
```python
def save_inference_model(dirname,
model_filename=None,
params_filename=None,
combined=True)
```
Saves the model to the specified directory (an example call is sketched after the parameter list).

**Parameters**
* dirname: directory in which to save the model
* model\_filename: name of the model file, \_\_model\_\_ by default
* params\_filename: name of the parameters file, \_\_params\_\_ by default (only takes effect when `combined` is True)
* combined: whether to save all parameters into a single file
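
A hedged example call; the output directory name is a placeholder:
```python
import paddlehub as hub

classifier = hub.Module(name="efficientnetb0_imagenet")
# writes __model__ and __params__ into ./efficientnetb0_inference
classifier.save_inference_model(dirname="./efficientnetb0_inference", combined=True)
```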
## Code example
```python
import paddlehub as hub
import cv2
classifier = hub.Module(name="efficientnetb0_imagenet")
result = classifier.classify(images=[cv2.imread('/PATH/TO/IMAGE')])
# or
# result = classifier.classify(paths=['/PATH/TO/IMAGE'])
```
## Service deployment
PaddleHub Serving can deploy an online image classification service.
## Step 1: Start PaddleHub Serving
Run the start command:
```shell
$ hub serving start -m efficientnetb0_imagenet
```
This deploys an online image classification API; the default port is 8866.
**NOTE:** To run prediction on a GPU, set the CUDA\_VISIBLE\_DEVICES environment variable before starting the service; otherwise it does not need to be set.
## Step 2: Send a prediction request
With the server configured, the following few lines of code send a prediction request and fetch the result:
```python
import requests
import json
import cv2
import base64
def cv2_to_base64(image):
data = cv2.imencode('.jpg', image)[1]
return base64.b64encode(data.tostring()).decode('utf8')
# send the HTTP request
data = {'images':[cv2_to_base64(cv2.imread("/PATH/TO/IMAGE"))]}
headers = {"Content-type": "application/json"}
url = "http://127.0.0.1:8866/predict/efficientnetb0_imagenet"
r = requests.post(url=url, headers=headers, data=json.dumps(data))
# print the prediction results
print(r.json()["results"])
```
### Code
https://github.com/PaddlePaddle/PaddleClas
### Dependencies
paddlepaddle >= 1.6.2
paddlehub >= 1.6.0
# -*- coding:utf-8 -*-
# copyright (c) 2020 PaddlePaddle Authors. All Rights Reserve.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import time
from collections import OrderedDict
import cv2
import numpy as np
from PIL import Image
__all__ = ['reader']
DATA_DIM = 224
img_mean = np.array([0.485, 0.456, 0.406]).reshape((3, 1, 1))
img_std = np.array([0.229, 0.224, 0.225]).reshape((3, 1, 1))
def resize_short(img, target_size):
percent = float(target_size) / min(img.size[0], img.size[1])
resized_width = int(round(img.size[0] * percent))
resized_height = int(round(img.size[1] * percent))
img = img.resize((resized_width, resized_height), Image.LANCZOS)
return img
def crop_image(img, target_size, center):
width, height = img.size
size = target_size
if center == True:
w_start = (width - size) / 2
h_start = (height - size) / 2
else:
w_start = np.random.randint(0, width - size + 1)
h_start = np.random.randint(0, height - size + 1)
w_end = w_start + size
h_end = h_start + size
img = img.crop((w_start, h_start, w_end, h_end))
return img
def process_image(img):
img = resize_short(img, target_size=256)
img = crop_image(img, target_size=DATA_DIM, center=True)
if img.mode != 'RGB':
img = img.convert('RGB')
img = np.array(img).astype('float32').transpose((2, 0, 1)) / 255
img -= img_mean
img /= img_std
return img
def reader(images=None, paths=None):
"""
Preprocess to yield image.
Args:
images (list[numpy.ndarray]): images data, shape of each is [H, W, C].
paths (list[str]): paths to images.
Yield:
each (collections.OrderedDict): info of original image, preprocessed image.
"""
component = list()
if paths:
for im_path in paths:
each = OrderedDict()
assert os.path.isfile(
im_path), "The {} isn't a valid file path.".format(im_path)
each['org_im_path'] = im_path
each['org_im'] = Image.open(im_path)
each['org_im_width'], each['org_im_height'] = each['org_im'].size
component.append(each)
if images is not None:
        assert isinstance(images, list), "images should be a list."
for im in images:
each = OrderedDict()
each['org_im'] = Image.fromarray(im[:, :, ::-1])
each['org_im_path'] = 'ndarray_time={}'.format(
round(time.time(), 6) * 1e6)
each['org_im_width'], each['org_im_height'] = each['org_im'].size
component.append(each)
for element in component:
element['image'] = process_image(element['org_im'])
yield element
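# Hedged usage sketch (not part of the original file): the generator can be driven
# directly for a quick check, e.g.
#   import cv2
#   for sample in reader(images=[cv2.imread('/PATH/TO/IMAGE')]):
#       print(sample['org_im_path'], sample['image'].shape)  # -> (3, 224, 224)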
# -*- coding:utf-8 -*-
# copyright (c) 2020 PaddlePaddle Authors. All Rights Reserve.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import warnings
import paddle.fluid as fluid
def initial_type(name,
input,
op_type,
fan_out,
init="google",
use_bias=False,
filter_size=0,
stddev=0.02):
if init == "kaiming":
if op_type == 'conv':
fan_in = input.shape[1] * filter_size * filter_size
elif op_type == 'deconv':
fan_in = fan_out * filter_size * filter_size
else:
if len(input.shape) > 2:
fan_in = input.shape[1] * input.shape[2] * input.shape[3]
else:
fan_in = input.shape[1]
bound = 1 / math.sqrt(fan_in)
param_attr = fluid.ParamAttr(
name=name + "_weights",
initializer=fluid.initializer.Uniform(low=-bound, high=bound))
if use_bias == True:
bias_attr = fluid.ParamAttr(
name=name + '_offset',
initializer=fluid.initializer.Uniform(low=-bound, high=bound))
else:
bias_attr = False
elif init == 'google':
n = filter_size * filter_size * fan_out
param_attr = fluid.ParamAttr(
name=name + "_weights",
initializer=fluid.initializer.NormalInitializer(
loc=0.0, scale=math.sqrt(2.0 / n)))
if use_bias == True:
bias_attr = fluid.ParamAttr(
name=name + "_offset",
initializer=fluid.initializer.Constant(0.0))
else:
bias_attr = False
else:
param_attr = fluid.ParamAttr(
name=name + "_weights",
initializer=fluid.initializer.NormalInitializer(
loc=0.0, scale=stddev))
if use_bias == True:
bias_attr = fluid.ParamAttr(
name=name + "_offset",
initializer=fluid.initializer.Constant(0.0))
else:
bias_attr = False
return param_attr, bias_attr
def cal_padding(img_size, stride, filter_size, dilation=1):
"""Calculate padding size."""
if img_size % stride == 0:
out_size = max(filter_size - stride, 0)
else:
out_size = max(filter_size - (img_size % stride), 0)
return out_size // 2, out_size - out_size // 2
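# Worked example (illustrative): cal_padding(img_size=224, stride=2, filter_size=3)
# takes the img_size % stride == 0 branch, so out_size = max(3 - 2, 0) = 1 and the
# function returns (0, 1), i.e. pad 0 pixels on one side and 1 on the other.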
def init_batch_norm_layer(name="batch_norm"):
param_attr = fluid.ParamAttr(
name=name + '_scale', initializer=fluid.initializer.Constant(1.0))
bias_attr = fluid.ParamAttr(
name=name + '_offset',
initializer=fluid.initializer.Constant(value=0.0))
return param_attr, bias_attr
def init_fc_layer(fout, name='fc'):
n = fout # fan-out
init_range = 1.0 / math.sqrt(n)
param_attr = fluid.ParamAttr(
name=name + '_weights',
initializer=fluid.initializer.UniformInitializer(
low=-init_range, high=init_range))
bias_attr = fluid.ParamAttr(
name=name + '_offset',
initializer=fluid.initializer.Constant(value=0.0))
return param_attr, bias_attr
def norm_layer(input, norm_type='batch_norm', name=None):
if norm_type == 'batch_norm':
param_attr = fluid.ParamAttr(
name=name + '_weights', initializer=fluid.initializer.Constant(1.0))
bias_attr = fluid.ParamAttr(
name=name + '_offset',
initializer=fluid.initializer.Constant(value=0.0))
return fluid.layers.batch_norm(
input,
param_attr=param_attr,
bias_attr=bias_attr,
moving_mean_name=name + '_mean',
moving_variance_name=name + '_variance')
elif norm_type == 'instance_norm':
helper = fluid.layer_helper.LayerHelper("instance_norm", **locals())
dtype = helper.input_dtype()
epsilon = 1e-5
mean = fluid.layers.reduce_mean(input, dim=[2, 3], keep_dim=True)
var = fluid.layers.reduce_mean(
fluid.layers.square(input - mean), dim=[2, 3], keep_dim=True)
if name is not None:
scale_name = name + "_scale"
offset_name = name + "_offset"
scale_param = fluid.ParamAttr(
name=scale_name,
initializer=fluid.initializer.Constant(1.0),
trainable=True)
offset_param = fluid.ParamAttr(
name=offset_name,
initializer=fluid.initializer.Constant(0.0),
trainable=True)
scale = helper.create_parameter(
attr=scale_param, shape=input.shape[1:2], dtype=dtype)
offset = helper.create_parameter(
attr=offset_param, shape=input.shape[1:2], dtype=dtype)
tmp = fluid.layers.elementwise_mul(x=(input - mean), y=scale, axis=1)
tmp = tmp / fluid.layers.sqrt(var + epsilon)
tmp = fluid.layers.elementwise_add(tmp, offset, axis=1)
return tmp
else:
        raise NotImplementedError("norm type: [%s] is not supported" % norm_type)
def conv2d(input,
num_filters=64,
filter_size=7,
stride=1,
stddev=0.02,
padding=0,
groups=None,
name="conv2d",
norm=None,
act=None,
relufactor=0.0,
use_bias=False,
padding_type=None,
initial="normal",
use_cudnn=True):
if padding != 0 and padding_type != None:
warnings.warn(
'padding value and padding type are set in the same time, and the final padding width and padding height are computed by padding_type'
)
param_attr, bias_attr = initial_type(
name=name,
input=input,
op_type='conv',
fan_out=num_filters,
init=initial,
use_bias=use_bias,
filter_size=filter_size,
stddev=stddev)
def get_padding(filter_size, stride=1, dilation=1):
padding = ((stride - 1) + dilation * (filter_size - 1)) // 2
return padding
need_crop = False
if padding_type == "SAME":
top_padding, bottom_padding = cal_padding(input.shape[2], stride,
filter_size)
left_padding, right_padding = cal_padding(input.shape[2], stride,
filter_size)
height_padding = bottom_padding
width_padding = right_padding
if top_padding != bottom_padding or left_padding != right_padding:
height_padding = top_padding + stride
width_padding = left_padding + stride
need_crop = True
padding = [height_padding, width_padding]
elif padding_type == "VALID":
height_padding = 0
width_padding = 0
padding = [height_padding, width_padding]
elif padding_type == "DYNAMIC":
padding = get_padding(filter_size, stride)
else:
padding = padding
conv = fluid.layers.conv2d(
input,
num_filters,
filter_size,
groups=groups,
name=name,
stride=stride,
padding=padding,
use_cudnn=use_cudnn,
param_attr=param_attr,
bias_attr=bias_attr)
if need_crop:
conv = conv[:, :, 1:, 1:]
if norm is not None:
conv = norm_layer(input=conv, norm_type=norm, name=name + "_norm")
if act == 'relu':
conv = fluid.layers.relu(conv, name=name + '_relu')
elif act == 'leaky_relu':
conv = fluid.layers.leaky_relu(
conv, alpha=relufactor, name=name + '_leaky_relu')
elif act == 'tanh':
conv = fluid.layers.tanh(conv, name=name + '_tanh')
elif act == 'sigmoid':
conv = fluid.layers.sigmoid(conv, name=name + '_sigmoid')
elif act == 'swish':
conv = fluid.layers.swish(conv, name=name + '_swish')
elif act == None:
conv = conv
else:
        raise NotImplementedError("activation: [%s] is not supported" % act)
return conv
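# Hedged usage sketch (not part of the original file): builds one "SAME"-padded
# conv + batch_norm + swish block with the conv2d helper above, using the
# paddle.fluid 1.x static-graph API already imported here.
if __name__ == '__main__':
    main_prog = fluid.Program()
    startup_prog = fluid.Program()
    with fluid.program_guard(main_prog, startup_prog):
        x = fluid.layers.data(name='x', shape=[3, 224, 224], dtype='float32')
        y = conv2d(
            x,
            num_filters=32,
            filter_size=3,
            stride=2,
            padding_type="SAME",
            norm='batch_norm',
            act='swish',
            name='stem_conv')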
# -*- coding:utf-8 -*-
# copyright (c) 2020 PaddlePaddle Authors. All Rights Reserve.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
import ast
import argparse
import os
import numpy as np
import paddle.fluid as fluid
import paddlehub as hub
from paddle.fluid.core import PaddleTensor, AnalysisConfig, create_paddle_predictor
from paddlehub.module.module import moduleinfo, runnable, serving
from paddlehub.common.paddle_helper import add_vars_prefix
from efficientnetb0_imagenet.processor import postprocess, base64_to_cv2
from efficientnetb0_imagenet.data_feed import reader
from efficientnetb0_imagenet.efficientnet import EfficientNetB0
@moduleinfo(
name="efficientnetb0_imagenet",
type="CV/image_classification",
author="paddlepaddle",
author_email="paddle-dev@baidu.com",
summary=
"EfficientNetB0 is a image classfication model, this module is trained with imagenet datasets.",
version="1.1.0")
class EfficientNetB0ImageNet(hub.Module):
def _initialize(self):
self.default_pretrained_model_path = os.path.join(
self.directory, "efficientnetb0_imagenet_infer_model")
label_file = os.path.join(self.directory, "label_list.txt")
with open(label_file, 'r', encoding='utf-8') as file:
self.label_list = file.read().split("\n")[:-1]
self.classification = self.classify
self._set_config()
def get_expected_image_width(self):
return 224
def get_expected_image_height(self):
return 224
def get_pretrained_images_mean(self):
im_mean = np.array([0.485, 0.456, 0.406]).reshape(1, 3)
return im_mean
def get_pretrained_images_std(self):
im_std = np.array([0.229, 0.224, 0.225]).reshape(1, 3)
return im_std
def _set_config(self):
"""
predictor config setting
"""
cpu_config = AnalysisConfig(self.default_pretrained_model_path)
cpu_config.disable_glog_info()
cpu_config.disable_gpu()
self.cpu_predictor = create_paddle_predictor(cpu_config)
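        # A GPU predictor is created only when CUDA_VISIBLE_DEVICES is set to a
        # valid device id; otherwise only the CPU predictor above is used.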
try:
_places = os.environ["CUDA_VISIBLE_DEVICES"]
int(_places[0])
use_gpu = True
except:
use_gpu = False
if use_gpu:
gpu_config = AnalysisConfig(self.default_pretrained_model_path)
gpu_config.disable_glog_info()
gpu_config.enable_use_gpu(
memory_pool_init_size_mb=1000, device_id=0)
self.gpu_predictor = create_paddle_predictor(gpu_config)
def context(self,
trainable=True,
pretrained=True,
override_params=None,
phase="train"):
"""context for transfer learning.
Args:
trainable (bool): Set parameters in program to be trainable.
pretrained (bool) : Whether to load pretrained model.
Returns:
inputs (dict): key is 'image', corresponding vaule is image tensor.
outputs (dict): key is :
'classification', corresponding value is the result of classification.
'feature_map', corresponding value is the result of the layer before the fully connected layer.
context_prog (fluid.Program): program for transfer learning.
"""
if phase in ["dev", "test", "predict", "eval"]:
is_test = False
elif phase in ["train"]:
is_test = True
else:
raise ValueError(
"Phase %s is error, which must be one of train, dev, test, eval and predict."
% phase)
context_prog = fluid.Program()
startup_prog = fluid.Program()
with fluid.program_guard(context_prog, startup_prog):
with fluid.unique_name.guard():
image = fluid.layers.data(
name="image", shape=[3, 224, 224], dtype="float32")
efficientnet_b0 = EfficientNetB0(
override_params=override_params)
output, feature_map = efficientnet_b0.net(
input=image,
class_dim=len(self.label_list),
is_test=is_test)
name_prefix = '@HUB_{}@'.format(self.name)
inputs = {'image': name_prefix + image.name}
outputs = {
'classification': name_prefix + output.name,
'feature_map': name_prefix + feature_map.name
}
add_vars_prefix(context_prog, name_prefix)
add_vars_prefix(startup_prog, name_prefix)
global_vars = context_prog.global_block().vars
inputs = {
key: global_vars[value]
for key, value in inputs.items()
}
outputs = {
key: global_vars[value]
for key, value in outputs.items()
}
place = fluid.CPUPlace()
exe = fluid.Executor(place)
# pretrained
if pretrained:
def _if_exist(var):
b = os.path.exists(
os.path.join(self.default_pretrained_model_path,
var.name))
return b
fluid.io.load_vars(
exe,
self.default_pretrained_model_path,
context_prog,
predicate=_if_exist)
else:
exe.run(startup_prog)
# trainable
for param in context_prog.global_block().iter_parameters():
param.trainable = trainable
return inputs, outputs, context_prog
def classify(self,
images=None,
paths=None,
batch_size=1,
use_gpu=False,
top_k=1):
"""
API for image classification.
Args:
images (list[numpy.ndarray]): data of images, shape of each is [H, W, C], color space must be BGR.
paths (list[str]): The paths of images.
batch_size (int): batch size.
use_gpu (bool): Whether to use gpu.
top_k (int): Return top k results.
Returns:
            res (list[dict]): The classification results.
"""
if use_gpu:
try:
_places = os.environ["CUDA_VISIBLE_DEVICES"]
int(_places[0])
except:
raise RuntimeError(
"Environment Variable CUDA_VISIBLE_DEVICES is not set correctly. If you wanna use gpu, please set CUDA_VISIBLE_DEVICES as cuda_device_id."
)
all_data = list()
for yield_data in reader(images, paths):
all_data.append(yield_data)
total_num = len(all_data)
loop_num = int(np.ceil(total_num / batch_size))
res = list()
for iter_id in range(loop_num):
batch_data = list()
handle_id = iter_id * batch_size
for image_id in range(batch_size):
try:
batch_data.append(all_data[handle_id + image_id])
except:
pass
# feed batch image
batch_image = np.array([data['image'] for data in batch_data])
batch_image = PaddleTensor(batch_image.copy())
predictor_output = self.gpu_predictor.run([
batch_image
]) if use_gpu else self.cpu_predictor.run([batch_image])
out = postprocess(
data_out=predictor_output[0].as_ndarray(),
label_list=self.label_list,
top_k=top_k)
res += out
return res
def save_inference_model(self,
dirname,
model_filename=None,
params_filename=None,
combined=True):
if combined:
model_filename = "__model__" if not model_filename else model_filename
params_filename = "__params__" if not params_filename else params_filename
place = fluid.CPUPlace()
exe = fluid.Executor(place)
program, feeded_var_names, target_vars = fluid.io.load_inference_model(
dirname=self.default_pretrained_model_path, executor=exe)
fluid.io.save_inference_model(
dirname=dirname,
main_program=program,
executor=exe,
feeded_var_names=feeded_var_names,
target_vars=target_vars,
model_filename=model_filename,
params_filename=params_filename)
@serving
def serving_method(self, images, **kwargs):
"""
Run as a service.
"""
images_decode = [base64_to_cv2(image) for image in images]
results = self.classify(images=images_decode, **kwargs)
return results
@runnable
def run_cmd(self, argvs):
"""
Run as a command.
"""
self.parser = argparse.ArgumentParser(
description="Run the {} module.".format(self.name),
prog='hub run {}'.format(self.name),
usage='%(prog)s',
add_help=True)
self.arg_input_group = self.parser.add_argument_group(
title="Input options", description="Input data. Required")
self.arg_config_group = self.parser.add_argument_group(
title="Config options",
description=
"Run configuration for controlling module behavior, not required.")
self.add_module_config_arg()
self.add_module_input_arg()
args = self.parser.parse_args(argvs)
results = self.classify(
paths=[args.input_path],
batch_size=args.batch_size,
use_gpu=args.use_gpu)
return results
def add_module_config_arg(self):
"""
Add the command config options.
"""
self.arg_config_group.add_argument(
'--use_gpu',
type=ast.literal_eval,
default=False,
help="whether use GPU or not.")
self.arg_config_group.add_argument(
'--batch_size',
type=ast.literal_eval,
default=1,
help="batch size.")
self.arg_config_group.add_argument(
'--top_k',
type=ast.literal_eval,
default=1,
help="Return top k results.")
def add_module_input_arg(self):
"""
Add the command input options.
"""
self.arg_input_group.add_argument(
'--input_path', type=str, help="path to image.")
if __name__ == '__main__':
b0 = EfficientNetB0ImageNet()
b0.context()
import cv2
test_image = [
cv2.imread(
'/mnt/zhangxuefei/program-paddle/PaddleHub/hub_module/tests/image_dataset/classification/animals/dog.jpeg'
)
]
res = b0.classification(images=test_image)
print(res)
res = b0.classification(paths=[
'/mnt/zhangxuefei/program-paddle/PaddleHub/hub_module/tests/image_dataset/classification/animals/dog.jpeg'
])
print(res)
res = b0.classification(images=test_image)
print(res)
res = b0.classify(images=test_image)
print(res)
# -*- coding:utf-8 -*-
# copyright (c) 2020 PaddlePaddle Authors. All Rights Reserve.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import base64
import cv2
import os
import numpy as np
def base64_to_cv2(b64str):
data = base64.b64decode(b64str.encode('utf8'))
    data = np.frombuffer(data, np.uint8)
data = cv2.imdecode(data, cv2.IMREAD_COLOR)
return data
def softmax(x):
orig_shape = x.shape
if len(x.shape) > 1:
tmp = np.max(x, axis=1)
x -= tmp.reshape((x.shape[0], 1))
x = np.exp(x)
tmp = np.sum(x, axis=1)
x /= tmp.reshape((x.shape[0], 1))
else:
tmp = np.max(x)
x -= tmp
x = np.exp(x)
tmp = np.sum(x)
x /= tmp
return x
def postprocess(data_out, label_list, top_k):
"""
Postprocess output of network, one image at a time.
Args:
data_out (numpy.ndarray): output data of network.
label_list (list): list of label.
top_k (int): Return top k results.
"""
output = []
for result in data_out:
result_i = softmax(result)
output_i = {}
indexs = np.argsort(result_i)[::-1][0:top_k]
for index in indexs:
label = label_list[index].split(',')[0]
output_i[label] = float(result_i[index])
output.append(output_i)
return output
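# Hedged usage sketch with dummy inputs (names are illustrative only):
#   fake_logits = np.random.rand(2, 1000).astype('float32')
#   fake_labels = ['class_{}'.format(i) for i in range(1000)]
#   print(postprocess(fake_logits, fake_labels, top_k=5))
# Each returned element maps a label name to its softmax probability.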
@@ -49,7 +49,7 @@ def context(trainable=True, pretrained=True)
 * context\_prog (fluid.Program): the program (computation graph), used for transfer learning.
 ```python
-def classification(images=None,
+def classify(images=None,
              paths=None,
              batch_size=1,
              use_gpu=False,
@@ -92,9 +92,9 @@ import cv2
 classifier = hub.Module(name="efficientnetb0_small_imagenet")
-result = classifier.classification(images=[cv2.imread('/PATH/TO/IMAGE')])
+result = classifier.classify(images=[cv2.imread('/PATH/TO/IMAGE')])
 # or
-# result = classifier.classification(paths=['/PATH/TO/IMAGE'])
+# result = classifier.classify(paths=['/PATH/TO/IMAGE'])
 ```
 ## Service deployment
......
-# coding=utf-8
+# -*- coding:utf-8 -*-
+# copyright (c) 2020 PaddlePaddle Authors. All Rights Reserve.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
 import os
 import time
 from collections import OrderedDict
......
+# -*- coding:utf-8 -*-
+# copyright (c) 2020 PaddlePaddle Authors. All Rights Reserve.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
 from __future__ import absolute_import
 from __future__ import division
 from __future__ import print_function
@@ -135,7 +150,6 @@ class EfficientNet():
             model_name, override_params)
         self._bn_mom = self._global_params.batch_norm_momentum
         self._bn_eps = self._global_params.batch_norm_epsilon
-        self.is_test = is_test
         self.padding_type = padding_type
         self.use_se = use_se
@@ -159,7 +173,7 @@ class EfficientNet():
         pool = fluid.layers.pool2d(
             input=conv, pool_type='avg', global_pooling=True, use_cudnn=False)
-        if self._global_params.dropout_rate:
+        if not is_test and self._global_params.dropout_rate:
             pool = fluid.layers.dropout(
                 pool,
                 self._global_params.dropout_rate,
@@ -335,7 +349,7 @@ class EfficientNet():
         input_filters, output_filters = block_args.input_filters, block_args.output_filters
         if id_skip and block_args.stride == 1 and input_filters == output_filters:
             if drop_connect_rate:
-                conv = self._drop_connect(conv, drop_connect_rate, self.is_test)
+                conv = self._drop_connect(conv, drop_connect_rate, is_test)
             conv = fluid.layers.elementwise_add(conv, inputs)
         return conv
......
+# -*- coding:utf-8 -*-
+# copyright (c) 2020 PaddlePaddle Authors. All Rights Reserve.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
 from __future__ import absolute_import
 from __future__ import division
 from __future__ import print_function
......
-# coding=utf-8
+# -*- coding:utf-8 -*-
+# copyright (c) 2020 PaddlePaddle Authors. All Rights Reserve.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
 from __future__ import absolute_import
 from __future__ import division
@@ -26,14 +40,15 @@ from efficientnetb0_small_imagenet.efficientnet import EfficientNetB0_small
     summary=
     "EfficientNetB0 is a image classfication model, this module is trained with imagenet datasets.",
     version="1.0.0")
-class EfficientNetB0ImageNet(hub.Module):
+class EfficientNetB0SmallImageNet(hub.Module):
     def _initialize(self):
         self.default_pretrained_model_path = os.path.join(
-            self.directory, "efficientnetb0_small_imagenet_model")
+            self.directory, "efficientnetb0_small_imagenet_infer_model")
         label_file = os.path.join(self.directory, "label_list.txt")
         with open(label_file, 'r', encoding='utf-8') as file:
             self.label_list = file.read().split("\n")[:-1]
-        self.predictor_set = False
+        self.classification = self.classify
+        self._set_config()

     def get_expected_image_width(self):
         return 224
@@ -71,7 +86,11 @@ class EfficientNetB0ImageNet(hub.Module):
                 memory_pool_init_size_mb=1000, device_id=0)
             self.gpu_predictor = create_paddle_predictor(gpu_config)

-    def context(self, trainable=True, pretrained=True):
+    def context(self,
+                trainable=True,
+                pretrained=True,
+                override_params=None,
+                phase='train'):
         """context for transfer learning.

         Args:
@@ -85,15 +104,27 @@ class EfficientNetB0ImageNet(hub.Module):
                 'feature_map', corresponding value is the result of the layer before the fully connected layer.
             context_prog (fluid.Program): program for transfer learning.
         """
+        if phase in ["dev", "test", "predict", "eval"]:
+            is_test = False
+        elif phase in ["train"]:
+            is_test = True
+        else:
+            raise ValueError(
+                "Phase %s is error, which must be one of train, dev, test, eval and predict."
+                % phase)
         context_prog = fluid.Program()
         startup_prog = fluid.Program()
         with fluid.program_guard(context_prog, startup_prog):
             with fluid.unique_name.guard():
                 image = fluid.layers.data(
                     name="image", shape=[3, 224, 224], dtype="float32")
-                efficientnet_b0 = EfficientNetB0_small()
+                efficientnet_b0 = EfficientNetB0_small(
+                    override_params=override_params)
                 output, feature_map = efficientnet_b0.net(
-                    input=image, class_dim=len(self.label_list))
+                    input=image,
+                    class_dim=len(self.label_list),
+                    is_test=is_test)

                 name_prefix = '@HUB_{}@'.format(self.name)
                 inputs = {'image': name_prefix + image.name}
@@ -129,16 +160,6 @@ class EfficientNetB0ImageNet(hub.Module):
                     self.default_pretrained_model_path,
                     context_prog,
                     predicate=_if_exist)
-                print(inputs.keys())
-                fluid.io.save_inference_model(
-                    dirname=os.path.join(
-                        self.directory,
-                        'efficientnetb0_small_imagenet_model'),
-                    feeded_var_names=[name_prefix + 'image'],
-                    target_vars=list(outputs.values()),
-                    executor=exe,
-                    main_program=context_prog)
             else:
                 exe.run(startup_prog)
             # trainable
@@ -146,12 +167,12 @@ class EfficientNetB0ImageNet(hub.Module):
                 param.trainable = trainable
         return inputs, outputs, context_prog

-    def classification(self,
+    def classify(self,
                  images=None,
                  paths=None,
                  batch_size=1,
                  use_gpu=False,
                  top_k=1):
         """
         API for image classification.
@@ -165,10 +186,6 @@ class EfficientNetB0ImageNet(hub.Module):
         Returns:
             res (list[dict]): The classfication results.
         """
-        if not self.predictor_set:
-            self._set_config()
-            self.predictor_set = True
         if use_gpu:
             try:
                 _places = os.environ["CUDA_VISIBLE_DEVICES"]
@@ -236,7 +253,7 @@ class EfficientNetB0ImageNet(hub.Module):
         Run as a service.
         """
         images_decode = [base64_to_cv2(image) for image in images]
-        results = self.classification(images=images_decode, **kwargs)
+        results = self.classify(images=images_decode, **kwargs)
         return results

     @runnable
@@ -258,7 +275,7 @@ class EfficientNetB0ImageNet(hub.Module):
         self.add_module_config_arg()
         self.add_module_input_arg()
         args = self.parser.parse_args(argvs)
-        results = self.classification(
+        results = self.classify(
             paths=[args.input_path],
             batch_size=args.batch_size,
             use_gpu=args.use_gpu)
@@ -290,3 +307,18 @@ class EfficientNetB0ImageNet(hub.Module):
         """
         self.arg_input_group.add_argument(
             '--input_path', type=str, help="path to image.")
+
+
+if __name__ == '__main__':
+    b0 = EfficientNetB0SmallImageNet()
+    b0.context()
+    import cv2
+    test_image = [cv2.imread('dog.jpeg')]
+    res = b0.classification(images=test_image)
+    print(res)
+    res = b0.classification(paths=['dog.jpeg'])
+    print(res)
+    res = b0.classification(images=test_image)
+    print(res)
+    res = b0.classify(images=test_image)
+    print(res)
......
-# coding=utf-8
+# -*- coding:utf-8 -*-
+# copyright (c) 2020 PaddlePaddle Authors. All Rights Reserve.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
 from __future__ import absolute_import
 from __future__ import division
 from __future__ import print_function
......
## Command-line prediction
```
hub run efficientnetb1_imagenet --input_path "/PATH/TO/IMAGE"
```
## API
```python
def get_expected_image_width()
```
Returns the image width expected by preprocessing, i.e. 224.
```python
def get_expected_image_height()
```
Returns the image height expected by preprocessing, i.e. 224.
```python
def get_pretrained_images_mean()
```
Returns the image mean used in preprocessing, i.e. \[0.485, 0.456, 0.406\].
```python
def get_pretrained_images_std()
```
Returns the image standard deviation used in preprocessing, i.e. \[0.229, 0.224, 0.225\].
```python
def context(trainable=True, pretrained=True)
```
**Parameters**
* trainable (bool): whether the parameters in the program are trainable;
* pretrained (bool): whether to load the default pretrained model.

**Returns**
* inputs (dict): inputs of the program; the key is 'image' and the value is the image tensor;
* outputs (dict): outputs of the program, with keys 'classification' and 'feature_map', whose values are:
    * classification (paddle.fluid.framework.Variable): the classification result, i.e. the output of the fully connected layer;
    * feature\_map (paddle.fluid.framework.Variable): the feature map, i.e. the tensor immediately before the fully connected layer.
* context\_prog (fluid.Program): the program (computation graph), used for transfer learning.
```python
def classify(images=None,
paths=None,
batch_size=1,
use_gpu=False,
top_k=1):
```
**Parameters**
* images (list\[numpy.ndarray\]): image data; each image is an ndarray of shape \[H, W, C\] in BGR color space;
* paths (list\[str\]): paths to the images;
* batch\_size (int): batch size;
* use\_gpu (bool): whether to run prediction on the GPU;
* top\_k (int): return the top k predictions.

**Returns**
res (list\[dict\]): classification results; each element of the list is a dict whose keys are the predicted category labels and whose values are the confidences. A short top\_k example follows below.
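
For reference, a hedged sketch that asks for the top 3 predictions and prints them in descending confidence:
```python
import paddlehub as hub
import cv2

classifier = hub.Module(name="efficientnetb1_imagenet")
res = classifier.classify(images=[cv2.imread('/PATH/TO/IMAGE')], top_k=3)
for label, score in sorted(res[0].items(), key=lambda kv: kv[1], reverse=True):
    print(label, score)
```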
```python
def save_inference_model(dirname,
model_filename=None,
params_filename=None,
combined=True)
```
Saves the model to the specified directory.

**Parameters**
* dirname: directory in which to save the model
* model\_filename: name of the model file, \_\_model\_\_ by default
* params\_filename: name of the parameters file, \_\_params\_\_ by default (only takes effect when `combined` is True)
* combined: whether to save all parameters into a single file
## Code example
```python
import paddlehub as hub
import cv2
classifier = hub.Module(name="efficientnetb1_imagenet")
result = classifier.classify(images=[cv2.imread('/PATH/TO/IMAGE')])
# or
# result = classifier.classify(paths=['/PATH/TO/IMAGE'])
```
## Service deployment
PaddleHub Serving can deploy an online image classification service.
## Step 1: Start PaddleHub Serving
Run the start command:
```shell
$ hub serving start -m efficientnetb1_imagenet
```
This deploys an online image classification API; the default port is 8866.
**NOTE:** To run prediction on a GPU, set the CUDA\_VISIBLE\_DEVICES environment variable before starting the service; otherwise it does not need to be set.
## Step 2: Send a prediction request
With the server configured, the following few lines of code send a prediction request and fetch the result:
```python
import requests
import json
import cv2
import base64
def cv2_to_base64(image):
data = cv2.imencode('.jpg', image)[1]
return base64.b64encode(data.tostring()).decode('utf8')
# send the HTTP request
data = {'images':[cv2_to_base64(cv2.imread("/PATH/TO/IMAGE"))]}
headers = {"Content-type": "application/json"}
url = "http://127.0.0.1:8866/predict/efficientnetb1_imagenet"
r = requests.post(url=url, headers=headers, data=json.dumps(data))
# print the prediction results
print(r.json()["results"])
```
### Code
https://github.com/PaddlePaddle/PaddleClas
### Dependencies
paddlepaddle >= 1.6.2
paddlehub >= 1.6.0
# -*- coding:utf-8 -*-
# copyright (c) 2020 PaddlePaddle Authors. All Rights Reserve.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import time
from collections import OrderedDict
import cv2
import numpy as np
from PIL import Image
__all__ = ['reader']
DATA_DIM = 224
img_mean = np.array([0.485, 0.456, 0.406]).reshape((3, 1, 1))
img_std = np.array([0.229, 0.224, 0.225]).reshape((3, 1, 1))
def resize_short(img, target_size):
percent = float(target_size) / min(img.size[0], img.size[1])
resized_width = int(round(img.size[0] * percent))
resized_height = int(round(img.size[1] * percent))
img = img.resize((resized_width, resized_height), Image.LANCZOS)
return img
def crop_image(img, target_size, center):
width, height = img.size
size = target_size
if center == True:
w_start = (width - size) / 2
h_start = (height - size) / 2
else:
w_start = np.random.randint(0, width - size + 1)
h_start = np.random.randint(0, height - size + 1)
w_end = w_start + size
h_end = h_start + size
img = img.crop((w_start, h_start, w_end, h_end))
return img
def process_image(img):
img = resize_short(img, target_size=256)
img = crop_image(img, target_size=DATA_DIM, center=True)
if img.mode != 'RGB':
img = img.convert('RGB')
img = np.array(img).astype('float32').transpose((2, 0, 1)) / 255
img -= img_mean
img /= img_std
return img
def reader(images=None, paths=None):
"""
Preprocess to yield image.
Args:
images (list[numpy.ndarray]): images data, shape of each is [H, W, C].
paths (list[str]): paths to images.
Yield:
each (collections.OrderedDict): info of original image, preprocessed image.
"""
component = list()
if paths:
for im_path in paths:
each = OrderedDict()
assert os.path.isfile(
im_path), "The {} isn't a valid file path.".format(im_path)
each['org_im_path'] = im_path
each['org_im'] = Image.open(im_path)
each['org_im_width'], each['org_im_height'] = each['org_im'].size
component.append(each)
if images is not None:
        assert isinstance(images, list), "images should be a list."
for im in images:
each = OrderedDict()
each['org_im'] = Image.fromarray(im[:, :, ::-1])
each['org_im_path'] = 'ndarray_time={}'.format(
round(time.time(), 6) * 1e6)
each['org_im_width'], each['org_im_height'] = each['org_im'].size
component.append(each)
for element in component:
element['image'] = process_image(element['org_im'])
yield element
# -*- coding:utf-8 -*-
# copyright (c) 2020 PaddlePaddle Authors. All Rights Reserve.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import warnings
import paddle.fluid as fluid
def initial_type(name,
input,
op_type,
fan_out,
init="google",
use_bias=False,
filter_size=0,
stddev=0.02):
if init == "kaiming":
if op_type == 'conv':
fan_in = input.shape[1] * filter_size * filter_size
elif op_type == 'deconv':
fan_in = fan_out * filter_size * filter_size
else:
if len(input.shape) > 2:
fan_in = input.shape[1] * input.shape[2] * input.shape[3]
else:
fan_in = input.shape[1]
bound = 1 / math.sqrt(fan_in)
param_attr = fluid.ParamAttr(
name=name + "_weights",
initializer=fluid.initializer.Uniform(low=-bound, high=bound))
if use_bias == True:
bias_attr = fluid.ParamAttr(
name=name + '_offset',
initializer=fluid.initializer.Uniform(low=-bound, high=bound))
else:
bias_attr = False
elif init == 'google':
n = filter_size * filter_size * fan_out
param_attr = fluid.ParamAttr(
name=name + "_weights",
initializer=fluid.initializer.NormalInitializer(
loc=0.0, scale=math.sqrt(2.0 / n)))
if use_bias == True:
bias_attr = fluid.ParamAttr(
name=name + "_offset",
initializer=fluid.initializer.Constant(0.0))
else:
bias_attr = False
else:
param_attr = fluid.ParamAttr(
name=name + "_weights",
initializer=fluid.initializer.NormalInitializer(
loc=0.0, scale=stddev))
if use_bias == True:
bias_attr = fluid.ParamAttr(
name=name + "_offset",
initializer=fluid.initializer.Constant(0.0))
else:
bias_attr = False
return param_attr, bias_attr
def cal_padding(img_size, stride, filter_size, dilation=1):
"""Calculate padding size."""
if img_size % stride == 0:
out_size = max(filter_size - stride, 0)
else:
out_size = max(filter_size - (img_size % stride), 0)
return out_size // 2, out_size - out_size // 2
def init_batch_norm_layer(name="batch_norm"):
param_attr = fluid.ParamAttr(
name=name + '_scale', initializer=fluid.initializer.Constant(1.0))
bias_attr = fluid.ParamAttr(
name=name + '_offset',
initializer=fluid.initializer.Constant(value=0.0))
return param_attr, bias_attr
def init_fc_layer(fout, name='fc'):
n = fout # fan-out
init_range = 1.0 / math.sqrt(n)
param_attr = fluid.ParamAttr(
name=name + '_weights',
initializer=fluid.initializer.UniformInitializer(
low=-init_range, high=init_range))
bias_attr = fluid.ParamAttr(
name=name + '_offset',
initializer=fluid.initializer.Constant(value=0.0))
return param_attr, bias_attr
def norm_layer(input, norm_type='batch_norm', name=None):
if norm_type == 'batch_norm':
param_attr = fluid.ParamAttr(
name=name + '_weights', initializer=fluid.initializer.Constant(1.0))
bias_attr = fluid.ParamAttr(
name=name + '_offset',
initializer=fluid.initializer.Constant(value=0.0))
return fluid.layers.batch_norm(
input,
param_attr=param_attr,
bias_attr=bias_attr,
moving_mean_name=name + '_mean',
moving_variance_name=name + '_variance')
elif norm_type == 'instance_norm':
helper = fluid.layer_helper.LayerHelper("instance_norm", **locals())
dtype = helper.input_dtype()
epsilon = 1e-5
mean = fluid.layers.reduce_mean(input, dim=[2, 3], keep_dim=True)
var = fluid.layers.reduce_mean(
fluid.layers.square(input - mean), dim=[2, 3], keep_dim=True)
if name is not None:
scale_name = name + "_scale"
offset_name = name + "_offset"
scale_param = fluid.ParamAttr(
name=scale_name,
initializer=fluid.initializer.Constant(1.0),
trainable=True)
offset_param = fluid.ParamAttr(
name=offset_name,
initializer=fluid.initializer.Constant(0.0),
trainable=True)
scale = helper.create_parameter(
attr=scale_param, shape=input.shape[1:2], dtype=dtype)
offset = helper.create_parameter(
attr=offset_param, shape=input.shape[1:2], dtype=dtype)
tmp = fluid.layers.elementwise_mul(x=(input - mean), y=scale, axis=1)
tmp = tmp / fluid.layers.sqrt(var + epsilon)
tmp = fluid.layers.elementwise_add(tmp, offset, axis=1)
return tmp
else:
        raise NotImplementedError("norm type: [%s] is not supported" % norm_type)
def conv2d(input,
num_filters=64,
filter_size=7,
stride=1,
stddev=0.02,
padding=0,
groups=None,
name="conv2d",
norm=None,
act=None,
relufactor=0.0,
use_bias=False,
padding_type=None,
initial="normal",
use_cudnn=True):
if padding != 0 and padding_type != None:
warnings.warn(
'padding value and padding type are set in the same time, and the final padding width and padding height are computed by padding_type'
)
param_attr, bias_attr = initial_type(
name=name,
input=input,
op_type='conv',
fan_out=num_filters,
init=initial,
use_bias=use_bias,
filter_size=filter_size,
stddev=stddev)
def get_padding(filter_size, stride=1, dilation=1):
padding = ((stride - 1) + dilation * (filter_size - 1)) // 2
return padding
need_crop = False
if padding_type == "SAME":
top_padding, bottom_padding = cal_padding(input.shape[2], stride,
filter_size)
left_padding, right_padding = cal_padding(input.shape[2], stride,
filter_size)
height_padding = bottom_padding
width_padding = right_padding
if top_padding != bottom_padding or left_padding != right_padding:
height_padding = top_padding + stride
width_padding = left_padding + stride
need_crop = True
padding = [height_padding, width_padding]
elif padding_type == "VALID":
height_padding = 0
width_padding = 0
padding = [height_padding, width_padding]
elif padding_type == "DYNAMIC":
padding = get_padding(filter_size, stride)
else:
padding = padding
conv = fluid.layers.conv2d(
input,
num_filters,
filter_size,
groups=groups,
name=name,
stride=stride,
padding=padding,
use_cudnn=use_cudnn,
param_attr=param_attr,
bias_attr=bias_attr)
if need_crop:
conv = conv[:, :, 1:, 1:]
if norm is not None:
conv = norm_layer(input=conv, norm_type=norm, name=name + "_norm")
if act == 'relu':
conv = fluid.layers.relu(conv, name=name + '_relu')
elif act == 'leaky_relu':
conv = fluid.layers.leaky_relu(
conv, alpha=relufactor, name=name + '_leaky_relu')
elif act == 'tanh':
conv = fluid.layers.tanh(conv, name=name + '_tanh')
elif act == 'sigmoid':
conv = fluid.layers.sigmoid(conv, name=name + '_sigmoid')
elif act == 'swish':
conv = fluid.layers.swish(conv, name=name + '_swish')
elif act == None:
conv = conv
else:
        raise NotImplementedError("activation: [%s] is not supported" % act)
return conv
# -*- coding:utf-8 -*-
# copyright (c) 2020 PaddlePaddle Authors. All Rights Reserve.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import base64
import cv2
import os
import numpy as np
def base64_to_cv2(b64str):
data = base64.b64decode(b64str.encode('utf8'))
    data = np.frombuffer(data, np.uint8)
data = cv2.imdecode(data, cv2.IMREAD_COLOR)
return data
def softmax(x):
orig_shape = x.shape
if len(x.shape) > 1:
tmp = np.max(x, axis=1)
x -= tmp.reshape((x.shape[0], 1))
x = np.exp(x)
tmp = np.sum(x, axis=1)
x /= tmp.reshape((x.shape[0], 1))
else:
tmp = np.max(x)
x -= tmp
x = np.exp(x)
tmp = np.sum(x)
x /= tmp
return x
def postprocess(data_out, label_list, top_k):
"""
Postprocess output of network, one image at a time.
Args:
data_out (numpy.ndarray): output data of network.
label_list (list): list of label.
top_k (int): Return top k results.
"""
output = []
for result in data_out:
result_i = softmax(result)
output_i = {}
indexs = np.argsort(result_i)[::-1][0:top_k]
for index in indexs:
label = label_list[index].split(',')[0]
output_i[label] = float(result_i[index])
output.append(output_i)
return output
@@ -4,6 +4,6 @@ exclude:
   - README.md
 resources:
   -
-    url: https://bj.bcebos.com/paddlehub/hub_dev/efficientnetb0_small_imagenet_model.tar.gz
+    url: https://bj.bcebos.com/paddlehub/model/image/classification/efficientnetb0_small_imagenet_infer_model.tar.gz
     dest: .
     uncompress: True