Unverified commit 8468e1ac, authored by KP, committed by GitHub

Remove fluid api in modules and pkg. (#1906)

Parent 5294a272
......@@ -180,8 +180,10 @@
First release
* 1.0.1
* 1.0.3
Remove fluid api
- ```shell
$ hub install stylepro_artistic==1.0.1
$ hub install stylepro_artistic==1.0.3
```
......@@ -179,8 +179,10 @@
First release
* 1.0.1
* 1.0.3
Remove fluid api
- ```shell
$ hub install stylepro_artistic==1.0.1
$ hub install stylepro_artistic==1.0.3
```
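The fluid inference entry points removed throughout this commit (`AnalysisConfig`, `create_paddle_predictor`, `PaddleTensor`) are replaced by the `paddle.inference` API in the updated module code below. A minimal sketch of that replacement flow, assuming a placeholder model directory and a dummy 1x3x224x224 input:

```python
# Minimal sketch of the paddle.inference flow adopted in this commit.
# "./inference_model" is a placeholder directory, not a file from this repo.
import numpy as np
import paddle.inference as paddle_infer

config = paddle_infer.Config("./inference_model")   # directory with the saved model
config.disable_glog_info()
config.disable_gpu()                                 # or config.enable_use_gpu(1000, 0)
predictor = paddle_infer.create_predictor(config)

# Handle-based I/O replaces the old PaddleTensor objects.
input_handle = predictor.get_input_handle(predictor.get_input_names()[0])
input_handle.copy_from_cpu(np.random.rand(1, 3, 224, 224).astype("float32"))
predictor.run()
output_handle = predictor.get_output_handle(predictor.get_output_names()[0])
result = output_handle.copy_to_cpu()                 # plain numpy array
```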
# coding=utf-8
from paddle.fluid.initializer import Constant
from paddle.fluid.param_attr import ParamAttr
import paddle.fluid as fluid
def decoder_net():
x2paddle_22 = fluid.layers.create_parameter(
dtype='float32', shape=[4], name='x2paddle_22', attr='x2paddle_22', default_initializer=Constant(0.0))
x2paddle_36 = fluid.layers.create_parameter(
dtype='float32', shape=[4], name='x2paddle_36', attr='x2paddle_36', default_initializer=Constant(0.0))
x2paddle_44 = fluid.layers.create_parameter(
dtype='float32', shape=[4], name='x2paddle_44', attr='x2paddle_44', default_initializer=Constant(0.0))
x2paddle_input_1 = fluid.layers.data(
dtype='float32', shape=[1, 512, 64, 64], name='x2paddle_input_1', append_batch_size=False)
x2paddle_19 = fluid.layers.pad2d(
x2paddle_input_1, pad_value=0.0, mode='reflect', paddings=[1, 1, 1, 1], name='x2paddle_19')
x2paddle_20 = fluid.layers.conv2d(
x2paddle_19,
num_filters=256,
filter_size=[3, 3],
stride=[1, 1],
padding=[0, 0],
dilation=[1, 1],
groups=1,
param_attr='x2paddle_1',
name='x2paddle_20',
bias_attr='x2paddle_2')
x2paddle_21 = fluid.layers.relu(x2paddle_20, name='x2paddle_21')
x2paddle_23 = fluid.layers.resize_nearest(x2paddle_21, name='x2paddle_23', out_shape=[128, 128])
x2paddle_24 = fluid.layers.pad2d(
x2paddle_23, pad_value=0.0, mode='reflect', paddings=[1, 1, 1, 1], name='x2paddle_24')
x2paddle_25 = fluid.layers.conv2d(
x2paddle_24,
num_filters=256,
filter_size=[3, 3],
stride=[1, 1],
padding=[0, 0],
dilation=[1, 1],
groups=1,
param_attr='x2paddle_3',
name='x2paddle_25',
bias_attr='x2paddle_4')
x2paddle_26 = fluid.layers.relu(x2paddle_25, name='x2paddle_26')
x2paddle_27 = fluid.layers.pad2d(
x2paddle_26, pad_value=0.0, mode='reflect', paddings=[1, 1, 1, 1], name='x2paddle_27')
x2paddle_28 = fluid.layers.conv2d(
x2paddle_27,
num_filters=256,
filter_size=[3, 3],
stride=[1, 1],
padding=[0, 0],
dilation=[1, 1],
groups=1,
param_attr='x2paddle_5',
name='x2paddle_28',
bias_attr='x2paddle_6')
x2paddle_29 = fluid.layers.relu(x2paddle_28, name='x2paddle_29')
x2paddle_30 = fluid.layers.pad2d(
x2paddle_29, pad_value=0.0, mode='reflect', paddings=[1, 1, 1, 1], name='x2paddle_30')
x2paddle_31 = fluid.layers.conv2d(
x2paddle_30,
num_filters=256,
filter_size=[3, 3],
stride=[1, 1],
padding=[0, 0],
dilation=[1, 1],
groups=1,
param_attr='x2paddle_7',
name='x2paddle_31',
bias_attr='x2paddle_8')
x2paddle_32 = fluid.layers.relu(x2paddle_31, name='x2paddle_32')
x2paddle_33 = fluid.layers.pad2d(
x2paddle_32, pad_value=0.0, mode='reflect', paddings=[1, 1, 1, 1], name='x2paddle_33')
x2paddle_34 = fluid.layers.conv2d(
x2paddle_33,
num_filters=128,
filter_size=[3, 3],
stride=[1, 1],
padding=[0, 0],
dilation=[1, 1],
groups=1,
param_attr='x2paddle_9',
name='x2paddle_34',
bias_attr='x2paddle_10')
x2paddle_35 = fluid.layers.relu(x2paddle_34, name='x2paddle_35')
x2paddle_37 = fluid.layers.resize_nearest(x2paddle_35, name='x2paddle_37', out_shape=[256, 256])
x2paddle_38 = fluid.layers.pad2d(
x2paddle_37, pad_value=0.0, mode='reflect', paddings=[1, 1, 1, 1], name='x2paddle_38')
x2paddle_39 = fluid.layers.conv2d(
x2paddle_38,
num_filters=128,
filter_size=[3, 3],
stride=[1, 1],
padding=[0, 0],
dilation=[1, 1],
groups=1,
param_attr='x2paddle_11',
name='x2paddle_39',
bias_attr='x2paddle_12')
x2paddle_40 = fluid.layers.relu(x2paddle_39, name='x2paddle_40')
x2paddle_41 = fluid.layers.pad2d(
x2paddle_40, pad_value=0.0, mode='reflect', paddings=[1, 1, 1, 1], name='x2paddle_41')
x2paddle_42 = fluid.layers.conv2d(
x2paddle_41,
num_filters=64,
filter_size=[3, 3],
stride=[1, 1],
padding=[0, 0],
dilation=[1, 1],
groups=1,
param_attr='x2paddle_13',
name='x2paddle_42',
bias_attr='x2paddle_14')
x2paddle_43 = fluid.layers.relu(x2paddle_42, name='x2paddle_43')
x2paddle_45 = fluid.layers.resize_nearest(x2paddle_43, name='x2paddle_45', out_shape=[512, 512])
x2paddle_46 = fluid.layers.pad2d(
x2paddle_45, pad_value=0.0, mode='reflect', paddings=[1, 1, 1, 1], name='x2paddle_46')
x2paddle_47 = fluid.layers.conv2d(
x2paddle_46,
num_filters=64,
filter_size=[3, 3],
stride=[1, 1],
padding=[0, 0],
dilation=[1, 1],
groups=1,
param_attr='x2paddle_15',
name='x2paddle_47',
bias_attr='x2paddle_16')
x2paddle_48 = fluid.layers.relu(x2paddle_47, name='x2paddle_48')
x2paddle_49 = fluid.layers.pad2d(
x2paddle_48, pad_value=0.0, mode='reflect', paddings=[1, 1, 1, 1], name='x2paddle_49')
x2paddle_50 = fluid.layers.conv2d(
x2paddle_49,
num_filters=3,
filter_size=[3, 3],
stride=[1, 1],
padding=[0, 0],
dilation=[1, 1],
groups=1,
param_attr='x2paddle_17',
name='x2paddle_50',
bias_attr='x2paddle_18')
return x2paddle_input_1, x2paddle_50
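The `decoder_network.py` above builds the decoder graph with `fluid.layers` calls, and its import is dropped from `module.py` in this commit; the module now loads the exported inference model instead. For reference only, the recurring reflect-pad / 3x3 conv / ReLU / nearest-upsample pattern maps onto `paddle.nn` roughly as in the sketch below (channel counts follow the first block above, everything else is illustrative and not part of the commit):

```python
# Illustrative paddle.nn counterpart of the first decoder block above
# (reflect pad -> 3x3 conv -> ReLU -> nearest upsample); not part of the commit.
import paddle
import paddle.nn as nn

class DecoderBlock(nn.Layer):
    def __init__(self, in_channels=512, out_channels=256, up_size=(128, 128)):
        super().__init__()
        self.pad = nn.Pad2D(padding=[1, 1, 1, 1], mode='reflect')
        self.conv = nn.Conv2D(in_channels, out_channels, kernel_size=3)
        self.relu = nn.ReLU()
        self.up = nn.Upsample(size=up_size, mode='nearest')

    def forward(self, x):
        return self.up(self.relu(self.conv(self.pad(x))))

y = DecoderBlock()(paddle.rand([1, 512, 64, 64]))   # -> shape [1, 256, 128, 128]
```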
# coding=utf-8
from paddle.fluid.initializer import Constant
from paddle.fluid.param_attr import ParamAttr
import paddle.fluid as fluid
def encoder_net():
x2paddle_0 = fluid.layers.data(dtype='float32', shape=[1, 3, 512, 512], name='x2paddle_0', append_batch_size=False)
x2paddle_21 = fluid.layers.conv2d(
x2paddle_0,
num_filters=3,
filter_size=[1, 1],
stride=[1, 1],
padding=[0, 0],
dilation=[1, 1],
groups=1,
param_attr='x2paddle_1',
name='x2paddle_21',
bias_attr='x2paddle_2')
x2paddle_22 = fluid.layers.pad2d(
x2paddle_21, pad_value=0.0, mode='reflect', paddings=[1, 1, 1, 1], name='x2paddle_22')
x2paddle_23 = fluid.layers.conv2d(
x2paddle_22,
num_filters=64,
filter_size=[3, 3],
stride=[1, 1],
padding=[0, 0],
dilation=[1, 1],
groups=1,
param_attr='x2paddle_3',
name='x2paddle_23',
bias_attr='x2paddle_4')
x2paddle_24 = fluid.layers.relu(x2paddle_23, name='x2paddle_24')
x2paddle_25 = fluid.layers.pad2d(
x2paddle_24, pad_value=0.0, mode='reflect', paddings=[1, 1, 1, 1], name='x2paddle_25')
x2paddle_26 = fluid.layers.conv2d(
x2paddle_25,
num_filters=64,
filter_size=[3, 3],
stride=[1, 1],
padding=[0, 0],
dilation=[1, 1],
groups=1,
param_attr='x2paddle_5',
name='x2paddle_26',
bias_attr='x2paddle_6')
x2paddle_27 = fluid.layers.relu(x2paddle_26, name='x2paddle_27')
x2paddle_28 = fluid.layers.pool2d(
x2paddle_27,
pool_size=[2, 2],
pool_type='max',
pool_stride=[2, 2],
pool_padding=[0, 0],
ceil_mode=False,
name='x2paddle_28',
exclusive=False)
x2paddle_29 = fluid.layers.pad2d(
x2paddle_28, pad_value=0.0, mode='reflect', paddings=[1, 1, 1, 1], name='x2paddle_29')
x2paddle_30 = fluid.layers.conv2d(
x2paddle_29,
num_filters=128,
filter_size=[3, 3],
stride=[1, 1],
padding=[0, 0],
dilation=[1, 1],
groups=1,
param_attr='x2paddle_7',
name='x2paddle_30',
bias_attr='x2paddle_8')
x2paddle_31 = fluid.layers.relu(x2paddle_30, name='x2paddle_31')
x2paddle_32 = fluid.layers.pad2d(
x2paddle_31, pad_value=0.0, mode='reflect', paddings=[1, 1, 1, 1], name='x2paddle_32')
x2paddle_33 = fluid.layers.conv2d(
x2paddle_32,
num_filters=128,
filter_size=[3, 3],
stride=[1, 1],
padding=[0, 0],
dilation=[1, 1],
groups=1,
param_attr='x2paddle_9',
name='x2paddle_33',
bias_attr='x2paddle_10')
x2paddle_34 = fluid.layers.relu(x2paddle_33, name='x2paddle_34')
x2paddle_35 = fluid.layers.pool2d(
x2paddle_34,
pool_size=[2, 2],
pool_type='max',
pool_stride=[2, 2],
pool_padding=[0, 0],
ceil_mode=False,
name='x2paddle_35',
exclusive=False)
x2paddle_36 = fluid.layers.pad2d(
x2paddle_35, pad_value=0.0, mode='reflect', paddings=[1, 1, 1, 1], name='x2paddle_36')
x2paddle_37 = fluid.layers.conv2d(
x2paddle_36,
num_filters=256,
filter_size=[3, 3],
stride=[1, 1],
padding=[0, 0],
dilation=[1, 1],
groups=1,
param_attr='x2paddle_11',
name='x2paddle_37',
bias_attr='x2paddle_12')
x2paddle_38 = fluid.layers.relu(x2paddle_37, name='x2paddle_38')
x2paddle_39 = fluid.layers.pad2d(
x2paddle_38, pad_value=0.0, mode='reflect', paddings=[1, 1, 1, 1], name='x2paddle_39')
x2paddle_40 = fluid.layers.conv2d(
x2paddle_39,
num_filters=256,
filter_size=[3, 3],
stride=[1, 1],
padding=[0, 0],
dilation=[1, 1],
groups=1,
param_attr='x2paddle_13',
name='x2paddle_40',
bias_attr='x2paddle_14')
x2paddle_41 = fluid.layers.relu(x2paddle_40, name='x2paddle_41')
x2paddle_42 = fluid.layers.pad2d(
x2paddle_41, pad_value=0.0, mode='reflect', paddings=[1, 1, 1, 1], name='x2paddle_42')
x2paddle_43 = fluid.layers.conv2d(
x2paddle_42,
num_filters=256,
filter_size=[3, 3],
stride=[1, 1],
padding=[0, 0],
dilation=[1, 1],
groups=1,
param_attr='x2paddle_15',
name='x2paddle_43',
bias_attr='x2paddle_16')
x2paddle_44 = fluid.layers.relu(x2paddle_43, name='x2paddle_44')
x2paddle_45 = fluid.layers.pad2d(
x2paddle_44, pad_value=0.0, mode='reflect', paddings=[1, 1, 1, 1], name='x2paddle_45')
x2paddle_46 = fluid.layers.conv2d(
x2paddle_45,
num_filters=256,
filter_size=[3, 3],
stride=[1, 1],
padding=[0, 0],
dilation=[1, 1],
groups=1,
param_attr='x2paddle_17',
name='x2paddle_46',
bias_attr='x2paddle_18')
x2paddle_47 = fluid.layers.relu(x2paddle_46, name='x2paddle_47')
x2paddle_48 = fluid.layers.pool2d(
x2paddle_47,
pool_size=[2, 2],
pool_type='max',
pool_stride=[2, 2],
pool_padding=[0, 0],
ceil_mode=False,
name='x2paddle_48',
exclusive=False)
x2paddle_49 = fluid.layers.pad2d(
x2paddle_48, pad_value=0.0, mode='reflect', paddings=[1, 1, 1, 1], name='x2paddle_49')
x2paddle_50 = fluid.layers.conv2d(
x2paddle_49,
num_filters=512,
filter_size=[3, 3],
stride=[1, 1],
padding=[0, 0],
dilation=[1, 1],
groups=1,
param_attr='x2paddle_19',
name='x2paddle_50',
bias_attr='x2paddle_20')
x2paddle_51 = fluid.layers.relu(x2paddle_50, name='x2paddle_51')
return x2paddle_0, x2paddle_51
......@@ -2,32 +2,38 @@
from __future__ import absolute_import
from __future__ import division
import argparse
import ast
import copy
import time
import os
import argparse
import time
import numpy as np
import paddle.fluid as fluid
import paddlehub as hub
from paddle.fluid.core import PaddleTensor, AnalysisConfig, create_paddle_predictor
from paddlehub.module.module import moduleinfo, runnable, serving
from stylepro_artistic.encoder_network import encoder_net
from stylepro_artistic.decoder_network import decoder_net
from stylepro_artistic.processor import postprocess, fr, cv2_to_base64, base64_to_cv2
import paddle
from paddle.inference import Config
from paddle.inference import create_predictor
from stylepro_artistic.data_feed import reader
from stylepro_artistic.processor import base64_to_cv2
from stylepro_artistic.processor import cv2_to_base64
from stylepro_artistic.processor import fr
from stylepro_artistic.processor import postprocess
import paddlehub as hub
from paddlehub.module.module import moduleinfo
from paddlehub.module.module import runnable
from paddlehub.module.module import serving
# coding=utf-8
@moduleinfo(
name="stylepro_artistic",
version="1.0.2",
version="1.0.3",
type="cv/style_transfer",
summary="StylePro Artistic is an algorithm for Arbitrary image style, which is parameter-free, fast yet effective.",
author="baidu-bdl",
author_email="")
class StyleProjection(hub.Module):
def _initialize(self):
self.pretrained_encoder_net = os.path.join(self.directory, "style_projection_enc")
self.pretrained_decoder_net = os.path.join(self.directory, "style_projection_dec")
......@@ -38,15 +44,15 @@ class StyleProjection(hub.Module):
predictor config setting
"""
# encoder
cpu_config_enc = AnalysisConfig(self.pretrained_encoder_net)
cpu_config_enc = Config(self.pretrained_encoder_net)
cpu_config_enc.disable_glog_info()
cpu_config_enc.disable_gpu()
self.cpu_predictor_enc = create_paddle_predictor(cpu_config_enc)
self.cpu_predictor_enc = create_predictor(cpu_config_enc)
# decoder
cpu_config_dec = AnalysisConfig(self.pretrained_decoder_net)
cpu_config_dec = Config(self.pretrained_decoder_net)
cpu_config_dec.disable_glog_info()
cpu_config_dec.disable_gpu()
self.cpu_predictor_dec = create_paddle_predictor(cpu_config_dec)
self.cpu_predictor_dec = create_predictor(cpu_config_dec)
try:
_places = os.environ["CUDA_VISIBLE_DEVICES"]
......@@ -56,15 +62,15 @@ class StyleProjection(hub.Module):
use_gpu = False
if use_gpu:
# encoder
gpu_config_enc = AnalysisConfig(self.pretrained_encoder_net)
gpu_config_enc = Config(self.pretrained_encoder_net)
gpu_config_enc.disable_glog_info()
gpu_config_enc.enable_use_gpu(memory_pool_init_size_mb=1000, device_id=0)
self.gpu_predictor_enc = create_paddle_predictor(gpu_config_enc)
self.gpu_predictor_enc = create_predictor(gpu_config_enc)
# decoder
gpu_config_dec = AnalysisConfig(self.pretrained_decoder_net)
gpu_config_dec = Config(self.pretrained_decoder_net)
gpu_config_dec.disable_glog_info()
gpu_config_dec.enable_use_gpu(memory_pool_init_size_mb=1000, device_id=0)
self.gpu_predictor_dec = create_paddle_predictor(gpu_config_dec)
self.gpu_predictor_dec = create_predictor(gpu_config_dec)
def style_transfer(self,
images=None,
......@@ -102,22 +108,36 @@ class StyleProjection(hub.Module):
"Environment Variable CUDA_VISIBLE_DEVICES is not set correctly. If you wanna use gpu, please set CUDA_VISIBLE_DEVICES as cuda_device_id."
)
predictor_enc = self.gpu_predictor_enc if use_gpu else self.cpu_predictor_enc
input_names_enc = predictor_enc.get_input_names()
input_handle_enc = predictor_enc.get_input_handle(input_names_enc[0])
output_names_enc = predictor_enc.get_output_names()
output_handle_enc = predictor_enc.get_output_handle(output_names_enc[0])
predictor_dec = self.gpu_predictor_dec if use_gpu else self.cpu_predictor_dec
input_names_dec = predictor_dec.get_input_names()
input_handle_dec = predictor_dec.get_input_handle(input_names_dec[0])
output_names_dec = predictor_dec.get_output_names()
output_handle_dec = predictor_dec.get_output_handle(output_names_dec[0])
im_output = []
for component, w, h in reader(images, paths):
content = PaddleTensor(component['content_arr'].copy())
content_feats = self.gpu_predictor_enc.run([content]) if use_gpu else self.cpu_predictor_enc.run([content])
input_handle_enc.copy_from_cpu(component['content_arr'])
predictor_enc.run()
content_feats = output_handle_enc.copy_to_cpu()
accumulate = np.zeros((3, 512, 512))
for idx, style_arr in enumerate(component['styles_arr_list']):
style = PaddleTensor(style_arr.copy())
# encode
style_feats = self.gpu_predictor_enc.run([style]) if use_gpu else self.cpu_predictor_enc.run([style])
fr_feats = fr(content_feats[0].as_ndarray(), style_feats[0].as_ndarray(), alpha)
fr_feats = PaddleTensor(fr_feats.copy())
input_handle_enc.copy_from_cpu(style_arr)
predictor_enc.run()
style_feats = output_handle_enc.copy_to_cpu()
fr_feats = fr(content_feats, style_feats, alpha)
# decode
predict_outputs = self.gpu_predictor_dec.run([fr_feats]) if use_gpu else self.cpu_predictor_dec.run(
[fr_feats])
input_handle_dec.copy_from_cpu(fr_feats)
predictor_dec.run()
predict_outputs = output_handle_dec.copy_to_cpu()
# interpolation
accumulate += predict_outputs[0].as_ndarray()[0] * component['style_interpolation_weights'][idx]
accumulate += predict_outputs[0] * component['style_interpolation_weights'][idx]
# postprocess
save_im_name = 'ndarray_{}.jpg'.format(time.time())
result = postprocess(accumulate, output_dir, save_im_name, visualization, size=(w, h))
......@@ -134,39 +154,37 @@ class StyleProjection(hub.Module):
if combined:
model_filename = "__model__" if not model_filename else model_filename
params_filename = "__params__" if not params_filename else params_filename
place = fluid.CPUPlace()
exe = fluid.Executor(place)
place = paddle.CPUPlace()
exe = paddle.Executor(place)
encode_program, encode_feeded_var_names, encode_target_vars = fluid.io.load_inference_model(
encode_program, encode_feeded_var_names, encode_target_vars = paddle.static.load_inference_model(
dirname=self.pretrained_encoder_net, executor=exe)
fluid.io.save_inference_model(
dirname=dirname,
main_program=encode_program,
executor=exe,
feeded_var_names=encode_feeded_var_names,
target_vars=encode_target_vars,
model_filename=model_filename,
params_filename=params_filename)
paddle.static.save_inference_model(dirname=dirname,
main_program=encode_program,
executor=exe,
feeded_var_names=encode_feeded_var_names,
target_vars=encode_target_vars,
model_filename=model_filename,
params_filename=params_filename)
def _save_decode_model(self, dirname, model_filename=None, params_filename=None, combined=True):
if combined:
model_filename = "__model__" if not model_filename else model_filename
params_filename = "__params__" if not params_filename else params_filename
place = fluid.CPUPlace()
exe = fluid.Executor(place)
place = paddle.CPUPlace()
exe = paddle.Executor(place)
decode_program, decode_feeded_var_names, decode_target_vars = fluid.io.load_inference_model(
decode_program, decode_feeded_var_names, decode_target_vars = paddle.static.load_inference_model(
dirname=self.pretrained_decoder_net, executor=exe)
fluid.io.save_inference_model(
dirname=dirname,
main_program=decode_program,
executor=exe,
feeded_var_names=decode_feeded_var_names,
target_vars=decode_target_vars,
model_filename=model_filename,
params_filename=params_filename)
paddle.static.save_inference_model(dirname=dirname,
main_program=decode_program,
executor=exe,
feeded_var_names=decode_feeded_var_names,
target_vars=decode_target_vars,
model_filename=model_filename,
params_filename=params_filename)
@serving
def serving_method(self, images, **kwargs):
......@@ -186,11 +204,10 @@ class StyleProjection(hub.Module):
"""
Run as a command.
"""
self.parser = argparse.ArgumentParser(
description="Run the {} module.".format(self.name),
prog='hub run {}'.format(self.name),
usage='%(prog)s',
add_help=True)
self.parser = argparse.ArgumentParser(description="Run the {} module.".format(self.name),
prog='hub run {}'.format(self.name),
usage='%(prog)s',
add_help=True)
self.arg_input_group = self.parser.add_argument_group(title="Input options", description="Input data. Required")
self.arg_config_group = self.parser.add_argument_group(
......@@ -202,20 +219,29 @@ class StyleProjection(hub.Module):
paths = [{'content': args.content, 'styles': args.styles.split(',')}]
else:
paths = [{'content': args.content, 'styles': args.styles.split(','), 'weights': list(args.weights)}]
results = self.style_transfer(
paths=paths, alpha=args.alpha, use_gpu=args.use_gpu, output_dir=args.output_dir, visualization=True)
results = self.style_transfer(paths=paths,
alpha=args.alpha,
use_gpu=args.use_gpu,
output_dir=args.output_dir,
visualization=True)
return results
def add_module_config_arg(self):
"""
Add the command config options.
"""
self.arg_config_group.add_argument(
'--use_gpu', type=ast.literal_eval, default=False, help="whether use GPU or not")
self.arg_config_group.add_argument(
'--output_dir', type=str, default='transfer_result', help="The directory to save output images.")
self.arg_config_group.add_argument(
'--visualization', type=ast.literal_eval, default=True, help="whether to save output as images.")
self.arg_config_group.add_argument('--use_gpu',
type=ast.literal_eval,
default=False,
help="whether use GPU or not")
self.arg_config_group.add_argument('--output_dir',
type=str,
default='transfer_result',
help="The directory to save output images.")
self.arg_config_group.add_argument('--visualization',
type=ast.literal_eval,
default=True,
help="whether to save output as images.")
def add_module_input_arg(self):
"""
......@@ -223,7 +249,11 @@ class StyleProjection(hub.Module):
"""
self.arg_input_group.add_argument('--content', type=str, help="path to content.")
self.arg_input_group.add_argument('--styles', type=str, help="path to styles.")
self.arg_input_group.add_argument(
'--weights', type=ast.literal_eval, default=None, help="interpolation weights of styles.")
self.arg_config_group.add_argument(
'--alpha', type=ast.literal_eval, default=1, help="The parameter to control the transform degree.")
self.arg_input_group.add_argument('--weights',
type=ast.literal_eval,
default=None,
help="interpolation weights of styles.")
self.arg_config_group.add_argument('--alpha',
type=ast.literal_eval,
default=1,
help="The parameter to control the tranform degree.")
# resnet50_vd_animals
|Model Name|resnet50_vd_animals|
| :--- | :---: |
|Category|Image - Image Classification|
|Network|ResNet50_vd|
|Dataset|Baidu self-built animal dataset|
......@@ -31,13 +31,13 @@
- ### 2. Installation
- ```shell
$ hub install resnet50_vd_animals==1.0.0
$ hub install resnet50_vd_animals
```
- If you run into problems during installation, please refer to: [Windows installation for beginners](../../../../docs/docs_ch/get_start/windows_quickstart.md)
| [Linux installation for beginners](../../../../docs/docs_ch/get_start/linux_quickstart.md) | [macOS installation for beginners](../../../../docs/docs_ch/get_start/mac_quickstart.md)
## III. Module API Prediction
- ### 1. Command-line Prediction
- ```
......@@ -167,3 +167,11 @@
* 1.0.0
First release
* 1.0.1
Remove fluid api
- ```shell
$ hub install resnet50_vd_animals==1.0.1
```
......@@ -171,3 +171,10 @@
First release
* 1.0.1
Remove fluid api
- ```shell
$ hub install resnet50_vd_animals==1.0.1
```
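A short usage sketch for the updated classifier, assuming the module is installed and `/PATH/TO/IMAGE` is replaced with a real image path:

```python
# Usage sketch for resnet50_vd_animals; '/PATH/TO/IMAGE' is a placeholder.
import cv2
import paddlehub as hub

classifier = hub.Module(name="resnet50_vd_animals")
result = classifier.classification(images=[cv2.imread('/PATH/TO/IMAGE')], top_k=3)
print(result)   # one prediction result per input image
```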
......@@ -2,22 +2,23 @@
from __future__ import absolute_import
from __future__ import division
import ast
import argparse
import ast
import os
import numpy as np
import paddle.fluid as fluid
import paddlehub as hub
import paddle
from paddle.inference import Config
from paddle.inference import create_predictor
from resnet50_vd_animals.data_feed import reader
from resnet50_vd_animals.processor import base64_to_cv2
from resnet50_vd_animals.processor import postprocess
from paddlehub.module.module import moduleinfo, runnable, serving
import paddlehub as hub
from paddlehub.common.paddle_helper import add_vars_prefix
from resnet50_vd_animals.processor import postprocess, base64_to_cv2
from resnet50_vd_animals.data_feed import reader
from resnet50_vd_animals.resnet_vd import ResNet50_vd
from paddlehub.module.module import moduleinfo
from paddlehub.module.module import runnable
from paddlehub.module.module import serving
@moduleinfo(
......@@ -26,8 +27,9 @@ from resnet50_vd_animals.resnet_vd import ResNet50_vd
author="baidu-vis",
author_email="",
summary="ResNet50vd is a image classfication model, this module is trained with Baidu's self-built animals dataset.",
version="1.0.0")
version="1.0.1")
class ResNet50vdAnimals(hub.Module):
def _initialize(self):
self.default_pretrained_model_path = os.path.join(self.directory, "model")
label_file = os.path.join(self.directory, "label_list.txt")
......@@ -97,54 +99,6 @@ class ResNet50vdAnimals(hub.Module):
xpu_config.enable_xpu(100)
self.xpu_predictor = create_predictor(xpu_config)
def context(self, trainable=True, pretrained=True):
"""context for transfer learning.
Args:
trainable (bool): Set parameters in program to be trainable.
pretrained (bool) : Whether to load pretrained model.
Returns:
inputs (dict): key is 'image', corresponding vaule is image tensor.
outputs (dict): key is :
'classification', corresponding value is the result of classification.
'feature_map', corresponding value is the result of the layer before the fully connected layer.
context_prog (fluid.Program): program for transfer learning.
"""
context_prog = fluid.Program()
startup_prog = fluid.Program()
with fluid.program_guard(context_prog, startup_prog):
with fluid.unique_name.guard():
image = fluid.layers.data(name="image", shape=[3, 224, 224], dtype="float32")
resnet_vd = ResNet50_vd()
output, feature_map = resnet_vd.net(input=image, class_dim=len(self.label_list))
name_prefix = '@HUB_{}@'.format(self.name)
inputs = {'image': name_prefix + image.name}
outputs = {'classification': name_prefix + output.name, 'feature_map': name_prefix + feature_map.name}
add_vars_prefix(context_prog, name_prefix)
add_vars_prefix(startup_prog, name_prefix)
global_vars = context_prog.global_block().vars
inputs = {key: global_vars[value] for key, value in inputs.items()}
outputs = {key: global_vars[value] for key, value in outputs.items()}
place = fluid.CPUPlace()
exe = fluid.Executor(place)
# pretrained
if pretrained:
def _if_exist(var):
b = os.path.exists(os.path.join(self.default_pretrained_model_path, var.name))
return b
fluid.io.load_vars(exe, self.default_pretrained_model_path, context_prog, predicate=_if_exist)
else:
exe.run(startup_prog)
# trainable
for param in context_prog.global_block().iter_parameters():
param.trainable = trainable
return inputs, outputs, context_prog
def classification(self, images=None, paths=None, batch_size=1, use_gpu=False, top_k=1, use_device=None):
"""
API for image classification.
......@@ -215,19 +169,19 @@ class ResNet50vdAnimals(hub.Module):
if combined:
model_filename = "__model__" if not model_filename else model_filename
params_filename = "__params__" if not params_filename else params_filename
place = fluid.CPUPlace()
exe = fluid.Executor(place)
place = paddle.CPUPlace()
exe = paddle.Executor(place)
program, feeded_var_names, target_vars = fluid.io.load_inference_model(
program, feeded_var_names, target_vars = paddle.static.load_inference_model(
dirname=self.default_pretrained_model_path, executor=exe)
fluid.io.save_inference_model(dirname=dirname,
main_program=program,
executor=exe,
feeded_var_names=feeded_var_names,
target_vars=target_vars,
model_filename=model_filename,
params_filename=params_filename)
paddle.static.save_inference_model(dirname=dirname,
main_program=program,
executor=exe,
feeded_var_names=feeded_var_names,
target_vars=target_vars,
model_filename=model_filename,
params_filename=params_filename)
@serving
def serving_method(self, images, **kwargs):
......
#copyright (c) 2019 PaddlePaddle Authors. All Rights Reserve.
#
#Licensed under the Apache License, Version 2.0 (the "License");
#you may not use this file except in compliance with the License.
#You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#Unless required by applicable law or agreed to in writing, software
#distributed under the License is distributed on an "AS IS" BASIS,
#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#See the License for the specific language governing permissions and
#limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import paddle
import paddle.fluid as fluid
from paddle.fluid.param_attr import ParamAttr
__all__ = ["ResNet", "ResNet50_vd", "ResNet101_vd", "ResNet152_vd", "ResNet200_vd"]
train_parameters = {
"input_size": [3, 224, 224],
"input_mean": [0.485, 0.456, 0.406],
"input_std": [0.229, 0.224, 0.225],
"learning_strategy": {
"name": "piecewise_decay",
"batch_size": 256,
"epochs": [30, 60, 90],
"steps": [0.1, 0.01, 0.001, 0.0001]
}
}
class ResNet():
def __init__(self, layers=50, is_3x3=False):
self.params = train_parameters
self.layers = layers
self.is_3x3 = is_3x3
def net(self, input, class_dim=1000):
is_3x3 = self.is_3x3
layers = self.layers
supported_layers = [50, 101, 152, 200]
assert layers in supported_layers, \
"supported layers are {} but input layer is {}".format(supported_layers, layers)
if layers == 50:
depth = [3, 4, 6, 3]
elif layers == 101:
depth = [3, 4, 23, 3]
elif layers == 152:
depth = [3, 8, 36, 3]
elif layers == 200:
depth = [3, 12, 48, 3]
num_filters = [64, 128, 256, 512]
if is_3x3 == False:
conv = self.conv_bn_layer(input=input, num_filters=64, filter_size=7, stride=2, act='relu')
else:
conv = self.conv_bn_layer(input=input, num_filters=32, filter_size=3, stride=2, act='relu', name='conv1_1')
conv = self.conv_bn_layer(input=conv, num_filters=32, filter_size=3, stride=1, act='relu', name='conv1_2')
conv = self.conv_bn_layer(input=conv, num_filters=64, filter_size=3, stride=1, act='relu', name='conv1_3')
conv = fluid.layers.pool2d(input=conv, pool_size=3, pool_stride=2, pool_padding=1, pool_type='max')
for block in range(len(depth)):
for i in range(depth[block]):
if layers in [101, 152, 200] and block == 2:
if i == 0:
conv_name = "res" + str(block + 2) + "a"
else:
conv_name = "res" + str(block + 2) + "b" + str(i)
else:
conv_name = "res" + str(block + 2) + chr(97 + i)
conv = self.bottleneck_block(
input=conv,
num_filters=num_filters[block],
stride=2 if i == 0 and block != 0 else 1,
if_first=block == 0,
name=conv_name)
pool = fluid.layers.pool2d(input=conv, pool_size=7, pool_type='avg', global_pooling=True)
stdv = 1.0 / math.sqrt(pool.shape[1] * 1.0)
out = fluid.layers.fc(
input=pool,
size=class_dim,
param_attr=fluid.param_attr.ParamAttr(initializer=fluid.initializer.Uniform(-stdv, stdv)))
return out, pool
def conv_bn_layer(self, input, num_filters, filter_size, stride=1, groups=1, act=None, name=None):
conv = fluid.layers.conv2d(
input=input,
num_filters=num_filters,
filter_size=filter_size,
stride=stride,
padding=(filter_size - 1) // 2,
groups=groups,
act=None,
param_attr=ParamAttr(name=name + "_weights"),
bias_attr=False)
if name == "conv1":
bn_name = "bn_" + name
else:
bn_name = "bn" + name[3:]
return fluid.layers.batch_norm(
input=conv,
act=act,
param_attr=ParamAttr(name=bn_name + '_scale'),
bias_attr=ParamAttr(bn_name + '_offset'),
moving_mean_name=bn_name + '_mean',
moving_variance_name=bn_name + '_variance')
def conv_bn_layer_new(self, input, num_filters, filter_size, stride=1, groups=1, act=None, name=None):
pool = fluid.layers.pool2d(input=input, pool_size=2, pool_stride=2, pool_padding=0, pool_type='avg')
conv = fluid.layers.conv2d(
input=pool,
num_filters=num_filters,
filter_size=filter_size,
stride=1,
padding=(filter_size - 1) // 2,
groups=groups,
act=None,
param_attr=ParamAttr(name=name + "_weights"),
bias_attr=False)
if name == "conv1":
bn_name = "bn_" + name
else:
bn_name = "bn" + name[3:]
return fluid.layers.batch_norm(
input=conv,
act=act,
param_attr=ParamAttr(name=bn_name + '_scale'),
bias_attr=ParamAttr(bn_name + '_offset'),
moving_mean_name=bn_name + '_mean',
moving_variance_name=bn_name + '_variance')
def shortcut(self, input, ch_out, stride, name, if_first=False):
ch_in = input.shape[1]
if ch_in != ch_out or stride != 1:
if if_first:
return self.conv_bn_layer(input, ch_out, 1, stride, name=name)
else:
return self.conv_bn_layer_new(input, ch_out, 1, stride, name=name)
else:
return input
def bottleneck_block(self, input, num_filters, stride, name, if_first):
conv0 = self.conv_bn_layer(
input=input, num_filters=num_filters, filter_size=1, act='relu', name=name + "_branch2a")
conv1 = self.conv_bn_layer(
input=conv0, num_filters=num_filters, filter_size=3, stride=stride, act='relu', name=name + "_branch2b")
conv2 = self.conv_bn_layer(
input=conv1, num_filters=num_filters * 4, filter_size=1, act=None, name=name + "_branch2c")
short = self.shortcut(input, num_filters * 4, stride, if_first=if_first, name=name + "_branch1")
return fluid.layers.elementwise_add(x=short, y=conv2, act='relu')
def ResNet50_vd():
model = ResNet(layers=50, is_3x3=True)
return model
def ResNet101_vd():
model = ResNet(layers=101, is_3x3=True)
return model
def ResNet152_vd():
model = ResNet(layers=152, is_3x3=True)
return model
def ResNet200_vd():
model = ResNet(layers=200, is_3x3=True)
return model
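`resnet_vd.py` above is the fluid graph-definition script whose import (together with the `context()` transfer-learning method) is removed from `module.py` in this commit; the module now relies on the exported inference model alone. For reference only, its `conv_bn_layer` pattern corresponds roughly to the following `paddle.nn` sketch, which is not part of the commit:

```python
# Illustrative paddle.nn counterpart of conv_bn_layer above; not part of the commit.
import paddle
import paddle.nn as nn

class ConvBNLayer(nn.Layer):
    def __init__(self, in_channels, out_channels, kernel_size, stride=1, groups=1, act=None):
        super().__init__()
        self.conv = nn.Conv2D(in_channels, out_channels, kernel_size,
                              stride=stride, padding=(kernel_size - 1) // 2,
                              groups=groups, bias_attr=False)
        self.bn = nn.BatchNorm2D(out_channels)
        self.act = nn.ReLU() if act == 'relu' else None

    def forward(self, x):
        x = self.bn(self.conv(x))
        return self.act(x) if self.act is not None else x

y = ConvBNLayer(3, 64, 7, stride=2, act='relu')(paddle.rand([1, 3, 224, 224]))
```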
......@@ -163,8 +163,10 @@
First release
* 1.2.0
* 1.2.1
Remove fluid api
- ```shell
$ hub install pyramidbox_lite_mobile==1.2.0
$ hub install pyramidbox_lite_mobile==1.2.1
```
......@@ -162,8 +162,10 @@
First release
* 1.2.0
* 1.2.1
Remove fluid api
- ```shell
$ hub install pyramidbox_lite_mobile==1.2.0
$ hub install pyramidbox_lite_mobile==1.2.1
```
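A short usage sketch for the updated face detector, assuming the module is installed and `face.jpg` is a placeholder image path:

```python
# Usage sketch for pyramidbox_lite_mobile; 'face.jpg' is a placeholder path.
import paddlehub as hub

face_detector = hub.Module(name="pyramidbox_lite_mobile")
results = face_detector.face_detection(
    paths=['face.jpg'],
    shrink=0.5,               # resize factor applied before detection
    confs_threshold=0.6,      # keep boxes above this confidence
    visualization=True,
    output_dir='detection_result')
```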
......@@ -2,28 +2,32 @@
from __future__ import absolute_import
from __future__ import division
import ast
import argparse
import ast
import os
import numpy as np
import paddle.fluid as fluid
import paddlehub as hub
from paddle.fluid.core import PaddleTensor, AnalysisConfig, create_paddle_predictor
from paddlehub.module.module import moduleinfo, runnable, serving
import paddle
from paddle.inference import Config
from paddle.inference import create_predictor
from pyramidbox_lite_mobile.data_feed import reader
from pyramidbox_lite_mobile.processor import postprocess, base64_to_cv2
from pyramidbox_lite_mobile.processor import base64_to_cv2
from pyramidbox_lite_mobile.processor import postprocess
import paddlehub as hub
from paddlehub.module.module import moduleinfo
from paddlehub.module.module import runnable
from paddlehub.module.module import serving
@moduleinfo(
name="pyramidbox_lite_mobile",
type="CV/face_detection",
author="baidu-vis",
author_email="",
summary="PyramidBox-Lite-Mobile is a high-performance face detection model.",
version="1.2.0")
@moduleinfo(name="pyramidbox_lite_mobile",
type="CV/face_detection",
author="baidu-vis",
author_email="",
summary="PyramidBox-Lite-Mobile is a high-performance face detection model.",
version="1.2.1")
class PyramidBoxLiteMobile(hub.Module):
def _initialize(self):
self.default_pretrained_model_path = os.path.join(self.directory, "pyramidbox_lite_mobile_face_detection")
self._set_config()
......@@ -33,10 +37,10 @@ class PyramidBoxLiteMobile(hub.Module):
"""
predictor config setting
"""
cpu_config = AnalysisConfig(self.default_pretrained_model_path)
cpu_config = Config(self.default_pretrained_model_path)
cpu_config.disable_glog_info()
cpu_config.disable_gpu()
self.cpu_predictor = create_paddle_predictor(cpu_config)
self.cpu_predictor = create_predictor(cpu_config)
try:
_places = os.environ["CUDA_VISIBLE_DEVICES"]
......@@ -45,10 +49,10 @@ class PyramidBoxLiteMobile(hub.Module):
except:
use_gpu = False
if use_gpu:
gpu_config = AnalysisConfig(self.default_pretrained_model_path)
gpu_config = Config(self.default_pretrained_model_path)
gpu_config.disable_glog_info()
gpu_config.enable_use_gpu(memory_pool_init_size_mb=1000, device_id=0)
self.gpu_predictor = create_paddle_predictor(gpu_config)
self.gpu_predictor = create_predictor(gpu_config)
def face_detection(self,
images=None,
......@@ -98,18 +102,26 @@ class PyramidBoxLiteMobile(hub.Module):
# process one by one
for element in reader(images, paths, shrink):
image = np.expand_dims(element['image'], axis=0).astype('float32')
image_tensor = PaddleTensor(image.copy())
data_out = self.gpu_predictor.run([image_tensor]) if use_gpu else self.cpu_predictor.run([image_tensor])
out = postprocess(
data_out=data_out[0].as_ndarray(),
org_im=element['org_im'],
org_im_path=element['org_im_path'],
image_width=element['image_width'],
image_height=element['image_height'],
output_dir=output_dir,
visualization=visualization,
shrink=shrink,
confs_threshold=confs_threshold)
predictor = self.gpu_predictor if use_gpu else self.cpu_predictor
input_names = predictor.get_input_names()
input_handle = predictor.get_input_handle(input_names[0])
input_handle.copy_from_cpu(image)
predictor.run()
output_names = predictor.get_output_names()
output_handle = predictor.get_output_handle(output_names[0])
output_data = output_handle.copy_to_cpu()
out = postprocess(data_out=output_data,
org_im=element['org_im'],
org_im_path=element['org_im_path'],
image_width=element['image_width'],
image_height=element['image_height'],
output_dir=output_dir,
visualization=visualization,
shrink=shrink,
confs_threshold=confs_threshold)
res.append(out)
return res
......@@ -117,23 +129,21 @@ class PyramidBoxLiteMobile(hub.Module):
if combined:
model_filename = "__model__" if not model_filename else model_filename
params_filename = "__params__" if not params_filename else params_filename
place = fluid.CPUPlace()
exe = fluid.Executor(place)
place = paddle.CPUPlace()
exe = paddle.Executor(place)
program, feeded_var_names, target_vars = fluid.io.load_inference_model(
program, feeded_var_names, target_vars = paddle.static.load_inference_model(
dirname=self.default_pretrained_model_path, executor=exe)
var = program.global_block().vars['detection_output_0.tmp_1']
var.desc.set_dtype(fluid.core.VarDesc.VarType.INT32)
fluid.io.save_inference_model(
dirname=dirname,
main_program=program,
executor=exe,
feeded_var_names=feeded_var_names,
target_vars=target_vars,
model_filename=model_filename,
params_filename=params_filename)
paddle.static.save_inference_model(dirname=dirname,
main_program=program,
executor=exe,
feeded_var_names=feeded_var_names,
target_vars=target_vars,
model_filename=model_filename,
params_filename=params_filename)
@serving
def serving_method(self, images, **kwargs):
......@@ -149,36 +159,40 @@ class PyramidBoxLiteMobile(hub.Module):
"""
Run as a command.
"""
self.parser = argparse.ArgumentParser(
description="Run the {} module.".format(self.name),
prog='hub run {}'.format(self.name),
usage='%(prog)s',
add_help=True)
self.parser = argparse.ArgumentParser(description="Run the {} module.".format(self.name),
prog='hub run {}'.format(self.name),
usage='%(prog)s',
add_help=True)
self.arg_input_group = self.parser.add_argument_group(title="Input options", description="Input data. Required")
self.arg_config_group = self.parser.add_argument_group(
title="Config options", description="Run configuration for controlling module behavior, not required.")
self.add_module_config_arg()
self.add_module_input_arg()
args = self.parser.parse_args(argvs)
results = self.face_detection(
paths=[args.input_path],
use_gpu=args.use_gpu,
output_dir=args.output_dir,
visualization=args.visualization,
shrink=args.shrink,
confs_threshold=args.confs_threshold)
results = self.face_detection(paths=[args.input_path],
use_gpu=args.use_gpu,
output_dir=args.output_dir,
visualization=args.visualization,
shrink=args.shrink,
confs_threshold=args.confs_threshold)
return results
def add_module_config_arg(self):
"""
Add the command config options.
"""
self.arg_config_group.add_argument(
'--use_gpu', type=ast.literal_eval, default=False, help="whether use GPU or not")
self.arg_config_group.add_argument(
'--output_dir', type=str, default='detection_result', help="The directory to save output images.")
self.arg_config_group.add_argument(
'--visualization', type=ast.literal_eval, default=False, help="whether to save output as images.")
self.arg_config_group.add_argument('--use_gpu',
type=ast.literal_eval,
default=False,
help="whether use GPU or not")
self.arg_config_group.add_argument('--output_dir',
type=str,
default='detection_result',
help="The directory to save output images.")
self.arg_config_group.add_argument('--visualization',
type=ast.literal_eval,
default=False,
help="whether to save output as images.")
def add_module_input_arg(self):
"""
......@@ -190,5 +204,7 @@ class PyramidBoxLiteMobile(hub.Module):
type=ast.literal_eval,
default=0.5,
help="resize the image to shrink * original_shape before feeding into network.")
self.arg_input_group.add_argument(
'--confs_threshold', type=ast.literal_eval, default=0.6, help="confidence threshold.")
self.arg_input_group.add_argument('--confs_threshold',
type=ast.literal_eval,
default=0.6,
help="confidence threshold.")
......@@ -208,7 +208,10 @@
First release
* 1.3.0
* 1.3.1
Remove fluid api
- ```shell
$ hub install pyramidbox_lite_mobile_mask==1.3.0
$ hub install pyramidbox_lite_mobile_mask==1.3.1
```
......@@ -184,7 +184,10 @@
First release
* 1.3.0
* 1.3.1
Remove fluid api
- ```shell
$ hub install pyramidbox_lite_mobile_mask==1.3.0
$ hub install pyramidbox_lite_mobile_mask==1.3.1
```
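A short usage sketch for the mask detector, assuming the module is installed and `crowd.jpg` is a placeholder image path; swapping the underlying face detector uses the `set_face_detector_module` API shown in the diff below:

```python
# Usage sketch for pyramidbox_lite_mobile_mask; 'crowd.jpg' is a placeholder path.
import paddlehub as hub

mask_detector = hub.Module(name="pyramidbox_lite_mobile_mask")
# Optionally swap in another PaddleHub face detector before running:
# mask_detector.set_face_detector_module(hub.Module(name="pyramidbox_lite_server"))
results = mask_detector.face_detection(paths=['crowd.jpg'], visualization=True)
```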
......@@ -2,18 +2,22 @@
from __future__ import absolute_import
from __future__ import division
import ast
import argparse
import ast
import os
import numpy as np
import paddle.fluid as fluid
import paddlehub as hub
from paddle.fluid.core import PaddleTensor, AnalysisConfig, create_paddle_predictor
from paddlehub.module.module import moduleinfo, runnable, serving
import paddle
from paddle.inference import Config
from paddle.inference import create_predictor
from pyramidbox_lite_mobile_mask.data_feed import reader
from pyramidbox_lite_mobile_mask.processor import postprocess, base64_to_cv2
from pyramidbox_lite_mobile_mask.processor import base64_to_cv2
from pyramidbox_lite_mobile_mask.processor import postprocess
import paddlehub as hub
from paddlehub.module.module import moduleinfo
from paddlehub.module.module import runnable
from paddlehub.module.module import serving
@moduleinfo(
......@@ -23,8 +27,9 @@ from pyramidbox_lite_mobile_mask.processor import postprocess, base64_to_cv2
author_email="",
summary=
"Pyramidbox-Lite-Mobile-Mask is a high-performance face detection model used to detect whether people wear masks.",
version="1.3.0")
version="1.3.1")
class PyramidBoxLiteMobileMask(hub.Module):
def _initialize(self, face_detector_module=None):
"""
Args:
......@@ -42,10 +47,10 @@ class PyramidBoxLiteMobileMask(hub.Module):
"""
predictor config setting
"""
cpu_config = AnalysisConfig(self.default_pretrained_model_path)
cpu_config = Config(self.default_pretrained_model_path)
cpu_config.disable_glog_info()
cpu_config.disable_gpu()
self.cpu_predictor = create_paddle_predictor(cpu_config)
self.cpu_predictor = create_predictor(cpu_config)
try:
_places = os.environ["CUDA_VISIBLE_DEVICES"]
......@@ -54,10 +59,10 @@ class PyramidBoxLiteMobileMask(hub.Module):
except:
use_gpu = False
if use_gpu:
gpu_config = AnalysisConfig(self.default_pretrained_model_path)
gpu_config = Config(self.default_pretrained_model_path)
gpu_config.disable_glog_info()
gpu_config.enable_use_gpu(memory_pool_init_size_mb=1000, device_id=0)
self.gpu_predictor = create_paddle_predictor(gpu_config)
self.gpu_predictor = create_predictor(gpu_config)
def set_face_detector_module(self, face_detector_module):
"""
......@@ -146,12 +151,18 @@ class PyramidBoxLiteMobileMask(hub.Module):
pass
image_arr = np.squeeze(np.array(batch_data), axis=1)
image_tensor = PaddleTensor(image_arr.copy())
data_out = self.gpu_predictor.run([image_tensor]) if use_gpu else self.cpu_predictor.run([image_tensor])
# len(data_out) == 1
# data_out[0].as_ndarray().shape == (-1, 2)
data_out = data_out[0].as_ndarray()
predict_out = np.concatenate((predict_out, data_out))
predictor = self.gpu_predictor if use_gpu else self.cpu_predictor
input_names = predictor.get_input_names()
input_handle = predictor.get_input_handle(input_names[0])
input_handle.copy_from_cpu(image_arr)
predictor.run()
output_names = predictor.get_output_names()
output_handle = predictor.get_output_handle(output_names[0])
output_data = output_handle.copy_to_cpu()
predict_out = np.concatenate((predict_out, output_data))
predict_out = predict_out[1:]
# postprocess one by one
......@@ -160,13 +171,12 @@ class PyramidBoxLiteMobileMask(hub.Module):
detect_faces_list = [handled['face'] for handled in all_element[i]['preprocessed']]
interval_left = sum(element_image_num[0:i])
interval_right = interval_left + element_image_num[i]
out = postprocess(
confidence_out=predict_out[interval_left:interval_right],
org_im=all_element[i]['org_im'],
org_im_path=all_element[i]['org_im_path'],
detected_faces=detect_faces_list,
output_dir=output_dir,
visualization=visualization)
out = postprocess(confidence_out=predict_out[interval_left:interval_right],
org_im=all_element[i]['org_im'],
org_im_path=all_element[i]['org_im_path'],
detected_faces=detect_faces_list,
output_dir=output_dir,
visualization=visualization)
res.append(out)
return res
......@@ -183,20 +193,19 @@ class PyramidBoxLiteMobileMask(hub.Module):
if combined:
model_filename = "__model__" if not model_filename else model_filename
params_filename = "__params__" if not params_filename else params_filename
place = fluid.CPUPlace()
exe = fluid.Executor(place)
place = paddle.CPUPlace()
exe = paddle.Executor(place)
program, feeded_var_names, target_vars = fluid.io.load_inference_model(
program, feeded_var_names, target_vars = paddle.static.load_inference_model(
dirname=self.default_pretrained_model_path, executor=exe)
fluid.io.save_inference_model(
dirname=dirname,
main_program=program,
executor=exe,
feeded_var_names=feeded_var_names,
target_vars=target_vars,
model_filename=model_filename,
params_filename=params_filename)
paddle.static.save_inference_model(dirname=dirname,
main_program=program,
executor=exe,
feeded_var_names=feeded_var_names,
target_vars=target_vars,
model_filename=model_filename,
params_filename=params_filename)
@serving
def serving_method(self, images, **kwargs):
......@@ -212,36 +221,40 @@ class PyramidBoxLiteMobileMask(hub.Module):
"""
Run as a command.
"""
self.parser = argparse.ArgumentParser(
description="Run the {} module.".format(self.name),
prog='hub run {}'.format(self.name),
usage='%(prog)s',
add_help=True)
self.parser = argparse.ArgumentParser(description="Run the {} module.".format(self.name),
prog='hub run {}'.format(self.name),
usage='%(prog)s',
add_help=True)
self.arg_input_group = self.parser.add_argument_group(title="Input options", description="Input data. Required")
self.arg_config_group = self.parser.add_argument_group(
title="Config options", description="Run configuration for controlling module behavior, not required.")
self.add_module_config_arg()
self.add_module_input_arg()
args = self.parser.parse_args(argvs)
results = self.face_detection(
paths=[args.input_path],
use_gpu=args.use_gpu,
output_dir=args.output_dir,
visualization=args.visualization,
shrink=args.shrink,
confs_threshold=args.confs_threshold)
results = self.face_detection(paths=[args.input_path],
use_gpu=args.use_gpu,
output_dir=args.output_dir,
visualization=args.visualization,
shrink=args.shrink,
confs_threshold=args.confs_threshold)
return results
def add_module_config_arg(self):
"""
Add the command config options.
"""
self.arg_config_group.add_argument(
'--use_gpu', type=ast.literal_eval, default=False, help="whether use GPU or not")
self.arg_config_group.add_argument(
'--output_dir', type=str, default='detection_result', help="The directory to save output images.")
self.arg_config_group.add_argument(
'--visualization', type=ast.literal_eval, default=False, help="whether to save output as images.")
self.arg_config_group.add_argument('--use_gpu',
type=ast.literal_eval,
default=False,
help="whether use GPU or not")
self.arg_config_group.add_argument('--output_dir',
type=str,
default='detection_result',
help="The directory to save output images.")
self.arg_config_group.add_argument('--visualization',
type=ast.literal_eval,
default=False,
help="whether to save output as images.")
def add_module_input_arg(self):
"""
......@@ -253,5 +266,7 @@ class PyramidBoxLiteMobileMask(hub.Module):
type=ast.literal_eval,
default=0.5,
help="resize the image to `shrink * original_shape` before feeding into network.")
self.arg_input_group.add_argument(
'--confs_threshold', type=ast.literal_eval, default=0.6, help="confidence threshold.")
self.arg_input_group.add_argument('--confs_threshold',
type=ast.literal_eval,
default=0.6,
help="confidence threshold.")
......@@ -166,6 +166,11 @@
* 1.2.0
Fix the problem of reading numpy data
* 1.2.1
Remove fluid api
- ```shell
$ hub install pyramidbox_lite_server==1.2.0
$ hub install pyramidbox_lite_server==1.2.1
```
......@@ -166,6 +166,11 @@
* 1.2.0
Fix the problem of reading numpy
* 1.2.1
Remove fluid api
- ```shell
$ hub install pyramidbox_lite_server==1.2.0
$ hub install pyramidbox_lite_server==1.2.1
```
......@@ -2,28 +2,32 @@
from __future__ import absolute_import
from __future__ import division
import ast
import argparse
import ast
import os
import numpy as np
import paddle.fluid as fluid
import paddlehub as hub
from paddle.fluid.core import PaddleTensor, AnalysisConfig, create_paddle_predictor
from paddlehub.module.module import moduleinfo, runnable, serving
import paddle
from paddle.inference import Config
from paddle.inference import create_predictor
from pyramidbox_lite_server.data_feed import reader
from pyramidbox_lite_server.processor import postprocess, base64_to_cv2
from pyramidbox_lite_server.processor import base64_to_cv2
from pyramidbox_lite_server.processor import postprocess
import paddlehub as hub
from paddlehub.module.module import moduleinfo
from paddlehub.module.module import runnable
from paddlehub.module.module import serving
@moduleinfo(
name="pyramidbox_lite_server",
type="CV/face_detection",
author="baidu-vis",
author_email="",
summary="PyramidBox-Lite-Server is a high-performance face detection model.",
version="1.2.0")
@moduleinfo(name="pyramidbox_lite_server",
type="CV/face_detection",
author="baidu-vis",
author_email="",
summary="PyramidBox-Lite-Server is a high-performance face detection model.",
version="1.2.1")
class PyramidBoxLiteServer(hub.Module):
def _initialize(self):
self.default_pretrained_model_path = os.path.join(self.directory, "pyramidbox_lite_server_face_detection")
self._set_config()
......@@ -33,10 +37,10 @@ class PyramidBoxLiteServer(hub.Module):
"""
predictor config setting
"""
cpu_config = AnalysisConfig(self.default_pretrained_model_path)
cpu_config = Config(self.default_pretrained_model_path)
cpu_config.disable_glog_info()
cpu_config.disable_gpu()
self.cpu_predictor = create_paddle_predictor(cpu_config)
self.cpu_predictor = create_predictor(cpu_config)
try:
_places = os.environ["CUDA_VISIBLE_DEVICES"]
......@@ -45,10 +49,10 @@ class PyramidBoxLiteServer(hub.Module):
except:
use_gpu = False
if use_gpu:
gpu_config = AnalysisConfig(self.default_pretrained_model_path)
gpu_config = Config(self.default_pretrained_model_path)
gpu_config.disable_glog_info()
gpu_config.enable_use_gpu(memory_pool_init_size_mb=1000, device_id=0)
self.gpu_predictor = create_paddle_predictor(gpu_config)
self.gpu_predictor = create_predictor(gpu_config)
def face_detection(self,
images=None,
......@@ -98,18 +102,26 @@ class PyramidBoxLiteServer(hub.Module):
# process one by one
for element in reader(images, paths, shrink):
image = np.expand_dims(element['image'], axis=0).astype('float32')
image_tensor = PaddleTensor(image.copy())
data_out = self.gpu_predictor.run([image_tensor]) if use_gpu else self.cpu_predictor.run([image_tensor])
out = postprocess(
data_out=data_out[0].as_ndarray(),
org_im=element['org_im'],
org_im_path=element['org_im_path'],
image_width=element['image_width'],
image_height=element['image_height'],
output_dir=output_dir,
visualization=visualization,
shrink=shrink,
confs_threshold=confs_threshold)
predictor = self.gpu_predictor if use_gpu else self.cpu_predictor
input_names = predictor.get_input_names()
input_handle = predictor.get_input_handle(input_names[0])
input_handle.copy_from_cpu(image)
predictor.run()
output_names = predictor.get_output_names()
output_handle = predictor.get_output_handle(output_names[0])
output_data = output_handle.copy_to_cpu()
out = postprocess(data_out=output_data,
org_im=element['org_im'],
org_im_path=element['org_im_path'],
image_width=element['image_width'],
image_height=element['image_height'],
output_dir=output_dir,
visualization=visualization,
shrink=shrink,
confs_threshold=confs_threshold)
res.append(out)
return res
......@@ -117,20 +129,19 @@ class PyramidBoxLiteServer(hub.Module):
if combined:
model_filename = "__model__" if not model_filename else model_filename
params_filename = "__params__" if not params_filename else params_filename
place = fluid.CPUPlace()
exe = fluid.Executor(place)
place = paddle.CPUPlace()
exe = paddle.Executor(place)
program, feeded_var_names, target_vars = fluid.io.load_inference_model(
program, feeded_var_names, target_vars = paddle.static.load_inference_model(
dirname=self.default_pretrained_model_path, executor=exe)
fluid.io.save_inference_model(
dirname=dirname,
main_program=program,
executor=exe,
feeded_var_names=feeded_var_names,
target_vars=target_vars,
model_filename=model_filename,
params_filename=params_filename)
paddle.static.save_inference_model(dirname=dirname,
main_program=program,
executor=exe,
feeded_var_names=feeded_var_names,
target_vars=target_vars,
model_filename=model_filename,
params_filename=params_filename)
@serving
def serving_method(self, images, **kwargs):
......@@ -146,36 +157,40 @@ class PyramidBoxLiteServer(hub.Module):
"""
Run as a command.
"""
self.parser = argparse.ArgumentParser(
description="Run the {} module.".format(self.name),
prog='hub run {}'.format(self.name),
usage='%(prog)s',
add_help=True)
self.parser = argparse.ArgumentParser(description="Run the {} module.".format(self.name),
prog='hub run {}'.format(self.name),
usage='%(prog)s',
add_help=True)
self.arg_input_group = self.parser.add_argument_group(title="Input options", description="Input data. Required")
self.arg_config_group = self.parser.add_argument_group(
title="Config options", description="Run configuration for controlling module behavior, not required.")
self.add_module_config_arg()
self.add_module_input_arg()
args = self.parser.parse_args(argvs)
results = self.face_detection(
paths=[args.input_path],
use_gpu=args.use_gpu,
output_dir=args.output_dir,
visualization=args.visualization,
shrink=args.shrink,
confs_threshold=args.confs_threshold)
results = self.face_detection(paths=[args.input_path],
use_gpu=args.use_gpu,
output_dir=args.output_dir,
visualization=args.visualization,
shrink=args.shrink,
confs_threshold=args.confs_threshold)
return results
def add_module_config_arg(self):
"""
Add the command config options.
"""
self.arg_config_group.add_argument(
'--use_gpu', type=ast.literal_eval, default=False, help="whether use GPU or not")
self.arg_config_group.add_argument(
'--output_dir', type=str, default='detection_result', help="The directory to save output images.")
self.arg_config_group.add_argument(
'--visualization', type=ast.literal_eval, default=False, help="whether to save output as images.")
self.arg_config_group.add_argument('--use_gpu',
type=ast.literal_eval,
default=False,
help="whether use GPU or not")
self.arg_config_group.add_argument('--output_dir',
type=str,
default='detection_result',
help="The directory to save output images.")
self.arg_config_group.add_argument('--visualization',
type=ast.literal_eval,
default=False,
help="whether to save output as images.")
def add_module_input_arg(self):
"""
......@@ -187,5 +202,7 @@ class PyramidBoxLiteServer(hub.Module):
type=ast.literal_eval,
default=0.5,
help="resize the image to shrink * original_shape before feeding into network.")
self.arg_input_group.add_argument(
'--confs_threshold', type=ast.literal_eval, default=0.6, help="confidence threshold.")
self.arg_input_group.add_argument('--confs_threshold',
type=ast.literal_eval,
default=0.6,
help="confidence threshold.")
......@@ -208,7 +208,10 @@
First release
* 1.3.1
* 1.3.2
Remove fluid api
- ```shell
$ hub install pyramidbox_lite_server_mask==1.3.1
$ hub install pyramidbox_lite_server_mask==1.3.2
```
......@@ -185,7 +185,10 @@
First release
* 1.3.1
* 1.3.2
Remove fluid api
- ```shell
$ hub install pyramidbox_lite_server_mask==1.3.1
$ hub install pyramidbox_lite_server_mask==1.3.2
```
......@@ -2,18 +2,22 @@
from __future__ import absolute_import
from __future__ import division
import ast
import argparse
import ast
import os
import numpy as np
import paddle.fluid as fluid
import paddlehub as hub
from paddle.fluid.core import PaddleTensor, AnalysisConfig, create_paddle_predictor
from paddlehub.module.module import moduleinfo, runnable, serving
import paddle
from paddle.inference import Config
from paddle.inference import create_predictor
from pyramidbox_lite_server_mask.data_feed import reader
from pyramidbox_lite_server_mask.processor import postprocess, base64_to_cv2
from pyramidbox_lite_server_mask.processor import base64_to_cv2
from pyramidbox_lite_server_mask.processor import postprocess
import paddlehub as hub
from paddlehub.module.module import moduleinfo
from paddlehub.module.module import runnable
from paddlehub.module.module import serving
@moduleinfo(
......@@ -23,15 +27,15 @@ from pyramidbox_lite_server_mask.processor import postprocess, base64_to_cv2
author_email="",
summary=
"PyramidBox-Lite-Server-Mask is a high-performance face detection model used to detect whether people wear masks.",
version="1.3.1")
version="1.3.2")
class PyramidBoxLiteServerMask(hub.Module):
def _initialize(self, face_detector_module=None):
"""
Args:
face_detector_module (class): module to detect face.
"""
self.default_pretrained_model_path = os.path.join(
self.directory, "pyramidbox_lite_server_mask_model")
self.default_pretrained_model_path = os.path.join(self.directory, "pyramidbox_lite_server_mask_model")
if face_detector_module is None:
self.face_detector = hub.Module(name='pyramidbox_lite_server')
else:
......@@ -43,10 +47,10 @@ class PyramidBoxLiteServerMask(hub.Module):
"""
predictor config setting
"""
cpu_config = AnalysisConfig(self.default_pretrained_model_path)
cpu_config = Config(self.default_pretrained_model_path)
cpu_config.disable_glog_info()
cpu_config.disable_gpu()
self.cpu_predictor = create_paddle_predictor(cpu_config)
self.cpu_predictor = create_predictor(cpu_config)
try:
_places = os.environ["CUDA_VISIBLE_DEVICES"]
......@@ -55,11 +59,10 @@ class PyramidBoxLiteServerMask(hub.Module):
except:
use_gpu = False
if use_gpu:
gpu_config = AnalysisConfig(self.default_pretrained_model_path)
gpu_config = Config(self.default_pretrained_model_path)
gpu_config.disable_glog_info()
gpu_config.enable_use_gpu(
memory_pool_init_size_mb=1000, device_id=0)
self.gpu_predictor = create_paddle_predictor(gpu_config)
gpu_config.enable_use_gpu(memory_pool_init_size_mb=1000, device_id=0)
self.gpu_predictor = create_predictor(gpu_config)
def set_face_detector_module(self, face_detector_module):
"""
......@@ -123,16 +126,13 @@ class PyramidBoxLiteServerMask(hub.Module):
# get all data
all_element = list()
for yield_data in reader(self.face_detector, shrink, confs_threshold,
images, paths, use_gpu, use_multi_scale):
for yield_data in reader(self.face_detector, shrink, confs_threshold, images, paths, use_gpu, use_multi_scale):
all_element.append(yield_data)
image_list = list()
element_image_num = list()
for i in range(len(all_element)):
element_image = [
handled['image'] for handled in all_element[i]['preprocessed']
]
element_image = [handled['image'] for handled in all_element[i]['preprocessed']]
element_image_num.append(len(element_image))
image_list.extend(element_image)
......@@ -150,76 +150,61 @@ class PyramidBoxLiteServerMask(hub.Module):
pass
image_arr = np.squeeze(np.array(batch_data), axis=1)
image_tensor = PaddleTensor(image_arr.copy())
data_out = self.gpu_predictor.run([
image_tensor
]) if use_gpu else self.cpu_predictor.run([image_tensor])
# len(data_out) == 1
# data_out[0].as_ndarray().shape == (-1, 2)
data_out = data_out[0].as_ndarray()
predict_out = np.concatenate((predict_out, data_out))
predictor = self.gpu_predictor if use_gpu else self.cpu_predictor
input_names = predictor.get_input_names()
input_handle = predictor.get_input_handle(input_names[0])
input_handle.copy_from_cpu(image_arr)
predictor.run()
output_names = predictor.get_output_names()
output_handle = predictor.get_output_handle(output_names[0])
output_data = output_handle.copy_to_cpu()
predict_out = np.concatenate((predict_out, output_data))
predict_out = predict_out[1:]
# postprocess one by one
res = list()
for i in range(len(all_element)):
detect_faces_list = [
handled['face'] for handled in all_element[i]['preprocessed']
]
detect_faces_list = [handled['face'] for handled in all_element[i]['preprocessed']]
interval_left = sum(element_image_num[0:i])
interval_right = interval_left + element_image_num[i]
out = postprocess(
confidence_out=predict_out[interval_left:interval_right],
org_im=all_element[i]['org_im'],
org_im_path=all_element[i]['org_im_path'],
detected_faces=detect_faces_list,
output_dir=output_dir,
visualization=visualization)
out = postprocess(confidence_out=predict_out[interval_left:interval_right],
org_im=all_element[i]['org_im'],
org_im_path=all_element[i]['org_im_path'],
detected_faces=detect_faces_list,
output_dir=output_dir,
visualization=visualization)
res.append(out)
return res
def save_inference_model(self,
dirname,
model_filename=None,
params_filename=None,
combined=True):
def save_inference_model(self, dirname, model_filename=None, params_filename=None, combined=True):
classifier_dir = os.path.join(dirname, 'mask_detector')
detector_dir = os.path.join(dirname, 'pyramidbox_lite')
self._save_classifier_model(classifier_dir, model_filename,
params_filename, combined)
self._save_detector_model(detector_dir, model_filename, params_filename,
combined)
def _save_detector_model(self,
dirname,
model_filename=None,
params_filename=None,
combined=True):
self.face_detector.save_inference_model(dirname, model_filename,
params_filename, combined)
def _save_classifier_model(self,
dirname,
model_filename=None,
params_filename=None,
combined=True):
self._save_classifier_model(classifier_dir, model_filename, params_filename, combined)
self._save_detector_model(detector_dir, model_filename, params_filename, combined)
def _save_detector_model(self, dirname, model_filename=None, params_filename=None, combined=True):
self.face_detector.save_inference_model(dirname, model_filename, params_filename, combined)
def _save_classifier_model(self, dirname, model_filename=None, params_filename=None, combined=True):
if combined:
model_filename = "__model__" if not model_filename else model_filename
params_filename = "__params__" if not params_filename else params_filename
place = fluid.CPUPlace()
exe = fluid.Executor(place)
place = paddle.CPUPlace()
exe = paddle.Executor(place)
program, feeded_var_names, target_vars = fluid.io.load_inference_model(
program, feeded_var_names, target_vars = paddle.static.load_inference_model(
dirname=self.default_pretrained_model_path, executor=exe)
fluid.io.save_inference_model(
dirname=dirname,
main_program=program,
executor=exe,
feeded_var_names=feeded_var_names,
target_vars=target_vars,
model_filename=model_filename,
params_filename=params_filename)
paddle.static.save_inference_model(dirname=dirname,
main_program=program,
executor=exe,
feeded_var_names=feeded_var_names,
target_vars=target_vars,
model_filename=model_filename,
params_filename=params_filename)
@serving
def serving_method(self, images, **kwargs):
......@@ -235,64 +220,52 @@ class PyramidBoxLiteServerMask(hub.Module):
"""
Run as a command.
"""
self.parser = argparse.ArgumentParser(
description="Run the {} module.".format(self.name),
prog='hub run {}'.format(self.name),
usage='%(prog)s',
add_help=True)
self.arg_input_group = self.parser.add_argument_group(
title="Input options", description="Input data. Required")
self.parser = argparse.ArgumentParser(description="Run the {} module.".format(self.name),
prog='hub run {}'.format(self.name),
usage='%(prog)s',
add_help=True)
self.arg_input_group = self.parser.add_argument_group(title="Input options", description="Input data. Required")
self.arg_config_group = self.parser.add_argument_group(
title="Config options",
description=
"Run configuration for controlling module behavior, not required.")
title="Config options", description="Run configuration for controlling module behavior, not required.")
self.add_module_config_arg()
self.add_module_input_arg()
args = self.parser.parse_args(argvs)
results = self.face_detection(
paths=[args.input_path],
use_gpu=args.use_gpu,
output_dir=args.output_dir,
visualization=args.visualization,
shrink=args.shrink,
confs_threshold=args.confs_threshold)
results = self.face_detection(paths=[args.input_path],
use_gpu=args.use_gpu,
output_dir=args.output_dir,
visualization=args.visualization,
shrink=args.shrink,
confs_threshold=args.confs_threshold)
return results
def add_module_config_arg(self):
"""
Add the command config options.
"""
self.arg_config_group.add_argument(
'--use_gpu',
type=ast.literal_eval,
default=False,
help="whether use GPU or not")
self.arg_config_group.add_argument(
'--output_dir',
type=str,
default='detection_result',
help="The directory to save output images.")
self.arg_config_group.add_argument(
'--visualization',
type=ast.literal_eval,
default=False,
help="whether to save output as images.")
self.arg_config_group.add_argument('--use_gpu',
type=ast.literal_eval,
default=False,
help="whether use GPU or not")
self.arg_config_group.add_argument('--output_dir',
type=str,
default='detection_result',
help="The directory to save output images.")
self.arg_config_group.add_argument('--visualization',
type=ast.literal_eval,
default=False,
help="whether to save output as images.")
def add_module_input_arg(self):
"""
Add the command input options.
"""
self.arg_input_group.add_argument(
'--input_path', type=str, help="path to image.")
self.arg_input_group.add_argument('--input_path', type=str, help="path to image.")
self.arg_input_group.add_argument(
'--shrink',
type=ast.literal_eval,
default=0.5,
help=
"resize the image to `shrink * original_shape` before feeding into network."
)
self.arg_input_group.add_argument(
'--confs_threshold',
type=ast.literal_eval,
default=0.6,
help="confidence threshold.")
help="resize the image to `shrink * original_shape` before feeding into network.")
self.arg_input_group.add_argument('--confs_threshold',
type=ast.literal_eval,
default=0.6,
help="confidence threshold.")
......@@ -164,7 +164,10 @@
First release
* 1.1.2
* 1.1.3
Remove fluid api
- ```shell
$ hub install ultra_light_fast_generic_face_detector_1mb_320==1.1.2
$ hub install ultra_light_fast_generic_face_detector_1mb_320==1.1.3
```
......@@ -163,7 +163,10 @@
First release
* 1.1.2
* 1.1.3
Remove fluid api
- ```shell
$ hub install ultra_light_fast_generic_face_detector_1mb_320==1.1.2
$ hub install ultra_light_fast_generic_face_detector_1mb_320==1.1.3
```
......@@ -2,18 +2,22 @@
from __future__ import absolute_import
from __future__ import division
import ast
import argparse
import ast
import os
import numpy as np
import paddle.fluid as fluid
import paddlehub as hub
from paddle.fluid.core import PaddleTensor, AnalysisConfig, create_paddle_predictor
from paddlehub.module.module import moduleinfo, runnable, serving
from ultra_light_fast_generic_face_detector_1mb_320.processor import postprocess, base64_to_cv2
import paddle
from paddle.inference import Config
from paddle.inference import create_predictor
from ultra_light_fast_generic_face_detector_1mb_320.data_feed import reader
from ultra_light_fast_generic_face_detector_1mb_320.processor import base64_to_cv2
from ultra_light_fast_generic_face_detector_1mb_320.processor import postprocess
import paddlehub as hub
from paddlehub.module.module import moduleinfo
from paddlehub.module.module import runnable
from paddlehub.module.module import serving
@moduleinfo(
......@@ -23,8 +27,9 @@ from ultra_light_fast_generic_face_detector_1mb_320.data_feed import reader
author_email="paddle-dev@baidu.com",
summary=
"Ultra-Light-Fast-Generic-Face-Detector-1MB is a high-performance object detection model release on https://github.com/Linzaer/Ultra-Light-Fast-Generic-Face-Detector-1MB.",
version="1.1.2")
version="1.1.3")
class FaceDetector320(hub.Module):
def _initialize(self):
self.default_pretrained_model_path = os.path.join(self.directory,
"ultra_light_fast_generic_face_detector_1mb_320")
......@@ -34,10 +39,10 @@ class FaceDetector320(hub.Module):
"""
predictor config setting
"""
cpu_config = AnalysisConfig(self.default_pretrained_model_path)
cpu_config = Config(self.default_pretrained_model_path)
cpu_config.disable_glog_info()
cpu_config.disable_gpu()
self.cpu_predictor = create_paddle_predictor(cpu_config)
self.cpu_predictor = create_predictor(cpu_config)
try:
_places = os.environ["CUDA_VISIBLE_DEVICES"]
......@@ -46,29 +51,28 @@ class FaceDetector320(hub.Module):
except:
use_gpu = False
if use_gpu:
gpu_config = AnalysisConfig(self.default_pretrained_model_path)
gpu_config = Config(self.default_pretrained_model_path)
gpu_config.disable_glog_info()
gpu_config.enable_use_gpu(memory_pool_init_size_mb=1000, device_id=0)
self.gpu_predictor = create_paddle_predictor(gpu_config)
self.gpu_predictor = create_predictor(gpu_config)
def save_inference_model(self, dirname, model_filename=None, params_filename=None, combined=True):
if combined:
model_filename = "__model__" if not model_filename else model_filename
params_filename = "__params__" if not params_filename else params_filename
place = fluid.CPUPlace()
exe = fluid.Executor(place)
place = paddle.CPUPlace()
exe = paddle.Executor(place)
program, feeded_var_names, target_vars = fluid.io.load_inference_model(
program, feeded_var_names, target_vars = paddle.static.load_inference_model(
dirname=self.default_pretrained_model_path, executor=exe)
fluid.io.save_inference_model(
dirname=dirname,
main_program=program,
executor=exe,
feeded_var_names=feeded_var_names,
target_vars=target_vars,
model_filename=model_filename,
params_filename=params_filename)
paddle.static.save_inference_model(dirname=dirname,
main_program=program,
executor=exe,
feeded_var_names=feeded_var_names,
target_vars=target_vars,
model_filename=model_filename,
params_filename=params_filename)
def face_detection(self,
images=None,
......@@ -129,24 +133,31 @@ class FaceDetector320(hub.Module):
except:
pass
# feed batch image
batch_image = np.array([data['image'] for data in batch_data])
batch_image = PaddleTensor(batch_image.astype('float32'))
data_out = self.gpu_predictor.run([batch_image]) if use_gpu else self.cpu_predictor.run([batch_image])
confidences = data_out[0].as_ndarray()
boxes = data_out[1].as_ndarray()
batch_image = np.array([data['image'] for data in batch_data]).astype('float32')
predictor = self.gpu_predictor if use_gpu else self.cpu_predictor
input_names = predictor.get_input_names()
input_handle = predictor.get_input_handle(input_names[0])
input_handle.copy_from_cpu(batch_image)
predictor.run()
output_names = predictor.get_output_names()
output_handle = predictor.get_output_handle(output_names[0])
confidences = output_handle.copy_to_cpu()
output_handle = predictor.get_output_handle(output_names[1])
boxes = output_handle.copy_to_cpu()
# postprocess one by one
for i in range(len(batch_data)):
out = postprocess(
confidences=confidences[i],
boxes=boxes[i],
orig_im=batch_data[i]['orig_im'],
orig_im_shape=batch_data[i]['orig_im_shape'],
orig_im_path=batch_data[i]['orig_im_path'],
output_dir=output_dir,
visualization=visualization,
confs_threshold=confs_threshold,
iou_threshold=iou_threshold)
out = postprocess(confidences=confidences[i],
boxes=boxes[i],
orig_im=batch_data[i]['orig_im'],
orig_im_shape=batch_data[i]['orig_im_shape'],
orig_im_path=batch_data[i]['orig_im_path'],
output_dir=output_dir,
visualization=visualization,
confs_threshold=confs_threshold,
iou_threshold=iou_threshold)
res.append(out)
return res
......@@ -164,38 +175,39 @@ class FaceDetector320(hub.Module):
"""
Run as a command.
"""
self.parser = argparse.ArgumentParser(
description="Run the {} module.".format(self.name),
prog='hub run {}'.format(self.name),
usage='%(prog)s',
add_help=True)
self.parser = argparse.ArgumentParser(description="Run the {} module.".format(self.name),
prog='hub run {}'.format(self.name),
usage='%(prog)s',
add_help=True)
self.arg_input_group = self.parser.add_argument_group(title="Input options", description="Input data. Required")
self.arg_config_group = self.parser.add_argument_group(
title="Config options", description="Run configuration for controlling module behavior, not required.")
self.add_module_config_arg()
self.add_module_input_arg()
args = self.parser.parse_args(argvs)
results = self.face_detection(
paths=[args.input_path],
batch_size=args.batch_size,
use_gpu=args.use_gpu,
output_dir=args.output_dir,
visualization=args.visualization)
results = self.face_detection(paths=[args.input_path],
batch_size=args.batch_size,
use_gpu=args.use_gpu,
output_dir=args.output_dir,
visualization=args.visualization)
return results
def add_module_config_arg(self):
"""
Add the command config options.
"""
self.arg_config_group.add_argument(
'--use_gpu', type=ast.literal_eval, default=False, help="whether use GPU or not")
self.arg_config_group.add_argument(
'--output_dir',
type=str,
default='face_detector_320_predict_output',
help="The directory to save output images.")
self.arg_config_group.add_argument(
'--visualization', type=ast.literal_eval, default=False, help="whether to save output as images.")
self.arg_config_group.add_argument('--use_gpu',
type=ast.literal_eval,
default=False,
help="whether use GPU or not")
self.arg_config_group.add_argument('--output_dir',
type=str,
default='face_detector_320_predict_output',
help="The directory to save output images.")
self.arg_config_group.add_argument('--visualization',
type=ast.literal_eval,
default=False,
help="whether to save output as images.")
self.arg_config_group.add_argument('--batch_size', type=ast.literal_eval, default=1, help="batch size.")
def add_module_input_arg(self):
......
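After the rewrite the detector is still driven through PaddleHub in the usual way; a hedged usage sketch (the image path is a placeholder):

```python
import cv2
import paddlehub as hub

# Load the updated module and run face detection on a local image.
face_detector = hub.Module(name="ultra_light_fast_generic_face_detector_1mb_320")
result = face_detector.face_detection(images=[cv2.imread('/PATH/TO/IMAGE')],
                                      use_gpu=False,
                                      visualization=False)
print(result)
```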
......@@ -164,7 +164,10 @@
First release
* 1.1.2
* 1.1.3
Remove fluid api
- ```shell
$ hub install ultra_light_fast_generic_face_detector_1mb_640==1.1.2
$ hub install ultra_light_fast_generic_face_detector_1mb_640==1.1.3
```
......@@ -163,7 +163,10 @@
First release
* 1.1.2
* 1.1.3
Remove fluid api
- ```shell
$ hub install ultra_light_fast_generic_face_detector_1mb_640==1.1.2
$ hub install ultra_light_fast_generic_face_detector_1mb_640==1.1.3
```
......@@ -2,18 +2,22 @@
from __future__ import absolute_import
from __future__ import division
import ast
import argparse
import ast
import os
import numpy as np
import paddle.fluid as fluid
import paddlehub as hub
from paddle.fluid.core import PaddleTensor, AnalysisConfig, create_paddle_predictor
from paddlehub.module.module import moduleinfo, runnable, serving
from ultra_light_fast_generic_face_detector_1mb_640.processor import postprocess, base64_to_cv2
import paddle
from paddle.inference import Config
from paddle.inference import create_predictor
from ultra_light_fast_generic_face_detector_1mb_640.data_feed import reader
from ultra_light_fast_generic_face_detector_1mb_640.processor import base64_to_cv2
from ultra_light_fast_generic_face_detector_1mb_640.processor import postprocess
import paddlehub as hub
from paddlehub.module.module import moduleinfo
from paddlehub.module.module import runnable
from paddlehub.module.module import serving
@moduleinfo(
......@@ -23,8 +27,9 @@ from ultra_light_fast_generic_face_detector_1mb_640.data_feed import reader
author_email="paddle-dev@baidu.com",
summary=
"Ultra-Light-Fast-Generic-Face-Detector-1MB is a high-performance object detection model release on https://github.com/Linzaer/Ultra-Light-Fast-Generic-Face-Detector-1MB.",
version="1.1.2")
version="1.1.3")
class FaceDetector640(hub.Module):
def _initialize(self):
self.default_pretrained_model_path = os.path.join(self.directory,
"ultra_light_fast_generic_face_detector_1mb_640")
......@@ -34,10 +39,10 @@ class FaceDetector640(hub.Module):
"""
predictor config setting
"""
cpu_config = AnalysisConfig(self.default_pretrained_model_path)
cpu_config = Config(self.default_pretrained_model_path)
cpu_config.disable_glog_info()
cpu_config.disable_gpu()
self.cpu_predictor = create_paddle_predictor(cpu_config)
self.cpu_predictor = create_predictor(cpu_config)
try:
_places = os.environ["CUDA_VISIBLE_DEVICES"]
......@@ -46,29 +51,28 @@ class FaceDetector640(hub.Module):
except:
use_gpu = False
if use_gpu:
gpu_config = AnalysisConfig(self.default_pretrained_model_path)
gpu_config = Config(self.default_pretrained_model_path)
gpu_config.disable_glog_info()
gpu_config.enable_use_gpu(memory_pool_init_size_mb=1000, device_id=0)
self.gpu_predictor = create_paddle_predictor(gpu_config)
self.gpu_predictor = create_predictor(gpu_config)
def save_inference_model(self, dirname, model_filename=None, params_filename=None, combined=True):
if combined:
model_filename = "__model__" if not model_filename else model_filename
params_filename = "__params__" if not params_filename else params_filename
place = fluid.CPUPlace()
exe = fluid.Executor(place)
place = paddle.CPUPlace()
exe = paddle.Executor(place)
program, feeded_var_names, target_vars = fluid.io.load_inference_model(
program, feeded_var_names, target_vars = paddle.static.load_inference_model(
dirname=self.default_pretrained_model_path, executor=exe)
fluid.io.save_inference_model(
dirname=dirname,
main_program=program,
executor=exe,
feeded_var_names=feeded_var_names,
target_vars=target_vars,
model_filename=model_filename,
params_filename=params_filename)
paddle.static.save_inference_model(dirname=dirname,
main_program=program,
executor=exe,
feeded_var_names=feeded_var_names,
target_vars=target_vars,
model_filename=model_filename,
params_filename=params_filename)
def face_detection(self,
images=None,
......@@ -128,24 +132,31 @@ class FaceDetector640(hub.Module):
except:
pass
# feed batch image
batch_image = np.array([data['image'] for data in batch_data])
batch_image = PaddleTensor(batch_image.astype('float32'))
data_out = self.gpu_predictor.run([batch_image]) if use_gpu else self.cpu_predictor.run([batch_image])
confidences = data_out[0].as_ndarray()
boxes = data_out[1].as_ndarray()
batch_image = np.array([data['image'] for data in batch_data]).astype('float32')
predictor = self.gpu_predictor if use_gpu else self.cpu_predictor
input_names = predictor.get_input_names()
input_handle = predictor.get_input_handle(input_names[0])
input_handle.copy_from_cpu(batch_image)
predictor.run()
output_names = predictor.get_output_names()
output_handle = predictor.get_output_handle(output_names[0])
confidences = output_handle.copy_to_cpu()
output_handle = predictor.get_output_handle(output_names[1])
boxes = output_handle.copy_to_cpu()
# postprocess one by one
for i in range(len(batch_data)):
out = postprocess(
confidences=confidences[i],
boxes=boxes[i],
orig_im=batch_data[i]['orig_im'],
orig_im_shape=batch_data[i]['orig_im_shape'],
orig_im_path=batch_data[i]['orig_im_path'],
output_dir=output_dir,
visualization=visualization,
confs_threshold=confs_threshold,
iou_threshold=iou_threshold)
out = postprocess(confidences=confidences[i],
boxes=boxes[i],
orig_im=batch_data[i]['orig_im'],
orig_im_shape=batch_data[i]['orig_im_shape'],
orig_im_path=batch_data[i]['orig_im_path'],
output_dir=output_dir,
visualization=visualization,
confs_threshold=confs_threshold,
iou_threshold=iou_threshold)
res.append(out)
return res
......@@ -163,38 +174,39 @@ class FaceDetector640(hub.Module):
"""
Run as a command.
"""
self.parser = argparse.ArgumentParser(
description="Run the {} module.".format(self.name),
prog='hub run {}'.format(self.name),
usage='%(prog)s',
add_help=True)
self.parser = argparse.ArgumentParser(description="Run the {} module.".format(self.name),
prog='hub run {}'.format(self.name),
usage='%(prog)s',
add_help=True)
self.arg_input_group = self.parser.add_argument_group(title="Input options", description="Input data. Required")
self.arg_config_group = self.parser.add_argument_group(
title="Config options", description="Run configuration for controlling module behavior, not required.")
self.add_module_config_arg()
self.add_module_input_arg()
args = self.parser.parse_args(argvs)
results = self.face_detection(
paths=[args.input_path],
batch_size=args.batch_size,
use_gpu=args.use_gpu,
output_dir=args.output_dir,
visualization=args.visualization)
results = self.face_detection(paths=[args.input_path],
batch_size=args.batch_size,
use_gpu=args.use_gpu,
output_dir=args.output_dir,
visualization=args.visualization)
return results
def add_module_config_arg(self):
"""
Add the command config options.
"""
self.arg_config_group.add_argument(
'--use_gpu', type=ast.literal_eval, default=False, help="whether use GPU or not")
self.arg_config_group.add_argument(
'--output_dir',
type=str,
default='face_detector_640_predict_output',
help="The directory to save output images.")
self.arg_config_group.add_argument(
'--visualization', type=ast.literal_eval, default=False, help="whether to save output as images.")
self.arg_config_group.add_argument('--use_gpu',
type=ast.literal_eval,
default=False,
help="whether use GPU or not")
self.arg_config_group.add_argument('--output_dir',
type=str,
default='face_detector_640_predict_output',
help="The directory to save output images.")
self.arg_config_group.add_argument('--visualization',
type=ast.literal_eval,
default=False,
help="whether to save output as images.")
self.arg_config_group.add_argument('--batch_size', type=ast.literal_eval, default=1, help="batch size.")
def add_module_input_arg(self):
......
......@@ -48,7 +48,7 @@
- ```shell
$ hub run face_landmark_localization --input_path "/PATH/TO/IMAGE"
```
- This invokes the hub module from the command line; for more details, see [PaddleHub command-line usage](../../../../docs/docs_ch/tutorial/cmd_usage.rst)
- ### 2. Prediction Code Example
......@@ -56,17 +56,17 @@
- ```python
import paddlehub as hub
import cv2
face_landmark = hub.Module(name="face_landmark_localization")
# Replace face detection module to speed up predictions but reduce performance
# face_landmark.set_face_detector_module(hub.Module(name="ultra_light_fast_generic_face_detector_1mb_320"))
result = face_landmark.keypoint_detection(images=[cv2.imread('/PATH/TO/IMAGE')])
# or
# result = face_landmark.keypoint_detection(paths=['/PATH/TO/IMAGE'])
```
- ### 3. API
- ```python
......@@ -125,7 +125,7 @@
params_filename=None,
combined=False):
```
- Saves the model to the specified path. Since face keypoint detection is composed of a face detection model plus a keypoint detection model, saving produces two subdirectories: `face_landmark` holds the keypoint model and `detector` holds the face detection model (see the sketch below).
- **Parameters**
- dirname: name of the directory in which to save the model
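- A minimal sketch of calling this interface; the output directory name `inference_model` is illustrative, not taken from the docs:

- ```python
import paddlehub as hub

face_landmark = hub.Module(name="face_landmark_localization")
# Writes two subdirectories under ./inference_model: face_landmark and detector.
face_landmark.save_inference_model(dirname="inference_model")
```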
......@@ -158,17 +158,17 @@
import cv2
import base64
import paddlehub as hub
def cv2_to_base64(image):
data = cv2.imencode('.jpg', image)[1]
return base64.b64encode(data.tobytes()).decode('utf8')
# Send the HTTP request
data = {'images':[cv2_to_base64(cv2.imread("/PATH/TO/IMAGE"))]}
headers = {"Content-type": "application/json"}
url = "http://127.0.0.1:8866/predict/face_landmark_localization"
r = requests.post(url=url, headers=headers, data=json.dumps(data))
# Print the prediction results
print(r.json()["results"])
```
......@@ -178,12 +178,15 @@
* 1.0.0
First release
* 1.0.1
* 1.0.2
* 1.0.3
Remove fluid api
* ```shell
$ hub install face_landmark_localization==1.0.2
$ hub install face_landmark_localization==1.0.3
```
# coding=utf-8
from __future__ import absolute_import
import paddle.fluid as fluid
__all__ = ["face_landmark_localization"]
def face_landmark_localization(image):
# image = fluid.layers.data(shape=[1, 60, 60], name='data', dtype='float32')
Conv1 = fluid.layers.conv2d(
image,
param_attr='Conv1_weights',
name='Conv1',
dilation=[1, 1],
filter_size=[5, 5],
stride=[1, 1],
groups=1,
bias_attr='Conv1_bias',
padding=[2, 2],
num_filters=20)
ActivationTangH1 = fluid.layers.tanh(Conv1, name='ActivationTangH1')
ActivationAbs1 = fluid.layers.abs(ActivationTangH1, name='ActivationAbs1')
Pool1 = fluid.layers.pool2d(
ActivationAbs1,
exclusive=False,
pool_type='max',
pool_padding=[0, 0],
name='Pool1',
global_pooling=False,
pool_stride=[2, 2],
ceil_mode=True,
pool_size=[2, 2])
Conv2 = fluid.layers.conv2d(
Pool1,
param_attr='Conv2_weights',
name='Conv2',
dilation=[1, 1],
filter_size=[5, 5],
stride=[1, 1],
groups=1,
bias_attr='Conv2_bias',
padding=[2, 2],
num_filters=48)
ActivationTangH2 = fluid.layers.tanh(Conv2, name='ActivationTangH2')
ActivationAbs2 = fluid.layers.abs(ActivationTangH2, name='ActivationAbs2')
Pool2 = fluid.layers.pool2d(
ActivationAbs2,
exclusive=False,
pool_type='max',
pool_padding=[0, 0],
name='Pool2',
global_pooling=False,
pool_stride=[2, 2],
ceil_mode=True,
pool_size=[2, 2])
Conv3 = fluid.layers.conv2d(
Pool2,
param_attr='Conv3_weights',
name='Conv3',
dilation=[1, 1],
filter_size=[3, 3],
stride=[1, 1],
groups=1,
bias_attr='Conv3_bias',
padding=[0, 0],
num_filters=64)
ActivationTangH3 = fluid.layers.tanh(Conv3, name='ActivationTangH3')
ActivationAbs3 = fluid.layers.abs(ActivationTangH3, name='ActivationAbs3')
Pool3 = fluid.layers.pool2d(
ActivationAbs3,
exclusive=False,
pool_type='max',
pool_padding=[0, 0],
name='Pool3',
global_pooling=False,
pool_stride=[2, 2],
ceil_mode=True,
pool_size=[3, 3])
Conv4 = fluid.layers.conv2d(
Pool3,
param_attr='Conv4_weights',
name='Conv4',
dilation=[1, 1],
filter_size=[3, 3],
stride=[1, 1],
groups=1,
bias_attr='Conv4_bias',
padding=[0, 0],
num_filters=80)
ActivationTangH4 = fluid.layers.tanh(Conv4, name='ActivationTangH4')
ActivationAbs4 = fluid.layers.abs(ActivationTangH4, name='ActivationAbs4')
Dense1 = fluid.layers.fc(
ActivationAbs4, param_attr='Dense1_weights', act=None, name='Dense1', size=512, bias_attr='Dense1_bias')
ActivationTangH5 = fluid.layers.tanh(Dense1, name='ActivationTangH5')
ActivationAbs5 = fluid.layers.abs(ActivationTangH5, name='ActivationAbs5')
Dense3 = fluid.layers.fc(
ActivationAbs5, param_attr='Dense3_weights', act=None, name='Dense3', size=136, bias_attr='Dense3_bias')
return Dense3
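The network above is written against the removed fluid layers API. For reference only, a hedged sketch of how the first Conv1 → TanH → Abs → Pool1 block might be expressed with the paddle 2.x `paddle.nn` API; shapes follow the commented data layer above, and this rewrite is not part of the commit:

```python
import paddle
import paddle.nn as nn

class FirstBlock(nn.Layer):
    """Conv1 -> TanH -> Abs -> MaxPool, mirroring the fluid definition above."""

    def __init__(self):
        super().__init__()
        # 1 input channel (a 60x60 face crop), 20 filters, 5x5 kernel, padding 2.
        self.conv1 = nn.Conv2D(in_channels=1, out_channels=20, kernel_size=5, stride=1, padding=2)
        self.pool1 = nn.MaxPool2D(kernel_size=2, stride=2, ceil_mode=True)

    def forward(self, x):
        x = paddle.abs(paddle.tanh(self.conv1(x)))
        return self.pool1(x)

block = FirstBlock()
out = block(paddle.randn([1, 1, 60, 60]))
print(out.shape)  # expected: [1, 20, 30, 30]
```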
......@@ -168,6 +168,10 @@
Fix the problem of reading numpy
* 1.1.3
Remove fluid api
- ```shell
$ hub install ssd_mobilenet_v1_pascal==1.1.2
$ hub install ssd_mobilenet_v1_pascal==1.1.3
```
......@@ -167,6 +167,10 @@
Fix the problem of reading numpy
* 1.1.3
Remove fluid api
- ```shell
$ hub install ssd_mobilenet_v1_pascal==1.1.2
$ hub install ssd_mobilenet_v1_pascal==1.1.3
```
......@@ -167,6 +167,10 @@
Fix the problem of reading numpy
* 1.0.3
Remove fluid api
- ```shell
$ hub install yolov3_darknet53_pedestrian==1.0.2
$ hub install yolov3_darknet53_pedestrian==1.0.3
```
......@@ -166,6 +166,10 @@
Fix the problem of reading numpy
* 1.0.3
Remove fluid api
- ```shell
$ hub install yolov3_darknet53_pedestrian==1.0.2
$ hub install yolov3_darknet53_pedestrian==1.0.3
```
(Three additional file diffs are collapsed and not shown.)