Unverified commit 8468e1ac, authored by K KP, committed by GitHub

Remove fluid api in modules and pkg. (#1906)

Parent 5294a272
......@@ -180,8 +180,10 @@
First release
* 1.0.1
* 1.0.3
Remove fluid api
- ```shell
$ hub install stylepro_artistic==1.0.1
$ hub install stylepro_artistic==1.0.3
```
......@@ -179,8 +179,10 @@
First release
* 1.0.1
* 1.0.3
Remove fluid api
- ```shell
$ hub install stylepro_artistic==1.0.1
$ hub install stylepro_artistic==1.0.3
```
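For reference, a minimal usage sketch of the upgraded module (the image paths are hypothetical placeholders; `style_transfer` is the method defined in `module.py` below):

```python
import cv2
import paddlehub as hub

# Load the module and run one arbitrary style transfer (hypothetical image paths).
stylepro_artistic = hub.Module(name="stylepro_artistic")
result = stylepro_artistic.style_transfer(
    images=[{
        'content': cv2.imread('/PATH/TO/CONTENT.jpg'),
        'styles': [cv2.imread('/PATH/TO/STYLE.jpg')]
    }],
    alpha=1.0,
    visualization=True)
print(result)
```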
# coding=utf-8
from paddle.fluid.initializer import Constant
from paddle.fluid.param_attr import ParamAttr
import paddle.fluid as fluid
def decoder_net():
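# Decoder of the style-projection network, exported via x2paddle: it maps a 1x512x64x64
# feature map back to a 1x3x512x512 image through reflect-pad + 3x3 conv + ReLU blocks
# interleaved with nearest-neighbor upsampling (64 -> 128 -> 256 -> 512).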
x2paddle_22 = fluid.layers.create_parameter(
dtype='float32', shape=[4], name='x2paddle_22', attr='x2paddle_22', default_initializer=Constant(0.0))
x2paddle_36 = fluid.layers.create_parameter(
dtype='float32', shape=[4], name='x2paddle_36', attr='x2paddle_36', default_initializer=Constant(0.0))
x2paddle_44 = fluid.layers.create_parameter(
dtype='float32', shape=[4], name='x2paddle_44', attr='x2paddle_44', default_initializer=Constant(0.0))
x2paddle_input_1 = fluid.layers.data(
dtype='float32', shape=[1, 512, 64, 64], name='x2paddle_input_1', append_batch_size=False)
x2paddle_19 = fluid.layers.pad2d(
x2paddle_input_1, pad_value=0.0, mode='reflect', paddings=[1, 1, 1, 1], name='x2paddle_19')
x2paddle_20 = fluid.layers.conv2d(
x2paddle_19,
num_filters=256,
filter_size=[3, 3],
stride=[1, 1],
padding=[0, 0],
dilation=[1, 1],
groups=1,
param_attr='x2paddle_1',
name='x2paddle_20',
bias_attr='x2paddle_2')
x2paddle_21 = fluid.layers.relu(x2paddle_20, name='x2paddle_21')
x2paddle_23 = fluid.layers.resize_nearest(x2paddle_21, name='x2paddle_23', out_shape=[128, 128])
x2paddle_24 = fluid.layers.pad2d(
x2paddle_23, pad_value=0.0, mode='reflect', paddings=[1, 1, 1, 1], name='x2paddle_24')
x2paddle_25 = fluid.layers.conv2d(
x2paddle_24,
num_filters=256,
filter_size=[3, 3],
stride=[1, 1],
padding=[0, 0],
dilation=[1, 1],
groups=1,
param_attr='x2paddle_3',
name='x2paddle_25',
bias_attr='x2paddle_4')
x2paddle_26 = fluid.layers.relu(x2paddle_25, name='x2paddle_26')
x2paddle_27 = fluid.layers.pad2d(
x2paddle_26, pad_value=0.0, mode='reflect', paddings=[1, 1, 1, 1], name='x2paddle_27')
x2paddle_28 = fluid.layers.conv2d(
x2paddle_27,
num_filters=256,
filter_size=[3, 3],
stride=[1, 1],
padding=[0, 0],
dilation=[1, 1],
groups=1,
param_attr='x2paddle_5',
name='x2paddle_28',
bias_attr='x2paddle_6')
x2paddle_29 = fluid.layers.relu(x2paddle_28, name='x2paddle_29')
x2paddle_30 = fluid.layers.pad2d(
x2paddle_29, pad_value=0.0, mode='reflect', paddings=[1, 1, 1, 1], name='x2paddle_30')
x2paddle_31 = fluid.layers.conv2d(
x2paddle_30,
num_filters=256,
filter_size=[3, 3],
stride=[1, 1],
padding=[0, 0],
dilation=[1, 1],
groups=1,
param_attr='x2paddle_7',
name='x2paddle_31',
bias_attr='x2paddle_8')
x2paddle_32 = fluid.layers.relu(x2paddle_31, name='x2paddle_32')
x2paddle_33 = fluid.layers.pad2d(
x2paddle_32, pad_value=0.0, mode='reflect', paddings=[1, 1, 1, 1], name='x2paddle_33')
x2paddle_34 = fluid.layers.conv2d(
x2paddle_33,
num_filters=128,
filter_size=[3, 3],
stride=[1, 1],
padding=[0, 0],
dilation=[1, 1],
groups=1,
param_attr='x2paddle_9',
name='x2paddle_34',
bias_attr='x2paddle_10')
x2paddle_35 = fluid.layers.relu(x2paddle_34, name='x2paddle_35')
x2paddle_37 = fluid.layers.resize_nearest(x2paddle_35, name='x2paddle_37', out_shape=[256, 256])
x2paddle_38 = fluid.layers.pad2d(
x2paddle_37, pad_value=0.0, mode='reflect', paddings=[1, 1, 1, 1], name='x2paddle_38')
x2paddle_39 = fluid.layers.conv2d(
x2paddle_38,
num_filters=128,
filter_size=[3, 3],
stride=[1, 1],
padding=[0, 0],
dilation=[1, 1],
groups=1,
param_attr='x2paddle_11',
name='x2paddle_39',
bias_attr='x2paddle_12')
x2paddle_40 = fluid.layers.relu(x2paddle_39, name='x2paddle_40')
x2paddle_41 = fluid.layers.pad2d(
x2paddle_40, pad_value=0.0, mode='reflect', paddings=[1, 1, 1, 1], name='x2paddle_41')
x2paddle_42 = fluid.layers.conv2d(
x2paddle_41,
num_filters=64,
filter_size=[3, 3],
stride=[1, 1],
padding=[0, 0],
dilation=[1, 1],
groups=1,
param_attr='x2paddle_13',
name='x2paddle_42',
bias_attr='x2paddle_14')
x2paddle_43 = fluid.layers.relu(x2paddle_42, name='x2paddle_43')
x2paddle_45 = fluid.layers.resize_nearest(x2paddle_43, name='x2paddle_45', out_shape=[512, 512])
x2paddle_46 = fluid.layers.pad2d(
x2paddle_45, pad_value=0.0, mode='reflect', paddings=[1, 1, 1, 1], name='x2paddle_46')
x2paddle_47 = fluid.layers.conv2d(
x2paddle_46,
num_filters=64,
filter_size=[3, 3],
stride=[1, 1],
padding=[0, 0],
dilation=[1, 1],
groups=1,
param_attr='x2paddle_15',
name='x2paddle_47',
bias_attr='x2paddle_16')
x2paddle_48 = fluid.layers.relu(x2paddle_47, name='x2paddle_48')
x2paddle_49 = fluid.layers.pad2d(
x2paddle_48, pad_value=0.0, mode='reflect', paddings=[1, 1, 1, 1], name='x2paddle_49')
x2paddle_50 = fluid.layers.conv2d(
x2paddle_49,
num_filters=3,
filter_size=[3, 3],
stride=[1, 1],
padding=[0, 0],
dilation=[1, 1],
groups=1,
param_attr='x2paddle_17',
name='x2paddle_50',
bias_attr='x2paddle_18')
return x2paddle_input_1, x2paddle_50
# coding=utf-8
from paddle.fluid.initializer import Constant
from paddle.fluid.param_attr import ParamAttr
import paddle.fluid as fluid
def encoder_net():
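# Encoder of the style-projection network, exported via x2paddle: a VGG-style stack of
# reflect-pad + 3x3 conv + ReLU blocks with three 2x2 max-pooling stages that maps a
# 1x3x512x512 image to a 1x512x64x64 feature map.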
x2paddle_0 = fluid.layers.data(dtype='float32', shape=[1, 3, 512, 512], name='x2paddle_0', append_batch_size=False)
x2paddle_21 = fluid.layers.conv2d(
x2paddle_0,
num_filters=3,
filter_size=[1, 1],
stride=[1, 1],
padding=[0, 0],
dilation=[1, 1],
groups=1,
param_attr='x2paddle_1',
name='x2paddle_21',
bias_attr='x2paddle_2')
x2paddle_22 = fluid.layers.pad2d(
x2paddle_21, pad_value=0.0, mode='reflect', paddings=[1, 1, 1, 1], name='x2paddle_22')
x2paddle_23 = fluid.layers.conv2d(
x2paddle_22,
num_filters=64,
filter_size=[3, 3],
stride=[1, 1],
padding=[0, 0],
dilation=[1, 1],
groups=1,
param_attr='x2paddle_3',
name='x2paddle_23',
bias_attr='x2paddle_4')
x2paddle_24 = fluid.layers.relu(x2paddle_23, name='x2paddle_24')
x2paddle_25 = fluid.layers.pad2d(
x2paddle_24, pad_value=0.0, mode='reflect', paddings=[1, 1, 1, 1], name='x2paddle_25')
x2paddle_26 = fluid.layers.conv2d(
x2paddle_25,
num_filters=64,
filter_size=[3, 3],
stride=[1, 1],
padding=[0, 0],
dilation=[1, 1],
groups=1,
param_attr='x2paddle_5',
name='x2paddle_26',
bias_attr='x2paddle_6')
x2paddle_27 = fluid.layers.relu(x2paddle_26, name='x2paddle_27')
x2paddle_28 = fluid.layers.pool2d(
x2paddle_27,
pool_size=[2, 2],
pool_type='max',
pool_stride=[2, 2],
pool_padding=[0, 0],
ceil_mode=False,
name='x2paddle_28',
exclusive=False)
x2paddle_29 = fluid.layers.pad2d(
x2paddle_28, pad_value=0.0, mode='reflect', paddings=[1, 1, 1, 1], name='x2paddle_29')
x2paddle_30 = fluid.layers.conv2d(
x2paddle_29,
num_filters=128,
filter_size=[3, 3],
stride=[1, 1],
padding=[0, 0],
dilation=[1, 1],
groups=1,
param_attr='x2paddle_7',
name='x2paddle_30',
bias_attr='x2paddle_8')
x2paddle_31 = fluid.layers.relu(x2paddle_30, name='x2paddle_31')
x2paddle_32 = fluid.layers.pad2d(
x2paddle_31, pad_value=0.0, mode='reflect', paddings=[1, 1, 1, 1], name='x2paddle_32')
x2paddle_33 = fluid.layers.conv2d(
x2paddle_32,
num_filters=128,
filter_size=[3, 3],
stride=[1, 1],
padding=[0, 0],
dilation=[1, 1],
groups=1,
param_attr='x2paddle_9',
name='x2paddle_33',
bias_attr='x2paddle_10')
x2paddle_34 = fluid.layers.relu(x2paddle_33, name='x2paddle_34')
x2paddle_35 = fluid.layers.pool2d(
x2paddle_34,
pool_size=[2, 2],
pool_type='max',
pool_stride=[2, 2],
pool_padding=[0, 0],
ceil_mode=False,
name='x2paddle_35',
exclusive=False)
x2paddle_36 = fluid.layers.pad2d(
x2paddle_35, pad_value=0.0, mode='reflect', paddings=[1, 1, 1, 1], name='x2paddle_36')
x2paddle_37 = fluid.layers.conv2d(
x2paddle_36,
num_filters=256,
filter_size=[3, 3],
stride=[1, 1],
padding=[0, 0],
dilation=[1, 1],
groups=1,
param_attr='x2paddle_11',
name='x2paddle_37',
bias_attr='x2paddle_12')
x2paddle_38 = fluid.layers.relu(x2paddle_37, name='x2paddle_38')
x2paddle_39 = fluid.layers.pad2d(
x2paddle_38, pad_value=0.0, mode='reflect', paddings=[1, 1, 1, 1], name='x2paddle_39')
x2paddle_40 = fluid.layers.conv2d(
x2paddle_39,
num_filters=256,
filter_size=[3, 3],
stride=[1, 1],
padding=[0, 0],
dilation=[1, 1],
groups=1,
param_attr='x2paddle_13',
name='x2paddle_40',
bias_attr='x2paddle_14')
x2paddle_41 = fluid.layers.relu(x2paddle_40, name='x2paddle_41')
x2paddle_42 = fluid.layers.pad2d(
x2paddle_41, pad_value=0.0, mode='reflect', paddings=[1, 1, 1, 1], name='x2paddle_42')
x2paddle_43 = fluid.layers.conv2d(
x2paddle_42,
num_filters=256,
filter_size=[3, 3],
stride=[1, 1],
padding=[0, 0],
dilation=[1, 1],
groups=1,
param_attr='x2paddle_15',
name='x2paddle_43',
bias_attr='x2paddle_16')
x2paddle_44 = fluid.layers.relu(x2paddle_43, name='x2paddle_44')
x2paddle_45 = fluid.layers.pad2d(
x2paddle_44, pad_value=0.0, mode='reflect', paddings=[1, 1, 1, 1], name='x2paddle_45')
x2paddle_46 = fluid.layers.conv2d(
x2paddle_45,
num_filters=256,
filter_size=[3, 3],
stride=[1, 1],
padding=[0, 0],
dilation=[1, 1],
groups=1,
param_attr='x2paddle_17',
name='x2paddle_46',
bias_attr='x2paddle_18')
x2paddle_47 = fluid.layers.relu(x2paddle_46, name='x2paddle_47')
x2paddle_48 = fluid.layers.pool2d(
x2paddle_47,
pool_size=[2, 2],
pool_type='max',
pool_stride=[2, 2],
pool_padding=[0, 0],
ceil_mode=False,
name='x2paddle_48',
exclusive=False)
x2paddle_49 = fluid.layers.pad2d(
x2paddle_48, pad_value=0.0, mode='reflect', paddings=[1, 1, 1, 1], name='x2paddle_49')
x2paddle_50 = fluid.layers.conv2d(
x2paddle_49,
num_filters=512,
filter_size=[3, 3],
stride=[1, 1],
padding=[0, 0],
dilation=[1, 1],
groups=1,
param_attr='x2paddle_19',
name='x2paddle_50',
bias_attr='x2paddle_20')
x2paddle_51 = fluid.layers.relu(x2paddle_50, name='x2paddle_51')
return x2paddle_0, x2paddle_51
......@@ -2,32 +2,38 @@
from __future__ import absolute_import
from __future__ import division
import argparse
import ast
import copy
import time
import os
import argparse
import time
import numpy as np
import paddle.fluid as fluid
import paddlehub as hub
from paddle.fluid.core import PaddleTensor, AnalysisConfig, create_paddle_predictor
from paddlehub.module.module import moduleinfo, runnable, serving
from stylepro_artistic.encoder_network import encoder_net
from stylepro_artistic.decoder_network import decoder_net
from stylepro_artistic.processor import postprocess, fr, cv2_to_base64, base64_to_cv2
import paddle
from paddle.inference import Config
from paddle.inference import create_predictor
from stylepro_artistic.data_feed import reader
from stylepro_artistic.processor import base64_to_cv2
from stylepro_artistic.processor import cv2_to_base64
from stylepro_artistic.processor import fr
from stylepro_artistic.processor import postprocess
import paddlehub as hub
from paddlehub.module.module import moduleinfo
from paddlehub.module.module import runnable
from paddlehub.module.module import serving
# coding=utf-8
@moduleinfo(
name="stylepro_artistic",
version="1.0.2",
version="1.0.3",
type="cv/style_transfer",
summary="StylePro Artistic is an algorithm for Arbitrary image style, which is parameter-free, fast yet effective.",
author="baidu-bdl",
author_email="")
class StyleProjection(hub.Module):
def _initialize(self):
self.pretrained_encoder_net = os.path.join(self.directory, "style_projection_enc")
self.pretrained_decoder_net = os.path.join(self.directory, "style_projection_dec")
......@@ -38,15 +44,15 @@ class StyleProjection(hub.Module):
predictor config setting
"""
# encoder
cpu_config_enc = AnalysisConfig(self.pretrained_encoder_net)
cpu_config_enc = Config(self.pretrained_encoder_net)
cpu_config_enc.disable_glog_info()
cpu_config_enc.disable_gpu()
self.cpu_predictor_enc = create_paddle_predictor(cpu_config_enc)
self.cpu_predictor_enc = create_predictor(cpu_config_enc)
# decoder
cpu_config_dec = AnalysisConfig(self.pretrained_decoder_net)
cpu_config_dec = Config(self.pretrained_decoder_net)
cpu_config_dec.disable_glog_info()
cpu_config_dec.disable_gpu()
self.cpu_predictor_dec = create_paddle_predictor(cpu_config_dec)
self.cpu_predictor_dec = create_predictor(cpu_config_dec)
try:
_places = os.environ["CUDA_VISIBLE_DEVICES"]
......@@ -56,15 +62,15 @@ class StyleProjection(hub.Module):
use_gpu = False
if use_gpu:
# encoder
gpu_config_enc = AnalysisConfig(self.pretrained_encoder_net)
gpu_config_enc = Config(self.pretrained_encoder_net)
gpu_config_enc.disable_glog_info()
gpu_config_enc.enable_use_gpu(memory_pool_init_size_mb=1000, device_id=0)
self.gpu_predictor_enc = create_paddle_predictor(gpu_config_enc)
self.gpu_predictor_enc = create_predictor(gpu_config_enc)
# decoder
gpu_config_dec = AnalysisConfig(self.pretrained_decoder_net)
gpu_config_dec = Config(self.pretrained_decoder_net)
gpu_config_dec.disable_glog_info()
gpu_config_dec.enable_use_gpu(memory_pool_init_size_mb=1000, device_id=0)
self.gpu_predictor_dec = create_paddle_predictor(gpu_config_dec)
self.gpu_predictor_dec = create_predictor(gpu_config_dec)
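# The methods below follow the paddle.inference flow that replaces the old PaddleTensor API:
# look up the input/output handles by name, copy the numpy batch in with copy_from_cpu(),
# call predictor.run(), and read the result back with copy_to_cpu().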
def style_transfer(self,
images=None,
......@@ -102,22 +108,36 @@ class StyleProjection(hub.Module):
"Environment Variable CUDA_VISIBLE_DEVICES is not set correctly. If you wanna use gpu, please set CUDA_VISIBLE_DEVICES as cuda_device_id."
)
predictor_enc = self.gpu_predictor_enc if use_gpu else self.cpu_predictor_enc
input_names_enc = predictor_enc.get_input_names()
input_handle_enc = predictor_enc.get_input_handle(input_names_enc[0])
output_names_enc = predictor_enc.get_output_names()
output_handle_enc = predictor_enc.get_output_handle(output_names_enc[0])
predictor_dec = self.gpu_predictor_dec if use_gpu else self.cpu_predictor_dec
input_names_dec = predictor_dec.get_input_names()
input_handle_dec = predictor_dec.get_input_handle(input_names_dec[0])
output_names_dec = predictor_dec.get_output_names()
output_handle_dec = predictor_dec.get_output_handle(output_names_dec[0])
im_output = []
for component, w, h in reader(images, paths):
content = PaddleTensor(component['content_arr'].copy())
content_feats = self.gpu_predictor_enc.run([content]) if use_gpu else self.cpu_predictor_enc.run([content])
input_handle_enc.copy_from_cpu(component['content_arr'])
predictor_enc.run()
content_feats = output_handle_enc.copy_to_cpu()
accumulate = np.zeros((3, 512, 512))
for idx, style_arr in enumerate(component['styles_arr_list']):
style = PaddleTensor(style_arr.copy())
# encode
style_feats = self.gpu_predictor_enc.run([style]) if use_gpu else self.cpu_predictor_enc.run([style])
fr_feats = fr(content_feats[0].as_ndarray(), style_feats[0].as_ndarray(), alpha)
fr_feats = PaddleTensor(fr_feats.copy())
input_handle_enc.copy_from_cpu(style_arr)
predictor_enc.run()
style_feats = output_handle_enc.copy_to_cpu()
fr_feats = fr(content_feats, style_feats, alpha)
# decode
predict_outputs = self.gpu_predictor_dec.run([fr_feats]) if use_gpu else self.cpu_predictor_dec.run(
[fr_feats])
input_handle_dec.copy_from_cpu(fr_feats)
predictor_dec.run()
predict_outputs = output_handle_dec.copy_to_cpu()
# interpolation
accumulate += predict_outputs[0].as_ndarray()[0] * component['style_interpolation_weights'][idx]
accumulate += predict_outputs[0] * component['style_interpolation_weights'][idx]
# postprocess
save_im_name = 'ndarray_{}.jpg'.format(time.time())
result = postprocess(accumulate, output_dir, save_im_name, visualization, size=(w, h))
......@@ -134,14 +154,13 @@ class StyleProjection(hub.Module):
if combined:
model_filename = "__model__" if not model_filename else model_filename
params_filename = "__params__" if not params_filename else params_filename
place = fluid.CPUPlace()
exe = fluid.Executor(place)
place = paddle.CPUPlace()
exe = paddle.Executor(place)
encode_program, encode_feeded_var_names, encode_target_vars = fluid.io.load_inference_model(
encode_program, encode_feeded_var_names, encode_target_vars = paddle.static.load_inference_model(
dirname=self.pretrained_encoder_net, executor=exe)
fluid.io.save_inference_model(
dirname=dirname,
paddle.static.save_inference_model(dirname=dirname,
main_program=encode_program,
executor=exe,
feeded_var_names=encode_feeded_var_names,
......@@ -153,14 +172,13 @@ class StyleProjection(hub.Module):
if combined:
model_filename = "__model__" if not model_filename else model_filename
params_filename = "__params__" if not params_filename else params_filename
place = fluid.CPUPlace()
exe = fluid.Executor(place)
place = paddle.CPUPlace()
exe = paddle.Executor(place)
decode_program, decode_feeded_var_names, decode_target_vars = fluid.io.load_inference_model(
decode_program, decode_feeded_var_names, decode_target_vars = paddle.static.load_inference_model(
dirname=self.pretrained_decoder_net, executor=exe)
fluid.io.save_inference_model(
dirname=dirname,
paddle.static.save_inference_model(dirname=dirname,
main_program=decode_program,
executor=exe,
feeded_var_names=decode_feeded_var_names,
......@@ -186,8 +204,7 @@ class StyleProjection(hub.Module):
"""
Run as a command.
"""
self.parser = argparse.ArgumentParser(
description="Run the {} module.".format(self.name),
self.parser = argparse.ArgumentParser(description="Run the {} module.".format(self.name),
prog='hub run {}'.format(self.name),
usage='%(prog)s',
add_help=True)
......@@ -202,20 +219,29 @@ class StyleProjection(hub.Module):
paths = [{'content': args.content, 'styles': args.styles.split(',')}]
else:
paths = [{'content': args.content, 'styles': args.styles.split(','), 'weights': list(args.weights)}]
results = self.style_transfer(
paths=paths, alpha=args.alpha, use_gpu=args.use_gpu, output_dir=args.output_dir, visualization=True)
results = self.style_transfer(paths=paths,
alpha=args.alpha,
use_gpu=args.use_gpu,
output_dir=args.output_dir,
visualization=True)
return results
def add_module_config_arg(self):
"""
Add the command config options.
"""
self.arg_config_group.add_argument(
'--use_gpu', type=ast.literal_eval, default=False, help="whether to use GPU or not")
self.arg_config_group.add_argument(
'--output_dir', type=str, default='transfer_result', help="The directory to save output images.")
self.arg_config_group.add_argument(
'--visualization', type=ast.literal_eval, default=True, help="whether to save output as images.")
self.arg_config_group.add_argument('--use_gpu',
type=ast.literal_eval,
default=False,
help="whether use GPU or not")
self.arg_config_group.add_argument('--output_dir',
type=str,
default='transfer_result',
help="The directory to save output images.")
self.arg_config_group.add_argument('--visualization',
type=ast.literal_eval,
default=True,
help="whether to save output as images.")
def add_module_input_arg(self):
"""
......@@ -223,7 +249,11 @@ class StyleProjection(hub.Module):
"""
self.arg_input_group.add_argument('--content', type=str, help="path to content.")
self.arg_input_group.add_argument('--styles', type=str, help="path to styles.")
self.arg_input_group.add_argument(
'--weights', type=ast.literal_eval, default=None, help="interpolation weights of styles.")
self.arg_config_group.add_argument(
'--alpha', type=ast.literal_eval, default=1, help="The parameter to control the transform degree.")
self.arg_input_group.add_argument('--weights',
type=ast.literal_eval,
default=None,
help="interpolation weights of styles.")
self.arg_config_group.add_argument('--alpha',
type=ast.literal_eval,
default=1,
help="The parameter to control the tranform degree.")
......@@ -31,7 +31,7 @@
- ### 2. Installation
- ```shell
$ hub install resnet50_vd_animals==1.0.0
$ hub install resnet50_vd_animals
```
- If you encounter problems during installation, please refer to: [Windows installation guide](../../../../docs/docs_ch/get_start/windows_quickstart.md)
| [Linux installation guide](../../../../docs/docs_ch/get_start/linux_quickstart.md) | [macOS installation guide](../../../../docs/docs_ch/get_start/mac_quickstart.md)
......@@ -167,3 +167,11 @@
* 1.0.0
First release
* 1.0.1
Remove fluid api
- ```shell
$ hub install resnet50_vd_animals==1.0.1
```
......@@ -171,3 +171,10 @@
First release
* 1.0.1
Remove fluid api
- ```shell
$ hub install resnet50_vd_animals==1.0.1
```
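A minimal prediction sketch for the upgraded module (the image path is a hypothetical placeholder; `classification` is the API defined in `module.py` below):

```python
import cv2
import paddlehub as hub

# Classify one image with the animals classifier (hypothetical image path).
classifier = hub.Module(name="resnet50_vd_animals")
result = classifier.classification(images=[cv2.imread('/PATH/TO/IMAGE.jpg')], top_k=3)
print(result)
```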
......@@ -2,22 +2,23 @@
from __future__ import absolute_import
from __future__ import division
import ast
import argparse
import ast
import os
import numpy as np
import paddle.fluid as fluid
import paddlehub as hub
import paddle
from paddle.inference import Config
from paddle.inference import create_predictor
from resnet50_vd_animals.data_feed import reader
from resnet50_vd_animals.processor import base64_to_cv2
from resnet50_vd_animals.processor import postprocess
from paddlehub.module.module import moduleinfo, runnable, serving
import paddlehub as hub
from paddlehub.common.paddle_helper import add_vars_prefix
from resnet50_vd_animals.processor import postprocess, base64_to_cv2
from resnet50_vd_animals.data_feed import reader
from resnet50_vd_animals.resnet_vd import ResNet50_vd
from paddlehub.module.module import moduleinfo
from paddlehub.module.module import runnable
from paddlehub.module.module import serving
@moduleinfo(
......@@ -26,8 +27,9 @@ from resnet50_vd_animals.resnet_vd import ResNet50_vd
author="baidu-vis",
author_email="",
summary="ResNet50vd is a image classfication model, this module is trained with Baidu's self-built animals dataset.",
version="1.0.0")
version="1.0.1")
class ResNet50vdAnimals(hub.Module):
def _initialize(self):
self.default_pretrained_model_path = os.path.join(self.directory, "model")
label_file = os.path.join(self.directory, "label_list.txt")
......@@ -97,54 +99,6 @@ class ResNet50vdAnimals(hub.Module):
xpu_config.enable_xpu(100)
self.xpu_predictor = create_predictor(xpu_config)
def context(self, trainable=True, pretrained=True):
"""context for transfer learning.
Args:
trainable (bool): Set parameters in program to be trainable.
pretrained (bool) : Whether to load pretrained model.
Returns:
inputs (dict): key is 'image', corresponding value is the image tensor.
outputs (dict): key is :
'classification', corresponding value is the result of classification.
'feature_map', corresponding value is the result of the layer before the fully connected layer.
context_prog (fluid.Program): program for transfer learning.
"""
context_prog = fluid.Program()
startup_prog = fluid.Program()
with fluid.program_guard(context_prog, startup_prog):
with fluid.unique_name.guard():
image = fluid.layers.data(name="image", shape=[3, 224, 224], dtype="float32")
resnet_vd = ResNet50_vd()
output, feature_map = resnet_vd.net(input=image, class_dim=len(self.label_list))
name_prefix = '@HUB_{}@'.format(self.name)
inputs = {'image': name_prefix + image.name}
outputs = {'classification': name_prefix + output.name, 'feature_map': name_prefix + feature_map.name}
add_vars_prefix(context_prog, name_prefix)
add_vars_prefix(startup_prog, name_prefix)
global_vars = context_prog.global_block().vars
inputs = {key: global_vars[value] for key, value in inputs.items()}
outputs = {key: global_vars[value] for key, value in outputs.items()}
place = fluid.CPUPlace()
exe = fluid.Executor(place)
# pretrained
if pretrained:
def _if_exist(var):
b = os.path.exists(os.path.join(self.default_pretrained_model_path, var.name))
return b
fluid.io.load_vars(exe, self.default_pretrained_model_path, context_prog, predicate=_if_exist)
else:
exe.run(startup_prog)
# trainable
for param in context_prog.global_block().iter_parameters():
param.trainable = trainable
return inputs, outputs, context_prog
def classification(self, images=None, paths=None, batch_size=1, use_gpu=False, top_k=1, use_device=None):
"""
API for image classification.
......@@ -215,13 +169,13 @@ class ResNet50vdAnimals(hub.Module):
if combined:
model_filename = "__model__" if not model_filename else model_filename
params_filename = "__params__" if not params_filename else params_filename
place = fluid.CPUPlace()
exe = fluid.Executor(place)
place = paddle.CPUPlace()
exe = paddle.Executor(place)
program, feeded_var_names, target_vars = fluid.io.load_inference_model(
program, feeded_var_names, target_vars = paddle.static.load_inference_model(
dirname=self.default_pretrained_model_path, executor=exe)
fluid.io.save_inference_model(dirname=dirname,
paddle.static.save_inference_model(dirname=dirname,
main_program=program,
executor=exe,
feeded_var_names=feeded_var_names,
......
#copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
#Licensed under the Apache License, Version 2.0 (the "License");
#you may not use this file except in compliance with the License.
#You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#Unless required by applicable law or agreed to in writing, software
#distributed under the License is distributed on an "AS IS" BASIS,
#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#See the License for the specific language governing permissions and
#limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import paddle
import paddle.fluid as fluid
from paddle.fluid.param_attr import ParamAttr
__all__ = ["ResNet", "ResNet50_vd", "ResNet101_vd", "ResNet152_vd", "ResNet200_vd"]
train_parameters = {
"input_size": [3, 224, 224],
"input_mean": [0.485, 0.456, 0.406],
"input_std": [0.229, 0.224, 0.225],
"learning_strategy": {
"name": "piecewise_decay",
"batch_size": 256,
"epochs": [30, 60, 90],
"steps": [0.1, 0.01, 0.001, 0.0001]
}
}
class ResNet():
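# ResNet-vd backbone builder in legacy fluid static-graph style: is_3x3=True replaces the
# 7x7 stem with three 3x3 convs, and conv_bn_layer_new implements the vd shortcut that
# average-pools before a stride-1 1x1 conv instead of using a strided conv.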
def __init__(self, layers=50, is_3x3=False):
self.params = train_parameters
self.layers = layers
self.is_3x3 = is_3x3
def net(self, input, class_dim=1000):
is_3x3 = self.is_3x3
layers = self.layers
supported_layers = [50, 101, 152, 200]
assert layers in supported_layers, \
"supported layers are {} but input layer is {}".format(supported_layers, layers)
if layers == 50:
depth = [3, 4, 6, 3]
elif layers == 101:
depth = [3, 4, 23, 3]
elif layers == 152:
depth = [3, 8, 36, 3]
elif layers == 200:
depth = [3, 12, 48, 3]
num_filters = [64, 128, 256, 512]
if is_3x3 == False:
conv = self.conv_bn_layer(input=input, num_filters=64, filter_size=7, stride=2, act='relu')
else:
conv = self.conv_bn_layer(input=input, num_filters=32, filter_size=3, stride=2, act='relu', name='conv1_1')
conv = self.conv_bn_layer(input=conv, num_filters=32, filter_size=3, stride=1, act='relu', name='conv1_2')
conv = self.conv_bn_layer(input=conv, num_filters=64, filter_size=3, stride=1, act='relu', name='conv1_3')
conv = fluid.layers.pool2d(input=conv, pool_size=3, pool_stride=2, pool_padding=1, pool_type='max')
for block in range(len(depth)):
for i in range(depth[block]):
if layers in [101, 152, 200] and block == 2:
if i == 0:
conv_name = "res" + str(block + 2) + "a"
else:
conv_name = "res" + str(block + 2) + "b" + str(i)
else:
conv_name = "res" + str(block + 2) + chr(97 + i)
conv = self.bottleneck_block(
input=conv,
num_filters=num_filters[block],
stride=2 if i == 0 and block != 0 else 1,
if_first=block == 0,
name=conv_name)
pool = fluid.layers.pool2d(input=conv, pool_size=7, pool_type='avg', global_pooling=True)
stdv = 1.0 / math.sqrt(pool.shape[1] * 1.0)
out = fluid.layers.fc(
input=pool,
size=class_dim,
param_attr=fluid.param_attr.ParamAttr(initializer=fluid.initializer.Uniform(-stdv, stdv)))
return out, pool
def conv_bn_layer(self, input, num_filters, filter_size, stride=1, groups=1, act=None, name=None):
conv = fluid.layers.conv2d(
input=input,
num_filters=num_filters,
filter_size=filter_size,
stride=stride,
padding=(filter_size - 1) // 2,
groups=groups,
act=None,
param_attr=ParamAttr(name=name + "_weights"),
bias_attr=False)
if name == "conv1":
bn_name = "bn_" + name
else:
bn_name = "bn" + name[3:]
return fluid.layers.batch_norm(
input=conv,
act=act,
param_attr=ParamAttr(name=bn_name + '_scale'),
bias_attr=ParamAttr(bn_name + '_offset'),
moving_mean_name=bn_name + '_mean',
moving_variance_name=bn_name + '_variance')
def conv_bn_layer_new(self, input, num_filters, filter_size, stride=1, groups=1, act=None, name=None):
pool = fluid.layers.pool2d(input=input, pool_size=2, pool_stride=2, pool_padding=0, pool_type='avg')
conv = fluid.layers.conv2d(
input=pool,
num_filters=num_filters,
filter_size=filter_size,
stride=1,
padding=(filter_size - 1) // 2,
groups=groups,
act=None,
param_attr=ParamAttr(name=name + "_weights"),
bias_attr=False)
if name == "conv1":
bn_name = "bn_" + name
else:
bn_name = "bn" + name[3:]
return fluid.layers.batch_norm(
input=conv,
act=act,
param_attr=ParamAttr(name=bn_name + '_scale'),
bias_attr=ParamAttr(bn_name + '_offset'),
moving_mean_name=bn_name + '_mean',
moving_variance_name=bn_name + '_variance')
def shortcut(self, input, ch_out, stride, name, if_first=False):
ch_in = input.shape[1]
if ch_in != ch_out or stride != 1:
if if_first:
return self.conv_bn_layer(input, ch_out, 1, stride, name=name)
else:
return self.conv_bn_layer_new(input, ch_out, 1, stride, name=name)
else:
return input
def bottleneck_block(self, input, num_filters, stride, name, if_first):
conv0 = self.conv_bn_layer(
input=input, num_filters=num_filters, filter_size=1, act='relu', name=name + "_branch2a")
conv1 = self.conv_bn_layer(
input=conv0, num_filters=num_filters, filter_size=3, stride=stride, act='relu', name=name + "_branch2b")
conv2 = self.conv_bn_layer(
input=conv1, num_filters=num_filters * 4, filter_size=1, act=None, name=name + "_branch2c")
short = self.shortcut(input, num_filters * 4, stride, if_first=if_first, name=name + "_branch1")
return fluid.layers.elementwise_add(x=short, y=conv2, act='relu')
def ResNet50_vd():
model = ResNet(layers=50, is_3x3=True)
return model
def ResNet101_vd():
model = ResNet(layers=101, is_3x3=True)
return model
def ResNet152_vd():
model = ResNet(layers=152, is_3x3=True)
return model
def ResNet200_vd():
model = ResNet(layers=200, is_3x3=True)
return model
......@@ -163,8 +163,10 @@
First release
* 1.2.0
* 1.2.1
Remove fluid api
- ```shell
$ hub install pyramidbox_lite_mobile==1.2.0
$ hub install pyramidbox_lite_mobile==1.2.1
```
......@@ -162,8 +162,10 @@
First release
* 1.2.0
* 1.2.1
Remove fluid api
- ```shell
$ hub install pyramidbox_lite_mobile==1.2.0
$ hub install pyramidbox_lite_mobile==1.2.1
```
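A minimal detection sketch for the upgraded module (the image path is a hypothetical placeholder; `face_detection` is the method defined in `module.py` below):

```python
import cv2
import paddlehub as hub

# Detect faces in one image and save the visualization (hypothetical image path).
face_detector = hub.Module(name="pyramidbox_lite_mobile")
result = face_detector.face_detection(images=[cv2.imread('/PATH/TO/IMAGE.jpg')],
                                      visualization=True,
                                      output_dir='detection_result')
print(result)
```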
......@@ -2,28 +2,32 @@
from __future__ import absolute_import
from __future__ import division
import ast
import argparse
import ast
import os
import numpy as np
import paddle.fluid as fluid
import paddlehub as hub
from paddle.fluid.core import PaddleTensor, AnalysisConfig, create_paddle_predictor
from paddlehub.module.module import moduleinfo, runnable, serving
import paddle
from paddle.inference import Config
from paddle.inference import create_predictor
from pyramidbox_lite_mobile.data_feed import reader
from pyramidbox_lite_mobile.processor import postprocess, base64_to_cv2
from pyramidbox_lite_mobile.processor import base64_to_cv2
from pyramidbox_lite_mobile.processor import postprocess
import paddlehub as hub
from paddlehub.module.module import moduleinfo
from paddlehub.module.module import runnable
from paddlehub.module.module import serving
@moduleinfo(
name="pyramidbox_lite_mobile",
@moduleinfo(name="pyramidbox_lite_mobile",
type="CV/face_detection",
author="baidu-vis",
author_email="",
summary="PyramidBox-Lite-Mobile is a high-performance face detection model.",
version="1.2.0")
version="1.2.1")
class PyramidBoxLiteMobile(hub.Module):
def _initialize(self):
self.default_pretrained_model_path = os.path.join(self.directory, "pyramidbox_lite_mobile_face_detection")
self._set_config()
......@@ -33,10 +37,10 @@ class PyramidBoxLiteMobile(hub.Module):
"""
predictor config setting
"""
cpu_config = AnalysisConfig(self.default_pretrained_model_path)
cpu_config = Config(self.default_pretrained_model_path)
cpu_config.disable_glog_info()
cpu_config.disable_gpu()
self.cpu_predictor = create_paddle_predictor(cpu_config)
self.cpu_predictor = create_predictor(cpu_config)
try:
_places = os.environ["CUDA_VISIBLE_DEVICES"]
......@@ -45,10 +49,10 @@ class PyramidBoxLiteMobile(hub.Module):
except:
use_gpu = False
if use_gpu:
gpu_config = AnalysisConfig(self.default_pretrained_model_path)
gpu_config = Config(self.default_pretrained_model_path)
gpu_config.disable_glog_info()
gpu_config.enable_use_gpu(memory_pool_init_size_mb=1000, device_id=0)
self.gpu_predictor = create_paddle_predictor(gpu_config)
self.gpu_predictor = create_predictor(gpu_config)
def face_detection(self,
images=None,
......@@ -98,10 +102,18 @@ class PyramidBoxLiteMobile(hub.Module):
# process one by one
for element in reader(images, paths, shrink):
image = np.expand_dims(element['image'], axis=0).astype('float32')
image_tensor = PaddleTensor(image.copy())
data_out = self.gpu_predictor.run([image_tensor]) if use_gpu else self.cpu_predictor.run([image_tensor])
out = postprocess(
data_out=data_out[0].as_ndarray(),
predictor = self.gpu_predictor if use_gpu else self.cpu_predictor
input_names = predictor.get_input_names()
input_handle = predictor.get_input_handle(input_names[0])
input_handle.copy_from_cpu(image)
predictor.run()
output_names = predictor.get_output_names()
output_handle = predictor.get_output_handle(output_names[0])
output_data = output_handle.copy_to_cpu()
out = postprocess(data_out=output_data,
org_im=element['org_im'],
org_im_path=element['org_im_path'],
image_width=element['image_width'],
......@@ -117,17 +129,15 @@ class PyramidBoxLiteMobile(hub.Module):
if combined:
model_filename = "__model__" if not model_filename else model_filename
params_filename = "__params__" if not params_filename else params_filename
place = fluid.CPUPlace()
exe = fluid.Executor(place)
place = paddle.CPUPlace()
exe = paddle.Executor(place)
program, feeded_var_names, target_vars = fluid.io.load_inference_model(
program, feeded_var_names, target_vars = paddle.static.load_inference_model(
dirname=self.default_pretrained_model_path, executor=exe)
var = program.global_block().vars['detection_output_0.tmp_1']
var.desc.set_dtype(fluid.core.VarDesc.VarType.INT32)
fluid.io.save_inference_model(
dirname=dirname,
paddle.static.save_inference_model(dirname=dirname,
main_program=program,
executor=exe,
feeded_var_names=feeded_var_names,
......@@ -149,8 +159,7 @@ class PyramidBoxLiteMobile(hub.Module):
"""
Run as a command.
"""
self.parser = argparse.ArgumentParser(
description="Run the {} module.".format(self.name),
self.parser = argparse.ArgumentParser(description="Run the {} module.".format(self.name),
prog='hub run {}'.format(self.name),
usage='%(prog)s',
add_help=True)
......@@ -160,8 +169,7 @@ class PyramidBoxLiteMobile(hub.Module):
self.add_module_config_arg()
self.add_module_input_arg()
args = self.parser.parse_args(argvs)
results = self.face_detection(
paths=[args.input_path],
results = self.face_detection(paths=[args.input_path],
use_gpu=args.use_gpu,
output_dir=args.output_dir,
visualization=args.visualization,
......@@ -173,12 +181,18 @@ class PyramidBoxLiteMobile(hub.Module):
"""
Add the command config options.
"""
self.arg_config_group.add_argument(
'--use_gpu', type=ast.literal_eval, default=False, help="whether to use GPU or not")
self.arg_config_group.add_argument(
'--output_dir', type=str, default='detection_result', help="The directory to save output images.")
self.arg_config_group.add_argument(
'--visualization', type=ast.literal_eval, default=False, help="whether to save output as images.")
self.arg_config_group.add_argument('--use_gpu',
type=ast.literal_eval,
default=False,
help="whether use GPU or not")
self.arg_config_group.add_argument('--output_dir',
type=str,
default='detection_result',
help="The directory to save output images.")
self.arg_config_group.add_argument('--visualization',
type=ast.literal_eval,
default=False,
help="whether to save output as images.")
def add_module_input_arg(self):
"""
......@@ -190,5 +204,7 @@ class PyramidBoxLiteMobile(hub.Module):
type=ast.literal_eval,
default=0.5,
help="resize the image to shrink * original_shape before feeding into network.")
self.arg_input_group.add_argument(
'--confs_threshold', type=ast.literal_eval, default=0.6, help="confidence threshold.")
self.arg_input_group.add_argument('--confs_threshold',
type=ast.literal_eval,
default=0.6,
help="confidence threshold.")
......@@ -208,7 +208,10 @@
First release
* 1.3.0
* 1.3.1
Remove fluid api
- ```shell
$ hub install pyramidbox_lite_mobile_mask==1.3.0
$ hub install pyramidbox_lite_mobile_mask==1.3.1
```
......@@ -184,7 +184,10 @@
First release
* 1.3.0
* 1.3.1
Remove fluid api
- ```shell
$ hub install pyramidbox_lite_mobile_mask==1.3.0
$ hub install pyramidbox_lite_mobile_mask==1.3.1
```
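A minimal mask-detection sketch for the upgraded module; passing `face_detector_module` mirrors the hook documented in `_initialize` below (the image path is a hypothetical placeholder):

```python
import cv2
import paddlehub as hub

# Optionally plug in an explicit face detector; by default the module loads
# pyramidbox_lite_mobile itself.
face_detector = hub.Module(name="pyramidbox_lite_mobile")
mask_detector = hub.Module(name="pyramidbox_lite_mobile_mask",
                           face_detector_module=face_detector)
result = mask_detector.face_detection(images=[cv2.imread('/PATH/TO/IMAGE.jpg')],
                                      visualization=True)
print(result)
```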
......@@ -2,18 +2,22 @@
from __future__ import absolute_import
from __future__ import division
import ast
import argparse
import ast
import os
import numpy as np
import paddle.fluid as fluid
import paddlehub as hub
from paddle.fluid.core import PaddleTensor, AnalysisConfig, create_paddle_predictor
from paddlehub.module.module import moduleinfo, runnable, serving
import paddle
from paddle.inference import Config
from paddle.inference import create_predictor
from pyramidbox_lite_mobile_mask.data_feed import reader
from pyramidbox_lite_mobile_mask.processor import postprocess, base64_to_cv2
from pyramidbox_lite_mobile_mask.processor import base64_to_cv2
from pyramidbox_lite_mobile_mask.processor import postprocess
import paddlehub as hub
from paddlehub.module.module import moduleinfo
from paddlehub.module.module import runnable
from paddlehub.module.module import serving
@moduleinfo(
......@@ -23,8 +27,9 @@ from pyramidbox_lite_mobile_mask.processor import postprocess, base64_to_cv2
author_email="",
summary=
"Pyramidbox-Lite-Mobile-Mask is a high-performance face detection model used to detect whether people wear masks.",
version="1.3.0")
version="1.3.1")
class PyramidBoxLiteMobileMask(hub.Module):
def _initialize(self, face_detector_module=None):
"""
Args:
......@@ -42,10 +47,10 @@ class PyramidBoxLiteMobileMask(hub.Module):
"""
predictor config setting
"""
cpu_config = AnalysisConfig(self.default_pretrained_model_path)
cpu_config = Config(self.default_pretrained_model_path)
cpu_config.disable_glog_info()
cpu_config.disable_gpu()
self.cpu_predictor = create_paddle_predictor(cpu_config)
self.cpu_predictor = create_predictor(cpu_config)
try:
_places = os.environ["CUDA_VISIBLE_DEVICES"]
......@@ -54,10 +59,10 @@ class PyramidBoxLiteMobileMask(hub.Module):
except:
use_gpu = False
if use_gpu:
gpu_config = AnalysisConfig(self.default_pretrained_model_path)
gpu_config = Config(self.default_pretrained_model_path)
gpu_config.disable_glog_info()
gpu_config.enable_use_gpu(memory_pool_init_size_mb=1000, device_id=0)
self.gpu_predictor = create_paddle_predictor(gpu_config)
self.gpu_predictor = create_predictor(gpu_config)
def set_face_detector_module(self, face_detector_module):
"""
......@@ -146,12 +151,18 @@ class PyramidBoxLiteMobileMask(hub.Module):
pass
image_arr = np.squeeze(np.array(batch_data), axis=1)
image_tensor = PaddleTensor(image_arr.copy())
data_out = self.gpu_predictor.run([image_tensor]) if use_gpu else self.cpu_predictor.run([image_tensor])
# len(data_out) == 1
# data_out[0].as_ndarray().shape == (-1, 2)
data_out = data_out[0].as_ndarray()
predict_out = np.concatenate((predict_out, data_out))
predictor = self.gpu_predictor if use_gpu else self.cpu_predictor
input_names = predictor.get_input_names()
input_handle = predictor.get_input_handle(input_names[0])
input_handle.copy_from_cpu(image_arr)
predictor.run()
output_names = predictor.get_output_names()
output_handle = predictor.get_output_handle(output_names[0])
output_data = output_handle.copy_to_cpu()
predict_out = np.concatenate((predict_out, output_data))
predict_out = predict_out[1:]
# postprocess one by one
......@@ -160,8 +171,7 @@ class PyramidBoxLiteMobileMask(hub.Module):
detect_faces_list = [handled['face'] for handled in all_element[i]['preprocessed']]
interval_left = sum(element_image_num[0:i])
interval_right = interval_left + element_image_num[i]
out = postprocess(
confidence_out=predict_out[interval_left:interval_right],
out = postprocess(confidence_out=predict_out[interval_left:interval_right],
org_im=all_element[i]['org_im'],
org_im_path=all_element[i]['org_im_path'],
detected_faces=detect_faces_list,
......@@ -183,14 +193,13 @@ class PyramidBoxLiteMobileMask(hub.Module):
if combined:
model_filename = "__model__" if not model_filename else model_filename
params_filename = "__params__" if not params_filename else params_filename
place = fluid.CPUPlace()
exe = fluid.Executor(place)
place = paddle.CPUPlace()
exe = paddle.Executor(place)
program, feeded_var_names, target_vars = fluid.io.load_inference_model(
program, feeded_var_names, target_vars = paddle.static.load_inference_model(
dirname=self.default_pretrained_model_path, executor=exe)
fluid.io.save_inference_model(
dirname=dirname,
paddle.static.save_inference_model(dirname=dirname,
main_program=program,
executor=exe,
feeded_var_names=feeded_var_names,
......@@ -212,8 +221,7 @@ class PyramidBoxLiteMobileMask(hub.Module):
"""
Run as a command.
"""
self.parser = argparse.ArgumentParser(
description="Run the {} module.".format(self.name),
self.parser = argparse.ArgumentParser(description="Run the {} module.".format(self.name),
prog='hub run {}'.format(self.name),
usage='%(prog)s',
add_help=True)
......@@ -223,8 +231,7 @@ class PyramidBoxLiteMobileMask(hub.Module):
self.add_module_config_arg()
self.add_module_input_arg()
args = self.parser.parse_args(argvs)
results = self.face_detection(
paths=[args.input_path],
results = self.face_detection(paths=[args.input_path],
use_gpu=args.use_gpu,
output_dir=args.output_dir,
visualization=args.visualization,
......@@ -236,12 +243,18 @@ class PyramidBoxLiteMobileMask(hub.Module):
"""
Add the command config options.
"""
self.arg_config_group.add_argument(
'--use_gpu', type=ast.literal_eval, default=False, help="whether to use GPU or not")
self.arg_config_group.add_argument(
'--output_dir', type=str, default='detection_result', help="The directory to save output images.")
self.arg_config_group.add_argument(
'--visualization', type=ast.literal_eval, default=False, help="whether to save output as images.")
self.arg_config_group.add_argument('--use_gpu',
type=ast.literal_eval,
default=False,
help="whether use GPU or not")
self.arg_config_group.add_argument('--output_dir',
type=str,
default='detection_result',
help="The directory to save output images.")
self.arg_config_group.add_argument('--visualization',
type=ast.literal_eval,
default=False,
help="whether to save output as images.")
def add_module_input_arg(self):
"""
......@@ -253,5 +266,7 @@ class PyramidBoxLiteMobileMask(hub.Module):
type=ast.literal_eval,
default=0.5,
help="resize the image to `shrink * original_shape` before feeding into network.")
self.arg_input_group.add_argument(
'--confs_threshold', type=ast.literal_eval, default=0.6, help="confidence threshold.")
self.arg_input_group.add_argument('--confs_threshold',
type=ast.literal_eval,
default=0.6,
help="confidence threshold.")
......@@ -166,6 +166,11 @@
* 1.2.0
Fix the problem of reading numpy data
* 1.2.1
Remove fluid api
- ```shell
$ hub install pyramidbox_lite_server==1.2.0
$ hub install pyramidbox_lite_server==1.2.1
```
......@@ -166,6 +166,11 @@
* 1.2.0
Fix the problem of reading numpy data
* 1.2.1
Remove fluid api
- ```shell
$ hub install pyramidbox_lite_server==1.2.0
$ hub install pyramidbox_lite_server==1.2.1
```
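The module also imports PaddleHub's `serving` decorator; a hedged sketch of calling it over HTTP, assuming the standard PaddleHub serving workflow (`hub serving start -m pyramidbox_lite_server` running on the default host and port, hypothetical image path):

```python
import base64
import json

import cv2
import requests

def cv2_to_base64(image):
    # Encode an OpenCV image as a base64 JPEG string for the serving request.
    return base64.b64encode(cv2.imencode('.jpg', image)[1].tobytes()).decode('utf8')

data = {'images': [cv2_to_base64(cv2.imread('/PATH/TO/IMAGE.jpg'))]}
headers = {'Content-type': 'application/json'}
url = 'http://127.0.0.1:8866/predict/pyramidbox_lite_server'
r = requests.post(url=url, headers=headers, data=json.dumps(data))
print(r.json()['results'])
```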
......@@ -2,28 +2,32 @@
from __future__ import absolute_import
from __future__ import division
import ast
import argparse
import ast
import os
import numpy as np
import paddle.fluid as fluid
import paddlehub as hub
from paddle.fluid.core import PaddleTensor, AnalysisConfig, create_paddle_predictor
from paddlehub.module.module import moduleinfo, runnable, serving
import paddle
from paddle.inference import Config
from paddle.inference import create_predictor
from pyramidbox_lite_server.data_feed import reader
from pyramidbox_lite_server.processor import postprocess, base64_to_cv2
from pyramidbox_lite_server.processor import base64_to_cv2
from pyramidbox_lite_server.processor import postprocess
import paddlehub as hub
from paddlehub.module.module import moduleinfo
from paddlehub.module.module import runnable
from paddlehub.module.module import serving
@moduleinfo(
name="pyramidbox_lite_server",
@moduleinfo(name="pyramidbox_lite_server",
type="CV/face_detection",
author="baidu-vis",
author_email="",
summary="PyramidBox-Lite-Server is a high-performance face detection model.",
version="1.2.0")
version="1.2.1")
class PyramidBoxLiteServer(hub.Module):
def _initialize(self):
self.default_pretrained_model_path = os.path.join(self.directory, "pyramidbox_lite_server_face_detection")
self._set_config()
......@@ -33,10 +37,10 @@ class PyramidBoxLiteServer(hub.Module):
"""
predictor config setting
"""
cpu_config = AnalysisConfig(self.default_pretrained_model_path)
cpu_config = Config(self.default_pretrained_model_path)
cpu_config.disable_glog_info()
cpu_config.disable_gpu()
self.cpu_predictor = create_paddle_predictor(cpu_config)
self.cpu_predictor = create_predictor(cpu_config)
try:
_places = os.environ["CUDA_VISIBLE_DEVICES"]
......@@ -45,10 +49,10 @@ class PyramidBoxLiteServer(hub.Module):
except:
use_gpu = False
if use_gpu:
gpu_config = AnalysisConfig(self.default_pretrained_model_path)
gpu_config = Config(self.default_pretrained_model_path)
gpu_config.disable_glog_info()
gpu_config.enable_use_gpu(memory_pool_init_size_mb=1000, device_id=0)
self.gpu_predictor = create_paddle_predictor(gpu_config)
self.gpu_predictor = create_predictor(gpu_config)
def face_detection(self,
images=None,
......@@ -98,10 +102,18 @@ class PyramidBoxLiteServer(hub.Module):
# process one by one
for element in reader(images, paths, shrink):
image = np.expand_dims(element['image'], axis=0).astype('float32')
image_tensor = PaddleTensor(image.copy())
data_out = self.gpu_predictor.run([image_tensor]) if use_gpu else self.cpu_predictor.run([image_tensor])
out = postprocess(
data_out=data_out[0].as_ndarray(),
predictor = self.gpu_predictor if use_gpu else self.cpu_predictor
input_names = predictor.get_input_names()
input_handle = predictor.get_input_handle(input_names[0])
input_handle.copy_from_cpu(image)
predictor.run()
output_names = predictor.get_output_names()
output_handle = predictor.get_output_handle(output_names[0])
output_data = output_handle.copy_to_cpu()
out = postprocess(data_out=output_data,
org_im=element['org_im'],
org_im_path=element['org_im_path'],
image_width=element['image_width'],
......@@ -117,14 +129,13 @@ class PyramidBoxLiteServer(hub.Module):
if combined:
model_filename = "__model__" if not model_filename else model_filename
params_filename = "__params__" if not params_filename else params_filename
place = fluid.CPUPlace()
exe = fluid.Executor(place)
place = paddle.CPUPlace()
exe = paddle.Executor(place)
program, feeded_var_names, target_vars = fluid.io.load_inference_model(
program, feeded_var_names, target_vars = paddle.static.load_inference_model(
dirname=self.default_pretrained_model_path, executor=exe)
fluid.io.save_inference_model(
dirname=dirname,
paddle.static.save_inference_model(dirname=dirname,
main_program=program,
executor=exe,
feeded_var_names=feeded_var_names,
......@@ -146,8 +157,7 @@ class PyramidBoxLiteServer(hub.Module):
"""
Run as a command.
"""
self.parser = argparse.ArgumentParser(
description="Run the {} module.".format(self.name),
self.parser = argparse.ArgumentParser(description="Run the {} module.".format(self.name),
prog='hub run {}'.format(self.name),
usage='%(prog)s',
add_help=True)
......@@ -157,8 +167,7 @@ class PyramidBoxLiteServer(hub.Module):
self.add_module_config_arg()
self.add_module_input_arg()
args = self.parser.parse_args(argvs)
results = self.face_detection(
paths=[args.input_path],
results = self.face_detection(paths=[args.input_path],
use_gpu=args.use_gpu,
output_dir=args.output_dir,
visualization=args.visualization,
......@@ -170,12 +179,18 @@ class PyramidBoxLiteServer(hub.Module):
"""
Add the command config options.
"""
self.arg_config_group.add_argument(
'--use_gpu', type=ast.literal_eval, default=False, help="whether to use GPU or not")
self.arg_config_group.add_argument(
'--output_dir', type=str, default='detection_result', help="The directory to save output images.")
self.arg_config_group.add_argument(
'--visualization', type=ast.literal_eval, default=False, help="whether to save output as images.")
self.arg_config_group.add_argument('--use_gpu',
type=ast.literal_eval,
default=False,
help="whether use GPU or not")
self.arg_config_group.add_argument('--output_dir',
type=str,
default='detection_result',
help="The directory to save output images.")
self.arg_config_group.add_argument('--visualization',
type=ast.literal_eval,
default=False,
help="whether to save output as images.")
def add_module_input_arg(self):
"""
......@@ -187,5 +202,7 @@ class PyramidBoxLiteServer(hub.Module):
type=ast.literal_eval,
default=0.5,
help="resize the image to shrink * original_shape before feeding into network.")
self.arg_input_group.add_argument(
'--confs_threshold', type=ast.literal_eval, default=0.6, help="confidence threshold.")
self.arg_input_group.add_argument('--confs_threshold',
type=ast.literal_eval,
default=0.6,
help="confidence threshold.")
......@@ -208,7 +208,10 @@
First release
* 1.3.1
* 1.3.2
Remove fluid api
- ```shell
$ hub install pyramidbox_lite_server_mask==1.3.1
$ hub install pyramidbox_lite_server_mask==1.3.2
```
......@@ -185,7 +185,10 @@
First release
* 1.3.1
* 1.3.2
Remove fluid api
- ```shell
$ hub install pyramidbox_lite_server_mask==1.3.1
$ hub install pyramidbox_lite_server_mask==1.3.2
```
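A minimal export sketch; it calls the `save_inference_model` signature defined in `module.py` below, which writes the face detector and the mask classifier into separate sub-directories (`./inference` is a hypothetical output path):

```python
import paddlehub as hub

# Export both sub-models (pyramidbox_lite detector + mask classifier) as inference models.
mask_detector = hub.Module(name="pyramidbox_lite_server_mask")
mask_detector.save_inference_model(dirname='./inference', combined=True)
```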
......@@ -2,18 +2,22 @@
from __future__ import absolute_import
from __future__ import division
import ast
import argparse
import ast
import os
import numpy as np
import paddle.fluid as fluid
import paddlehub as hub
from paddle.fluid.core import PaddleTensor, AnalysisConfig, create_paddle_predictor
from paddlehub.module.module import moduleinfo, runnable, serving
import paddle
from paddle.inference import Config
from paddle.inference import create_predictor
from pyramidbox_lite_server_mask.data_feed import reader
from pyramidbox_lite_server_mask.processor import postprocess, base64_to_cv2
from pyramidbox_lite_server_mask.processor import base64_to_cv2
from pyramidbox_lite_server_mask.processor import postprocess
import paddlehub as hub
from paddlehub.module.module import moduleinfo
from paddlehub.module.module import runnable
from paddlehub.module.module import serving
@moduleinfo(
......@@ -23,15 +27,15 @@ from pyramidbox_lite_server_mask.processor import postprocess, base64_to_cv2
author_email="",
summary=
"PyramidBox-Lite-Server-Mask is a high-performance face detection model used to detect whether people wear masks.",
version="1.3.1")
version="1.3.2")
class PyramidBoxLiteServerMask(hub.Module):
def _initialize(self, face_detector_module=None):
"""
Args:
face_detector_module (class): module to detect face.
"""
self.default_pretrained_model_path = os.path.join(
self.directory, "pyramidbox_lite_server_mask_model")
self.default_pretrained_model_path = os.path.join(self.directory, "pyramidbox_lite_server_mask_model")
if face_detector_module is None:
self.face_detector = hub.Module(name='pyramidbox_lite_server')
else:
......@@ -43,10 +47,10 @@ class PyramidBoxLiteServerMask(hub.Module):
"""
predictor config setting
"""
cpu_config = AnalysisConfig(self.default_pretrained_model_path)
cpu_config = Config(self.default_pretrained_model_path)
cpu_config.disable_glog_info()
cpu_config.disable_gpu()
self.cpu_predictor = create_paddle_predictor(cpu_config)
self.cpu_predictor = create_predictor(cpu_config)
try:
_places = os.environ["CUDA_VISIBLE_DEVICES"]
......@@ -55,11 +59,10 @@ class PyramidBoxLiteServerMask(hub.Module):
except:
use_gpu = False
if use_gpu:
gpu_config = AnalysisConfig(self.default_pretrained_model_path)
gpu_config = Config(self.default_pretrained_model_path)
gpu_config.disable_glog_info()
gpu_config.enable_use_gpu(
memory_pool_init_size_mb=1000, device_id=0)
self.gpu_predictor = create_paddle_predictor(gpu_config)
gpu_config.enable_use_gpu(memory_pool_init_size_mb=1000, device_id=0)
self.gpu_predictor = create_predictor(gpu_config)
def set_face_detector_module(self, face_detector_module):
"""
......@@ -123,16 +126,13 @@ class PyramidBoxLiteServerMask(hub.Module):
# get all data
all_element = list()
for yield_data in reader(self.face_detector, shrink, confs_threshold,
images, paths, use_gpu, use_multi_scale):
for yield_data in reader(self.face_detector, shrink, confs_threshold, images, paths, use_gpu, use_multi_scale):
all_element.append(yield_data)
image_list = list()
element_image_num = list()
for i in range(len(all_element)):
element_image = [
handled['image'] for handled in all_element[i]['preprocessed']
]
element_image = [handled['image'] for handled in all_element[i]['preprocessed']]
element_image_num.append(len(element_image))
image_list.extend(element_image)
......@@ -150,26 +150,27 @@ class PyramidBoxLiteServerMask(hub.Module):
pass
image_arr = np.squeeze(np.array(batch_data), axis=1)
image_tensor = PaddleTensor(image_arr.copy())
data_out = self.gpu_predictor.run([
image_tensor
]) if use_gpu else self.cpu_predictor.run([image_tensor])
# len(data_out) == 1
# data_out[0].as_ndarray().shape == (-1, 2)
data_out = data_out[0].as_ndarray()
predict_out = np.concatenate((predict_out, data_out))
predictor = self.gpu_predictor if use_gpu else self.cpu_predictor
input_names = predictor.get_input_names()
input_handle = predictor.get_input_handle(input_names[0])
input_handle.copy_from_cpu(image_arr)
predictor.run()
output_names = predictor.get_output_names()
output_handle = predictor.get_output_handle(output_names[0])
output_data = output_handle.copy_to_cpu()
predict_out = np.concatenate((predict_out, output_data))
predict_out = predict_out[1:]
# postprocess one by one
res = list()
for i in range(len(all_element)):
detect_faces_list = [
handled['face'] for handled in all_element[i]['preprocessed']
]
detect_faces_list = [handled['face'] for handled in all_element[i]['preprocessed']]
interval_left = sum(element_image_num[0:i])
interval_right = interval_left + element_image_num[i]
out = postprocess(
confidence_out=predict_out[interval_left:interval_right],
out = postprocess(confidence_out=predict_out[interval_left:interval_right],
org_im=all_element[i]['org_im'],
org_im_path=all_element[i]['org_im_path'],
detected_faces=detect_faces_list,
......@@ -178,42 +179,26 @@ class PyramidBoxLiteServerMask(hub.Module):
res.append(out)
return res
def save_inference_model(self,
dirname,
model_filename=None,
params_filename=None,
combined=True):
def save_inference_model(self, dirname, model_filename=None, params_filename=None, combined=True):
classifier_dir = os.path.join(dirname, 'mask_detector')
detector_dir = os.path.join(dirname, 'pyramidbox_lite')
self._save_classifier_model(classifier_dir, model_filename,
params_filename, combined)
self._save_detector_model(detector_dir, model_filename, params_filename,
combined)
def _save_detector_model(self,
dirname,
model_filename=None,
params_filename=None,
combined=True):
self.face_detector.save_inference_model(dirname, model_filename,
params_filename, combined)
def _save_classifier_model(self,
dirname,
model_filename=None,
params_filename=None,
combined=True):
self._save_classifier_model(classifier_dir, model_filename, params_filename, combined)
self._save_detector_model(detector_dir, model_filename, params_filename, combined)
def _save_detector_model(self, dirname, model_filename=None, params_filename=None, combined=True):
self.face_detector.save_inference_model(dirname, model_filename, params_filename, combined)
def _save_classifier_model(self, dirname, model_filename=None, params_filename=None, combined=True):
if combined:
model_filename = "__model__" if not model_filename else model_filename
params_filename = "__params__" if not params_filename else params_filename
place = fluid.CPUPlace()
exe = fluid.Executor(place)
place = paddle.CPUPlace()
exe = paddle.Executor(place)
program, feeded_var_names, target_vars = fluid.io.load_inference_model(
program, feeded_var_names, target_vars = paddle.static.load_inference_model(
dirname=self.default_pretrained_model_path, executor=exe)
fluid.io.save_inference_model(
dirname=dirname,
paddle.static.save_inference_model(dirname=dirname,
main_program=program,
executor=exe,
feeded_var_names=feeded_var_names,
......@@ -235,22 +220,17 @@ class PyramidBoxLiteServerMask(hub.Module):
"""
Run as a command.
"""
self.parser = argparse.ArgumentParser(
description="Run the {} module.".format(self.name),
self.parser = argparse.ArgumentParser(description="Run the {} module.".format(self.name),
prog='hub run {}'.format(self.name),
usage='%(prog)s',
add_help=True)
self.arg_input_group = self.parser.add_argument_group(
title="Input options", description="Input data. Required")
self.arg_input_group = self.parser.add_argument_group(title="Input options", description="Input data. Required")
self.arg_config_group = self.parser.add_argument_group(
title="Config options",
description=
"Run configuration for controlling module behavior, not required.")
title="Config options", description="Run configuration for controlling module behavior, not required.")
self.add_module_config_arg()
self.add_module_input_arg()
args = self.parser.parse_args(argvs)
results = self.face_detection(paths=[args.input_path],
use_gpu=args.use_gpu,
output_dir=args.output_dir,
visualization=args.visualization,
......@@ -262,18 +242,15 @@ class PyramidBoxLiteServerMask(hub.Module):
"""
Add the command config options.
"""
self.arg_config_group.add_argument('--use_gpu',
type=ast.literal_eval,
default=False,
help="whether use GPU or not")
self.arg_config_group.add_argument('--output_dir',
type=str,
default='detection_result',
help="The directory to save output images.")
self.arg_config_group.add_argument('--visualization',
type=ast.literal_eval,
default=False,
help="whether to save output as images.")
......@@ -282,17 +259,13 @@ class PyramidBoxLiteServerMask(hub.Module):
"""
Add the command input options.
"""
self.arg_input_group.add_argument('--input_path', type=str, help="path to image.")
self.arg_input_group.add_argument('--shrink',
                                  type=ast.literal_eval,
                                  default=0.5,
                                  help="resize the image to `shrink * original_shape` before feeding into network.")
self.arg_input_group.add_argument('--confs_threshold',
                                  type=ast.literal_eval,
                                  default=0.6,
                                  help="confidence threshold.")
......@@ -164,7 +164,10 @@
初始发布
* 1.1.2
* 1.1.3
移除 fluid api
- ```shell
$ hub install ultra_light_fast_generic_face_detector_1mb_320==1.1.3
```
......@@ -163,7 +163,10 @@
First release
* 1.1.2
* 1.1.3
Remove fluid api
- ```shell
$ hub install ultra_light_fast_generic_face_detector_1mb_320==1.1.3
```
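After installation, a minimal call sketch looks like the following; `face_detection` and its `paths`/`use_gpu`/`visualization` arguments are taken from the module code shown below in this diff, while the image path is a placeholder.

```python
import paddlehub as hub

# Placeholder image path; any local image will do.
face_detector = hub.Module(name="ultra_light_fast_generic_face_detector_1mb_320")
results = face_detector.face_detection(paths=["/PATH/TO/IMAGE"],
                                       use_gpu=False,
                                       visualization=True)
print(results)
```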
......@@ -2,18 +2,22 @@
from __future__ import absolute_import
from __future__ import division
import argparse
import ast
import os
import numpy as np
import paddle
from paddle.inference import Config
from paddle.inference import create_predictor
from ultra_light_fast_generic_face_detector_1mb_320.data_feed import reader
from ultra_light_fast_generic_face_detector_1mb_320.processor import base64_to_cv2
from ultra_light_fast_generic_face_detector_1mb_320.processor import postprocess
import paddlehub as hub
from paddlehub.module.module import moduleinfo
from paddlehub.module.module import runnable
from paddlehub.module.module import serving
@moduleinfo(
......@@ -23,8 +27,9 @@ from ultra_light_fast_generic_face_detector_1mb_320.data_feed import reader
author_email="paddle-dev@baidu.com",
summary=
"Ultra-Light-Fast-Generic-Face-Detector-1MB is a high-performance object detection model release on https://github.com/Linzaer/Ultra-Light-Fast-Generic-Face-Detector-1MB.",
version="1.1.2")
version="1.1.3")
class FaceDetector320(hub.Module):
def _initialize(self):
self.default_pretrained_model_path = os.path.join(self.directory,
"ultra_light_fast_generic_face_detector_1mb_320")
......@@ -34,10 +39,10 @@ class FaceDetector320(hub.Module):
"""
predictor config setting
"""
cpu_config = Config(self.default_pretrained_model_path)
cpu_config.disable_glog_info()
cpu_config.disable_gpu()
self.cpu_predictor = create_predictor(cpu_config)
try:
_places = os.environ["CUDA_VISIBLE_DEVICES"]
......@@ -46,23 +51,22 @@ class FaceDetector320(hub.Module):
except:
use_gpu = False
if use_gpu:
gpu_config = Config(self.default_pretrained_model_path)
gpu_config.disable_glog_info()
gpu_config.enable_use_gpu(memory_pool_init_size_mb=1000, device_id=0)
self.gpu_predictor = create_predictor(gpu_config)
def save_inference_model(self, dirname, model_filename=None, params_filename=None, combined=True):
if combined:
model_filename = "__model__" if not model_filename else model_filename
params_filename = "__params__" if not params_filename else params_filename
place = paddle.CPUPlace()
exe = paddle.Executor(place)
program, feeded_var_names, target_vars = paddle.static.load_inference_model(
dirname=self.default_pretrained_model_path, executor=exe)
paddle.static.save_inference_model(dirname=dirname,
main_program=program,
executor=exe,
feeded_var_names=feeded_var_names,
......@@ -129,16 +133,23 @@ class FaceDetector320(hub.Module):
except:
pass
# feed batch image
batch_image = np.array([data['image'] for data in batch_data]).astype('float32')
predictor = self.gpu_predictor if use_gpu else self.cpu_predictor
input_names = predictor.get_input_names()
input_handle = predictor.get_input_handle(input_names[0])
input_handle.copy_from_cpu(batch_image)
predictor.run()
output_names = predictor.get_output_names()
output_handle = predictor.get_output_handle(output_names[0])
confidences = output_handle.copy_to_cpu()
output_handle = predictor.get_output_handle(output_names[1])
boxes = output_handle.copy_to_cpu()
# postprocess one by one
for i in range(len(batch_data)):
out = postprocess(confidences=confidences[i],
boxes=boxes[i],
orig_im=batch_data[i]['orig_im'],
orig_im_shape=batch_data[i]['orig_im_shape'],
......@@ -164,8 +175,7 @@ class FaceDetector320(hub.Module):
"""
Run as a command.
"""
self.parser = argparse.ArgumentParser(description="Run the {} module.".format(self.name),
prog='hub run {}'.format(self.name),
usage='%(prog)s',
add_help=True)
......@@ -175,8 +185,7 @@ class FaceDetector320(hub.Module):
self.add_module_config_arg()
self.add_module_input_arg()
args = self.parser.parse_args(argvs)
results = self.face_detection(paths=[args.input_path],
batch_size=args.batch_size,
use_gpu=args.use_gpu,
output_dir=args.output_dir,
......@@ -187,15 +196,18 @@ class FaceDetector320(hub.Module):
"""
Add the command config options.
"""
self.arg_config_group.add_argument('--use_gpu',
type=ast.literal_eval,
default=False,
help="whether use GPU or not")
self.arg_config_group.add_argument('--output_dir',
type=str,
default='face_detector_320_predict_output',
help="The directory to save output images.")
self.arg_config_group.add_argument('--visualization',
type=ast.literal_eval,
default=False,
help="whether to save output as images.")
self.arg_config_group.add_argument('--batch_size', type=ast.literal_eval, default=1, help="batch size.")
def add_module_input_arg(self):
......
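The same replacement pattern recurs in every module touched by this PR: `AnalysisConfig`, `PaddleTensor` and `create_paddle_predictor` give way to `paddle.inference.Config`, `create_predictor` and explicit input/output handles. A self-contained sketch of that flow is shown below; the model directory and the input shape are illustrative placeholders.

```python
import numpy as np
from paddle.inference import Config, create_predictor

# Placeholder directory containing an exported inference model.
config = Config("/PATH/TO/INFERENCE_MODEL_DIR")
config.disable_glog_info()
config.disable_gpu()
predictor = create_predictor(config)

# Copy a dummy float32 batch in, run, and copy every output back out.
input_handle = predictor.get_input_handle(predictor.get_input_names()[0])
input_handle.copy_from_cpu(np.random.rand(1, 3, 240, 320).astype("float32"))
predictor.run()
outputs = [predictor.get_output_handle(name).copy_to_cpu() for name in predictor.get_output_names()]
```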
......@@ -164,7 +164,10 @@
初始发布
* 1.1.2
* 1.1.3
移除 fluid api
- ```shell
$ hub install ultra_light_fast_generic_face_detector_1mb_640==1.1.3
```
......@@ -163,7 +163,10 @@
First release
* 1.1.2
* 1.1.3
Remove fluid api
- ```shell
$ hub install ultra_light_fast_generic_face_detector_1mb_640==1.1.3
```
......@@ -2,18 +2,22 @@
from __future__ import absolute_import
from __future__ import division
import argparse
import ast
import os
import numpy as np
import paddle
from paddle.inference import Config
from paddle.inference import create_predictor
from ultra_light_fast_generic_face_detector_1mb_640.data_feed import reader
from ultra_light_fast_generic_face_detector_1mb_640.processor import base64_to_cv2
from ultra_light_fast_generic_face_detector_1mb_640.processor import postprocess
import paddlehub as hub
from paddlehub.module.module import moduleinfo
from paddlehub.module.module import runnable
from paddlehub.module.module import serving
@moduleinfo(
......@@ -23,8 +27,9 @@ from ultra_light_fast_generic_face_detector_1mb_640.data_feed import reader
author_email="paddle-dev@baidu.com",
summary=
"Ultra-Light-Fast-Generic-Face-Detector-1MB is a high-performance object detection model release on https://github.com/Linzaer/Ultra-Light-Fast-Generic-Face-Detector-1MB.",
version="1.1.2")
version="1.1.3")
class FaceDetector640(hub.Module):
def _initialize(self):
self.default_pretrained_model_path = os.path.join(self.directory,
"ultra_light_fast_generic_face_detector_1mb_640")
......@@ -34,10 +39,10 @@ class FaceDetector640(hub.Module):
"""
predictor config setting
"""
cpu_config = Config(self.default_pretrained_model_path)
cpu_config.disable_glog_info()
cpu_config.disable_gpu()
self.cpu_predictor = create_predictor(cpu_config)
try:
_places = os.environ["CUDA_VISIBLE_DEVICES"]
......@@ -46,23 +51,22 @@ class FaceDetector640(hub.Module):
except:
use_gpu = False
if use_gpu:
gpu_config = Config(self.default_pretrained_model_path)
gpu_config.disable_glog_info()
gpu_config.enable_use_gpu(memory_pool_init_size_mb=1000, device_id=0)
self.gpu_predictor = create_predictor(gpu_config)
def save_inference_model(self, dirname, model_filename=None, params_filename=None, combined=True):
if combined:
model_filename = "__model__" if not model_filename else model_filename
params_filename = "__params__" if not params_filename else params_filename
place = paddle.CPUPlace()
exe = paddle.Executor(place)
program, feeded_var_names, target_vars = paddle.static.load_inference_model(
dirname=self.default_pretrained_model_path, executor=exe)
paddle.static.save_inference_model(dirname=dirname,
main_program=program,
executor=exe,
feeded_var_names=feeded_var_names,
......@@ -128,16 +132,23 @@ class FaceDetector640(hub.Module):
except:
pass
# feed batch image
batch_image = np.array([data['image'] for data in batch_data]).astype('float32')
predictor = self.gpu_predictor if use_gpu else self.cpu_predictor
input_names = predictor.get_input_names()
input_handle = predictor.get_input_handle(input_names[0])
input_handle.copy_from_cpu(batch_image)
predictor.run()
output_names = predictor.get_output_names()
output_handle = predictor.get_output_handle(output_names[0])
confidences = output_handle.copy_to_cpu()
output_handle = predictor.get_output_handle(output_names[1])
boxes = output_handle.copy_to_cpu()
# postprocess one by one
for i in range(len(batch_data)):
out = postprocess(confidences=confidences[i],
boxes=boxes[i],
orig_im=batch_data[i]['orig_im'],
orig_im_shape=batch_data[i]['orig_im_shape'],
......@@ -163,8 +174,7 @@ class FaceDetector640(hub.Module):
"""
Run as a command.
"""
self.parser = argparse.ArgumentParser(description="Run the {} module.".format(self.name),
prog='hub run {}'.format(self.name),
usage='%(prog)s',
add_help=True)
......@@ -174,8 +184,7 @@ class FaceDetector640(hub.Module):
self.add_module_config_arg()
self.add_module_input_arg()
args = self.parser.parse_args(argvs)
results = self.face_detection(paths=[args.input_path],
batch_size=args.batch_size,
use_gpu=args.use_gpu,
output_dir=args.output_dir,
......@@ -186,15 +195,18 @@ class FaceDetector640(hub.Module):
"""
Add the command config options.
"""
self.arg_config_group.add_argument('--use_gpu',
type=ast.literal_eval,
default=False,
help="whether use GPU or not")
self.arg_config_group.add_argument('--output_dir',
type=str,
default='face_detector_640_predict_output',
help="The directory to save output images.")
self.arg_config_group.add_argument('--visualization',
type=ast.literal_eval,
default=False,
help="whether to save output as images.")
self.arg_config_group.add_argument('--batch_size', type=ast.literal_eval, default=1, help="batch size.")
def add_module_input_arg(self):
......
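Both detector variants expose the `save_inference_model` helper shown above; a hedged export sketch (the output directory name is a placeholder) would be:

```python
import paddlehub as hub

# Placeholder output directory for the exported model files.
face_detector = hub.Module(name="ultra_light_fast_generic_face_detector_1mb_640")
face_detector.save_inference_model(dirname="exported_face_detector_640", combined=True)
```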
......@@ -183,7 +183,10 @@
* 1.0.2
* 1.0.3
移除 fluid api
* ```shell
$ hub install face_landmark_localization==1.0.3
```
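As with the detectors, a minimal call sketch for the landmark module follows; `keypoint_detection` and its arguments come from the module code later in this diff, while the image path and output directory are placeholders.

```python
import paddlehub as hub

# Placeholder image path and output directory.
face_landmark = hub.Module(name="face_landmark_localization")
results = face_landmark.keypoint_detection(paths=["/PATH/TO/IMAGE"],
                                           visualization=True,
                                           output_dir="face_landmark_output")
print(results)
```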
# coding=utf-8
from __future__ import absolute_import
import paddle.fluid as fluid
__all__ = ["face_landmark_localization"]
def face_landmark_localization(image):
# image = fluid.layers.data(shape=[1, 60, 60], name='data', dtype='float32')
Conv1 = fluid.layers.conv2d(
image,
param_attr='Conv1_weights',
name='Conv1',
dilation=[1, 1],
filter_size=[5, 5],
stride=[1, 1],
groups=1,
bias_attr='Conv1_bias',
padding=[2, 2],
num_filters=20)
ActivationTangH1 = fluid.layers.tanh(Conv1, name='ActivationTangH1')
ActivationAbs1 = fluid.layers.abs(ActivationTangH1, name='ActivationAbs1')
Pool1 = fluid.layers.pool2d(
ActivationAbs1,
exclusive=False,
pool_type='max',
pool_padding=[0, 0],
name='Pool1',
global_pooling=False,
pool_stride=[2, 2],
ceil_mode=True,
pool_size=[2, 2])
Conv2 = fluid.layers.conv2d(
Pool1,
param_attr='Conv2_weights',
name='Conv2',
dilation=[1, 1],
filter_size=[5, 5],
stride=[1, 1],
groups=1,
bias_attr='Conv2_bias',
padding=[2, 2],
num_filters=48)
ActivationTangH2 = fluid.layers.tanh(Conv2, name='ActivationTangH2')
ActivationAbs2 = fluid.layers.abs(ActivationTangH2, name='ActivationAbs2')
Pool2 = fluid.layers.pool2d(
ActivationAbs2,
exclusive=False,
pool_type='max',
pool_padding=[0, 0],
name='Pool2',
global_pooling=False,
pool_stride=[2, 2],
ceil_mode=True,
pool_size=[2, 2])
Conv3 = fluid.layers.conv2d(
Pool2,
param_attr='Conv3_weights',
name='Conv3',
dilation=[1, 1],
filter_size=[3, 3],
stride=[1, 1],
groups=1,
bias_attr='Conv3_bias',
padding=[0, 0],
num_filters=64)
ActivationTangH3 = fluid.layers.tanh(Conv3, name='ActivationTangH3')
ActivationAbs3 = fluid.layers.abs(ActivationTangH3, name='ActivationAbs3')
Pool3 = fluid.layers.pool2d(
ActivationAbs3,
exclusive=False,
pool_type='max',
pool_padding=[0, 0],
name='Pool3',
global_pooling=False,
pool_stride=[2, 2],
ceil_mode=True,
pool_size=[3, 3])
Conv4 = fluid.layers.conv2d(
Pool3,
param_attr='Conv4_weights',
name='Conv4',
dilation=[1, 1],
filter_size=[3, 3],
stride=[1, 1],
groups=1,
bias_attr='Conv4_bias',
padding=[0, 0],
num_filters=80)
ActivationTangH4 = fluid.layers.tanh(Conv4, name='ActivationTangH4')
ActivationAbs4 = fluid.layers.abs(ActivationTangH4, name='ActivationAbs4')
Dense1 = fluid.layers.fc(
ActivationAbs4, param_attr='Dense1_weights', act=None, name='Dense1', size=512, bias_attr='Dense1_bias')
ActivationTangH5 = fluid.layers.tanh(Dense1, name='ActivationTangH5')
ActivationAbs5 = fluid.layers.abs(ActivationTangH5, name='ActivationAbs5')
Dense3 = fluid.layers.fc(
ActivationAbs5, param_attr='Dense3_weights', act=None, name='Dense3', size=136, bias_attr='Dense3_bias')
return Dense3
......@@ -2,21 +2,25 @@
from __future__ import absolute_import
from __future__ import division
import argparse
import ast
import os
import time
from collections import OrderedDict
import cv2
import numpy as np
import paddle
from face_landmark_localization.data_feed import reader
from face_landmark_localization.processor import base64_to_cv2
from face_landmark_localization.processor import postprocess
from paddle.inference import Config
from paddle.inference import create_predictor
import paddlehub as hub
from paddlehub.module.module import moduleinfo
from paddlehub.module.module import runnable
from paddlehub.module.module import serving
@moduleinfo(
......@@ -26,8 +30,9 @@ from face_landmark_localization.data_feed import reader
author_email="paddle-dev@baidu.com",
summary=
"Face_Landmark_Localization can be used to locate face landmark. This Module is trained through the MPII Human Pose dataset.",
version="1.0.2")
version="1.0.3")
class FaceLandmarkLocalization(hub.Module):
def _initialize(self, face_detector_module=None):
"""
Args:
......@@ -44,10 +49,10 @@ class FaceLandmarkLocalization(hub.Module):
"""
predictor config setting
"""
cpu_config = Config(self.default_pretrained_model_path)
cpu_config.disable_glog_info()
cpu_config.disable_gpu()
self.cpu_predictor = create_predictor(cpu_config)
try:
_places = os.environ["CUDA_VISIBLE_DEVICES"]
......@@ -56,10 +61,10 @@ class FaceLandmarkLocalization(hub.Module):
except:
use_gpu = False
if use_gpu:
gpu_config = Config(self.default_pretrained_model_path)
gpu_config.disable_glog_info()
gpu_config.enable_use_gpu(memory_pool_init_size_mb=1000, device_id=0)
self.gpu_predictor = create_predictor(gpu_config)
def set_face_detector_module(self, face_detector_module):
"""
......@@ -77,24 +82,25 @@ class FaceLandmarkLocalization(hub.Module):
if combined:
model_filename = "__model__" if not model_filename else model_filename
params_filename = "__params__" if not params_filename else params_filename
place = paddle.CPUPlace()
exe = paddle.Executor(place)
program, feeded_var_names, target_vars = paddle.static.load_inference_model(
dirname=self.default_pretrained_model_path, executor=exe)
face_landmark_dir = os.path.join(dirname, "face_landmark")
detector_dir = os.path.join(dirname, "detector")
paddle.static.save_inference_model(dirname=face_landmark_dir,
main_program=program,
executor=exe,
feeded_var_names=feeded_var_names,
target_vars=target_vars,
model_filename=model_filename,
params_filename=params_filename)
self.face_detector.save_inference_model(dirname=detector_dir,
model_filename=model_filename,
params_filename=params_filename,
combined=combined)
def keypoint_detection(self,
images=None,
......@@ -144,10 +150,17 @@ class FaceLandmarkLocalization(hub.Module):
except:
pass
# feed batch image
batch_image = np.array([data['face'] for data in batch_data]).astype('float32')
predictor = self.gpu_predictor if use_gpu else self.cpu_predictor
input_names = predictor.get_input_names()
input_handle = predictor.get_input_handle(input_names[0])
input_handle.copy_from_cpu(batch_image)
predictor.run()
output_names = predictor.get_output_names()
output_handle = predictor.get_output_handle(output_names[0])
points = output_handle.copy_to_cpu()
for idx, sample in enumerate(batch_data):
sample['points'] = points[idx].reshape(68, -1)
res += batch_data
......@@ -169,8 +182,7 @@ class FaceLandmarkLocalization(hub.Module):
"""
Run as a command.
"""
self.parser = argparse.ArgumentParser(description="Run the {} module.".format(self.name),
prog='hub run {}'.format(self.name),
usage='%(prog)s',
add_help=True)
......@@ -181,20 +193,28 @@ class FaceLandmarkLocalization(hub.Module):
self.add_module_config_arg()
self.add_module_input_arg()
args = self.parser.parse_args(argvs)
results = self.keypoint_detection(paths=[args.input_path],
use_gpu=args.use_gpu,
output_dir=args.output_dir,
visualization=args.visualization)
return results
def add_module_config_arg(self):
"""
Add the command config options.
"""
self.arg_config_group.add_argument('--use_gpu',
type=ast.literal_eval,
default=False,
help="whether use GPU or not")
self.arg_config_group.add_argument('--output_dir',
type=str,
default=None,
help="The directory to save output images.")
self.arg_config_group.add_argument('--visualization',
type=ast.literal_eval,
default=False,
help="whether to save output as images.")
def add_module_input_arg(self):
"""
......
......@@ -168,6 +168,10 @@
修复numpy数据读取问题
* 1.1.3
移除 fluid api
- ```shell
$ hub install ssd_mobilenet_v1_pascal==1.1.3
```
......@@ -167,6 +167,10 @@
Fix the problem of reading numpy
* 1.1.3
Remove fluid api
- ```shell
$ hub install ssd_mobilenet_v1_pascal==1.1.3
```
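For completeness, a hedged usage sketch follows; the `object_detection` method name is assumed from the common PaddleHub detection-module interface and does not appear in this diff, so treat it as an assumption.

```python
import paddlehub as hub

# Assumed interface: `object_detection` is the usual PaddleHub detection entry
# point; the image path is a placeholder.
ssd = hub.Module(name="ssd_mobilenet_v1_pascal")
results = ssd.object_detection(paths=["/PATH/TO/IMAGE"], visualization=True)
print(results)
```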
# coding=utf-8
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from paddle import fluid
from paddle.fluid.param_attr import ParamAttr
from paddle.fluid.regularizer import L2Decay
__all__ = ['MobileNet']
class MobileNet(object):
"""
MobileNet v1, see https://arxiv.org/abs/1704.04861
Args:
norm_type (str): normalization type, 'bn' and 'sync_bn' are supported
norm_decay (float): weight decay for normalization layer weights
conv_group_scale (int): scaling factor for convolution groups
with_extra_blocks (bool): if extra blocks should be added
extra_block_filters (list): number of filters for each extra block
class_dim (int): number of classes for classification
yolo_v3 (bool): whether to output the layers that yolo_v3 needs
"""
__shared__ = ['norm_type', 'weight_prefix_name']
def __init__(self,
norm_type='bn',
norm_decay=0.,
conv_group_scale=1,
conv_learning_rate=1.0,
with_extra_blocks=False,
extra_block_filters=[[256, 512], [128, 256], [128, 256],
[64, 128]],
weight_prefix_name='',
class_dim=1000,
yolo_v3=False):
self.norm_type = norm_type
self.norm_decay = norm_decay
self.conv_group_scale = conv_group_scale
self.conv_learning_rate = conv_learning_rate
self.with_extra_blocks = with_extra_blocks
self.extra_block_filters = extra_block_filters
self.prefix_name = weight_prefix_name
self.class_dim = class_dim
self.yolo_v3 = yolo_v3
def _conv_norm(self,
input,
filter_size,
num_filters,
stride,
padding,
num_groups=1,
act='relu',
use_cudnn=True,
name=None):
parameter_attr = ParamAttr(
learning_rate=self.conv_learning_rate,
initializer=fluid.initializer.MSRA(),
name=name + "_weights")
conv = fluid.layers.conv2d(
input=input,
num_filters=num_filters,
filter_size=filter_size,
stride=stride,
padding=padding,
groups=num_groups,
act=None,
use_cudnn=use_cudnn,
param_attr=parameter_attr,
bias_attr=False)
bn_name = name + "_bn"
norm_decay = self.norm_decay
bn_param_attr = ParamAttr(
regularizer=L2Decay(norm_decay), name=bn_name + '_scale')
bn_bias_attr = ParamAttr(
regularizer=L2Decay(norm_decay), name=bn_name + '_offset')
return fluid.layers.batch_norm(
input=conv,
act=act,
param_attr=bn_param_attr,
bias_attr=bn_bias_attr,
moving_mean_name=bn_name + '_mean',
moving_variance_name=bn_name + '_variance')
def depthwise_separable(self,
input,
num_filters1,
num_filters2,
num_groups,
stride,
scale,
name=None):
depthwise_conv = self._conv_norm(
input=input,
filter_size=3,
num_filters=int(num_filters1 * scale),
stride=stride,
padding=1,
num_groups=int(num_groups * scale),
use_cudnn=False,
name=name + "_dw")
pointwise_conv = self._conv_norm(
input=depthwise_conv,
filter_size=1,
num_filters=int(num_filters2 * scale),
stride=1,
padding=0,
name=name + "_sep")
return pointwise_conv
def _extra_block(self,
input,
num_filters1,
num_filters2,
num_groups,
stride,
name=None):
pointwise_conv = self._conv_norm(
input=input,
filter_size=1,
num_filters=int(num_filters1),
stride=1,
num_groups=int(num_groups),
padding=0,
name=name + "_extra1")
normal_conv = self._conv_norm(
input=pointwise_conv,
filter_size=3,
num_filters=int(num_filters2),
stride=2,
num_groups=int(num_groups),
padding=1,
name=name + "_extra2")
return normal_conv
def __call__(self, input):
scale = self.conv_group_scale
blocks = []
# input 1/1
out = self._conv_norm(
input, 3, int(32 * scale), 2, 1, name=self.prefix_name + "conv1")
# 1/2
out = self.depthwise_separable(
out, 32, 64, 32, 1, scale, name=self.prefix_name + "conv2_1")
out = self.depthwise_separable(
out, 64, 128, 64, 2, scale, name=self.prefix_name + "conv2_2")
# 1/4
out = self.depthwise_separable(
out, 128, 128, 128, 1, scale, name=self.prefix_name + "conv3_1")
out = self.depthwise_separable(
out, 128, 256, 128, 2, scale, name=self.prefix_name + "conv3_2")
# 1/8
blocks.append(out)
out = self.depthwise_separable(
out, 256, 256, 256, 1, scale, name=self.prefix_name + "conv4_1")
out = self.depthwise_separable(
out, 256, 512, 256, 2, scale, name=self.prefix_name + "conv4_2")
# 1/16
blocks.append(out)
for i in range(5):
out = self.depthwise_separable(
out,
512,
512,
512,
1,
scale,
name=self.prefix_name + "conv5_" + str(i + 1))
module11 = out
out = self.depthwise_separable(
out, 512, 1024, 512, 2, scale, name=self.prefix_name + "conv5_6")
# 1/32
out = self.depthwise_separable(
out, 1024, 1024, 1024, 1, scale, name=self.prefix_name + "conv6")
module13 = out
blocks.append(out)
if self.yolo_v3:
return blocks
if not self.with_extra_blocks:
out = fluid.layers.pool2d(
input=out, pool_type='avg', global_pooling=True)
out = fluid.layers.fc(
input=out,
size=self.class_dim,
param_attr=ParamAttr(
initializer=fluid.initializer.MSRA(), name="fc7_weights"),
bias_attr=ParamAttr(name="fc7_offset"))
out = fluid.layers.softmax(out)
blocks.append(out)
return blocks
num_filters = self.extra_block_filters
module14 = self._extra_block(module13, num_filters[0][0],
num_filters[0][1], 1, 2,
self.prefix_name + "conv7_1")
module15 = self._extra_block(module14, num_filters[1][0],
num_filters[1][1], 1, 2,
self.prefix_name + "conv7_2")
module16 = self._extra_block(module15, num_filters[2][0],
num_filters[2][1], 1, 2,
self.prefix_name + "conv7_3")
module17 = self._extra_block(module16, num_filters[3][0],
num_filters[3][1], 1, 2,
self.prefix_name + "conv7_4")
return module11, module13, module14, module15, module16, module17
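To make the backbone's interface concrete, a minimal wiring sketch for this legacy, fluid-based file is given below; the feed name and input shape are illustrative, and with `with_extra_blocks=True` the call returns the six SSD feature maps listed in the return statement above.

```python
import paddle.fluid as fluid

# Illustrative 300x300 SSD-style input placeholder.
image = fluid.layers.data(name='image', shape=[3, 300, 300], dtype='float32')

# with_extra_blocks=True makes __call__ return
# (module11, module13, module14, module15, module16, module17).
backbone = MobileNet(with_extra_blocks=True)
feature_maps = backbone(image)
```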
......@@ -167,6 +167,10 @@
修复numpy数据读取问题
* 1.0.3
移除 fluid api
- ```shell
$ hub install yolov3_darknet53_pedestrian==1.0.3
```
......@@ -166,6 +166,10 @@
Fix the problem of reading numpy
* 1.0.3
Remove fluid api
- ```shell
$ hub install yolov3_darknet53_pedestrian==1.0.3
```
......@@ -166,6 +166,10 @@
修复numpy数据读取问题
* 1.0.3
移除 fluid api
- ```shell
$ hub install yolov3_darknet53_vehicles==1.0.3
```
......@@ -176,4 +176,8 @@
* 1.1.2
修复cudnn为8.0.4显存泄露问题
移除 fluid api
- ```shell
$ hub install deeplabv3p_xception65_humanseg==1.1.2
```