Unverified commit 94949b0e authored by jm_12138, committed by GitHub

update mobilenet_v2_dishes (#2018)

Parent fb41acf6
......@@ -134,6 +134,10 @@
Initial release
* 1.1.0
Remove Fluid API
- ```shell
$ hub install mobilenet_v2_dishes==1.0.0
$ hub install mobilenet_v2_dishes==1.1.0
```
......@@ -133,6 +133,10 @@
First release
* 1.1.0
Remove Fluid API
- ```shell
$ hub install mobilenet_v2_dishes==1.0.0
$ hub install mobilenet_v2_dishes==1.1.0
```
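For reference, here is a minimal usage sketch after installing 1.1.0; the image path and top_k value are illustrative and mirror the inputs exercised by the unit tests at the end of this diff:

```python
import cv2
import paddlehub as hub

# Load the installed module by name; PaddleHub resolves it to version 1.1.0.
module = hub.Module(name="mobilenet_v2_dishes")

# Classify from file paths ...
results = module.classification(paths=['tests/test.jpg'], top_k=3)

# ... or from BGR ndarrays as returned by cv2.imread (the reader converts BGR to RGB internally).
results = module.classification(images=[cv2.imread('tests/test.jpg')], top_k=3)

# results is a list with one dict per image, mapping label to score.
print(results)
```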
......@@ -3,7 +3,6 @@ import os
import time
from collections import OrderedDict
import cv2
import numpy as np
from PIL import Image
......@@ -63,7 +62,8 @@ def reader(images=None, paths=None):
if paths:
for im_path in paths:
each = OrderedDict()
assert os.path.isfile(im_path), "The {} isn't a valid file path.".format(im_path)
assert os.path.isfile(
im_path), "The {} isn't a valid file path.".format(im_path)
each['org_im_path'] = im_path
each['org_im'] = Image.open(im_path)
each['org_im_width'], each['org_im_height'] = each['org_im'].size
......@@ -73,7 +73,8 @@ def reader(images=None, paths=None):
for im in images:
each = OrderedDict()
each['org_im'] = Image.fromarray(im[:, :, ::-1])
each['org_im_path'] = 'ndarray_time={}'.format(round(time.time(), 6) * 1e6)
each['org_im_path'] = 'ndarray_time={}'.format(
round(time.time(), 6) * 1e6)
each['org_im_width'], each['org_im_height'] = each['org_im'].size
component.append(each)
......
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import paddle.fluid as fluid
from paddle.fluid.initializer import MSRA
from paddle.fluid.param_attr import ParamAttr
__all__ = ['MobileNetV2']
train_parameters = {
"input_size": [3, 224, 224],
"input_mean": [0.485, 0.456, 0.406],
"input_std": [0.229, 0.224, 0.225],
"learning_strategy": {
"name": "piecewise_decay",
"batch_size": 256,
"epochs": [30, 60, 90],
"steps": [0.1, 0.01, 0.001, 0.0001]
}
}
class MobileNetV2():
def __init__(self):
self.params = train_parameters
def net(self, input, class_dim=1000, scale=1.0):
bottleneck_params_list = [
(1, 16, 1, 1),
(6, 24, 2, 2),
(6, 32, 3, 2),
(6, 64, 4, 2),
(6, 96, 3, 1),
(6, 160, 3, 2),
(6, 320, 1, 1),
]
#conv1
input = self.conv_bn_layer(
input, num_filters=int(32 * scale), filter_size=3, stride=2, padding=1, if_act=True, name='conv1_1')
# bottleneck sequences
i = 1
in_c = int(32 * scale)
for layer_setting in bottleneck_params_list:
t, c, n, s = layer_setting
i += 1
input = self.invresi_blocks(input=input, in_c=in_c, t=t, c=int(c * scale), n=n, s=s, name='conv' + str(i))
in_c = int(c * scale)
#last_conv
input = self.conv_bn_layer(
input=input,
num_filters=int(1280 * scale) if scale > 1.0 else 1280,
filter_size=1,
stride=1,
padding=0,
if_act=True,
name='conv9')
input = fluid.layers.pool2d(input=input, pool_size=7, pool_stride=1, pool_type='avg', global_pooling=True)
output = fluid.layers.fc(
input=input,
size=class_dim,
param_attr=ParamAttr(name='fc10_weights'),
bias_attr=ParamAttr(name='fc10_offset'))
return output, input
def conv_bn_layer(self,
input,
filter_size,
num_filters,
stride,
padding,
channels=None,
num_groups=1,
if_act=True,
name=None,
use_cudnn=True):
conv = fluid.layers.conv2d(
input=input,
num_filters=num_filters,
filter_size=filter_size,
stride=stride,
padding=padding,
groups=num_groups,
act=None,
use_cudnn=use_cudnn,
param_attr=ParamAttr(name=name + '_weights'),
bias_attr=False)
bn_name = name + '_bn'
bn = fluid.layers.batch_norm(
input=conv,
param_attr=ParamAttr(name=bn_name + "_scale"),
bias_attr=ParamAttr(name=bn_name + "_offset"),
moving_mean_name=bn_name + '_mean',
moving_variance_name=bn_name + '_variance')
if if_act:
return fluid.layers.relu6(bn)
else:
return bn
def shortcut(self, input, data_residual):
return fluid.layers.elementwise_add(input, data_residual)
def inverted_residual_unit(self,
input,
num_in_filter,
num_filters,
ifshortcut,
stride,
filter_size,
padding,
expansion_factor,
name=None):
num_expfilter = int(round(num_in_filter * expansion_factor))
channel_expand = self.conv_bn_layer(
input=input,
num_filters=num_expfilter,
filter_size=1,
stride=1,
padding=0,
num_groups=1,
if_act=True,
name=name + '_expand')
bottleneck_conv = self.conv_bn_layer(
input=channel_expand,
num_filters=num_expfilter,
filter_size=filter_size,
stride=stride,
padding=padding,
num_groups=num_expfilter,
if_act=True,
name=name + '_dwise',
use_cudnn=False)
linear_out = self.conv_bn_layer(
input=bottleneck_conv,
num_filters=num_filters,
filter_size=1,
stride=1,
padding=0,
num_groups=1,
if_act=False,
name=name + '_linear')
if ifshortcut:
out = self.shortcut(input=input, data_residual=linear_out)
return out
else:
return linear_out
def invresi_blocks(self, input, in_c, t, c, n, s, name=None):
first_block = self.inverted_residual_unit(
input=input,
num_in_filter=in_c,
num_filters=c,
ifshortcut=False,
stride=s,
filter_size=3,
padding=1,
expansion_factor=t,
name=name + '_1')
last_residual_block = first_block
last_c = c
for i in range(1, n):
last_residual_block = self.inverted_residual_unit(
input=last_residual_block,
num_in_filter=last_c,
num_filters=c,
ifshortcut=True,
stride=1,
filter_size=3,
padding=1,
expansion_factor=t,
name=name + '_' + str(i + 1))
return last_residual_block
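For context on the network definition above: each (t, c, n, s) tuple in bottleneck_params_list encodes the expansion factor, output channels, number of repeated blocks, and stride of one stage. The standalone sketch below is not part of the module; it simply traces the resulting channel progression for the default scale=1.0, using the same arithmetic as net() and inverted_residual_unit():

```python
# Trace the per-stage channel progression implied by bottleneck_params_list,
# assuming scale=1.0 (the value used by this module).
bottleneck_params_list = [
    (1, 16, 1, 1),
    (6, 24, 2, 2),
    (6, 32, 3, 2),
    (6, 64, 4, 2),
    (6, 96, 3, 1),
    (6, 160, 3, 2),
    (6, 320, 1, 1),
]

scale = 1.0
in_c = int(32 * scale)  # output channels of conv1_1
for i, (t, c, n, s) in enumerate(bottleneck_params_list, start=2):
    # The first unit of each stage expands from in_c; later units expand from c.
    expanded = int(round(in_c * t))
    out_c = int(c * scale)
    print(f"conv{i}: {n} block(s), stride {s}, {in_c} -> expand {expanded} -> project {out_c}")
    in_c = out_c
```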
......@@ -7,15 +7,11 @@ import argparse
import os
import numpy as np
import paddle.fluid as fluid
import paddlehub as hub
from paddle.fluid.core import PaddleTensor, AnalysisConfig, create_paddle_predictor
from paddle.inference import Config, create_predictor
from paddlehub.module.module import moduleinfo, runnable, serving
from paddlehub.common.paddle_helper import add_vars_prefix
from mobilenet_v2_dishes.processor import postprocess, base64_to_cv2
from mobilenet_v2_dishes.data_feed import reader
from mobilenet_v2_dishes.mobilenet_v2 import MobileNetV2
from .processor import postprocess, base64_to_cv2
from .data_feed import reader
@moduleinfo(
......@@ -24,11 +20,12 @@ from mobilenet_v2_dishes.mobilenet_v2 import MobileNetV2
author="baidu-vis",
author_email="",
summary=
"Mobilenet_V2 is a image classfication model, this module is trained with Baidu's self-built dishes dataset.",
version="1.0.0")
class MobileNetV2Dishes(hub.Module):
def _initialize(self):
self.default_pretrained_model_path = os.path.join(self.directory, "model")
"Mobilenet_V2 is a image classfication model, this module is trained with Baidu self-built dishes dataset.",
version="1.1.0")
class MobileNetV2Dishes:
def __init__(self):
self.default_pretrained_model_path = os.path.join(
self.directory, "model", "model")
label_file = os.path.join(self.directory, "label_list.txt")
with open(label_file, 'r', encoding='utf-8') as file:
self.label_list = file.read().split("\n")[:-1]
......@@ -52,10 +49,12 @@ class MobileNetV2Dishes(hub.Module):
"""
predictor config setting
"""
cpu_config = AnalysisConfig(self.default_pretrained_model_path)
model = self.default_pretrained_model_path + '.pdmodel'
params = self.default_pretrained_model_path + '.pdiparams'
cpu_config = Config(model, params)
cpu_config.disable_glog_info()
cpu_config.disable_gpu()
self.cpu_predictor = create_paddle_predictor(cpu_config)
self.cpu_predictor = create_predictor(cpu_config)
try:
_places = os.environ["CUDA_VISIBLE_DEVICES"]
......@@ -64,60 +63,18 @@ class MobileNetV2Dishes(hub.Module):
except:
use_gpu = False
if use_gpu:
gpu_config = AnalysisConfig(self.default_pretrained_model_path)
gpu_config = Config(model, params)
gpu_config.disable_glog_info()
gpu_config.enable_use_gpu(memory_pool_init_size_mb=1000, device_id=0)
self.gpu_predictor = create_paddle_predictor(gpu_config)
def context(self, trainable=True, pretrained=True):
"""context for transfer learning.
Args:
trainable (bool): Set parameters in program to be trainable.
pretrained (bool) : Whether to load pretrained model.
Returns:
inputs (dict): key is 'image', corresponding vaule is image tensor.
outputs (dict): key is :
'classification', corresponding value is the result of classification.
'feature_map', corresponding value is the result of the layer before the fully connected layer.
context_prog (fluid.Program): program for transfer learning.
"""
context_prog = fluid.Program()
startup_prog = fluid.Program()
with fluid.program_guard(context_prog, startup_prog):
with fluid.unique_name.guard():
image = fluid.layers.data(name="image", shape=[3, 224, 224], dtype="float32")
mobile_net = MobileNetV2()
output, feature_map = mobile_net.net(input=image, class_dim=len(self.label_list), scale=1.0)
name_prefix = '@HUB_{}@'.format(self.name)
inputs = {'image': name_prefix + image.name}
outputs = {'classification': name_prefix + output.name, 'feature_map': name_prefix + feature_map.name}
add_vars_prefix(context_prog, name_prefix)
add_vars_prefix(startup_prog, name_prefix)
global_vars = context_prog.global_block().vars
inputs = {key: global_vars[value] for key, value in inputs.items()}
outputs = {key: global_vars[value] for key, value in outputs.items()}
place = fluid.CPUPlace()
exe = fluid.Executor(place)
# pretrained
if pretrained:
def _if_exist(var):
b = os.path.exists(os.path.join(self.default_pretrained_model_path, var.name))
return b
fluid.io.load_vars(exe, self.default_pretrained_model_path, context_prog, predicate=_if_exist)
else:
exe.run(startup_prog)
# trainable
for param in context_prog.global_block().iter_parameters():
param.trainable = trainable
return inputs, outputs, context_prog
def classification(self, images=None, paths=None, batch_size=1, use_gpu=False, top_k=1):
gpu_config.enable_use_gpu(
memory_pool_init_size_mb=1000, device_id=0)
self.gpu_predictor = create_predictor(gpu_config)
def classification(self,
images=None,
paths=None,
batch_size=1,
use_gpu=False,
top_k=1):
"""
API for image classification.
......@@ -158,32 +115,21 @@ class MobileNetV2Dishes(hub.Module):
pass
# feed batch image
batch_image = np.array([data['image'] for data in batch_data])
batch_image = PaddleTensor(batch_image.copy())
predictor_output = self.gpu_predictor.run([batch_image]) if use_gpu else self.cpu_predictor.run(
[batch_image])
out = postprocess(data_out=predictor_output[0].as_ndarray(), label_list=self.label_list, top_k=top_k)
predictor = self.gpu_predictor if use_gpu else self.cpu_predictor
input_names = predictor.get_input_names()
input_handle = predictor.get_input_handle(input_names[0])
input_handle.copy_from_cpu(batch_image.copy())
predictor.run()
output_names = predictor.get_output_names()
output_handle = predictor.get_output_handle(output_names[0])
out = postprocess(
data_out=output_handle.copy_to_cpu(),
label_list=self.label_list,
top_k=top_k)
res += out
return res
def save_inference_model(self, dirname, model_filename=None, params_filename=None, combined=True):
if combined:
model_filename = "__model__" if not model_filename else model_filename
params_filename = "__params__" if not params_filename else params_filename
place = fluid.CPUPlace()
exe = fluid.Executor(place)
program, feeded_var_names, target_vars = fluid.io.load_inference_model(
dirname=self.default_pretrained_model_path, executor=exe)
fluid.io.save_inference_model(
dirname=dirname,
main_program=program,
executor=exe,
feeded_var_names=feeded_var_names,
target_vars=target_vars,
model_filename=model_filename,
params_filename=params_filename)
@serving
def serving_method(self, images, **kwargs):
"""
......@@ -203,13 +149,19 @@ class MobileNetV2Dishes(hub.Module):
prog='hub run {}'.format(self.name),
usage='%(prog)s',
add_help=True)
self.arg_input_group = self.parser.add_argument_group(title="Input options", description="Input data. Required")
self.arg_input_group = self.parser.add_argument_group(
title="Input options", description="Input data. Required")
self.arg_config_group = self.parser.add_argument_group(
title="Config options", description="Run configuration for controlling module behavior, not required.")
title="Config options",
description=
"Run configuration for controlling module behavior, not required.")
self.add_module_config_arg()
self.add_module_input_arg()
args = self.parser.parse_args(argvs)
results = self.classification(paths=[args.input_path], batch_size=args.batch_size, use_gpu=args.use_gpu)
results = self.classification(
paths=[args.input_path],
batch_size=args.batch_size,
use_gpu=args.use_gpu)
return results
def add_module_config_arg(self):
......@@ -217,12 +169,24 @@ class MobileNetV2Dishes(hub.Module):
Add the command config options.
"""
self.arg_config_group.add_argument(
'--use_gpu', type=ast.literal_eval, default=False, help="whether use GPU or not.")
self.arg_config_group.add_argument('--batch_size', type=ast.literal_eval, default=1, help="batch size.")
self.arg_config_group.add_argument('--top_k', type=ast.literal_eval, default=1, help="Return top k results.")
'--use_gpu',
type=ast.literal_eval,
default=False,
help="whether use GPU or not.")
self.arg_config_group.add_argument(
'--batch_size',
type=ast.literal_eval,
default=1,
help="batch size.")
self.arg_config_group.add_argument(
'--top_k',
type=ast.literal_eval,
default=1,
help="Return top k results.")
def add_module_input_arg(self):
"""
Add the command input options.
"""
self.arg_input_group.add_argument('--input_path', type=str, help="path to image.")
self.arg_input_group.add_argument(
'--input_path', type=str, help="path to image.")
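The classification path above follows the standard paddle.inference workflow that replaces the old AnalysisConfig/create_paddle_predictor API: build a Config from the .pdmodel/.pdiparams pair, create a predictor, copy the input batch into an input handle, run, and read the output handle. A condensed, self-contained sketch of that flow is shown below; the file paths and the [N, 3, 224, 224] input shape are assumptions for illustration, not values taken from this commit:

```python
import numpy as np
from paddle.inference import Config, create_predictor

# Placeholder paths; the module derives them from default_pretrained_model_path.
config = Config('model/model.pdmodel', 'model/model.pdiparams')
config.disable_glog_info()
config.disable_gpu()  # or config.enable_use_gpu(memory_pool_init_size_mb=1000, device_id=0)
predictor = create_predictor(config)

# A preprocessed batch in NCHW float32 layout, 224x224 as the module's reader produces.
batch_image = np.random.rand(1, 3, 224, 224).astype('float32')
input_handle = predictor.get_input_handle(predictor.get_input_names()[0])
input_handle.copy_from_cpu(batch_image)

predictor.run()

output_handle = predictor.get_output_handle(predictor.get_output_names()[0])
logits = output_handle.copy_to_cpu()  # raw scores; the module passes these to postprocess()
```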
......@@ -5,7 +5,6 @@ from __future__ import print_function
import base64
import cv2
import os
import numpy as np
......
import os
import shutil
import unittest
import cv2
import requests
import paddlehub as hub
os.environ['CUDA_VISIBLE_DEVICES'] = '0'
class TestHubModule(unittest.TestCase):
@classmethod
def setUpClass(cls) -> None:
img_url = 'https://unsplash.com/photos/rAyCBQTH7ws/download?ixid=MnwxMjA3fDB8MXxhbGx8fHx8fHx8fHwxNjYzMTIzODM5&force=true&w=640'
if not os.path.exists('tests'):
os.makedirs('tests')
response = requests.get(img_url)
assert response.status_code == 200, 'Network Error.'
with open('tests/test.jpg', 'wb') as f:
f.write(response.content)
cls.module = hub.Module(name="mobilenet_v2_dishes")
@classmethod
def tearDownClass(cls) -> None:
shutil.rmtree('tests')
shutil.rmtree('inference')
def test_classification1(self):
results = self.module.classification(
paths=['tests/test.jpg']
)
data = results[0]
self.assertTrue('海鲜面' in data)
self.assertTrue(data['海鲜面'] > 0.01)
def test_classification2(self):
results = self.module.classification(
images=[cv2.imread('tests/test.jpg')]
)
data = results[0]
self.assertTrue('海鲜面' in data)
self.assertTrue(data['海鲜面'] > 0.01)
def test_classification3(self):
results = self.module.classification(
images=[cv2.imread('tests/test.jpg')],
use_gpu=True
)
data = results[0]
self.assertTrue('海鲜面' in data)
self.assertTrue(data['海鲜面'] > 0.01)
def test_classification4(self):
self.assertRaises(
AssertionError,
self.module.classification,
paths=['no.jpg']
)
def test_classification5(self):
self.assertRaises(
TypeError,
self.module.classification,
images=['test.jpg']
)
def test_save_inference_model(self):
self.module.save_inference_model('./inference/model')
self.assertTrue(os.path.exists('./inference/model.pdmodel'))
self.assertTrue(os.path.exists('./inference/model.pdiparams'))
if __name__ == "__main__":
unittest.main()
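The export test above relies on save_inference_model being provided by the PaddleHub module base class (an assumption, since the method is no longer defined in module.py itself); it takes a path prefix and writes the .pdmodel/.pdiparams pair. A minimal sketch, with an arbitrary output location:

```python
import paddlehub as hub

module = hub.Module(name="mobilenet_v2_dishes")

# Writes ./inference/model.pdmodel and ./inference/model.pdiparams.
module.save_inference_model('./inference/model')
```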