Unverified · Commit 0514cf31 authored by jm_12138, committed by GitHub

update res2net101_vd_26w_4s_imagenet (#2043)

Parent: ceda319a
@@ -129,6 +129,10 @@

  First release

* 1.1.0

  Remove Fluid API

- ```shell
  $ hub install res2net101_vd_26w_4s_imagenet==1.1.0
  ```
@@ -128,6 +128,10 @@

  First release

* 1.1.0

  Remove Fluid API

- ```shell
  $ hub install res2net101_vd_26w_4s_imagenet==1.1.0
  ```
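For anyone picking up the new 1.1.0 release, a minimal usage sketch (assuming PaddleHub 2.x with the module installed as above; `test.jpg` is a placeholder image path, not a file from this commit):

```python
# Minimal usage sketch, not part of the commit. Assumes PaddleHub 2.x
# and an arbitrary local image; "test.jpg" is a placeholder path.
import cv2
import paddlehub as hub

classifier = hub.Module(name="res2net101_vd_26w_4s_imagenet")

# classification() accepts BGR ndarrays via `images` or file paths via `paths`.
results = classifier.classification(images=[cv2.imread("test.jpg")], top_k=3)
print(results)  # one {label: score} dict per input image
```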
# coding=utf-8
import os
import time
from collections import OrderedDict

import cv2
import numpy as np
from PIL import Image
...
-# coding=utf-8
from __future__ import absolute_import
from __future__ import division

-import ast
import argparse
+import ast
import os

import numpy as np
-import paddle.fluid as fluid
-import paddlehub as hub
-from paddle.fluid.core import PaddleTensor, AnalysisConfig, create_paddle_predictor
-from paddlehub.module.module import moduleinfo, runnable, serving
-from paddlehub.common.paddle_helper import add_vars_prefix
-from res2net101_vd_26w_4s_imagenet.processor import postprocess, base64_to_cv2
-from res2net101_vd_26w_4s_imagenet.data_feed import reader
-from res2net101_vd_26w_4s_imagenet.res2net_vd import Res2Net101_vd_26w_4s
+from paddle.inference import Config
+from paddle.inference import create_predictor
+from .data_feed import reader
+from .processor import base64_to_cv2
+from .processor import postprocess
+from paddlehub.module.module import moduleinfo
+from paddlehub.module.module import runnable
+from paddlehub.module.module import serving
@moduleinfo(
@@ -24,10 +23,12 @@ from res2net101_vd_26w_4s_imagenet.res2net_vd import Res2Net101_vd_26w_4s
    author="paddlepaddle",
    author_email="paddle-dev@baidu.com",
    summary="res2net101_vd_26w_4s is a image classfication model, this module is trained with imagenet datasets.",
-    version="1.0.0")
-class Res2Net101vd26w4sImagenet(hub.Module):
-    def _initialize(self):
-        self.default_pretrained_model_path = os.path.join(self.directory, "res2net101_vd_26w_4s_imagenet_model")
+    version="1.1.0")
+class Res2Net101vd26w4sImagenet:
+    def __init__(self):
+        self.default_pretrained_model_path = os.path.join(self.directory, "res2net101_vd_26w_4s_imagenet_model",
+                                                           "model")
        label_file = os.path.join(self.directory, "label_list.txt")
        with open(label_file, 'r', encoding='utf-8') as file:
            self.label_list = file.read().split("\n")[:-1]
@@ -51,10 +52,12 @@ class Res2Net101vd26w4sImagenet(hub.Module):
        """
        predictor config setting
        """
-        cpu_config = AnalysisConfig(self.default_pretrained_model_path)
+        model = self.default_pretrained_model_path + '.pdmodel'
+        params = self.default_pretrained_model_path + '.pdiparams'
+        cpu_config = Config(model, params)
        cpu_config.disable_glog_info()
        cpu_config.disable_gpu()
-        self.cpu_predictor = create_paddle_predictor(cpu_config)
+        self.cpu_predictor = create_predictor(cpu_config)

        try:
            _places = os.environ["CUDA_VISIBLE_DEVICES"]
@@ -63,58 +66,10 @@ class Res2Net101vd26w4sImagenet(hub.Module):
        except:
            use_gpu = False
        if use_gpu:
-            gpu_config = AnalysisConfig(self.default_pretrained_model_path)
+            gpu_config = Config(model, params)
            gpu_config.disable_glog_info()
            gpu_config.enable_use_gpu(memory_pool_init_size_mb=1000, device_id=0)
-            self.gpu_predictor = create_paddle_predictor(gpu_config)
+            self.gpu_predictor = create_predictor(gpu_config)
-    def context(self, trainable=True, pretrained=True):
-        """context for transfer learning.
-        Args:
-            trainable (bool): Set parameters in program to be trainable.
-            pretrained (bool) : Whether to load pretrained model.
-        Returns:
-            inputs (dict): key is 'image', corresponding vaule is image tensor.
-            outputs (dict): key is :
-                'classification', corresponding value is the result of classification.
-                'feature_map', corresponding value is the result of the layer before the fully connected layer.
-            context_prog (fluid.Program): program for transfer learning.
-        """
-        context_prog = fluid.Program()
-        startup_prog = fluid.Program()
-        with fluid.program_guard(context_prog, startup_prog):
-            with fluid.unique_name.guard():
-                image = fluid.layers.data(name="image", shape=[3, 224, 224], dtype="float32")
-                resnet_vd = Res2Net101_vd_26w_4s()
-                output, feature_map = resnet_vd.net(input=image, class_dim=len(self.label_list))
-                name_prefix = '@HUB_{}@'.format(self.name)
-                inputs = {'image': name_prefix + image.name}
-                outputs = {'classification': name_prefix + output.name, 'feature_map': name_prefix + feature_map.name}
-                add_vars_prefix(context_prog, name_prefix)
-                add_vars_prefix(startup_prog, name_prefix)
-                global_vars = context_prog.global_block().vars
-                inputs = {key: global_vars[value] for key, value in inputs.items()}
-                outputs = {key: global_vars[value] for key, value in outputs.items()}
-                place = fluid.CPUPlace()
-                exe = fluid.Executor(place)
-                # pretrained
-                if pretrained:
-                    def _if_exist(var):
-                        b = os.path.exists(os.path.join(self.default_pretrained_model_path, var.name))
-                        return b
-                    fluid.io.load_vars(exe, self.default_pretrained_model_path, context_prog, predicate=_if_exist)
-                else:
-                    exe.run(startup_prog)
-                # trainable
-                for param in context_prog.global_block().iter_parameters():
-                    param.trainable = trainable
-        return inputs, outputs, context_prog
    def classification(self, images=None, paths=None, batch_size=1, use_gpu=False, top_k=1):
        """
@@ -152,32 +107,19 @@ class Res2Net101vd26w4sImagenet(hub.Module):
                pass
            # feed batch image
            batch_image = np.array([data['image'] for data in batch_data])
-            batch_image = PaddleTensor(batch_image.copy())
-            predictor_output = self.gpu_predictor.run([batch_image]) if use_gpu else self.cpu_predictor.run(
-                [batch_image])
-            out = postprocess(data_out=predictor_output[0].as_ndarray(), label_list=self.label_list, top_k=top_k)
+            predictor = self.gpu_predictor if use_gpu else self.cpu_predictor
+            input_names = predictor.get_input_names()
+            input_handle = predictor.get_input_handle(input_names[0])
+            input_handle.copy_from_cpu(batch_image.copy())
+            predictor.run()
+            output_names = predictor.get_output_names()
+            output_handle = predictor.get_output_handle(output_names[0])
+            out = postprocess(data_out=output_handle.copy_to_cpu(), label_list=self.label_list, top_k=top_k)
            res += out
        return res
-    def save_inference_model(self, dirname, model_filename=None, params_filename=None, combined=True):
-        if combined:
-            model_filename = "__model__" if not model_filename else model_filename
-            params_filename = "__params__" if not params_filename else params_filename
-        place = fluid.CPUPlace()
-        exe = fluid.Executor(place)
-        program, feeded_var_names, target_vars = fluid.io.load_inference_model(
-            dirname=self.default_pretrained_model_path, executor=exe)
-        fluid.io.save_inference_model(
-            dirname=dirname,
-            main_program=program,
-            executor=exe,
-            feeded_var_names=feeded_var_names,
-            target_vars=target_vars,
-            model_filename=model_filename,
-            params_filename=params_filename)
    @serving
    def serving_method(self, images, **kwargs):
        """
@@ -192,11 +134,10 @@ class Res2Net101vd26w4sImagenet(hub.Module):
        """
        Run as a command.
        """
-        self.parser = argparse.ArgumentParser(
-            description="Run the {} module.".format(self.name),
-            prog='hub run {}'.format(self.name),
-            usage='%(prog)s',
-            add_help=True)
+        self.parser = argparse.ArgumentParser(description="Run the {} module.".format(self.name),
+                                              prog='hub run {}'.format(self.name),
+                                              usage='%(prog)s',
+                                              add_help=True)
        self.arg_input_group = self.parser.add_argument_group(title="Input options", description="Input data. Required")
        self.arg_config_group = self.parser.add_argument_group(
            title="Config options", description="Run configuration for controlling module behavior, not required.")
@@ -210,8 +151,10 @@ class Res2Net101vd26w4sImagenet(hub.Module):
        """
        Add the command config options.
        """
-        self.arg_config_group.add_argument(
-            '--use_gpu', type=ast.literal_eval, default=False, help="whether use GPU or not.")
+        self.arg_config_group.add_argument('--use_gpu',
+                                           type=ast.literal_eval,
+                                           default=False,
+                                           help="whether use GPU or not.")
        self.arg_config_group.add_argument('--batch_size', type=ast.literal_eval, default=1, help="batch size.")
        self.arg_config_group.add_argument('--top_k', type=ast.literal_eval, default=1, help="Return top k results.")
...
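For context on the module.py changes above, here is a standalone sketch of the `paddle.inference` flow that replaces the old `AnalysisConfig`/`PaddleTensor` API. The file names are placeholders; the module itself derives them from `self.default_pretrained_model_path`.

```python
# Sketch of the paddle.inference predictor flow adopted above.
# "model.pdmodel" / "model.pdiparams" are placeholder paths.
import numpy as np
from paddle.inference import Config, create_predictor

config = Config("model.pdmodel", "model.pdiparams")
config.disable_glog_info()
config.disable_gpu()  # or: config.enable_use_gpu(memory_pool_init_size_mb=1000, device_id=0)
predictor = create_predictor(config)

# Copy an NCHW float32 batch in, run, and copy the logits back out.
input_handle = predictor.get_input_handle(predictor.get_input_names()[0])
input_handle.copy_from_cpu(np.random.rand(1, 3, 224, 224).astype("float32"))
predictor.run()
output_handle = predictor.get_output_handle(predictor.get_output_names()[0])
logits = output_handle.copy_to_cpu()
print(logits.shape)
```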
@@ -4,9 +4,8 @@ from __future__ import division
from __future__ import print_function

import base64
-import cv2
-import os
+import cv2
import numpy as np
@@ -18,7 +17,6 @@ def base64_to_cv2(b64str):
def softmax(x):
-    orig_shape = x.shape
    if len(x.shape) > 1:
        tmp = np.max(x, axis=1)
        x -= tmp.reshape((x.shape[0], 1))
...
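The diff view truncates `softmax` above; for reference, a minimal NumPy sketch of the numerically stable form it implements (not the module's exact code):

```python
# Stable softmax: subtract the row max before exponentiating so large
# logits do not overflow. A sketch, not the module's exact implementation.
import numpy as np


def softmax(x):
    x = x - np.max(x, axis=-1, keepdims=True)
    e = np.exp(x)
    return e / np.sum(e, axis=-1, keepdims=True)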
#copyright (c) 2019 PaddlePaddle Authors. All Rights Reserve.
#
#Licensed under the Apache License, Version 2.0 (the "License");
#you may not use this file except in compliance with the License.
#You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
#Unless required by applicable law or agreed to in writing, software
#distributed under the License is distributed on an "AS IS" BASIS,
#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#See the License for the specific language governing permissions and
#limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import paddle
import paddle.fluid as fluid
import math
from paddle.fluid.param_attr import ParamAttr

__all__ = [
    "Res2Net_vd", "Res2Net50_vd_48w_2s", "Res2Net50_vd_26w_4s", "Res2Net50_vd_14w_8s", "Res2Net50_vd_26w_6s",
    "Res2Net50_vd_26w_8s", "Res2Net101_vd_26w_4s", "Res2Net152_vd_26w_4s", "Res2Net200_vd_26w_4s"
]


class Res2Net_vd():
    def __init__(self, layers=50, scales=4, width=26):
        self.layers = layers
        self.scales = scales
        self.width = width

    def net(self, input, class_dim=1000):
        layers = self.layers
        supported_layers = [50, 101, 152, 200]
        assert layers in supported_layers, \
            "supported layers are {} but input layer is {}".format(supported_layers, layers)
        basic_width = self.width * self.scales
        num_filters1 = [basic_width * t for t in [1, 2, 4, 8]]
        num_filters2 = [256 * t for t in [1, 2, 4, 8]]
        if layers == 50:
            depth = [3, 4, 6, 3]
        elif layers == 101:
            depth = [3, 4, 23, 3]
        elif layers == 152:
            depth = [3, 8, 36, 3]
        elif layers == 200:
            depth = [3, 12, 48, 3]
        conv = self.conv_bn_layer(input=input, num_filters=32, filter_size=3, stride=2, act='relu', name='conv1_1')
        conv = self.conv_bn_layer(input=conv, num_filters=32, filter_size=3, stride=1, act='relu', name='conv1_2')
        conv = self.conv_bn_layer(input=conv, num_filters=64, filter_size=3, stride=1, act='relu', name='conv1_3')
        conv = fluid.layers.pool2d(input=conv, pool_size=3, pool_stride=2, pool_padding=1, pool_type='max')

        for block in range(len(depth)):
            for i in range(depth[block]):
                if layers in [101, 152] and block == 2:
                    if i == 0:
                        conv_name = "res" + str(block + 2) + "a"
                    else:
                        conv_name = "res" + str(block + 2) + "b" + str(i)
                else:
                    conv_name = "res" + str(block + 2) + chr(97 + i)
                conv = self.bottleneck_block(
                    input=conv,
                    num_filters1=num_filters1[block],
                    num_filters2=num_filters2[block],
                    stride=2 if i == 0 and block != 0 else 1,
                    if_first=block == i == 0,
                    name=conv_name)

        pool = fluid.layers.pool2d(input=conv, pool_size=7, pool_stride=1, pool_type='avg', global_pooling=True)
        stdv = 1.0 / math.sqrt(pool.shape[1] * 1.0)
        out = fluid.layers.fc(
            input=pool,
            size=class_dim,
            param_attr=fluid.param_attr.ParamAttr(
                initializer=fluid.initializer.Uniform(-stdv, stdv), name='fc_weights'),
            bias_attr=fluid.param_attr.ParamAttr(name='fc_offset'))
        return out, pool

    def conv_bn_layer(self, input, num_filters, filter_size, stride=1, groups=1, act=None, name=None):
        conv = fluid.layers.conv2d(
            input=input,
            num_filters=num_filters,
            filter_size=filter_size,
            stride=stride,
            padding=(filter_size - 1) // 2,
            groups=groups,
            act=None,
            param_attr=ParamAttr(name=name + "_weights"),
            bias_attr=False)
        if name == "conv1":
            bn_name = "bn_" + name
        else:
            bn_name = "bn" + name[3:]
        return fluid.layers.batch_norm(
            input=conv,
            act=act,
            param_attr=ParamAttr(name=bn_name + '_scale'),
            bias_attr=ParamAttr(bn_name + '_offset'),
            moving_mean_name=bn_name + '_mean',
            moving_variance_name=bn_name + '_variance')

    def conv_bn_layer_new(self, input, num_filters, filter_size, stride=1, groups=1, act=None, name=None):
        pool = fluid.layers.pool2d(
            input=input, pool_size=2, pool_stride=2, pool_padding=0, pool_type='avg', ceil_mode=True)
        conv = fluid.layers.conv2d(
            input=pool,
            num_filters=num_filters,
            filter_size=filter_size,
            stride=1,
            padding=(filter_size - 1) // 2,
            groups=groups,
            act=None,
            param_attr=ParamAttr(name=name + "_weights"),
            bias_attr=False)
        if name == "conv1":
            bn_name = "bn_" + name
        else:
            bn_name = "bn" + name[3:]
        return fluid.layers.batch_norm(
            input=conv,
            act=act,
            param_attr=ParamAttr(name=bn_name + '_scale'),
            bias_attr=ParamAttr(bn_name + '_offset'),
            moving_mean_name=bn_name + '_mean',
            moving_variance_name=bn_name + '_variance')

    def shortcut(self, input, ch_out, stride, name, if_first=False):
        ch_in = input.shape[1]
        if ch_in != ch_out or stride != 1:
            if if_first:
                return self.conv_bn_layer(input, ch_out, 1, stride, name=name)
            else:
                return self.conv_bn_layer_new(input, ch_out, 1, stride, name=name)
        elif if_first:
            return self.conv_bn_layer(input, ch_out, 1, stride, name=name)
        else:
            return input

    def bottleneck_block(self, input, num_filters1, num_filters2, stride, name, if_first):
        conv0 = self.conv_bn_layer(
            input=input, num_filters=num_filters1, filter_size=1, stride=1, act='relu', name=name + '_branch2a')
        xs = fluid.layers.split(conv0, self.scales, 1)
        ys = []
        for s in range(self.scales - 1):
            if s == 0 or stride == 2:
                ys.append(
                    self.conv_bn_layer(
                        input=xs[s],
                        num_filters=num_filters1 // self.scales,
                        stride=stride,
                        filter_size=3,
                        act='relu',
                        name=name + '_branch2b_' + str(s + 1)))
            else:
                ys.append(
                    self.conv_bn_layer(
                        input=xs[s] + ys[-1],
                        num_filters=num_filters1 // self.scales,
                        stride=stride,
                        filter_size=3,
                        act='relu',
                        name=name + '_branch2b_' + str(s + 1)))
        if stride == 1:
            ys.append(xs[-1])
        else:
            ys.append(
                fluid.layers.pool2d(input=xs[-1], pool_size=3, pool_stride=stride, pool_padding=1, pool_type='avg'))
        conv1 = fluid.layers.concat(ys, axis=1)
        conv2 = self.conv_bn_layer(
            input=conv1, num_filters=num_filters2, filter_size=1, act=None, name=name + "_branch2c")
        short = self.shortcut(input, num_filters2, stride, if_first=if_first, name=name + "_branch1")
        return fluid.layers.elementwise_add(x=short, y=conv2, act='relu')


def Res2Net50_vd_48w_2s():
    model = Res2Net_vd(layers=50, scales=2, width=48)
    return model


def Res2Net50_vd_26w_4s():
    model = Res2Net_vd(layers=50, scales=4, width=26)
    return model


def Res2Net50_vd_14w_8s():
    model = Res2Net_vd(layers=50, scales=8, width=14)
    return model


def Res2Net50_vd_26w_6s():
    model = Res2Net_vd(layers=50, scales=6, width=26)
    return model


def Res2Net50_vd_26w_8s():
    model = Res2Net_vd(layers=50, scales=8, width=26)
    return model


def Res2Net101_vd_26w_4s():
    model = Res2Net_vd(layers=101, scales=4, width=26)
    return model


def Res2Net152_vd_26w_4s():
    model = Res2Net_vd(layers=152, scales=4, width=26)
    return model


def Res2Net200_vd_26w_4s():
    model = Res2Net_vd(layers=200, scales=4, width=26)
    return model
import os
import shutil
import unittest

import cv2
import requests

import paddlehub as hub

os.environ['CUDA_VISIBLE_DEVICES'] = '0'


class TestHubModule(unittest.TestCase):

    @classmethod
    def setUpClass(cls) -> None:
        img_url = 'https://unsplash.com/photos/brFsZ7qszSY/download?ixid=MnwxMjA3fDB8MXxzZWFyY2h8OHx8ZG9nfGVufDB8fHx8MTY2MzA1ODQ1MQ&force=true&w=640'
        if not os.path.exists('tests'):
            os.makedirs('tests')
        response = requests.get(img_url)
        assert response.status_code == 200, 'Network Error.'
        with open('tests/test.jpg', 'wb') as f:
            f.write(response.content)
        cls.module = hub.Module(name="res2net101_vd_26w_4s_imagenet")

    @classmethod
    def tearDownClass(cls) -> None:
        shutil.rmtree('tests')
        shutil.rmtree('inference')

    def test_classification1(self):
        results = self.module.classification(paths=['tests/test.jpg'])
        data = results[0]
        self.assertTrue('Pembroke' in data)
        self.assertTrue(data['Pembroke'] > 0.5)

    def test_classification2(self):
        results = self.module.classification(images=[cv2.imread('tests/test.jpg')])
        data = results[0]
        self.assertTrue('Pembroke' in data)
        self.assertTrue(data['Pembroke'] > 0.5)

    def test_classification3(self):
        results = self.module.classification(images=[cv2.imread('tests/test.jpg')], use_gpu=True)
        data = results[0]
        self.assertTrue('Pembroke' in data)
        self.assertTrue(data['Pembroke'] > 0.5)

    def test_classification4(self):
        self.assertRaises(AssertionError, self.module.classification, paths=['no.jpg'])

    def test_classification5(self):
        self.assertRaises(TypeError, self.module.classification, images=['tests/test.jpg'])

    def test_save_inference_model(self):
        self.module.save_inference_model('./inference/model')
        self.assertTrue(os.path.exists('./inference/model.pdmodel'))
        self.assertTrue(os.path.exists('./inference/model.pdiparams'))


if __name__ == "__main__":
    unittest.main()
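Assuming the test file above is saved as `test_res2net101_vd_26w_4s_imagenet.py` (the file name is not shown in this view) and a GPU is visible, the suite can be run directly, since it ends with a `unittest.main()` entry point:

```shell
# file name assumed; adjust to wherever the test file is saved
$ python test_res2net101_vd_26w_4s_imagenet.py
```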