Commit 576c6034 authored by ceci3

update mobilenet space and add register

Parent 9e4324ef
# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .mobilenetv2_space import MobileNetV2Space
@@ -16,14 +16,18 @@
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
sys.path.append('..')
import numpy as np
import paddle.fluid as fluid
from paddle.fluid.param_attr import ParamAttr
from searchspacebase import SearchSpaceBase
from .base_layer import conv_bn_layer
from .registry import SEARCHSPACE

@SEARCHSPACE.register_module
class MobileNetV2Space(SearchSpaceBase):
    def __init__(self, input_size, output_size, block_num, scale=1.0, class_dim=1000):
        super(MobileNetV2Space, self).__init__(input_size, output_size, block_num)
        self.head_num = np.array([3,4,8,12,16,24,32])  #7
        self.filter_num1 = np.array([3,4,8,12,16,24,32,48])  #8
@@ -36,10 +40,11 @@ class MobileNetV2Space(SearchSpace):
        self.multiply = np.array([1,2,3,4,6])  #5
        self.repeat = np.array([1,2,3,4,5,6])  #6
        self.scale = scale
        self.class_dim = class_dim

    def init_tokens(self):
        """
        The initial tokens sent to the controller.
        The first one is the index of the first layer's channel number in self.head_num;
        each following line gives the indices of [expansion_factor, filter_num, repeat_num, kernel_size] for one block.
        """
@@ -74,7 +79,7 @@ class MobileNetV2Space(SearchSpace):
        if tokens is None:
            tokens = self.init_tokens()

        base_bottleneck_params_list = [
            (1, self.head_num[tokens[0]], 1, 1, 3),
            (self.multiply[tokens[1]], self.filter_num1[tokens[2]], self.repeat[tokens[3]], 2, self.k_size[tokens[4]]),
            (self.multiply[tokens[5]], self.filter_num1[tokens[6]], self.repeat[tokens[7]], 2, self.k_size[tokens[8]]),
@@ -85,10 +90,24 @@ class MobileNetV2Space(SearchSpace):
            (self.multiply[tokens[25]], self.filter_num6[tokens[26]], self.repeat[tokens[27]], 1, self.k_size[tokens[28]]),
        ]

        assert self.block_num < 7, 'block number must be less than 7, but the received block number is {}'.format(self.block_num)

        # stride = 2 means the convolution downsamples the feature map, so block_num is only
        # decremented when stride = 2; otherwise the layer is appended to params_list directly.
        bottleneck_params_list = []
        for param_list in base_bottleneck_params_list:
            if param_list[3] == 1:
                bottleneck_params_list.append(param_list)
            else:
                if self.block_num > 1:
                    bottleneck_params_list.append(param_list)
                    self.block_num -= 1
                else:
                    break

        def net_arch(input):
            #conv1
            # all padding in conv2d is 'SAME', so the actual padding is computed automatically.
            input = conv_bn_layer(
                input,
                num_filters=int(32 * self.scale),
@@ -114,23 +133,18 @@ class MobileNetV2Space(SearchSpace):
                    k=k,
                    name='conv' + str(i))
                in_c = int(c * self.scale)

            # if output_size is 1, add an fc layer at the end
            if self.output_size == 1:
                input = fluid.layers.fc(input=input,
                                        size=self.class_dim,
                                        param_attr=ParamAttr(name='fc10_weights'),
                                        bias_attr=ParamAttr(name='fc10_offset'))
            else:
                assert self.output_size == input.shape[2], \
                    ("output_size must equal input_size / (2^block_num). "
                     "But received input_size={}, output_size={}, block_num={}".format(
                         self.input_size, self.output_size, self.block_num))

            return input
@@ -140,8 +154,8 @@ class MobileNetV2Space(SearchSpace):
    def shortcut(self, input, data_residual):
        """Build shortcut layer.
        Args:
            input(Variable): input.
            data_residual(Variable): residual layer.
        Returns:
            Variable, layer output.
        """
@@ -166,7 +180,7 @@ class MobileNetV2Space(SearchSpace):
            ifshortcut(bool), whether to use a shortcut.
            stride(int), stride.
            filter_size(int), filter size.
            padding(str|int|list), padding.
            expansion_factor(float), expansion factor.
            name(str), name.
        Returns:
......
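For reference, here is a small standalone sketch of how the tokens are expanded into bottleneck settings and how the stride-2 rule above trims the list to the downsampling budget. The arrays, the token values, and the k_size contents are simplified assumptions for illustration, not the exact values used by MobileNetV2Space.

# Illustrative only: simplified stand-ins for the arrays defined in __init__ above.
import numpy as np

head_num = np.array([3, 4, 8, 12, 16, 24, 32])
filter_num = np.array([3, 4, 8, 12, 16, 24, 32, 48])
multiply = np.array([1, 2, 3, 4, 6])
repeat = np.array([1, 2, 3, 4, 5, 6])
k_size = np.array([3, 5])          # assumed kernel-size choices; not shown in this diff

# The first token picks the head channel count; every following group of four picks
# [expansion_factor, filter_num, repeat_num, kernel_size] for one bottleneck stage.
tokens = [4, 0, 2, 1, 0, 1, 3, 2, 0]

base_params = [
    (1, head_num[tokens[0]], 1, 1, 3),
    (multiply[tokens[1]], filter_num[tokens[2]], repeat[tokens[3]], 2, k_size[tokens[4]]),
    (multiply[tokens[5]], filter_num[tokens[6]], repeat[tokens[7]], 2, k_size[tokens[8]]),
]

# Keep stride-1 stages unconditionally; each stride-2 stage spends one unit of the
# downsampling budget (block_num), and the list is cut off once the budget is used up.
block_num = 2
params = []
for p in base_params:
    if p[3] == 1:
        params.append(p)
    elif block_num > 1:
        params.append(p)
        block_num -= 1
    else:
        break

print(params)   # the head stage and the first stride-2 stage survive; the last stage is dropped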
import sys
sys.path.append('..')
from utils.registry import Registry
SEARCHSPACE = Registry('searchspace')
@@ -12,9 +12,9 @@
# See the License for the specific language governing permissions and
# limitations under the License.

__all__ = ['SearchSpaceBase']


class SearchSpaceBase(object):
    """Controller for Neural Architecture Search.
    """
......
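To show what the new registry buys, here is a hypothetical sketch of adding another search space: the class name (TinyConvSpace here, an invented example) becomes the lookup key, so SearchSpaceFactory needs no changes. The imports assume the package layout introduced in this commit, and the SearchSpaceBase interface beyond __init__, init_tokens and token2arch is not shown in this diff.

from searchspacebase import SearchSpaceBase
from .registry import SEARCHSPACE

@SEARCHSPACE.register_module
class TinyConvSpace(SearchSpaceBase):
    def __init__(self, input_size, output_size, block_num):
        super(TinyConvSpace, self).__init__(input_size, output_size, block_num)
        self.filter_num = [8, 16, 32, 64]

    def init_tokens(self):
        # one token per block: an index into self.filter_num
        return [0] * self.block_num

    def token2arch(self, tokens=None):
        if tokens is None:
            tokens = self.init_tokens()

        def net_arch(input):
            # build the blocks from the chosen filter numbers (omitted in this sketch)
            return input

        return net_arch

# After registration the factory can resolve it by class name, e.g.
# SearchSpaceFactory().get_search_space('TinyConvSpace', config)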
# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from searchspace.registry import SEARCHSPACE


class SearchSpaceFactory(object):
    def __init__(self):
        pass

    def get_search_space(self, key, config):
        """
        Get a specific model space based on the key and config.
        Args:
            key(str): model space name.
            config(dict): basic config information.
        Returns:
            model space(class)
        """
        cls = SEARCHSPACE.get(key)
        space = cls(config['input_size'], config['output_size'], config['block_num'])
        return space
For comparison, the previous SearchSpaceFactory that this commit replaces resolved the key with a hard-coded branch instead of the registry:

from MobileNetV2Space import MobileNetV2Space

class SearchSpaceFactory(object):
    def __init__(self):
        pass

    def get_search_space(self, key, config):
        """
        Args:
            key(str): model name.
            config(dict): basic config information.
        """
        if key == 'MobileNetV2':
            space = MobileNetV2Space(config['input_size'], config['output_size'], config['block_num'])
        return space
import paddle.fluid as fluid
from searchspacefactory import SearchSpaceFactory

if __name__ == '__main__':
    # if output_size is 1, the model will add an fc layer at the end.
    config = {'input_size': 224, 'output_size': 7, 'block_num': 5}

    space = SearchSpaceFactory()
    my_space = space.get_search_space('MobileNetV2Space', config)
    model_arch = my_space.token2arch()

    train_prog = fluid.Program()
    startup_prog = fluid.Program()
    with fluid.program_guard(train_prog, startup_prog):
        input_size = config['input_size']
        model_input = fluid.layers.data(name='model_in', shape=[1, 3, input_size, input_size], dtype='float32', append_batch_size=False)
        print('input shape', model_input.shape)
        predict = model_arch(model_input)
        print('output shape', predict.shape)
......
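As a quick check of the output_size assertion in MobileNetV2Space, the demo config above works out as follows (output_size is 7, not 1, so no fc layer is appended):

# output_size must equal input_size / (2 ** block_num), since each of the
# block_num downsampling stages halves the feature map.
input_size, output_size, block_num = 224, 7, 5
assert output_size == input_size // (2 ** block_num)   # 224 / 32 == 7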
# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import inspect


class Registry(object):
    def __init__(self, name):
        self._name = name
        self._module_dict = dict()

    def __repr__(self):
        format_str = self.__class__.__name__ + '(name={}, items={})'.format(
            self._name, list(self._module_dict.keys()))
        return format_str

    @property
    def name(self):
        return self._name

    @property
    def module_dict(self):
        return self._module_dict

    def get(self, key):
        return self._module_dict.get(key, None)

    def _register_module(self, module_class):
        if not inspect.isclass(module_class):
            raise TypeError('module must be a class, but received {}.'.format(type(module_class)))
        module_name = module_class.__name__
        if module_name in self._module_dict:
            raise KeyError('{} is already registered in {}.'.format(module_name, self.name))
        self._module_dict[module_name] = module_class

    def register_module(self, cls):
        self._register_module(cls)
        return cls
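A short usage sketch of the Registry class above; REG and DummySpace are placeholder names for illustration:

REG = Registry('demo')

@REG.register_module
class DummySpace(object):
    pass

print(REG)                    # Registry(name=demo, items=['DummySpace'])
print(REG.get('DummySpace'))  # the registered class
print(REG.get('missing'))     # None: get() falls back to None for unknown keys

# Registering the same class twice raises KeyError, and registering anything
# that is not a class raises TypeError, as enforced in _register_module.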