提交 fed41425 编写于 作者: C ceci3

update space

上级 d367b8ee
......@@ -14,6 +14,8 @@
import mobilenetv2
from .mobilenetv2 import *
import mobilenetv1
from .mobilenetv1 import *
import resnet
from .resnet import *
import search_space_registry
......@@ -28,4 +30,3 @@ __all__ += mobilenetv2.__all__
__all__ += search_space_registry.__all__
__all__ += search_space_factory.__all__
__all__ += search_space_base.__all__
......@@ -25,12 +25,14 @@ from .base_layer import conv_bn_layer
__all__ = ["CombineSearchSpace"]
class CombineSearchSpace(object):
"""
Combine Search Space.
Args:
configs(list<tuple>): multi config.
"""
def __init__(self, config_lists):
self.lens = len(config_lists)
self.spaces = []
......@@ -50,11 +52,10 @@ class CombineSearchSpace(object):
"""
cls = SEARCHSPACE.get(key)
space = cls(config['input_size'], config['output_size'],
config['block_num'])
config['block_num'], config['block_mask'])
return space
def init_tokens(self):
"""
Combine init tokens.
......@@ -96,4 +97,3 @@ class CombineSearchSpace(object):
model_archs.append(space.token2arch(token))
return model_archs
......@@ -32,11 +32,15 @@ class MobileNetV2Space(SearchSpaceBase):
input_size,
output_size,
block_num,
block_mask=None,
scale=1.0,
class_dim=1000):
super(MobileNetV2Space, self).__init__(input_size, output_size,
block_num)
block_num, block_mask)
assert self.block_mask == None, 'MobileNetV2Space will use origin MobileNetV2 as seach space, so use input_size, output_size and block_num to search'
# self.head_num means the first convolution channel
self.head_num = np.array([3, 4, 8, 12, 16, 24, 32]) #7
# self.filter_num1 ~ self.filter_num6 means following convlution channel
self.filter_num1 = np.array([3, 4, 8, 12, 16, 24, 32, 48]) #8
self.filter_num2 = np.array([8, 12, 16, 24, 32, 48, 64, 80]) #8
self.filter_num3 = np.array([16, 24, 32, 48, 64, 80, 96, 128]) #8
......@@ -46,8 +50,11 @@ class MobileNetV2Space(SearchSpaceBase):
[32, 48, 64, 80, 96, 128, 144, 160, 192, 224]) #10
self.filter_num6 = np.array(
[64, 80, 96, 128, 144, 160, 192, 224, 256, 320, 384, 512]) #12
# self.k_size means kernel size
self.k_size = np.array([3, 5]) #2
# self.multiply means expansion_factor of each _inverted_residual_unit
self.multiply = np.array([1, 2, 3, 4, 6]) #5
# self.repeat means repeat_num _inverted_residual_unit in each _invresi_blocks
self.repeat = np.array([1, 2, 3, 4, 5, 6]) #6
self.scale = scale
self.class_dim = class_dim
......@@ -87,14 +94,14 @@ class MobileNetV2Space(SearchSpaceBase):
"""
# head_num + 7 * [multiple(expansion_factor), filter_num, repeat, kernel_size]
# yapf: disable
range_table_base = [7,
5, 8, 6, 2,
5, 8, 6, 2,
5, 8, 6, 2,
5, 8, 6, 2,
5, 10, 6, 2,
5, 10, 6, 2,
5, 12, 6, 2]
range_table_base = [len(self.head_num),
len(self.multiply), len(self.filter_num1), len(self.repeat), len(self.k_size),
len(self.multiply), len(self.filter_num1), len(self.repeat), len(self.k_size),
len(self.multiply), len(self.filter_num2), len(self.repeat), len(self.k_size),
len(self.multiply), len(self.filter_num3), len(self.repeat), len(self.k_size),
len(self.multiply), len(self.filter_num4), len(self.repeat), len(self.k_size),
len(self.multiply), len(self.filter_num5), len(self.repeat), len(self.k_size),
len(self.multiply), len(self.filter_num6), len(self.repeat), len(self.k_size)]
range_table_base = list(np.array(range_table_base) - 1)
# yapf: enable
return range_table_base[:self.token_len]
......@@ -106,6 +113,7 @@ class MobileNetV2Space(SearchSpaceBase):
if tokens is None:
tokens = self.init_tokens()
print(tokens)
bottleneck_params_list = []
if self.block_num >= 1:
......@@ -169,7 +177,6 @@ class MobileNetV2Space(SearchSpaceBase):
# if output_size is 1, add fc layer in the end
if self.output_size == 1:
print('NOTE: if output_size is 1, add fc layer in the end!!!')
input = fluid.layers.fc(
input=input,
size=self.class_dim,
......
......@@ -32,13 +32,18 @@ class ResNetSpace(SearchSpaceBase):
input_size,
output_size,
block_num,
block_mask=None,
extract_feature=False,
class_dim=1000):
super(ResNetSpace, self).__init__(input_size, output_size, block_num)
super(ResNetSpace, self).__init__(input_size, output_size, block_num,
block_mask)
assert self.block_mask == None, 'ResNetSpace will use origin ResNet as seach space, so use input_size, output_size and block_num to search'
# self.filter_num1 ~ self.filter_num4 means convolution channel
self.filter_num1 = np.array([48, 64, 96, 128, 160, 192, 224]) #7
self.filter_num2 = np.array([64, 96, 128, 160, 192, 256, 320]) #7
self.filter_num3 = np.array([128, 160, 192, 256, 320, 384]) #6
self.filter_num4 = np.array([192, 256, 384, 512, 640]) #5
# self.repeat1 ~ self.repeat4 means depth of network
self.repeat1 = [2, 3, 4, 5, 6] #5
self.repeat2 = [2, 3, 4, 5, 6, 7] #6
self.repeat3 = [2, 3, 4, 5, 6, 7, 8, 10, 12, 14, 16, 20, 24] #13
......@@ -62,7 +67,11 @@ class ResNetSpace(SearchSpaceBase):
Get range table of current search space, constrains the range of tokens.
"""
#2 * self.block_num, 2 means depth and num_filter
range_table_base = [6, 6, 5, 4, 4, 5, 12, 5]
range_table_base = [
len(self.filter_num1), len(self.repeat1), len(self.filter_num2),
len(self.repeat2), len(self.filter_num3), len(self.repeat3),
len(self.filter_num4), len(self.repeat4)
]
return range_table_base[:self.token_len]
def token2arch(self, tokens=None):
......@@ -77,23 +86,23 @@ class ResNetSpace(SearchSpaceBase):
if self.block_num >= 1:
filter1 = self.filter_num1[tokens[0]]
repeat1 = self.repeat1[tokens[1]]
depth.append(filter1)
num_filters.append(repeat1)
num_filters.append(filter1)
depth.append(repeat1)
if self.block_num >= 2:
filter2 = self.filter_num2[tokens[2]]
repeat2 = self.repeat2[tokens[3]]
depth.append(filter2)
num_filters.append(repeat2)
num_filters.append(filter2)
depth.append(repeat2)
if self.block_num >= 3:
filter3 = self.filter_num3[tokens[4]]
repeat3 = self.repeat3[tokens[5]]
depth.append(filter3)
num_filters.append(repeat3)
num_filters.append(filter3)
depth.append(repeat3)
if self.block_num >= 4:
filter4 = self.filter_num4[tokens[6]]
repeat4 = self.repeat4[tokens[7]]
depth.append(filter4)
num_filters.append(repeat4)
num_filters.append(filter4)
depth.append(repeat4)
def net_arch(input):
conv = conv_bn_layer(
......@@ -105,7 +114,7 @@ class ResNetSpace(SearchSpaceBase):
name='resnet_conv0')
for block in range(len(depth)):
for i in range(depth[block]):
conv = self._basicneck_block(
conv = self._bottleneck_block(
input=conv,
num_filters=num_filters[block],
stride=2 if i == 0 and block != 0 else 1,
......@@ -138,22 +147,29 @@ class ResNetSpace(SearchSpaceBase):
else:
return input
def _bottleneck_block(self, input, num_filters, stride, name=None):
    """Build a ResNet bottleneck residual block.

    Structure: 1x1 reduce conv -> 3x3 conv (carries the block's stride)
    -> 1x1 expand conv (4x channels), then an elementwise add with a
    (possibly projected) shortcut followed by ReLU.

    Args:
        input: input feature map (fluid Variable).
        num_filters(int): channel count of the two inner convolutions;
            the expansion conv and the shortcut use num_filters * 4.
        stride(int): stride applied by the 3x3 convolution and shortcut.
        name(str): prefix used to name the sub-layers' parameters.

    Returns:
        Output feature map of the residual block.
    """
    # 1x1 reduction convolution.
    conv0 = conv_bn_layer(
        input=input,
        num_filters=num_filters,
        filter_size=1,
        act='relu',
        name=name + '_bottleneck_conv0')
    # 3x3 convolution; the block's spatial stride is applied here.
    conv1 = conv_bn_layer(
        input=conv0,
        num_filters=num_filters,
        filter_size=3,
        stride=stride,
        act='relu',
        name=name + '_bottleneck_conv1')
    # 1x1 expansion convolution; no activation before the residual add.
    conv2 = conv_bn_layer(
        input=conv1,
        num_filters=num_filters * 4,
        filter_size=1,
        act=None,
        name=name + '_bottleneck_conv2')
    # Shortcut must match the expanded channel count (num_filters * 4).
    short = self._shortcut(
        input, num_filters * 4, stride, name=name + '_shortcut')
    return fluid.layers.elementwise_add(
        x=short, y=conv2, act='relu', name=name + '_bottleneck_add')
......@@ -19,10 +19,11 @@ class SearchSpaceBase(object):
"""Controller for Neural Architecture Search.
"""
def __init__(self, input_size, output_size, block_num, *argss):
def __init__(self, input_size, output_size, block_num, block_mask, *argss):
self.input_size = input_size
self.output_size = output_size
self.block_num = block_num
self.block_mask = block_mask
def init_tokens(self):
"""Get init tokens in search space.
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册