Commit 2da6b284 authored by yangfukui

Merge branch 'develop' of ssh://gitlab.baidu.com:8022/tianfei01/PaddleSlim into develop

......@@ -14,4 +14,8 @@
from . import graph_wrapper
from .graph_wrapper import *
from . import registry
from .registry import *
__all__ = graph_wrapper.__all__
__all__ += registry.__all__
# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import inspect
__all__ = ["Registry"]
class Registry(object):
def __init__(self, name):
self._name = name
self._module_dict = dict()
def __repr__(self):
format_str = self.__class__.__name__ + '(name={}, items={})'.format(
self._name, list(self._module_dict.keys()))
return format_str
@property
def name(self):
return self._name
@property
def module_dict(self):
return self._module_dict
......@@ -20,12 +40,14 @@ class Registry(object):
def _register_module(self, module_class):
if not inspect.isclass(module_class):
raise TypeError('module must be a class, but received {}.'.format(
type(module_class)))
module_name = module_class.__name__
if module_name in self._module_dict:
raise KeyError('{} is already registered in {}.'.format(
module_name, self.name))
self._module_dict[module_name] = module_class
def register(self, cls):
self._register_module(cls)
return cls
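For orientation, a minimal usage sketch of the Registry above (MODELS and MyNet are hypothetical names, and get() is assumed to look a registered class up by name, the way SEARCHSPACE.get(key) is used later in this commit):

MODELS = Registry('models')

@MODELS.register
class MyNet(object):
    pass

# retrieve the registered class by its __name__ (get() itself is elided in this hunk)
net_cls = MODELS.get('MyNet')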
......@@ -11,3 +11,9 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from . import search_space
from .search_space import *
__all__ = []
__all__ += search_space.__all__
......@@ -11,3 +11,21 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from . import mobilenetv2
from .mobilenetv2 import *
from . import resnet
from .resnet import *
from . import search_space_registry
from .search_space_registry import *
from . import search_space_factory
from .search_space_factory import *
from . import search_space_base
from .search_space_base import *
__all__ = []
__all__ += mobilenetv2.__all__
__all__ += search_space_registry.__all__
__all__ += search_space_factory.__all__
__all__ += search_space_base.__all__
......@@ -16,7 +16,15 @@ import paddle.fluid as fluid
from paddle.fluid.param_attr import ParamAttr
def conv_bn_layer(input,
filter_size,
num_filters,
stride,
padding,
num_groups=1,
act=None,
name=None,
use_cudnn=True):
"""Build convolution and batch normalization layers.
Args:
input(Variable): input.
......@@ -31,12 +39,27 @@ def conv_bn_layer(input, filter_size, num_filters, stride, padding, num_groups=1
Returns:
Variable, layers output.
"""
conv = fluid.layers.conv2d(
input,
num_filters=num_filters,
filter_size=filter_size,
stride=stride,
padding=padding,
groups=num_groups,
act=None,
use_cudnn=use_cudnn,
param_attr=ParamAttr(name=name + '_weights'),
bias_attr=False)
bn_name = name + '_bn'
bn = fluid.layers.batch_norm(
input=conv,
param_attr=ParamAttr(name=bn_name + '_scale'),
bias_attr=ParamAttr(name=bn_name + '_offset'),
moving_mean_name=bn_name + '_mean',
moving_variance_name=bn_name + '_variance')
if act == 'relu6':
return fluid.layers.relu6(bn)
elif act == 'sigmoid':
return fluid.layers.sigmoid(bn)
else:
return bn
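For reference, a hedged example of calling conv_bn_layer under the fluid 1.x API used throughout this commit (the tensor and layer names are illustrative):

import paddle.fluid as fluid

image = fluid.layers.data(name='image', shape=[3, 224, 224], dtype='float32')
# 3x3 stride-2 convolution, then batch norm, then relu6
out = conv_bn_layer(
    image,
    filter_size=3,
    num_filters=32,
    stride=2,
    padding='SAME',
    act='relu6',
    name='example_conv1')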
# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import paddle.fluid as fluid
from paddle.fluid.param_attr import ParamAttr
from .search_space_base import SearchSpaceBase
from .search_space_registry import SEARCHSPACE
from .base_layer import conv_bn_layer
__all__ = ["CombineSearchSpace"]
class CombineSearchSpace(object):
"""
Combine Search Space.
Args:
config_lists(list<tuple>): list of (key, config) tuples, one for each model space.
"""
def __init__(self, config_lists):
self.lens = len(config_lists)
self.spaces = []
for config_list in config_lists:
key, config = config_list
self.spaces.append(self._get_single_search_space(key, config))
def _get_single_search_space(self, key, config):
"""
get specific model space based on key and config.
Args:
key(str): model space name.
config(dict): basic config information.
return:
model space(class)
"""
cls = SEARCHSPACE.get(key)
space = cls(config['input_size'], config['output_size'],
config['block_num'])
return space
def init_tokens(self):
"""
Combine init tokens.
"""
tokens = []
self.single_token_num = []
for space in self.spaces:
tokens.extend(space.init_tokens())
self.single_token_num.append(len(space.init_tokens()))
return tokens
def range_table(self):
"""
Combine range table.
"""
range_tables = []
for space in self.spaces:
range_tables.extend(space.range_table())
return range_tables
def token2arch(self, tokens=None):
"""
Combine model arch
"""
if tokens is None:
tokens = self.init_tokens()
token_list = []
start_idx = 0
end_idx = 0
for i in range(len(self.single_token_num)):
end_idx += self.single_token_num[i]
token_list.append(tokens[start_idx:end_idx])
start_idx = end_idx
model_archs = []
for space, token in zip(self.spaces, token_list):
model_archs.append(space.token2arch(token))
return model_archs
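A short usage sketch of CombineSearchSpace, mirroring the config format exercised by the unit tests at the end of this commit:

config = {'input_size': 224, 'output_size': 7, 'block_num': 5}
space = CombineSearchSpace([('MobileNetV2Space', config)])
tokens = space.init_tokens()      # concatenation of each sub-space's init tokens
archs = space.token2arch(tokens)  # one net_arch callable per sub-space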
......@@ -19,26 +19,39 @@ from __future__ import print_function
import numpy as np
import paddle.fluid as fluid
from paddle.fluid.param_attr import ParamAttr
from .search_space_base import SearchSpaceBase
from .base_layer import conv_bn_layer
from .search_space_registry import SEARCHSPACE
__all__ = ["MobileNetV2Space"]
@SEARCHSPACE.register
class MobileNetV2Space(SearchSpaceBase):
def __init__(self,
input_size,
output_size,
block_num,
scale=1.0,
class_dim=1000):
super(MobileNetV2Space, self).__init__(input_size, output_size,
block_num)
self.head_num = np.array([3, 4, 8, 12, 16, 24, 32]) #7
self.filter_num1 = np.array([3, 4, 8, 12, 16, 24, 32, 48]) #8
self.filter_num2 = np.array([8, 12, 16, 24, 32, 48, 64, 80]) #8
self.filter_num3 = np.array([16, 24, 32, 48, 64, 80, 96, 128]) #8
self.filter_num4 = np.array(
[24, 32, 48, 64, 80, 96, 128, 144, 160, 192]) #10
self.filter_num5 = np.array(
[32, 48, 64, 80, 96, 128, 144, 160, 192, 224]) #10
self.filter_num6 = np.array(
[64, 80, 96, 128, 144, 160, 192, 224, 256, 320, 384, 512]) #12
self.k_size = np.array([3, 5]) #2
self.multiply = np.array([1, 2, 3, 4, 6]) #5
self.repeat = np.array([1, 2, 3, 4, 5, 6]) #6
self.scale = scale
self.class_dim = class_dim
def init_tokens(self):
"""
......@@ -47,7 +60,8 @@ class MobileNetV2Space(SearchSpaceBase):
each line in the following represents the indices of [expansion_factor, filter_num, repeat_num, kernel_size]
"""
# original MobileNetV2
# yapf: disable
init_token_base = [4, # 1, 16, 1
4, 5, 1, 0, # 6, 24, 1
4, 5, 1, 0, # 6, 24, 2
4, 4, 2, 0, # 6, 32, 3
......@@ -55,13 +69,22 @@ class MobileNetV2Space(SearchSpaceBase):
4, 5, 2, 0, # 6, 96, 3
4, 7, 2, 0, # 6, 160, 3
4, 9, 0, 0] # 6, 320, 1
# yapf: enable
if self.block_num < 5:
self.token_len = 1 + (self.block_num - 1) * 4
else:
self.token_len = 1 + (self.block_num + 2 * (self.block_num - 5)) * 4
return init_token_base[:self.token_len]
def range_table(self):
"""
get range table of current search space
"""
# head_num + 7 * [multiple(expansion_factor), filter_num, repeat, kernel_size]
# yapf: disable
range_table_base = [7,
5, 8, 6, 2,
5, 8, 6, 2,
5, 8, 6, 2,
......@@ -69,40 +92,39 @@ class MobileNetV2Space(SearchSpaceBase):
5, 10, 6, 2,
5, 10, 6, 2,
5, 12, 6, 2]
# yapf: enable
return range_table_base[:self.token_len]
def token2arch(self, tokens=None):
"""
return net_arch function
"""
if tokens is None:
tokens = self.init_tokens()
assert self.block_num < 7, 'block number must be less than 7, but received block number {}'.format(
self.block_num)
# stride = 2 means the convolution downsamples the feature map, so block_num is only consumed when stride = 2;
# otherwise, the layer is added to bottleneck_params_list directly.
bottleneck_params_list = []
if self.block_num >= 1: bottleneck_params_list.append((1, self.head_num[tokens[0]], 1, 1, 3))
if self.block_num >= 2: bottleneck_params_list.append((self.multiply[tokens[1]], self.filter_num1[tokens[2]],
self.repeat[tokens[3]], 2, self.k_size[tokens[4]]))
if self.block_num >= 3: bottleneck_params_list.append((self.multiply[tokens[5]], self.filter_num1[tokens[6]],
self.repeat[tokens[7]], 2, self.k_size[tokens[8]]))
if self.block_num >= 4: bottleneck_params_list.append((self.multiply[tokens[9]], self.filter_num2[tokens[10]],
self.repeat[tokens[11]], 2, self.k_size[tokens[12]]))
if self.block_num >= 5:
bottleneck_params_list.append((self.multiply[tokens[13]], self.filter_num3[tokens[14]],
self.repeat[tokens[15]], 2, self.k_size[tokens[16]]))
bottleneck_params_list.append((self.multiply[tokens[17]], self.filter_num3[tokens[18]],
self.repeat[tokens[19]], 1, self.k_size[tokens[20]]))
if self.block_num >= 6:
bottleneck_params_list.append((self.multiply[tokens[21]], self.filter_num5[tokens[22]],
self.repeat[tokens[23]], 2, self.k_size[tokens[24]]))
bottleneck_params_list.append((self.multiply[tokens[25]], self.filter_num6[tokens[26]],
self.repeat[tokens[27]], 1, self.k_size[tokens[28]]))
def net_arch(input):
# conv1
# all conv2d padding is 'SAME', so the actual padding is computed automatically.
......@@ -113,7 +135,7 @@ class MobileNetV2Space(SearchSpaceBase):
stride=2,
padding='SAME',
act='relu6',
name='mobilenetv2_conv1_1')
# bottleneck sequences
i = 1
......@@ -121,7 +143,7 @@ class MobileNetV2Space(SearchSpaceBase):
for layer_setting in bottleneck_params_list:
t, c, n, s, k = layer_setting
i += 1
input = self._invresi_blocks(
input=input,
in_c=in_c,
t=t,
......@@ -129,15 +151,16 @@ class MobileNetV2Space(SearchSpaceBase):
n=n,
s=s,
k=k,
name='mobilenetv2_conv' + str(i))
in_c = int(c * self.scale)
# if output_size is 1, add fc layer in the end
if self.output_size == 1:
input = fluid.layers.fc(
input=input,
size=self.class_dim,
param_attr=ParamAttr(name='mobilenetv2_fc_weights'),
bias_attr=ParamAttr(name='mobilenetv2_fc_offset'))
else:
assert self.output_size == input.shape[2], \
("output_size must EQUAL to input_size / (2^block_num)."
......@@ -148,8 +171,7 @@ class MobileNetV2Space(SearchSpaceBase):
return net_arch
def _shortcut(self, input, data_residual):
"""Build shortcut layer.
Args:
input(Variable): input.
......@@ -159,8 +181,7 @@ class MobileNetV2Space(SearchSpaceBase):
"""
return fluid.layers.elementwise_add(input, data_residual)
def _inverted_residual_unit(self,
input,
num_in_filter,
num_filters,
......@@ -217,18 +238,10 @@ class MobileNetV2Space(SearchSpaceBase):
name=name + '_linear')
out = linear_out
if ifshortcut:
out = self._shortcut(input=input, data_residual=out)
return out
def _invresi_blocks(self, input, in_c, t, c, n, s, k, name=None):
"""Build inverted residual blocks.
Args:
input: Variable, input.
......@@ -242,7 +255,7 @@ class MobileNetV2Space(SearchSpaceBase):
Returns:
Variable, layers output.
"""
first_block = self._inverted_residual_unit(
input=input,
num_in_filter=in_c,
num_filters=c,
......@@ -256,7 +269,7 @@ class MobileNetV2Space(SearchSpaceBase):
last_c = c
for i in range(1, n):
last_residual_block = self._inverted_residual_unit(
input=last_residual_block,
num_in_filter=last_c,
num_filters=c,
......@@ -266,5 +279,3 @@ class MobileNetV2Space(SearchSpaceBase):
expansion_factor=t,
name=name + '_' + str(i + 1))
return last_residual_block
# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import paddle.fluid as fluid
from paddle.fluid.param_attr import ParamAttr
from .search_space_base import SearchSpaceBase
from .base_layer import conv_bn_layer
from .search_space_registry import SEARCHSPACE
__all__ = ["ResNetSpace"]
@SEARCHSPACE.register
class ResNetSpace(SearchSpaceBase):
def __init__(self,
input_size,
output_size,
block_num,
scale=1.0,
class_dim=1000):
super(ResNetSpace, self).__init__(input_size, output_size, block_num)
def init_tokens(self):
return [0, 0, 0, 0, 0, 0]
def range_table(self):
return [3, 3, 3, 3, 3, 3]
def token2arch(self, tokens=None):
if tokens is None:
tokens = self.init_tokens()
def net_arch(input):
input = conv_bn_layer(
input,
num_filters=32,
filter_size=3,
stride=2,
padding='SAME',
act='sigmoid',
name='resnet_conv1_1')
return input
return net_arch
......@@ -14,6 +14,7 @@
__all__ = ['SearchSpaceBase']
class SearchSpaceBase(object):
"""Controller for Neural Architecture Search.
"""
......@@ -38,7 +39,6 @@ class SearchSpaceBase(object):
Args:
tokens(list<int>): The tokens which represent a network.
Return:
model arch
"""
raise NotImplementedError('Abstract method.')
......@@ -12,25 +12,20 @@
# See the License for the specific language governing permissions and
# limitations under the License.
from .combine_search_space import CombineSearchSpace
__all__ = ["SearchSpaceFactory"]
class SearchSpaceFactory(object):
def __init__(self):
pass
def get_search_space(self, config_lists):
"""
get model spaces based on list(key, config).
Args:
config_lists(list<tuple>): list of (key, config) tuples, where key is a model space name and config is its basic config dict.
return:
CombineSearchSpace built from the given model spaces.
"""
assert isinstance(config_lists, list), "configs must be a list"
return CombineSearchSpace(config_lists)
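The factory is now a thin wrapper over CombineSearchSpace; a usage sketch (the tests below exercise exactly this path):

factory = SearchSpaceFactory()
space = factory.get_search_space(
    [('MobileNetV2Space', {'input_size': 224, 'output_size': 7, 'block_num': 5})])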
......@@ -12,4 +12,8 @@
# See the License for the specific language governing permissions and
# limitations under the License.
from ..utils.registry import Registry
__all__ = ["SEARCHSPACE"]
SEARCHSPACE = Registry('searchspace')
......@@ -22,34 +22,33 @@ from paddle.fluid.contrib.slim.quantization import ConvertToInt8Pass
from paddle.fluid.contrib.slim.quantization import TransformForMobilePass
from paddle.fluid import core
WEIGHT_QUANTIZATION_TYPES = [
'abs_max', 'channel_wise_abs_max', 'range_abs_max', 'moving_average_abs_max'
]
ACTIVATION_QUANTIZATION_TYPES = [
'abs_max', 'range_abs_max', 'moving_average_abs_max'
]
VALID_DTYPES = ['int8']
_quant_config_default = {
# weight quantize type, default is 'abs_max'
'weight_quantize_type': 'abs_max',
# activation quantize type, default is 'abs_max'
'activation_quantize_type': 'abs_max',
# weight quantize bit num, default is 8
'weight_bits': 8,
# activation quantize bit num, default is 8
'activation_bits': 8,
# ops whose name_scope is in the not_quant_pattern list will not be quantized
'not_quant_pattern': ['skip_quant'],
# ops whose type is in quantize_op_types will be quantized
'quantize_op_types': ['conv2d', 'depthwise_conv2d', 'mul'],
# data type after quantization, such as 'uint8', 'int8', etc. default is 'int8'
'dtype': 'int8',
# window size for 'range_abs_max' quantization. default is 10000
'window_size': 10000,
# the decay coefficient of moving average, default is 0.9
'moving_rate': 0.9,
# if quant_weight_only is True, only the parameters of layers that need to be
# quantized are quantized; activations are not quantized.
'quant_weight_only': False
}
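For illustration, a user config only needs to override the keys that differ from _quant_config_default; validating and merging it against the defaults is the job of _parse_configs below (the override values here are examples only):

user_config = {
    # must be one of WEIGHT_QUANTIZATION_TYPES
    'weight_quantize_type': 'channel_wise_abs_max',
    # must be one of ACTIVATION_QUANTIZATION_TYPES
    'activation_quantize_type': 'moving_average_abs_max',
}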
def _parse_configs(user_config):
......@@ -125,8 +124,10 @@ def quant_aware(program, place, config, scope=None, for_test=False):
scope = fluid.global_scope() if not scope else scope
assert isinstance(config, dict), "config must be dict"
assert 'weight_quantize_type' in config.keys(
), 'weight_quantize_type must be configured'
assert 'activation_quantize_type' in config.keys(
), 'activation_quantize_type must be configured'
config = _parse_configs(config)
main_graph = IrGraph(core.Graph(program.desc), for_test=for_test)
......@@ -141,8 +142,7 @@ def quant_aware(program, place, config, scope=None, for_test=False):
window_size=config['window_size'],
moving_rate=config['moving_rate'],
quantizable_op_type=config['quantize_op_types'],
skip_pattern=config['not_quant_pattern'])
transform_pass.apply(main_graph)
......@@ -164,7 +164,7 @@ def quant_post(program, place, config, scope=None):
for_test: is for test program.
Return:
fluid.Program: the quantization program is not trainable.
"""
"""
pass
......@@ -196,7 +196,8 @@ def convert(program, scope, place, config, save_int8=False):
freezed_program = test_graph.to_program()
if save_int8:
convert_int8_pass = ConvertToInt8Pass(
scope=fluid.global_scope(), place=place)
convert_int8_pass.apply(test_graph)
freezed_program_int8 = test_graph.to_program()
return freezed_program, freezed_program_int8
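Putting the pieces together, a hedged sketch of the intended flow (train_prog and test_prog are assumed to be pre-built fluid Programs, and quant_aware's return value, elided in this diff, is assumed to be the transformed program; quant_post is still a stub above):

import paddle.fluid as fluid

place = fluid.CPUPlace()
config = {
    'weight_quantize_type': 'abs_max',
    'activation_quantize_type': 'abs_max',
}
# insert fake-quant/dequant ops into the training and evaluation programs
train_quant = quant_aware(train_prog, place, config, for_test=False)
test_quant = quant_aware(test_prog, place, config, for_test=True)
# ... fine-tune train_quant, then freeze the eval graph; with save_int8=True,
# convert() returns the freezed program plus an int8 copy
freezed, freezed_int8 = convert(
    test_quant, fluid.global_scope(), place, config, save_int8=True)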
......
# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
sys.path.append("../")
import unittest
import paddle.fluid as fluid
from paddleslim.nas import SearchSpaceFactory
class TestSearchSpaceFactory(unittest.TestCase):
def test_factory(self):
# if output_size is 1, the model will add fc layer in the end.
config = {'input_size': 224, 'output_size': 7, 'block_num': 5}
space = SearchSpaceFactory()
my_space = space.get_search_space([('MobileNetV2Space', config)])
model_arch = my_space.token2arch()
train_prog = fluid.Program()
startup_prog = fluid.Program()
with fluid.program_guard(train_prog, startup_prog):
input_size = config['input_size']
model_input = fluid.layers.data(
name='model_in',
shape=[1, 3, input_size, input_size],
dtype='float32',
append_batch_size=False)
predict = model_arch[0](model_input)
self.assertTrue(predict.shape[2] == config['output_size'])
class TestMultiSearchSpace(unittest.TestCase):
def test_multi_space(self):
space = SearchSpaceFactory()
config0 = {'input_size': 224, 'output_size': 7, 'block_num': 5}
config1 = {'input_size': 7, 'output_size': 1, 'block_num': 2}
my_space = space.get_search_space([('MobileNetV2Space', config0),
('ResNetSpace', config1)])
model_archs = my_space.token2arch()
train_prog = fluid.Program()
startup_prog = fluid.Program()
with fluid.program_guard(train_prog, startup_prog):
input_size = config0['input_size']
model_input = fluid.layers.data(
name='model_in',
shape=[1, 3, input_size, input_size],
dtype='float32',
append_batch_size=False)
for model_arch in model_archs:
predict = model_arch(model_input)
model_input = predict
print(predict)
if __name__ == '__main__':
unittest.main()