Commit d2c912d8 authored by yangfukui

add ignore pyc files; quant test

Parent 268e88b4
 *.egg-info
+*.pyc
 build/
 ./dist/
@@ -27,7 +27,7 @@ class ResNet():
         self.layers = layers
         self.prefix_name = prefix_name
 
-    def net(self, input, class_dim=1000, conv1_name='conv1', fc_name=None):
+    def net(self, input, class_dim=1000, conv1_name='conv1', fc_name='res_fc'):
         layers = self.layers
         prefix_name = self.prefix_name if self.prefix_name is '' else self.prefix_name + '_'
         supported_layers = [34, 50, 101, 152]
...
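The only change in this hunk is the fc_name default (None → 'res_fc'), which pins a stable name prefix on the final FC layer's parameters so pretrained weights can be matched by name. A minimal sketch of how such a name typically flows into ParamAttr, assuming the usual fluid naming pattern (the body of net() is not part of this diff):

import paddle.fluid as fluid

def fc_out(input, class_dim=1000, fc_name='res_fc'):
    # Hypothetical helper, not from the commit: with a non-None fc_name the
    # FC weights/bias get deterministic names; with fc_name=None, fluid
    # auto-generates names such as 'fc_0.w_0'.
    param_attr = fluid.ParamAttr(name=fc_name + '_weights') if fc_name else None
    bias_attr = fluid.ParamAttr(name=fc_name + '_offset') if fc_name else None
    return fluid.layers.fc(input=input, size=class_dim,
                           param_attr=param_attr, bias_attr=bias_attr)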
@@ -189,7 +189,7 @@ def train(args):
         'activation_quantize_type': 'abs_max',
         # weight quantize bit num, default is 8
         'weight_bits': 8,
-        # activation quantize bit num default is 8
+        # activation quantize bit num, default is 8
         'activation_bits': 8,
         # op of name_scope in not_quant_pattern list, will not quantized
         'not_quant_pattern': ['skip_quant'],
@@ -199,7 +199,7 @@ def train(args):
         'dtype': 'int8',
         # window size for 'range_abs_max' quantization. defaulf is 10000
         'window_size': 10000,
-        # The decay coefficient of moving averagedefault is 0.9
+        # The decay coefficient of moving average, default is 0.9
         'moving_rate': 0.9,
         # if set quant_weight_only True, then only quantize parameters of layers which need quantization,
         # and insert anti-quantization op for parameters of these layers.
...
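The two hunks above spell out the quantization config that train() builds. A minimal sketch of assembling the full dict and handing it to a quantization-aware transform, assuming the PaddleSlim quant_aware API (the actual wiring in quanter_test.py is not shown in this diff):

import paddle.fluid as fluid
from paddleslim.quant import quant_aware  # assumed import, not part of this commit

quant_config = {
    'weight_quantize_type': 'abs_max',
    'activation_quantize_type': 'abs_max',
    'weight_bits': 8,
    'activation_bits': 8,
    'not_quant_pattern': ['skip_quant'],
    'dtype': 'int8',
    'window_size': 10000,
    'moving_rate': 0.9,
}

# Toy program standing in for the real model graph.
main_prog = fluid.Program()
startup_prog = fluid.Program()
with fluid.program_guard(main_prog, startup_prog):
    image = fluid.layers.data(name='image', shape=[3, 224, 224], dtype='float32')
    out = fluid.layers.fc(input=image, size=1000, act='softmax')

place = fluid.CUDAPlace(0)
fluid.Executor(place).run(startup_prog)
# Returns a program with fake quantize/dequantize ops inserted for the
# weights and activations selected by quant_config.
quant_prog = quant_aware(main_prog, place, quant_config, for_test=False)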
 #!/usr/bin/env bash
+source activate py27_paddle1.6
 #MobileNet v1:
 python quanter_test.py \
 --model=MobileNet \
---pretrained_fp32_model=${pretrain_dir}/MobileNetV1_pretrained \
+--pretrained_fp32_model='../../pretrain/MobileNetV1_pretrained/' \
 --use_gpu=True \
---data_dir=${data_dir} \
+--data_dir='/home/ssd8/wsz/tianfei01/traindata/imagenet/' \
 --batch_size=256 \
 --total_images=1281167 \
 --class_dim=1000 \
@@ -17,7 +18,6 @@ python quanter_test.py \
 --act_quant_type=abs_max \
 --wt_quant_type=abs_max
 #ResNet50:
 #python quanter_test.py \
 # --model=ResNet50 \
...