From d2c912d88365f77ffcae1098a7399c84d2437069 Mon Sep 17 00:00:00 2001
From: yangfukui
Date: Thu, 7 Nov 2019 19:22:22 +0800
Subject: [PATCH] add ignore pyc files; quant test

---
 .gitignore                             | 1 +
 paddleslim/quant/test/models/resnet.py | 2 +-
 paddleslim/quant/test/quanter_test.py  | 4 ++--
 paddleslim/quant/test/run_quant.sh     | 6 +++---
 4 files changed, 7 insertions(+), 6 deletions(-)

diff --git a/.gitignore b/.gitignore
index 2ea48a8b..3f9835a0 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,3 +1,4 @@
 *.egg-info
+*.pyc
 build/
 ./dist/
diff --git a/paddleslim/quant/test/models/resnet.py b/paddleslim/quant/test/models/resnet.py
index 0adf4276..6272727d 100644
--- a/paddleslim/quant/test/models/resnet.py
+++ b/paddleslim/quant/test/models/resnet.py
@@ -27,7 +27,7 @@ class ResNet():
         self.layers = layers
         self.prefix_name = prefix_name
 
-    def net(self, input, class_dim=1000, conv1_name='conv1', fc_name=None):
+    def net(self, input, class_dim=1000, conv1_name='conv1', fc_name='res_fc'):
         layers = self.layers
         prefix_name = self.prefix_name if self.prefix_name is '' else self.prefix_name + '_'
         supported_layers = [34, 50, 101, 152]
diff --git a/paddleslim/quant/test/quanter_test.py b/paddleslim/quant/test/quanter_test.py
index 83e69335..988b1608 100644
--- a/paddleslim/quant/test/quanter_test.py
+++ b/paddleslim/quant/test/quanter_test.py
@@ -189,7 +189,7 @@ def train(args):
         'activation_quantize_type': 'abs_max',
         # weight quantize bit num, default is 8
         'weight_bits': 8,
-        # activation quantize bit num, default is 8 
+        # activation quantize bit num, default is 8
         'activation_bits': 8,
         # op of name_scope in not_quant_pattern list, will not quantized
         'not_quant_pattern': ['skip_quant'],
@@ -199,7 +199,7 @@
         'dtype': 'int8',
         # window size for 'range_abs_max' quantization. defaulf is 10000
         'window_size': 10000,
-        # The decay coefficient of moving average,default is 0.9
+        # The decay coefficient of moving average, default is 0.9
         'moving_rate': 0.9,
         # if set quant_weight_only True, then only quantize parameters of layers which need quantization,
         # and insert anti-quantization op for parameters of these layers.
diff --git a/paddleslim/quant/test/run_quant.sh b/paddleslim/quant/test/run_quant.sh
index 35f1c05a..1ac883f7 100644
--- a/paddleslim/quant/test/run_quant.sh
+++ b/paddleslim/quant/test/run_quant.sh
@@ -1,11 +1,12 @@
 #!/usr/bin/env bash
+source activate py27_paddle1.6
 
 #MobileNet v1:
 python quanter_test.py \
     --model=MobileNet \
-    --pretrained_fp32_model=${pretrain_dir}/MobileNetV1_pretrained \
+    --pretrained_fp32_model='../../pretrain/MobileNetV1_pretrained/' \
     --use_gpu=True \
-    --data_dir=${data_dir} \
+    --data_dir='/home/ssd8/wsz/tianfei01/traindata/imagenet/' \
     --batch_size=256 \
     --total_images=1281167 \
     --class_dim=1000 \
@@ -17,7 +18,6 @@ python quanter_test.py \
     --act_quant_type=abs_max \
     --wt_quant_type=abs_max
 
-
 #ResNet50:
 #python quanter_test.py \
 #    --model=ResNet50 \
-- 
GitLab
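
Note: the quanter_test.py hunks above only touch comments in the quantization
config dict. For reference, below is a minimal sketch of how a config with these
keys is typically assembled and passed to PaddleSlim's quant_aware API. The toy
program, tensor shapes, and place selection are illustrative assumptions and are
not part of this patch; the real setup lives in quanter_test.py.

    import paddle.fluid as fluid
    from paddleslim.quant import quant_aware

    # Keys mirror the config edited in the diff above.
    quant_config = {
        'weight_quantize_type': 'abs_max',
        'activation_quantize_type': 'abs_max',
        'weight_bits': 8,
        'activation_bits': 8,
        'not_quant_pattern': ['skip_quant'],  # name_scopes left un-quantized
        'dtype': 'int8',
        'window_size': 10000,  # used by 'range_abs_max' quantization
        'moving_rate': 0.9,    # decay of the moving-average scale
    }

    # A toy program stands in for the real training program built in quanter_test.py.
    main_prog = fluid.Program()
    startup_prog = fluid.Program()
    with fluid.program_guard(main_prog, startup_prog):
        image = fluid.data(name='image', shape=[None, 3, 224, 224], dtype='float32')
        logits = fluid.layers.fc(input=image, size=1000)

    place = fluid.CPUPlace()
    fluid.Executor(place).run(startup_prog)

    # Insert fake quant/dequant ops; pass for_test=True for the eval program instead.
    quant_prog = quant_aware(main_prog, place, config=quant_config, for_test=False)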