diff --git a/demo/auto_compression/detection/README.md b/demo/auto_compression/detection/README.md index 4c7dcc2a4672883c9a58b733dbdbb19e178404a4..eb75ae20c7f98d9f3cf6a307c36e0250f1a64224 100644 --- a/demo/auto_compression/detection/README.md +++ b/demo/auto_compression/detection/README.md @@ -45,7 +45,8 @@ pip install paddlepaddle-gpu 安装paddleslim: ```shell -pip install paddleslim +git clone https://github.com/PaddlePaddle/PaddleSlim.git && cd PaddleSlim +python setup.py install ``` 安装paddledet: diff --git a/demo/auto_compression/image_classification/README.md b/demo/auto_compression/image_classification/README.md index 1393e23b40fd06dd831d4ea5d2d54aca6907c4a5..f0b2224e900595514a65070cf7756bd57cf38ec0 100644 --- a/demo/auto_compression/image_classification/README.md +++ b/demo/auto_compression/image_classification/README.md @@ -43,7 +43,8 @@ pip install paddlepaddle-gpu 安装paddleslim: ```shell -pip install paddleslim +git clone https://github.com/PaddlePaddle/PaddleSlim.git && cd PaddleSlim +python setup.py install ``` #### 3.2 准备数据集 diff --git a/demo/auto_compression/nlp/README.md b/demo/auto_compression/nlp/README.md index 06587000732e26d62613c6e1ed7b3ddaff3d3bb2..3500cbf9d114ed865113d657d8e0e2cf9f8e0dd3 100644 --- a/demo/auto_compression/nlp/README.md +++ b/demo/auto_compression/nlp/README.md @@ -56,7 +56,8 @@ pip install paddlepaddle-gpu 安装paddleslim: ```shell -pip install paddleslim +git clone https://github.com/PaddlePaddle/PaddleSlim.git && cd PaddleSlim +python setup.py install ``` 安装paddlenlp: diff --git a/demo/auto_compression/semantic_segmentation/README.md b/demo/auto_compression/semantic_segmentation/README.md index a8d592994119af0ef215657739e53b3105b555cb..5385dd5a7fdac69386f2e83284510be08f1da9cd 100644 --- a/demo/auto_compression/semantic_segmentation/README.md +++ b/demo/auto_compression/semantic_segmentation/README.md @@ -48,7 +48,8 @@ pip install paddlepaddle-gpu 安装paddleslim: ```shell -pip install paddleslim +git clone https://github.com/PaddlePaddle/PaddleSlim.git && cd PaddleSlim +python setup.py install ``` 安装paddleseg diff --git 
a/paddleslim/auto_compression/utils/predict.py b/paddleslim/auto_compression/utils/predict.py index d72369cbe8a1e251ca311097770d55b9a6f56e7b..0e268b846fa5a5cc6d56cd4383aad81842c81f66 100644 --- a/paddleslim/auto_compression/utils/predict.py +++ b/paddleslim/auto_compression/utils/predict.py @@ -27,9 +27,9 @@ def predict_compressed_model(model_dir, latency_dict(dict): The latency latency of the model under various compression strategies. """ local_rank = paddle.distributed.get_rank() - quant_model_path = f'quant_model/rank_{local_rank}' - prune_model_path = f'prune_model/rank_{local_rank}' - sparse_model_path = f'sparse_model/rank_{local_rank}' + quant_model_path = f'quant_model_rank_{local_rank}_tmp' + prune_model_path = f'prune_model_rank_{local_rank}_tmp' + sparse_model_path = f'sparse_model_rank_{local_rank}_tmp' latency_dict = {} @@ -116,7 +116,7 @@ def predict_compressed_model(model_dir, model_dir=sparse_model_path, model_filename=model_filename, params_filename=params_filename, - save_model_path='quant_model', + save_model_path=quant_model_path, quantizable_op_type=["conv2d", "depthwise_conv2d", "mul"], is_full_quantize=False, activation_bits=8, @@ -131,10 +131,10 @@ def predict_compressed_model(model_dir, latency_dict.update({f'sparse_{sparse_ratio}_int8': latency}) # NOTE: Delete temporary model files - if os.path.exists('quant_model'): - shutil.rmtree('quant_model', ignore_errors=True) - if os.path.exists('prune_model'): - shutil.rmtree('prune_model', ignore_errors=True) - if os.path.exists('sparse_model'): - shutil.rmtree('sparse_model', ignore_errors=True) + if os.path.exists(quant_model_path): + shutil.rmtree(quant_model_path, ignore_errors=True) + if os.path.exists(prune_model_path): + shutil.rmtree(prune_model_path, ignore_errors=True) + if os.path.exists(sparse_model_path): + shutil.rmtree(sparse_model_path, ignore_errors=True) return latency_dict diff --git a/paddleslim/auto_compression/utils/prune_model.py 
b/paddleslim/auto_compression/utils/prune_model.py index b9e27fa0e104e57e7cb1199739645820e7f21f74..5152d06acc778ddaa9c23a83025b6fefe71f175d 100644 --- a/paddleslim/auto_compression/utils/prune_model.py +++ b/paddleslim/auto_compression/utils/prune_model.py @@ -122,7 +122,7 @@ def get_prune_model(model_file, param_file, ratio, save_path): main_prog = static.Program() startup_prog = static.Program() place = paddle.CPUPlace() - exe = paddle.static.Executor() + exe = paddle.static.Executor(place) scope = static.global_scope() exe.run(startup_prog)