Unverified commit b22dbf3a, authored by ceci3, committed by GitHub

Rename fluid API to static API (#1193)

Parent 2d4964bc
@@ -61,7 +61,7 @@ image = paddle.static.data(
train_loader = paddle.io.DataLoader(train_dataset, feed_list=[image], batch_size=32, return_list=False)

# Start auto compression
ac = AutoCompression(
-    model_dir="./MobileNetV1_infer/",
+    model_dir="./MobileNetV1_infer",
    model_filename="inference.pdmodel",
    params_filename="inference.pdiparams",
    save_dir="output",
@@ -76,6 +76,7 @@ ac.compress()
- The dataset passed to the DataLoader is the dataset used by the model being compressed; the DataLoader inherits from `paddle.io.DataLoader`.
- If there is no need to validate model accuracy during auto compression, `eval_callback` does not have to be passed a function; the program will automatically select the best model based on the loss (see the sketch after this list).
- Compression algorithms such as quantization, distillation, and pruning defined in the auto-compression Config are merged and executed together; available strategies include quantization + distillation, pruning + distillation, and so on.
- If the parameters of the model to be compressed are stored in separate files, first save them as a single binary file with the [convert.py](./convert.py) script.
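A minimal sketch of passing a custom evaluation callback, assuming the callback receives the executor, the compiled test program, the feed names and the fetch targets (the exact signature and the `train_dataloader` keyword should be checked against the installed PaddleSlim version); `val_loader` and the `(image, label)` unpacking are placeholders for the user's own validation pipeline, not part of this change:

```python
import numpy as np

# Hypothetical evaluation callback; the argument order follows the usual
# PaddleSlim convention but is an assumption here.
def eval_function(exe, compiled_test_program, test_feed_names, test_fetch_list):
    correct, total = 0, 0
    for image, label in val_loader():  # val_loader: placeholder validation DataLoader
        logits = exe.run(compiled_test_program,
                         feed={test_feed_names[0]: image},
                         fetch_list=test_fetch_list)[0]
        correct += int((np.argmax(np.array(logits), axis=1) ==
                        np.array(label).flatten()).sum())
        total += len(np.array(label))
    return correct / total  # the best metric decides which model is kept

ac = AutoCompression(
    model_dir="./MobileNetV1_infer",
    model_filename="inference.pdmodel",
    params_filename="inference.pdiparams",
    save_dir="output",
    train_dataloader=train_loader,      # keyword name assumed
    eval_callback=eval_function)        # remaining arguments as in the example above
```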
## Application Examples
......
import argparse
import functools
import paddle
from ..utility import add_arguments, print_arguments
parser = argparse.ArgumentParser(description=__doc__)
add_arg = functools.partial(add_arguments, argparser=parser)
# yapf: disable
add_arg('model_dir', str, None, "inference model directory.")
add_arg('save_dir', str, None, "directory to save compressed model.")
add_arg('model_filename', str, None, "inference model filename.")
# yapf: enable
if __name__ == '__main__':
    args = parser.parse_args()
    print_arguments(args)
    paddle.enable_static()
    place = paddle.CPUPlace()
    exe = paddle.static.Executor(place)
    # Parameters stored in separate files can only be loaded with the legacy
    # fluid loader, which is why paddle.fluid.io is still used here.
    [inference_program, feed_target_names,
     fetch_targets] = paddle.fluid.io.load_inference_model(
         dirname=args.model_dir,
         executor=exe,
         model_filename=args.model_filename,
         params_filename=None)
    feed_vars = [
        inference_program.global_block().var(name) for name in feed_target_names
    ]
    # Re-save the model with all parameters merged into a single binary file.
    paddle.static.save_inference_model(
        args.save_dir,
        executor=exe,
        model_filename='model.pdmodel',
        params_filename='model.pdiparams',
        feed_vars=feed_vars,
        fetch_vars=fetch_targets)
@@ -3,7 +3,7 @@ Global:
  reader_config: configs/yolo_reader.yml
  input_list: ['image', 'scale_factor']
  Evaluation: True
-  model_dir: ./ppyoloe_crn_l_300e_coco/
+  model_dir: ./ppyoloe_crn_l_300e_coco
  model_filename: model.pdmodel
  params_filename: model.pdiparams
......
@@ -2,7 +2,7 @@ Global:
  reader_config: configs/ssd_reader.yml
  input_list: ['image', 'scale_factor', 'im_shape']
  Evaluation: True
-  model_dir: ./ssd_mobilenet_v1_300_120e_voc/
+  model_dir: ./ssd_mobilenet_v1_300_120e_voc
  model_filename: model.pdmodel
  params_filename: model.pdiparams
......
@@ -2,7 +2,7 @@ Global:
  reader_config: configs/tinypose_reader.yml
  input_list: ['image']
  Evaluation: False
-  model_dir: ./tinypose_128x96/
+  model_dir: ./tinypose_128x96
  model_filename: model.pdmodel
  params_filename: model.pdiparams
......
@@ -2,7 +2,7 @@ Global:
  reader_config: configs/yolo_reader.yml
  input_list: ['image', 'im_shape', 'scale_factor']
  Evaluation: True
-  model_dir: ./yolov3_mobilenet_v1_270e_coco/ # Model Link: https://paddledet.bj.bcebos.com/models/slim/yolov3_mobilenet_v1_270e_coco.tar
+  model_dir: ./yolov3_mobilenet_v1_270e_coco # Model Link: https://paddledet.bj.bcebos.com/models/slim/yolov3_mobilenet_v1_270e_coco.tar
  model_filename: model.pdmodel
  params_filename: model.pdiparams
......
@@ -4,7 +4,7 @@ Global:
  input_list: {'image': 'x2paddle_images'}
  Evaluation: True
  arch: 'YOLOv5'
-  model_dir: ./yolov5s_infer/
+  model_dir: ./yolov5s_infer
  model_filename: model.pdmodel
  params_filename: model.pdiparams
......
@@ -109,7 +109,7 @@ tar -zxvf afqmc.tar
export CUDA_VISIBLE_DEVICES=0
python run.py \
    --model_type='ppminilm' \
-    --model_dir='./afqmc/' \
+    --model_dir='./afqmc' \
    --model_filename='inference.pdmodel' \
    --params_filename='inference.pdiparams' \
    --dataset='clue' \
@@ -147,7 +147,7 @@ TrainConfig:
```yaml
Distillation:
-  teacher_model_dir: ./afqmc/
+  teacher_model_dir: ./afqmc
  teacher_model_filename: inference.pdmodel
  teacher_params_filename: inference.pdiparams
```
......
export FLAGS_cudnn_deterministic=True
python run.py \
    --model_type='ppminilm' \
-    --model_dir='./all_original_models/AFQMC' \
-    --model_filename='infer.pdmodel' \
-    --params_filename='infer.pdiparams' \
+    --model_dir='./afqmc' \
+    --model_filename='inference.pdmodel' \
+    --params_filename='inference.pdiparams' \
    --dataset='clue' \
    --save_dir='./save_afqmc_pruned/' \
    --batch_size=16 \
    --max_seq_length=128 \
    --task_name='afqmc' \
-    --config_path='./configs/afqmc.yaml'
+    --config_path='./configs/pp-minilm/auto/afqmc.yaml'
@@ -35,7 +35,7 @@ from .strategy_config import TrainConfig, ProgramInfo, merge_config
from .auto_strategy import prepare_strategy, get_final_quant_config, create_strategy_config, create_train_config
from .config_helpers import load_config, extract_strategy_config, extract_train_config
from .utils.predict import with_variable_shape
-from .utils import get_feed_vars, wrap_dataloader
+from .utils import get_feed_vars, wrap_dataloader, load_inference_model

_logger = get_logger(__name__, level=logging.INFO)
@@ -64,14 +64,10 @@ class AutoCompression:
        Args:
            model_dir(str): The path of inference model that will be compressed, and
-                the model and params that saved by ``paddle.static.io.save_inference_model``
+                the model and params that saved by ``paddle.static.save_inference_model``
                are under the path.
-            model_filename(str, optional): The name of model file. If parameters
-                are saved in separate files, set it as 'None'. Default: 'None'.
-            params_filename(str, optional): The name of params file.
-                When all parameters are saved in a single file, set it
-                as filename. If parameters are saved in separate files,
-                set it as 'None'. Default : 'None'.
+            model_filename(str): The name of model file.
+            params_filename(str): The name of params file.
            save_dir(str): The path to save compressed model. The models in this directory will be overwrited
                after calling 'compress()' function.
            train_data_loader(Python Generator, Paddle.io.DataLoader): The
@@ -121,12 +117,19 @@ class AutoCompression:
            deploy_hardware(str, optional): The hardware you want to deploy. Default: 'gpu'.
        """
        self.model_dir = model_dir
        if model_filename == 'None':
            model_filename = None
        self.model_filename = model_filename
        if params_filename == 'None':
            params_filename = None
        self.params_filename = params_filename
+        if params_filename is None and model_filename is not None:
+            raise NotImplementedError(
+                "NOT SUPPORT parameters saved in separate files. Please convert it to single binary file first."
+            )
        self.final_dir = save_dir
        if not os.path.exists(self.final_dir):
            os.makedirs(self.final_dir)
@@ -241,12 +244,9 @@ class AutoCompression:
        ], f'Type of input_shapes should be in [dict, tuple or list] but got {type(input_shapes)}.'
        paddle.enable_static()
        exe = paddle.static.Executor(paddle.CPUPlace())
-        [inference_program, feed_target_names, fetch_targets] = (
-            paddle.static.load_inference_model(
-                model_dir,
-                exe,
-                model_filename=model_filename,
-                params_filename=params_filename))
+        [inference_program, feed_target_names,
+         fetch_targets] = (load_inference_model(model_dir, exe, model_filename,
+                                                params_filename))
        if type(input_shapes) in [list, tuple]:
            assert len(
@@ -307,10 +307,10 @@ class AutoCompression:
        return exe, places

    def _get_model_type(self, exe, model_dir, model_filename, params_filename):
-        [inference_program, _, _]= paddle.fluid.io.load_inference_model( \
-            dirname=model_dir, \
+        [inference_program, _, _]= (load_inference_model( \
+            model_dir, \
            model_filename=model_filename, params_filename=params_filename,
-            executor=exe)
+            executor=exe))
        _, _, model_type = get_patterns(inference_program)
        _logger.info(f"Detect model type: {model_type}")
        return model_type
@@ -575,10 +575,23 @@ class AutoCompression:
        final_model_path = os.path.join(self.final_dir)
        if not os.path.exists(final_model_path):
            os.makedirs(final_model_path)
-        tmp_model_file = os.path.join(tmp_model_path, self.model_filename)
-        tmp_params_file = os.path.join(tmp_model_path, self.params_filename)
+        tmp_model_file = ".".join([tmp_model_path, "pdmodel"])
+        if not os.path.exists(tmp_model_file):
+            tmp_model_file = os.path.join(tmp_model_path, self.model_filename)
+        tmp_params_file = ".".join([tmp_model_path, "pdiparams"])
+        if not os.path.exists(tmp_params_file):
+            tmp_params_file = os.path.join(tmp_model_path, self.params_filename)
+        if self.model_filename is None:
+            self.model_filename = "infer.pdmodel"
+        if self.params_filename is None:
+            self.params_filename = "infer.pdiparams"
        final_model_file = os.path.join(final_model_path, self.model_filename)
        final_params_file = os.path.join(final_model_path, self.params_filename)
        if paddle.distributed.get_rank() == 0:
            shutil.move(tmp_model_file, final_model_file)
            shutil.move(tmp_params_file, final_params_file)
@@ -656,8 +669,8 @@ class AutoCompression:
            model_dir = os.path.join(
                self.tmp_dir, 'strategy_{}'.format(str(strategy_idx)))
-            [inference_program, feed_target_names, fetch_targets]= paddle.fluid.io.load_inference_model( \
-                dirname=model_dir, \
+            [inference_program, feed_target_names, fetch_targets]= load_inference_model( \
+                model_dir, \
                model_filename=self.model_filename, params_filename=self.params_filename,
                executor=self._exe)
@@ -694,7 +707,7 @@ class AutoCompression:
    def _start_train(self, train_program_info, test_program_info, strategy,
                     train_config):
        best_metric = -1.0
-        total_epochs = self.train_config.epochs if self.train_config.epochs else 100
+        total_epochs = train_config.epochs if train_config.epochs else 100
        total_train_iter = 0
        for epoch_id in range(total_epochs):
            for batch_id, data in enumerate(self.train_dataloader()):
@@ -716,8 +729,8 @@ class AutoCompression:
                            total_train_iter, epoch_id, batch_id,
                            np_probs_float))
                total_train_iter += 1
-                if total_train_iter % int(self.train_config.eval_iter
-                ) == 0 and total_train_iter != 0:
+                if total_train_iter % int(
+                        train_config.eval_iter) == 0 and total_train_iter != 0:
                    if self.eval_function is not None:

                        # GMP pruner step 3: update params before summrizing sparsity, saving model or evaluation.
@@ -755,7 +768,7 @@ class AutoCompression:
                    _logger.warning(
                        "Not set eval function, so unable to test accuracy performance."
                    )
-                if self.train_config.train_iter and total_train_iter >= self.train_config.train_iter:
+                if train_config.train_iter and total_train_iter >= train_config.train_iter:
                    break

        if 'unstructure' in self._strategy or train_config.sparse_model:
@@ -784,11 +797,16 @@ class AutoCompression:
                'strategy_{}'.format(str(strategy_idx + 1)))
            if not os.path.exists(model_dir):
                os.makedirs(model_dir)
-            paddle.fluid.io.save_inference_model(
-                dirname=str(model_dir),
-                feeded_var_names=test_program_info.feed_target_names,
-                target_vars=test_program_info.fetch_targets,
+            feed_vars = [
+                test_program.global_block().var(name)
+                for name in test_program_info.feed_target_names
+            ]
+            paddle.static.save_inference_model(
+                path_prefix=str(model_dir),
+                feed_vars=feed_vars,
+                fetch_vars=test_program_info.fetch_targets,
                executor=self._exe,
-                main_program=test_program,
+                program=test_program,
                model_filename=self.model_filename,
                params_filename=self.params_filename)
@@ -23,6 +23,7 @@ from ..dist import *
from ..common.recover_program import recover_inference_program, _remove_fetch_node
from ..common import get_logger
from .strategy_config import ProgramInfo
+from .utils import load_inference_model

_logger = get_logger(__name__, level=logging.INFO)

__all__ = [
@@ -151,20 +152,21 @@ def _load_program_and_merge(executor,
                            feed_target_names=None):
    scope = paddle.static.global_scope()
    new_scope = paddle.static.Scope()
    if params_filename == 'None':
        params_filename = None
-    try:
-        with paddle.static.scope_guard(new_scope):
-            [teacher_program, teacher_feed_target_names, teacher_fetch_targets]= paddle.fluid.io.load_inference_model( \
-                dirname=model_dir, \
-                model_filename=model_filename, \
-                params_filename=params_filename, \
-                executor=executor)
-    except:
-        with paddle.static.scope_guard(new_scope):
-            [teacher_program, teacher_feed_target_names, teacher_fetch_targets]= paddle.static.load_inference_model( \
-                path_prefix=model_dir, \
-                executor=executor)
+    if params_filename is None and model_filename is not None:
+        raise NotImplementedError(
+            "NOT SUPPORT parameters saved in separate files. Please convert it to single binary file first."
+        )
+    with paddle.static.scope_guard(new_scope):
+        [teacher_program, teacher_feed_target_names, teacher_fetch_targets]= (load_inference_model( \
+            model_dir, \
+            model_filename=model_filename, \
+            params_filename=params_filename, \
+            executor=executor))
    _remove_fetch_node(teacher_program)
...@@ -209,9 +211,9 @@ def build_distill_program(executor, ...@@ -209,9 +211,9 @@ def build_distill_program(executor,
"""build distill program with infermodel""" """build distill program with infermodel"""
startup_program = paddle.static.Program() startup_program = paddle.static.Program()
if train_program_info is None: if train_program_info is None:
[train_program, feed_target_names, fetch_targets]= paddle.static.load_inference_model( \ [train_program, feed_target_names, fetch_targets]= (load_inference_model( \
path_prefix=config["model_dir"] if "model_dir" in config else config["model_path_prefix"], \ path_prefix=config["model_dir"] if "model_dir" in config else config["model_path_prefix"], \
executor=executor) executor=executor))
train_program = recover_inference_program(train_program) train_program = recover_inference_program(train_program)
else: else:
train_program = train_program_info.program train_program = train_program_info.program
@@ -497,7 +499,7 @@ def remove_unused_var_nodes(program):
    Returns:
        program(paddle.static.Program): The sparse model.
    '''
-    from paddle.fluid import core
+    from paddle.framework import core
    from paddle.fluid.framework import IrGraph
    graph = IrGraph(core.Graph(program.desc), for_test=True)
    removed_nodes = set()
......
@@ -16,6 +16,9 @@ from __future__ import absolute_import
from .predict import predict_compressed_model
from .dataloader import *
from . import dataloader
+from .load_model import *
+from . import load_model

__all__ = ["predict_compressed_model"]
__all__ += dataloader.__all__
+__all__ += load_model.__all__
import paddle
from paddle.fluid.framework import IrGraph
-from paddle.fluid import core
+from paddle.framework import core
from paddle.fluid.contrib.slim.quantization import QuantizationTransformPass, AddQuantDequantPass, QuantizationFreezePass

try:
@@ -11,6 +11,8 @@ except:
TRANSFORM_PASS_OP_TYPES = QuantizationTransformPass._supported_quantizable_op_type
QUANT_DEQUANT_PASS_OP_TYPES = AddQuantDequantPass._supported_quantizable_op_type
+from .load_model import load_inference_model
def post_quant_fake(executor, def post_quant_fake(executor,
model_dir, model_dir,
...@@ -51,7 +53,7 @@ def post_quant_fake(executor, ...@@ -51,7 +53,7 @@ def post_quant_fake(executor,
for op_type in _quantizable_op_type: for op_type in _quantizable_op_type:
assert op_type in _support_quantize_op_type, \ assert op_type in _support_quantize_op_type, \
op_type + " is not supported for quantization." op_type + " is not supported for quantization."
_program, _feed_list, _fetch_list = paddle.fluid.io.load_inference_model( _program, _feed_list, _fetch_list = load_inference_model(
model_dir, model_dir,
executor, executor,
model_filename=model_filename, model_filename=model_filename,
...@@ -108,12 +110,13 @@ def post_quant_fake(executor, ...@@ -108,12 +110,13 @@ def post_quant_fake(executor,
_program = graph.to_program() _program = graph.to_program()
paddle.fluid.io.save_inference_model( feed_vars = [_program.global_block().var(name) for name in _feed_list]
dirname=save_model_path, paddle.static.save_inference_model(
path_prefix=save_model_path,
model_filename=model_filename, model_filename=model_filename,
params_filename=params_filename, params_filename=params_filename,
feeded_var_names=_feed_list, feed_vars=feed_vars,
target_vars=_fetch_list, fetch_vars=_fetch_list,
executor=executor, executor=executor,
main_program=_program) program=_program)
print("The quantized model is saved in: " + save_model_path) print("The quantized model is saved in: " + save_model_path)
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserve.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import paddle
__all__ = ['load_inference_model']
def load_inference_model(path_prefix,
executor,
model_filename=None,
params_filename=None):
if model_filename is not None and params_filename is not None:
[inference_program, feed_target_names, fetch_targets] = (
paddle.static.load_inference_model(
path_prefix=path_prefix,
executor=executor,
model_filename=model_filename,
params_filename=params_filename))
else:
[inference_program, feed_target_names, fetch_targets] = (
paddle.static.load_inference_model(
path_prefix=path_prefix, executor=executor))
return [inference_program, feed_target_names, fetch_targets]
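For reference, a minimal sketch of how this helper tends to be called in the rest of this change; the model directory and executor setup below are placeholders, and the import path is assumed from the package layout shown here:

```python
import paddle
# Import path assumed from the package layout in this change.
from paddleslim.auto_compression.utils.load_model import load_inference_model

paddle.enable_static()
exe = paddle.static.Executor(paddle.CPUPlace())
# When both filenames are given they are forwarded to
# paddle.static.load_inference_model; otherwise the default
# "<path_prefix>.pdmodel"/"<path_prefix>.pdiparams" pair is loaded.
program, feed_names, fetch_targets = load_inference_model(
    "./MobileNetV1_infer",            # placeholder model directory
    exe,
    model_filename="inference.pdmodel",
    params_filename="inference.pdiparams")
```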
@@ -4,6 +4,7 @@ import paddle
from ...analysis import TableLatencyPredictor
from .prune_model import get_sparse_model, get_prune_model
from .fake_ptq import post_quant_fake
+from .load_model import load_inference_model


def with_variable_shape(model_dir, model_filename=None, params_filename=None):
@@ -18,12 +19,11 @@ def with_variable_shape(model_dir, model_filename=None, params_filename=None):
    """
    paddle.enable_static()
    exe = paddle.static.Executor(paddle.CPUPlace())
-    [inference_program, feed_target_names, fetch_targets] = (
-        paddle.fluid.io.load_inference_model(
-            model_dir,
-            exe,
-            model_filename=model_filename,
-            params_filename=params_filename))
+    inference_program, feed_target_names, fetch_targets = load_inference_model(
+        model_dir,
+        exe,
+        model_filename=model_filename,
+        params_filename=params_filename)
    for var_ in inference_program.list_vars():
        if var_.name in feed_target_names:
            if var_.shape.count(-1) > 1:
......
@@ -2,10 +2,10 @@ import os
import time
import numpy as np
import paddle
-import paddle.fluid as fluid
import paddle.static as static
from ...prune import Pruner
from ...core import GraphWrapper
+from .load_model import load_inference_model

__all__ = ["get_sparse_model", "get_prune_model"]
@@ -37,12 +37,8 @@ def get_sparse_model(executor, places, model_file, param_file, ratio,
    startup_prog = static.Program()
    executor.run(startup_prog)

-    [inference_program, feed_target_names, fetch_targets] = (
-        fluid.io.load_inference_model(
-            folder,
-            executor,
-            model_filename=model_name,
-            params_filename=param_name))
+    inference_program, feed_target_names, fetch_targets = load_inference_model(
+        folder, executor, model_filename=model_name, params_filename=param_name)
    thresholds = {}

    graph = GraphWrapper(inference_program)
@@ -87,12 +83,15 @@ def get_sparse_model(executor, places, model_file, param_file, ratio,
            paddle.static.global_scope().find_var(name).get_tensor().set(
                array, paddle.CPUPlace())

-    fluid.io.save_inference_model(
+    feed_vars = [
+        inference_program.global_block().var(name) for name in feed_target_names
+    ]
+    static.save_inference_model(
        save_path,
-        feeded_var_names=feed_target_names,
-        target_vars=fetch_targets,
+        feed_vars=feed_vars,
+        fetch_vars=fetch_targets,
        executor=executor,
-        main_program=inference_program,
+        program=inference_program,
        model_filename=model_name,
        params_filename=param_name)
    print("The pruned model is saved in: ", save_path)
@@ -127,12 +126,8 @@ def get_prune_model(executor, places, model_file, param_file, ratio, save_path):
    scope = static.global_scope()
    executor.run(startup_prog)

-    [inference_program, feed_target_names, fetch_targets] = (
-        fluid.io.load_inference_model(
-            folder,
-            executor,
-            model_filename=model_name,
-            params_filename=param_name))
+    inference_program, feed_target_names, fetch_targets = load_inference_model(
+        folder, executor, model_filename=model_name, params_filename=param_name)
    prune_params = []

    graph = GraphWrapper(inference_program)
@@ -162,11 +157,14 @@ def get_prune_model(executor, places, model_file, param_file, ratio, save_path):
            param_backup=None,
            param_shape_backup=None)

-    fluid.io.save_inference_model(
+    feed_vars = [
+        main_program.global_block().var(name) for name in feed_target_names
+    ]
+    static.save_inference_model(
        save_path,
-        feeded_var_names=feed_target_names,
-        target_vars=fetch_targets,
+        feed_vars=feed_vars,
+        fetch_vars=fetch_targets,
        executor=executor,
-        main_program=main_program,
+        program=main_program,
        model_filename=model_name,
        params_filename=param_name)
import sys
import os
-sys.path.append("../")
+sys.path.append("../../")
import unittest
import tempfile
import paddle
import unittest
import numpy as np
+from static_case import StaticCase
from paddle.io import Dataset
from paddleslim.auto_compression import AutoCompression
from paddleslim.auto_compression.config_helpers import load_config
......