From f5783ed069e1246103da47cdf5ebbeeae581e31e Mon Sep 17 00:00:00 2001
From: Guanghua Yu <742925032@qq.com>
Date: Thu, 27 Oct 2022 17:42:38 +0800
Subject: [PATCH] update dygraph PTQ export model api (#1477)

---
 paddleslim/dygraph/quant/ptq.py | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/paddleslim/dygraph/quant/ptq.py b/paddleslim/dygraph/quant/ptq.py
index 2d8e47d8..78727d95 100644
--- a/paddleslim/dygraph/quant/ptq.py
+++ b/paddleslim/dygraph/quant/ptq.py
@@ -118,7 +118,7 @@ class PTQ(object):
 
         return fuse_list
 
-    def save_quantized_model(self, model, path, input_spec=None):
+    def save_quantized_model(self, model, path, input_spec=None, **kwargs):
         """
         Save the quantized inference model.
 
@@ -131,7 +131,7 @@ class PTQ(object):
                 InputSpec or example Tensor. If None, all input variables of
                 the original Layer's forward method would be the inputs of
                 the saved model. Default: None.
-
+            kwargs (dict, optional): Other save configuration options for compatibility.
         Returns:
             None
         """
@@ -143,7 +143,7 @@ class PTQ(object):
         model.eval()
 
         self.ptq.save_quantized_model(
-            model=model, path=path, input_spec=input_spec)
+            model=model, path=path, input_spec=input_spec, **kwargs)
 
         if training:
             model.train()
--
GitLab
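
For context, the sketch below shows how the updated signature might be exercised from user code: any extra keyword arguments passed to PTQ.save_quantized_model are now forwarded to the underlying Paddle save routine. This is a minimal, hedged example assuming PaddleSlim's dygraph PTQ workflow (quantize, calibrate, save); the MobileNetV1 model, the random-data calibration loop, and the clip_extra keyword are illustrative assumptions, not part of the patch.

# Usage sketch for the updated API (not part of the patch).
import paddle
from paddle.static import InputSpec
from paddle.vision.models import mobilenet_v1
from paddleslim.dygraph.quant import PTQ

# Build a float model and wrap it with the post-training quantization helper.
model = mobilenet_v1(pretrained=False)
ptq = PTQ()
quant_model = ptq.quantize(model)

# Calibrate with a few forward passes (random data here purely as a placeholder;
# real calibration would iterate over a representative dataset).
quant_model.eval()
for _ in range(8):
    quant_model(paddle.rand([1, 3, 224, 224]))

# Extra keyword arguments are forwarded through **kwargs to the underlying
# save implementation; `clip_extra` is only a hypothetical example of such a
# pass-through option and may not be accepted by every Paddle release.
ptq.save_quantized_model(
    model=quant_model,
    path="./ptq_mobilenet_v1/model",
    input_spec=[InputSpec(shape=[None, 3, 224, 224], dtype="float32")],
    clip_extra=True)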