Unverified commit f5783ed0, authored by Guanghua Yu, committed by GitHub

update dygraph PTQ export model api (#1477)

Parent commit d6928c6a
@@ -118,7 +118,7 @@ class PTQ(object):
         return fuse_list

-    def save_quantized_model(self, model, path, input_spec=None):
+    def save_quantized_model(self, model, path, input_spec=None, **kwargs):
         """
         Save the quantized inference model.
@@ -131,7 +131,7 @@ class PTQ(object):
                 InputSpec or example Tensor. If None, all input variables of
                 the original Layer's forward method would be the inputs of
                 the saved model. Default: None.
+            kwargs (dict, optional): Other save configuration options for compatibility.
         Returns:
             None
         """
@@ -143,7 +143,7 @@ class PTQ(object):
         model.eval()

         self.ptq.save_quantized_model(
-            model=model, path=path, input_spec=input_spec)
+            model=model, path=path, input_spec=input_spec, **kwargs)

         if training:
             model.train()
Markdown is supported
0%
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register to comment