Unverified commit 58ac4d43, authored by whs, committed by GitHub

Fix saving inference model in PACT quanter (#728)

Parent b849d609
@@ -213,7 +213,12 @@ class QAT(object):

     def save_quantized_model(self, model, path, input_spec=None):
         if self.weight_preprocess is not None or self.act_preprocess is not None:
+            training = model.training
             model = self._remove_preprocess(model)
+            if training:
+                model.train()
+            else:
+                model.eval()

         self.imperative_qat.save_quantized_model(
             layer=model, path=path, input_spec=input_spec)
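The core of the fix is visible in the hunk above: `_remove_preprocess` rebuilds the quantized model, which can leave it in a different train/eval state than the caller set, so the patch captures the original `model.training` flag first and restores it afterwards. A minimal standalone sketch of this save-and-restore pattern (the helper name `transform_preserving_mode` is illustrative, not PaddleSlim API):

import paddle

def transform_preserving_mode(model, transform):
    # paddle.nn.Layer exposes a boolean .training attribute
    was_training = model.training
    # apply a transformation that may reset the mode,
    # e.g. something like QAT._remove_preprocess
    model = transform(model)
    # restore the caller's mode so later training/eval code behaves as before
    if was_training:
        model.train()
    else:
        model.eval()
    return model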
@@ -256,6 +256,16 @@ class TestImperativeQatPACT(unittest.TestCase):
         _logger.info("After quantization: top1: {}, top5: {}".format(top1_1,
                                                                      top5_1))

+        # test for saving model in train mode
+        lenet.train()
+        quanter.save_quantized_model(
+            lenet,
+            './dygraph_qat',
+            input_spec=[
+                paddle.static.InputSpec(
+                    shape=[None, 1, 28, 28], dtype='float32')
+            ])

 if __name__ == '__main__':
     unittest.main()
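For reference, a model saved this way can be loaded back as a static inference program with the standard Paddle API. A minimal sketch, assuming the test above has already written the `./dygraph_qat` prefix (this usage is not part of the patch):

import numpy as np
import paddle

# load the inference model saved by save_quantized_model above
loaded = paddle.jit.load('./dygraph_qat')
loaded.eval()  # inference mode, regardless of the mode at save time

# run one forward pass on a dummy MNIST-shaped input
x = paddle.to_tensor(np.random.rand(1, 1, 28, 28).astype('float32'))
logits = loaded(x)
print(logits.shape)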