Unverified commit 6e13e86a, authored by Peihan, committed by GitHub

Verify correctness of dy2static models by using the Paddle-Inference API (#26372)

* Verify correctness of the dy2static model by using the Paddle-Inference API

* Update the Python doc style
Parent d12ac984
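The change wires a Paddle-Inference check into the existing dygraph-to-static unit tests: each model's static-graph prediction is compared element-wise against the output of the exported model run through the AnalysisPredictor. A minimal sketch of that comparison pattern is shown below; check_inference_matches_static is an illustrative name, and static_out / predictor_out stand in for the per-test results.

import numpy as np


def check_inference_matches_static(static_out, predictor_out, atol=1e-8):
    # Illustrative helper (not part of the commit): mirrors the assertion
    # pattern added to the unit tests below -- element-wise closeness against
    # the static-graph result, reporting only the mismatching entries.
    if not np.allclose(static_out, predictor_out, atol=atol):
        mask = ~np.isclose(static_out, predictor_out, atol=atol)
        raise AssertionError("predictor_res: {},\n static_res: {}".format(
            predictor_out[mask], static_out[mask]))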
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import unittest
import numpy as np
import paddle
import paddle.fluid as fluid
from paddle.fluid.core import AnalysisConfig
from paddle.fluid.core import create_paddle_predictor


class PredictorTools(object):
    '''
    Paddle-Inference predictor
    '''

    def __init__(self, model_path, params_file, feeds_var):
        '''
        __init__
        '''
        self.model_path = model_path
        self.params_file = params_file
        self.feeds_var = feeds_var

    def _load_model_and_set_config(self):
        '''
        load model from file and set analysis config
        '''
        if os.path.exists(os.path.join(self.model_path, self.params_file)):
            # parameters are combined into a single file
            config = AnalysisConfig(
                os.path.join(self.model_path, "__model__"),
                os.path.join(self.model_path, self.params_file))
        else:
            # parameters are stored as separate files in the model directory
            config = AnalysisConfig(os.path.join(self.model_path))

        if fluid.is_compiled_with_cuda():
            config.enable_use_gpu(100, 0)
        else:
            config.disable_gpu()
        config.switch_specify_input_names(True)
        config.switch_use_feed_fetch_ops(False)
        config.enable_memory_optim()
        config.disable_glog_info()
        config.switch_ir_optim(True)

        return config

    def _get_analysis_outputs(self, config):
        '''
        Return outputs of paddle inference.

        Args:
            config (AnalysisConfig): predictor configs

        Returns:
            outs (numpy array): forward network prediction outputs
        '''
        predictor = create_paddle_predictor(config)
        tensor_shapes = predictor.get_input_tensor_shape()
        names = predictor.get_input_names()
        for i, name in enumerate(names):
            #assert name in self.feeds_var, '{} not in feeded dict'.format(name)
            shape = tensor_shapes[name]
            tensor = predictor.get_input_tensor(name)
            feed_data = self.feeds_var[i]
            tensor.copy_from_cpu(np.array(feed_data))
            if type(feed_data) == fluid.LoDTensor:
                tensor.set_lod(feed_data.lod())

        # ensure no diff in multiple repeat times
        repeat_time = 10
        for i in range(repeat_time):
            predictor.zero_copy_run()

        output_names = predictor.get_output_names()
        outs = [
            predictor.get_output_tensor(out_name).copy_to_cpu()
            for out_name in output_names
        ]

        return outs

    def __call__(self):
        '''
        __call__
        '''
        config = self._load_model_and_set_config()
        outputs = self._get_analysis_outputs(config)
        return outputs
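As a usage sketch (not part of the commit): assuming an inference model exported by one of the dygraph-to-static tests already exists on disk, with its parameters combined into the single file named by VARIABLE_FILENAME, the helper can be driven as follows. The directory name ./inference_model and the input shape are placeholders.

import numpy as np
from paddle.fluid.dygraph.io import VARIABLE_FILENAME
from predictor_utils import PredictorTools

# Placeholder path and shape: adapt them to the model actually exported.
model_dir = "./inference_model"
fake_image = np.random.random([1, 3, 224, 224]).astype("float32")

# feeds_var is a list ordered like the predictor's input names.
predictor = PredictorTools(model_dir, VARIABLE_FILENAME, [fake_image])
outs = predictor()  # runs zero_copy_run() internally and returns numpy arrays
print([out.shape for out in outs])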
@@ -23,6 +23,8 @@ from paddle.fluid.dygraph.io import VARIABLE_FILENAME
 from bert_dygraph_model import PretrainModelLayer
 from bert_utils import get_bert_config, get_feed_data_reader
 
+from predictor_utils import PredictorTools
+
 program_translator = ProgramTranslator()
 place = fluid.CUDAPlace(0) if fluid.is_compiled_with_cuda() else fluid.CPUPlace(
 )
@@ -152,6 +154,12 @@ def predict_dygraph_jit(data):
     return pred_res
 
 
+def predict_analysis_inference(data):
+    output = PredictorTools(MODEL_SAVE_PATH, VARIABLE_FILENAME, data)
+    out = output()
+    return out
+
+
 class TestBert(unittest.TestCase):
     def setUp(self):
         self.bert_config = get_bert_config()
@@ -178,9 +186,11 @@ class TestBert(unittest.TestCase):
         dygraph_pred_res = predict_dygraph(self.bert_config, data)
         static_pred_res = predict_static(data)
         dygraph_jit_pred_res = predict_dygraph_jit(data)
+        predictor_pred_res = predict_analysis_inference(data)
 
-        for dy_res, st_res, dy_jit_res in zip(
-                dygraph_pred_res, static_pred_res, dygraph_jit_pred_res):
+        for dy_res, st_res, dy_jit_res, predictor_res in zip(
+                dygraph_pred_res, static_pred_res, dygraph_jit_pred_res,
+                predictor_pred_res):
             self.assertTrue(
                 np.allclose(st_res, dy_res),
                 "dygraph_res: {},\n static_res: {}".format(
@@ -191,6 +201,11 @@ class TestBert(unittest.TestCase):
                 "dygraph_jit_res: {},\n static_res: {}".format(
                     dy_jit_res[~np.isclose(st_res, dy_jit_res)],
                     st_res[~np.isclose(st_res, dy_jit_res)]))
+            self.assertTrue(
+                np.allclose(st_res, predictor_res),
+                "predictor_res: {},\n static_res: {}".format(
+                    predictor_res[~np.isclose(st_res, predictor_res)],
+                    st_res[~np.isclose(st_res, predictor_res)]))
             break
...
@@ -22,6 +22,8 @@ from paddle.fluid.dygraph import to_variable
 from paddle.fluid.dygraph import declarative, ProgramTranslator
 from paddle.fluid.dygraph.io import VARIABLE_FILENAME
 
+from predictor_utils import PredictorTools
+
 SEED = 2020
 DATATYPE = 'float32'
 program_translator = ProgramTranslator()
@@ -693,9 +695,11 @@ class TestTrain(unittest.TestCase):
         static_pred_res = self.predict_static(video_data)
         dygraph_pred_res = self.predict_dygraph(video_data)
         dygraph_jit_pred_res = self.predict_dygraph_jit(video_data)
+        predictor_pred_res = self.predict_analysis_inference(video_data)
 
-        for dy_res, st_res, dy_jit_res in zip(
-                dygraph_pred_res, static_pred_res, dygraph_jit_pred_res):
+        for dy_res, st_res, dy_jit_res, predictor_res in zip(
+                dygraph_pred_res, static_pred_res, dygraph_jit_pred_res,
+                predictor_pred_res):
             self.assertTrue(
                 np.allclose(st_res, dy_res),
                 "dygraph_res: {},\n static_res: {}".format(
@@ -706,6 +710,11 @@
                 "dygraph_jit_res: {},\n static_res: {}".format(
                     dy_jit_res[~np.isclose(st_res, dy_jit_res)],
                     st_res[~np.isclose(st_res, dy_jit_res)]))
+            self.assertTrue(
+                np.allclose(st_res, predictor_res),
+                "predictor_res: {},\n static_res: {}".format(
+                    predictor_res[~np.isclose(st_res, predictor_res)],
+                    st_res[~np.isclose(st_res, predictor_res)]))
             break
 
     def predict_dygraph(self, data):
@@ -749,6 +758,11 @@
         return pred_res
 
+    def predict_analysis_inference(self, data):
+        output = PredictorTools(self.args.infer_dir, VARIABLE_FILENAME, [data])
+        out = output()
+        return out
+
 if __name__ == "__main__":
     unittest.main()
@@ -27,6 +27,8 @@ from paddle.fluid.dygraph import Embedding, Linear, GRUUnit
 from paddle.fluid.dygraph import declarative, ProgramTranslator
 from paddle.fluid.dygraph.io import VARIABLE_FILENAME
 
+from predictor_utils import PredictorTools
+
 SEED = 2020
 program_translator = ProgramTranslator()
@@ -536,6 +538,7 @@ class TestLACModel(unittest.TestCase):
         dy_pre = self.predict_dygraph(batch)
         st_pre = self.predict_static(batch)
         dy_jit_pre = self.predict_dygraph_jit(batch)
+        predictor_pre = self.predict_analysis_inference(batch)
         self.assertTrue(
             np.allclose(dy_pre, st_pre),
             msg="dy_pre:\n {}\n, st_pre: \n{}.".format(dy_pre, st_pre))
@@ -543,6 +546,10 @@
             np.allclose(dy_jit_pre, st_pre),
             msg="dy_jit_pre:\n {}\n, st_pre: \n{}.".format(dy_jit_pre,
                                                            st_pre))
+        self.assertTrue(
+            np.allclose(predictor_pre, st_pre),
+            msg="predictor_pre:\n {}\n, st_pre: \n{}.".format(predictor_pre,
+                                                              st_pre))
 
     def predict_dygraph(self, batch):
         words, targets, length = batch
@@ -591,6 +598,14 @@
         return pred_res.numpy()
 
+    def predict_analysis_inference(self, batch):
+        words, targets, length = batch
+
+        output = PredictorTools(self.args.model_save_dir, VARIABLE_FILENAME,
+                                [words, length])
+        out = output()
+        return out
+
 if __name__ == "__main__":
     unittest.main()
@@ -29,6 +29,8 @@ from paddle.fluid.dygraph.jit import declarative
 from paddle.fluid.dygraph.io import VARIABLE_FILENAME
 from paddle.fluid.dygraph.dygraph_to_static import ProgramTranslator
 
+from predictor_utils import PredictorTools
+
 SEED = 2020
@@ -220,6 +222,10 @@ class TestMNISTWithDeclarative(TestMNIST):
             dygraph_infer_out = self.jit_load_and_run_inference_dygraph(
                 infer_model_path, inputs)
             self.assertTrue(np.allclose(gt_out.numpy(), dygraph_infer_out))
+            # load in Paddle-Inference
+            predictor_infer_out = self.predictor_load_and_run_inference_analysis(
+                infer_model_path, inputs)
+            self.assertTrue(np.allclose(gt_out.numpy(), predictor_infer_out))
 
     @switch_to_static_graph
     def jit_load_and_run_inference_static(self, model_path, inputs):
@@ -241,6 +247,11 @@
         pred = infer_net(inputs[0])
         return pred.numpy()
 
+    def predictor_load_and_run_inference_analysis(self, model_path, inputs):
+        output = PredictorTools(model_path, VARIABLE_FILENAME, inputs)
+        out = output()
+        return out
+
 if __name__ == "__main__":
     unittest.main()
@@ -23,6 +23,8 @@ from paddle.fluid.dygraph.io import VARIABLE_FILENAME
 import unittest
 
+from predictor_utils import PredictorTools
+
 # Note: Set True to eliminate randomness.
 #     1. For one operation, cuDNN has several algorithms,
 #        some algorithm results are non-deterministic, like convolution algorithms.
@@ -550,6 +552,12 @@ def predict_dygraph_jit(args, data):
     return pred_res.numpy()
 
 
+def predict_analysis_inference(args, data):
+    output = PredictorTools(args.model_save_path, VARIABLE_FILENAME, [data])
+    out = output()
+    return out
+
+
 class TestMobileNet(unittest.TestCase):
     def setUp(self):
         self.args = Args()
@@ -577,12 +585,18 @@
         dy_pre = predict_dygraph(self.args, image)
         st_pre = predict_static(self.args, image)
         dy_jit_pre = predict_dygraph_jit(self.args, image)
+        predictor_pre = predict_analysis_inference(self.args, image)
         self.assertTrue(
             np.allclose(dy_pre, st_pre),
             msg="dy_pre:\n {}\n, st_pre: \n{}.".format(dy_pre, st_pre))
         self.assertTrue(
             np.allclose(dy_jit_pre, st_pre),
             msg="dy_jit_pre:\n {}\n, st_pre: \n{}.".format(dy_jit_pre, st_pre))
+        self.assertTrue(
+            np.allclose(
+                predictor_pre, st_pre, atol=1e-5),
+            msg="inference_pred_res:\n {}\n, st_pre: \n{}.".format(
+                predictor_pre, st_pre))
 
     def test_mobile_net(self):
         # MobileNet-V1
...
@@ -26,6 +26,8 @@ from paddle.fluid.dygraph import declarative, ProgramTranslator
 from paddle.fluid.dygraph.nn import BatchNorm, Conv2D, Linear, Pool2D
 from paddle.fluid.dygraph.io import VARIABLE_FILENAME
 
+from predictor_utils import PredictorTools
+
 SEED = 2020
 IMAGENET1000 = 1281167
 base_lr = 0.001
@@ -307,6 +309,12 @@ def predict_dygraph_jit(data):
     return pred_res.numpy()
 
 
+def predict_analysis_inference(data):
+    output = PredictorTools(MODEL_SAVE_PATH, VARIABLE_FILENAME, [data])
+    out = output()
+    return out
+
+
 class TestResnet(unittest.TestCase):
     def train(self, to_static):
         program_translator.enable(to_static)
@@ -317,12 +325,17 @@
         dy_pre = predict_dygraph(image)
         st_pre = predict_static(image)
         dy_jit_pre = predict_dygraph_jit(image)
+        predictor_pre = predict_analysis_inference(image)
         self.assertTrue(
             np.allclose(dy_pre, st_pre),
             msg="dy_pre:\n {}\n, st_pre: \n{}.".format(dy_pre, st_pre))
         self.assertTrue(
             np.allclose(dy_jit_pre, st_pre),
             msg="dy_jit_pre:\n {}\n, st_pre: \n{}.".format(dy_jit_pre, st_pre))
+        self.assertTrue(
+            np.allclose(predictor_pre, st_pre),
+            msg="predictor_pre:\n {}\n, st_pre: \n{}.".format(predictor_pre,
+                                                              st_pre))
 
     def test_resnet(self):
         static_loss = self.train(to_static=True)
...
@@ -26,6 +26,8 @@ from paddle.fluid.dygraph import declarative
 from paddle.fluid.dygraph import ProgramTranslator
 from paddle.fluid.dygraph.io import VARIABLE_FILENAME
 
+from predictor_utils import PredictorTools
+
 SEED = 2020
 np.random.seed(SEED)
@@ -434,6 +436,12 @@ def predict_dygraph_jit(data):
     return pred_res.numpy()
 
 
+def predict_analysis_inference(data):
+    output = PredictorTools(MODEL_SAVE_PATH, VARIABLE_FILENAME, [data])
+    out = output()
+    return out
+
+
 class TestSeResnet(unittest.TestCase):
     def setUp(self):
         self.train_reader = paddle.batch(
@@ -447,12 +455,17 @@
         dy_pre = predict_dygraph(image)
         st_pre = predict_static(image)
         dy_jit_pre = predict_dygraph_jit(image)
+        predictor_pre = predict_analysis_inference(image)
         self.assertTrue(
             np.allclose(dy_pre, st_pre),
             msg="dy_pre:\n {}\n, st_pre: \n{}.".format(dy_pre, st_pre))
        self.assertTrue(
             np.allclose(dy_jit_pre, st_pre),
             msg="dy_jit_pre:\n {}\n, st_pre: \n{}.".format(dy_jit_pre, st_pre))
+        self.assertTrue(
+            np.allclose(predictor_pre, st_pre),
+            msg="predictor_pre:\n {}\n, st_pre: \n{}.".format(predictor_pre,
+                                                              st_pre))
 
     def test_check_result(self):
         pred_1, loss_1, acc1_1, acc5_1 = train(
...