Unverified commit 7d90a673 authored by zhouzj, committed by GitHub

Fix latencyPredict's tests. (#1559)

* Fix tests.
Parent 5b0346c5
@@ -110,7 +110,7 @@ class ModelCase5(paddle.nn.Layer):
         image = inputs['image']
         image = self.bn1(image)
         img_size = paddle.fluid.data(
-            name='img_size', shape=[None, 2], dtype='int64')
+            name='img_size', shape=[None, 2], dtype='int32')
         anchors = [10, 13, 16, 30, 33, 23]
         boxes, scores = paddle.fluid.layers.yolo_box(
             x=image,
@@ -180,7 +180,7 @@ class ModelCase7(paddle.nn.Layer):
         image = inputs['image']
         image = self.bn1(image)
         img_size = paddle.fluid.data(
-            name='img_size', shape=[None, 2], dtype='int64')
+            name='img_size', shape=[None, 2], dtype='int32')
         anchors = [10, 13, 16, 30, 33, 23]
         boxes, scores = paddle.fluid.layers.yolo_box(
             x=image,
@@ -194,53 +194,94 @@ class ModelCase7(paddle.nn.Layer):
         return boxes, scores, box, var
 
 
-class TestCase1(unittest.TestCase):
+class TestCase(unittest.TestCase):
+    def setUp(self):
+        os.system(
+            'wget -q https://bj.bcebos.com/v1/paddle-slim-models/LatencyPredictor/test_mobilenetv1.tar'
+        )
+        os.system('tar -xf test_mobilenetv1.tar')
+        os.system(
+            'wget -q https://bj.bcebos.com/v1/paddle-slim-models/LatencyPredictor/test_mobilenetv1_qat.tar'
+        )
+        os.system('tar -xf test_mobilenetv1_qat.tar')
+
     def test_case1(self):
         paddle.disable_static()
-        model = mobilenet_v1()
         predictor = TableLatencyPredictor(table_file='SD710')
-        model_file, param_file = save_cls_model(
-            model,
-            input_shape=[1, 3, 224, 224],
-            save_dir="./inference_model",
-            data_type='fp32')
+        model_file = 'test_mobilenetv1/inference.pdmodel'
+        param_file = 'test_mobilenetv1/inference.pdiparams'
         latency = predictor.predict(
             model_file=model_file, param_file=param_file, data_type='fp32')
         assert latency > 0
-        model_file, param_file = save_cls_model(
-            model,
-            input_shape=[1, 3, 224, 224],
-            save_dir="./inference_model",
-            data_type='int8')
+        model_file = 'test_mobilenetv1_qat/inference.pdmodel'
+        param_file = 'test_mobilenetv1_qat/inference.pdiparams'
         latency = predictor.predict(
             model_file=model_file, param_file=param_file, data_type='int8')
         assert latency > 0
 
 
 class TestCase2(unittest.TestCase):
+    def setUp(self):
+        os.system(
+            'wget -q https://bj.bcebos.com/v1/paddle-slim-models/LatencyPredictor/test_mobilenetv2.tar'
+        )
+        os.system('tar -xf test_mobilenetv2.tar')
+        os.system(
+            'wget -q https://bj.bcebos.com/v1/paddle-slim-models/LatencyPredictor/test_mobilenetv2_qat.tar'
+        )
+        os.system('tar -xf test_mobilenetv2_qat.tar')
+
+    def _infer_shape(self, model_dir, model_filename, params_filename,
+                     input_shapes, save_path):
+        assert type(input_shapes) in [
+            dict, list, tuple
+        ], f'Type of input_shapes should be in [dict, tuple or list] but got {type(input_shapes)}.'
+        paddle.enable_static()
+        exe = paddle.static.Executor(paddle.CPUPlace())
+        model_name = '.'.join(model_filename.split('.')[:-1])
+        model_path_prefix = os.path.join(model_dir, model_name)
+        [inference_program, feed_target_names, fetch_targets] = (
+            paddle.static.load_inference_model(
+                path_prefix=model_path_prefix, executor=exe))
+        if type(input_shapes) in [list, tuple]:
+            assert len(
+                feed_target_names
+            ) == 1, f"The number of model's inputs should be 1 but got {feed_target_names}."
+            input_shapes = {feed_target_names[0]: input_shapes}
+        feed_vars = []
+        for var_ in inference_program.list_vars():
+            if var_.name in feed_target_names:
+                feed_vars.append(var_)
+                var_.desc.set_shape(input_shapes[var_.name])
+        for block in inference_program.blocks:
+            for op in block.ops:
+                if op.type not in ["feed", "fetch"]:
+                    op.desc.infer_shape(block.desc)
+        save_path = os.path.join(save_path, "infered_shape")
+        os.makedirs(save_path)
+        paddle.static.save_inference_model(
+            save_path,
+            feed_vars,
+            fetch_targets,
+            exe,
+            program=inference_program,
+            clip_extra=False)
+        print(f"Saved model with inferred shape to {save_path}")
+
     def test_case2(self):
         paddle.disable_static()
         model = mobilenet_v2()
         predictor = TableLatencyPredictor(table_file='SD710')
-        model_file, param_file = save_cls_model(
-            model,
-            input_shape=[1, 3, 224, 224],
-            save_dir="./inference_model",
-            data_type='fp32')
+        model_file = 'test_mobilenetv2/inference.pdmodel'
+        param_file = 'test_mobilenetv2/inference.pdiparams'
         latency = predictor.predict(
             model_file=model_file, param_file=param_file, data_type='fp32')
         assert latency > 0
-
-
-class TestCase3(unittest.TestCase):
-    def test_case3(self):
-        paddle.disable_static()
-        model = mobilenet_v2()
         model_file, param_file = save_cls_model(
             model,
             input_shape=[1, 3, 224, 224],
             save_dir="./inference_model",
             data_type='fp32')
         pbmodel_file = opt_model(
             model_file=model_file,
             param_file=param_file,
@@ -255,39 +296,50 @@ class TestCase3(unittest.TestCase):
             graph_keys = pred._get_key_info_from_graph(graph=graph)
             assert len(graph_keys) > 0
+        self._infer_shape(
+            model_dir='test_mobilenetv2',
+            model_filename='inference.pdmodel',
+            params_filename='inference.pdiparams',
+            input_shapes=[1, 3, 250, 250],
+            save_path='test_mobilenetv2_250')
 
-
-class TestCase4(unittest.TestCase):
-    def test_case4(self):
-        paddle.disable_static()
-        model = ModelCase1()
-        model_file, param_file = save_cls_model(
-            model,
-            input_shape=[1, 116, 28, 28],
-            save_dir="./inference_model",
-            data_type='fp32')
-        predictor = TableLatencyPredictor(table_file='SD710')
+        model_file = 'test_mobilenetv2_250/infered_shape.pdmodel'
+        param_file = 'test_mobilenetv2_250/infered_shape.pdiparams'
         latency = predictor.predict(
             model_file=model_file, param_file=param_file, data_type='fp32')
         assert latency > 0
+        self._infer_shape(
+            model_dir='test_mobilenetv2_qat',
+            model_filename='inference.pdmodel',
+            params_filename='inference.pdiparams',
+            input_shapes=[1, 3, 250, 250],
+            save_path='test_mobilenetv2_qat_250')
 
-
-class TestCase5(unittest.TestCase):
-    def test_case5(self):
+        model_file = 'test_mobilenetv2_qat_250/infered_shape.pdmodel'
+        param_file = 'test_mobilenetv2_qat_250/infered_shape.pdiparams'
+        latency = predictor.predict(
+            model_file=model_file, param_file=param_file, data_type='int8')
+        assert latency > 0
+
+
+class TestCase3(unittest.TestCase):
+    def test_case3(self):
         paddle.disable_static()
-        model = mobilenet_v1()
-        predictor = TableLatencyPredictor(table_file='SD710')
+        model = ModelCase1()
         model_file, param_file = save_cls_model(
             model,
-            input_shape=[1, 3, 224, 224],
+            input_shape=[1, 116, 28, 28],
            save_dir="./inference_model",
             data_type='fp32')
+        predictor = TableLatencyPredictor(table_file='SD710')
         latency = predictor.predict(
             model_file=model_file, param_file=param_file, data_type='fp32')
         assert latency > 0
 
 
-class TestCase6(unittest.TestCase):
-    def test_case6(self):
+class TestCase4(unittest.TestCase):
+    def test_case4(self):
         paddle.disable_static()
         model = ModelCase2()
         predictor = TableLatencyPredictor(table_file='SD710')
@@ -301,8 +353,8 @@ class TestCase6(unittest.TestCase):
         assert latency > 0
 
 
-class TestCase7(unittest.TestCase):
-    def test_case7(self):
+class TestCase5(unittest.TestCase):
+    def test_case5(self):
         paddle.disable_static()
         model = ModelCase3()
         predictor = TableLatencyPredictor(table_file='SD710')
@@ -317,8 +369,8 @@ class TestCase7(unittest.TestCase):
         assert latency > 0
 
 
-class TestCase8(unittest.TestCase):
-    def test_case8(self):
+class TestCase6(unittest.TestCase):
+    def test_case6(self):
         paddle.disable_static()
         model = ModelCase4()
         predictor = LatencyPredictor()
@@ -341,8 +393,8 @@ class TestCase8(unittest.TestCase):
             assert len(graph_keys) > 0
 
 
-class TestCase9(unittest.TestCase):
-    def test_case9(self):
+class TestCase7(unittest.TestCase):
+    def test_case7(self):
         paddle.disable_static()
         model = ModelCase5()
         predictor = LatencyPredictor()
@@ -365,33 +417,13 @@ class TestCase9(unittest.TestCase):
             assert len(graph_keys) > 0
 
 
-class TestCase10(unittest.TestCase):
-    def test_case10(self):
+class TestCase8(unittest.TestCase):
+    def test_case8(self):
         paddle.disable_static()
-        model = mobilenet_v2()
-        model2 = ModelCase6()
-        model3 = ModelCase7()
         predictor = TableLatencyPredictor(table_file='SD710')
+        model = ModelCase6()
         model_file, param_file = save_cls_model(
             model,
             input_shape=[1, 3, 250, 250],
             save_dir="./inference_model",
             data_type='fp32')
-        latency = predictor.predict(
-            model_file=model_file, param_file=param_file, data_type='fp32')
-        assert latency > 0
-        model_file, param_file = save_cls_model(
-            model,
-            input_shape=[1, 3, 250, 250],
-            save_dir="./inference_model",
-            data_type='int8')
-        latency = predictor.predict(
-            model_file=model_file, param_file=param_file, data_type='int8')
-        assert latency > 0
-        model_file, param_file = save_cls_model(
-            model2,
-            input_shape=[1, 3, 16, 16],
-            save_dir="./inference_model",
-            data_type='fp32')
@@ -399,8 +431,10 @@ class TestCase10(unittest.TestCase):
             model_file=model_file, param_file=param_file, data_type='fp32')
         assert latency > 0
+        paddle.disable_static()
+        model2 = ModelCase7()
         model_file, param_file = save_det_model(
-            model3,
+            model2,
             input_shape=[1, 255, 14, 14],
             save_dir="./inference_model",
             data_type='fp32')
......