@@ -118,7 +127,7 @@ class TableLatencyPredictor(LatencyPredictor):
...
@@ -118,7 +127,7 @@ class TableLatencyPredictor(LatencyPredictor):
model_file(str), param_file(str): The inference model(*.pdmodel, *.pdiparams).
model_file(str), param_file(str): The inference model(*.pdmodel, *.pdiparams).
data_type(str): Data type, fp32 or int8. Default : fp32
data_type(str): Data type, fp32 or int8. Default : fp32
threads(int): threads num
threads(int): threads num
input_shape(list): Generally, the input shape is confirmed when saving the inference model and the parameter is only effective for variable length input shape.
input_shape(list): Generally, the input shape is confirmed when saving the inference model and the parameter is only effective for input shape that has variable length.
Returns:
Returns:
latency(float): The latency of the model.
latency(float): The latency of the model.
"""
"""
...
@@ -142,19 +151,31 @@ class TableLatencyPredictor(LatencyPredictor):
...
@@ -142,19 +151,31 @@ class TableLatencyPredictor(LatencyPredictor):
if input_shape != None:
if input_shape != None:
ori_shape = self._get_input_shape(graph)
ori_shape = self._get_input_shape(graph)
assert ori_shape == input_shape, "The parameter \'input_shape\' doesn't work now. The input shape is confirmed when saving the inference model"
assert ori_shape == input_shape, "The parameter \'input_shape\' doesn't work for now. The input shape is fixed when saving the inference model"