Unverified commit a479afa5 authored by dyning, committed by GitHub

Merge pull request #18 from littletomatodonkey/fix_trt_bench

fix benchmark
@@ -12,7 +12,6 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 import utils
 import argparse
 import numpy as np
@@ -24,6 +23,7 @@ from paddle.fluid.core import create_paddle_predictor
 logging.basicConfig(level=logging.INFO)
 logger = logging.getLogger(__name__)

 def parse_args():
     def str2bool(v):
         return v.lower() in ("true", "t", "1")
@@ -47,19 +47,18 @@ def parse_args():
 def create_predictor(args):
     config = AnalysisConfig(args.model_file, args.params_file)

     if args.use_gpu:
         config.enable_use_gpu(args.gpu_mem, 0)
     else:
         config.disable_gpu()

     config.disable_glog_info()
     config.switch_ir_optim(args.ir_optim)  # default true
     if args.use_tensorrt:
         config.enable_tensorrt_engine(
-            precision_mode=AnalysisConfig.Precision.Half if args.use_fp16 else AnalysisConfig.Precision.Float32,
-            max_batch_size=args.batch_size)
+            precision_mode=AnalysisConfig.Precision.Half
+            if args.use_fp16 else AnalysisConfig.Precision.Float32,
+            max_batch_size=args.batch_size)

     config.enable_memory_optim()
     # use zero copy
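The hunk above only re-wraps the TensorRT precision argument, but it shows the whole predictor-setup pattern the script relies on. Below is a minimal self-contained sketch of that pattern, not the repo's code: it assumes the Paddle 1.x inference API from paddle.fluid.core, build_predictor and its defaults are hypothetical, and the zero-copy switch is inferred from the trailing "# use zero copy" comment.

# A minimal sketch, assuming the Paddle 1.x inference API.
from paddle.fluid.core import AnalysisConfig, create_paddle_predictor


def build_predictor(model_file, params_file, use_gpu=True, gpu_mem=8000,
                    use_tensorrt=True, use_fp16=False, batch_size=1):
    # build_predictor and its defaults are hypothetical; the config calls
    # mirror the ones in create_predictor() above.
    config = AnalysisConfig(model_file, params_file)
    if use_gpu:
        config.enable_use_gpu(gpu_mem, 0)  # memory pool size (MB), device id 0
    else:
        config.disable_gpu()
    config.disable_glog_info()
    config.switch_ir_optim(True)  # graph-level IR optimizations
    if use_tensorrt:
        # FP16 only takes effect inside the TensorRT engine, which is why the
        # script asserts use_tensorrt whenever use_fp16 is set.
        config.enable_tensorrt_engine(
            precision_mode=AnalysisConfig.Precision.Half
            if use_fp16 else AnalysisConfig.Precision.Float32,
            max_batch_size=batch_size)
    config.enable_memory_optim()
    # Assumed from the "# use zero copy" comment: zero-copy tensor I/O
    # requires disabling the built-in feed/fetch ops.
    config.switch_use_feed_fetch_ops(False)
    return create_paddle_predictor(config)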
@@ -79,7 +78,7 @@ def create_operators():
     resize_op = utils.ResizeImage(resize_short=256)
     crop_op = utils.CropImage(size=(size, size))
     normalize_op = utils.NormalizeImage(
         scale=img_scale, mean=img_mean, std=img_std)
     totensor_op = utils.ToTensor()

     return [decode_op, resize_op, crop_op, normalize_op, totensor_op]
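For context on how this operator list is consumed: preprocess(args.image_file, operators) in main() below appears to apply each op in sequence. A sketch under that assumption; the file-reading details and the exact data passed between ops are guesses, while the utils ops themselves come from the repo.

# Sketch of how preprocess() consumes the operator list; assumes each op is a
# callable and that utils.DecodeImage accepts the raw encoded bytes.
def preprocess(image_file, operators):
    with open(image_file, "rb") as f:
        data = f.read()
    # decode -> resize (short side 256) -> center crop -> normalize -> CHW tensor
    for op in operators:
        data = op(data)
    return data  # float32 ndarray, ready for np.expand_dims below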
@@ -104,38 +103,62 @@ def main():
         assert args.model_name is not None
         assert args.use_tensorrt == True
     # HALF precission predict only work when using tensorrt
-    if args.use_fp16==True:
+    if args.use_fp16 == True:
         assert args.use_tensorrt == True

     operators = create_operators()
     predictor = create_predictor(args)

     inputs = preprocess(args.image_file, operators)
-    inputs = np.expand_dims(inputs, axis=0).repeat(args.batch_size, axis=0).copy()
+    inputs = np.expand_dims(
+        inputs, axis=0).repeat(
+            args.batch_size, axis=0).copy()
     input_names = predictor.get_input_names()
     input_tensor = predictor.get_input_tensor(input_names[0])
+    input_tensor.copy_from_cpu(inputs)
+
+    output_names = predictor.get_output_names()
+    output_tensor = predictor.get_output_tensor(output_names[0])
+
+    test_num = 500
+    test_time = 0.0
     if not args.enable_benchmark:
+        inputs = preprocess(args.image_file, operators)
+        inputs = np.expand_dims(
+            inputs, axis=0).repeat(
+                args.batch_size, axis=0).copy()
+        input_tensor.copy_from_cpu(inputs)
         predictor.zero_copy_run()
+
+        output = output_tensor.copy_to_cpu()
+        output = output.flatten()
+        cls = np.argmax(output)
+        score = output[cls]
+        logger.info("class: {0}".format(cls))
+        logger.info("score: {0}".format(score))
     else:
-        for i in range(0,1010):
-            if i == 10:
-                start = time.time()
+        for i in range(0, test_num + 10):
+            inputs = np.random.rand(args.batch_size, 3, 224,
+                                    224).astype(np.float32)
+            start_time = time.time()
+            input_tensor.copy_from_cpu(inputs)
             predictor.zero_copy_run()
-        end = time.time()
-        fp_message = "FP16" if args.use_fp16 else "FP32"
-        logger.info("{0}\t{1}\tbatch size: {2}\ttime(ms): {3}".format(args.model_name, fp_message, args.batch_size, end-start))
-
-    output_names = predictor.get_output_names()
-    output_tensor = predictor.get_output_tensor(output_names[0])
-    output = output_tensor.copy_to_cpu()
-    output = output.flatten()
-    cls = np.argmax(output)
-    score = output[cls]
-    logger.info("class: {0}".format(cls))
-    logger.info("score: {0}".format(score))
+
+            output = output_tensor.copy_to_cpu()
+            output = output.flatten()
+            if i >= 10:
+                test_time += time.time() - start_time
+            cls = np.argmax(output)
+            score = output[cls]
+            logger.info("class: {0}".format(cls))
+            logger.info("score: {0}".format(score))
+
+        fp_message = "FP16" if args.use_fp16 else "FP32"
+        logger.info("{0}\t{1}\tbatch size: {2}\ttime(ms): {3}".format(
+            args.model_name, fp_message, args.batch_size, 1000 * test_time /
+            test_num))

 if __name__ == "__main__":
...
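The substance of the fix is in this last hunk. The old loop wrapped 1,000 predictor runs in a single start/end pair and logged the raw difference in seconds under a time(ms) label, so the figure was neither milliseconds nor per batch, and it timed only zero_copy_run, never the host-to-device copy. The new loop feeds a fresh random batch each iteration, treats the first 10 runs as warm-up (lazy initialization, TensorRT engine build), accumulates wall time over test_num = 500 timed runs, and reports the average in milliseconds. The same pattern in isolation; benchmark() and run_once are hypothetical names, not part of the repo.

# Standalone sketch of the warm-up-then-average timing pattern the fix introduces.
import time

import numpy as np


def benchmark(run_once, batch_size, test_num=500, warmup=10):
    test_time = 0.0
    for i in range(test_num + warmup):
        # Fresh random batch each iteration, NCHW float32, as in the new loop.
        inputs = np.random.rand(batch_size, 3, 224, 224).astype(np.float32)
        start = time.time()
        run_once(inputs)  # copy_from_cpu + zero_copy_run + copy_to_cpu
        if i >= warmup:  # discard warm-up runs before accumulating
            test_time += time.time() - start
    return 1000 * test_time / test_num  # average milliseconds per batch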