提交 a2a12fe4 编写于 作者: L LDOUBLEV

fix TRT8 core bug

上级 fa1a1256
@@ -173,6 +173,7 @@ def create_predictor(args, mode, logger):
         config.enable_use_gpu(args.gpu_mem, 0)
         if args.use_tensorrt:
             config.enable_tensorrt_engine(
+                workspace_size=1 << 30,
                 precision_mode=precision,
                 max_batch_size=args.max_batch_size,
                 min_subgraph_size=args.min_subgraph_size)
@@ -294,7 +295,10 @@ def create_predictor(args, mode, logger):
 def get_infer_gpuid():
     cmd = "nvidia-smi"
+    try:
-    res = os.popen(cmd).readlines()
+        res = os.popen(cmd).readlines()
+    except:
+        res = None
     if len(res) == 0:
         return None
     cmd = "env | grep CUDA_VISIBLE_DEVICES"
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册