Unverified commit 25b4da98, authored by: D Double_V, committed by: GitHub

Merge pull request #4752 from LDOUBLEV/dygraph

fix TRT core error and get gpu id function
@@ -190,6 +190,7 @@ def create_predictor(args, mode, logger):
         config.enable_use_gpu(args.gpu_mem, 0)
         if args.use_tensorrt:
             config.enable_tensorrt_engine(
+                workspace_size=1 << 30,
                 precision_mode=precision,
                 max_batch_size=args.max_batch_size,
                 min_subgraph_size=args.min_subgraph_size)
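For context, the hunk above adds an explicit 1 GiB TensorRT workspace to the GPU branch of `create_predictor`. The sketch below shows how such a Paddle Inference config is assembled; the `build_trt_config` helper, the model/params file arguments, and the FP32 precision choice are illustrative assumptions, not part of this PR.

```python
from paddle.inference import Config, PrecisionType

def build_trt_config(model_file, params_file, args):
    # Hypothetical helper mirroring the patched branch of create_predictor.
    config = Config(model_file, params_file)
    config.enable_use_gpu(args.gpu_mem, 0)  # GPU memory pool (MB) on device 0
    if args.use_tensorrt:
        config.enable_tensorrt_engine(
            workspace_size=1 << 30,                # 1 GiB TRT workspace, added by this PR
            precision_mode=PrecisionType.Float32,  # stands in for `precision`; FP32 assumed
            max_batch_size=args.max_batch_size,
            min_subgraph_size=args.min_subgraph_size)
    return config
```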
@@ -310,10 +311,6 @@ def create_predictor(args, mode, logger):
 def get_infer_gpuid():
-    cmd = "nvidia-smi"
-    res = os.popen(cmd).readlines()
-    if len(res) == 0:
-        return None
     cmd = "env | grep CUDA_VISIBLE_DEVICES"
     env_cuda = os.popen(cmd).readlines()
     if len(env_cuda) == 0:
......
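After this change, `get_infer_gpuid` no longer probes `nvidia-smi` and relies only on `CUDA_VISIBLE_DEVICES`. The diff truncates the rest of the function, so the fallback and parsing logic below are assumptions sketched for illustration.

```python
import os

def get_infer_gpuid():
    # Read CUDA_VISIBLE_DEVICES from the environment via a shell pipe,
    # as the (unchanged) lines in the hunk above do.
    cmd = "env | grep CUDA_VISIBLE_DEVICES"
    env_cuda = os.popen(cmd).readlines()
    if len(env_cuda) == 0:
        return 0  # assumed fallback: default to device 0 when the variable is unset
    # Assumed tail: take the first device id listed after the '=' sign.
    gpu_id = env_cuda[0].strip().split("=")[1]
    return int(gpu_id.split(",")[0])
```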