Commit 5d0d3ef6 authored by zhiboniu, committed by zhiboniu

update clas model deploy support in pipeline

Parent b1857334
@@ -416,9 +416,15 @@ def load_predictor(model_dir,
         raise ValueError(
             "Predict by TensorRT mode: {}, expect device=='GPU', but device == {}"
             .format(run_mode, device))
-    config = Config(
-        os.path.join(model_dir, 'model.pdmodel'),
-        os.path.join(model_dir, 'model.pdiparams'))
+    infer_model = os.path.join(model_dir, 'model.pdmodel')
+    infer_params = os.path.join(model_dir, 'model.pdiparams')
+    if not os.path.exists(infer_model):
+        infer_model = os.path.join(model_dir, 'inference.pdmodel')
+        infer_params = os.path.join(model_dir, 'inference.pdiparams')
+        if not os.path.exists(infer_model):
+            raise ValueError(
+                "Cannot find any inference model in dir: {},".format(model_dir))
+    config = Config(infer_model, infer_params)
     if device == 'GPU':
         # initial GPU memory(M), device ID
         config.enable_use_gpu(200, 0)
...
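This hunk makes load_predictor accept both export naming conventions: detection exports write model.pdmodel / model.pdiparams, while PaddleClas-style exports write inference.pdmodel / inference.pdiparams. A minimal standalone sketch of the same fallback; the helper name resolve_model_files is hypothetical and not part of the commit:

import os

def resolve_model_files(model_dir):
    # Try the detection naming first, then the PaddleClas naming.
    for stem in ('model', 'inference'):
        infer_model = os.path.join(model_dir, stem + '.pdmodel')
        infer_params = os.path.join(model_dir, stem + '.pdiparams')
        if os.path.exists(infer_model):
            return infer_model, infer_params
    raise ValueError(
        "Cannot find any inference model in dir: {},".format(model_dir))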
@@ -158,6 +158,9 @@ class Detector(object):
         input_names = self.predictor.get_input_names()
         for i in range(len(input_names)):
             input_tensor = self.predictor.get_input_handle(input_names[i])
-            input_tensor.copy_from_cpu(inputs[input_names[i]])
+            if input_names[i] == 'x':
+                input_tensor.copy_from_cpu(inputs['image'])
+            else:
+                input_tensor.copy_from_cpu(inputs[input_names[i]])
         return inputs
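This hunk adapts input feeding to classification models: PaddleClas inference models name their single input tensor 'x', whereas the preprocessing dict stores the image array under the key 'image'. A hedged sketch of the same branch, factored out of the Detector class; predictor is assumed to be a paddle.inference Predictor and inputs the dict produced by preprocessing:

def feed_inputs(predictor, inputs):
    for name in predictor.get_input_names():
        tensor = predictor.get_input_handle(name)
        if name == 'x':
            # Classification exports call their image input 'x'; reuse the
            # preprocessed array stored under 'image'.
            tensor.copy_from_cpu(inputs['image'])
        else:
            tensor.copy_from_cpu(inputs[name])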
@@ -320,7 +323,7 @@ class Detector(object):
         if not os.path.exists(self.output_dir):
             os.makedirs(self.output_dir)
         out_path = os.path.join(self.output_dir, video_out_name)
-        fourcc = cv2.VideoWriter_fourcc(*'mp4v')
+        fourcc = cv2.VideoWriter_fourcc(* 'mp4v')
         writer = cv2.VideoWriter(out_path, fourcc, fps, (width, height))
         index = 1
         while (1):
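This hunk only reflows the star-unpacking of the fourcc string (a formatter artifact; behavior is unchanged). For reference, a self-contained example of the cv2.VideoWriter setup this code relies on, with a placeholder path, fps, and frame size:

import cv2

fourcc = cv2.VideoWriter_fourcc(*'mp4v')  # four-character codec code for MPEG-4
writer = cv2.VideoWriter('output.mp4', fourcc, 30, (1920, 1080))
# call writer.write(frame) once per BGR frame, then writer.release()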
@@ -704,9 +707,15 @@ def load_predictor(model_dir,
         raise ValueError(
             "Predict by TensorRT mode: {}, expect device=='GPU', but device == {}"
             .format(run_mode, device))
-    config = Config(
-        os.path.join(model_dir, 'model.pdmodel'),
-        os.path.join(model_dir, 'model.pdiparams'))
+    infer_model = os.path.join(model_dir, 'model.pdmodel')
+    infer_params = os.path.join(model_dir, 'model.pdiparams')
+    if not os.path.exists(infer_model):
+        infer_model = os.path.join(model_dir, 'inference.pdmodel')
+        infer_params = os.path.join(model_dir, 'inference.pdiparams')
+        if not os.path.exists(infer_model):
+            raise ValueError(
+                "Cannot find any inference model in dir: {},".format(model_dir))
+    config = Config(infer_model, infer_params)
     if device == 'GPU':
         # initial GPU memory(M), device ID
         config.enable_use_gpu(200, 0)
...