Commit f581a866 authored by W wangxiao

update task_instance.py

Parent fea8bf14
......
@@ -127,10 +127,7 @@ class TaskInstance(object):
         infer_model_path = self._save_infermodel_path
         for k,v in list(json.load(open(os.path.join(infer_model_path, '__conf__'))).items()): # py3
             strv = self._save_protocol[k]
-            print(v)
             exec('{}=v'.format(strv))
-            print('self list')
-            print(self._pred_input_varname_list)
         pred_prog, self._pred_input_varname_list, self._pred_fetch_var_list = \
             fluid.io.load_inference_model(infer_model_path, self._exe)
         print(self._name+': inference model loaded from ' + infer_model_path)
......
@@ -173,7 +170,6 @@ class TaskInstance(object):
     @property
     def pred_input(self):
         return list(zip(*[self._pred_input_name_list, self._pred_input_varname_list])) # py3
-        # return zip(*[self._pred_input_name_list, self._pred_input_varname_list])
     @pred_input.setter
     def pred_input(self, val):
......
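For context, the first hunk sits in the routine that reloads a saved inference model: it reads the __conf__ JSON written at save time, restores the recorded attributes through the _save_protocol mapping (via exec in the repo's code), and then calls fluid.io.load_inference_model; the commit only drops the debugging print statements around that restore. A minimal sketch of the same flow, assuming the Paddle Fluid 1.x API and a directory produced by fluid.io.save_inference_model — the helper name load_infer_model and the dict-based restore are illustrative, not the repo's exact code:

    import json
    import os

    import paddle.fluid as fluid

    def load_infer_model(infer_model_path, exe, save_protocol):
        # Restore the values recorded in __conf__; save_protocol maps each JSON key
        # to the attribute name it should populate (the diff above does this with exec()).
        with open(os.path.join(infer_model_path, '__conf__')) as f:
            restored = {save_protocol[k]: v for k, v in json.load(f).items()}

        # load_inference_model returns the pruned prediction program, the feed
        # variable names, and the fetch variables needed to run exe.run(...).
        pred_prog, feed_names, fetch_vars = fluid.io.load_inference_model(
            infer_model_path, exe)
        return pred_prog, feed_names, fetch_vars, restored

Collecting the restored values in a dict rather than exec'ing strings from disk is only a presentational choice here; the load_inference_model call and its three return values match the Fluid 1.x API used in the diff.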