Unverified commit a3573c3a, authored by BohaoWu, committed by GitHub

Merge branch 'develop' into Zelda

@@ -155,11 +155,13 @@ int GeneralResponseOp::inference() {
         }
         if (model_config->_is_lod_fetch[idx]) {
+          if (in->at(idx).lod.size() > 0) {
             for (int j = 0; j < in->at(idx).lod[0].size(); ++j) {
               fetch_p->mutable_tensor_array(var_idx)->add_lod(
                   in->at(idx).lod[0][j]);
             }
+          }
         }
         VLOG(2) << "(logid=" << log_id << ") fetch var ["
                 << model_config->_fetch_name[idx] << "] ready";
......
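Reviewer note (not part of the commit): this hunk guards the lod copy so that in->at(idx).lod[0] is only indexed when the output actually carries lod information. A rough Python rendering of the new control flow, with made-up stand-in names for the C++ objects:

    # Illustrative sketch only; "output" and "response_lod" stand in for
    # in->at(idx) and the fetch tensor's lod field in the C++ hunk above.
    def copy_lod(output, response_lod):
        if len(output["lod"]) > 0:            # added guard: lod may be empty
            for offset in output["lod"][0]:   # copy level-0 offsets
                response_lod.append(offset)

    response_lod = []
    copy_lod({"lod": [[0, 2, 5]]}, response_lod)   # lod present -> offsets copied
    copy_lod({"lod": []}, response_lod)            # empty lod -> skipped
    print(response_lod)                            # [0, 2, 5]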
@@ -354,8 +354,9 @@ class Client(object):
                             name))
                     result_map[name].shape = shape
                     if name in self.lod_tensor_set:
-                        result_map["{}.lod".format(
-                            name)] = result_batch_handle.get_lod(mi, name)
+                        tmp_lod = result_batch_handle.get_lod(mi, name)
+                        if np.size(tmp_lod) > 0:
+                            result_map["{}.lod".format(name)] = tmp_lod
                 elif self.fetch_names_to_type_[name] == float32_type:
                     result_map[name] = result_batch_handle.get_float_by_name(
                         mi, name)
@@ -367,9 +368,9 @@ class Client(object):
                     shape = result_batch_handle.get_shape(mi, name)
                     result_map[name].shape = shape
                     if name in self.lod_tensor_set:
-                        result_map["{}.lod".format(
-                            name)] = result_batch_handle.get_lod(mi, name)
+                        tmp_lod = result_batch_handle.get_lod(mi, name)
+                        if np.size(tmp_lod) > 0:
+                            result_map["{}.lod".format(name)] = tmp_lod
                 elif self.fetch_names_to_type_[name] == int32_type:
                     # result_map[name] will be py::array(numpy array)
                     result_map[name] = result_batch_handle.get_int32_by_name(
@@ -382,8 +383,9 @@ class Client(object):
                     shape = result_batch_handle.get_shape(mi, name)
                     result_map[name].shape = shape
                     if name in self.lod_tensor_set:
-                        result_map["{}.lod".format(
-                            name)] = result_batch_handle.get_lod(mi, name)
+                        tmp_lod = result_batch_handle.get_lod(mi, name)
+                        if np.size(tmp_lod) > 0:
+                            result_map["{}.lod".format(name)] = tmp_lod
             multi_result_map.append(result_map)
         ret = None
         if len(model_engine_names) == 1:
......
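Reviewer note (not part of the commit): because the client now sets the "{name}.lod" key only when the returned lod is non-empty, downstream code should treat that key as optional. A self-contained sketch with a made-up fetch name and values:

    import numpy as np

    # Made-up example of what Client.predict() can return after this change;
    # the "<name>.lod" key appears only when the lod was non-empty.
    fetch_map = {
        "crf_decode": np.array([[3], [7], [7], [2], [9]]),
        "crf_decode.lod": np.array([0, 2, 5]),
    }

    lod = fetch_map.get("crf_decode.lod")
    if lod is None:
        sequences = [fetch_map["crf_decode"]]      # dense output, no sequence boundaries
    else:
        # Level-0 lod offsets delimit variable-length sequences.
        sequences = [fetch_map["crf_decode"][lod[i]:lod[i + 1]]
                     for i in range(len(lod) - 1)]
    print([s.tolist() for s in sequences])         # [[[3], [7]], [[7], [2], [9]]]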
@@ -74,7 +74,8 @@ def save_model(server_model_folder,
         fetch_var = model_conf.FetchVar()
         fetch_var.alias_name = key
         fetch_var.name = fetch_var_dict[key].name
-        fetch_var.is_lod_tensor = fetch_var_dict[key].lod_level >= 1
+        #fetch_var.is_lod_tensor = fetch_var_dict[key].lod_level >= 1
+        fetch_var.is_lod_tensor = 1
         if fetch_var_dict[key].dtype == core.VarDesc.VarType.INT64:
             fetch_var.fetch_type = 0
         if fetch_var_dict[key].dtype == core.VarDesc.VarType.FP32:
......
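Reviewer note (not part of the commit): with is_lod_tensor hard-coded to 1, every fetch variable is marked as a lod tensor in the generated serving configuration, regardless of its actual lod_level. A minimal sketch contrasting the old and new behaviour, using a made-up fetch variable:

    # Hypothetical stand-in for a Paddle fetch variable; only lod_level matters here.
    class FakeFetchVar:
        lod_level = 0   # a dense output with no lod information

    fetch_var_dict = {"score": FakeFetchVar()}

    for key in fetch_var_dict:
        old_is_lod = int(fetch_var_dict[key].lod_level >= 1)  # old: derived from lod_level
        new_is_lod = 1                                        # new: always flagged as lod tensor
        print(key, "old is_lod_tensor:", old_is_lod, "new is_lod_tensor:", new_is_lod)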