提交 c7ca29f0 编写于 作者: B barrierye

fix code style

上级 fbfb348d
...@@ -37,7 +37,9 @@ int GeneralCopyOp::inference() { ...@@ -37,7 +37,9 @@ int GeneralCopyOp::inference() {
// reade request from client // reade request from client
const std::vector<std::string> pre_node_names = pre_names(); const std::vector<std::string> pre_node_names = pre_names();
if (pre_node_names.size() != 1) { if (pre_node_names.size() != 1) {
LOG(ERROR) << "This op(" << op_name() <<") can only have one predecessor op, but received " << pre_node_names.size(); LOG(ERROR) << "This op(" << op_name()
<< ") can only have one predecessor op, but received "
<< pre_node_names.size();
return -1; return -1;
} }
const std::string pre_name = pre_node_names[0]; const std::string pre_name = pre_node_names[0];
......
...@@ -41,7 +41,9 @@ using baidu::paddle_serving::predictor::PaddleGeneralModelConfig; ...@@ -41,7 +41,9 @@ using baidu::paddle_serving::predictor::PaddleGeneralModelConfig;
int GeneralDistKVInferOp::inference() { int GeneralDistKVInferOp::inference() {
VLOG(2) << "Going to run inference"; VLOG(2) << "Going to run inference";
if (pre_node_names.size() != 1) { if (pre_node_names.size() != 1) {
LOG(ERROR) << "This op(" << op_name() <<") can only have one predecessor op, but received " << pre_node_names.size(); LOG(ERROR) << "This op(" << op_name()
<< ") can only have one predecessor op, but received "
<< pre_node_names.size();
return -1; return -1;
} }
const std::string pre_name = pre_node_names[0]; const std::string pre_name = pre_node_names[0];
......
...@@ -42,7 +42,9 @@ using baidu::paddle_serving::predictor::PaddleGeneralModelConfig; ...@@ -42,7 +42,9 @@ using baidu::paddle_serving::predictor::PaddleGeneralModelConfig;
int GeneralDistKVQuantInferOp::inference() { int GeneralDistKVQuantInferOp::inference() {
VLOG(2) << "Going to run inference"; VLOG(2) << "Going to run inference";
if (pre_node_names.size() != 1) { if (pre_node_names.size() != 1) {
LOG(ERROR) << "This op(" << op_name() <<") can only have one predecessor op, but received " << pre_node_names.size(); LOG(ERROR) << "This op(" << op_name()
<< ") can only have one predecessor op, but received "
<< pre_node_names.size();
return -1; return -1;
} }
const std::string pre_name = pre_node_names[0]; const std::string pre_name = pre_node_names[0];
......
...@@ -765,7 +765,8 @@ class InferManager { ...@@ -765,7 +765,8 @@ class InferManager {
} }
size_t engine_num = model_toolkit_conf.engines_size(); size_t engine_num = model_toolkit_conf.engines_size();
for (size_t ei = 0; ei < engine_num; ++ei) { for (size_t ei = 0; ei < engine_num; ++ei) {
LOG(INFO) << "model_toolkit_conf.engines(" << ei << ").name: " << model_toolkit_conf.engines(ei).name(); LOG(INFO) << "model_toolkit_conf.engines(" << ei
<< ").name: " << model_toolkit_conf.engines(ei).name();
std::string engine_name = model_toolkit_conf.engines(ei).name(); std::string engine_name = model_toolkit_conf.engines(ei).name();
VersionedInferEngine* engine = new (std::nothrow) VersionedInferEngine(); VersionedInferEngine* engine = new (std::nothrow) VersionedInferEngine();
if (!engine) { if (!engine) {
......
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=doc-string-missing
from paddle_serving_client import Client
from imdb_reader import IMDBDataset
import sys
# Example client: sends 400 identical IMDB sentiment requests to a local
# Paddle Serving endpoint and prints the prediction score for each step.
client = Client()
client.load_client_config('imdb_bow_client_conf/serving_client_conf.prototxt')
client.connect(["127.0.0.1:9393"])

# Preprocessing reuses the IMDB reader from training; any English
# sentence (in "text | label" form) could be substituted below.
imdb_dataset = IMDBDataset()
imdb_dataset.load_resource('imdb.vocab')

fetch = ["acc", "cost", "prediction"]
for i in range(400):
    word_ids, label = imdb_dataset.get_words_and_label('i am very sad | 0')
    fetch_maps = client.predict(feed={"words": word_ids}, fetch=fetch)
    # A single-model server returns one fetch map; a multi-model server
    # returns one fetch map per model engine.
    if len(fetch_maps) == 1:
        print("step: {}, res: {}".format(i, fetch_maps['prediction'][1]))
    else:
        for mi, fetch_map in enumerate(fetch_maps):
            print("step: {}, model: {}, res: {}".format(i, mi, fetch_map['prediction'][1]))
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=doc-string-missing
import os
import sys
from paddle_serving_server import OpMaker
from paddle_serving_server import OpSeqMaker
from paddle_serving_server import Server
# Example server: builds a diamond-shaped op graph — one reader feeding
# two general_infer ops (nodes 'g1' and 'g2', each bound to its own IMDB
# model below) whose outputs both flow into the response op.
op_maker = OpMaker()
read_op = op_maker.create('general_reader')
g1_infer_op = op_maker.create('general_infer', node_name='g1')
g2_infer_op = op_maker.create('general_infer', node_name='g2')
response_op = op_maker.create('general_response')

# Wire the graph table-driven: (op, dependency list); None means root.
op_seq_maker = OpSeqMaker()
for op, deps in ((read_op, None),
                 (g1_infer_op, [read_op]),
                 (g2_infer_op, [read_op]),
                 (response_op, [g1_infer_op, g2_infer_op])):
    if deps is None:
        op_seq_maker.add_op(op)
    else:
        op_seq_maker.add_op(op, dependent_nodes=deps)

server = Server()
server.set_op_sequence(op_seq_maker.get_op_sequence())
# Map each infer node name to the model directory it should serve.
model_configs = {'g1': 'imdb_cnn_model', 'g2': 'imdb_bow_model'}
server.load_model_config(model_configs)
server.prepare_server(workdir="work_dir1", port=9393, device="cpu")
server.run_server()
...@@ -277,8 +277,8 @@ class Client(object): ...@@ -277,8 +277,8 @@ class Client(object):
else: else:
for mi, result_map_batch in enumerate(multi_result_map_batch): for mi, result_map_batch in enumerate(multi_result_map_batch):
ret[model_engine_names[mi]] = result_map_batch ret[model_engine_names[mi]] = result_map_batch
return [ret, self.result_handle_.variant_tag() return [ret,
] if need_variant_tag else ret self.result_handle_.variant_tag()] if need_variant_tag else ret
def release(self): def release(self):
self.client_handle_.destroy_predictor() self.client_handle_.destroy_predictor()
......
...@@ -11,6 +11,7 @@ ...@@ -11,6 +11,7 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and # See the License for the specific language governing permissions and
# limitations under the License. # limitations under the License.
# pylint: disable=doc-string-missing
import os import os
from .proto import server_configure_pb2 as server_sdk from .proto import server_configure_pb2 as server_sdk
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册