Commit 49786ae2 authored by barrierye

remove useless code

Parent 2d73ac0a
@@ -214,7 +214,7 @@ int PredictorClient::predict(const std::vector<std::vector<float>> &float_feed,
   VLOG(2) << "predict done.";
   client_infer_end = timeline.TimeStampUS();
   postprocess_start = client_infer_end;
-  // severaal model output
+  // multi-model output
   uint32_t model_num = res.outputs_size();
   predict_res._models.resize(model_num);
   for (uint32_t m_idx = 0; m_idx < model_num; ++m_idx) {
@@ -242,9 +242,10 @@ int PredictorClient::predict(const std::vector<std::vector<float>> &float_feed,
             output.insts(0).tensor_array(idx).float_data(i);
         }
       }
       //TODO
       postprocess_end = timeline.TimeStampUS();
     }
   }
   postprocess_end = timeline.TimeStampUS();
 }
 if (FLAGS_profile_client) {
@@ -414,11 +415,11 @@ int PredictorClient::batch_predict(
             output.insts(bi).tensor_array(idx).float_data(i);
         }
       }
       idx += 1;
     }
     idx += 1;
   }
   postprocess_end = timeline.TimeStampUS();
   }
   postprocess_end = timeline.TimeStampUS();
 }
 if (FLAGS_profile_client) {
......
// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "core/general-server/op/general_copy_op.h"
#include <algorithm>
#include <iostream>
#include <memory>
#include <sstream>
#include "core/general-server/op/general_infer_helper.h"
#include "core/predictor/framework/infer.h"
#include "core/predictor/framework/memory.h"
#include "core/util/include/timer.h"
namespace baidu {
namespace paddle_serving {
namespace serving {
using baidu::paddle_serving::Timer;
using baidu::paddle_serving::predictor::MempoolWrapper;
using baidu::paddle_serving::predictor::general_model::Tensor;
using baidu::paddle_serving::predictor::general_model::Request;
using baidu::paddle_serving::predictor::general_model::FeedInst;
using baidu::paddle_serving::predictor::PaddleGeneralModelConfig;
int GeneralCopyOp::inference() {
// read request from client
const GeneralBlob *input_blob = get_depend_argument<GeneralBlob>(pre_name());
VLOG(2) << "precedent name: " << pre_name();
const TensorVector *in = &input_blob->tensor_vector;
VLOG(2) << "input size: " << in->size();
int batch_size = input_blob->GetBatchSize();
int input_var_num = 0;
GeneralBlob *res = mutable_data<GeneralBlob>();
TensorVector *out = &res->tensor_vector;
VLOG(2) << "input batch size: " << batch_size;
res->SetBatchSize(batch_size);
if (!res) {
LOG(ERROR) << "Failed get op tls reader object output";
}
Timer timeline;
int64_t start = timeline.TimeStampUS();
VLOG(2) << "Going to init lod tensor";
for (int i = 0; i < in->size(); ++i) {
paddle::PaddleTensor lod_tensor;
CopyLod(&in->at(i), &lod_tensor);
lod_tensor.dtype = in->at(i).dtype;
lod_tensor.name = in->at(i).name;
VLOG(2) << "lod tensor [" << i << "].name = " << lod_tensor.name;
out->push_back(lod_tensor);
}
VLOG(2) << "pack done.";
for (int i = 0; i < out->size(); ++i) {
int64_t *src_ptr = static_cast<int64_t *>(in->at(i).data.data());
out->at(i).data.Resize(out->at(i).lod[0].back() * sizeof(int64_t));
out->at(i).shape = {out->at(i).lod[0].back(), 1};
int64_t *tgt_ptr = static_cast<int64_t *>(out->at(i).data.data());
for (int j = 0; j < out->at(i).lod[0].back(); ++j) {
tgt_ptr[j] = src_ptr[j];
}
}
VLOG(2) << "output done.";
timeline.Pause();
int64_t end = timeline.TimeStampUS();
CopyBlobInfo(input_blob, res);
AddBlobInfo(res, start);
AddBlobInfo(res, end);
VLOG(2) << "read data from client success";
return 0;
}
DEFINE_OP(GeneralCopyOp);
} // namespace serving
} // namespace paddle_serving
} // namespace baidu
// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#pragma once
#include <vector>
#ifdef BCLOUD
#ifdef WITH_GPU
#include "paddle/paddle_inference_api.h"
#else
#include "paddle/fluid/inference/api/paddle_inference_api.h"
#endif
#else
#include "paddle_inference_api.h" // NOLINT
#endif
#include <string>
#include "core/general-server/general_model_service.pb.h"
#include "core/general-server/op/general_infer_helper.h"
#include "core/predictor/framework/resource.h"
namespace baidu {
namespace paddle_serving {
namespace serving {
class GeneralCopyOp
: public baidu::paddle_serving::predictor::OpWithChannel<GeneralBlob> {
public:
typedef std::vector<paddle::PaddleTensor> TensorVector;
DECLARE_OP(GeneralCopyOp);
int inference();
};
} // namespace serving
} // namespace paddle_serving
} // namespace baidu
// Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "core/general-server/op/general_dist_kv_infer_op.h"
#include <algorithm>
#include <iostream>
#include <memory>
#include <sstream>
#include <unordered_map>
#include <utility>
#include "core/cube/cube-api/include/cube_api.h"
#include "core/predictor/framework/infer.h"
#include "core/predictor/framework/memory.h"
#include "core/predictor/framework/resource.h"
#include "core/util/include/timer.h"
namespace baidu {
namespace paddle_serving {
namespace serving {
using baidu::paddle_serving::Timer;
using baidu::paddle_serving::predictor::MempoolWrapper;
using baidu::paddle_serving::predictor::general_model::Tensor;
using baidu::paddle_serving::predictor::general_model::Response;
using baidu::paddle_serving::predictor::general_model::Request;
using baidu::paddle_serving::predictor::general_model::FetchInst;
using baidu::paddle_serving::predictor::InferManager;
using baidu::paddle_serving::predictor::PaddleGeneralModelConfig;
int GeneralDistKVInferOp::inference() {
VLOG(2) << "Going to run inference";
const GeneralBlob *input_blob = get_depend_argument<GeneralBlob>(pre_name());
VLOG(2) << "Get precedent op name: " << pre_name();
GeneralBlob *output_blob = mutable_data<GeneralBlob>();
if (!input_blob) {
LOG(ERROR) << "Failed mutable depended argument, op:" << pre_name();
return -1;
}
const TensorVector *in = &input_blob->tensor_vector;
TensorVector *out = &output_blob->tensor_vector;
int batch_size = input_blob->GetBatchSize();
VLOG(2) << "input batch size: " << batch_size;
std::vector<uint64_t> keys;
std::vector<rec::mcube::CubeValue> values;
int sparse_count = 0;
int dense_count = 0;
std::vector<std::pair<int64_t *, size_t>> dataptr_size_pairs;
size_t key_len = 0;
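// Split the feed tensors: INT64 tensors hold the sparse feature ids that will
// be looked up in cube; all other tensors are treated as dense inputs and are
// passed through to the model unchanged.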
for (size_t i = 0; i < in->size(); ++i) {
if (in->at(i).dtype != paddle::PaddleDType::INT64) {
++dense_count;
continue;
}
++sparse_count;
size_t elem_num = 1;
for (size_t s = 0; s < in->at(i).shape.size(); ++s) {
elem_num *= in->at(i).shape[s];
}
key_len += elem_num;
int64_t *data_ptr = static_cast<int64_t *>(in->at(i).data.data());
dataptr_size_pairs.push_back(std::make_pair(data_ptr, elem_num));
}
keys.resize(key_len);
int key_idx = 0;
for (size_t i = 0; i < dataptr_size_pairs.size(); ++i) {
std::copy(dataptr_size_pairs[i].first,
dataptr_size_pairs[i].first + dataptr_size_pairs[i].second,
keys.begin() + key_idx);
key_idx += dataptr_size_pairs[i].second;
}
rec::mcube::CubeAPI *cube = rec::mcube::CubeAPI::instance();
std::vector<std::string> table_names = cube->get_table_names();
if (table_names.size() == 0) {
LOG(ERROR) << "cube init error or cube config not given.";
return -1;
}
int ret = cube->seek(table_names[0], keys, &values);
if (values.size() != keys.size() || values[0].buff.size() == 0) {
LOG(ERROR) << "cube value return null";
}
size_t EMBEDDING_SIZE = values[0].buff.size() / sizeof(float);
TensorVector sparse_out;
sparse_out.resize(sparse_count);
TensorVector dense_out;
dense_out.resize(dense_count);
int cube_val_idx = 0;
int sparse_idx = 0;
int dense_idx = 0;
std::unordered_map<int, int> in_out_map;
baidu::paddle_serving::predictor::Resource &resource =
baidu::paddle_serving::predictor::Resource::instance();
std::shared_ptr<PaddleGeneralModelConfig> model_config =
resource.get_general_model_config();
for (size_t i = 0; i < in->size(); ++i) {
if (in->at(i).dtype != paddle::PaddleDType::INT64) {
dense_out[dense_idx] = in->at(i);
++dense_idx;
continue;
}
sparse_out[sparse_idx].lod.resize(in->at(i).lod.size());
for (size_t x = 0; x < sparse_out[sparse_idx].lod.size(); ++x) {
sparse_out[sparse_idx].lod[x].resize(in->at(i).lod[x].size());
std::copy(in->at(i).lod[x].begin(),
in->at(i).lod[x].end(),
sparse_out[sparse_idx].lod[x].begin());
}
sparse_out[sparse_idx].dtype = paddle::PaddleDType::FLOAT32;
sparse_out[sparse_idx].shape.push_back(
sparse_out[sparse_idx].lod[0].back());
sparse_out[sparse_idx].shape.push_back(EMBEDDING_SIZE);
sparse_out[sparse_idx].name = model_config->_feed_name[i];
sparse_out[sparse_idx].data.Resize(sparse_out[sparse_idx].lod[0].back() *
EMBEDDING_SIZE * sizeof(float));
float *dst_ptr = static_cast<float *>(sparse_out[sparse_idx].data.data());
for (int x = 0; x < sparse_out[sparse_idx].lod[0].back(); ++x) {
float *data_ptr = dst_ptr + x * EMBEDDING_SIZE;
memcpy(data_ptr,
values[cube_val_idx].buff.data(),
values[cube_val_idx].buff.size());
cube_val_idx++;
}
++sparse_idx;
}
TensorVector infer_in;
infer_in.insert(infer_in.end(), dense_out.begin(), dense_out.end());
infer_in.insert(infer_in.end(), sparse_out.begin(), sparse_out.end());
output_blob->SetBatchSize(batch_size);
VLOG(2) << "infer batch size: " << batch_size;
Timer timeline;
int64_t start = timeline.TimeStampUS();
timeline.Start();
if (InferManager::instance().infer(
GENERAL_MODEL_NAME, &infer_in, out, batch_size)) {
LOG(ERROR) << "Failed do infer in fluid model: " << GENERAL_MODEL_NAME;
return -1;
}
int64_t end = timeline.TimeStampUS();
CopyBlobInfo(input_blob, output_blob);
AddBlobInfo(output_blob, start);
AddBlobInfo(output_blob, end);
return 0;
}
DEFINE_OP(GeneralDistKVInferOp);
} // namespace serving
} // namespace paddle_serving
} // namespace baidu
// Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#pragma once
#include <string>
#include <vector>
#ifdef BCLOUD
#ifdef WITH_GPU
#include "paddle/paddle_inference_api.h"
#else
#include "paddle/fluid/inference/api/paddle_inference_api.h"
#endif
#else
#include "paddle_inference_api.h" // NOLINT
#endif
#include "core/general-server/general_model_service.pb.h"
#include "core/general-server/op/general_infer_helper.h"
namespace baidu {
namespace paddle_serving {
namespace serving {
class GeneralDistKVInferOp
: public baidu::paddle_serving::predictor::OpWithChannel<GeneralBlob> {
public:
typedef std::vector<paddle::PaddleTensor> TensorVector;
DECLARE_OP(GeneralDistKVInferOp);
int inference();
};
} // namespace serving
} // namespace paddle_serving
} // namespace baidu
// Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "core/general-server/op/general_dist_kv_quant_infer_op.h"
#include <algorithm>
#include <iostream>
#include <memory>
#include <sstream>
#include <unordered_map>
#include <utility>
#include "core/cube/cube-api/include/cube_api.h"
#include "core/predictor/framework/infer.h"
#include "core/predictor/framework/memory.h"
#include "core/predictor/framework/resource.h"
#include "core/predictor/tools/quant.h"
#include "core/util/include/timer.h"
namespace baidu {
namespace paddle_serving {
namespace serving {
using baidu::paddle_serving::Timer;
using baidu::paddle_serving::predictor::MempoolWrapper;
using baidu::paddle_serving::predictor::general_model::Tensor;
using baidu::paddle_serving::predictor::general_model::Response;
using baidu::paddle_serving::predictor::general_model::Request;
using baidu::paddle_serving::predictor::general_model::FetchInst;
using baidu::paddle_serving::predictor::InferManager;
using baidu::paddle_serving::predictor::PaddleGeneralModelConfig;
int GeneralDistKVQuantInferOp::inference() {
VLOG(2) << "Going to run inference";
const GeneralBlob *input_blob = get_depend_argument<GeneralBlob>(pre_name());
VLOG(2) << "Get precedent op name: " << pre_name();
GeneralBlob *output_blob = mutable_data<GeneralBlob>();
if (!input_blob) {
LOG(ERROR) << "Failed mutable depended argument, op:" << pre_name();
return -1;
}
const TensorVector *in = &input_blob->tensor_vector;
TensorVector *out = &output_blob->tensor_vector;
int batch_size = input_blob->GetBatchSize();
VLOG(2) << "input batch size: " << batch_size;
std::vector<uint64_t> keys;
std::vector<rec::mcube::CubeValue> values;
int sparse_count = 0;
int dense_count = 0;
std::vector<std::pair<int64_t *, size_t>> dataptr_size_pairs;
size_t key_len = 0;
for (size_t i = 0; i < in->size(); ++i) {
if (in->at(i).dtype != paddle::PaddleDType::INT64) {
++dense_count;
continue;
}
++sparse_count;
size_t elem_num = 1;
for (size_t s = 0; s < in->at(i).shape.size(); ++s) {
elem_num *= in->at(i).shape[s];
}
key_len += elem_num;
int64_t *data_ptr = static_cast<int64_t *>(in->at(i).data.data());
dataptr_size_pairs.push_back(std::make_pair(data_ptr, elem_num));
}
keys.resize(key_len);
int key_idx = 0;
for (size_t i = 0; i < dataptr_size_pairs.size(); ++i) {
std::copy(dataptr_size_pairs[i].first,
dataptr_size_pairs[i].first + dataptr_size_pairs[i].second,
keys.begin() + key_idx);
key_idx += dataptr_size_pairs[i].second;
}
rec::mcube::CubeAPI *cube = rec::mcube::CubeAPI::instance();
std::vector<std::string> table_names = cube->get_table_names();
if (table_names.size() == 0) {
LOG(ERROR) << "cube init error or cube config not given.";
return -1;
}
int ret = cube->seek(table_names[0], keys, &values);
if (values.size() != keys.size() || values[0].buff.size() == 0) {
LOG(ERROR) << "cube value return null";
}
TensorVector sparse_out;
sparse_out.resize(sparse_count);
TensorVector dense_out;
dense_out.resize(dense_count);
int cube_val_idx = 0;
int sparse_idx = 0;
int dense_idx = 0;
std::unordered_map<int, int> in_out_map;
baidu::paddle_serving::predictor::Resource &resource =
baidu::paddle_serving::predictor::Resource::instance();
std::shared_ptr<PaddleGeneralModelConfig> model_config =
resource.get_general_model_config();
int cube_quant_bits = resource.get_cube_quant_bits();
size_t EMBEDDING_SIZE = 0;
if (cube_quant_bits == 0) {
EMBEDDING_SIZE = values[0].buff.size() / sizeof(float);
} else {
EMBEDDING_SIZE = values[0].buff.size() - 2 * sizeof(float);
}
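// For a quantized table each cube value holds two floats (the min and max of
// the original embedding) followed by one byte per element, so the element
// count is buff.size() - 2 * sizeof(float); for a non-quantized table the
// buffer is raw floats.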
for (size_t i = 0; i < in->size(); ++i) {
if (in->at(i).dtype != paddle::PaddleDType::INT64) {
dense_out[dense_idx] = in->at(i);
++dense_idx;
continue;
}
sparse_out[sparse_idx].lod.resize(in->at(i).lod.size());
for (size_t x = 0; x < sparse_out[sparse_idx].lod.size(); ++x) {
sparse_out[sparse_idx].lod[x].resize(in->at(i).lod[x].size());
std::copy(in->at(i).lod[x].begin(),
in->at(i).lod[x].end(),
sparse_out[sparse_idx].lod[x].begin());
}
sparse_out[sparse_idx].dtype = paddle::PaddleDType::FLOAT32;
sparse_out[sparse_idx].shape.push_back(
sparse_out[sparse_idx].lod[0].back());
sparse_out[sparse_idx].shape.push_back(EMBEDDING_SIZE);
sparse_out[sparse_idx].name = model_config->_feed_name[i];
sparse_out[sparse_idx].data.Resize(sparse_out[sparse_idx].lod[0].back() *
EMBEDDING_SIZE * sizeof(float));
// END HERE
float *dst_ptr = static_cast<float *>(sparse_out[sparse_idx].data.data());
for (int x = 0; x < sparse_out[sparse_idx].lod[0].back(); ++x) {
float *data_ptr = dst_ptr + x * EMBEDDING_SIZE;
if (cube_quant_bits == 0) {
memcpy(data_ptr,
values[cube_val_idx].buff.data(),
values[cube_val_idx].buff.size());
} else {
// min (float), max (float), num, num, num... (Byte)
size_t num_of_float =
values[cube_val_idx].buff.size() - 2 * sizeof(float);
float *float_ptr = new float[num_of_float];
char *src_ptr = new char[values[cube_val_idx].buff.size()];
memcpy(src_ptr,
values[cube_val_idx].buff.data(),
values[cube_val_idx].buff.size());
float *minmax = reinterpret_cast<float *>(src_ptr);
dequant(src_ptr + 2 * sizeof(float),
float_ptr,
minmax[0],
minmax[1],
num_of_float,
cube_quant_bits);
memcpy(data_ptr, float_ptr, sizeof(float) * num_of_float);
delete[] float_ptr;
delete[] src_ptr;
}
cube_val_idx++;
}
++sparse_idx;
}
TensorVector infer_in;
infer_in.insert(infer_in.end(), dense_out.begin(), dense_out.end());
infer_in.insert(infer_in.end(), sparse_out.begin(), sparse_out.end());
output_blob->SetBatchSize(batch_size);
VLOG(2) << "infer batch size: " << batch_size;
Timer timeline;
int64_t start = timeline.TimeStampUS();
timeline.Start();
if (InferManager::instance().infer(
GENERAL_MODEL_NAME, &infer_in, out, batch_size)) {
LOG(ERROR) << "Failed do infer in fluid model: " << GENERAL_MODEL_NAME;
return -1;
}
int64_t end = timeline.TimeStampUS();
CopyBlobInfo(input_blob, output_blob);
AddBlobInfo(output_blob, start);
AddBlobInfo(output_blob, end);
return 0;
}
DEFINE_OP(GeneralDistKVQuantInferOp);
} // namespace serving
} // namespace paddle_serving
} // namespace baidu
// Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#pragma once
#include <string>
#include <vector>
#ifdef BCLOUD
#ifdef WITH_GPU
#include "paddle/paddle_inference_api.h"
#else
#include "paddle/fluid/inference/api/paddle_inference_api.h"
#endif
#else
#include "paddle_inference_api.h" // NOLINT
#endif
#include "core/general-server/general_model_service.pb.h"
#include "core/general-server/op/general_infer_helper.h"
namespace baidu {
namespace paddle_serving {
namespace serving {
class GeneralDistKVQuantInferOp
: public baidu::paddle_serving::predictor::OpWithChannel<GeneralBlob> {
public:
typedef std::vector<paddle::PaddleTensor> TensorVector;
DECLARE_OP(GeneralDistKVQuantInferOp);
int inference();
};
} // namespace serving
} // namespace paddle_serving
} // namespace baidu
@@ -37,68 +37,39 @@ using baidu::paddle_serving::predictor::PaddleGeneralModelConfig;
 int GeneralInferOp::inference() {
   VLOG(2) << "Going to run inference";
-  // const GeneralBlob *input_blob =
-  //     get_depend_argument<GeneralBlob>(pre_name());
-  VLOG(2) << "try to get output_blob";
-  GeneralBlob *output_blob = mutable_data<GeneralBlob>();
-  fprintf(stderr, "[output] blob address %x\n", output_blob);
-  TensorVector *out = &output_blob->tensor_vector;
-  const std::vector<std::string> pre_node_names = pre_names();
-  VLOG(2) << "pre node names size: " << pre_node_names.size();
-  TensorVector input;
-  int batch_size = 0;
-  const GeneralBlob *input_blob;
-  for (uint32_t i = 0; i < pre_node_names.size(); ++i) {
-    VLOG(2) << "pre names[" << i << "]: " << pre_node_names[i];
-    input_blob = get_depend_argument<GeneralBlob>(pre_node_names[i]);
-    if (!input_blob) {
-      LOG(ERROR) << "Failed mutable depended argument, op:"
-                 << pre_node_names[i];
-      return -1;
-    }
-    fprintf(stderr, "[input] blob address %x\n", input_blob);
-    batch_size = input_blob->GetBatchSize();
-    VLOG(2) << "batch size of input: " << batch_size;
-    for (uint32_t j = 0; j < input_blob->tensor_vector.size(); ++j) {
-      VLOG(2) << "input tensor[" << j
-              << "]: " << input_blob->tensor_vector[j].name;
-      input.push_back(input_blob->tensor_vector[j]);
-      VLOG(2) << "add an input tensor name: "
-              << input_blob->tensor_vector[j].name;
-    }
+  if (pre_node_names.size() != 1) {
+    LOG(ERROR) << "This op(" << op_name() << ") can only have one predecessor op, but received " << pre_node_names.size();
+    return -1;
+  }
+  const std::string pre_name = pre_node_names[0];
+  const GeneralBlob *input_blob = get_depend_argument<GeneralBlob>(pre_name);
+  VLOG(2) << "Get precedent op name: " << pre_name;
+  GeneralBlob *output_blob = mutable_data<GeneralBlob>();
+  if (!input_blob) {
+    LOG(ERROR) << "Failed mutable depended argument, op:" << pre_name;
+    return -1;
   }
-  const TensorVector *in = &input;
-  batch_size = 1;
-  VLOG(2) << "infer batch size: " << batch_size;
+  const TensorVector *in = &input_blob->tensor_vector;
+  TensorVector *out = &output_blob->tensor_vector;
+  int batch_size = input_blob->GetBatchSize();
+  VLOG(2) << "input batch size: " << batch_size;
   output_blob->SetBatchSize(batch_size);
+  VLOG(2) << "infer batch size: " << batch_size;
   Timer timeline;
   int64_t start = timeline.TimeStampUS();
   timeline.Start();
-  VLOG(2) << "input of op " << op_name();
-  for (uint32_t i = 0; i < in->size(); ++i) {
-    VLOG(2) << in->at(i).name;
-  }
-  VLOG(2) << "get engine name: " << engine_name().c_str();
-  if (InferManager::instance().infer(
-          GeneralInferOp::engine_name().c_str(), in, out, batch_size)) {
-    LOG(ERROR) << "Failed do infer in fluid model: "
-               << GeneralInferOp::engine_name();
+  if (InferManager::instance().infer(GENERAL_MODEL_NAME, in, out, batch_size)) {
+    LOG(ERROR) << "Failed do infer in fluid model: " << GENERAL_MODEL_NAME;
     return -1;
   }
-  VLOG(2) << "output of op " << op_name();
-  for (uint32_t i = 0; i < out->size(); ++i) {
-    VLOG(2) << out->at(i).name;
-  }
   int64_t end = timeline.TimeStampUS();
   CopyBlobInfo(input_blob, output_blob);
   AddBlobInfo(output_blob, start);
......
@@ -80,7 +80,6 @@ int GeneralReaderOp::inference() {
   std::vector<int64_t> capacity;
   GeneralBlob *res = mutable_data<GeneralBlob>();
-  fprintf(stderr, "[reader] out blob address %x\n", res);
   TensorVector *out = &res->tensor_vector;
   res->SetBatchSize(batch_size);
......
@@ -46,7 +46,7 @@ int GeneralResponseOp::inference() {
   Response *res = mutable_data<Response>();
   Timer timeline;
-  // double resionse_time = 0.0;
+  // double response_time = 0.0;
   // timeline.Start();
   int64_t start = timeline.TimeStampUS();
......
@@ -36,20 +36,21 @@ using baidu::paddle_serving::predictor::InferManager;
 using baidu::paddle_serving::predictor::PaddleGeneralModelConfig;
 int GeneralTextResponseOp::inference() {
-  const std::vector<std::string> pre_node_names = pre_names();
-  VLOG(2) << "pre node names size: " << pre_node_names.size();
-  if (pre_node_names.size() != 1) {
-    LOG(ERROR) << "This op(" << op_name() << ") can only have one predecessor op, but received " << pre_node_names.size();
-    return -1;
-  }
-  const std::string pre_name = pre_node_names[0];
-  const GeneralBlob *input_blob =
-      get_depend_argument<GeneralBlob>(pre_node_names[0]);
+  VLOG(2) << "Going to run inference";
+  //TODO: multi-predecessor
+  const GeneralBlob *input_blob = get_depend_argument<GeneralBlob>(pre_name);
   if (!input_blob) {
-    LOG(ERROR) << "Failed mutable depended argument, op: " << pre_node_names[0];
+    LOG(ERROR) << "Failed mutable depended argument, op: " << pre_name;
     return -1;
   }
-  LOG(ERROR) << "Error!";
-  return -1;
-  /*
   const TensorVector *in = &input_blob->tensor_vector;
   int batch_size = input_blob->GetBatchSize();
@@ -131,7 +132,7 @@ int GeneralTextResponseOp::inference() {
   // TODO(guru4elephant): find more elegant way to do this
   res->add_profile_time(start);
   res->add_profile_time(end);
-  }*/
+  }
   return 0;
 }
......
@@ -152,13 +152,9 @@ class OpChannel : public Channel {
   // functions of derived class
-  T* data() {
-    LOG(INFO) << "get data from channel.";
-    return &_data; }
+  T* data() { return &_data; }
-  const T* data() const {
-    LOG(INFO) << "get data from channel.";
-    return &_data; }
+  const T* data() const { return &_data; }
   Channel& operator=(const T& obj) {
     _data = obj;
......
@@ -18,9 +18,6 @@
 #include "core/predictor/common/inner_common.h"
 #include "core/predictor/framework/predictor_metric.h"  // PredictorMetric
 #include "core/predictor/op/op.h"
-#define BLOG(fmt, ...) \
-  printf( \
-      "[%s:%s]:%d " fmt "\n", __FILE__, __FUNCTION__, __LINE__, ##__VA_ARGS__)
 namespace baidu {
 namespace paddle_serving {
@@ -213,14 +210,11 @@ int Dag::topo_sort() {
       uint32_t pnid = Dag::node_by_name(it->first)->id -
                       1;  // 0 is reserved for begginer-op
       in_egde[pnid].push_back(nid);
-      BLOG("inegde[%d]: %d", pnid, nid);
+      LOG(INFO) << "inegde[" << pnid << "]: " << nid;
     }
   }
   for (int i = 0; i < in_degree.size(); ++i) {
-    BLOG("(%s) in_degree[%d]: %d",
-         _index_nodes[i]->name.c_str(),
-         i,
-         in_degree[i]);
+    LOG(INFO) << "(" << _index_nodes[i]->name << ") in_degree[" << i << "]: " << in_degree[i];
   }
   int sorted_num = 0;
   DagStage* stage = new (std::nothrow) DagStage();
@@ -232,10 +226,9 @@ int Dag::topo_sort() {
   ss << _stages.size();
   stage->name = ss.str();
   stage->full_name = full_name() + NAME_DELIMITER + stage->name;
-  BLOG("stage->full_name: %s", stage->full_name.c_str());
   for (uint32_t nid = 0; nid < nodes_size; ++nid) {
     if (in_degree[nid] == 0) {
-      BLOG("nid: %d", nid);
+      LOG(INFO) << "nid:" << nid;
       ++sorted_num;
       stage->nodes.push_back(_index_nodes[nid]);
       // assign stage number after stage created
@@ -259,16 +252,15 @@ int Dag::topo_sort() {
     ss << _stages.size();
     stage->name = ss.str();
     stage->full_name = full_name() + NAME_DELIMITER + stage->name;
-    BLOG("stage->full_name: %s", stage->full_name.c_str());
     for (uint32_t pi = 0; pi < pre_nodes.size(); ++pi) {
       uint32_t pnid = pre_nodes[pi]->id - 1;
-      BLOG("pnid: %d", pnid);
+      LOG(INFO) << "pnid: " << pnid;
       for (uint32_t ei = 0; ei < in_egde[pnid].size(); ++ei) {
         uint32_t nid = in_egde[pnid][ei];
         --in_degree[nid];
-        BLOG("nid: %d, indeg: %d", nid, in_degree[nid]);
+        LOG(INFO) << "nid: " << nid << ", indeg: " << in_degree[nid];
         if (in_degree[nid] == 0) {
-          BLOG("nid: %d", nid);
+          LOG(INFO) << "nid: " << nid;
           ++sorted_num;
           stage->nodes.push_back(_index_nodes[nid]);
           // assign stage number after stage created
@@ -297,7 +289,6 @@ int Dag::topo_sort() {
   // ss << _stages.size();
   // stage->name = ss.str();
   // stage->full_name = full_name() + NAME_DELIMITER + stage->name;
-  // BLOG("stage->full_name: %s", stage->full_name.c_str());
   //_stages.push_back(stage);
   //// assign stage number after stage created
......
@@ -21,19 +21,12 @@
 #include <string>
 #include "core/predictor/common/inner_common.h"
 #include "core/predictor/framework/op_repository.h"
-#define BLOG(fmt, ...)             \
-  printf("[%s:%s]:%d " fmt "\n",   \
-         __FILE__,                 \
-         __FUNCTION__,             \
-         __LINE__,                 \
-         ##__VA_ARGS__);
 namespace baidu {
 namespace paddle_serving {
 namespace predictor {
 int DagView::init(Dag* dag, const std::string& service_name) {
-  BLOG("DagView::init.");
   _name = dag->name();
   _full_name = service_name + NAME_DELIMITER + dag->name();
   _bus = butil::get_object<Bus>();
@@ -85,7 +78,7 @@ int DagView::init(Dag* dag, const std::string& service_name) {
     op->set_full_name(service_name + NAME_DELIMITER + node->full_name);
     // Set the name of the Op as the key of the matching engine.
-    BLOG("op->set_engine_name(%s)", node->name.c_str());
+    VLOG(2) << "op->set_engine_name(" << node->name.c_str() << ")";
     op->set_engine_name(node->name);
     vnode->conf = node;
@@ -114,7 +107,6 @@ int DagView::init(Dag* dag, const std::string& service_name) {
     _view.push_back(vstage);
   }
-  BLOG("DagView::finish.");
   return ERR_OK;
 }
@@ -144,7 +136,6 @@ int DagView::deinit() {
 int DagView::execute(butil::IOBufBuilder* debug_os) {
   uint32_t stage_size = _view.size();
   for (uint32_t si = 0; si < stage_size; si++) {
-    BLOG("start to execute stage[%u] %s", si, _view[si]->full_name.c_str());
     TRACEPRINTF("start to execute stage[%u]", si);
     int errcode = execute_one_stage(_view[si], debug_os);
     TRACEPRINTF("finish to execute stage[%u]", si);
@@ -163,16 +154,13 @@ int DagView::execute_one_stage(ViewStage* vstage,
                                butil::IOBufBuilder* debug_os) {
   butil::Timer stage_time(butil::Timer::STARTED);
   uint32_t node_size = vstage->nodes.size();
-  BLOG("vstage->nodes.size(): %d", node_size);
+  VLOG(2) << "vstage->nodes.size(): " << node_size;
   for (uint32_t ni = 0; ni < node_size; ni++) {
     ViewNode* vnode = vstage->nodes[ni];
     DagNode* conf = vnode->conf;
     Op* op = vnode->op;
-    BLOG("start to execute op[%s]", op->name());
-    BLOG("Op engine name: %s", op->engine_name().c_str());
     TRACEPRINTF("start to execute op[%s]", op->name());
     int errcode = op->process(debug_os != NULL);
-    BLOG("finish to execute op[%s]", op->name());
     TRACEPRINTF("finish to execute op[%s]", op->name());
     if (errcode < 0) {
       LOG(ERROR) << "Execute failed, Op:" << op->debug_string();
......
@@ -23,9 +23,6 @@
 #include "core/predictor/framework/bsf.h"
 #include "core/predictor/framework/factory.h"
 #include "core/predictor/framework/infer_data.h"
-#define BLOG(fmt, ...) \
-  printf( \
-      "[%s:%s]:%d " fmt "\n", __FILE__, __FUNCTION__, __LINE__, ##__VA_ARGS__)
 namespace baidu {
 namespace paddle_serving {
@@ -768,9 +765,7 @@ class InferManager {
     }
     size_t engine_num = model_toolkit_conf.engines_size();
     for (size_t ei = 0; ei < engine_num; ++ei) {
-      BLOG("model_toolkit_conf.engines(%d).name: %s",
-           ei,
-           model_toolkit_conf.engines(ei).name().c_str());
+      LOG(INFO) << "model_toolkit_conf.engines(" << ei << ").name: " << model_toolkit_conf.engines(ei).name();
       std::string engine_name = model_toolkit_conf.engines(ei).name();
       VersionedInferEngine* engine = new (std::nothrow) VersionedInferEngine();
       if (!engine) {
@@ -851,10 +846,8 @@ class InferManager {
             void* out,
             uint32_t batch_size = -1) {
     auto it = _map.find(model_name);
-    BLOG("find model_name: %s", model_name);
     if (it == _map.end()) {
       LOG(WARNING) << "Cannot find engine in map, model name:" << model_name;
-      BLOG("Cannot find engine in map, model name: %s", model_name);
       return -1;
     }
     return it->second->infer(in, out, batch_size);
......
@@ -28,7 +28,6 @@
 #include "core/predictor/framework/manager.h"
 #include "core/predictor/framework/resource.h"
 #include "core/predictor/framework/service_manager.h"
-#define BLOG(fmt, ...) printf("[%s:%s]:%d "fmt"\n", __FILE__, __FUNCTION__, __LINE__, ##__VA_ARGS__)
 namespace baidu {
 namespace paddle_serving {
@@ -86,7 +85,6 @@ int ServerManager::start_and_wait() {
   boost::unordered_map<std::string, Service*>::iterator it;
   for (it = _format_services.begin(); it != _format_services.end(); it++) {
-    BLOG("\n\nservice name: %s", it->first.c_str());
     if (_server.AddService(it->second, brpc::SERVER_DOESNT_OWN_SERVICE) != 0) {
       LOG(ERROR) << "Failed to add service of format:" << it->first << "!";
       return -1;
......
@@ -30,9 +30,6 @@
 #include "core/predictor/framework/predictor_metric.h"  // PredictorMetric
 #include "core/predictor/framework/resource.h"
 #include "core/predictor/framework/server.h"
-#define BLOG(fmt, ...) \
-  printf( \
-      "[%s:%s]:%d " fmt "\n", __FILE__, __FUNCTION__, __LINE__, ##__VA_ARGS__)
 namespace baidu {
 namespace paddle_serving {
@@ -139,7 +136,6 @@ const std::string& InferService::name() const { return _infer_service_format; }
 int InferService::inference(const google::protobuf::Message* request,
                             google::protobuf::Message* response,
                             butil::IOBufBuilder* debug_os) {
-  BLOG("\n=====> start to inference");
   TRACEPRINTF("start to inference");
   // when funtion call begins, framework will reset
   // thread local variables&resources automatically.
@@ -165,7 +161,6 @@ int InferService::inference(const google::protobuf::Message* request,
     return ERR_INTERNAL_FAILURE;
   }
   TRACEPRINTF("start to execute workflow[%s]", workflow->name().c_str());
-  BLOG("start to execute workflow[%s]", workflow->name().c_str());
   int errcode = _execute_workflow(workflow, request, response, debug_os);
   TRACEPRINTF("finish to execute workflow[%s]", workflow->name().c_str());
   if (errcode < 0) {
@@ -225,7 +220,6 @@ int InferService::_execute_workflow(Workflow* workflow,
   // call actual inference interface
   int errcode = dv->execute(debug_os);
-  BLOG("execute_workflow");
   if (errcode < 0) {
     LOG(ERROR) << "Failed execute dag for workflow:" << workflow->name();
     return errcode;
......
@@ -16,9 +16,6 @@
 #include <string>
 #include "core/predictor/common/inner_common.h"
 #include "core/predictor/framework/predictor_metric.h"  // PredictorMetric
-#define BLOG(fmt, ...) \
-  printf( \
-      "[%s:%s]:%d " fmt "\n", __FILE__, __FUNCTION__, __LINE__, ##__VA_ARGS__)
 namespace baidu {
 namespace paddle_serving {
@@ -54,7 +51,6 @@ DagView* Workflow::fetch_dag_view(const std::string& service_name) {
 }
 void Workflow::return_dag_view(DagView* view) {
-  BLOG("Workflow::return_dag_vie");
   view->deinit();
   if (_type == "Sequence") {
     butil::return_object<DagView>(view);
......
@@ -25,12 +25,6 @@
 #include "core/predictor/common/utils.h"
 #include "core/predictor/framework/channel.h"
 #include "core/predictor/framework/dag.h"
-#define BLOG(fmt, ...)             \
-  printf("[%s:%s]:%d " fmt "\n",   \
-         __FILE__,                 \
-         __FUNCTION__,             \
-         __LINE__,                 \
-         ##__VA_ARGS__);
 namespace baidu {
 namespace paddle_serving {
@@ -140,7 +134,6 @@ int Op::process(bool debug) {
   }
   // 2. current inference
-  BLOG("Op: %s->inference()", _name.c_str());
   if (inference() != 0) {
     return ERR_OP_INFER_FAILURE;
   }
......
@@ -20,8 +20,6 @@
 #include "core/predictor/framework/op_repository.h"
 #include "core/predictor/framework/predictor_metric.h"  // PredictorMetric
 #include <cstdlib>
-#define BLOG(fmt, ...) printf("[%s:%s]:%d "fmt"\n", __FILE__, __FUNCTION__, __LINE__, ##__VA_ARGS__)
-#include<stdexcept>
 namespace baidu {
 namespace paddle_serving {
@@ -231,14 +229,13 @@ class OpWithChannel : public Op {
   Channel* mutable_channel() {
     if (_channel != NULL) {
-      LOG(INFO) << "op->mutable_data: _channel != NULL";
       return _channel;
     }
-    LOG(INFO) << "try to get_object: _channel";
+    //TODO: some bug in using butil::get_object
     //_channel = butil::get_object<ChannelType>();
-    //LOG(INFO) << butil::describe_objects<ChannelType>();
     _channel = new ChannelType();
     if (!_channel) {
       LOG(ERROR) << "Failed mutable channel of type:" << typeid(T).name();
       return NULL;
@@ -250,11 +247,11 @@ class OpWithChannel : public Op {
   const Channel* get_channel() const { return _channel; }
   int release_channel() {
-    LOG(INFO) << "=====> _chaneel deinit";
     if (_channel) {
       _channel->deinit();
       delete _channel;
     }
+    //TODO: some bug in using butil::get_object
     /*if (_channel) {*/
     //_channel->deinit();
     //butil::return_object<ChannelType>(_channel);
......
@@ -32,7 +32,6 @@
 #include "core/predictor/framework/server.h"
 #include "core/predictor/framework/service.h"
 #include "core/predictor/framework/workflow.h"
-#define BLOG(fmt, ...) printf("[%s:%s]:%d "fmt"\n", __FILE__, __FUNCTION__, __LINE__, ##__VA_ARGS__)
 using baidu::paddle_serving::predictor::ServerManager;
 using baidu::paddle_serving::predictor::WorkflowManager;
@@ -218,7 +217,6 @@ int main(int argc, char** argv) {
   FLAGS_stderrthreshold = 3;
 #endif
-  BLOG("\nServerManager::instance().start_and_wait()\n");
   if (ServerManager::instance().start_and_wait() != 0) {
     LOG(ERROR) << "Failed start server and wait!";
     return -1;
......
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=doc-string-missing
from paddle_serving_client import Client
from imdb_reader import IMDBDataset
import sys
import time
client = Client()
client.load_client_config('imdb_bow_client_conf/serving_client_conf.prototxt')
client.connect(["127.0.0.1:9393"])
# You can use any English sentence or dataset here.
# This example reuses the IMDB reader from training; you
# can define your own data preprocessing easily.
imdb_dataset = IMDBDataset()
imdb_dataset.load_resource('imdb.vocab')
for i in range(500):
line = 'i am very sad | 0'
word_ids, label = imdb_dataset.get_words_and_label(line)
feed = {"words": word_ids}
fetch = ["acc", "cost", "prediction"]
fetch_map = client.predict(feed=feed, fetch=fetch)
print("{} {}".format(i, fetch_map["prediction"][1]))
# time.sleep(1)
# exit(0)
print('0.633530199528')
wget --no-check-certificate https://fleet.bj.bcebos.com/text_classification_data.tar.gz
wget --no-check-certificate https://paddle-serving.bj.bcebos.com/imdb-demo/imdb_model.tar.gz
tar -zxvf text_classification_data.tar.gz
tar -zxvf imdb_model.tar.gz
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=doc-string-missing
import sys
import os
import paddle
import re
import paddle.fluid.incubate.data_generator as dg
py_version = sys.version_info[0]
class IMDBDataset(dg.MultiSlotDataGenerator):
def load_resource(self, dictfile):
self._vocab = {}
wid = 0
if py_version == 2:
with open(dictfile) as f:
for line in f:
self._vocab[line.strip()] = wid
wid += 1
else:
with open(dictfile, encoding="utf-8") as f:
for line in f:
self._vocab[line.strip()] = wid
wid += 1
self._unk_id = len(self._vocab)
self._pattern = re.compile(r'(;|,|\.|\?|!|\s|\(|\))')
self.return_value = ("words", [1, 2, 3, 4, 5, 6]), ("label", [0])
def get_words_only(self, line):
sent = line.lower().replace("<br />", " ").strip()
words = [x for x in self._pattern.split(sent) if x and x != " "]
feas = [
self._vocab[x] if x in self._vocab else self._unk_id for x in words
]
return feas
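    # get_words_and_label splits a labeled line of the form "<text> | <label>":
    # everything before the last '|' is tokenized into vocab ids (unknown words
    # map to _unk_id) and the trailing field becomes the integer label, e.g.
    # "i am very sad | 0" -> ([id_i, id_am, id_very, id_sad], [0]).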
def get_words_and_label(self, line):
send = '|'.join(line.split('|')[:-1]).lower().replace("<br />",
" ").strip()
label = [int(line.split('|')[-1])]
words = [x for x in self._pattern.split(send) if x and x != " "]
feas = [
self._vocab[x] if x in self._vocab else self._unk_id for x in words
]
return feas, label
def infer_reader(self, infer_filelist, batch, buf_size):
def local_iter():
for fname in infer_filelist:
with open(fname, "r") as fin:
for line in fin:
feas, label = self.get_words_and_label(line)
yield feas, label
import paddle
batch_iter = paddle.batch(
paddle.reader.shuffle(
local_iter, buf_size=buf_size),
batch_size=batch)
return batch_iter
def generate_sample(self, line):
def memory_iter():
for i in range(1000):
yield self.return_value
def data_iter():
feas, label = self.get_words_and_label(line)
yield ("words", feas), ("label", label)
return data_iter
if __name__ == "__main__":
imdb = IMDBDataset()
imdb.load_resource("imdb.vocab")
imdb.run_from_stdin()
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=doc-string-missing
import os
import sys
from paddle_serving_server import OpMaker
from paddle_serving_server import OpSeqMaker
from paddle_serving_server import Server
op_maker = OpMaker()
read_op = op_maker.create('general_reader')
g1_infer_op = op_maker.create('general_infer', node_name='g1')
g2_infer_op = op_maker.create('general_infer', node_name='g2')
add_op = op_maker.create('general_add')
response_op = op_maker.create('general_response')
op_seq_maker = OpSeqMaker()
op_seq_maker.add_op(read_op)
op_seq_maker.add_op(g1_infer_op, dependent_nodes=[read_op])
op_seq_maker.add_op(g2_infer_op, dependent_nodes=[read_op])
op_seq_maker.add_op(add_op, dependent_nodes=[g1_infer_op, g2_infer_op])
op_seq_maker.add_op(response_op, dependent_nodes=[add_op])
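# The ops above form a diamond-shaped DAG: general_reader feeds both g1 and g2,
# their outputs are merged by general_add, and general_response returns the
# merged result to the client.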
server = Server()
server.set_op_sequence(op_seq_maker.get_op_sequence())
# server.load_model_config(sys.argv[1])
model_configs = {'g1': 'imdb_bow_model', 'g2': 'imdb_bow_model'}
# model_configs = {'g1': 'imdb_bow_model', 'g2': 'imdb_cnn_model'}
server.load_model_config(model_configs)
server.prepare_server(workdir="work_dir1", port=9393, device="cpu")
server.run_server()
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=doc-string-missing
import os
import sys
from paddle_serving_server import OpMaker
from paddle_serving_server import OpSeqMaker
from paddle_serving_server import Server
op_maker = OpMaker()
read_op = op_maker.create('general_reader')
infer_op = op_maker.create('general_infer')
response_op = op_maker.create('general_response')
op_seq_maker = OpSeqMaker()
op_seq_maker.add_op(read_op)
op_seq_maker.add_op(infer_op, dependent_nodes=[read_op])
op_seq_maker.add_op(response_op, dependent_nodes=[infer_op])
server = Server()
server.set_op_sequence(op_seq_maker.get_op_sequence())
# server.load_model_config(sys.argv[1])
model_configs = {'general_infer_op': 'imdb_bow_model'}
server.load_model_config(model_configs)
server.prepare_server(workdir="work_dir1", port=9393, device="cpu")
server.run_server()
@@ -29,4 +29,3 @@ test_reader = paddle.batch(
 for data in test_reader():
     fetch_map = client.predict(feed={"x": data[0][0]}, fetch=["price"])
     print("{} {}".format(fetch_map["price"][0], data[0][1][0]))
-    exit(0)
@@ -21,20 +21,16 @@ from paddle_serving_server import Server
 op_maker = OpMaker()
 read_op = op_maker.create('general_reader')
-g1_infer_op = op_maker.create('general_infer', node_name='g1')
-g2_infer_op = op_maker.create('general_infer', node_name='g2')
+general_infer_op = op_maker.create('general_infer')
 response_op = op_maker.create('general_response')
 op_seq_maker = OpSeqMaker()
 op_seq_maker.add_op(read_op)
-op_seq_maker.add_op(g1_infer_op, dependent_nodes=[read_op])
-op_seq_maker.add_op(g2_infer_op, dependent_nodes=[read_op])
-op_seq_maker.add_op(response_op, dependent_nodes=[g1_infer_op, g2_infer_op])
+op_seq_maker.add_op(general_infer_op)
+op_seq_maker.add_op(response_op)
 server = Server()
 server.set_op_sequence(op_seq_maker.get_op_sequence())
-# server.load_model_config(sys.argv[1])
-model_configs = {'g1': 'uci_housing_model', 'g2': 'uci_housing_model'}
-server.load_model_config(model_configs)
+server.load_model_config(sys.argv[1])
 server.prepare_server(workdir="work_dir1", port=9393, device="cpu")
 server.run_server()
@@ -34,8 +34,7 @@ class OpMaker(object):
         "general_single_kv": "GeneralSingleKVOp",
         "general_dist_kv_infer": "GeneralDistKVInferOp",
         "general_dist_kv_quant_infer": "GeneralDistKVQuantInferOp",
-        "general_copy": "GeneralCopyOp",
-        "general_add": "GeneralAddOp"
+        "general_copy": "GeneralCopyOp"
     }
     # currently, inputs and outputs are not used
......