Commit a0cc5cfa, authored by: barrierye

add code style check

Parent: 69d13cec
@@ -10,4 +10,5 @@ services:
 before_install:
   - docker build -f ${DOCKERFILE_CPU} -t serving-img:${COMPILE_TYPE} .
 install:
-  - docker run -it -v $PWD:/Serving serving-img:${COMPILE_TYPE} /bin/bash Serving/tools/serving-build.sh $COMPILE_TYPE
+  - if [ $COMPILE_TYPE == "CPU" ]; then docker run -it -v $PWD:/Serving serving-img:${COMPILE_TYPE} /bin/bash Serving/tools/serving_check_style.sh ; fi;
+  - docker run -it -v $PWD:/Serving serving-img:${COMPILE_TYPE} /bin/bash Serving/tools/serving_build.sh $COMPILE_TYPE
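Note: the contents of `Serving/tools/serving_check_style.sh` are not part of this diff. As a rough sketch of what such a CI style gate typically does — an assumption, not the actual script: it presumably runs clang-format over C++/proto sources and yapf over Python, which would match the reformatting in the hunks below:

```python
# check_style.py -- hypothetical sketch, NOT the actual
# Serving/tools/serving_check_style.sh from this commit.
import subprocess
import sys
from pathlib import Path


def needs_reformat(path):
    """Return True if the formatter would change the file."""
    if path.suffix in {".h", ".cc", ".cpp", ".proto"}:
        cmd = ["clang-format", "-style=file", str(path)]
    elif path.suffix == ".py":
        cmd = ["yapf", str(path)]
    else:
        return False
    # Both tools print the formatted source to stdout; compare with disk.
    out = subprocess.run(cmd, capture_output=True, text=True)
    return out.stdout != path.read_text()


def main(root="."):
    dirty = [p for p in Path(root).rglob("*")
             if p.is_file() and needs_reformat(p)]
    for p in dirty:
        print("needs formatting:", p)
    sys.exit(1 if dirty else 0)


if __name__ == "__main__":
    main()
```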
@@ -87,4 +87,3 @@ add_custom_command(TARGET general_model_config_py_proto POST_BUILD
                   WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR})
 endif()
 endif()
-
@@ -20,16 +20,16 @@ namespace baidu {
 namespace paddle_serving {
 namespace configure {

 int read_proto_conf(const std::string &conf_full_path,
                     google::protobuf::Message *conf);

 int read_proto_conf(const std::string &conf_path,
                     const std::string &conf_file,
                     google::protobuf::Message *conf);

 int write_proto_conf(google::protobuf::Message *message,
                      const std::string &output_path,
                      const std::string &output_file);

 }  // namespace configure
 }  // namespace paddle_serving
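For orientation, the equivalent of these helpers in Python (a sketch using the standard `google.protobuf.text_format` API; not part of this commit) is a one-line text-format parse:

```python
from google.protobuf import text_format


def read_proto_conf(conf_path, conf_file, conf):
    # Mirrors the C++ helper declared above: parse a text-format
    # .prototxt file into the given protobuf message `conf`.
    with open(conf_path + "/" + conf_file) as f:
        text_format.Merge(f.read(), conf)
    return 0
```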
@@ -12,9 +12,9 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.

-#include "core/cube/cube-server/include/cube/server.h"
 #include <brpc/server.h>
 #include "core/cube/cube-server/include/cube/framework.h"
+#include "core/cube/cube-server/include/cube/server.h"

 namespace rec {
 namespace mcube {
@@ -17,18 +17,18 @@
 #include "core/general-client/include/general_model.h"

-using namespace std;
+using namespace std;  // NOLINT

 using baidu::paddle_serving::general_model::PredictorClient;
 using baidu::paddle_serving::general_model::FetchedMap;

-int main(int argc, char * argv[]) {
-  PredictorClient * client = new PredictorClient();
+int main(int argc, char* argv[]) {
+  PredictorClient* client = new PredictorClient();
   client->init("inference.conf");
   client->set_predictor_conf("./", "predictor.conf");
   client->create_predictor();

-  std::vector<std::vector<float> > float_feed;
-  std::vector<std::vector<int64_t> > int_feed;
+  std::vector<std::vector<float>> float_feed;
+  std::vector<std::vector<int64_t>> int_feed;
   std::vector<std::string> float_feed_name;
   std::vector<std::string> int_feed_name = {"words", "label"};
   std::vector<std::string> fetch_name = {"cost", "acc", "prediction"};

@@ -53,13 +53,14 @@ int main(int argc, char * argv[]) {
     cin >> label;
     int_feed.push_back({label});

     FetchedMap result;

-    client->predict(
-        float_feed, float_feed_name,
-        int_feed, int_feed_name, fetch_name,
-        &result);
+    client->predict(float_feed,
+                    float_feed_name,
+                    int_feed,
+                    int_feed_name,
+                    fetch_name,
+                    &result);

     cout << label << "\t" << result["prediction"][1] << endl;
@@ -69,8 +69,7 @@ int GeneralCopyOp::inference() {
   for (int i = 0; i < out->size(); ++i) {
     int64_t *src_ptr = static_cast<int64_t *>(in->at(i).data.data());
-    out->at(i).data.Resize(
-        out->at(i).lod[0].back() * sizeof(int64_t));
+    out->at(i).data.Resize(out->at(i).lod[0].back() * sizeof(int64_t));
     out->at(i).shape = {out->at(i).lod[0].back(), 1};
     int64_t *tgt_ptr = static_cast<int64_t *>(out->at(i).data.data());
     for (int j = 0; j < out->at(i).lod[0].back(); ++j) {
@@ -24,23 +24,22 @@
 #include "paddle_inference_api.h"  // NOLINT
 #endif
 #include <string>
-#include "core/predictor/framework/resource.h"
-#include "core/general-server/op/general_infer_helper.h"
 #include "core/general-server/general_model_service.pb.h"
+#include "core/general-server/op/general_infer_helper.h"
+#include "core/predictor/framework/resource.h"

 namespace baidu {
 namespace paddle_serving {
 namespace serving {

-class GeneralCopyOp :
-    public baidu::paddle_serving::predictor::OpWithChannel<GeneralBlob> {
+class GeneralCopyOp
+    : public baidu::paddle_serving::predictor::OpWithChannel<GeneralBlob> {
  public:
   typedef std::vector<paddle::PaddleTensor> TensorVector;

   DECLARE_OP(GeneralCopyOp);

   int inference();
 };

 }  // namespace serving
@@ -39,7 +39,6 @@ class GeneralInferOp
   DECLARE_OP(GeneralInferOp);

   int inference();
 };
-
 }  // namespace serving
@@ -188,8 +188,7 @@ int GeneralReaderOp::inference() {
       for (int j = 0; j < batch_size; ++j) {
         int elem_num = req->insts(j).tensor_array(i).int64_data_size();
         for (int k = 0; k < elem_num; ++k) {
-          dst_ptr[offset + k] =
-              req->insts(j).tensor_array(i).int64_data(k);
+          dst_ptr[offset + k] = req->insts(j).tensor_array(i).int64_data(k);
         }
         if (out->at(i).lod.size() == 1) {
           offset = out->at(i).lod[0][j + 1];

@@ -203,8 +202,7 @@ int GeneralReaderOp::inference() {
       for (int j = 0; j < batch_size; ++j) {
         int elem_num = req->insts(j).tensor_array(i).float_data_size();
         for (int k = 0; k < elem_num; ++k) {
-          dst_ptr[offset + k] =
-              req->insts(j).tensor_array(i).float_data(k);
+          dst_ptr[offset + k] = req->insts(j).tensor_array(i).float_data(k);
         }
         if (out->at(i).lod.size() == 1) {
           offset = out->at(i).lod[0][j + 1];
@@ -24,24 +24,23 @@
 #include "paddle_inference_api.h"  // NOLINT
 #endif
 #include <string>
-#include "core/predictor/framework/resource.h"
-#include "core/general-server/op/general_infer_helper.h"
 #include "core/general-server/general_model_service.pb.h"
 #include "core/general-server/load_general_model_service.pb.h"
+#include "core/general-server/op/general_infer_helper.h"
+#include "core/predictor/framework/resource.h"

 namespace baidu {
 namespace paddle_serving {
 namespace serving {

-class GeneralReaderOp : public baidu::paddle_serving::predictor::OpWithChannel<
-    GeneralBlob> {
+class GeneralReaderOp
+    : public baidu::paddle_serving::predictor::OpWithChannel<GeneralBlob> {
  public:
   typedef std::vector<paddle::PaddleTensor> TensorVector;

   DECLARE_OP(GeneralReaderOp);

   int inference();
 };

 }  // namespace serving
@@ -122,8 +122,7 @@ int GeneralResponseOp::inference() {
       } else {
         for (int j = 0; j < batch_size; ++j) {
           FetchInst *fetch_p = res->mutable_insts(j);
-          fetch_p->mutable_tensor_array(var_idx)->add_int64_data(
-              data_ptr[0]);
+          fetch_p->mutable_tensor_array(var_idx)->add_int64_data(data_ptr[0]);
         }
       }
     }

@@ -143,16 +142,15 @@ int GeneralResponseOp::inference() {
       if (var_size == batch_size) {
         for (int j = 0; j < batch_size; ++j) {
           for (int k = j * cap; k < (j + 1) * cap; ++k) {
-            FetchInst * fetch_p = res->mutable_insts(j);
+            FetchInst *fetch_p = res->mutable_insts(j);
             fetch_p->mutable_tensor_array(var_idx)->add_float_data(
                 data_ptr[k]);
           }
         }
       } else {
         for (int j = 0; j < batch_size; ++j) {
-          FetchInst * fetch_p = res->mutable_insts(j);
-          fetch_p->mutable_tensor_array(var_idx)->add_float_data(
-              data_ptr[0]);
+          FetchInst *fetch_p = res->mutable_insts(j);
+          fetch_p->mutable_tensor_array(var_idx)->add_float_data(data_ptr[0]);
         }
       }
     }
@@ -39,7 +39,6 @@ class GeneralResponseOp
   DECLARE_OP(GeneralResponseOp);

   int inference();
 };
-
 }  // namespace serving
@@ -12,11 +12,11 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.

+#include "core/general-server/op/general_text_reader_op.h"
 #include <algorithm>
 #include <iostream>
 #include <memory>
 #include <sstream>
-#include "core/general-server/op/general_text_reader_op.h"
 #include "core/predictor/framework/infer.h"
 #include "core/predictor/framework/memory.h"
 #include "core/util/include/timer.h"

@@ -32,7 +32,6 @@ using baidu::paddle_serving::predictor::general_model::Request;
 using baidu::paddle_serving::predictor::general_model::FeedInst;
 using baidu::paddle_serving::predictor::PaddleGeneralModelConfig;
-
 int GeneralTextReaderOp::inference() {
   // reade request from client
   const Request *req = dynamic_cast<const Request *>(get_request_message());

@@ -132,11 +131,9 @@ int GeneralTextReaderOp::inference() {
       int64_t *dst_ptr = static_cast<int64_t *>(out->at(i).data.data());
       int offset = 0;
       for (int j = 0; j < batch_size; ++j) {
-        for (int k = 0;
-             k < req->insts(j).tensor_array(i).int_data_size();
+        for (int k = 0; k < req->insts(j).tensor_array(i).int_data_size();
              ++k) {
-          dst_ptr[offset + k] =
-              req->insts(j).tensor_array(i).int_data(k);
+          dst_ptr[offset + k] = req->insts(j).tensor_array(i).int_data(k);
         }
         if (out->at(i).lod.size() == 1) {
           offset = out->at(i).lod[0][j + 1];

@@ -148,11 +145,9 @@ int GeneralTextReaderOp::inference() {
       float *dst_ptr = static_cast<float *>(out->at(i).data.data());
       int offset = 0;
       for (int j = 0; j < batch_size; ++j) {
-        for (int k = 0;
-             k < req->insts(j).tensor_array(i).int_data_size();
+        for (int k = 0; k < req->insts(j).tensor_array(i).int_data_size();
              ++k) {
-          dst_ptr[offset + k] =
-              req->insts(j).tensor_array(i).int_data(k);
+          dst_ptr[offset + k] = req->insts(j).tensor_array(i).int_data(k);
         }
         if (out->at(i).lod.size() == 1) {
           offset = out->at(i).lod[0][j + 1];
@@ -24,17 +24,17 @@
 #include "paddle_inference_api.h"  // NOLINT
 #endif
 #include <string>
-#include "core/predictor/framework/resource.h"
-#include "core/general-server/op/general_infer_helper.h"
 #include "core/general-server/general_model_service.pb.h"
 #include "core/general-server/load_general_model_service.pb.h"
+#include "core/general-server/op/general_infer_helper.h"
+#include "core/predictor/framework/resource.h"

 namespace baidu {
 namespace paddle_serving {
 namespace serving {

-class GeneralTextReaderOp :
-    public baidu::paddle_serving::predictor::OpWithChannel<GeneralBlob> {
+class GeneralTextReaderOp
+    : public baidu::paddle_serving::predictor::OpWithChannel<GeneralBlob> {
  public:
   typedef std::vector<paddle::PaddleTensor> TensorVector;
@@ -40,7 +40,6 @@ class GeneralTextResponseOp
   DECLARE_OP(GeneralTextResponseOp);

   int inference();
 };
-
 }  // namespace serving
@@ -28,13 +28,9 @@ message Tensor {
   repeated int32 shape = 6;
 };

-message FeedInst {
-  repeated Tensor tensor_array = 1;
-};
+message FeedInst { repeated Tensor tensor_array = 1; };

-message FetchInst {
-  repeated Tensor tensor_array = 1;
-};
+message FetchInst { repeated Tensor tensor_array = 1; };

 message Request {
   repeated FeedInst insts = 1;
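For reference, these messages would be filled in from Python roughly as follows (a sketch; `general_model_service_pb2` stands in for the module protoc would generate from this file, while the `int64_data` field name is taken from the reader ops above):

```python
import general_model_service_pb2 as pb  # assumed generated module name

req = pb.Request()
inst = req.insts.add()            # one FeedInst per sample in the batch
tensor = inst.tensor_array.add()  # one Tensor per feed variable
tensor.shape.extend([4, 1])
tensor.int64_data.extend([8, 233, 52, 601])  # e.g. word ids
```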
@@ -27,11 +27,11 @@
 // limitations under the License.
 #pragma once
-#include <chrono>
+#include <chrono>  // NOLINT
+#include <functional>
 #include <memory>
 #include <unordered_map>
 #include <vector>
-#include <functional>

 class AbstractKVDB;
 class FileReader;
 class ParamDict;

@@ -65,7 +65,7 @@ class FileReader {
       std::string data;
       FILE *stream = nullptr;
       const int max_buffer = 256;
-      char buffer[max_buffer];
+      char buffer[max_buffer];  // NOLINT
       cmd.append(" 2>&1");
       stream = popen(cmd.c_str(), "r");
       if (stream) {

@@ -76,7 +76,8 @@ class FileReader {
       return data;
     };
     std::string cmd = "md5sum " + this->filename_;
-    // TODO: throw exception if error occurs during execution of shell command
+    // NOLINT TODO: throw exception if error occurs during execution of shell
+    // command
     std::string md5val = getCmdOut(cmd);
     this->time_stamp_ = md5val == this->last_md5_val_
                             ? this->time_stamp_

@@ -93,7 +94,7 @@ class FileReader {
     return this->time_stamp_;
   }

-  inline virtual ~FileReader(){};
+  inline virtual ~FileReader() {}

  private:
   std::string filename_;

@@ -128,7 +129,7 @@ class ParamDict {
   virtual ~ParamDict();

  private:
-  std::function<std::pair<Key, Value>(std::string)> read_func_;
+  std::function<std::pair<Key, Value>(std::string)> read_func_;  // NOLINT
   std::vector<FileReaderPtr> file_reader_lst_;
   AbsKVDBPtr front_db, back_db;
 };

@@ -139,5 +140,5 @@ class ParamDictMgr {
   void InsertParamDict(std::string, ParamDictPtr);

  private:
-  std::unordered_map<std::string, ParamDictPtr> ParamDictMap;
+  std::unordered_map<std::string, ParamDictPtr> ParamDictMap;  // NOLINT
 };
@@ -25,7 +25,7 @@
 class RocksDBWrapper {
  public:
-  RocksDBWrapper(std::string db_name);
+  RocksDBWrapper(std::string db_name);  // NOLINT
   std::string Get(std::string key);
   bool Put(std::string key, std::string value);

@@ -33,6 +33,7 @@ class RocksDBWrapper {
   static std::shared_ptr<RocksDBWrapper> RocksDBWrapperFactory(
       std::string db_name = "SparseMatrix");
   void Close();
+
  private:
   rocksdb::DB *db_;
   std::string db_name_;
@@ -16,7 +16,7 @@
 #include <fstream>
 #include <iterator>
 #include <sstream>
-#include <thread>
+#include <thread>  // NOLINT
 #include "core/kvdb/include/kvdb/rocksdb_impl.h"

 std::vector<FileReaderPtr> ParamDict::GetDictReaderLst() {
@@ -33,8 +33,10 @@ void ParamDict::SetFileReaderLst(std::vector<std::string> lst) {
 std::vector<float> ParamDict::GetSparseValue(std::string feasign,
                                              std::string slot) {
-  auto BytesToFloat = [](uint8_t* byte_array) { return *((float*)byte_array); };
-  // TODO: the concatation of feasign and slot is TBD.
+  auto BytesToFloat = [](uint8_t* byte_array) {
+    return *((float*)byte_array);  // NOLINT
+  };
+  // NOLINT TODO: the concatation of feasign and slot is TBD.
   std::string result = front_db->Get(feasign + slot);
   std::vector<float> value;
   if (result == "NOT_FOUND") return value;

@@ -87,7 +89,7 @@ bool ParamDict::InsertSparseValue(std::string feasign,
     value.push_back(raw_values_ptr[i]);
   }
   back_db->Set(key, value);
-  // TODO: change stateless to stateful
+  // NOLINT TODO: change stateless to stateful
   return true;
 }

@@ -140,5 +142,4 @@ void ParamDict::CreateKVDB() {
   this->back_db->CreateDB();
 }

-ParamDict::~ParamDict() {
-}
+ParamDict::~ParamDict() {}
@@ -51,7 +51,7 @@ void RocksDBWrapper::SetDBName(std::string db_name) {
 void RocksDBWrapper::Close() {
   if (db_ != nullptr) {
     db_->Close();
-    delete(db_);
+    delete (db_);
     db_ = nullptr;
   }
 }
@@ -32,12 +32,8 @@ void RocksKVDB::Set(std::string key, std::string value) {
   return;
 }

-void RocksKVDB::Close() {
-  this->db_->Close();
-}
+void RocksKVDB::Close() { this->db_->Close(); }

 std::string RocksKVDB::Get(std::string key) { return this->db_->Get(key); }

-RocksKVDB::~RocksKVDB() {
-  this->db_->Close();
-}
+RocksKVDB::~RocksKVDB() { this->db_->Close(); }
@@ -15,14 +15,14 @@
 #include <list>
 #include "boost/algorithm/string.hpp"
 #include "boost/scoped_ptr.hpp"
+#include "core/pdcodegen/pds_option.pb.h"
+#include "core/pdcodegen/plugin/strutil.h"
+#include "core/pdcodegen/plugin/substitute.h"
 #include "google/protobuf/compiler/code_generator.h"
 #include "google/protobuf/compiler/plugin.h"
 #include "google/protobuf/descriptor.h"
 #include "google/protobuf/io/printer.h"
 #include "google/protobuf/io/zero_copy_stream.h"
-#include "core/pdcodegen/pds_option.pb.h"
-#include "core/pdcodegen/plugin/strutil.h"
-#include "core/pdcodegen/plugin/substitute.h"
 using std::string;
 using google::protobuf::Descriptor;
 using google::protobuf::FileDescriptor;

@@ -115,7 +115,8 @@ class PdsCodeGenerator : public CodeGenerator {
       printer.Print("#include \"core/predictor/common/inner_common.h\"\n");
       printer.Print("#include \"core/predictor/framework/service.h\"\n");
       printer.Print("#include \"core/predictor/framework/manager.h\"\n");
-      printer.Print("#include \"core/predictor/framework/service_manager.h\"\n");
+      printer.Print(
+          "#include \"core/predictor/framework/service_manager.h\"\n");
     }
     if (generate_stub) {
       printer.Print("#include <baidu/rpc/parallel_channel.h>\n");

@@ -845,7 +846,8 @@ class PdsCodeGenerator : public CodeGenerator {
       printer.Print("#include \"core/predictor/common/inner_common.h\"\n");
       printer.Print("#include \"core/predictor/framework/service.h\"\n");
       printer.Print("#include \"core/predictor/framework/manager.h\"\n");
-      printer.Print("#include \"core/predictor/framework/service_manager.h\"\n");
+      printer.Print(
+          "#include \"core/predictor/framework/service_manager.h\"\n");
     }
     if (generate_stub) {
       printer.Print("#include <brpc/parallel_channel.h>\n");
@@ -52,9 +52,9 @@
 #include "glog/raw_logging.h"

+#include "core/configure/general_model_config.pb.h"
 #include "core/configure/include/configure_parser.h"
 #include "core/configure/server_configure.pb.h"
-#include "core/configure/general_model_config.pb.h"

 #include "core/predictor/common/constant.h"
 #include "core/predictor/common/types.h"
@@ -45,7 +45,7 @@ int DagView::init(Dag* dag, const std::string& service_name) {
                 << "at:" << si;
       return ERR_MEM_ALLOC_FAILURE;
     }
     VLOG(2) << "stage[" << si << "] name: " << stage->full_name;
     VLOG(2) << "stage[" << si << "] node size: " << stage->nodes.size();
     vstage->full_name = service_name + NAME_DELIMITER + stage->full_name;
     uint32_t node_size = stage->nodes.size();

@@ -74,7 +74,7 @@ int DagView::init(Dag* dag, const std::string& service_name) {
         LOG(WARNING) << "Failed init op, type:" << node->type;
         return ERR_INTERNAL_FAILURE;
       }
       op->set_full_name(service_name + NAME_DELIMITER + node->full_name);
       vnode->conf = node;
       vnode->op = op;

@@ -85,9 +85,9 @@ int DagView::init(Dag* dag, const std::string& service_name) {
       VLOG(2) << "set op pre name: \n"
               << "current op name: " << vstage->nodes.back()->op->op_name()
               << " previous op name: "
-              << _view[si-1]->nodes.back()->op->op_name();
+              << _view[si - 1]->nodes.back()->op->op_name();
       vstage->nodes.back()->op->set_pre_node_name(
-          _view[si-1]->nodes.back()->op->op_name());
+          _view[si - 1]->nodes.back()->op->op_name());
     }
     _view.push_back(vstage);
   }
@@ -16,8 +16,8 @@
 #include <map>
 #include <string>
 #include <utility>
-#include "glog/raw_logging.h"
 #include "core/predictor/common/inner_common.h"
+#include "glog/raw_logging.h"

 namespace baidu {
 namespace paddle_serving {
 namespace predictor {
@@ -197,13 +197,10 @@ int Resource::general_model_initialize(const std::string& path,
   for (int i = 0; i < feed_var_num; ++i) {
     _config->_feed_name[i] = model_config.feed_var(i).name();
     _config->_feed_alias_name[i] = model_config.feed_var(i).alias_name();
-    VLOG(2) << "feed var[" << i << "]: "
-            << _config->_feed_name[i];
-    VLOG(2) << "feed var[" << i << "]: "
-            << _config->_feed_alias_name[i];
+    VLOG(2) << "feed var[" << i << "]: " << _config->_feed_name[i];
+    VLOG(2) << "feed var[" << i << "]: " << _config->_feed_alias_name[i];
     _config->_feed_type[i] = model_config.feed_var(i).feed_type();
-    VLOG(2) << "feed type[" << i << "]: "
-            << _config->_feed_type[i];
+    VLOG(2) << "feed type[" << i << "]: " << _config->_feed_type[i];

     if (model_config.feed_var(i).is_lod_tensor()) {
       VLOG(2) << "var[" << i << "] is lod tensor";
@@ -13,10 +13,10 @@
 // limitations under the License.
 #pragma once
+#include <map>
 #include <memory>
 #include <string>
 #include <vector>
-#include <map>
 #include "core/cube/cube-api/include/cube_api.h"
 #include "core/kvdb/include/kvdb/paddle_rocksdb.h"
 #include "core/predictor/common/inner_common.h"

@@ -36,15 +36,15 @@ class PaddleGeneralModelConfig {
  public:
   std::vector<std::string> _feed_name;
   std::vector<std::string> _feed_alias_name;
   std::vector<int> _feed_type;      // 0 int64, 1 float
   std::vector<bool> _is_lod_feed;   // true lod tensor
   std::vector<bool> _is_lod_fetch;  // whether a fetch var is lod_tensor
   std::vector<int> _capacity;       // capacity for each tensor
   /*
     feed_shape_ for feeded variable
     feed_shape_[i][j] represents the jth dim for ith input Tensor
     if is_lod_feed_[i] == False, feed_shape_[i][0] = -1
   */
   std::vector<std::vector<int>> _feed_shape;
   std::vector<std::string> _fetch_name;
@@ -99,8 +99,8 @@ static void g_change_server_port() {
   if (read_proto_conf(FLAGS_inferservice_path.c_str(),
                       FLAGS_inferservice_file.c_str(),
                       &conf) != 0) {
-    VLOG(2) << "failed to load configure[" << FLAGS_inferservice_path
-            << "," << FLAGS_inferservice_file << "].";
+    VLOG(2) << "failed to load configure[" << FLAGS_inferservice_path << ","
+            << FLAGS_inferservice_file << "].";
     return;
   }
   uint32_t port = conf.port();

@@ -157,8 +157,7 @@ int main(int argc, char** argv) {
     mkdir(FLAGS_log_dir.c_str(), 0777);
     ret = stat(FLAGS_log_dir.c_str(), &st_buf);
     if (ret != 0) {
-      VLOG(2) << "Log path " << FLAGS_log_dir
-              << " not exist, and create fail";
+      VLOG(2) << "Log path " << FLAGS_log_dir << " not exist, and create fail";
       return -1;
     }
   }
@@ -15,8 +15,8 @@
 #pragma once
 #include <gtest/gtest.h>
 #include "core/predictor/framework/channel.h"
-#include "core/predictor/op/op.h"
 #include "core/predictor/msg_data.pb.h"
+#include "core/predictor/op/op.h"

 namespace baidu {
 namespace paddle_serving {
@@ -13,7 +13,7 @@
 // limitations under the License.

 #include "core/predictor/unittest/test_server_manager.h"  // TestServerManager
-#include <gflags/gflags.h> // FLAGS
+#include <gflags/gflags.h>  // FLAGS
 #include <string>
 #include "core/predictor/framework/server.h"  // ServerManager
@@ -53,9 +53,9 @@
 #include "json2pb/json_to_pb.h"
 #endif

+#include "core/configure/general_model_config.pb.h"
 #include "core/configure/include/configure_parser.h"
 #include "core/configure/sdk_configure.pb.h"
-#include "core/configure/general_model_config.pb.h"

 #include "core/sdk-cpp/include/utils.h"
@@ -32,9 +32,9 @@ class EndpointConfigManager {
   EndpointConfigManager()
       : _last_update_timestamp(0), _current_endpointmap_id(1) {}

-  int create(const std::string & sdk_desc_str);
+  int create(const std::string& sdk_desc_str);

-  int load(const std::string & sdk_desc_str);
+  int load(const std::string& sdk_desc_str);

   int create(const char* path, const char* file);
@@ -16,9 +16,9 @@
 #include <map>
 #include <string>
 #include <utility>
-#include "glog/raw_logging.h"
 #include "core/sdk-cpp/include/common.h"
 #include "core/sdk-cpp/include/stub_impl.h"
+#include "glog/raw_logging.h"

 namespace baidu {
 namespace paddle_serving {
@@ -31,7 +31,7 @@ class PredictorApi {
   int register_all();

-  int create(const std::string & sdk_desc_str);
+  int create(const std::string& sdk_desc_str);

   int create(const char* path, const char* file);
@@ -28,13 +28,9 @@ message Tensor {
   repeated int32 shape = 6;
 };

-message FeedInst {
-  repeated Tensor tensor_array = 1;
-};
+message FeedInst { repeated Tensor tensor_array = 1; };

-message FetchInst {
-  repeated Tensor tensor_array = 1;
-};
+message FetchInst { repeated Tensor tensor_array = 1; };

 message Request {
   repeated FeedInst insts = 1;
@@ -35,8 +35,7 @@ int Endpoint::initialize(const EndpointInfo& ep_info) {
       return -1;
     }
     _variant_list.push_back(var);
-    VLOG(2) << "Succ create variant: " << vi
-            << ", endpoint:" << _endpoint_name;
+    VLOG(2) << "Succ create variant: " << vi << ", endpoint:" << _endpoint_name;
   }

   return 0;
@@ -30,7 +30,7 @@ int PredictorApi::register_all() {
   return 0;
 }

-int PredictorApi::create(const std::string & api_desc_str) {
+int PredictorApi::create(const std::string& api_desc_str) {
   VLOG(2) << api_desc_str;
   if (register_all() != 0) {
     LOG(ERROR) << "Failed do register all!";
@@ -54,7 +54,7 @@ int Variant::initialize(const EndpointInfo& ep_info,
   if (_stub_map.size() > 0) {
     VLOG(2) << "Initialize variants from VariantInfo"
             << ", stubs count: " << _stub_map.size();
     return 0;
   }
include(src/CMakeLists.txt)
add_library(utils ${util_srcs})
@@ -15,7 +15,6 @@ limitations under the License. */
 #pragma once

 #include <stdlib.h>
-
 namespace baidu {
 namespace paddle_serving {
FILE(GLOB srcs ${CMAKE_CURRENT_LIST_DIR}/*.cc)
LIST(APPEND util_srcs ${srcs})
@@ -27,4 +27,3 @@ make -j10
 cmake -DPYTHON_INCLUDE_DIR=$PYTHONROOT/include/python2.7/ -DPYTHON_LIBRARIES=$PYTHONROOT/lib/libpython2.7.so -DPYTHON_EXECUTABLE=/home/users/dongdaxiang/software/baidu/third-party/python/bin/python -DCLIENT_ONLY=ON ..
 make -j10
 ```
-
@@ -152,4 +152,3 @@ GLOG_minloglevel=1 bin/serving
 2 -ERROR
 3 - FATAL (Be careful as FATAL log will generate a coredump)
-
@@ -193,6 +193,3 @@ total num: 25000
 acc num: 22014
 acc: 0.88056
 ```
-
-
-
@@ -143,6 +143,3 @@ self.op_dict = {
     "general_dist_kv": "GeneralDistKVOp"
 }
 ```
-
-
-
@@ -54,10 +54,3 @@ op_seq_maker.add_op(dist_kv_op)
 op_seq_maker.add_op(general_infer_op)
 op_seq_maker.add_op(general_response_op)
 ```
-
-
-
-
-
-
-
@@ -21,8 +21,8 @@
 #include <vector>
 #include "core/configure/include/configure_parser.h"
 #include "core/configure/inferencer_configure.pb.h"
-#include "paddle_inference_api.h"  // NOLINT
 #include "core/predictor/framework/infer.h"
+#include "paddle_inference_api.h"  // NOLINT

 namespace baidu {
 namespace paddle_serving {

@@ -336,7 +336,7 @@ class SigmoidModel {
       return -1;
     }
     VLOG(2) << "load sigmoid_b [" << _sigmoid_b._params[0] << "] ["
             << _sigmoid_b._params[1] << "].";
     _exp_max_input = exp_max;
     _exp_min_input = exp_min;
     return 0;

@@ -373,7 +373,7 @@ class SigmoidFluidModel {
     clone_model.reset(new SigmoidFluidModel());
     clone_model->_sigmoid_core = _sigmoid_core;
     clone_model->_fluid_core = _fluid_core->Clone();
-    return std::move(clone_model);
+    return std::move(clone_model);  // NOLINT
   }

  public:

@@ -459,7 +459,7 @@ class FluidCpuWithSigmoidCore : public FluidFamilyCore {
   }

  protected:
-  std::unique_ptr<SigmoidFluidModel> _core;
+  std::unique_ptr<SigmoidFluidModel> _core;  // NOLINT
 };

 class FluidCpuNativeDirWithSigmoidCore : public FluidCpuWithSigmoidCore {
@@ -23,8 +23,8 @@
 #include <vector>
 #include "core/configure/include/configure_parser.h"
 #include "core/configure/inferencer_configure.pb.h"
-#include "paddle_inference_api.h"  // NOLINT
 #include "core/predictor/framework/infer.h"
+#include "paddle_inference_api.h"  // NOLINT

 DECLARE_int32(gpuid);

@@ -334,13 +334,13 @@ class SigmoidModel {
       return -1;
     }
     VLOG(2) << "load sigmoid_w [" << _sigmoid_w._params[0] << "] ["
             << _sigmoid_w._params[1] << "].";
     if (0 != _sigmoid_b.init(2, 1, sigmoid_b_file) || 0 != _sigmoid_b.load()) {
       LOG(ERROR) << "load params sigmoid_b failed.";
       return -1;
     }
     VLOG(2) << "load sigmoid_b [" << _sigmoid_b._params[0] << "] ["
             << _sigmoid_b._params[1] << "].";
     _exp_max_input = exp_max;
     _exp_min_input = exp_min;
     return 0;
@@ -13,6 +13,7 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
+# pylint: disable=doc-string-missing

 from __future__ import unicode_literals, absolute_import
 import os

@@ -29,6 +30,7 @@ from bert_reader import BertReader

 args = benchmark_args()

+
 def single_func(idx, resource):
     fin = open("data-c.txt")
     if args.request == "rpc":

@@ -38,29 +40,32 @@ def single_func(idx, resource):
         client = Client()
         client.load_client_config(args.model)
         client.connect([resource["endpoint"][idx % 4]])

         start = time.time()
         for line in fin:
             feed_dict = reader.process(line)
-            result = client.predict(feed=feed_dict,
-                                    fetch=fetch)
+            result = client.predict(feed=feed_dict, fetch=fetch)
         end = time.time()
     elif args.request == "http":
         start = time.time()
-        header = {"Content-Type":"application/json"}
+        header = {"Content-Type": "application/json"}
         for line in fin:
             #dict_data = {"words": "this is for output ", "fetch": ["pooled_output"]}
             dict_data = {"words": line, "fetch": ["pooled_output"]}
-            r = requests.post('http://{}/bert/prediction'.format(resource["endpoint"][0]),
-                              data=json.dumps(dict_data), headers=header)
+            r = requests.post(
+                'http://{}/bert/prediction'.format(resource["endpoint"][0]),
+                data=json.dumps(dict_data),
+                headers=header)
         end = time.time()
     return [[end - start]]

+
 if __name__ == '__main__':
     multi_thread_runner = MultiThreadRunner()
-    endpoint_list = ["127.0.0.1:9494", "127.0.0.1:9495", "127.0.0.1:9496", "127.0.0.1:9497"]
+    endpoint_list = [
+        "127.0.0.1:9494", "127.0.0.1:9495", "127.0.0.1:9496", "127.0.0.1:9497"
+    ]
     #endpoint_list = endpoint_list + endpoint_list + endpoint_list
     #result = multi_thread_runner.run(single_func, args.thread, {"endpoint":endpoint_list})
-    result = single_func(0, {"endpoint":endpoint_list})
+    result = single_func(0, {"endpoint": endpoint_list})
     print(result)
 # coding:utf-8
+# pylint: disable=doc-string-missing
 import os
 import sys
 import numpy as np

@@ -143,9 +144,12 @@ def single_func(idx, resource):
         end = time.time()
     return [[end - start]]

 if __name__ == '__main__':
     multi_thread_runner = MultiThreadRunner()
-    result = multi_thread_runner.run(single_func, args.thread, {"endpoint":["127.0.0.1:9494", "127.0.0.1:9495", "127.0.0.1:9496", "127.0.0.1:9497"]})
+    result = multi_thread_runner.run(single_func, args.thread, {
+        "endpoint": [
+            "127.0.0.1:9494", "127.0.0.1:9495", "127.0.0.1:9496",
+            "127.0.0.1:9497"
+        ]
+    })
+# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# pylint: disable=doc-string-missing
 from batching import pad_batch_data
 import tokenization


 class BertReader():
     def __init__(self, vocab_file="", max_seq_len=128):
         self.vocab_file = vocab_file

@@ -48,8 +63,10 @@ class BertReader():
         position_ids = list(range(len(token_ids)))
         p_token_ids, p_pos_ids, p_text_type_ids, input_mask = \
             self.pad_batch(token_ids, text_type_ids, position_ids)
-        feed_result = {"input_ids": p_token_ids.reshape(-1).tolist(),
-                       "position_ids": p_pos_ids.reshape(-1).tolist(),
-                       "segment_ids": p_text_type_ids.reshape(-1).tolist(),
-                       "input_mask": input_mask.reshape(-1).tolist()}
+        feed_result = {
+            "input_ids": p_token_ids.reshape(-1).tolist(),
+            "position_ids": p_pos_ids.reshape(-1).tolist(),
+            "segment_ids": p_text_type_ids.reshape(-1).tolist(),
+            "input_mask": input_mask.reshape(-1).tolist()
+        }
         return feed_result
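A minimal usage sketch for this reader (hypothetical input text; the `vocab.txt` path and constructor arguments mirror the defaults above):

```python
# Turn one raw sentence into the feed dict the serving client expects,
# as done per line in benchmark.py above.
reader = BertReader(vocab_file="vocab.txt", max_seq_len=128)
feed_dict = reader.process("an example sentence to embed")
print(sorted(feed_dict.keys()))
# ['input_ids', 'input_mask', 'position_ids', 'segment_ids']
```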
@@ -12,12 +12,13 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
+# pylint: disable=doc-string-missing

 from paddle_serving_server_gpu.web_service import WebService
 from bert_reader import BertReader
 import sys
 import os


 class BertService(WebService):
     def load(self):
         self.reader = BertReader(vocab_file="vocab.txt", max_seq_len=20)

@@ -26,12 +27,12 @@ class BertService(WebService):
         feed_res = self.reader.process(feed["words"].encode("utf-8"))
         return feed_res, fetch


 bert_service = BertService(name="bert")
 bert_service.load()
 bert_service.load_model_config(sys.argv[1])
 gpu_ids = os.environ["CUDA_VISIBLE_DEVICES"]
 gpus = [int(x) for x in gpu_ids.split(",")]
 bert_service.set_gpus(gpus)
-bert_service.prepare_server(
-    workdir="workdir", port=9494, device="gpu")
+bert_service.prepare_server(workdir="workdir", port=9494, device="gpu")
 bert_service.run_server()
@@ -11,7 +11,7 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-
+# pylint: disable=doc-string-missing
 import paddlehub as hub
 import paddle.fluid as fluid
 import sys

@@ -19,7 +19,8 @@ import paddle_serving_client.io as serving_io

 model_name = "bert_chinese_L-12_H-768_A-12"
 module = hub.Module(model_name)
-inputs, outputs, program = module.context(trainable=True, max_seq_len=int(sys.argv[1]))
+inputs, outputs, program = module.context(
+    trainable=True, max_seq_len=int(sys.argv[1]))
 place = fluid.core_avx.CPUPlace()
 exe = fluid.Executor(place)
 input_ids = inputs["input_ids"]

@@ -35,10 +36,12 @@ feed_var_names = [

 target_vars = [pooled_output, sequence_output]

-serving_io.save_model("bert_seq{}_model".format(sys.argv[1]), "bert_seq{}_client".format(sys.argv[1]), {
-    "input_ids": input_ids,
-    "position_ids": position_ids,
-    "segment_ids": segment_ids,
-    "input_mask": input_mask,
-}, {"pooled_output": pooled_output,
-    "sequence_output": sequence_output}, program)
+serving_io.save_model(
+    "bert_seq{}_model".format(sys.argv[1]),
+    "bert_seq{}_client".format(sys.argv[1]), {
+        "input_ids": input_ids,
+        "position_ids": position_ids,
+        "segment_ids": segment_ids,
+        "input_mask": input_mask,
+    }, {"pooled_output": pooled_output,
+        "sequence_output": sequence_output}, program)
@@ -26,7 +26,7 @@ import sentencepiece as spm
 import pickle


-def convert_to_unicode(text):
+def convert_to_unicode(text):  # pylint: disable=doc-string-with-all-args
     """Converts `text` to Unicode (if it's not already), assuming utf-8 input."""
     if six.PY3:
         if isinstance(text, str):

@@ -46,7 +46,7 @@ def convert_to_unicode(text):
         raise ValueError("Not running on Python2 or Python 3?")


-def printable_text(text):
+def printable_text(text):  # pylint: disable=doc-string-with-all-args
     """Returns text encoded in a way suitable for print or `tf.logging`."""

     # These functions want `str` for both Python2 and Python3, but in one case

@@ -69,7 +69,7 @@ def printable_text(text):
         raise ValueError("Not running on Python2 or Python 3?")


-def load_vocab(vocab_file):
+def load_vocab(vocab_file):  # pylint: disable=doc-string-with-all-args, doc-string-with-returns
     """Loads a vocabulary file into a dictionary."""
     vocab = collections.OrderedDict()
     fin = io.open(vocab_file, "r", encoding="UTF-8")

@@ -163,7 +163,7 @@ class CharTokenizer(object):
         return convert_by_vocab(self.inv_vocab, ids)


-class WSSPTokenizer(object):
+class WSSPTokenizer(object):  # pylint: disable=doc-string-missing
     def __init__(self, vocab_file, sp_model_dir, word_dict, ws=True,
                  lower=True):
         self.vocab = load_vocab(vocab_file)

@@ -175,7 +175,7 @@ class WSSPTokenizer(object):
         self.window_size = 5
         self.sp_model.Load(sp_model_dir)

-    def cut(self, chars):
+    def cut(self, chars):  # pylint: disable=doc-string-missing
         words = []
         idx = 0
         while idx < len(chars):

@@ -192,7 +192,7 @@ class WSSPTokenizer(object):
             idx += i
         return words

-    def tokenize(self, text, unk_token="[UNK]"):
+    def tokenize(self, text, unk_token="[UNK]"):  # pylint: disable=doc-string-missing
         text = convert_to_unicode(text)
         if self.ws:
             text = [s for s in self.cut(text) if s != ' ']

@@ -228,7 +228,7 @@ class BasicTokenizer(object):
         """
         self.do_lower_case = do_lower_case

-    def tokenize(self, text):
+    def tokenize(self, text):  # pylint: disable=doc-string-with-all-args, doc-string-with-returns
         """Tokenizes a piece of text."""
         text = convert_to_unicode(text)
         text = self._clean_text(text)

@@ -345,7 +345,7 @@ class WordpieceTokenizer(object):
         self.max_input_chars_per_word = max_input_chars_per_word
         self.use_sentence_piece_vocab = use_sentence_piece_vocab

-    def tokenize(self, text):
+    def tokenize(self, text):  # pylint: disable=doc-string-with-all-args
         """Tokenizes a piece of text into its word pieces.

         This uses a greedy longest-match-first algorithm to perform tokenization

@@ -432,8 +432,8 @@ def _is_punctuation(char):
     # Characters such as "^", "$", and "`" are not in the Unicode
     # Punctuation class but we treat them as punctuation anyways, for
     # consistency.
-    if ((cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64)
-            or (cp >= 91 and cp <= 96) or (cp >= 123 and cp <= 126)):
+    if ((cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or
+            (cp >= 91 and cp <= 96) or (cp >= 123 and cp <= 126)):
         return True
     cat = unicodedata.category(char)
     if cat.startswith("P"):
# CTR task on Criteo Dataset
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=doc-string-missing
import argparse


def parse_args():
    parser = argparse.ArgumentParser(description="PaddlePaddle CTR example")
    parser.add_argument(
        '--train_data_path',
        type=str,
        default='./data/raw/train.txt',
        help="The path of training dataset")
    parser.add_argument(
        '--sparse_only',
        type=bool,
        default=False,
        help="Whether we use sparse features only")
    parser.add_argument(
        '--test_data_path',
        type=str,
        default='./data/raw/valid.txt',
        help="The path of testing dataset")
    parser.add_argument(
        '--batch_size',
        type=int,
        default=1000,
        help="The size of mini-batch (default:1000)")
    parser.add_argument(
        '--embedding_size',
        type=int,
        default=10,
        help="The size for embedding layer (default:10)")
    parser.add_argument(
        '--num_passes',
        type=int,
        default=10,
        help="The number of passes to train (default: 10)")
    parser.add_argument(
        '--model_output_dir',
        type=str,
        default='models',
        help='The path for model to store (default: models)')
    parser.add_argument(
        '--sparse_feature_dim',
        type=int,
        default=1000001,
        help='sparse feature hashing space for index processing')
    parser.add_argument(
        '--is_local',
        type=int,
        default=1,
        help='Local train or distributed train (default: 1)')
    parser.add_argument(
        '--cloud_train',
        type=int,
        default=0,
        help='Local train or distributed train on paddlecloud (default: 0)')
    parser.add_argument(
        '--async_mode',
        action='store_true',
        default=False,
        help='Whether to start pserver in async mode to support ASGD')
    parser.add_argument(
        '--no_split_var',
        action='store_true',
        default=False,
        help='Whether to split variables into blocks when update_method is pserver')
    parser.add_argument(
        '--role',
        type=str,
        default='pserver',  # trainer or pserver
        help='The role of this node: trainer or pserver (default: pserver)')
    parser.add_argument(
        '--endpoints',
        type=str,
        default='127.0.0.1:6000',
        help='The pserver endpoints, like: 127.0.0.1:6000,127.0.0.1:6001')
    parser.add_argument(
        '--current_endpoint',
        type=str,
        default='127.0.0.1:6000',
        help='The endpoint of the current pserver (default: 127.0.0.1:6000)')
    parser.add_argument(
        '--trainer_id',
        type=int,
        default=0,
        help='The id of the current trainer (default: 0)')
    parser.add_argument(
        '--trainers',
        type=int,
        default=1,
        help='The number of trainers (default: 1)')
    return parser.parse_args()
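A typical invocation of the training entry point that consumes these flags (flag values are illustrative, not from the repo):

# python local_train.py --batch_size 1000 --embedding_size 10 --num_passes 10
# Distributed runs additionally set --role/--endpoints/--current_endpoint/--trainers.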
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=doc-string-missing
import sys
import paddle.fluid.incubate.data_generator as dg


class CriteoDataset(dg.MultiSlotDataGenerator):
    def setup(self, sparse_feature_dim):
        self.cont_min_ = [0, -3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
        self.cont_max_ = [
            20, 600, 100, 50, 64000, 500, 100, 50, 500, 10, 10, 10, 50
        ]
        self.cont_diff_ = [
            20, 603, 100, 50, 64000, 500, 100, 50, 500, 10, 10, 10, 50
        ]
        self.hash_dim_ = sparse_feature_dim
        # here, training data are lines with line_index < train_idx_
        self.train_idx_ = 41256555
...@@ -23,8 +43,9 @@ class CriteoDataset(dg.MultiSlotDataGenerator):
            dense_feature.append((float(features[idx]) - self.cont_min_[idx - 1]) / \
                self.cont_diff_[idx - 1])
        for idx in self.categorical_range_:
            sparse_feature.append(
                [hash(str(idx) + features[idx]) % self.hash_dim_])
        return dense_feature, sparse_feature, [int(features[0])]

    def infer_reader(self, filelist, batch, buf_size):
...@@ -32,16 +53,17 @@ class CriteoDataset(dg.MultiSlotDataGenerator):
        for fname in filelist:
            with open(fname.strip(), "r") as fin:
                for line in fin:
                    dense_feature, sparse_feature, label = self._process_line(
                        line)
                    #yield dense_feature, sparse_feature, label
                    yield [dense_feature] + sparse_feature + [label]

        import paddle
        batch_iter = paddle.batch(
            paddle.reader.shuffle(
                local_iter, buf_size=buf_size),
            batch_size=batch)
        return batch_iter

    def generate_sample(self, line):
        def data_iter():
...@@ -54,6 +76,7 @@ class CriteoDataset(dg.MultiSlotDataGenerator):
        return data_iter


if __name__ == "__main__":
    criteo_dataset = CriteoDataset()
    criteo_dataset.setup(int(sys.argv[1]))
...
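The generator above is not called directly during training; it is attached to the dataset as a pipe command, as the training script below does (the 1000001 argument is the default --sparse_feature_dim):

# dataset.set_pipe_command("python criteo_reader.py 1000001")
# Each stdin line is parsed into one dense slot, 26 sparse slots, and a label.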
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=doc-string-missing
from __future__ import print_function

from args import parse_args
...@@ -17,15 +32,17 @@ def train():
    dense_input = fluid.layers.data(
        name="dense_input", shape=[dense_feature_dim], dtype='float32')

    sparse_input_ids = [
        fluid.layers.data(
            name="C" + str(i), shape=[1], lod_level=1, dtype="int64")
        for i in range(1, 27)
    ]

    label = fluid.layers.data(name='label', shape=[1], dtype='int64')

    #nn_input = None if sparse_only else dense_input
    nn_input = dense_input

    predict_y, loss, auc_var, batch_auc_var = dnn_model(
        nn_input, sparse_input_ids, label, args.embedding_size,
        args.sparse_feature_dim)

    optimizer = fluid.optimizer.SGD(learning_rate=1e-4)
    optimizer.minimize(loss)
...@@ -36,16 +53,17 @@ def train():
    dataset.set_use_var([dense_input] + sparse_input_ids + [label])

    python_executable = "python"
    pipe_command = "{} criteo_reader.py {}".format(python_executable,
                                                   args.sparse_feature_dim)
    dataset.set_pipe_command(pipe_command)

    dataset.set_batch_size(128)
    thread_num = 10
    dataset.set_thread(thread_num)

    whole_filelist = [
        "raw_data/part-%d" % x for x in range(len(os.listdir("raw_data")))
    ]

    dataset.set_filelist(whole_filelist[:thread_num])
    dataset.load_into_memory()
...@@ -53,8 +71,7 @@ def train():
    epochs = 1
    for i in range(epochs):
        exe.train_from_dataset(
            program=fluid.default_main_program(), dataset=dataset, debug=True)
        print("epoch {} finished".format(i))

    import paddle_serving_client.io as server_io
...@@ -63,9 +80,9 @@ def train():
        feed_var_dict["sparse_{}".format(i)] = sparse
    fetch_var_dict = {"prob": predict_y}

    server_io.save_model("ctr_serving_model", "ctr_client_conf", feed_var_dict,
                         fetch_var_dict, fluid.default_main_program())


if __name__ == '__main__':
    train()
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=doc-string-missing
import paddle.fluid as fluid
import math


def dnn_model(dense_input, sparse_inputs, label, embedding_size,
              sparse_feature_dim):
    def embedding_layer(input):
        emb = fluid.layers.embedding(
            input=input,
            is_sparse=True,
            is_distributed=False,
            size=[sparse_feature_dim, embedding_size],
            param_attr=fluid.ParamAttr(
                name="SparseFeatFactors",
                initializer=fluid.initializer.Uniform()))
        return fluid.layers.sequence_pool(input=emb, pool_type='sum')

    def mlp_input_tensor(emb_sums, dense_tensor):
...@@ -21,18 +37,30 @@ def dnn_model(dense_input, sparse_inputs, label,
        return fluid.layers.concat(emb_sums + [dense_tensor], axis=1)

    def mlp(mlp_input):
        fc1 = fluid.layers.fc(input=mlp_input,
                              size=400,
                              act='relu',
                              param_attr=fluid.ParamAttr(
                                  initializer=fluid.initializer.Normal(
                                      scale=1 / math.sqrt(mlp_input.shape[1]))))
        fc2 = fluid.layers.fc(input=fc1,
                              size=400,
                              act='relu',
                              param_attr=fluid.ParamAttr(
                                  initializer=fluid.initializer.Normal(
                                      scale=1 / math.sqrt(fc1.shape[1]))))
        fc3 = fluid.layers.fc(input=fc2,
                              size=400,
                              act='relu',
                              param_attr=fluid.ParamAttr(
                                  initializer=fluid.initializer.Normal(
                                      scale=1 / math.sqrt(fc2.shape[1]))))
        pre = fluid.layers.fc(input=fc3,
                              size=2,
                              act='softmax',
                              param_attr=fluid.ParamAttr(
                                  initializer=fluid.initializer.Normal(
                                      scale=1 / math.sqrt(fc3.shape[1]))))
        return pre

    emb_sums = list(map(embedding_layer, sparse_inputs))
...
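A shape walk-through of `dnn_model` under the default flags (a sketch for orientation, not part of the file):

# 26 sparse slots -> embedding [1000001, 10] -> sum-pool -> 26 tensors of [batch, 10]
# concat(emb_sums + [dense_input]) -> [batch, 26 * 10 + 13] = [batch, 273]
# three 400-unit ReLU fc layers -> 2-way softmax; each layer uses
# Normal(scale=1/sqrt(fan_in)) init to keep activation variance roughly stable.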
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=doc-string-missing
from paddle_serving_client import Client
import paddle
import sys
...@@ -13,9 +28,12 @@ batch = 1
buf_size = 100
dataset = criteo.CriteoDataset()
dataset.setup(1000001)
test_filelists = [
    "{}/part-%d".format(sys.argv[2]) % x
    for x in range(len(os.listdir(sys.argv[2])))
]
reader = dataset.infer_reader(test_filelists[len(test_filelists) - 40:], batch,
                              buf_size)
label_list = []
prob_list = []
...@@ -25,4 +43,3 @@ for data in reader():
        feed_dict["sparse_{}".format(i - 1)] = data[0][i]
    fetch_map = client.predict(feed=feed_dict, fetch=["prob"])
    print(fetch_map)
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=doc-string-missing
import os
import sys
from paddle_serving_server import OpMaker
...
...@@ -19,4 +19,4 @@ python -m paddle_serving_server.web_serve --model uci_housing_model/ --thread 10
Prediction through HTTP POST
``` shell
curl -H "Content-Type:application/json" -X POST -d '{"x": [0.0137, -0.1136, 0.2553, -0.0692, 0.0582, -0.0727, -0.1583, -0.0584, 0.6283, 0.4919, 0.1856, 0.0795, -0.0332], "fetch":["price"]}' http://127.0.0.1:9393/uci/prediction
```
\ No newline at end of file
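The same request can be sent from Python; a minimal sketch using `requests`, with the payload copied from the curl line above:

``` python
import requests

payload = {
    "x": [0.0137, -0.1136, 0.2553, -0.0692, 0.0582, -0.0727, -0.1583,
          -0.0584, 0.6283, 0.4919, 0.1856, 0.0795, -0.0332],
    "fetch": ["price"],
}
# The web service answers with the fetched variables as JSON.
r = requests.post("http://127.0.0.1:9393/uci/prediction", json=payload)
print(r.json())
```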
...@@ -11,6 +11,8 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=doc-string-missing

from paddle_serving_client import Client
from paddle_serving_client.utils import MultiThreadRunner
from paddle_serving_client.utils import benchmark_args
...@@ -21,28 +23,35 @@ import requests

args = benchmark_args()


def single_func(idx, resource):
    if args.request == "rpc":
        client = Client()
        client.load_client_config(args.model)
        client.connect([args.endpoint])
        train_reader = paddle.batch(
            paddle.reader.shuffle(
                paddle.dataset.uci_housing.train(), buf_size=500),
            batch_size=1)
        start = time.time()
        for data in train_reader():
            fetch_map = client.predict(feed={"x": data[0][0]}, fetch=["price"])
        end = time.time()
        return [[end - start]]
    elif args.request == "http":
        train_reader = paddle.batch(
            paddle.reader.shuffle(
                paddle.dataset.uci_housing.train(), buf_size=500),
            batch_size=1)
        start = time.time()
        for data in train_reader():
            r = requests.post(
                'http://{}/uci/prediction'.format(args.endpoint),
                data={"x": data[0]})
        end = time.time()
        return [[end - start]]


multi_thread_runner = MultiThreadRunner()
result = multi_thread_runner.run(single_func, args.thread, {})
print(result)
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=doc-string-missing
import sys
import paddle
import paddle.fluid as fluid

train_reader = paddle.batch(
    paddle.reader.shuffle(
        paddle.dataset.uci_housing.train(), buf_size=500),
    batch_size=16)

test_reader = paddle.batch(
    paddle.reader.shuffle(
        paddle.dataset.uci_housing.test(), buf_size=500),
    batch_size=16)

x = fluid.data(name='x', shape=[None, 13], dtype='float32')
y = fluid.data(name='y', shape=[None, 1], dtype='float32')
...@@ -26,11 +45,9 @@ import paddle_serving_client.io as serving_io

for pass_id in range(30):
    for data_train in train_reader():
        avg_loss_value, = exe.run(fluid.default_main_program(),
                                  feed=feeder.feed(data_train),
                                  fetch_list=[avg_loss])

serving_io.save_model("uci_housing_model", "uci_housing_client", {"x": x},
                      {"price": y_predict}, fluid.default_main_program())
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=doc-string-missing
from paddle_serving_client import Client
import sys
...@@ -6,10 +21,11 @@ client.load_client_config(sys.argv[1])
client.connect(["127.0.0.1:9393"])

import paddle
test_reader = paddle.batch(
    paddle.reader.shuffle(
        paddle.dataset.uci_housing.test(), buf_size=500),
    batch_size=1)

for data in test_reader():
    fetch_map = client.predict(feed={"x": data[0][0]}, fetch=["price"])
    print("{} {}".format(fetch_map["price"][0], data[0][1][0]))
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=doc-string-missing
import os
import sys
from paddle_serving_server import OpMaker
...
...@@ -11,6 +11,7 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=doc-string-missing

import sys
import time
...@@ -22,6 +23,7 @@ from paddle_serving_client.utils import benchmark_args

args = benchmark_args()


def single_func(idx, resource):
    imdb_dataset = IMDBDataset()
    imdb_dataset.load_resource(args.vocab)
...@@ -40,18 +42,21 @@ def single_func(idx, resource):
            fin = open(fn)
            for line in fin:
                word_ids, label = imdb_dataset.get_words_and_label(line)
                fetch_map = client.predict(
                    feed={"words": word_ids}, fetch=["prediction"])
    elif args.request == "http":
        for fn in filelist:
            fin = open(fn)
            for line in fin:
                word_ids, label = imdb_dataset.get_words_and_label(line)
                r = requests.post(
                    "http://{}/imdb/prediction".format(args.endpoint),
                    data={"words": word_ids,
                          "fetch": ["prediction"]})
    end = time.time()
    return [[end - start]]


multi_thread_runner = MultiThreadRunner()
result = multi_thread_runner.run(single_func, args.thread, {})
print(result)
...@@ -11,6 +11,7 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=doc-string-missing

import sys
import os
...@@ -18,6 +19,7 @@ import paddle
import re
import paddle.fluid.incubate.data_generator as dg


class IMDBDataset(dg.MultiSlotDataGenerator):
    def load_resource(self, dictfile):
        self._vocab = {}
...@@ -42,7 +44,7 @@ class IMDBDataset(dg.MultiSlotDataGenerator):
        send = '|'.join(line.split('|')[:-1]).lower().replace("<br />",
                                                              " ").strip()
        label = [int(line.split('|')[-1])]

        words = [x for x in self._pattern.split(send) if x and x != " "]
        feas = [
            self._vocab[x] if x in self._vocab else self._unk_id for x in words
...@@ -56,9 +58,11 @@ class IMDBDataset(dg.MultiSlotDataGenerator):
            for line in fin:
                feas, label = self.get_words_and_label(line)
                yield feas, label

        import paddle
        batch_iter = paddle.batch(
            paddle.reader.shuffle(
                local_iter, buf_size=buf_size),
            batch_size=batch)
        return batch_iter
...@@ -66,13 +70,15 @@ class IMDBDataset(dg.MultiSlotDataGenerator):
        def memory_iter():
            for i in range(1000):
                yield self.return_value

        def data_iter():
            feas, label = self.get_words_and_label(line)
            yield ("words", feas), ("label", label)

        return data_iter


if __name__ == "__main__":
    imdb = IMDBDataset()
    imdb.load_resource("imdb.vocab")
    imdb.run_from_stdin()
...@@ -3,4 +3,3 @@ tar -xzf imdb_service.tar.gz
wget --no-check-certificate https://fleet.bj.bcebos.com/text_classification_data.tar.gz
tar -zxvf text_classification_data.tar.gz
python text_classify_service.py serving_server_model/ workdir imdb.vocab
...@@ -11,6 +11,7 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=doc-string-missing

import os
import sys
import paddle
...
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=doc-string-missing, doc-string-with-all-args, doc-string-with-returns
import sys
import time
import numpy as np
...@@ -13,10 +28,9 @@ def bow_net(data,
            hid_dim=128,
            hid_dim2=96,
            class_dim=2):
    """ bow net. """
    emb = fluid.layers.embedding(
        input=data, size=[dict_dim, emb_dim], is_sparse=True)
    bow = fluid.layers.sequence_pool(input=emb, pool_type='sum')
    bow_tanh = fluid.layers.tanh(bow)
    fc_1 = fluid.layers.fc(input=bow_tanh, size=hid_dim, act="tanh")
...@@ -37,10 +51,9 @@ def cnn_net(data,
            hid_dim2=96,
            class_dim=2,
            win_size=3):
    """ conv net. """
    emb = fluid.layers.embedding(
        input=data, size=[dict_dim, emb_dim], is_sparse=True)

    conv_3 = fluid.nets.sequence_conv_pool(
        input=emb,
...@@ -67,9 +80,7 @@ def lstm_net(data,
             hid_dim2=96,
             class_dim=2,
             emb_lr=30.0):
    """ lstm net. """
    emb = fluid.layers.embedding(
        input=data,
        size=[dict_dim, emb_dim],
...@@ -103,9 +114,7 @@ def gru_net(data,
            hid_dim2=96,
            class_dim=2,
            emb_lr=400.0):
    """ gru net. """
    emb = fluid.layers.embedding(
        input=data,
        size=[dict_dim, emb_dim],
...
...@@ -11,6 +11,7 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=doc-string-missing

from paddle_serving_client import Client
from imdb_reader import IMDBDataset
import sys
...@@ -31,4 +32,3 @@ for line in sys.stdin:
    fetch = ["acc", "cost", "prediction"]
    fetch_map = client.predict(feed=feed, fetch=fetch)
    print("{} {}".format(fetch_map["prediction"][1], label[0]))
...@@ -11,6 +11,7 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=doc-string-missing

from paddle_serving_client import Client
import sys
...
...@@ -11,17 +11,20 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=doc-string-missing

from paddle_serving_server.web_service import WebService
from imdb_reader import IMDBDataset
import sys


class IMDBService(WebService):
    def prepare_dict(self, args={}):
        if len(args) == 0:
            exit(-1)
        self.dataset = IMDBDataset()
        self.dataset.load_resource(args["dict_file_path"])

    def preprocess(self, feed={}, fetch=[]):
        if "words" not in feed:
            exit(-1)
...@@ -29,8 +32,9 @@ class IMDBService(WebService):
        res_feed["words"] = self.dataset.get_words_only(feed["words"])[0]
        return res_feed, fetch


imdb_service = IMDBService(name="imdb")
imdb_service.load_model_config(sys.argv[1])
imdb_service.prepare_server(workdir=sys.argv[2], port=9393, device="cpu")
imdb_service.prepare_dict({"dict_file_path": sys.argv[3]})
imdb_service.run_server()
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=doc-string-missing
import sys
import os
...
...@@ -11,6 +11,7 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=doc-string-missing

from paddle.fluid import Executor
from paddle.fluid.compiler import CompiledProgram
...@@ -22,6 +23,7 @@ from paddle.fluid.io import save_inference_model
from ..proto import general_model_config_pb2 as model_conf
import os


def save_model(server_model_folder,
               client_config_folder,
               feed_var_dict,
...@@ -32,8 +34,12 @@ def save_model(server_model_folder,
    feed_var_names = [feed_var_dict[x].name for x in feed_var_dict]
    target_vars = fetch_var_dict.values()

    save_inference_model(
        server_model_folder,
        feed_var_names,
        target_vars,
        executor,
        main_program=main_program)

    config = model_conf.GeneralModelConfig()
...@@ -67,7 +73,7 @@ def save_model(server_model_folder,
        fetch_var.fetch_type = 0
        if fetch_var_dict[key].dtype == core.VarDesc.VarType.FP32:
            fetch_var.fetch_type = 1

        if fetch_var.is_lod_tensor:
            fetch_var.shape.extend([-1])
...@@ -82,15 +88,15 @@ def save_model(server_model_folder,
    cmd = "mkdir -p {}".format(client_config_folder)
    os.system(cmd)

    with open("{}/serving_client_conf.prototxt".format(client_config_folder),
              "w") as fout:
        fout.write(str(config))
    with open("{}/serving_server_conf.prototxt".format(server_model_folder),
              "w") as fout:
        fout.write(str(config))
    with open("{}/serving_client_conf.stream.prototxt".format(
            client_config_folder), "wb") as fout:
        fout.write(config.SerializeToString())
    with open("{}/serving_server_conf.stream.prototxt".format(
            server_model_folder), "wb") as fout:
        fout.write(config.SerializeToString())
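For reference, the training scripts above call this entry point as follows; a minimal usage sketch (the names `x` and `y_predict` come from the fit_a_line example):

# import paddle_serving_client.io as serving_io
# serving_io.save_model("uci_housing_model", "uci_housing_client", {"x": x},
#                       {"price": y_predict}, fluid.default_main_program())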
...@@ -11,6 +11,8 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=doc-string-missing


def acc(prob, label, threshold):
    # we suppose prob is the probability for the label to be one
...@@ -21,5 +23,3 @@ def acc(prob, label, threshold):
        if (prob - threshold) * (label - prob) > 0:
            right += 1
    return float(right) / total
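The sign test `(prob - threshold) * (label - prob) > 0` marks a sample correct exactly when the probability and the 0/1 label sit on the same side of the threshold; a quick worked check with threshold = 0.5:

# prob = 0.8, label = 1: (0.8 - 0.5) * (1 - 0.8) =  0.06 > 0 -> counted correct
# prob = 0.8, label = 0: (0.8 - 0.5) * (0 - 0.8) = -0.24 < 0 -> counted wrong
# prob = 0.3, label = 0: (0.3 - 0.5) * (0 - 0.3) =  0.06 > 0 -> counted correct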
...@@ -11,6 +11,8 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=doc-string-missing, doc-string-with-all-args, doc-string-with-returns


def tied_rank(x):
    """
...@@ -24,21 +26,22 @@ def tied_rank(x):
    score : list of numbers
        The tied rank of each element in x
    """
    sorted_x = sorted(zip(x, range(len(x))))
    r = [0 for k in x]
    cur_val = sorted_x[0][0]
    last_rank = 0
    for i in range(len(sorted_x)):
        if cur_val != sorted_x[i][0]:
            cur_val = sorted_x[i][0]
            for j in range(last_rank, i):
                r[sorted_x[j][1]] = float(last_rank + 1 + i) / 2.0
            last_rank = i
        if i == len(sorted_x) - 1:
            for j in range(last_rank, i + 1):
                r[sorted_x[j][1]] = float(last_rank + i + 2) / 2.0
    return r


def auc(actual, posterior):
    """
    Computes the area under the receiver operating characteristic curve (AUC)
...@@ -56,10 +59,9 @@ def auc(actual, posterior):
        The area under the ROC curve between actual and posterior
    """
    r = tied_rank(posterior)
    num_positive = len([0 for x in actual if x == 1])
    num_negative = len(actual) - num_positive
    sum_positive = sum([r[i] for i in range(len(r)) if actual[i] == 1])
    auc = ((sum_positive - num_positive * (num_positive + 1) / 2.0) /
           (num_negative * num_positive))
    return auc
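This is the rank-statistic (Mann-Whitney) form of AUC; a toy batch makes the arithmetic concrete:

# actual    = [1, 0, 1, 0]
# posterior = [0.9, 0.7, 0.6, 0.4]
# tied_rank(posterior) -> [4.0, 3.0, 2.0, 1.0]
# sum of positive ranks = 4.0 + 2.0 = 6.0, num_positive = num_negative = 2
# auc = (6.0 - 2 * 3 / 2.0) / (2 * 2) = 0.75
# i.e. 3 of the 4 (positive, negative) pairs are ranked in the right order.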
...@@ -11,18 +11,26 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=doc-string-missing

import os
import sys
import subprocess
import argparse
from multiprocessing import Pool


def benchmark_args():
    parser = argparse.ArgumentParser("benchmark")
    parser.add_argument("--thread", type=int, default=10, help="concurrency")
    parser.add_argument(
        "--model", type=str, default="", help="model for evaluation")
    parser.add_argument(
        "--endpoint",
        type=str,
        default="127.0.0.1:9292",
        help="endpoint of server")
    parser.add_argument(
        "--request", type=str, default="rpc", help="mode of service")
    return parser.parse_args()
...
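The benchmark scripts above consume these flags; a typical rpc-mode invocation (paths and endpoint are illustrative):

# python benchmark.py --thread 4 --request rpc --endpoint 127.0.0.1:9292 \
#     --model serving_client_conf/serving_client_conf.prototxt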
...@@ -19,16 +19,26 @@ Usage:
"""
import argparse


def parse_args():  # pylint: disable=doc-string-missing
    parser = argparse.ArgumentParser("serve")
    parser.add_argument(
        "--thread", type=int, default=10, help="Concurrency of server")
    parser.add_argument(
        "--model", type=str, default="", help="Model for serving")
    parser.add_argument(
        "--port", type=int, default=9292, help="Port of the server")
    parser.add_argument(
        "--workdir",
        type=str,
        default="workdir",
        help="Working dir of current service")
    parser.add_argument(
        "--device", type=str, default="cpu", help="Type of device")
    return parser.parse_args()


def start_standard_model():  # pylint: disable=doc-string-missing
    args = parse_args()
    thread_num = args.thread
    model = args.model
...@@ -59,5 +69,6 @@ def start_standard_model():
    server.prepare_server(workdir=workdir, port=port, device=device)
    server.run_server()


if __name__ == "__main__":
    start_standard_model()
...@@ -21,19 +21,31 @@ import argparse
from multiprocessing import Pool, Process
from .web_service import WebService


def parse_args():  # pylint: disable=doc-string-missing
    parser = argparse.ArgumentParser("web_serve")
    parser.add_argument(
        "--thread", type=int, default=10, help="Concurrency of server")
    parser.add_argument(
        "--model", type=str, default="", help="Model for serving")
    parser.add_argument(
        "--port", type=int, default=9292, help="Port of the server")
    parser.add_argument(
        "--workdir",
        type=str,
        default="workdir",
        help="Working dir of current service")
    parser.add_argument(
        "--device", type=str, default="cpu", help="Type of device")
    parser.add_argument(
        "--name", type=str, default="default", help="Default service name")
    return parser.parse_args()


if __name__ == "__main__":
    args = parse_args()
    service = WebService(name=args.name)
    service.load_model_config(args.model)
    service.prepare_server(
        workdir=args.workdir, port=args.port, device=args.device)
    service.run_server()
...@@ -12,11 +12,14 @@
# See the License for the specific language governing permissions and
# limitations under the License.
#!flask/bin/python
# pylint: disable=doc-string-missing

from flask import Flask, request, abort
from multiprocessing import Pool, Process
from paddle_serving_server import OpMaker, OpSeqMaker, Server
from paddle_serving_client import Client


class WebService(object):
    def __init__(self, name="default_service"):
        self.name = name
...@@ -38,7 +41,7 @@ class WebService(object):
        server.set_num_threads(16)
        server.load_model_config(self.model_config)
        server.prepare_server(
            workdir=self.workdir, port=self.port + 1, device=self.device)
        server.run_server()

    def prepare_server(self, workdir="", port=9393, device="cpu"):
...@@ -51,8 +54,9 @@ class WebService(object):
        client_service = Client()
        client_service.load_client_config(
            "{}/serving_server_conf.prototxt".format(self.model_config))
        client_service.connect(["127.0.0.1:{}".format(self.port + 1)])
        service_name = "/" + self.name + "/prediction"

        @app_instance.route(service_name, methods=['POST'])
        def get_prediction():
            if not request.json:
...@@ -63,15 +67,21 @@ class WebService(object):
            if "fetch" in feed:
                del feed["fetch"]
            fetch_map = client_service.predict(feed=feed, fetch=fetch)
            fetch_map = self.postprocess(
                feed=request.json, fetch=fetch, fetch_map=fetch_map)
            return fetch_map

        app_instance.run(host="127.0.0.1",
                         port=self.port,
                         threaded=False,
                         processes=1)

    def run_server(self):
        import socket
        localIP = socket.gethostbyname(socket.gethostname())
        print("web service address:")
        print("http://{}:{}/{}/prediction".format(localIP, self.port,
                                                  self.name))
        p_rpc = Process(target=self._launch_rpc_service)
        p_web = Process(target=self._launch_web_service)
        p_rpc.start()
...@@ -84,4 +94,3 @@ class WebService(object):
    def postprocess(self, feed={}, fetch=[], fetch_map={}):
        return fetch_map
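As the `web_serve` entry point above shows, the class is driven by loading a model config, preparing the server, and starting both processes; a minimal sketch (model path illustrative):

# service = WebService(name="uci")
# service.load_model_config("uci_housing_model")
# service.prepare_server(workdir="workdir", port=9393, device="cpu")
# service.run_server()  # forks the RPC backend and the flask front end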
...@@ -11,6 +11,7 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=doc-string-missing

import os
from .proto import server_configure_pb2 as server_sdk
...@@ -22,6 +23,7 @@ import paddle_serving_server_gpu as paddle_serving_server
from version import serving_server_version
from contextlib import closing


def serve_args():
    parser = argparse.ArgumentParser("serve")
    parser.add_argument(
...@@ -37,12 +39,12 @@ def serve_args():
        help="Working dir of current service")
    parser.add_argument(
        "--device", type=str, default="gpu", help="Type of device")
    parser.add_argument("--gpu_ids", type=str, default="", help="gpu ids")
    parser.add_argument(
        "--name", type=str, default="default", help="Default service name")
    return parser.parse_args()


class OpMaker(object):
    def __init__(self):
        self.op_dict = {
...
@@ -22,7 +22,7 @@ from multiprocessing import Pool, Process
from paddle_serving_server_gpu import serve_args


def start_gpu_card_model(gpuid, args):  # pylint: disable=doc-string-missing
    gpuid = int(gpuid)
    device = "gpu"
    port = args.port
@@ -43,7 +43,7 @@ def start_gpu_card_model(gpuid, args):
    read_op = op_maker.create('general_reader')
    general_infer_op = op_maker.create('general_infer')
    general_response_op = op_maker.create('general_response')

    op_seq_maker = serving.OpSeqMaker()
    op_seq_maker.add_op(read_op)
    op_seq_maker.add_op(general_infer_op)
@@ -59,7 +59,8 @@ def start_gpu_card_model(gpuid, args):
    server.set_gpuid(gpuid)
    server.run_server()


def start_multi_card(args):  # pylint: disable=doc-string-missing
    gpus = ""
    if args.gpu_ids == "":
        gpus = os.environ["CUDA_VISIBLE_DEVICES"]
@@ -70,13 +71,17 @@ def start_multi_card(args):
    else:
        gpu_processes = []
        for i, gpu_id in enumerate(gpus):
            p = Process(
                target=start_gpu_card_model, args=(
                    i,
                    args, ))
            gpu_processes.append(p)
        for p in gpu_processes:
            p.start()
        for p in gpu_processes:
            p.join()


if __name__ == "__main__":
    args = serve_args()
    start_multi_card(args)
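start_multi_card boils down to one Process per visible GPU id. A stripped-down sketch of that fan-out pattern; the worker body is a stand-in for start_gpu_card_model:

from multiprocessing import Process


def worker(index, gpu_id):
    # stand-in for start_gpu_card_model(index, args)
    print("serving worker {} pinned to GPU {}".format(index, gpu_id))


if __name__ == "__main__":
    gpus = "0,1".split(",")  # from --gpu_ids or CUDA_VISIBLE_DEVICES
    procs = [Process(target=worker, args=(i, g)) for i, g in enumerate(gpus)]
    for p in procs:
        p.start()
    for p in procs:
        p.join()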
@@ -12,6 +12,8 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#!flask/bin/python
# pylint: disable=doc-string-missing

from flask import Flask, request, abort
from multiprocessing import Pool, Process
from paddle_serving_server_gpu import OpMaker, OpSeqMaker, Server
@@ -34,8 +36,11 @@ class WebService(object):
    def set_gpus(self, gpus):
        self.gpus = gpus

    def default_rpc_service(self,
                            workdir="conf",
                            port=9292,
                            gpuid=0,
                            thread_num=10):
        device = "gpu"
        if gpuid == -1:
            device = "cpu"
@@ -43,16 +48,16 @@ class WebService(object):
        read_op = op_maker.create('general_reader')
        general_infer_op = op_maker.create('general_infer')
        general_response_op = op_maker.create('general_response')

        op_seq_maker = serving.OpSeqMaker()
        op_seq_maker.add_op(read_op)
        op_seq_maker.add_op(general_infer_op)
        op_seq_maker.add_op(general_response_op)

        server = serving.Server()
        server.set_op_sequence(op_seq_maker.get_op_sequence())
        server.set_num_threads(thread_num)

        server.load_model_config(self.model_config)
        if gpuid >= 0:
            server.set_gpuid(gpuid)
@@ -70,14 +75,16 @@ class WebService(object):
        if len(self.gpus) == 0:
            # init cpu service
            self.rpc_service_list.append(
                self.default_rpc_service(
                    self.workdir, self.port + 1, -1, thread_num=10))
        else:
            for i, gpuid in enumerate(self.gpus):
                self.rpc_service_list.append(
                    self.default_rpc_service(
                        "{}_{}".format(self.workdir, i),
                        self.port + 1 + i,
                        gpuid,
                        thread_num=10))

    def _launch_web_service(self, gpu_num):
        app_instance = Flask(__name__)
@@ -100,8 +107,7 @@ class WebService(object):
            if "fetch" not in request.json:
                abort(400)
            feed, fetch = self.preprocess(request.json, request.json["fetch"])
            fetch_map = client_list[0].predict(feed=feed, fetch=fetch)
            fetch_map = self.postprocess(
                feed=request.json, fetch=fetch, fetch_map=fetch_map)
            return fetch_map
@@ -120,13 +126,14 @@ class WebService(object):
        rpc_processes = []
        for idx in range(len(self.rpc_service_list)):
            p_rpc = Process(target=self._launch_rpc_service, args=(idx, ))
            rpc_processes.append(p_rpc)
        for p in rpc_processes:
            p.start()
        p_web = Process(
            target=self._launch_web_service, args=(len(self.gpus), ))
        p_web.start()
        for p in rpc_processes:
            p.join()
...
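The port layout implied by default_rpc_service and the launch code above: the Flask web server keeps self.port, and RPC worker i listens on port + 1 + i, with a single CPU worker on port + 1 when no GPUs are set. A small check of that arithmetic:

def rpc_ports(port, gpus):
    # mirrors the branch in default_rpc_service(): CPU fallback vs one port per GPU
    if len(gpus) == 0:
        return [port + 1]
    return [port + 1 + i for i, _ in enumerate(gpus)]


assert rpc_ports(9393, []) == [9394]
assert rpc_ports(9393, [0, 1, 2]) == [9394, 9395, 9396]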
@@ -77,4 +77,3 @@ setup(
    ],
    license='Apache 2.0',
    keywords=('paddle-serving serving-client deployment industrial easy-to-use'))
@@ -73,4 +73,3 @@ setup(
    ],
    license='Apache 2.0',
    keywords=('paddle-serving serving-server deployment industrial easy-to-use'))
FROM centos:7.3.1611

RUN yum -y install wget >/dev/null \
    && yum -y install gcc gcc-c++ make glibc-static which >/dev/null \
    && yum -y install git openssl-devel curl-devel bzip2-devel python-devel >/dev/null \
    && wget https://cmake.org/files/v3.2/cmake-3.2.0-Linux-x86_64.tar.gz >/dev/null \
    && tar xzf cmake-3.2.0-Linux-x86_64.tar.gz \
    && mv cmake-3.2.0-Linux-x86_64 /usr/local/cmake3.2.0 \
    && echo 'export PATH=/usr/local/cmake3.2.0/bin:$PATH' >> /root/.bashrc \
    && rm cmake-3.2.0-Linux-x86_64.tar.gz \
    && wget https://dl.google.com/go/go1.14.linux-amd64.tar.gz >/dev/null \
    && tar xzf go1.14.linux-amd64.tar.gz \
    && mv go /usr/local/go \
    && echo 'export GOROOT=/usr/local/go' >> /root/.bashrc \
    && echo 'export PATH=/usr/local/go/bin:$PATH' >> /root/.bashrc \
    && rm go1.14.linux-amd64.tar.gz \
    && yum -y install python-devel sqlite-devel >/dev/null \
    && curl https://bootstrap.pypa.io/get-pip.py -o get-pip.py >/dev/null \
    && python get-pip.py >/dev/null \
    && pip install google protobuf setuptools wheel flask >/dev/null \
    && rm get-pip.py \
    && wget http://nixos.org/releases/patchelf/patchelf-0.10/patchelf-0.10.tar.bz2 \
    && yum -y install bzip2 >/dev/null \
    && tar -jxf patchelf-0.10.tar.bz2 \
    && cd patchelf-0.10 \
    && ./configure --prefix=/usr \
    && make >/dev/null && make install >/dev/null \
    && cd .. \
    && rm -rf patchelf-0.10* \
    && yum -y update >/dev/null \
    && yum -y install dnf >/dev/null \
    && yum -y install dnf-plugins-core >/dev/null \
    && dnf copr enable alonid/llvm-3.8.0 -y \
    && dnf install llvm-3.8.0 clang-3.8.0 compiler-rt-3.8.0 -y \
    && echo 'export PATH=/opt/llvm-3.8.0/bin:$PATH' >> /root/.bashrc
@@ -3,4 +3,4 @@ paddle_serving_client.egg-info/PKG-INFO
paddle_serving_client.egg-info/SOURCES.txt
paddle_serving_client.egg-info/dependency_links.txt
paddle_serving_client.egg-info/not-zip-safe
paddle_serving_client.egg-info/top_level.txt
\ No newline at end of file
@@ -12,8 +12,8 @@
// See the License for the specific language governing permissions and
// limitations under the License.

#include "general_model.h"  // NOLINT
#include <fstream>
#include "sdk-cpp/builtin_format.pb.h"
#include "sdk-cpp/include/common.h"
#include "sdk-cpp/include/predictor_sdk.h"
@@ -28,7 +28,7 @@ namespace baidu {
namespace paddle_serving {
namespace general_model {

void PredictorClient::init(const std::string &conf_file) {
  _conf_file = conf_file;
  std::ifstream fin(conf_file);
  if (!fin) {
@@ -65,9 +65,8 @@ void PredictorClient::init(const std::string & conf_file) {
  }
}

void PredictorClient::set_predictor_conf(const std::string &conf_path,
                                         const std::string &conf_file) {
  _predictor_path = conf_path;
  _predictor_conf = conf_file;
}
@@ -80,53 +79,51 @@ int PredictorClient::create_predictor() {
  _api.thrd_initialize();
}

void PredictorClient::predict(const std::vector<std::vector<float>> &float_feed,
                              const std::vector<std::string> &float_feed_name,
                              const std::vector<std::vector<int64_t>> &int_feed,
                              const std::vector<std::string> &int_feed_name,
                              const std::vector<std::string> &fetch_name,
                              FetchedMap *fetch_result) {
  _api.thrd_clear();
  _predictor = _api.fetch_predictor("general_model");

  Request req;
  std::vector<Tensor *> tensor_vec;
  FeedInst *inst = req.add_insts();
  for (auto &name : float_feed_name) {
    tensor_vec.push_back(inst->add_tensor_array());
  }

  for (auto &name : int_feed_name) {
    tensor_vec.push_back(inst->add_tensor_array());
  }

  int vec_idx = 0;
  for (auto &name : float_feed_name) {
    int idx = _feed_name_to_idx[name];
    Tensor *tensor = tensor_vec[idx];
    for (int j = 0; j < _shape[idx].size(); ++j) {
      tensor->add_shape(_shape[idx][j]);
    }
    tensor->set_elem_type(1);
    for (int j = 0; j < float_feed[vec_idx].size(); ++j) {
      tensor->add_data((char *)(&(float_feed[vec_idx][j])),  // NOLINT
                       sizeof(float));
    }
    vec_idx++;
  }

  vec_idx = 0;
  for (auto &name : int_feed_name) {
    int idx = _feed_name_to_idx[name];
    Tensor *tensor = tensor_vec[idx];
    for (int j = 0; j < _shape[idx].size(); ++j) {
      tensor->add_shape(_shape[idx][j]);
    }
    tensor->set_elem_type(0);
    for (int j = 0; j < int_feed[vec_idx].size(); ++j) {
      tensor->add_data((char *)(&(int_feed[vec_idx][j])),  // NOLINT
                       sizeof(int64_t));
    }
    vec_idx++;
  }
@@ -139,13 +136,13 @@ void PredictorClient::predict(
    LOG(ERROR) << "failed call predictor with req: " << req.ShortDebugString();
    exit(-1);
  } else {
    for (auto &name : fetch_name) {
      int idx = _fetch_name_to_idx[name];
      int len = res.insts(0).tensor_array(idx).data_size();
      (*fetch_result)[name].resize(len);
      for (int i = 0; i < len; ++i) {
        (*fetch_result)[name][i] =
            *(const float *)res.insts(0).tensor_array(idx).data(i).c_str();
      }
    }
  }
@@ -154,12 +151,12 @@ void PredictorClient::predict(
}

void PredictorClient::predict_with_profile(
    const std::vector<std::vector<float>> &float_feed,
    const std::vector<std::string> &float_feed_name,
    const std::vector<std::vector<int64_t>> &int_feed,
    const std::vector<std::string> &int_feed_name,
    const std::vector<std::string> &fetch_name,
    FetchedMap *fetch_result) {
  return;
}
...
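Both directions of the wire format above move raw bytes: add_data() copies the 4 or 8 bytes of each scalar into the tensor, and the fetch path reinterprets each returned blob as a float. The same round trip expressed in Python, with struct standing in for the C++ pointer casts (native byte order assumed, as in the casts):

import struct

blob = struct.pack("f", 0.125)        # what tensor->add_data(...) serializes
value = struct.unpack("f", blob)[0]   # what *(const float *)data(i).c_str() reads back
assert value == 0.125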
@@ -18,9 +18,9 @@

#include <unistd.h>
#include <fstream>
#include <map>
#include <string>
#include <vector>

#include "sdk-cpp/builtin_format.pb.h"
#include "sdk-cpp/general_model_service.pb.h"
@@ -37,44 +37,40 @@ namespace general_model {

typedef std::map<std::string, std::vector<float>> FetchedMap;

class PredictorClient {
 public:
  PredictorClient() {}
  ~PredictorClient() {}

  void init(const std::string& client_conf);

  void set_predictor_conf(const std::string& conf_path,
                          const std::string& conf_file);

  int create_predictor();

  void predict(const std::vector<std::vector<float>>& float_feed,
               const std::vector<std::string>& float_feed_name,
               const std::vector<std::vector<int64_t>>& int_feed,
               const std::vector<std::string>& int_feed_name,
               const std::vector<std::string>& fetch_name,
               FetchedMap* result_map);

  void predict_with_profile(const std::vector<std::vector<float>>& float_feed,
                            const std::vector<std::string>& float_feed_name,
                            const std::vector<std::vector<int64_t>>& int_feed,
                            const std::vector<std::string>& int_feed_name,
                            const std::vector<std::string>& fetch_name,
                            FetchedMap* result_map);

 private:
  PredictorApi _api;
  Predictor* _predictor;
  std::string _predictor_conf;
  std::string _predictor_path;
  std::string _conf_file;
  std::map<std::string, int> _feed_name_to_idx;
  std::map<std::string, int> _fetch_name_to_idx;
  std::map<std::string, std::string> _fetch_name_to_var_name;
  std::vector<std::vector<int>> _shape;
};

}  // namespace general_model
...
@@ -15,20 +15,20 @@
#include <fstream>
#include <vector>
#include "general_model.h"  // NOLINT

using namespace std;  // NOLINT

using baidu::paddle_serving::general_model::PredictorClient;
using baidu::paddle_serving::general_model::FetchedMap;

int main(int argc, char* argv[]) {
  PredictorClient* client = new PredictorClient();
  client->init("inference.conf");
  client->set_predictor_conf("./", "predictor.conf");
  client->create_predictor();

  std::vector<std::vector<float>> float_feed;
  std::vector<std::vector<int64_t>> int_feed;
  std::vector<std::string> float_feed_name;
  std::vector<std::string> int_feed_name = {"words", "label"};
  std::vector<std::string> fetch_name = {"cost", "acc", "prediction"};
@@ -53,13 +53,14 @@ int main(int argc, char * argv[]) {
    cin >> label;
    int_feed.push_back({label});

    FetchedMap result;
    client->predict(float_feed,
                    float_feed_name,
                    int_feed,
                    int_feed_name,
                    fetch_name,
                    &result);

    cout << label << "\t" << result["prediction"][1] << endl;
...
@@ -18,14 +18,14 @@
#include <fstream>
#include "core/sdk-cpp/builtin_format.pb.h"
#include "core/sdk-cpp/include/common.h"
#include "core/sdk-cpp/include/predictor_sdk.h"
#include "core/sdk-cpp/load_general_model_service.pb.h"

using baidu::paddle_serving::sdk_cpp::Predictor;
using baidu::paddle_serving::sdk_cpp::PredictorApi;
using baidu::paddle_serving::predictor::load_general_model_service::
    RequestAndResponse;

int create_req(RequestAndResponse& req) {  // NOLINT
  req.set_a(1);
...
// Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <pybind11/pybind11.h>
#include "general_model.h"  // NOLINT
#include <pybind11/stl.h>   // NOLINT

namespace py = pybind11;
@@ -17,28 +31,30 @@ PYBIND11_MODULE(paddle_serving_client, m) {
  py::class_<PredictorClient>(m, "PredictorClient", py::buffer_protocol())
      .def(py::init())
      .def("init",
           [](PredictorClient &self, const std::string &conf) {
             self.init(conf);
           })
      .def("set_predictor_conf",
           [](PredictorClient &self,
              const std::string &conf_path,
              const std::string &conf_file) {
             self.set_predictor_conf(conf_path, conf_file);
           })
      .def("create_predictor",
           [](PredictorClient &self) { self.create_predictor(); })
      .def("predict",
           [](PredictorClient &self,
              const std::vector<std::vector<float>> &float_feed,
              const std::vector<std::string> &float_feed_name,
              const std::vector<std::vector<int64_t>> &int_feed,
              const std::vector<std::string> &int_feed_name,
              const std::vector<std::string> &fetch_name,
              FetchedMap *fetch_result) {
             return self.predict(float_feed,
                                 float_feed_name,
                                 int_feed,
                                 int_feed_name,
                                 fetch_name,
                                 fetch_result);
           });
}
...
@@ -14,10 +14,10 @@
#pragma once

#include <vector>
#include "examples/demo-serving/bert_service.pb.h"
#include "paddle_inference_api.h"  // NOLINT

#include <sys/time.h>  // NOLINT

namespace baidu {
namespace paddle_serving {
...
@@ -13,9 +13,9 @@
// limitations under the License.

#include "examples/demo-serving/op/classify_op.h"
#include "core/predictor/framework/infer.h"
#include "core/predictor/framework/memory.h"
#include "examples/demo-serving/op/reader_op.h"

namespace baidu {
namespace paddle_serving {
...
@@ -14,8 +14,8 @@
#pragma once

#include <vector>
#include "examples/demo-serving/image_class.pb.h"
#include "paddle_inference_api.h"  // NOLINT

namespace baidu {
namespace paddle_serving {
...
@@ -14,8 +14,8 @@
#pragma once

#include <vector>
#include "examples/demo-serving/ctr_prediction.pb.h"
#include "paddle_inference_api.h"  // NOLINT

namespace baidu {
namespace paddle_serving {
...
@@ -25,7 +25,6 @@
#endif
#include "examples/demo-serving/general_model_service.pb.h"

namespace baidu {
namespace paddle_serving {
namespace serving {
@@ -34,7 +33,7 @@ static const char* GENERAL_MODEL_NAME = "general_model";
class GeneralModelOp
    : public baidu::paddle_serving::predictor::OpWithChannel<
          baidu::paddle_serving::predictor::general_model::Response> {
 public:
  typedef std::vector<paddle::PaddleTensor> TensorVector;
...
@@ -13,13 +13,13 @@
// limitations under the License.

#pragma once
#include "core/kvdb/include/kvdb/paddle_rocksdb.h"
#include "core/predictor/common/inner_common.h"
#include "core/predictor/framework/channel.h"
#include "core/predictor/framework/op_repository.h"
#include "core/predictor/framework/resource.h"
#include "core/predictor/op/op.h"
#include "examples/demo-serving/echo_kvdb_service.pb.h"

namespace baidu {
namespace paddle_serving {
...
@@ -15,25 +15,23 @@
#pragma once

#include <memory>
#include "core/predictor/common/inner_common.h"
#include "core/predictor/framework/channel.h"
#include "core/predictor/framework/op_repository.h"
#include "core/predictor/framework/resource.h"
#include "core/predictor/op/op.h"
#include "examples/demo-serving/load_general_model_service.pb.h"

namespace baidu {
namespace paddle_serving {
namespace predictor {

class LoadGeneralModelConfOp
    : public OpWithChannel<baidu::paddle_serving::predictor::
                               load_general_model_service::RequestAndResponse> {
 public:
  typedef baidu::paddle_serving::predictor::load_general_model_service::
      RequestAndResponse RequestAndResponse;

  DECLARE_OP(LoadGeneralModelConfOp);
...
@@ -15,12 +15,12 @@
#pragma once

#include <string>
#include <vector>
#include "core/predictor/builtin_format.pb.h"
#include "core/predictor/common/inner_common.h"
#include "core/predictor/framework/channel.h"
#include "core/predictor/framework/op_repository.h"
#include "core/predictor/op/op.h"
#include "examples/demo-serving/image_class.pb.h"

// opencv
#include "opencv/cv.h"
...
@@ -14,8 +14,8 @@
#pragma once

#include <vector>
#include "examples/demo-serving/text_classification.pb.h"
#include "paddle_inference_api.h"  // NOLINT

namespace baidu {
namespace paddle_serving {
...
@@ -21,8 +21,8 @@
#include "json2pb/pb_to_json.h"
#endif

#include "core/predictor/framework/memory.h"
#include "examples/demo-serving/op/write_json_op.h"

#ifndef BCLOUD
using json2pb::ProtoMessageToJson;
...
@@ -13,11 +13,11 @@
// limitations under the License.

#pragma once
#include "core/predictor/common/inner_common.h"
#include "core/predictor/framework/channel.h"
#include "core/predictor/framework/op_repository.h"
#include "core/predictor/op/op.h"
#include "examples/demo-serving/image_class.pb.h"

namespace baidu {
namespace paddle_serving {
...
@@ -16,13 +16,13 @@
#include <string>

#ifdef BCLOUD
#include "pb_to_json.h"  // NOLINT
#else
#include "json2pb/pb_to_json.h"
#endif

#include "core/predictor/framework/memory.h"
#include "examples/demo-serving/op/write_op.h"

#ifndef BCLOUD
using json2pb::ProtoMessageToJson;
...
@@ -13,12 +13,12 @@
// limitations under the License.

#pragma once
#include "core/predictor/builtin_format.pb.h"
#include "core/predictor/common/inner_common.h"
#include "core/predictor/framework/channel.h"
#include "core/predictor/framework/op_repository.h"
#include "core/predictor/op/op.h"
#include "examples/demo-serving/image_class.pb.h"

namespace baidu {
namespace paddle_serving {
...
@@ -25,21 +25,13 @@ message Tensor {
  repeated int32 shape = 3;
};

message FeedInst { repeated Tensor tensor_array = 1; };

message FetchInst { repeated Tensor tensor_array = 1; };

message Request { repeated FeedInst insts = 1; };

message Response { repeated FetchInst insts = 1; };

service GeneralModelService {
  rpc inference(Request) returns (Response);
...
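Collapsing the one-line messages is purely cosmetic; the request shape is unchanged. Building a request with the generated Python stubs would look roughly like this; the pb2 module name depends on how the proto is compiled, so treat it as illustrative:

from general_model_service_pb2 import Request  # name of the generated stub is an assumption

req = Request()
inst = req.insts.add()            # one FeedInst per sample
tensor = inst.tensor_array.add()  # one Tensor per feed variable
tensor.elem_type = 1              # 1 = float, matching the C++ client above
tensor.shape.extend([1, 13])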
@@ -11,7 +11,7 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=doc-string-missing

from __future__ import print_function
import json
import sys
@@ -33,8 +33,8 @@ def str2long(str):
        return int(str)


def tied_rank(x):  # pylint: disable=doc-string-with-all-args, doc-string-with-returns
    """
    Computes the tied rank of elements in x.
    This function computes the tied rank of elements in x.
    Parameters
@@ -45,23 +45,23 @@ def tied_rank(x):
    score : list of numbers
            The tied rank f each element in x
    """
    sorted_x = sorted(zip(x, range(len(x))))
    r = [0 for k in x]
    cur_val = sorted_x[0][0]
    last_rank = 0
    for i in range(len(sorted_x)):
        if cur_val != sorted_x[i][0]:
            cur_val = sorted_x[i][0]
            for j in range(last_rank, i):
                r[sorted_x[j][1]] = float(last_rank + 1 + i) / 2.0
            last_rank = i
        if i == len(sorted_x) - 1:
            for j in range(last_rank, i + 1):
                r[sorted_x[j][1]] = float(last_rank + i + 2) / 2.0
    return r


def auc(actual, posterior):  # pylint: disable=doc-string-with-all-args, doc-string-with-returns
    """
    Computes the area under the receiver-operater characteristic (AUC)
    This function computes the AUC error metric for binary classification.
@@ -78,11 +78,11 @@ def auc(actual, posterior):
           The mean squared error between actual and posterior
    """
    r = tied_rank(posterior)
    num_positive = len([0 for x in actual if x == 1])
    num_negative = len(actual) - num_positive
    sum_positive = sum([r[i] for i in range(len(r)) if actual[i] == 1])
    auc = ((sum_positive - num_positive * (num_positive + 1) / 2.0) /
           (num_negative * num_positive))
    return auc
@@ -105,8 +105,8 @@ def data_reader(data_file, samples, labels):
            for i in range(0, len(features)):
                if slots[i] in sample:
                    sample[slots[i]].append(
                        int(features[i]) % CTR_EMBEDDING_TABLE_SIZE)
                else:
                    sample[slots[i]] = [
                        int(features[i]) % CTR_EMBEDDING_TABLE_SIZE
@@ -117,7 +117,7 @@ def data_reader(data_file, samples, labels):
                    sample[x] = [0]
            samples.append(sample)


if __name__ == "__main__":
    """ main
    """
@@ -180,4 +180,4 @@ if __name__ == "__main__":
            pass
        idx = idx + 1
    print("auc = ", auc(labels, result_list))
@@ -14,8 +14,8 @@
#include "elastic-ctr/serving/op/elastic_ctr_prediction_op.h"
#include <algorithm>
#include <iomanip>
#include <string>
#include "cube/cube-api/include/cube_api.h"
#include "predictor/framework/infer.h"
#include "predictor/framework/kv_manager.h"
@@ -87,7 +87,7 @@ int ElasticCTRPredictionOp::inference() {
  // Verify all request instances have same slots
  std::vector<int> slot_ids;
  for (auto x : samples[0]) {
    slot_ids.push_back(x.first);
  }
  std::sort(slot_ids.begin(), slot_ids.end());
@@ -105,9 +105,9 @@ int ElasticCTRPredictionOp::inference() {
  //
  // Later we use slot_map to index into lod_tenor array
  //
  std::map<int, int> slot_map;  // NOLINT
  int index = 0;
  for (auto slot_id : slot_ids) {
    slot_map[slot_id] = index;
    ++index;
  }
@@ -121,7 +121,7 @@ int ElasticCTRPredictionOp::inference() {
      return 0;
    }
    for (auto slot : samples[i]) {
      int id = slot.first;
      auto x = std::find(slot_ids.begin(), slot_ids.end(), id);
      if (x == slot_ids.end()) {
@@ -171,7 +171,7 @@ int ElasticCTRPredictionOp::inference() {
  feature_slot_sizes.resize(slot_ids.size());

  // Iterate over each feature slot
  for (auto slot_id : slot_ids) {
    feature_slot_lods[slot_map[slot_id]].push_back(0);
    feature_slot_sizes[slot_map[slot_id]] = 0;
@@ -179,8 +179,8 @@ int ElasticCTRPredictionOp::inference() {
    for (size_t si = 0; si < samples.size(); ++si) {
      Sample &sample = samples[si];
      std::vector<int64_t> &slot = sample[slot_id];
      feature_slot_lods[slot_map[slot_id]].push_back(
          feature_slot_lods[slot_map[slot_id]].back() + slot.size());
      feature_slot_sizes[slot_map[slot_id]] += slot.size();

      for (size_t j = 0; j < slot.size(); ++j) {
@@ -303,14 +303,11 @@ int ElasticCTRPredictionOp::inference() {
  std::vector<paddle::PaddleTensor> lod_tensors;
  lod_tensors.resize(slot_ids.size());

  for (auto slot_id : slot_ids) {
    paddle::PaddleTensor &lod_tensor = lod_tensors[slot_map[slot_id]];

    char name[VARIABLE_NAME_LEN];
    snprintf(name, VARIABLE_NAME_LEN, "embedding_%d.tmp_0", slot_id);
    lod_tensor.name = std::string(name);

    lod_tensor.dtype = paddle::PaddleDType::FLOAT32;
@@ -322,7 +319,7 @@ int ElasticCTRPredictionOp::inference() {
  int base = 0;

  // Iterate over all slots
  for (auto slot_id : slot_ids) {
    paddle::PaddleTensor &lod_tensor = lod_tensors[slot_map[slot_id]];
    std::vector<std::vector<size_t>> &lod = lod_tensor.lod;
@@ -346,7 +343,7 @@ int ElasticCTRPredictionOp::inference() {
          res, -1, "Embedding vector size not expected");
      return 0;
#else
// sizeof(float) * CTR_PREDICTION_EMBEDDING_SIZE = 36
#if 1
      LOG(INFO) << "values[" << idx << "].buff.size != 36";
#endif
...
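The feature_slot_lods bookkeeping above is the standard LoD (level-of-detail) offset array: for each slot, offsets[i + 1] = offsets[i] + len(sample_i[slot]), so consecutive offsets delimit each sample's ids within the flattened tensor. The same construction in a few lines of Python:

def build_lod(slot_lengths):
    # one offset per sample boundary, starting at 0
    lod = [0]
    for n in slot_lengths:
        lod.append(lod[-1] + n)
    return lod


assert build_lod([2, 3, 1]) == [0, 2, 5, 6]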
@@ -14,8 +14,8 @@
#pragma once

#include <vector>
#include "elastic-ctr/serving/elastic_ctr_prediction.pb.h"
#include "paddle_inference_api.h"  // NOLINT

namespace baidu {
namespace paddle_serving {
@@ -33,7 +33,7 @@ class ElasticCTRPredictionOp
          baidu::paddle_serving::predictor::elastic_ctr::Response> {
 public:
  typedef std::vector<paddle::PaddleTensor> TensorVector;
  typedef std::map<int, std::vector<int64_t>> Sample;  // NOLINT
  typedef std::vector<Sample> Samples;

  DECLARE_OP(ElasticCTRPredictionOp);
...
#!/usr/bin/env bash
function init() {
source /root/.bashrc
set -v
cd Serving
}
function abort(){
echo "Your change doesn't follow PaddlePaddle's code style." 1>&2
echo "Please use pre-commit to check what is wrong." 1>&2
exit 1
}
function check_style() {
trap 'abort' 0
set -e
pip install cpplint 'pre-commit==1.10.4'
export PATH=/usr/bin:$PATH
pre-commit install
clang-format --version
if ! pre-commit run -a ; then
git diff
exit 1
fi
trap : 0
}
function main() {
init
check_style
}
main