diff --git a/README.md b/README.md
index 04634afebfc699708b681a99257eabc0898f7356..c867298e7e1a8c6bbc06aaba2ad2dc8f73518ae4 100644
--- a/README.md
+++ b/README.md
@@ -184,6 +184,12 @@ Here, `client.predict` function has two arguments. `feed` is a `python dict` wit
Community
+### User Group in China
+
+![PaddleServing QQ group](doc/qq.jpeg) ![PaddleServing WeChat group](doc/wechat.jpeg)
+
+PaddleServing QQ group | PaddleServing WeChat group
+
### Slack
To connect with other users and contributors, welcome to join our [Slack channel](https://paddleserving.slack.com/archives/CUBPKHKMJ)
diff --git a/core/general-client/include/general_model.h b/core/general-client/include/general_model.h
index b379188854c30587d24962bc827aa099c3a39183..b5d27df5edbaf9278ecb8614e282d104347206f8 100644
--- a/core/general-client/include/general_model.h
+++ b/core/general-client/include/general_model.h
@@ -49,6 +49,8 @@ class ModelRes {
res._int64_value_map.end());
_float_value_map.insert(res._float_value_map.begin(),
res._float_value_map.end());
+ _int32_value_map.insert(res._int32_value_map.begin(),
+ res._int32_value_map.end());
_shape_map.insert(res._shape_map.begin(), res._shape_map.end());
_lod_map.insert(res._lod_map.begin(), res._lod_map.end());
}
@@ -60,6 +62,9 @@ class ModelRes {
_float_value_map.insert(
std::make_move_iterator(std::begin(res._float_value_map)),
std::make_move_iterator(std::end(res._float_value_map)));
+ _int32_value_map.insert(
+ std::make_move_iterator(std::begin(res._int32_value_map)),
+ std::make_move_iterator(std::end(res._int32_value_map)));
_shape_map.insert(std::make_move_iterator(std::begin(res._shape_map)),
std::make_move_iterator(std::end(res._shape_map)));
_lod_map.insert(std::make_move_iterator(std::begin(res._lod_map)),
@@ -78,6 +83,12 @@ class ModelRes {
std::vector<float>&& get_float_by_name_with_rv(const std::string& name) {
return std::move(_float_value_map[name]);
}
+ const std::vector<int32_t>& get_int32_by_name(const std::string& name) {
+ return _int32_value_map[name];
+ }
+ std::vector<int32_t>&& get_int32_by_name_with_rv(const std::string& name) {
+ return std::move(_int32_value_map[name]);
+ }
const std::vector<int>& get_shape_by_name(const std::string& name) {
return _shape_map[name];
}
@@ -103,6 +114,9 @@ class ModelRes {
_float_value_map.insert(
std::make_move_iterator(std::begin(res._float_value_map)),
std::make_move_iterator(std::end(res._float_value_map)));
+ _int32_value_map.insert(
+ std::make_move_iterator(std::begin(res._int32_value_map)),
+ std::make_move_iterator(std::end(res._int32_value_map)));
_shape_map.insert(std::make_move_iterator(std::begin(res._shape_map)),
std::make_move_iterator(std::end(res._shape_map)));
_lod_map.insert(std::make_move_iterator(std::begin(res._lod_map)),
@@ -115,6 +129,7 @@ class ModelRes {
std::string _engine_name;
std::map<std::string, std::vector<int64_t>> _int64_value_map;
std::map<std::string, std::vector<float>> _float_value_map;
+ std::map<std::string, std::vector<int32_t>> _int32_value_map;
std::map<std::string, std::vector<int>> _shape_map;
std::map<std::string, std::vector<int>> _lod_map;
};
@@ -145,6 +160,14 @@ class PredictorRes {
const std::string& name) {
return std::move(_models[model_idx].get_float_by_name_with_rv(name));
}
+ const std::vector<int32_t>& get_int32_by_name(const int model_idx,
+ const std::string& name) {
+ return _models[model_idx].get_int32_by_name(name);
+ }
+ std::vector<int32_t>&& get_int32_by_name_with_rv(const int model_idx,
+ const std::string& name) {
+ return std::move(_models[model_idx].get_int32_by_name_with_rv(name));
+ }
const std::vector<int>& get_shape_by_name(const int model_idx,
const std::string& name) {
return _models[model_idx].get_shape_by_name(name);
diff --git a/core/general-client/src/general_model.cpp b/core/general-client/src/general_model.cpp
index 613abf9233610d170bce4386798662f78887edf7..9f709c71045577f7b043777a7ad1528a0e2ccc28 100644
--- a/core/general-client/src/general_model.cpp
+++ b/core/general-client/src/general_model.cpp
@@ -207,17 +207,28 @@ int PredictorClient::batch_predict(
for (auto &name : int_feed_name) {
int idx = _feed_name_to_idx[name];
Tensor *tensor = tensor_vec[idx];
- VLOG(2) << "prepare int feed " << name << " shape size "
- << int_shape[vec_idx].size();
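+ // _type[idx] is the feed type from the model config: 0 = int64, 2 = int32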
+ if (_type[idx] == 0) {
+ VLOG(2) << "prepare int64 feed " << name << " shape size "
+ << int_shape[vec_idx].size();
+ VLOG(3) << "feed var name " << name << " index " << vec_idx
+ << "first data " << int_feed[vec_idx][0];
+ for (uint32_t j = 0; j < int_feed[vec_idx].size(); ++j) {
+ tensor->add_int64_data(int_feed[vec_idx][j]);
+ }
+ } else if (_type[idx] == 2) {
+ VLOG(2) << "prepare int32 feed " << name << " shape size "
+ << int_shape[vec_idx].size();
+ VLOG(3) << "feed var name " << name << " index " << vec_idx
+ << "first data " << int32_t(int_feed[vec_idx][0]);
+ for (uint32_t j = 0; j < int_feed[vec_idx].size(); ++j) {
+ tensor->add_int_data(int32_t(int_feed[vec_idx][j]));
+ }
+ }
+
for (uint32_t j = 0; j < int_shape[vec_idx].size(); ++j) {
tensor->add_shape(int_shape[vec_idx][j]);
}
- tensor->set_elem_type(0);
- VLOG(3) << "feed var name " << name << " index " << vec_idx
- << "first data " << int_feed[vec_idx][0];
- for (uint32_t j = 0; j < int_feed[vec_idx].size(); ++j) {
- tensor->add_int64_data(int_feed[vec_idx][j]);
- }
+ tensor->set_elem_type(_type[idx]);
vec_idx++;
}
@@ -284,18 +295,25 @@ int PredictorClient::batch_predict(
for (auto &name : fetch_name) {
// int idx = _fetch_name_to_idx[name];
if (_fetch_name_to_type[name] == 0) {
- VLOG(2) << "ferch var " << name << "type int";
+ VLOG(2) << "ferch var " << name << "type int64";
int size = output.insts(0).tensor_array(idx).int64_data_size();
model._int64_value_map[name] = std::vector<int64_t>(
output.insts(0).tensor_array(idx).int64_data().begin(),
output.insts(0).tensor_array(idx).int64_data().begin() + size);
- } else {
+ } else if (_fetch_name_to_type[name] == 1) {
VLOG(2) << "fetch var " << name << "type float";
int size = output.insts(0).tensor_array(idx).float_data_size();
model._float_value_map[name] = std::vector<float>(
output.insts(0).tensor_array(idx).float_data().begin(),
output.insts(0).tensor_array(idx).float_data().begin() + size);
+ } else if (_fetch_name_to_type[name] == 2) {
+ VLOG(2) << "fetch var " << name << "type int32";
+ int size = output.insts(0).tensor_array(idx).int_data_size();
+ model._int32_value_map[name] = std::vector<int32_t>(
+ output.insts(0).tensor_array(idx).int_data().begin(),
+ output.insts(0).tensor_array(idx).int_data().begin() + size);
}
+
idx += 1;
}
predict_res_batch.add_model_res(std::move(model));
@@ -442,12 +460,19 @@ int PredictorClient::numpy_predict(
for (auto &name : int_feed_name) {
int idx = _feed_name_to_idx[name];
Tensor *tensor = tensor_vec[idx];
- VLOG(2) << "prepare int feed " << name << " shape size "
- << int_shape[vec_idx].size();
+
for (uint32_t j = 0; j < int_shape[vec_idx].size(); ++j) {
tensor->add_shape(int_shape[vec_idx][j]);
}
- tensor->set_elem_type(0);
+ tensor->set_elem_type(_type[idx]);
+
+ if (_type[idx] == 0) {
+ VLOG(2) << "prepare int feed " << name << " shape size "
+ << int_shape[vec_idx].size();
+ } else {
+ VLOG(2) << "prepare int32 feed " << name << " shape size "
+ << int_shape[vec_idx].size();
+ }
const int int_shape_size = int_shape[vec_idx].size();
switch (int_shape_size) {
@@ -457,7 +482,11 @@ int PredictorClient::numpy_predict(
for (ssize_t j = 0; j < int_array.shape(1); j++) {
for (ssize_t k = 0; k < int_array.shape(2); k++) {
for (ssize_t l = 0; l < int_array.shape(3); l++) {
- tensor->add_int64_data(int_array(i, j, k, l));
+ if (_type[idx] == 0) {
+ tensor->add_int64_data(int_array(i, j, k, l));
+ } else {
+ tensor->add_int_data(int_array(i, j, k, l));
+ }
}
}
}
@@ -469,7 +498,11 @@ int PredictorClient::numpy_predict(
for (ssize_t i = 0; i < int_array.shape(0); i++) {
for (ssize_t j = 0; j < int_array.shape(1); j++) {
for (ssize_t k = 0; k < int_array.shape(2); k++) {
- tensor->add_int64_data(int_array(i, j, k));
+ if (_type[idx] == 0) {
+ tensor->add_int64_data(int_array(i, j, k));
+ } else {
+ tensor->add_int_data(int_array(i, j, k));
+ }
}
}
}
@@ -479,7 +512,11 @@ int PredictorClient::numpy_predict(
auto int_array = int_feed[vec_idx].unchecked<2>();
for (ssize_t i = 0; i < int_array.shape(0); i++) {
for (ssize_t j = 0; j < int_array.shape(1); j++) {
- tensor->add_int64_data(int_array(i, j));
+ if (_type[idx] == 0) {
+ tensor->add_int64_data(int_array(i, j));
+ } else {
+ tensor->add_int_data(int_array(i, j));
+ }
}
}
break;
@@ -487,7 +524,11 @@ int PredictorClient::numpy_predict(
case 1: {
auto int_array = int_feed[vec_idx].unchecked<1>();
for (ssize_t i = 0; i < int_array.shape(0); i++) {
- tensor->add_int64_data(int_array(i));
+ if (_type[idx] == 0) {
+ tensor->add_int64_data(int_array(i));
+ } else {
+ tensor->add_int_data(int_array(i));
+ }
}
break;
}
@@ -557,17 +598,23 @@ int PredictorClient::numpy_predict(
for (auto &name : fetch_name) {
// int idx = _fetch_name_to_idx[name];
if (_fetch_name_to_type[name] == 0) {
- VLOG(2) << "ferch var " << name << "type int";
+ VLOG(2) << "ferch var " << name << "type int64";
int size = output.insts(0).tensor_array(idx).int64_data_size();
model._int64_value_map[name] = std::vector<int64_t>(
output.insts(0).tensor_array(idx).int64_data().begin(),
output.insts(0).tensor_array(idx).int64_data().begin() + size);
- } else {
+ } else if (_fetch_name_to_type[name] == 1) {
VLOG(2) << "fetch var " << name << "type float";
int size = output.insts(0).tensor_array(idx).float_data_size();
model._float_value_map[name] = std::vector<float>(
output.insts(0).tensor_array(idx).float_data().begin(),
output.insts(0).tensor_array(idx).float_data().begin() + size);
+ } else if (_fetch_name_to_type[name] == 2) {
+ VLOG(2) << "fetch var " << name << "type int32";
+ int size = output.insts(0).tensor_array(idx).int_data_size();
+ model._int32_value_map[name] = std::vector<int32_t>(
+ output.insts(0).tensor_array(idx).int_data().begin(),
+ output.insts(0).tensor_array(idx).int_data().begin() + size);
}
idx += 1;
}
@@ -601,7 +648,6 @@ int PredictorClient::numpy_predict(
_api.thrd_clear();
return 0;
}
-
} // namespace general_model
} // namespace paddle_serving
} // namespace baidu
diff --git a/core/general-server/op/general_reader_op.cpp b/core/general-server/op/general_reader_op.cpp
index 7d48949b22d0ace289ab3b9214f092819f5476e0..380f861606a7719a33407dd946c5ac476629fdb7 100644
--- a/core/general-server/op/general_reader_op.cpp
+++ b/core/general-server/op/general_reader_op.cpp
@@ -126,9 +126,12 @@ int GeneralReaderOp::inference() {
if (elem_type[i] == 0) { // int64
elem_size[i] = sizeof(int64_t);
lod_tensor.dtype = paddle::PaddleDType::INT64;
- } else {
+ } else if (elem_type[i] == 1) {
elem_size[i] = sizeof(float);
lod_tensor.dtype = paddle::PaddleDType::FLOAT32;
+ } else if (elem_type[i] == 2) {
+ elem_size[i] = sizeof(int32_t);
+ lod_tensor.dtype = paddle::PaddleDType::INT32;
}
if (model_config->_is_lod_feed[i]) {
@@ -159,8 +162,10 @@ int GeneralReaderOp::inference() {
int data_len = 0;
if (tensor.int64_data_size() > 0) {
data_len = tensor.int64_data_size();
- } else {
+ } else if (tensor.float_data_size() > 0) {
data_len = tensor.float_data_size();
+ } else if (tensor.int_data_size() > 0) {
+ data_len = tensor.int_data_size();
}
VLOG(2) << "tensor size for var[" << i << "]: " << data_len;
tensor_size += data_len;
@@ -198,6 +203,8 @@ int GeneralReaderOp::inference() {
for (int i = 0; i < var_num; ++i) {
if (elem_type[i] == 0) {
int64_t *dst_ptr = static_cast<int64_t *>(out->at(i).data.data());
+ VLOG(2) << "first element data in var[" << i << "] is "
+ << req->insts(0).tensor_array(i).int64_data(0);
int offset = 0;
for (int j = 0; j < batch_size; ++j) {
int elem_num = req->insts(j).tensor_array(i).int64_data_size();
@@ -210,8 +217,10 @@ int GeneralReaderOp::inference() {
offset += capacity[i];
}
}
- } else {
+ } else if (elem_type[i] == 1) {
float *dst_ptr = static_cast<float *>(out->at(i).data.data());
+ VLOG(2) << "first element data in var[" << i << "] is "
+ << req->insts(0).tensor_array(i).float_data(0);
int offset = 0;
for (int j = 0; j < batch_size; ++j) {
int elem_num = req->insts(j).tensor_array(i).float_data_size();
@@ -224,6 +233,22 @@ int GeneralReaderOp::inference() {
offset += capacity[i];
}
}
+ } else if (elem_type[i] == 2) {
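+ // elem_type 2: int32 inputs arrive in the tensor's int_data field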
+ int32_t *dst_ptr = static_cast<int32_t *>(out->at(i).data.data());
+ VLOG(2) << "first element data in var[" << i << "] is "
+ << req->insts(0).tensor_array(i).int_data(0);
+ int offset = 0;
+ for (int j = 0; j < batch_size; ++j) {
+ int elem_num = req->insts(j).tensor_array(i).int_data_size();
+ for (int k = 0; k < elem_num; ++k) {
+ dst_ptr[offset + k] = req->insts(j).tensor_array(i).int_data(k);
+ }
+ if (out->at(i).lod.size() == 1) {
+ offset = out->at(i).lod[0][j + 1];
+ } else {
+ offset += capacity[i];
+ }
+ }
}
}
diff --git a/core/general-server/op/general_response_op.cpp b/core/general-server/op/general_response_op.cpp
index 5667a174d9bb6e134e58de72524c60839dc82356..935ef85d847cc17c2d5b76255de0936f0e08a890 100644
--- a/core/general-server/op/general_response_op.cpp
+++ b/core/general-server/op/general_response_op.cpp
@@ -91,7 +91,6 @@ int GeneralResponseOp::inference() {
for (auto &idx : fetch_index) {
Tensor *tensor = fetch_inst->add_tensor_array();
- tensor->set_elem_type(1);
if (model_config->_is_lod_fetch[idx]) {
VLOG(2) << "out[" << idx << "] " << model_config->_fetch_name[idx]
<< " is lod_tensor";
@@ -116,7 +115,7 @@ int GeneralResponseOp::inference() {
cap *= in->at(idx).shape[j];
}
if (in->at(idx).dtype == paddle::PaddleDType::INT64) {
- VLOG(2) << "Prepare float var [" << model_config->_fetch_name[idx]
+ VLOG(2) << "Prepare int64 var [" << model_config->_fetch_name[idx]
<< "].";
int64_t *data_ptr = static_cast<int64_t *>(in->at(idx).data.data());
if (model_config->_is_lod_fetch[idx]) {
@@ -157,6 +156,27 @@ int GeneralResponseOp::inference() {
}
VLOG(2) << "fetch var [" << model_config->_fetch_name[idx] << "] ready";
var_idx++;
+ } else if (in->at(idx).dtype == paddle::PaddleDType::INT32) {
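+ // INT32 outputs are returned to the client through the tensor's int_data field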
+ VLOG(2) << "Prepare int32 var [" << model_config->_fetch_name[idx]
+ << "].";
+ int32_t *data_ptr = static_cast<int32_t *>(in->at(idx).data.data());
+ if (model_config->_is_lod_fetch[idx]) {
+ FetchInst *fetch_p = output->mutable_insts(0);
+ for (int j = 0; j < in->at(idx).lod[0].size(); ++j) {
+ fetch_p->mutable_tensor_array(var_idx)->add_lod(
+ in->at(idx).lod[0][j]);
+ }
+ for (int j = 0; j < cap; ++j) {
+ fetch_p->mutable_tensor_array(var_idx)->add_int_data(data_ptr[j]);
+ }
+ } else {
+ FetchInst *fetch_p = output->mutable_insts(0);
+ for (int j = 0; j < cap; ++j) {
+ fetch_p->mutable_tensor_array(var_idx)->add_int_data(data_ptr[j]);
+ }
+ }
+ VLOG(2) << "fetch var [" << model_config->_fetch_name[idx] << "] ready";
+ var_idx++;
}
}
}
diff --git a/core/predictor/framework/infer.h b/core/predictor/framework/infer.h
index e8c0ff47d86f081516a35576655f843a28b0591b..51cfb95a8d56d4261b9dab99df5216c5e6c79733 100644
--- a/core/predictor/framework/infer.h
+++ b/core/predictor/framework/infer.h
@@ -603,13 +603,13 @@ class VersionedInferEngine : public InferEngine {
LOG(ERROR) << "Failed generate engine with type:" << engine_type;
return -1;
}
- VLOG(2) << "FLGS_logtostderr " << FLAGS_logtostderr;
+ VLOG(2) << "FLAGS_logtostderr " << FLAGS_logtostderr;
int tmp = FLAGS_logtostderr;
if (engine->proc_initialize(conf, version) != 0) {
LOG(ERROR) << "Failed initialize engine, type:" << engine_type;
return -1;
}
- VLOG(2) << "FLGS_logtostderr " << FLAGS_logtostderr;
+ VLOG(2) << "FLAGS_logtostderr " << FLAGS_logtostderr;
FLAGS_logtostderr = tmp;
auto r = _versions.insert(std::make_pair(engine->version(), engine));
if (!r.second) {
diff --git a/doc/qq.jpeg b/doc/qq.jpeg
new file mode 100644
index 0000000000000000000000000000000000000000..d097e55aa5242bd6b4e968e3df48feed299a5e46
Binary files /dev/null and b/doc/qq.jpeg differ
diff --git a/doc/wechat.jpeg b/doc/wechat.jpeg
new file mode 100644
index 0000000000000000000000000000000000000000..52dd20702ec17060992f2c2362db203eacc04a3d
Binary files /dev/null and b/doc/wechat.jpeg differ
diff --git a/python/examples/yolov4/000000570688.jpg b/python/examples/yolov4/000000570688.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..cb304bd56c4010c08611a30dcca58ea9140cea54
Binary files /dev/null and b/python/examples/yolov4/000000570688.jpg differ
diff --git a/python/examples/yolov4/README.md b/python/examples/yolov4/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..08e16026d79ef7e93df732359f2c17609d4a2d0d
--- /dev/null
+++ b/python/examples/yolov4/README.md
@@ -0,0 +1,23 @@
+# Yolov4 Detection Service
+
+([简体中文](README_CN.md)|English)
+
+## Get Model
+
+```
+python -m paddle_serving_app.package --get_model yolov4
+tar -xzvf yolov4.tar.gz
+```
+
+## Start RPC Service
+
+```
+python -m paddle_serving_server_gpu.serve --model yolov4_model --port 9393 --gpu_ids 0
+```
+
+## Prediction
+
+```
+python test_client.py 000000570688.jpg
+```
+After the prediction completes, a JSON file with the detection results and an image with the predicted bounding boxes drawn on it are generated in the `./output` folder.
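+
+For reference, the client script shipped with this example (`test_client.py`) boils down to the sketch below: preprocess the image to the model's 608x608 input, send the `image` and `im_size` feeds, and let `RCNNPostprocess` map the boxes back to the original image size and write the results:
+
+```
+import sys
+import numpy as np
+import cv2
+from paddle_serving_client import Client
+from paddle_serving_app.reader import *
+
+# resize to the 608x608 model input, scale pixels to [0, 1], switch to CHW layout
+preprocess = Sequential([
+    File2Image(), BGR2RGB(), Resize(
+        (608, 608), interpolation=cv2.INTER_LINEAR), Div(255.0), Transpose(
+            (2, 0, 1))
+])
+# map boxes from the 608x608 input back to the original image size
+postprocess = RCNNPostprocess("label_list.txt", "output", [608, 608])
+
+client = Client()
+client.load_client_config("yolov4_client/serving_client_conf.prototxt")
+client.connect(['127.0.0.1:9393'])
+
+im = preprocess(sys.argv[1])
+fetch_map = client.predict(
+    feed={
+        "image": im,
+        "im_size": np.array(list(im.shape[1:])),
+    },
+    fetch=["save_infer_model/scale_0.tmp_0"])
+fetch_map["image"] = sys.argv[1]
+postprocess(fetch_map)  # writes the JSON file and annotated image to ./output
+```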
diff --git a/python/examples/yolov4/README_CN.md b/python/examples/yolov4/README_CN.md
new file mode 100644
index 0000000000000000000000000000000000000000..a4eed96b08619d4602cbd012a676a9adb6e08a63
--- /dev/null
+++ b/python/examples/yolov4/README_CN.md
@@ -0,0 +1,24 @@
+# Yolov4 Detection Service
+
+(Simplified Chinese|[English](README.md))
+
+## Get Model
+
+```
+python -m paddle_serving_app.package --get_model yolov4
+tar -xzvf yolov4.tar.gz
+```
+
+## Start RPC Service
+
+```
+python -m paddle_serving_server_gpu.serve --model yolov4_model --port 9393 --gpu_ids 0
+```
+
+## Prediction
+
+```
+python test_client.py 000000570688.jpg
+```
+
+After the prediction completes, a JSON file with the detection results and an image with the predicted bounding boxes drawn on it are generated in the `./output` folder.
diff --git a/python/examples/yolov4/label_list.txt b/python/examples/yolov4/label_list.txt
new file mode 100644
index 0000000000000000000000000000000000000000..941cb4e1392266f6a6c09b1fdc5f79503b2e5df6
--- /dev/null
+++ b/python/examples/yolov4/label_list.txt
@@ -0,0 +1,80 @@
+person
+bicycle
+car
+motorcycle
+airplane
+bus
+train
+truck
+boat
+traffic light
+fire hydrant
+stop sign
+parking meter
+bench
+bird
+cat
+dog
+horse
+sheep
+cow
+elephant
+bear
+zebra
+giraffe
+backpack
+umbrella
+handbag
+tie
+suitcase
+frisbee
+skis
+snowboard
+sports ball
+kite
+baseball bat
+baseball glove
+skateboard
+surfboard
+tennis racket
+bottle
+wine glass
+cup
+fork
+knife
+spoon
+bowl
+banana
+apple
+sandwich
+orange
+broccoli
+carrot
+hot dog
+pizza
+donut
+cake
+chair
+couch
+potted plant
+bed
+dining table
+toilet
+tv
+laptop
+mouse
+remote
+keyboard
+cell phone
+microwave
+oven
+toaster
+sink
+refrigerator
+book
+clock
+vase
+scissors
+teddy bear
+hair drier
+toothbrush
diff --git a/python/examples/yolov4/test_client.py b/python/examples/yolov4/test_client.py
new file mode 100644
index 0000000000000000000000000000000000000000..92dcd06552ca1fdd3f2d54060e9de501f052e349
--- /dev/null
+++ b/python/examples/yolov4/test_client.py
@@ -0,0 +1,41 @@
+# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import sys
+import numpy as np
+from paddle_serving_client import Client
+from paddle_serving_app.reader import *
+import cv2
+preprocess = Sequential([
+ File2Image(), BGR2RGB(), Resize(
+ (608, 608), interpolation=cv2.INTER_LINEAR), Div(255.0), Transpose(
+ (2, 0, 1))
+])
+
+postprocess = RCNNPostprocess("label_list.txt", "output", [608, 608])
+client = Client()
+
+client.load_client_config("yolov4_client/serving_client_conf.prototxt")
+client.connect(['127.0.0.1:9393'])
+
+im = preprocess(sys.argv[1])
+print(im.shape)
+fetch_map = client.predict(
+ feed={
+ "image": im,
+ "im_size": np.array(list(im.shape[1:])),
+ },
+ fetch=["save_infer_model/scale_0.tmp_0"])
+fetch_map["image"] = sys.argv[1]
+postprocess(fetch_map)
diff --git a/python/paddle_serving_app/models/model_list.py b/python/paddle_serving_app/models/model_list.py
index 0c26a59f6f0537b9c910f21062938d4720d4f9f4..79b3f91bd6584d17ddbc4124584cf40bd586b965 100644
--- a/python/paddle_serving_app/models/model_list.py
+++ b/python/paddle_serving_app/models/model_list.py
@@ -24,7 +24,7 @@ class ServingModels(object):
"SentimentAnalysis"] = ["senta_bilstm", "senta_bow", "senta_cnn"]
self.model_dict["SemanticRepresentation"] = ["ernie"]
self.model_dict["ChineseWordSegmentation"] = ["lac"]
- self.model_dict["ObjectDetection"] = ["faster_rcnn"]
+ self.model_dict["ObjectDetection"] = ["faster_rcnn", "yolov4"]
self.model_dict["ImageSegmentation"] = [
"unet", "deeplabv3", "deeplabv3+cityscapes"
]
diff --git a/python/paddle_serving_app/reader/image_reader.py b/python/paddle_serving_app/reader/image_reader.py
index dc029bf0409179f1d392ce05d007565cd3007085..a44ca5de84da2bafce9b4cea37fb88095debabc6 100644
--- a/python/paddle_serving_app/reader/image_reader.py
+++ b/python/paddle_serving_app/reader/image_reader.py
@@ -280,10 +280,11 @@ class SegPostprocess(object):
class RCNNPostprocess(object):
- def __init__(self, label_file, output_dir):
+ def __init__(self, label_file, output_dir, resize_shape=None):
self.output_dir = output_dir
self.label_file = label_file
self.label_list = []
+ self.resize_shape = resize_shape
with open(label_file) as fin:
for line in fin:
self.label_list.append(line.strip())
@@ -378,6 +379,13 @@ class RCNNPostprocess(object):
xmax = xmin + w
ymax = ymin + h
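+ # boxes were predicted on the resized input; map them back to the original image size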
+ img_w, img_h = image.size
+ if self.resize_shape is not None:
+ xmin = xmin * img_w / self.resize_shape[0]
+ xmax = xmax * img_w / self.resize_shape[0]
+ ymin = ymin * img_h / self.resize_shape[1]
+ ymax = ymax * img_h / self.resize_shape[1]
+
color = tuple(color_list[catid])
# draw bbox
diff --git a/python/paddle_serving_client/__init__.py b/python/paddle_serving_client/__init__.py
index 63f827167de6417a15097d0ea2c7834e7fbf2d20..37f52c48b4c168d93f877a4a7cd4f1bd9afc8b1d 100644
--- a/python/paddle_serving_client/__init__.py
+++ b/python/paddle_serving_client/__init__.py
@@ -28,8 +28,11 @@ sys.path.append(
os.path.join(os.path.abspath(os.path.dirname(__file__)), 'proto'))
from .proto import multi_lang_general_model_service_pb2_grpc
-int_type = 0
-float_type = 1
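+# element type codes shared by client and server: int64 = 0, float32 = 1, int32 = 2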
+int64_type = 0
+float32_type = 1
+int32_type = 2
+int_type = set([int64_type, int32_type])
+float_type = set([float32_type])
class _NOPProfiler(object):
@@ -279,7 +282,7 @@ class Client(object):
raise ValueError("Wrong feed name: {}.".format(key))
#if not isinstance(feed_i[key], np.ndarray):
self.shape_check(feed_i, key)
- if self.feed_types_[key] == int_type:
+ if self.feed_types_[key] in int_type:
if i == 0:
int_feed_names.append(key)
if isinstance(feed_i[key], np.ndarray):
@@ -292,7 +295,7 @@ class Client(object):
else:
int_slot.append(feed_i[key])
self.all_numpy_input = False
- elif self.feed_types_[key] == float_type:
+ elif self.feed_types_[key] in float_type:
if i == 0:
float_feed_names.append(key)
if isinstance(feed_i[key], np.ndarray):
@@ -339,7 +342,7 @@ class Client(object):
result_map = {}
# result map needs to be a numpy array
for i, name in enumerate(fetch_names):
- if self.fetch_names_to_type_[name] == int_type:
+ if self.fetch_names_to_type_[name] == int64_type:
# result_map[name] will be py::array(numpy array)
result_map[name] = result_batch_handle.get_int64_by_name(
mi, name)
@@ -348,7 +351,7 @@ class Client(object):
if name in self.lod_tensor_set:
result_map["{}.lod".format(
name)] = result_batch_handle.get_lod(mi, name)
- elif self.fetch_names_to_type_[name] == float_type:
+ elif self.fetch_names_to_type_[name] == float32_type:
result_map[name] = result_batch_handle.get_float_by_name(
mi, name)
shape = result_batch_handle.get_shape(mi, name)
@@ -356,6 +359,16 @@ class Client(object):
if name in self.lod_tensor_set:
result_map["{}.lod".format(
name)] = result_batch_handle.get_lod(mi, name)
+
+ elif self.fetch_names_to_type_[name] == int32_type:
+ # result_map[name] will be py::array(numpy array)
+ result_map[name] = result_batch_handle.get_int32_by_name(
+ mi, name)
+ shape = result_batch_handle.get_shape(mi, name)
+ result_map[name].shape = shape
+ if name in self.lod_tensor_set:
+ result_map["{}.lod".format(
+ name)] = result_batch_handle.get_lod(mi, name)
multi_result_map.append(result_map)
ret = None
if len(model_engine_names) == 1:
@@ -460,6 +473,8 @@ class MultiLangClient(object):
data = np.array(var, dtype="int64")
elif v_type == 1: # float32
data = np.array(var, dtype="float32")
+ elif v_type == 2: # int32
+ data = np.array(var, dtype="int32")
else:
raise Exception("error type.")
else:
@@ -478,6 +493,11 @@ class MultiLangClient(object):
tensor.float_data.extend(var.reshape(-1).tolist())
else:
tensor.float_data.extend(self._flatten_list(var))
+ elif v_type == 2: # int32
+ if isinstance(var, np.ndarray):
+ tensor.int_data.extend(var.reshape(-1).tolist())
+ else:
+ tensor.int_data.extend(self._flatten_list(var))
else:
raise Exception("error type.")
if isinstance(var, np.ndarray):
@@ -509,6 +529,9 @@ class MultiLangClient(object):
elif v_type == 1: # float32
result_map[name] = np.array(
list(var.float_data), dtype="float32")
+ elif v_type == 2: # int32
+ result_map[name] = np.array(
+ list(var.int_data), dtype="int32")
else:
raise Exception("error type.")
result_map[name].shape = list(var.shape)
diff --git a/python/paddle_serving_client/io/__init__.py b/python/paddle_serving_client/io/__init__.py
index 20d29e2bdfe0d2753d2f23cda028d76a3b13c699..69e185be3d2e4d1a579a29d30b59341bfb8666ed 100644
--- a/python/paddle_serving_client/io/__init__.py
+++ b/python/paddle_serving_client/io/__init__.py
@@ -48,16 +48,18 @@ def save_model(server_model_folder,
config = model_conf.GeneralModelConfig()
+ # int64 = 0; float32 = 1; int32 = 2;
for key in feed_var_dict:
feed_var = model_conf.FeedVar()
feed_var.alias_name = key
feed_var.name = feed_var_dict[key].name
feed_var.is_lod_tensor = feed_var_dict[key].lod_level >= 1
- if feed_var_dict[key].dtype == core.VarDesc.VarType.INT32 or \
- feed_var_dict[key].dtype == core.VarDesc.VarType.INT64:
+ if feed_var_dict[key].dtype == core.VarDesc.VarType.INT64:
feed_var.feed_type = 0
if feed_var_dict[key].dtype == core.VarDesc.VarType.FP32:
feed_var.feed_type = 1
+ if feed_var_dict[key].dtype == core.VarDesc.VarType.INT32:
+ feed_var.feed_type = 2
if feed_var.is_lod_tensor:
feed_var.shape.extend([-1])
else:
@@ -73,13 +75,12 @@ def save_model(server_model_folder,
fetch_var.alias_name = key
fetch_var.name = fetch_var_dict[key].name
fetch_var.is_lod_tensor = fetch_var_dict[key].lod_level >= 1
- if fetch_var_dict[key].dtype == core.VarDesc.VarType.INT32 or \
- fetch_var_dict[key].dtype == core.VarDesc.VarType.INT64:
+ if fetch_var_dict[key].dtype == core.VarDesc.VarType.INT64:
fetch_var.fetch_type = 0
-
if fetch_var_dict[key].dtype == core.VarDesc.VarType.FP32:
fetch_var.fetch_type = 1
-
+ if fetch_var_dict[key].dtype == core.VarDesc.VarType.INT32:
+ fetch_var.fetch_type = 2
if fetch_var.is_lod_tensor:
fetch_var.shape.extend([-1])
else:
diff --git a/python/pipeline/channel.py b/python/pipeline/channel.py
new file mode 100644
index 0000000000000000000000000000000000000000..a0eed6da107c0955be0d0bbcdda2967402b84b68
--- /dev/null
+++ b/python/pipeline/channel.py
@@ -0,0 +1,14 @@
+# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# pylint: disable=doc-string-missing
diff --git a/python/pipeline/operator.py b/python/pipeline/operator.py
new file mode 100644
index 0000000000000000000000000000000000000000..a0eed6da107c0955be0d0bbcdda2967402b84b68
--- /dev/null
+++ b/python/pipeline/operator.py
@@ -0,0 +1,14 @@
+# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# pylint: disable=doc-string-missing
diff --git a/python/pipeline/pipeline_server.py b/python/pipeline/pipeline_server.py
new file mode 100644
index 0000000000000000000000000000000000000000..a0eed6da107c0955be0d0bbcdda2967402b84b68
--- /dev/null
+++ b/python/pipeline/pipeline_server.py
@@ -0,0 +1,14 @@
+# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# pylint: disable=doc-string-missing
diff --git a/python/pipeline/profiler.py b/python/pipeline/profiler.py
new file mode 100644
index 0000000000000000000000000000000000000000..a0eed6da107c0955be0d0bbcdda2967402b84b68
--- /dev/null
+++ b/python/pipeline/profiler.py
@@ -0,0 +1,14 @@
+# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# pylint: disable=doc-string-missing
diff --git a/tools/serving_build.sh b/tools/serving_build.sh
index 989e48ead9864e717e573f7f0800a1afba2e934a..b70936e5522b52964518d017fece3ceb78d66b87 100644
--- a/tools/serving_build.sh
+++ b/tools/serving_build.sh
@@ -499,6 +499,41 @@ function python_test_lac() {
cd ..
}
+function python_test_yolov4(){
+ # pwd: /Serving/python/examples
+ local TYPE=$1
+ export SERVING_BIN=${SERVING_WORKDIR}/build-server-${TYPE}/core/general-server/serving
+ cd yolov4
+ case $TYPE in
+ CPU)
+ python -m paddle_serving_app.package --get_model yolov4
+ tar -xzvf yolov4.tar.gz
+ check_cmd "python -m paddle_serving_server.serve --model yolov4_model/ --port 9393 &"
+ sleep 5
+ check_cmd "python test_client.py 000000570688.jpg"
+ echo "yolov4 CPU RPC inference pass"
+ kill_server_process
+ ;;
+ GPU)
+ python -m paddle_serving_app.package --get_model yolov4
+ tar -xzvf yolov4.tar.gz
+ check_cmd "python -m paddle_serving_server_gpu.serve --model yolov4_model/ --port 9393 --gpu_ids 0 &"
+ sleep 5
+ check_cmd "python test_client.py 000000570688.jpg"
+ echo "yolov4 GPU RPC inference pass"
+ kill_server_process
+ ;;
+ *)
+ echo "error type"
+ exit 1
+ ;;
+ esac
+ echo "test yolov4 $TYPE finished as expected."
+ unset SERVING_BIN
+ cd ..
+}
+
+
function python_run_test() {
# Using the compiled binary
local TYPE=$1 # pwd: /Serving
@@ -510,6 +545,7 @@ function python_run_test() {
python_test_lac $TYPE # pwd: /Serving/python/examples
python_test_multi_process $TYPE # pwd: /Serving/python/examples
python_test_multi_fetch $TYPE # pwd: /Serving/python/examples
+ python_test_yolov4 $TYPE # pwd: /Serving/python/examples
echo "test python $TYPE part finished as expected."
cd ../.. # pwd: /Serving
}