Unverified commit 77ac3066, authored by Jiawei Wang, committed via GitHub

Merge branch 'develop' into kv_tool

@@ -107,6 +107,7 @@ if (SERVER OR CLIENT)
    include(external/gflags)
    include(external/glog)
    include(external/utf8proc)
+   include(external/jemalloc)
    if (WITH_PYTHON)
        include(external/pybind11)
        include(external/python)
......
# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
INCLUDE(ExternalProject)
SET(JEMALLOC_SOURCES_DIR ${THIRD_PARTY_PATH}/jemalloc)
SET(JEMALLOC_INSTALL_DIR ${THIRD_PARTY_PATH}/install/jemalloc)
SET(JEMALLOC_INCLUDE_DIR "${JEMALLOC_INSTALL_DIR}/include" CACHE PATH "jemalloc include directory." FORCE)
ExternalProject_Add(
    extern_jemalloc
    ${EXTERNAL_PROJECT_LOG_ARGS}
    GIT_REPOSITORY    "https://github.com/jemalloc/jemalloc.git"
    GIT_TAG           "5.2.1"
    PREFIX            ${JEMALLOC_SOURCES_DIR}
    CONFIGURE_COMMAND ""
    BUILD_COMMAND     ""
    INSTALL_COMMAND   cd ${JEMALLOC_SOURCES_DIR}/src/extern_jemalloc/ && sh autogen.sh
        && make
        && mkdir -p ${JEMALLOC_INSTALL_DIR}/lib/
        && cp ${JEMALLOC_SOURCES_DIR}/src/extern_jemalloc/lib/libjemalloc.a ${JEMALLOC_INSTALL_DIR}/lib
        && cp ${JEMALLOC_SOURCES_DIR}/src/extern_jemalloc/lib/libjemalloc_pic.a ${JEMALLOC_INSTALL_DIR}/lib
        && cp -r ${JEMALLOC_SOURCES_DIR}/src/extern_jemalloc/include/jemalloc ${JEMALLOC_INCLUDE_DIR}
    TEST_COMMAND ""
)
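Not part of this patch, but for orientation: the sketch below shows how a C++ target that adds `${JEMALLOC_INCLUDE_DIR}` to its include path and links the installed `libjemalloc_pic.a` might query allocator statistics through jemalloc's `mallctl` interface. It assumes jemalloc is built without a symbol prefix, which is the default for the configuration above.
```C++
#include <cstdio>

#include <jemalloc/jemalloc.h>

int main() {
  // Advance jemalloc's statistics epoch so that the counters read below are refreshed.
  uint64_t epoch = 1;
  size_t epoch_size = sizeof(epoch);
  mallctl("epoch", &epoch, &epoch_size, &epoch, epoch_size);

  // Read the number of bytes currently allocated by the application.
  size_t allocated = 0;
  size_t allocated_size = sizeof(allocated);
  if (mallctl("stats.allocated", &allocated, &allocated_size, nullptr, 0) == 0) {
    std::printf("jemalloc stats.allocated = %zu bytes\n", allocated);
  }
  return 0;
}
```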
@@ -107,7 +107,8 @@ int /* custom class name */::inference() {
  VLOG(2) << "(logid=" << log_id << ") infer batch size: " << batch_size;
  TensorVector *out = &output_blob->tensor_vector;
-  /* Add the pre-processing code here; pre-processing directly modifies the TensorVector* in above */
+  // Add the pre-processing code here; pre-processing directly modifies the TensorVector* in above
+  // Note: the data in "in" is the post-processed "out" data produced by the preceding node
  Timer timeline;
  int64_t start = timeline.TimeStampUS();
@@ -121,7 +122,8 @@ int /* custom class name */::inference() {
    return -1;
  }
-  /* Add the post-processing code here; pre-processing directly modifies the TensorVector *out above */
+  // Add the post-processing code here; post-processing directly modifies the TensorVector* out above
+  // The post-processed "out" is passed on to the downstream nodes
  int64_t end = timeline.TimeStampUS();
  CopyBlobInfo(input_blob, output_blob);
@@ -135,6 +137,67 @@ DEFINE_OP(/* custom class name */);
}  // namespace paddle_serving
}  // namespace baidu
```
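As a hedged illustration (not part of the original document), the fragment below sketches what an in-place pre-processing step inside `inference()` might look like, assuming the preceding node produced a single FLOAT32 tensor; the scaling factor is purely illustrative. See the TensorVector section below for the data structures involved.
```C++
// Hypothetical pre-processing sketch: scale every float element of the
// first input tensor in place. Assumes in->at(0) holds FLOAT32 data.
paddle::PaddleTensor& input_tensor = in->at(0);
float* input_data = static_cast<float*>(input_tensor.data.data());
size_t element_num = input_tensor.data.length() / sizeof(float);
for (size_t i = 0; i < element_num; ++i) {
  input_data[i] /= 255.0f;  // illustrative normalization only
}
```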
### TensorVector data structure
Both `TensorVector* in` and `out` are pointers to a TensorVector. They are used in almost the same way as Tensor in the Paddle C++ API. The related data structures are shown below.
```C++
//TensorVector
typedef std::vector<paddle::PaddleTensor> TensorVector;
//paddle::PaddleTensor
struct PD_INFER_DECL PaddleTensor {
  PaddleTensor() = default;
  std::string name;  ///< variable name.
  std::vector<int> shape;
  PaddleBuf data;  ///< blob of data.
  PaddleDType dtype;
  std::vector<std::vector<size_t>> lod;  ///< Tensor+LoD equals LoDTensor
};

//PaddleBuf
class PD_INFER_DECL PaddleBuf {
 public:
  explicit PaddleBuf(size_t length)
      : data_(new char[length]), length_(length), memory_owned_(true) {}
  PaddleBuf(void* data, size_t length)
      : data_(data), length_(length), memory_owned_{false} {}
  explicit PaddleBuf(const PaddleBuf& other);
  void Resize(size_t length);
  void Reset(void* data, size_t length);
  bool empty() const { return length_ == 0; }
  void* data() const { return data_; }
  size_t length() const { return length_; }
  ~PaddleBuf() { Free(); }
  PaddleBuf& operator=(const PaddleBuf&);
  PaddleBuf& operator=(PaddleBuf&&);
  PaddleBuf() = default;
  PaddleBuf(PaddleBuf&& other);

 private:
  void Free();
  void* data_{nullptr};   ///< pointer to the data memory.
  size_t length_{0};      ///< number of memory bytes.
  bool memory_owned_{true};
};
```
### TensorVector code example
```C++
/* For example, to access the 1st Tensor in the input data */
paddle::PaddleTensor& tensor_1 = in->at(0);
/* For example, to change the name of the 1st Tensor in the input data */
tensor_1.name = "new name";
/* For example, to get the shape of the 1st Tensor in the input data */
std::vector<int> tensor_1_shape = tensor_1.shape;
/* For example, to modify the data of the 1st Tensor in the input data */
void* data_1 = tensor_1.data.data();
// From here on, simply modify the memory that data_1 points to.
// For example, if your data is of type int, cast the void* to int* and work with it.
```
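Building on the structures above, here is a further sketch (not from the original document) of how post-processing might append one result tensor to `out`; the tensor name, shape, and values are placeholders, and the fragment assumes it runs inside `inference()` where `out` is available.
```C++
// Hypothetical post-processing sketch: build one FLOAT32 output tensor
// and append it to the output TensorVector "out".
paddle::PaddleTensor result;
result.name = "my_output";                    // placeholder name
result.dtype = paddle::PaddleDType::FLOAT32;  // element type of the buffer below
result.shape = {1, 3};                        // placeholder shape: 1 x 3

// Allocate room for 3 floats in the tensor's PaddleBuf and fill it.
result.data.Resize(3 * sizeof(float));
float* result_data = static_cast<float*>(result.data.data());
result_data[0] = 0.1f;
result_data[1] = 0.2f;
result_data[2] = 0.7f;

out->push_back(result);
```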
# 2. Compile
At this point you need to rebuild Serving, set the environment variable `export SERVING_BIN` so that the serving binary you built is used, and install the related Python packages via `pip3 install`. For details, please refer to [How to compile Serving](../Compile_CN.md).
@@ -142,19 +205,19 @@ DEFINE_OP(/* custom class name */);
# 3. Starting and calling the service
## 3.1 Starting the server
-Building on the work of the previous two sections, to start one service that chains two models you only need to pass the relative paths of the model folders, in order, after `--model`, and pass the custom C++ OP class names, in order, after `--op`; the order of the models after `--model` must match the order of the class names after `--op`. The script is as follows:
+Building on the work of the previous two sections, to start one service that chains two models you only need to pass the relative paths of the model folders, in order, after `--model`, and pass the custom C++ OP class names, in order, after `--op`; the order of the models after `--model` must match the order of the class names after `--op`. Assuming here that two OPs, GeneralDetectionOp and GeneralRecOp, have already been defined, the script is as follows:
```python
# one service starting multiple chained models
-python3 -m paddle_serving_server.serve --model ocr_det_model ocr_rec_model --op GeneralDetectionOp GeneralInferOp --port 9292
+python3 -m paddle_serving_server.serve --model ocr_det_model ocr_rec_model --op GeneralDetectionOp GeneralRecOp --port 9292
-# chained models: ocr_det_model corresponds to GeneralDetectionOp, ocr_rec_model corresponds to GeneralInferOp
+# chained models: ocr_det_model corresponds to GeneralDetectionOp, ocr_rec_model corresponds to GeneralRecOp
```
## 3.2 Calling from the client
-At this point the client call also needs to pass in the paths of the two client-side proto files or folders. Taking OCR as an example, the Python script is as follows:
+At this point the client call also needs to pass in the paths of the two client-side proto files or folders. Taking OCR as an example, you can refer to [ocr_cpp_client.py](../../examples/C++/PaddleOCR/ocr/ocr_cpp_client.py) to write your own script; the client is then invoked as follows:
```python
# one service starting multiple chained models
-python3 [ocr_cpp_client.py](../../examples/C++/PaddleOCR/ocr/ocr_cpp_client.py) ocr_det_client ocr_rec_client
+python3 your_custom.py ocr_det_client ocr_rec_client
# ocr_det_client is the relative path of the client-side proto folder of the first model
# ocr_rec_client is the relative path of the client-side proto folder of the second model
```
-For the server side, the format of the input data is consistent with what the `client-side proto of the first model` defines, and the format of the output data is consistent with the `client-side proto of the last model`. If you are unfamiliar with [the proto definition, please refer here](./Serving_Configure_CN.md).
+For the server side, the format of the input data is consistent with what the `client-side proto of the first model` defines, and the format of the output data is consistent with the `client-side proto of the last model`. In most cases you do not need to worry about this; when you need the detailed [proto definition, please refer here](./Serving_Configure_CN.md).
@@ -85,7 +85,7 @@ print(fetch_map)
Users can also put the data-format processing logic on the server side, so that the service can be accessed directly with curl; refer to the following example in the directory `Serving/examples/C++/fit_a_line`.
```
-python3 -m paddle_serving_server.serve --model uci_housing_model --thread 10 --port 9292 --name uci
+python3 -m paddle_serving_server.serve --model uci_housing_model --thread 10 --port 9292
```
Client input
```
......
@@ -57,7 +57,7 @@ Here, `client.predict` function has two arguments. `feed` is a `python dict` wit
Users can also put the data format processing logic on the server side, so that they can directly use curl to access the service; refer to the following case, whose path is `Serving/examples/C++/fit_a_line`.
```
-python3 -m paddle_serving_server.serve --model uci_housing_model --thread 10 --port 9292 --name uci
+python3 -m paddle_serving_server.serve --model uci_housing_model --thread 10 --port 9292
```
for the client side,
```
......
@@ -54,6 +54,7 @@ fetch_var {
|6|BF16|
|7|UINT8|
|8|INT8|
+|20|STRING|
- shape: data dimensions
......
@@ -53,6 +53,7 @@ fetch_var {
|6|BF16|
|7|UINT8|
|8|INT8|
+|20|STRING|
- shape: tensor shape
......
doc/images/wechat_group_1.jpeg (binary image replaced: 62.0 KB → 338.0 KB)
@@ -32,6 +32,7 @@ precision_map = {
    'int8': paddle_infer.PrecisionType.Int8,
    'fp32': paddle_infer.PrecisionType.Float32,
    'fp16': paddle_infer.PrecisionType.Half,
+   'bf16': 'bf16',
}
@@ -194,6 +195,8 @@ class LocalPredictor(object):
config.set_cpu_math_library_num_threads(thread_num)
if use_mkldnn:
    config.enable_mkldnn()
+   if precision_type == "bf16":
+       config.enable_mkldnn_bfloat16()
if mkldnn_cache_capacity > 0:
    config.set_mkldnn_cache_capacity(mkldnn_cache_capacity)
if mkldnn_op_list is not None:
......