Commit ff67ebb9 authored by wangguibao

predictor

Parent 6b107eca
#coding:gbk
COMPILER('gcc482')
#Working directory.
WORKROOT('../../../')
# version info
## module name
repo_module = REPO_PATH()
## git branch name (master/rb)
repo_name = REPO_BRANCH()
## last changed version
repo_version = REPO_LAST_CHANGED_REV()
version = repo_module + '_' + repo_name.split('/')[-1] + ',' + repo_version
build_time = os.popen('date +%Y-%m-%d_%H:%M:%S').read().strip()
#Preprocessor flags.
CPPFLAGS(r'-DPDSERVING_VERSION=\"%s\"' % (version))
CPPFLAGS(r'-DPDSERVING_BUILDTIME=\"%s\"' % (build_time))
#Copy using hard links.
#CopyUsingHardLink(True)
#C++ compile flags.
#CXXFLAGS('-fsanitize=address -g -DNDEBUG -O2 -pipe -W -Wall -fPIC -fno-omit-frame-pointer -Wno-deprecated \
# -Wno-unused-parameter -Wno-unused-variable -Wno-unused-local-typedefs -Wno-sign-compare \
# -std=c++11')
CXXFLAGS('-g -O2 -pipe -W -Wall -fPIC -fno-omit-frame-pointer -Wno-deprecated \
-Wno-unused-parameter -Wno-unused-variable -Wno-unused-local-typedefs -Wno-sign-compare \
-std=c++11')
#for profiler
#CPPFLAGS('-D__const__= -Dtypeof=__typeof__ -DUSE_PTHREAD -DUSE_XBOX -DBAIDU_RPC_ENABLE_CPU_PROFILER -DBAIDU_RPC_ENABLE_HEAP_PROFILER')
#Compiling the idl code with C++11 fails with "error: 'typeof' was not declared in this scope"; -Dtypeof=__typeof__ is required.
CPPFLAGS('-D__const__= -Dtypeof=__typeof__ -DUSE_PTHREAD')
#Link flags.
LDFLAGS('-lpthread -lcrypto -lrt -lssl -ldl -lz')
#Dependency modules
CONFIGS('baidu/base/baidu-rpc@ci-base')
CONFIGS('baidu/im-common/mempool@mempool_1-0-1_BRANCH@git_branch')
CONFIGS('baidu/third-party/opencv@master@git_branch')
CONFIGS('public/configure@configure_1-2-17-0_PD_BL')
#CONFIGS('lib2-64/ullib@ullib_3-1-135-21782_PD_BL')
CONFIGS('third-64/boost@boost_1-63-0-101_PD_BL')
CONFIGS('public/bthread@ci-base')
CONFIGS('third-64/protobuf@protobuf_2-4-1-1100_PD_BL')
#CONFIGS('third-64/protobuf@protobuf_3-1-0-6209_PD_BL')
# for ut
CONFIGS('third-64/gtest@1.7.2.0')
# for profiler
#CONFIGS('thirdsrc/tcmalloc@2.5.0.5977', Libraries('libtcmalloc_and_profiler.a'))
# McCache
CONFIGS('baidu/base/cache@cache_3-1-7-21784_PD_BL')
INCPATHS('$OUT/include/')
PROTOFLAGS('--proto_path=.')
#ServiceGenerator
HEADERS(GLOB_GEN_SRCS('./proto/*.h'), '$INC/')
HEADERS(GLOB('./plugin/*.h'), '$INC/plugin')
Application('pdcodegen', Sources(GLOB('plugin/*.cc'), 'proto/pds_option.proto',
'src/pdcodegen.cpp', IncludePaths('. ./proto/ $OUT/include')))
HEADERS(GLOB_GEN_SRCS('./proto/*.h'), '$INC/')
HEADERS(GLOB('./proto/*.proto'), '$INC/proto')
HEADERS(GLOB('./common/*.h'), '$INC/common')
HEADERS(GLOB('./op/*.h'), '$INC/op')
HEADERS(GLOB('./framework/*.h'), '$INC/framework')
cpp_source_dirs = []
cpp_source_dirs.append('common/*.cpp')
cpp_source_dirs.append('op/*.cpp')
cpp_source_dirs.append('framework/*.cpp')
cpp_source_dirs.append('proto/*.proto')
#Allow .proto files as source files
PROTOFLAGS('--plugin=protoc-gen-pdcodegen=plugin/pdcodegen --pdcodegen_out proto --proto_path=.')
#StaticLib
StaticLibrary('pdserving',
Sources(GLOB(' '.join(cpp_source_dirs)), 'src/pdserving.cpp'),
LinkFlags('-lpthread -lcrypto -lm -lrt -lssl -ldl -lz'))
#Executables
Application('pdserving',
Sources(GLOB(' '.join(cpp_source_dirs)), 'src/pdserving.cpp'),
LinkFlags('-lpthread -lcrypto -lm -lrt -lssl -ldl -lz '))
Application('pdclient',
Sources(GLOB(' '.join(cpp_source_dirs)), 'src/pdclient.cpp'),
LinkFlags('-lpthread -lcrypto -lm -lrt -lssl -ldl -lz'))
#Unit tests
ut_include='./include ./unittest $OUT/include'
ut_sources=GLOB('./unittest/*.cpp')
ut_cppflag='-Dprivate=public -Dprotected=public -DUNIT_TEST -std=c++11 -usercode_in_pthread -DUSE_PTHREAD'
ut_ldflags='-lpthread -lssl -lcrypto -lrt -ldl -lz -std=c++11 -usercode_in_pthread '
ut_gdbflag='-O0 -g -fpermissive -std=c++11 -usercode_in_pthread'
UTApplication(
'test_pdserving',
Sources(
ut_sources,
IncludePaths(ut_include),
CppFlags(ut_cppflag),
CxxFlags(ut_gdbflag)
),
Libraries('$OUT/lib/libpdserving.a'),
LinkFlags(ut_ldflags),
UTOnServer(True))
OUTPUT('./conf', '$OUT/')
OUTPUT('./data', '$OUT/')
OUTPUT('./scripts/images', '$OUT/data/')
OUTPUT('./scripts/start.sh', '$OUT/bin/')
FROM registry.baidu.com/public/centos6u3-online:gcc482
MAINTAINER predictor@baidu.com
LABEL Description="paddle serving docker image"
USER root
RUN echo "Enjoy your paddle serving journey!"
ADD conf /home/work/paddle-serving/conf
ADD data /home/work/paddle-serving/data
ADD bin /home/work/paddle-serving/bin
RUN wget ftp://st01-rdqa-dev055-wanlijin01.epc.baidu.com/home/users/wanlijin01/workspace/baidu/paddle-serving/predictor/data.tar.gz -O /tmp/data.tar.gz \
&& tar -C /home/work/paddle-serving -xvzf /tmp/data.tar.gz \
&& rm /tmp/data.tar.gz \
&& cd /home/work/paddle-serving/ \
&& chmod a+x bin/pdserving \
&& chmod a+x bin/start.sh \
&& sed -i 's/\.\/conf/\/home\/work\/paddle-serving\/conf/g' conf/workflow.conf \
&& sed -i 's/\.\/conf/\/home\/work\/paddle-serving\/conf/g' conf/resource.conf \
&& sed -i 's/\.\/log/\/home\/work\/paddle-serving\/log/g' conf/log.conf \
&& sed -i 's/\.\/data/\/home\/work\/paddle-serving\/data/g' conf/model_toolkit.conf \
&& mkdir -p /home/work/paddle-serving/log
CMD sh /home/work/paddle-serving/bin/start.sh -c "trap : TERM INT; sleep infinity & wait"
FROM registry.baidu.com/paddlecloud/paddlecloud-runenv-centos6u3-bce:paddlecloud-fluid-gcc482-cuda8.0_cudnn5_bce
MAINTAINER predictor@baidu.com
LABEL Description="paddle serving docker image"
USER root
RUN echo "Enjoy your paddle serving journey!"
ADD conf /home/work/paddle-serving/conf
ADD data /home/work/paddle-serving/data
ADD bin /home/work/paddle-serving/bin
RUN wget ftp://st01-rdqa-dev055-wanlijin01.epc.baidu.com/home/users/wanlijin01/workspace/baidu/paddle-serving/predictor/data.tar.gz -O /tmp/data.tar.gz \
&& tar -C /home/work/paddle-serving -xvzf /tmp/data.tar.gz \
&& rm /tmp/data.tar.gz \
&& cd /home/work/paddle-serving/ \
&& chmod a+x bin/pdserving \
&& chmod a+x bin/start.sh \
&& sed -i 's/\.\/conf/\/home\/work\/paddle-serving\/conf/g' conf/workflow.conf \
&& sed -i 's/\.\/conf/\/home\/work\/paddle-serving\/conf/g' conf/resource.conf \
&& sed -i 's/\.\/log/\/home\/work\/paddle-serving\/log/g' conf/log.conf \
&& sed -i 's/\.\/data/\/home\/work\/paddle-serving\/data/g' conf/model_toolkit.conf \
&& mkdir -p /home/work/paddle-serving/log
CMD sh /home/work/paddle-serving/bin/start.sh -c "trap : TERM INT; sleep infinity & wait"
#
# bcloud default makefile
# more details please refer to ./.bcloud.cache/makefile.*
#
.SILENT:
#
#makefile-self location, must be always on the top!!!
#
MAKEFILE_PATH_DEFAULT := $(word $(words $(MAKEFILE_LIST)), $(MAKEFILE_LIST))
MAKEFILE_DIR_DEFAULT := $(shell cd $(dir $(MAKEFILE_PATH_DEFAULT)) && pwd)
MAKEFILE_DEAULT := 'imported'
#
#Global Configs
#
WORK_ROOT := $(shell cd $(MAKEFILE_DIR_DEFAULT)/../../.. && pwd)
GCC_PATH := /opt/compiler/gcc-4.8.2/bin
#
#import global configs
#
ifneq ($(MAKEFILE_CONFIG), 'imported')
include $(MAKEFILE_DIR_DEFAULT)/.bcloud.cache/makefile.config
endif
.PHONY: all clean distclean cleanall help
all: main
clean:
echo removing $(WORK_ROOT)/bc_out/baidu/paddle-serving/predictor/\*.o ...
find $(WORK_ROOT)/bc_out/baidu/paddle-serving/predictor -name "*.o" | xargs rm -rf
echo removing $(WORK_ROOT)/bc_out/baidu/paddle-serving/predictor/output/bin ...
rm -rf $(WORK_ROOT)/bc_out/baidu/paddle-serving/predictor/output/bin
echo removing $(WORK_ROOT)/bc_out/baidu/paddle-serving/predictor/output/lib ...
rm -rf $(WORK_ROOT)/bc_out/baidu/paddle-serving/predictor/output/lib
echo removing $(WORK_ROOT)/bc_out/baidu/paddle-serving/predictor/output/so ...
rm -rf $(WORK_ROOT)/bc_out/baidu/paddle-serving/predictor/output/so
echo removing $(WORK_ROOT)/bc_out/baidu/paddle-serving/predictor/output/test ...
rm -rf $(WORK_ROOT)/bc_out/baidu/paddle-serving/predictor/output/test
echo removing $(WORK_ROOT)/baidu/paddle-serving/predictor/output ...
rm -rf $(WORK_ROOT)/baidu/paddle-serving/predictor/output
distclean:
echo removing $(WORK_ROOT)/baidu/paddle-serving/predictor/Makefile ...
rm -rf $(WORK_ROOT)/baidu/paddle-serving/predictor/Makefile
echo removing $(WORK_ROOT)/baidu/paddle-serving/predictor/.bcloud.cache/makefile.baidu_paddle-serving_predictor ...
rm -rf $(WORK_ROOT)/baidu/paddle-serving/predictor/.bcloud.cache/makefile.baidu_paddle-serving_predictor
echo removing $(WORK_ROOT)/bc_out/baidu/paddle-serving/predictor ...
rm -rf $(WORK_ROOT)/bc_out/baidu/paddle-serving/predictor
echo removing $(WORK_ROOT)/baidu/paddle-serving/predictor/output ...
rm -rf $(WORK_ROOT)/baidu/paddle-serving/predictor/output
echo -e '\033[32m[NOTICE]\033[0m'
echo -e \\tplease run \'bcloud local -M\' before next \'make\'!!!
cleanall:
echo removing $(WORK_ROOT)/bc_out ...
rm -rf $(WORK_ROOT)/bc_out
echo removing $(WORK_ROOT)/baidu/paddle-serving/predictor/output ...
rm -rf $(WORK_ROOT)/baidu/paddle-serving/predictor/output
echo removing $(WORK_ROOT)/baidu/paddle-serving/predictor/Makefile ...
rm -rf $(WORK_ROOT)/baidu/paddle-serving/predictor/Makefile
echo removing $(WORK_ROOT)/baidu/paddle-serving/predictor/.bcloud.cache/makefile\* ...
rm -rf $(WORK_ROOT)/baidu/paddle-serving/predictor/.bcloud.cache/makefile*
echo -e '\033[32m[NOTICE]\033[0m'
echo -e \\tplease run \'bcloud local or bcloud local -M ALL\' before next \'make\'!!!
app/ecom/elib/ecommon-lib: app/ecom/elib/ecommon-lib_deps
baidu/base/protobuf-json: baidu/base/protobuf-json_deps
baidu/base/common: baidu/base/common_deps
third-64/gtest: third-64/gtest_deps
baidu/base/bvar: baidu/base/bvar_deps
public/protobuf-json: public/protobuf-json_deps
baidu/base/cache: baidu/base/cache_deps
baidu/base/spreg: baidu/base/spreg_deps
baidu/bns/naming-lib: baidu/bns/naming-lib_deps
lib2-64/bsl: lib2-64/bsl_deps
third-64/gflags: third-64/gflags_deps
third-64/pcre: third-64/pcre_deps
lib2-64/cache: lib2-64/cache_deps
baidu/base/bthread: baidu/base/bthread_deps
third-64/leveldb: third-64/leveldb_deps
lib2-64/ullib: lib2-64/ullib_deps
public/common: public/common_deps
public/bvar: public/bvar_deps
baidu/base/bsl: baidu/base/bsl_deps
baidu/base/configure: baidu/base/configure_deps
public/spreg: public/spreg_deps
public/configure: public/configure_deps
baidu/base/ullib: baidu/base/ullib_deps
baidu/base/baidu-rpc: baidu/base/baidu-rpc_deps
third-64/libevent: third-64/libevent_deps
baidu/third-party/opencv: baidu/third-party/opencv_deps
baidu/base/dict: baidu/base/dict_deps
op/oped/noah/webfoot/naming-lib: op/oped/noah/webfoot/naming-lib_deps
baidu/elib/ecommon-lib: baidu/elib/ecommon-lib_deps
public/bthread: public/bthread_deps
public/noah/giano-lib/release/baas-lib-c: public/noah/giano-lib/release/baas-lib-c_deps
app/ecom/im/mempool: app/ecom/im/mempool_deps
baidu/base/mcpack2pb: baidu/base/mcpack2pb_deps
third-64/boost: third-64/boost_deps
public/baidu-rpc: public/baidu-rpc_deps
public/mcpack2pb: public/mcpack2pb_deps
baidu/base/iobuf: baidu/base/iobuf_deps
public/iobuf: public/iobuf_deps
baidu/im-common/mempool: baidu/im-common/mempool_deps
lib2-64/dict: lib2-64/dict_deps
help:
echo -e 'all available targets of make($(MAKE_VERSION)):\n'
echo ' all : will compile all targets and run release.bcloud'
echo ' no-release.bcloud: will compile all targets and not run release.bcloud'
echo ' no-ut : will compile all targets without ut and run release.bcloud'
echo ' test : will compile all targets, run ut and run release.bcloud'
echo ' clean : will only cleanup Intermediate files(such as .o, .so, .a, ut and bin) of main module:'
echo ' $(WORK_ROOT)/bc_out/baidu/paddle-serving/predictor/output/bin'
echo ' $(WORK_ROOT)/bc_out/baidu/paddle-serving/predictor/output/lib'
echo ' $(WORK_ROOT)/bc_out/baidu/paddle-serving/predictor/output/so'
echo ' $(WORK_ROOT)/bc_out/baidu/paddle-serving/predictor/\*.o'
echo ' $(WORK_ROOT)/bc_out/baidu/paddle-serving/predictor/output/test'
echo ' $(WORK_ROOT)/baidu/paddle-serving/predictor/output'
echo ' distclean : will cleanup makefile, intermediate files(such as .o, .so, .a, ut and bin) and pb.cc/pb.h idl.h/idl.cpp of main module:'
echo ' $(WORK_ROOT)/baidu/paddle-serving/predictor/Makefile'
echo ' $(WORK_ROOT)/baidu/paddle-serving/predictor/.bcloud.cache/makefile.baidu_paddle-serving_predictor'
echo ' $(WORK_ROOT)/bc_out/baidu/paddle-serving/predictor'
echo ' $(WORK_ROOT)/baidu/paddle-serving/predictor/output'
echo ' cleanall : will cleanup makefiles, intermediate files(such as .o, .so, .a, ut and bin) and pb.cc/pb.h idl.h/idl.cpp of all modules:'
echo ' $(WORK_ROOT)/bc_out'
echo ' $(WORK_ROOT)/baidu/paddle-serving/predictor/output'
echo ' $(WORK_ROOT)/baidu/paddle-serving/predictor/Makefile'
echo ' $(WORK_ROOT)/baidu/paddle-serving/predictor/.bcloud.cache/makefile*'
echo ' help : list all available make targets'
echo -e '\ntargets for the compile of dependency module:'
echo ' app/ecom/elib/ecommon-lib'
echo ' app/ecom/im/mempool'
echo ' baidu/base/baidu-rpc'
echo ' baidu/base/bsl'
echo ' baidu/base/bthread'
echo ' baidu/base/bvar'
echo ' baidu/base/cache'
echo ' baidu/base/common'
echo ' baidu/base/configure'
echo ' baidu/base/dict'
echo ' baidu/base/iobuf'
echo ' baidu/base/mcpack2pb'
echo ' baidu/base/protobuf-json'
echo ' baidu/base/spreg'
echo ' baidu/base/ullib'
echo ' baidu/bns/naming-lib'
echo ' baidu/elib/ecommon-lib'
echo ' baidu/im-common/mempool'
echo ' baidu/third-party/opencv'
echo ' lib2-64/bsl'
echo ' lib2-64/cache'
echo ' lib2-64/dict'
echo ' lib2-64/ullib'
echo ' op/oped/noah/webfoot/naming-lib'
echo ' public/baidu-rpc'
echo ' public/bthread'
echo ' public/bvar'
echo ' public/common'
echo ' public/configure'
echo ' public/iobuf'
echo ' public/mcpack2pb'
echo ' public/noah/giano-lib/release/baas-lib-c'
echo ' public/protobuf-json'
echo ' public/spreg'
echo ' third-64/boost'
echo ' third-64/gflags'
echo ' third-64/gtest'
echo ' third-64/leveldb'
echo ' third-64/libevent'
echo ' third-64/pcre'
#
#import dependency modules
#
ifneq ($(MAKEFILE_BAIDU_PADDLE-SERVING_PREDICTOR), 'imported')
include $(MAKEFILE_DIR_DEFAULT)/.bcloud.cache/makefile.baidu_paddle-serving_predictor
endif
[TOC]
# Overview
PaddlePaddle is the company's open-source machine learning framework, with broad support for customized development of all kinds of deep learning models.
Paddle Cloud is a complete cloud platform built on the PaddlePaddle framework: externally it offers a full-workflow AI development platform, and internally it hosts machine learning cloud services for the company's product lines.
Paddle Serving is the online prediction part of Paddle Cloud. It integrates seamlessly with Paddle Cloud model training, providing a public machine-learning prediction cloud service externally, and a unified model-prediction development framework and cloud service for the company's business lines internally.
# Getting Started
## Running the example
Note: the example is an ImageNet image classification model and runs in CPU mode by default (for GPU mode, modify the BCLOUD configuration items and build the runtime environment with the Dockerfile; see the [Wiki on Docker deployment](http://agroup.baidu.com/share/md/044f552e866f4078900be503784e2468)).
Step 1: start the server:
```shell
git clone ssh://icode.baidu.com:8235/baidu/paddle-serving/serving ~/my_paddle_serving/baidu/paddle-serving/serving && cd ~/my_paddle_serving/baidu/paddle-serving/serving && bcloud build && ./output/bin/image_class &
```
Step 2: start the client:
```shell
git clone ssh://icode.baidu.com:8235/baidu/paddle-serving/sdk-cpp ~/my_paddle_serving/baidu/paddle-serving/sdk-cpp && cd ~/my_paddle_serving/baidu/paddle-serving/sdk-cpp && bcloud build && ./output/bin/ximage && pkill image_class
```
## Example walkthrough
### Prediction interface definition
```c++
syntax="proto2";
package baidu.paddle_serving.predictor.image_class;
option cc_generic_services = true;
// x-image request messages (batch interface)
message XImageReqInstance {
required bytes image_binary = 1;
required uint32 image_length = 2;
};
message Request {
repeated XImageReqInstance instances = 1;
};
// x-image response messages (batch interface)
message DensePrediction {
repeated float categories = 1;
};
message ClassResponse {
repeated DensePrediction predictions = 1;
};
message XImageResInstance {
required string response_json = 1;
};
message Response {
// Each json string is serialized from ClassResponse
repeated XImageResInstance predictions = 1;
};
// Service/method definitions
service ImageClassifyService {
rpc inference(Request) returns (Response);
rpc debug(Request) returns (Response);
};
```
### Server-side implementation
Users only need to customize or configure the following three kinds of information to build a complete Paddle-Serving prediction module.
#### Interface changes ([proto directory](http://icode.baidu.com/repos/baidu/paddle-serving/serving/tree/master:proto/))
On the server side, the prediction interface only needs the following changes:
```c++
// Change 1: depend on the paddle-serving option proto file
import "pds_option.proto";
...
service ClassService {
rpc inference(Request) returns (Response);
rpc debug(Request) returns (Response);
// Change 2: enable the generate_impl switch (to support configuration-driven services)
option (pds.options).generate_impl = true;
};
```
#### Example configuration ([conf directory](http://icode.baidu.com/repos/baidu/paddle-serving/serving/tree/master:conf/))
- gflags options (a flag-file sketch follows the example below)
| name | default | description |
|------|---------|-------------|
| workflow_path | ./conf | directory of the workflow config |
| workflow_file | workflow.conf | workflow config file name |
| inferservice_path | ./conf | directory of the service config |
| inferservice_file | service.conf | service config file name |
| logger_path | ./conf | directory of the logging config |
| logger_file | log.conf | logging config file name |
| resource_path | ./conf | directory of the resource manager config |
| resource_file | resource.conf | resource manager config file name |
| reload_interval_s | 10 | reload thread interval (seconds) |
- Example configuration files (image classification demo)
```shell
# >>> service.conf
[@Service]
name: ImageClassifyService
@workflow: workflow_image_classification
# >>> workflow.conf
[@Workflow]
name: workflow_image_classification
path: ./conf
file: imagec_dag.conf
# >>> imagec_dag.conf
workflow_type: Sequence
[@Node]
name: image_reader_op
type: ImageReaderOp
[@Node]
name: image_classify_op
type: ImageClassifyOp
[.@Depend]
name: image_reader_op
mode: RO
[@Node]
name: write_json_op
type: WriteJsonOp
[.@Depend]
name: image_classify_op
mode: RO
# >>> resource.conf
model_manager_path: ./conf
model_manager_file: model_toolkit.conf
```
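The gflags options in the table above are normally supplied through a flag file; the FAQ at the end of this document refers to it as gflags.conf. The sketch below simply restates the table defaults; the --name=value flag-file syntax follows standard gflags conventions and is an assumption here, not something taken from this repository's conf directory.
```shell
# >>> gflags.conf (sketch; values are the defaults from the table above)
--workflow_path=./conf
--workflow_file=workflow.conf
--inferservice_path=./conf
--inferservice_file=service.conf
--logger_path=./conf
--logger_file=log.conf
--resource_path=./conf
--resource_file=resource.conf
--reload_interval_s=10
```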
#### Custom Op operators ([op directory](http://icode.baidu.com/repos/baidu/paddle-serving/serving/tree/master:op/))
- Pre-processing operator (ImageReaderOp): reads the image byte stream from the Request, decodes it with OpenCV, fills a tensor object and writes it to its channel;
- Prediction operator (ImageClassifyOp): takes the input tensor from ImageReaderOp's channel, allocates a temporary output tensor, calls ModelToolkit to run the prediction, and writes the output tensor to its channel;
- Post-processing operator (WriteJsonOp): takes the output tensor from ImageClassifyOp's channel, serializes it into a JSON string, and writes it as the RPC output (a self-contained sketch of this channel pattern follows the list).
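All three operators follow the same pattern: read the predecessor's output from its channel, transform it, and write the result into this node's own channel so the framework can pass it downstream. The sketch below is a self-contained toy version of a WriteJsonOp-style post-processing step; the class and method names are stand-ins for illustration only (the framework's real base classes are OpWithChannel / OpWithChannelAndConf, described in the terminology section later, and its actual API is not reproduced here).
```c++
// Self-contained sketch of the Op/channel pattern (stand-in types only).
#include <iostream>
#include <map>
#include <string>
#include <vector>

// Stand-in for a node's output channel: a typed slot the next Op can read.
struct Channel {
    std::vector<float> categories;  // what an ImageClassifyOp-like node emits
    std::string json;               // what a WriteJsonOp-like node emits
};

// Stand-in for the framework's Op base class.
class Op {
public:
    virtual ~Op() = default;
    // Read the channels of the nodes this Op depends on and fill its own channel.
    virtual int inference(const std::map<std::string, const Channel*>& deps,
                          Channel* out) = 0;
};

// Post-processing Op: serialize the classification output into a JSON string.
class WriteJsonOpSketch : public Op {
public:
    int inference(const std::map<std::string, const Channel*>& deps,
                  Channel* out) override {
        auto it = deps.find("image_classify_op");
        if (it == deps.end()) return -1;  // dependency missing
        std::string json = "{\"categories\":[";
        const std::vector<float>& cats = it->second->categories;
        for (size_t i = 0; i < cats.size(); ++i) {
            json += std::to_string(cats[i]);
            if (i + 1 < cats.size()) json += ",";
        }
        json += "]}";
        out->json = json;  // the framework would return this as the RPC Response
        return 0;
    }
};

int main() {
    Channel classify_out;
    classify_out.categories = {0.1f, 0.7f, 0.2f};
    Channel response;
    WriteJsonOpSketch op;
    if (op.inference({{"image_classify_op", &classify_out}}, &response) == 0) {
        std::cout << response.json << std::endl;  // {"categories":[0.100000,...]}
    }
    return 0;
}
```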
### Client-side implementation
By customizing or configuring the following three kinds of information, users can easily issue prediction requests and configure several service connections locally:
#### Interface changes ([proto directory](http://icode.baidu.com/repos/baidu/paddle-serving/sdk-cpp/tree/master:proto))
On the client side, the prediction interface only needs the following changes:
```c++
// Change 1: depend on the paddle-serving option proto file
import "pds_option.proto";
...
service ImageClassifyService {
rpc inference(Request) returns (Response);
rpc debug(Request) returns (Response);
// Change 2: enable the generate_stub switch (to support configuration-driven clients)
option (pds.options).generate_stub = true;
};
```
#### Connection configuration ([conf directory](http://icode.baidu.com/repos/baidu/paddle-serving/sdk-cpp/tree/master:conf))
```shell
# predictions.conf
## default configuration shared by all variants
[DefaultVariantInfo]
Tag : default
[.Connection]
ConnectTimeoutMicroSec : 200
ReadTimeoutMicroSec : 2000
WriteTimeoutMicroSec : 500
ConnectRetryCount : 2
MaxConnectionPerHost : 100
HedgeRequestTimeoutMicroSec : -1
HedgeFetchRetryCount : 2
BnsReloadIntervalSeconds : 10
ConnectionType : pooled
[.NamingInfo]
ClusterFilterStrategy : Default
LoadBalanceStrategy : la
[.RpcParameter]
# 0-NONE, 1-SNAPPY, 2-GZIP, 3-ZLIB, 4-LZ4
CompressType : 0
Protocol : baidu_std
MaxChannelPerRequest : 3
[@Predictor]
name : ximage
service_name : baidu.paddle_serving.predictor.image_class.ImageClassifyService
endpoint_router : WeightedRandomRender
[.WeightedRandomRender]
VariantWeightList : 30|70 # 30% vs 70% pvs
[.@VariantInfo]
Tag : var1 # variant tag, used by upstreams to identify this version
[..NamingInfo]
Cluster : list://127.0.0.1:8010
[.@VariantInfo]
Tag : var2
[..NamingInfo]
Cluster : list://127.0.0.1:8011
```
#### Request logic ([demo/ximage.cpp](http://icode.baidu.com/repos/baidu/paddle-serving/sdk-cpp/blob/master:demo/ximage.cpp))
```c++
// process-level initialization
assert(PredictorAPI::instance().create("./conf/predictions.conf") == 0);
// thread-level prediction call:
Request req;
// fill request
// ...
Response res;
Predictor* ximage = PredictorAPI::instance().fetch_predictor("ximage");
assert(ximage != NULL);
ximage->inference(req, res);
// parse response
// ...
assert(PredictorAPI::instance().free_predictor(ximage) == 0);
// process-level teardown
assert(PredictorAPI::instance().destroy() == 0);
```
## Fengchao protocol compatibility
Paddle Serving evolved from the Fengchao Guanxing framework, whose communication protocol was nshead+compack+idl. To ease compatibility between old and new interfaces, both the Paddle Serving server and client are backward compatible:
- Old API calling a new server: to accommodate the old Guanxing client packet format, the new server uses mcpack2pb to generate pb objects that can parse the idl format; see the [wtitleq server implementation](http://icode.baidu.com/repos/baidu/paddle-serving/lr-model/tree/master).
- New SDK calling an old server: to reach legacy Guanxing servers, the SDK uses the mcpack2pb plugin to generate idl-format serialization logic; see the [wtitleq api implementation](http://icode.baidu.com/repos/baidu/infinite-inference/as-wtitleq-demo/tree/master).
Fengchao ad request splitting: the Paddle Serving C++ SDK provides easy-to-use request splitting, enabled by modifying the proto/conf files:
```c++
// interface.proto
message PredictorRequest {
message AdvRequest {
// ad-level fields
repeated uint32 ideaid = 1;
repeated string title = 2;
}
// query-level fields
required uint64 sid = 1;
required string query = 2;
// ...
// ad-level fields
repeated AdvRequest advs = 3 [(pds.pack_on)=true]; // Change 1: split the request on the advs field
}
// ...
service WtitleqService {
rpc ...
rpc ...
option (pds.options).package_size = 10; // Change 2: limit the size of each sub-request
}
```
[Example proto from the wtitleq SDK](http://icode.baidu.com/repos/baidu/infinite-inference/as-wtitleq-demo/blob/master:proto/predictor_api.proto)
```bash
# predictions.conf
[@Predictor]
# ...
[.@VariantInfo]
#...
[..RpcParameter]
Protocol : itp # Change 3: switch the rpc protocol to itp
```
[Example conf from the wtitleq SDK](http://icode.baidu.com/repos/baidu/infinite-inference/as-wtitleq-demo/blob/master:conf/predictors.conf)
# Framework overview
![image](http://agroup-bos.cdn.bcebos.com/63a5076471e96a08124b89101e12c1a0ec7b642a)
- Base framework: hides every element an RPC service needs, so users only focus on developing their own business operators;
- Business framework: request interfaces customized with Protobuf, business logic described as a finite DAG, scheduled in parallel;
- Model framework: heterogeneous hardware (CPU/FPGA/GPU), asynchronous priority scheduling across multiple models, flexible extension with new engines, configuration-driven;
- User interface: building a service = defining a proto file + implementing/reusing Ops + writing configuration; both SDK and HTTP requests are supported;
## Terminology
- Prediction engine: a wrapper around inference libraries such as PaddlePaddle/Abacus/TensorFlow that hides the details of dynamically reloading prediction models and exposes a unified prediction interface upward;
- Prediction model: the data file or directory produced by the offline training framework and loaded by the online prediction engine; a Paddle Fluid model, for example, usually consists of a topology file and a parameter file;
- Op (operator): Paddle Serving's smallest-granularity wrapper around online business logic (pre-processing, post-processing, etc.); the framework provides the two common base classes OpWithChannel and OpWithChannelAndConf, plus default implementations of generic Ops;
- Node: an instance of an Op class combined with its parameter configuration, and the unit of execution inside a Workflow;
- DAG/Workflow: a set of interdependent Nodes; every Node can obtain the Request object through a dedicated interface, each Node's Op obtains the outputs of its predecessor Ops through the dependency relationship, and the output of the last Node is by default the Response object;
- Service: the wrapper around one request (pv); a Service can be configured with several Workflows that share the Request object of the current pv, execute in parallel or serially, and finally write their Responses into the corresponding output slots. One Paddle Serving process can expose multiple Service interfaces, and the upstream selects the Service to call by ServiceName.
![image](http://agroup-bos.cdn.bcebos.com/2e5e3cdcc9426d16e2090e64e7d33098ae5ad826)
## Main features
The Paddle Serving framework offers strategy engineers functional extensions on three levels:
### Model
- Prediction engines: integrates the inference libraries of common machine learning frameworks such as PaddlePaddle, Abacus, TensorRT, Anakin and TensorFlow;
- Model formats: supports common model formats such as PaddlePaddle (V1, V2, Fluid), TensorRT UFF, Anakin, TensorFlow and Caffe;
- User interface: configuration-driven model loading and reloading, with an identical prediction interface across model types;
- Model scheduling: multi-model prediction scheduling based on an asynchronous threading model, enabling priority scheduling of heterogeneous resources;
### Business logic
- Prediction flow: a finite DAG describes the business flow of one prediction from Request to Response; each Node is a minimal logical unit, an Op;
- Prediction logic: the framework ships common Ops for pre-processing, prediction and post-processing; users implement specialized logic with custom Ops;
### Service
- RPC: network I/O is built on Baidu-rpc; the server can start multiple independent Services via configuration, and the framework collects detailed per-Service business metrics and exports them through the BVar interface to monitoring platforms such as Noah;
- SDK: wraps the Baidu-rpc client and provides multi-downstream connection management, extensible routing strategies, customizable parameter experiments and automatic request splitting; it supports synchronous, semi-synchronous and fully asynchronous interaction modes as well as multiple compatibility protocols, with all connection strategies driven by configuration
# Platform overview
![image](http://agroup-bos.cdn.bcebos.com/42a0e34a7c6b36976e3932639209fd823d8f25e0)
- [Operations API](http://agroup.baidu.com/share/md/e582f543fb574e9b92445286955a976d)
- [Prediction API](http://agroup.baidu.com/share/md/eb91a51739514319844ceccdb331564c)
## Terminology
- User: a registered user of the cloud platform, who can add, delete, query and modify the endpoint information under their account through the platform Dashboard;
- Endpoint: the logical abstraction of one prediction need, usually containing one or more service variants to ease multi-version model management;
- Variant: a homogeneous Paddle Serving cluster; each instance runs one Paddle Serving process;
- Experiment (A/B test): two modes are supported; variant experiments randomly sample traffic according to the traffic percentage of each variant under the Endpoint, while parameterized experiments bind experiment parameters to each pv, which the Paddle Serving process parses to choose different code branches;
## Main features
The public-cloud deployment is the Infinite (Tianyan) cloud platform, which provides strategy engineers with end-to-end hosting in three areas:
- Unified access proxy: a proxy service that syncs metadata with the cloud platform in real time via zk, supports multi-version model management and A/B-test routing strategies, and provides a unified entry point and a standard prediction API;
- Automated deployment: integrates with common PaaS platforms such as K8S/Opera, supports one-click deployment, rollback and decommissioning, and manages resources at the endpoint/variant/model level;
- Visual operations: integrates with front-end tools and pages such as console, notebook and dashboard to meet visual operations needs;
# Design documents
- [Overall design document](http://agroup.baidu.com/paddleserving/view/office/895070)
- [Framework detailed design document](http://agroup.baidu.com:8964/static/a3/e40876e464ba08ae5de14aa7710cf326456751.pdf?filename=PaddleServing%E6%9C%8D%E5%8A%A1%E6%A1%86%E6%9E%B6%E8%AF%A6%E7%BB%86%E8%AE%BE%E8%AE%A1%E6%96%87%E6%A1%A3v0_1.pdf)
- [Platform detailed design document](http://agroup.baidu.com/share/office/042a0941579e49adb8c255c8b5e92d51)
# FAQ
1. How do I change the port configuration?
- A service built with this framework needs to claim a port; the port number can be changed in the following ways (a minimal illustration follows):
- if the inferservice_file specifies port:xxx, that port is claimed;
- otherwise, if gflags.conf specifies --port:xxx, that port is claimed;
- otherwise, the default port hard-coded in the program, 8010, is used.
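A sketch of the lookup order above, using the default file names from the gflags table earlier in this document; the exact key syntax (port: in the inferservice_file, --port= in gflags.conf) is an assumption based on the rules stated here:
```shell
# >>> service.conf (the inferservice_file) -- checked first
port: 8011
# >>> gflags.conf -- consulted only if the inferservice_file sets no port
--port=8011
# if neither file sets a port, the built-in default 8010 is used
```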
2. How do I configure a dynamic port at deployment time?
- If you use the FCCI deployment protocol (the internal deployment protocol of the Fengchao retrieval side), you need to (1) specify the port number via the inferservice_file and (2) edit the dynamic_port_config in [Rakefile.opera](http://wiki.baidu.com/pages/viewpage.action?pageId=399979183#id-%E4%BB%8E%E9%9B%B6%E5%BC%80%E5%A7%8B%E5%86%99production-%E7%BC%96%E5%86%99Rakefile).
- `@dynamic_port_config is the dynamic-port configuration: it requests a dynamic port named :name from Opera, and the allocated port number is written into the :target item of the :conf file.` For example:
```
@dynamic_port_config = [
{:name => 'main', :conf => 'framework/service.conf', :target => 'port'}, # the port is requested from Opera automatically at deployment time; the service will listen on it
{:name => 'main', :conf => 'predictor_valid.conf', :target => 'port'},   # the valid tool sends test requests to this port to verify that the service has started
]
```
####################################################################################################
# __ _ __ __ __ #
# / /_. (*) / /_. ____. / / _____. __ __. ______/ / #
# / __ \/ / / __ \ / ___/ / / / __ / / / / / / __ / / #
# / / / / / / /_/ / / /__ / / / /_/ / / /_/ / / /_/ / / #
# /_/ /_/_/ /_**__/ \_*_/ /_/ \_**_/ \_**_/ \__**__/ #
# #
####################################################################################################
No username is specified, will use the default user(wangguibao) information logined locally
[2019-02-11 16:32:52] [INFO] Start analyzing module dependency ...
[2019-02-11 16:32:52] [WARNING] BCLOUD has already changed, will re-analyze dependency
[2019-02-11 16:32:52] [WARNING] Please change CONFIGS('baidu/base/cache@cache_3-1-7-21784_PD_BL') to CONFIGS('lib2-64/cache@cache_3-1-7-21784_PD_BL') or CONFIGS('baidu/base/cache@xxx@git_branch') or CONFIGS('baidu/base/cache@xxx_PD_BL@git_tag') in /home/wangguibao/paddle/baidu/paddle-serving/predictor/BCLOUD
[2019-02-11 16:32:52] [WARNING] Please change CONFIGS('baidu/base/cache@cache_3-1-7-21784_PD_BL') to CONFIGS('lib2-64/cache@cache_3-1-7-21784_PD_BL'), because (cache_3-1-7-21784_PD_BL) was built on svn.
[2019-02-11 16:32:53] svn export https://svn.baidu.com/public/noah/tags/giano-lib/release/baas-lib-c/baas-lib-c_1-1-9-1482_PD_BL/BCLOUD /tmp/aa68fb3d9e1c8dfcd230dd04a5c58ca9 --username wangguibao --password ****** --non-interactive --trust-server-cert
[2019-02-11 16:32:55] [WARNING] baidu/im-common/mempool/BCLOUD:CONFIGS('third-64/gtest') is deprecated, use CONFIGS('third-64/gtest@base') instead. No more warnings for similar cases
svn export https://svn.baidu.com/app/ecom/elib/tags/ecommon-lib/ecommon-lib_1-1-14-392_PD_BL/BCLOUD /tmp/06b72c9ee669668d255335d893757dec --username wangguibao --password ****** --non-interactive --trust-server-cert
[2019-02-11 16:32:55] svn info https://svn.baidu.com/public/noah/tags/giano-lib/release/baas-lib-c/baas-lib-c_1-1-9-1482_PD_BL -r HEAD --xml --username wangguibao --password ****** --non-interactive --trust-server-cert
[2019-02-11 16:32:55] svn info https://svn.baidu.com/third-64/tags/protobuf/protobuf_2-4-1-1100_PD_BL -r HEAD --xml --username wangguibao --password ****** --non-interactive --trust-server-cert
[2019-02-11 16:32:55] svn info https://svn.baidu.com/op/oped/noah/tags/webfoot/naming-lib/naming-lib_1-0-32-0_PD_BL -r HEAD --xml --username wangguibao --password ****** --non-interactive --trust-server-cert
[2019-02-11 16:32:55] svn info https://svn.baidu.com/third-64/tags/gflags/gflags_2-0-0-100_PD_BL -r HEAD --xml --username wangguibao --password ****** --non-interactive --trust-server-cert
[2019-02-11 16:32:55] svn info https://svn.baidu.com/third-64/tags/leveldb/leveldb_1-0-0-0_PD_BL -r HEAD --xml --username wangguibao --password ****** --non-interactive --trust-server-cert
[2019-02-11 16:32:55] svn info https://svn.baidu.com/third-64/tags/boost/boost_1-63-0-101_PD_BL -r HEAD --xml --username wangguibao --password ****** --non-interactive --trust-server-cert
[2019-02-11 16:32:55] svn info https://svn.baidu.com/lib2-64/tags/dict/dict_3-1-22-0_PD_BL -r HEAD --xml --username wangguibao --password ****** --non-interactive --trust-server-cert
[2019-02-11 16:32:55] svn info https://svn.baidu.com/app/ecom/elib/tags/ecommon-lib/ecommon-lib_1-1-14-392_PD_BL -r HEAD --xml --username wangguibao --password ****** --non-interactive --trust-server-cert
[2019-02-11 16:32:55] svn info https://svn.baidu.com/third-64/tags/libevent/libevent_2-0-22-0_PD_BL -r HEAD --xml --username wangguibao --password ****** --non-interactive --trust-server-cert
[2019-02-11 16:32:55] svn info https://svn.baidu.com/lib2-64/tags/bsl/bsl_1-1-42-22055_PD_BL -r HEAD --xml --username wangguibao --password ****** --non-interactive --trust-server-cert
[2019-02-11 16:32:55] svn info https://svn.baidu.com/lib2-64/tags/cache/cache_3-1-7-21784_PD_BL -r HEAD --xml --username wangguibao --password ****** --non-interactive --trust-server-cert
[2019-02-11 16:32:55] svn info https://svn.baidu.com/third-64/tags/gtest/gtest_1-7-2-0_PD_BL -r HEAD --xml --username wangguibao --password ****** --non-interactive --trust-server-cert
[2019-02-11 16:32:55] svn info https://svn.baidu.com/third-64/tags/pcre/pcre_7-7-0-0_PD_BL -r HEAD --xml --username wangguibao --password ****** --non-interactive --trust-server-cert
[2019-02-11 16:32:55] svn info https://svn.baidu.com/public/tags/spreg/spreg_1-0-10-0_PD_BL -r HEAD --xml --username wangguibao --password ****** --non-interactive --trust-server-cert
[2019-02-11 16:32:55] svn info https://svn.baidu.com/public/tags/configure/configure_1-2-17-0_PD_BL -r HEAD --xml --username wangguibao --password ****** --non-interactive --trust-server-cert
####################################################################################################
# #
# WARNNING!!! #
# Fix the problem [Please change CONFIGS('xxx') to CONFIGS('yyy') above], #
# How to fix: http://ihelp.baidu.com/bcloud/42 #
# #
####################################################################################################
[2019-02-11 16:32:55] [INFO] Start check branch/tag name ...
[2019-02-11 16:32:55] export LC_ALL=en_US.UTF-8 GIT_SSL_NO_VERIFY=true && git ls-remote --heads https://wangguibao:******@icode.baidu.com/baidu/im-common/mempool mempool_1-0-1_BRANCH
############################################ Diff File #############################################
A/M baidu/paddle-serving/predictor/BCLOUD
A/M baidu/paddle-serving/predictor/common/constant.cpp
A/M baidu/paddle-serving/predictor/common/constant.h
A/M baidu/paddle-serving/predictor/framework/resource.cpp
A/M baidu/paddle-serving/predictor/framework/resource.h
A/M baidu/paddle-serving/predictor/src/pdserving.cpp
D baidu/paddle-serving/predictor/framework/rd_dict.cpp
D baidu/paddle-serving/predictor/framework/rd_dict.h
############################################ Task Info #############################################
TaskID : ffdfadb9c3392d9e09c573268e8c2c09
URL : ssh://icode.baidu.com:8235/baidu/paddle-serving/predictor
Workroot : /home/wangguibao/paddle
Module : baidu/paddle-serving/predictor
Builder OS : centos4u3 with kernel 2
Task Type : normal
Builder Master: 10.103.191.36
[INFO] builder will create workspace for module baidu/paddle-serving/predictor
[ 3%] https://svn.baidu.com/third-64/tags/libevent/libevent_2-0-22-0_PD_BL:1259 cached
[ 7%] https://svn.baidu.com/third-64/tags/gflags/gflags_2-0-0-100_PD_BL:860 cached
[ 11%] https://svn.baidu.com/op/oped/noah/tags/webfoot/naming-lib/naming-lib_1-0-32-0_PD_BL:235957 cached
[ 15%] ssh://icode.baidu.com:8235/baidu/third-party/opencv:master:952c2de156d02e786c6952229c8b2ce80095f1ea cached
[ 19%] https://svn.baidu.com/third-64/tags/pcre/pcre_7-7-0-0_PD_BL:153 cached
[ 23%] ssh://icode.baidu.com:8235/baidu/paddle-serving/predictor:refs/builder/c2d9e1a502afa0acb71ecae163003f82:078ed1f downloading ...
[ 26%] ssh://icode.baidu.com:8235/baidu/base/iobuf:stable:3485b863ec7e69cabe447df7078906f67adeb39f cached
[ 30%] ssh://icode.baidu.com:8235/baidu/base/ullib:stable:14e3bf274075fb929ea5e9a5023ca0d6351bd8c5 cached
[ 34%] https://svn.baidu.com/lib2-64/tags/bsl/bsl_1-1-42-22055_PD_BL:11439 cached
[ 38%] https://svn.baidu.com/lib2-64/tags/dict/dict_3-1-22-0_PD_BL:4768 cached
[ 42%] ssh://icode.baidu.com:8235/baidu/base/common:stable:bcabd2031e8496badd48c2a1f01b56f669689102 cached
[ 46%] ssh://icode.baidu.com:8235/baidu/base/bvar:bvar_1-0-112-1_PD_BL cached
[ 50%] ssh://icode.baidu.com:8235/baidu/base/baidu-rpc:baidu-rpc_1-0-11352-1_PD_BL cached
[ 53%] ssh://icode.baidu.com:8235/baidu/base/bthread:bthread_1-0-145-1_PD_BL cached
[ 57%] https://svn.baidu.com/public/tags/spreg/spreg_1-0-10-0_PD_BL:25135 cached
[ 61%] https://svn.baidu.com/third-64/tags/gtest/gtest_1-7-2-0_PD_BL:1253 cached
[ 65%] https://svn.baidu.com/lib2-64/tags/cache/cache_3-1-7-21784_PD_BL:9115 cached
[ 69%] https://svn.baidu.com/public/tags/configure/configure_1-2-17-0_PD_BL:25352 cached
[ 73%] ssh://icode.baidu.com:8235/baidu/im-common/mempool:mempool_1-0-1_BRANCH:44a12c43af6e5fe9a71591fe3247b250acfcd99b cached
[ 76%] https://svn.baidu.com/third-64/tags/protobuf/protobuf_2-4-1-1100_PD_BL:982 cached
[ 80%] https://svn.baidu.com/public/noah/tags/giano-lib/release/baas-lib-c/baas-lib-c_1-1-9-1482_PD_BL:1483 cached
[ 84%] ssh://icode.baidu.com:8235/baidu/base/mcpack2pb:stable:d5884073d5435030a07b204afdac26309309c231 cached
[ 88%] https://svn.baidu.com/app/ecom/elib/tags/ecommon-lib/ecommon-lib_1-1-14-392_PD_BL:393 cached
[ 92%] https://svn.baidu.com/third-64/tags/boost/boost_1-63-0-101_PD_BL:1241 cached
[ 96%] https://svn.baidu.com/third-64/tags/leveldb/leveldb_1-0-0-0_PD_BL:973 cached
[100%] ssh://icode.baidu.com:8235/baidu/base/protobuf-json:protobuf-json_1-0-41-1_PD_BL cached
[INFO] create workspace use 1.271s, total: 26, cached: 25
[INFO] =============== Preprocess [TaskID: ffdfadb9c3392d9e09c573268e8c2c09, Module: baidu/paddle-serving/predictor] ================
[INFO] [Stage-1/3] Handling Stage >>> [GenSources] ...
[INFO] [Stage-1/3 CMD] /home/bcloud/bcloud_data/EE/BCLOUD_PROTOBUF/CompileServer/Task/ffdfadb9c3392d9e09c573268e8c2c09/third-64/protobuf/bin/protoc --plugin=protoc-gen-mcpack=../mcpack2pb/protoc-gen-mcpack.forbcloud --proto_path=./protocol -I/home/bcloud/bcloud_data/EE/BCLOUD_PROTOBUF/CompileServer/Task/ffdfadb9c3392d9e09c573268e8c2c09/baidu/base/mcpack2pb -I/home/bcloud/bcloud_data/EE/BCLOUD_PROTOBUF/CompileServer/Task/ffdfadb9c3392d9e09c573268e8c2c09/third-64/protobuf/include --cpp_out=. --mcpack_out=. protocol/baidu/rpc/get_favicon.proto ...
[CMD] /home/bcloud/bcloud_data/EE/BCLOUD_PROTOBUF/CompileServer/Task/ffdfadb9c3392d9e09c573268e8c2c09/third-64/protobuf/bin/protoc --plugin=protoc-gen-mcpack=../mcpack2pb/protoc-gen-mcpack.forbcloud --proto_path=./protocol -I/home/bcloud/bcloud_data/EE/BCLOUD_PROTOBUF/CompileServer/Task/ffdfadb9c3392d9e09c573268e8c2c09/baidu/base/mcpack2pb -I/home/bcloud/bcloud_data/EE/BCLOUD_PROTOBUF/CompileServer/Task/ffdfadb9c3392d9e09c573268e8c2c09/third-64/protobuf/include --cpp_out=. --mcpack_out=. protocol/baidu/rpc/get_favicon.proto, takes 0.012s
[INFO] [Stage-1/3 CMD] /home/bcloud/bcloud_data/EE/BCLOUD_PROTOBUF/CompileServer/Task/ffdfadb9c3392d9e09c573268e8c2c09/third-64/protobuf/bin/protoc --plugin=protoc-gen-mcpack=../mcpack2pb/protoc-gen-mcpack.forbcloud --proto_path=./protocol -I/home/bcloud/bcloud_data/EE/BCLOUD_PROTOBUF/CompileServer/Task/ffdfadb9c3392d9e09c573268e8c2c09/baidu/base/mcpack2pb -I/home/bcloud/bcloud_data/EE/BCLOUD_PROTOBUF/CompileServer/Task/ffdfadb9c3392d9e09c573268e8c2c09/third-64/protobuf/include --cpp_out=. --mcpack_out=. protocol/baidu/rpc/policy/hulu_pbrpc_meta.proto ...
[CMD] /home/bcloud/bcloud_data/EE/BCLOUD_PROTOBUF/CompileServer/Task/ffdfadb9c3392d9e09c573268e8c2c09/third-64/protobuf/bin/protoc --plugin=protoc-gen-mcpack=../mcpack2pb/protoc-gen-mcpack.forbcloud --proto_path=./protocol -I/home/bcloud/bcloud_data/EE/BCLOUD_PROTOBUF/CompileServer/Task/ffdfadb9c3392d9e09c573268e8c2c09/baidu/base/mcpack2pb -I/home/bcloud/bcloud_data/EE/BCLOUD_PROTOBUF/CompileServer/Task/ffdfadb9c3392d9e09c573268e8c2c09/third-64/protobuf/include --cpp_out=. --mcpack_out=. protocol/baidu/rpc/policy/hulu_pbrpc_meta.proto, takes 0.015s
[INFO] [Stage-1/3 CMD] /home/bcloud/bcloud_data/EE/BCLOUD_PROTOBUF/CompileServer/Task/ffdfadb9c3392d9e09c573268e8c2c09/third-64/protobuf/bin/protoc --plugin=protoc-gen-mcpack=../mcpack2pb/protoc-gen-mcpack.forbcloud --proto_path=./protocol -I/home/bcloud/bcloud_data/EE/BCLOUD_PROTOBUF/CompileServer/Task/ffdfadb9c3392d9e09c573268e8c2c09/baidu/base/mcpack2pb -I/home/bcloud/bcloud_data/EE/BCLOUD_PROTOBUF/CompileServer/Task/ffdfadb9c3392d9e09c573268e8c2c09/third-64/protobuf/include --cpp_out=. --mcpack_out=. protocol/baidu/rpc/policy/sofa_pbrpc_meta.proto ...
[CMD] /home/bcloud/bcloud_data/EE/BCLOUD_PROTOBUF/CompileServer/Task/ffdfadb9c3392d9e09c573268e8c2c09/third-64/protobuf/bin/protoc --plugin=protoc-gen-mcpack=../mcpack2pb/protoc-gen-mcpack.forbcloud --proto_path=./protocol -I/home/bcloud/bcloud_data/EE/BCLOUD_PROTOBUF/CompileServer/Task/ffdfadb9c3392d9e09c573268e8c2c09/baidu/base/mcpack2pb -I/home/bcloud/bcloud_data/EE/BCLOUD_PROTOBUF/CompileServer/Task/ffdfadb9c3392d9e09c573268e8c2c09/third-64/protobuf/include --cpp_out=. --mcpack_out=. protocol/baidu/rpc/policy/sofa_pbrpc_meta.proto, takes 0.012s
[INFO] [Stage-1/3 CMD] /home/bcloud/bcloud_data/EE/BCLOUD_PROTOBUF/CompileServer/Task/ffdfadb9c3392d9e09c573268e8c2c09/third-64/protobuf/bin/protoc --plugin=protoc-gen-mcpack=../mcpack2pb/protoc-gen-mcpack.forbcloud --proto_path=./protocol -I/home/bcloud/bcloud_data/EE/BCLOUD_PROTOBUF/CompileServer/Task/ffdfadb9c3392d9e09c573268e8c2c09/baidu/base/mcpack2pb -I/home/bcloud/bcloud_data/EE/BCLOUD_PROTOBUF/CompileServer/Task/ffdfadb9c3392d9e09c573268e8c2c09/third-64/protobuf/include --cpp_out=. --mcpack_out=. protocol/baidu/rpc/policy/streaming_rpc_meta.proto ...
[CMD] /home/bcloud/bcloud_data/EE/BCLOUD_PROTOBUF/CompileServer/Task/ffdfadb9c3392d9e09c573268e8c2c09/third-64/protobuf/bin/protoc --plugin=protoc-gen-mcpack=../mcpack2pb/protoc-gen-mcpack.forbcloud --proto_path=./protocol -I/home/bcloud/bcloud_data/EE/BCLOUD_PROTOBUF/CompileServer/Task/ffdfadb9c3392d9e09c573268e8c2c09/baidu/base/mcpack2pb -I/home/bcloud/bcloud_data/EE/BCLOUD_PROTOBUF/CompileServer/Task/ffdfadb9c3392d9e09c573268e8c2c09/third-64/protobuf/include --cpp_out=. --mcpack_out=. protocol/baidu/rpc/policy/streaming_rpc_meta.proto, takes 0.014s
[INFO] [Stage-1/3 CMD] /home/bcloud/bcloud_data/EE/BCLOUD_PROTOBUF/CompileServer/Task/ffdfadb9c3392d9e09c573268e8c2c09/third-64/protobuf/bin/protoc --plugin=protoc-gen-mcpack=../mcpack2pb/protoc-gen-mcpack.forbcloud --proto_path=./protocol -I/home/bcloud/bcloud_data/EE/BCLOUD_PROTOBUF/CompileServer/Task/ffdfadb9c3392d9e09c573268e8c2c09/baidu/base/mcpack2pb -I/home/bcloud/bcloud_data/EE/BCLOUD_PROTOBUF/CompileServer/Task/ffdfadb9c3392d9e09c573268e8c2c09/third-64/protobuf/include --cpp_out=. --mcpack_out=. protocol/baidu/rpc/policy/baidu_rpc_meta.proto ...
[CMD] /home/bcloud/bcloud_data/EE/BCLOUD_PROTOBUF/CompileServer/Task/ffdfadb9c3392d9e09c573268e8c2c09/third-64/protobuf/bin/protoc --plugin=protoc-gen-mcpack=../mcpack2pb/protoc-gen-mcpack.forbcloud --proto_path=./protocol -I/home/bcloud/bcloud_data/EE/BCLOUD_PROTOBUF/CompileServer/Task/ffdfadb9c3392d9e09c573268e8c2c09/baidu/base/mcpack2pb -I/home/bcloud/bcloud_data/EE/BCLOUD_PROTOBUF/CompileServer/Task/ffdfadb9c3392d9e09c573268e8c2c09/third-64/protobuf/include --cpp_out=. --mcpack_out=. protocol/baidu/rpc/policy/baidu_rpc_meta.proto, takes 0.015s
[INFO] [Stage-1/3 CMD] /home/bcloud/bcloud_data/EE/BCLOUD_PROTOBUF/CompileServer/Task/ffdfadb9c3392d9e09c573268e8c2c09/third-64/protobuf/bin/protoc --plugin=protoc-gen-mcpack=../mcpack2pb/protoc-gen-mcpack.forbcloud --proto_path=./protocol -I/home/bcloud/bcloud_data/EE/BCLOUD_PROTOBUF/CompileServer/Task/ffdfadb9c3392d9e09c573268e8c2c09/baidu/base/mcpack2pb -I/home/bcloud/bcloud_data/EE/BCLOUD_PROTOBUF/CompileServer/Task/ffdfadb9c3392d9e09c573268e8c2c09/third-64/protobuf/include --cpp_out=. --mcpack_out=. protocol/baidu/rpc/policy/public_pbrpc_meta.proto ...
[CMD] /home/bcloud/bcloud_data/EE/BCLOUD_PROTOBUF/CompileServer/Task/ffdfadb9c3392d9e09c573268e8c2c09/third-64/protobuf/bin/protoc --plugin=protoc-gen-mcpack=../mcpack2pb/protoc-gen-mcpack.forbcloud --proto_path=./protocol -I/home/bcloud/bcloud_data/EE/BCLOUD_PROTOBUF/CompileServer/Task/ffdfadb9c3392d9e09c573268e8c2c09/baidu/base/mcpack2pb -I/home/bcloud/bcloud_data/EE/BCLOUD_PROTOBUF/CompileServer/Task/ffdfadb9c3392d9e09c573268e8c2c09/third-64/protobuf/include --cpp_out=. --mcpack_out=. protocol/baidu/rpc/policy/public_pbrpc_meta.proto, takes 0.014s
[INFO] [Stage-1/3 CMD] /home/bcloud/bcloud_data/EE/BCLOUD_PROTOBUF/CompileServer/Task/ffdfadb9c3392d9e09c573268e8c2c09/third-64/protobuf/bin/protoc --plugin=protoc-gen-mcpack=../mcpack2pb/protoc-gen-mcpack.forbcloud --proto_path=./protocol -I/home/bcloud/bcloud_data/EE/BCLOUD_PROTOBUF/CompileServer/Task/ffdfadb9c3392d9e09c573268e8c2c09/baidu/base/mcpack2pb -I/home/bcloud/bcloud_data/EE/BCLOUD_PROTOBUF/CompileServer/Task/ffdfadb9c3392d9e09c573268e8c2c09/third-64/protobuf/include --cpp_out=. --mcpack_out=. protocol/baidu/rpc/errno.proto ...
[CMD] /home/bcloud/bcloud_data/EE/BCLOUD_PROTOBUF/CompileServer/Task/ffdfadb9c3392d9e09c573268e8c2c09/third-64/protobuf/bin/protoc --plugin=protoc-gen-mcpack=../mcpack2pb/protoc-gen-mcpack.forbcloud --proto_path=./protocol -I/home/bcloud/bcloud_data/EE/BCLOUD_PROTOBUF/CompileServer/Task/ffdfadb9c3392d9e09c573268e8c2c09/baidu/base/mcpack2pb -I/home/bcloud/bcloud_data/EE/BCLOUD_PROTOBUF/CompileServer/Task/ffdfadb9c3392d9e09c573268e8c2c09/third-64/protobuf/include --cpp_out=. --mcpack_out=. protocol/baidu/rpc/errno.proto, takes 0.012s
[INFO] [Stage-1/3 CMD] /home/bcloud/bcloud_data/EE/BCLOUD_PROTOBUF/CompileServer/Task/ffdfadb9c3392d9e09c573268e8c2c09/third-64/protobuf/bin/protoc --plugin=protoc-gen-mcpack=../mcpack2pb/protoc-gen-mcpack.forbcloud --proto_path=./protocol -I/home/bcloud/bcloud_data/EE/BCLOUD_PROTOBUF/CompileServer/Task/ffdfadb9c3392d9e09c573268e8c2c09/baidu/base/mcpack2pb -I/home/bcloud/bcloud_data/EE/BCLOUD_PROTOBUF/CompileServer/Task/ffdfadb9c3392d9e09c573268e8c2c09/third-64/protobuf/include --cpp_out=. --mcpack_out=. protocol/baidu/rpc/policy/nshead_meta.proto ...
[CMD] /home/bcloud/bcloud_data/EE/BCLOUD_PROTOBUF/CompileServer/Task/ffdfadb9c3392d9e09c573268e8c2c09/third-64/protobuf/bin/protoc --plugin=protoc-gen-mcpack=../mcpack2pb/protoc-gen-mcpack.forbcloud --proto_path=./protocol -I/home/bcloud/bcloud_data/EE/BCLOUD_PROTOBUF/CompileServer/Task/ffdfadb9c3392d9e09c573268e8c2c09/baidu/base/mcpack2pb -I/home/bcloud/bcloud_data/EE/BCLOUD_PROTOBUF/CompileServer/Task/ffdfadb9c3392d9e09c573268e8c2c09/third-64/protobuf/include --cpp_out=. --mcpack_out=. protocol/baidu/rpc/policy/nshead_meta.proto, takes 0.013s
[INFO] [Stage-1/3 CMD] /home/bcloud/bcloud_data/EE/BCLOUD_PROTOBUF/CompileServer/Task/ffdfadb9c3392d9e09c573268e8c2c09/third-64/protobuf/bin/protoc --plugin=protoc-gen-mcpack=../mcpack2pb/protoc-gen-mcpack.forbcloud --proto_path=./protocol -I/home/bcloud/bcloud_data/EE/BCLOUD_PROTOBUF/CompileServer/Task/ffdfadb9c3392d9e09c573268e8c2c09/baidu/base/mcpack2pb -I/home/bcloud/bcloud_data/EE/BCLOUD_PROTOBUF/CompileServer/Task/ffdfadb9c3392d9e09c573268e8c2c09/third-64/protobuf/include --cpp_out=. --mcpack_out=. protocol/baidu/rpc/get_js.proto ...
[CMD] /home/bcloud/bcloud_data/EE/BCLOUD_PROTOBUF/CompileServer/Task/ffdfadb9c3392d9e09c573268e8c2c09/third-64/protobuf/bin/protoc --plugin=protoc-gen-mcpack=../mcpack2pb/protoc-gen-mcpack.forbcloud --proto_path=./protocol -I/home/bcloud/bcloud_data/EE/BCLOUD_PROTOBUF/CompileServer/Task/ffdfadb9c3392d9e09c573268e8c2c09/baidu/base/mcpack2pb -I/home/bcloud/bcloud_data/EE/BCLOUD_PROTOBUF/CompileServer/Task/ffdfadb9c3392d9e09c573268e8c2c09/third-64/protobuf/include --cpp_out=. --mcpack_out=. protocol/baidu/rpc/get_js.proto, takes 0.011s
[INFO] [Stage-1/3 CMD] /home/bcloud/bcloud_data/EE/BCLOUD_PROTOBUF/CompileServer/Task/ffdfadb9c3392d9e09c573268e8c2c09/third-64/protobuf/bin/protoc --plugin=protoc-gen-mcpack=../mcpack2pb/protoc-gen-mcpack.forbcloud --proto_path=./protocol -I/home/bcloud/bcloud_data/EE/BCLOUD_PROTOBUF/CompileServer/Task/ffdfadb9c3392d9e09c573268e8c2c09/baidu/base/mcpack2pb -I/home/bcloud/bcloud_data/EE/BCLOUD_PROTOBUF/CompileServer/Task/ffdfadb9c3392d9e09c573268e8c2c09/third-64/protobuf/include --cpp_out=. --mcpack_out=. protocol/baidu/rpc/builtin_service.proto ...
[CMD] /home/bcloud/bcloud_data/EE/BCLOUD_PROTOBUF/CompileServer/Task/ffdfadb9c3392d9e09c573268e8c2c09/third-64/protobuf/bin/protoc --plugin=protoc-gen-mcpack=../mcpack2pb/protoc-gen-mcpack.forbcloud --proto_path=./protocol -I/home/bcloud/bcloud_data/EE/BCLOUD_PROTOBUF/CompileServer/Task/ffdfadb9c3392d9e09c573268e8c2c09/baidu/base/mcpack2pb -I/home/bcloud/bcloud_data/EE/BCLOUD_PROTOBUF/CompileServer/Task/ffdfadb9c3392d9e09c573268e8c2c09/third-64/protobuf/include --cpp_out=. --mcpack_out=. protocol/baidu/rpc/builtin_service.proto, takes 0.019s
[INFO] [Stage-1/3 CMD] /home/bcloud/bcloud_data/EE/BCLOUD_PROTOBUF/CompileServer/Task/ffdfadb9c3392d9e09c573268e8c2c09/third-64/protobuf/bin/protoc --plugin=protoc-gen-mcpack=../mcpack2pb/protoc-gen-mcpack.forbcloud --proto_path=./protocol -I/home/bcloud/bcloud_data/EE/BCLOUD_PROTOBUF/CompileServer/Task/ffdfadb9c3392d9e09c573268e8c2c09/baidu/base/mcpack2pb -I/home/bcloud/bcloud_data/EE/BCLOUD_PROTOBUF/CompileServer/Task/ffdfadb9c3392d9e09c573268e8c2c09/third-64/protobuf/include --cpp_out=. --mcpack_out=. protocol/baidu/rpc/rdma.proto ...
[CMD] /home/bcloud/bcloud_data/EE/BCLOUD_PROTOBUF/CompileServer/Task/ffdfadb9c3392d9e09c573268e8c2c09/third-64/protobuf/bin/protoc --plugin=protoc-gen-mcpack=../mcpack2pb/protoc-gen-mcpack.forbcloud --proto_path=./protocol -I/home/bcloud/bcloud_data/EE/BCLOUD_PROTOBUF/CompileServer/Task/ffdfadb9c3392d9e09c573268e8c2c09/baidu/base/mcpack2pb -I/home/bcloud/bcloud_data/EE/BCLOUD_PROTOBUF/CompileServer/Task/ffdfadb9c3392d9e09c573268e8c2c09/third-64/protobuf/include --cpp_out=. --mcpack_out=. protocol/baidu/rpc/rdma.proto, takes 0.011s
[INFO] [Stage-1/3 CMD] /home/bcloud/bcloud_data/EE/BCLOUD_PROTOBUF/CompileServer/Task/ffdfadb9c3392d9e09c573268e8c2c09/third-64/protobuf/bin/protoc --plugin=protoc-gen-mcpack=../mcpack2pb/protoc-gen-mcpack.forbcloud --proto_path=./protocol -I/home/bcloud/bcloud_data/EE/BCLOUD_PROTOBUF/CompileServer/Task/ffdfadb9c3392d9e09c573268e8c2c09/baidu/base/mcpack2pb -I/home/bcloud/bcloud_data/EE/BCLOUD_PROTOBUF/CompileServer/Task/ffdfadb9c3392d9e09c573268e8c2c09/third-64/protobuf/include --cpp_out=. --mcpack_out=. protocol/baidu/rpc/options.proto ...
[CMD] /home/bcloud/bcloud_data/EE/BCLOUD_PROTOBUF/CompileServer/Task/ffdfadb9c3392d9e09c573268e8c2c09/third-64/protobuf/bin/protoc --plugin=protoc-gen-mcpack=../mcpack2pb/protoc-gen-mcpack.forbcloud --proto_path=./protocol -I/home/bcloud/bcloud_data/EE/BCLOUD_PROTOBUF/CompileServer/Task/ffdfadb9c3392d9e09c573268e8c2c09/baidu/base/mcpack2pb -I/home/bcloud/bcloud_data/EE/BCLOUD_PROTOBUF/CompileServer/Task/ffdfadb9c3392d9e09c573268e8c2c09/third-64/protobuf/include --cpp_out=. --mcpack_out=. protocol/baidu/rpc/options.proto, takes 0.013s
[INFO] [Stage-1/3 CMD] /home/bcloud/bcloud_data/EE/BCLOUD_PROTOBUF/CompileServer/Task/ffdfadb9c3392d9e09c573268e8c2c09/third-64/protobuf/bin/protoc --plugin=protoc-gen-mcpack=../mcpack2pb/protoc-gen-mcpack.forbcloud --proto_path=./protocol -I/home/bcloud/bcloud_data/EE/BCLOUD_PROTOBUF/CompileServer/Task/ffdfadb9c3392d9e09c573268e8c2c09/baidu/base/mcpack2pb -I/home/bcloud/bcloud_data/EE/BCLOUD_PROTOBUF/CompileServer/Task/ffdfadb9c3392d9e09c573268e8c2c09/third-64/protobuf/include --cpp_out=. --mcpack_out=. protocol/baidu/rpc/rpc_dump.proto ...
[CMD] /home/bcloud/bcloud_data/EE/BCLOUD_PROTOBUF/CompileServer/Task/ffdfadb9c3392d9e09c573268e8c2c09/third-64/protobuf/bin/protoc --plugin=protoc-gen-mcpack=../mcpack2pb/protoc-gen-mcpack.forbcloud --proto_path=./protocol -I/home/bcloud/bcloud_data/EE/BCLOUD_PROTOBUF/CompileServer/Task/ffdfadb9c3392d9e09c573268e8c2c09/baidu/base/mcpack2pb -I/home/bcloud/bcloud_data/EE/BCLOUD_PROTOBUF/CompileServer/Task/ffdfadb9c3392d9e09c573268e8c2c09/third-64/protobuf/include --cpp_out=. --mcpack_out=. protocol/baidu/rpc/rpc_dump.proto, takes 0.013s
[INFO] [Stage-1/3 CMD] /home/bcloud/bcloud_data/EE/BCLOUD_PROTOBUF/CompileServer/Task/ffdfadb9c3392d9e09c573268e8c2c09/third-64/protobuf/bin/protoc --plugin=protoc-gen-mcpack=../mcpack2pb/protoc-gen-mcpack.forbcloud --proto_path=./protocol -I/home/bcloud/bcloud_data/EE/BCLOUD_PROTOBUF/CompileServer/Task/ffdfadb9c3392d9e09c573268e8c2c09/baidu/base/mcpack2pb -I/home/bcloud/bcloud_data/EE/BCLOUD_PROTOBUF/CompileServer/Task/ffdfadb9c3392d9e09c573268e8c2c09/third-64/protobuf/include --cpp_out=. --mcpack_out=. protocol/baidu/rpc/policy/mongo.proto ...
[CMD] /home/bcloud/bcloud_data/EE/BCLOUD_PROTOBUF/CompileServer/Task/ffdfadb9c3392d9e09c573268e8c2c09/third-64/protobuf/bin/protoc --plugin=protoc-gen-mcpack=../mcpack2pb/protoc-gen-mcpack.forbcloud --proto_path=./protocol -I/home/bcloud/bcloud_data/EE/BCLOUD_PROTOBUF/CompileServer/Task/ffdfadb9c3392d9e09c573268e8c2c09/baidu/base/mcpack2pb -I/home/bcloud/bcloud_data/EE/BCLOUD_PROTOBUF/CompileServer/Task/ffdfadb9c3392d9e09c573268e8c2c09/third-64/protobuf/include --cpp_out=. --mcpack_out=. protocol/baidu/rpc/policy/mongo.proto, takes 0.014s
[INFO] [Stage-1/3 CMD] /home/bcloud/bcloud_data/EE/BCLOUD_PROTOBUF/CompileServer/Task/ffdfadb9c3392d9e09c573268e8c2c09/third-64/protobuf/bin/protoc --plugin=protoc-gen-mcpack=../mcpack2pb/protoc-gen-mcpack.forbcloud --proto_path=./protocol -I/home/bcloud/bcloud_data/EE/BCLOUD_PROTOBUF/CompileServer/Task/ffdfadb9c3392d9e09c573268e8c2c09/baidu/base/mcpack2pb -I/home/bcloud/bcloud_data/EE/BCLOUD_PROTOBUF/CompileServer/Task/ffdfadb9c3392d9e09c573268e8c2c09/third-64/protobuf/include --cpp_out=. --mcpack_out=. protocol/baidu/rpc/rtmp.proto ...
[CMD] /home/bcloud/bcloud_data/EE/BCLOUD_PROTOBUF/CompileServer/Task/ffdfadb9c3392d9e09c573268e8c2c09/third-64/protobuf/bin/protoc --plugin=protoc-gen-mcpack=../mcpack2pb/protoc-gen-mcpack.forbcloud --proto_path=./protocol -I/home/bcloud/bcloud_data/EE/BCLOUD_PROTOBUF/CompileServer/Task/ffdfadb9c3392d9e09c573268e8c2c09/baidu/base/mcpack2pb -I/home/bcloud/bcloud_data/EE/BCLOUD_PROTOBUF/CompileServer/Task/ffdfadb9c3392d9e09c573268e8c2c09/third-64/protobuf/include --cpp_out=. --mcpack_out=. protocol/baidu/rpc/rtmp.proto, takes 0.013s
[INFO] [Stage-1/3 CMD] /home/bcloud/bcloud_data/EE/BCLOUD_PROTOBUF/CompileServer/Task/ffdfadb9c3392d9e09c573268e8c2c09/third-64/protobuf/bin/protoc --plugin=protoc-gen-mcpack=../mcpack2pb/protoc-gen-mcpack.forbcloud --proto_path=./protocol -I/home/bcloud/bcloud_data/EE/BCLOUD_PROTOBUF/CompileServer/Task/ffdfadb9c3392d9e09c573268e8c2c09/baidu/base/mcpack2pb -I/home/bcloud/bcloud_data/EE/BCLOUD_PROTOBUF/CompileServer/Task/ffdfadb9c3392d9e09c573268e8c2c09/third-64/protobuf/include --cpp_out=. --mcpack_out=. protocol/baidu/rpc/span.proto ...
[CMD] /home/bcloud/bcloud_data/EE/BCLOUD_PROTOBUF/CompileServer/Task/ffdfadb9c3392d9e09c573268e8c2c09/third-64/protobuf/bin/protoc --plugin=protoc-gen-mcpack=../mcpack2pb/protoc-gen-mcpack.forbcloud --proto_path=./protocol -I/home/bcloud/bcloud_data/EE/BCLOUD_PROTOBUF/CompileServer/Task/ffdfadb9c3392d9e09c573268e8c2c09/baidu/base/mcpack2pb -I/home/bcloud/bcloud_data/EE/BCLOUD_PROTOBUF/CompileServer/Task/ffdfadb9c3392d9e09c573268e8c2c09/third-64/protobuf/include --cpp_out=. --mcpack_out=. protocol/baidu/rpc/span.proto, takes 0.017s
[INFO] [Stage-1/3 CMD] /home/bcloud/bcloud_data/EE/BCLOUD_PROTOBUF/CompileServer/Task/ffdfadb9c3392d9e09c573268e8c2c09/third-64/protobuf/bin/protoc --plugin=protoc-gen-mcpack=../mcpack2pb/protoc-gen-mcpack.forbcloud --proto_path=./protocol -I/home/bcloud/bcloud_data/EE/BCLOUD_PROTOBUF/CompileServer/Task/ffdfadb9c3392d9e09c573268e8c2c09/baidu/base/mcpack2pb -I/home/bcloud/bcloud_data/EE/BCLOUD_PROTOBUF/CompileServer/Task/ffdfadb9c3392d9e09c573268e8c2c09/third-64/protobuf/include --cpp_out=. --mcpack_out=. protocol/baidu/rpc/trackme.proto ...
[CMD] /home/bcloud/bcloud_data/EE/BCLOUD_PROTOBUF/CompileServer/Task/ffdfadb9c3392d9e09c573268e8c2c09/third-64/protobuf/bin/protoc --plugin=protoc-gen-mcpack=../mcpack2pb/protoc-gen-mcpack.forbcloud --proto_path=./protocol -I/home/bcloud/bcloud_data/EE/BCLOUD_PROTOBUF/CompileServer/Task/ffdfadb9c3392d9e09c573268e8c2c09/baidu/base/mcpack2pb -I/home/bcloud/bcloud_data/EE/BCLOUD_PROTOBUF/CompileServer/Task/ffdfadb9c3392d9e09c573268e8c2c09/third-64/protobuf/include --cpp_out=. --mcpack_out=. protocol/baidu/rpc/trackme.proto, takes 0.013s
[INFO] [Stage-1/3 CMD] /home/bcloud/bcloud_data/EE/BCLOUD_PROTOBUF/CompileServer/Task/ffdfadb9c3392d9e09c573268e8c2c09/third-64/protobuf/bin/protoc --plugin=protoc-gen-mcpack=../mcpack2pb/protoc-gen-mcpack.forbcloud --proto_path=./protocol -I/home/bcloud/bcloud_data/EE/BCLOUD_PROTOBUF/CompileServer/Task/ffdfadb9c3392d9e09c573268e8c2c09/baidu/base/mcpack2pb -I/home/bcloud/bcloud_data/EE/BCLOUD_PROTOBUF/CompileServer/Task/ffdfadb9c3392d9e09c573268e8c2c09/third-64/protobuf/include --cpp_out=. --mcpack_out=. protocol/baidu/rpc/itp_header.proto ...
[CMD] /home/bcloud/bcloud_data/EE/BCLOUD_PROTOBUF/CompileServer/Task/ffdfadb9c3392d9e09c573268e8c2c09/third-64/protobuf/bin/protoc --plugin=protoc-gen-mcpack=../mcpack2pb/protoc-gen-mcpack.forbcloud --proto_path=./protocol -I/home/bcloud/bcloud_data/EE/BCLOUD_PROTOBUF/CompileServer/Task/ffdfadb9c3392d9e09c573268e8c2c09/baidu/base/mcpack2pb -I/home/bcloud/bcloud_data/EE/BCLOUD_PROTOBUF/CompileServer/Task/ffdfadb9c3392d9e09c573268e8c2c09/third-64/protobuf/include --cpp_out=. --mcpack_out=. protocol/baidu/rpc/itp_header.proto, takes 0.021s
[INFO] [Stage-1/3] GenSources takes : 0.662s
[INFO] [Stage-2/3] Handling Stage >>> [Proto&IDL] ...
[INFO] [Stage-2/3 1/37] Preprocess baidu/paddle-serving/predictor/proto/pds_option.proto ...
[INFO] [Stage-2/3 2/37] Preprocess baidu/base/baidu-rpc/example/multi_threaded_mcpack_c++/echo.proto ...
[INFO] [Stage-2/3 3/37] Preprocess baidu/paddle-serving/predictor/proto/builtin_format.proto ...
[INFO] [Stage-2/3 4/37] Preprocess op/oped/noah/webfoot/naming-lib/msg/service.proto ...
[INFO] [Stage-2/3 5/37] Preprocess baidu/base/baidu-rpc/example/multi_threaded_ubrpc_compack_c++/echo.proto ...
[INFO] [Stage-2/3 6/37] Preprocess baidu/base/baidu-rpc/example/multi_threaded_echo_c++/echo.proto ...
[INFO] [Stage-2/3 7/37] Preprocess baidu/paddle-serving/predictor/proto/int64tensor_service.proto ...
[INFO] [Stage-2/3 8/37] Preprocess op/oped/noah/webfoot/naming-lib/msg/naming.proto ...
[INFO] [Stage-2/3 9/37] Preprocess baidu/base/baidu-rpc/example/multi_threaded_echo_fns_c++/echo.proto ...
[INFO] [Stage-2/3 10/37] Preprocess baidu/paddle-serving/predictor/proto/msg_data.proto ...
[INFO] [Stage-2/3 11/37] Preprocess baidu/base/baidu-rpc/example/partition_echo_c++/echo.proto ...
[INFO] [Stage-2/3 12/37] Preprocess baidu/base/baidu-rpc/example/nshead_pb_extension_c++/echo.proto ...
[INFO] [Stage-2/3 13/37] Preprocess baidu/base/baidu-rpc/example/echo_c++_sofa_pbrpc/echo.proto ...
[INFO] [Stage-2/3 14/37] Preprocess baidu/base/baidu-rpc/example/selective_echo_c++/echo.proto ...
[INFO] [Stage-2/3 15/37] Preprocess baidu/base/baidu-rpc/example/session_data_and_thread_local/echo.proto ...
[INFO] [Stage-2/3 16/37] Preprocess baidu/base/baidu-rpc/example/http_c++/http.proto ...
[INFO] [Stage-2/3 17/37] Preprocess baidu/base/baidu-rpc/example/echo_c++_public_pbrpc/echo.proto ...
[INFO] [Stage-2/3 18/37] Preprocess baidu/base/baidu-rpc/example/cancel_c++/echo.proto ...
[INFO] [Stage-2/3 19/37] Preprocess baidu/base/mcpack2pb/idl_options.proto ...
[INFO] [Stage-2/3 20/37] Preprocess baidu/base/baidu-rpc/example/backup_request_c++/echo.proto ...
[INFO] [Stage-2/3 21/37] Preprocess baidu/base/baidu-rpc/example/streaming_echo_c++/echo.proto ...
[INFO] [Stage-2/3 23/37] Preprocess baidu/base/baidu-rpc/example/echo_c++_nova_pbrpc/echo.proto ...
[INFO] [Stage-2/3 22/37] Preprocess baidu/base/baidu-rpc/example/echo_c++/echo.proto ...
[INFO] [Stage-2/3 24/37] Preprocess baidu/base/baidu-rpc/example/cascade_echo_c++/echo.proto ...
[INFO] [Stage-2/3 25/37] Preprocess baidu/paddle-serving/predictor/proto/echo_service.proto ...
[INFO] [Stage-2/3 26/37] Preprocess baidu/paddle-serving/predictor/proto/sparse_service.proto ...
[INFO] [Stage-2/3 27/37] Preprocess baidu/base/baidu-rpc/example/echo_c++_ubrpc_compack/echo.proto ...
[INFO] [Stage-2/3 28/37] Preprocess baidu/base/baidu-rpc/example/asynchronous_echo_c++/echo.proto ...
[INFO] [Stage-2/3 29/37] Preprocess baidu/base/baidu-rpc/example/echo_c++_hulu_pbrpc/echo.proto ...
[INFO] [Stage-2/3 30/37] Preprocess op/oped/noah/webfoot/naming-lib/msg/naminglib.proto ...
[INFO] [Stage-2/3 31/37] Preprocess baidu/base/baidu-rpc/example/parallel_echo_c++/echo.proto ...
[INFO] [Stage-2/3 32/37] Preprocess baidu/paddle-serving/predictor/proto/xrecord_format.proto ...
[INFO] [Stage-2/3 33/37] Preprocess baidu/base/baidu-rpc/example/dynamic_partition_echo_c++/echo.proto ...
[INFO] [Stage-2/3 34/37] Preprocess baidu/paddle-serving/predictor/proto/image_classification.proto ...
[INFO] [Stage-2/3 35/37] Preprocess baidu/base/baidu-rpc/example/multi_threaded_itp_c++/echo.proto ...
[INFO] [Stage-2/3 36/37] Preprocess baidu/paddle-serving/predictor/proto/dense_service.proto ...
[INFO] [Stage-2/3 37/37] Preprocess baidu/base/baidu-rpc/tools/rpc_view/view.proto ...
[INFO] [Stage-2/3] Proto&IDL takes : 0.572s
[INFO] [Stage-3/3] Handling Stage >>> [Publish header files] ...
[INFO] [Stage-3/3] Publish header files takes : 4.054s
[INFO] =========== Preprocess total takes : 5.512s
[INFO] ==========> Pack preprocess result ...
[INFO] =========== Pack preprocess result takes : 3.557s
[INFO] check gcc version use 0 ms
[INFO] start analyzing targets, ip:10.103.191.36 start:2019-02-11 16:33:07
[INFO] output time 0.05s
[INFO] start compiling targets(714), ip:10.103.191.36 start:2019-02-11 16:33:07
[ 1%] cached baidu/third-party/opencv/output/lib/libIlmImf.a ip:10.103.191.36 start:2019-02-11 16:33:09 used:0ms
[ 1%] cached baidu/third-party/opencv/output/lib/libippicv.a ip:10.103.191.36 start:2019-02-11 16:33:09 used:0ms
[ 1%] cached baidu/third-party/opencv/output/lib/liblibjasper.a ip:10.103.191.36 start:2019-02-11 16:33:09 used:0ms
[ 1%] cached baidu/third-party/opencv/output/lib/liblibjpeg.a ip:10.103.191.36 start:2019-02-11 16:33:09 used:0ms
[ 1%] cached baidu/third-party/opencv/output/lib/liblibpng.a ip:10.103.191.36 start:2019-02-11 16:33:09 used:0ms
[ 1%] cached baidu/third-party/opencv/output/lib/liblibtiff.a ip:10.103.191.36 start:2019-02-11 16:33:09 used:0ms
[ 1%] cached baidu/third-party/opencv/output/lib/liblibwebp.a ip:10.103.191.36 start:2019-02-11 16:33:09 used:0ms
[ 1%] cached baidu/third-party/opencv/output/lib/libopencv_calib3d.a ip:10.103.191.36 start:2019-02-11 16:33:09 used:0ms
[ 1%] cached baidu/third-party/opencv/output/lib/libopencv_core.a ip:10.103.191.36 start:2019-02-11 16:33:09 used:0ms
[ 1%] cached baidu/third-party/opencv/output/lib/libopencv_features2d.a ip:10.103.191.36 start:2019-02-11 16:33:09 used:0ms
[ 1%] cached baidu/third-party/opencv/output/lib/libopencv_flann.a ip:10.103.191.36 start:2019-02-11 16:33:09 used:0ms
[ 1%] cached baidu/third-party/opencv/output/lib/libopencv_highgui.a ip:10.103.191.36 start:2019-02-11 16:33:09 used:0ms
[ 1%] cached baidu/third-party/opencv/output/lib/libopencv_imgcodecs.a ip:10.103.191.36 start:2019-02-11 16:33:09 used:0ms
[ 1%] cached baidu/third-party/opencv/output/lib/libopencv_imgproc.a ip:10.103.191.36 start:2019-02-11 16:33:09 used:0ms
[ 2%] cached baidu/third-party/opencv/output/lib/libopencv_ml.a ip:10.103.191.36 start:2019-02-11 16:33:09 used:0ms
[ 2%] cached baidu/third-party/opencv/output/lib/libopencv_objdetect.a ip:10.103.191.36 start:2019-02-11 16:33:09 used:0ms
[ 2%] cached baidu/third-party/opencv/output/lib/libopencv_photo.a ip:10.103.191.36 start:2019-02-11 16:33:09 used:0ms
[ 2%] cached baidu/third-party/opencv/output/lib/libopencv_shape.a ip:10.103.191.36 start:2019-02-11 16:33:09 used:0ms
[ 2%] cached baidu/third-party/opencv/output/lib/libopencv_stitching.a ip:10.103.191.36 start:2019-02-11 16:33:09 used:0ms
[ 2%] cached baidu/third-party/opencv/output/lib/libopencv_superres.a ip:10.103.191.36 start:2019-02-11 16:33:09 used:0ms
[ 2%] cached baidu/third-party/opencv/output/lib/libopencv_video.a ip:10.103.191.36 start:2019-02-11 16:33:09 used:0ms
[ 3%] cached baidu/third-party/opencv/output/lib/libopencv_videoio.a ip:10.103.191.36 start:2019-02-11 16:33:09 used:0ms
[ 3%] cached baidu/third-party/opencv/output/lib/libopencv_videostab.a ip:10.103.191.36 start:2019-02-11 16:33:09 used:0ms
[ 3%] cached baidu/third-party/opencv/output/lib/libopencv_xfeatures2d.a ip:10.103.191.36 start:2019-02-11 16:33:09 used:0ms
[ 3%] cached lib2-64/bsl/output/lib/libbsl.a ip:10.103.191.36 start:2019-02-11 16:33:09 used:0ms
[ 3%] cached lib2-64/bsl/output/lib/libbsl_ResourcePool.a ip:10.103.191.36 start:2019-02-11 16:33:09 used:0ms
[ 3%] cached lib2-64/bsl/output/lib/libbsl_archive.a ip:10.103.191.36 start:2019-02-11 16:33:09 used:0ms
[ 3%] cached lib2-64/bsl/output/lib/libbsl_buffer.a ip:10.103.191.36 start:2019-02-11 16:33:09 used:0ms
[ 4%] cached lib2-64/bsl/output/lib/libbsl_check_cast.a ip:10.103.191.36 start:2019-02-11 16:33:09 used:0ms
[ 4%] cached lib2-64/bsl/output/lib/libbsl_exception.a ip:10.103.191.36 start:2019-02-11 16:33:09 used:0ms
[ 4%] cached lib2-64/bsl/output/lib/libbsl_pool.a ip:10.103.191.36 start:2019-02-11 16:33:09 used:0ms
[ 4%] cached lib2-64/bsl/output/lib/libbsl_utils.a ip:10.103.191.36 start:2019-02-11 16:33:09 used:0ms
[ 4%] cached lib2-64/bsl/output/lib/libbsl_var.a ip:10.103.191.36 start:2019-02-11 16:33:09 used:0ms
[ 4%] cached lib2-64/bsl/output/lib/libbsl_var_implement.a ip:10.103.191.36 start:2019-02-11 16:33:09 used:0ms
[ 4%] cached lib2-64/bsl/output/lib/libbsl_var_utils.a ip:10.103.191.36 start:2019-02-11 16:33:09 used:0ms
[ 5%] cached lib2-64/cache/output/lib/libmcache.a ip:10.103.191.36 start:2019-02-11 16:33:09 used:0ms
[ 5%] cached lib2-64/dict/output/lib/libuldict.a ip:10.103.191.36 start:2019-02-11 16:33:09 used:0ms
[ 5%] cached third-64/boost/output/lib/libboost_atomic.a ip:10.103.191.36 start:2019-02-11 16:33:09 used:0ms
[ 5%] cached third-64/boost/output/lib/libboost_chrono.a ip:10.103.191.36 start:2019-02-11 16:33:09 used:0ms
[ 5%] cached third-64/boost/output/lib/libboost_container.a ip:10.103.191.36 start:2019-02-11 16:33:09 used:0ms
[ 5%] cached third-64/boost/output/lib/libboost_context.a ip:10.103.191.36 start:2019-02-11 16:33:09 used:0ms
[ 5%] cached third-64/boost/output/lib/libboost_coroutine.a ip:10.103.191.36 start:2019-02-11 16:33:09 used:0ms
[ 6%] cached third-64/boost/output/lib/libboost_date_time.a ip:10.103.191.36 start:2019-02-11 16:33:09 used:0ms
[ 6%] cached third-64/boost/output/lib/libboost_exception.a ip:10.103.191.36 start:2019-02-11 16:33:09 used:0ms
[ 6%] cached third-64/boost/output/lib/libboost_filesystem.a ip:10.103.191.36 start:2019-02-11 16:33:09 used:0ms
[ 6%] cached third-64/boost/output/lib/libboost_graph.a ip:10.103.191.36 start:2019-02-11 16:33:09 used:0ms
[ 6%] cached third-64/boost/output/lib/libboost_iostreams.a ip:10.103.191.36 start:2019-02-11 16:33:09 used:0ms
[ 6%] cached third-64/boost/output/lib/libboost_locale.a ip:10.103.191.36 start:2019-02-11 16:33:09 used:0ms
[ 6%] cached third-64/boost/output/lib/libboost_log.a ip:10.103.191.36 start:2019-02-11 16:33:09 used:0ms
[ 7%] cached third-64/boost/output/lib/libboost_log_setup.a ip:10.103.191.36 start:2019-02-11 16:33:09 used:0ms
[ 7%] cached third-64/boost/output/lib/libboost_math_c99.a ip:10.103.191.36 start:2019-02-11 16:33:09 used:0ms
[ 7%] cached third-64/boost/output/lib/libboost_math_c99f.a ip:10.103.191.36 start:2019-02-11 16:33:09 used:0ms
[ 7%] cached third-64/boost/output/lib/libboost_math_c99l.a ip:10.103.191.36 start:2019-02-11 16:33:09 used:0ms
[ 7%] cached third-64/boost/output/lib/libboost_math_tr1.a ip:10.103.191.36 start:2019-02-11 16:33:09 used:0ms
[ 7%] cached third-64/boost/output/lib/libboost_math_tr1f.a ip:10.103.191.36 start:2019-02-11 16:33:09 used:0ms
[ 7%] cached third-64/boost/output/lib/libboost_math_tr1l.a ip:10.103.191.36 start:2019-02-11 16:33:09 used:0ms
[ 7%] cached third-64/boost/output/lib/libboost_prg_exec_monitor.a ip:10.103.191.36 start:2019-02-11 16:33:09 used:0ms
[ 8%] cached third-64/boost/output/lib/libboost_program_options.a ip:10.103.191.36 start:2019-02-11 16:33:09 used:0ms
[ 8%] cached third-64/boost/output/lib/libboost_python.a ip:10.103.191.36 start:2019-02-11 16:33:09 used:0ms
[ 8%] cached third-64/boost/output/lib/libboost_random.a ip:10.103.191.36 start:2019-02-11 16:33:09 used:0ms
[ 8%] cached third-64/boost/output/lib/libboost_regex.a ip:10.103.191.36 start:2019-02-11 16:33:09 used:0ms
[ 8%] cached third-64/boost/output/lib/libboost_serialization.a ip:10.103.191.36 start:2019-02-11 16:33:09 used:0ms
[ 8%] cached third-64/boost/output/lib/libboost_signals.a ip:10.103.191.36 start:2019-02-11 16:33:09 used:0ms
[ 8%] cached third-64/boost/output/lib/libboost_system.a ip:10.103.191.36 start:2019-02-11 16:33:09 used:0ms
[ 9%] cached third-64/boost/output/lib/libboost_test_exec_monitor.a ip:10.103.191.36 start:2019-02-11 16:33:09 used:0ms
[ 9%] cached third-64/boost/output/lib/libboost_thread.a ip:10.103.191.36 start:2019-02-11 16:33:09 used:0ms
[ 9%] cached third-64/boost/output/lib/libboost_timer.a ip:10.103.191.36 start:2019-02-11 16:33:09 used:0ms
[ 9%] cached third-64/boost/output/lib/libboost_unit_test_framework.a ip:10.103.191.36 start:2019-02-11 16:33:09 used:0ms
[ 9%] cached third-64/boost/output/lib/libboost_wave.a ip:10.103.191.36 start:2019-02-11 16:33:09 used:0ms
[ 9%] cached third-64/boost/output/lib/libboost_wserialization.a ip:10.103.191.36 start:2019-02-11 16:33:09 used:0ms
[ 9%] cached third-64/gflags/output/lib/libgflags.a ip:10.103.191.36 start:2019-02-11 16:33:09 used:0ms
[ 10%] cached third-64/gflags/output/lib/libgflags_nothreads.a ip:10.103.191.36 start:2019-02-11 16:33:09 used:0ms
[ 10%] cached third-64/gtest/output/lib/libgtest.a ip:10.103.191.36 start:2019-02-11 16:33:09 used:0ms
[ 10%] cached third-64/gtest/output/lib/libgtest_main.a ip:10.103.191.36 start:2019-02-11 16:33:09 used:0ms
[ 10%] cached third-64/leveldb/output/lib/libleveldb.a ip:10.103.191.36 start:2019-02-11 16:33:09 used:0ms
[ 10%] cached third-64/libevent/output/lib/libevent.a ip:10.103.191.36 start:2019-02-11 16:33:09 used:0ms
[ 10%] cached third-64/libevent/output/lib/libevent_core.a ip:10.103.191.36 start:2019-02-11 16:33:09 used:0ms
[ 10%] cached third-64/libevent/output/lib/libevent_extra.a ip:10.103.191.36 start:2019-02-11 16:33:09 used:0ms
[ 11%] cached third-64/libevent/output/lib/libevent_openssl.a ip:10.103.191.36 start:2019-02-11 16:33:09 used:0ms
[ 11%] cached third-64/libevent/output/lib/libevent_pthreads.a ip:10.103.191.36 start:2019-02-11 16:33:09 used:0ms
[ 11%] cached third-64/pcre/output/lib/libpcre.a ip:10.103.191.36 start:2019-02-11 16:33:09 used:0ms
[ 11%] cached third-64/pcre/output/lib/libpcrecpp.a ip:10.103.191.36 start:2019-02-11 16:33:09 used:0ms
[ 11%] cached third-64/pcre/output/lib/libpcreposix.a ip:10.103.191.36 start:2019-02-11 16:33:09 used:0ms
[ 11%] cached third-64/protobuf/output/lib/libprotobuf-lite.a ip:10.103.191.36 start:2019-02-11 16:33:09 used:0ms
[ 11%] cached third-64/protobuf/output/lib/libprotobuf.a ip:10.103.191.36 start:2019-02-11 16:33:09 used:0ms
[ 12%] cached third-64/protobuf/output/lib/libprotoc.a ip:10.103.191.36 start:2019-02-11 16:33:09 used:0ms
[ 12%] cached app/ecom/elib/ecommon-lib/app_ecom_elib_ecommon-lib_ellib_lib_el_heap.cpp.o ip:10.103.191.36 start:2019-02-11 16:33:09 used:89ms
[ 12%] cached app/ecom/elib/ecommon-lib/app_ecom_elib_ecommon-lib_ellib_lib_el_string.cpp.o ip:10.103.191.36 start:2019-02-11 16:33:09 used:184ms
[ 12%] cached baidu/base/baidu-rpc/baidu/rpc/baidu_base_baidu-rpc_bdrpc_lib_builtin_service.pb.cc.o ip:10.103.191.36 start:2019-02-11 16:33:09 used:199ms
[ 12%] cached baidu/base/baidu-rpc/baidu/rpc/policy/baidu_base_baidu-rpc_bdrpc_lib_baidu_rpc_meta.pb.cc.o ip:10.103.191.36 start:2019-02-11 16:33:09 used:187ms
[ 12%] cached baidu/base/baidu-rpc/baidu/rpc/policy/baidu_base_baidu-rpc_bdrpc_lib_public_pbrpc_meta.pb.cc.o ip:10.103.191.36 start:2019-02-11 16:33:09 used:216ms
[ 12%] cached baidu/base/baidu-rpc/src/baidu/rpc/details/baidu_base_baidu-rpc_bdrpc_lib_has_epollrdhup.cpp.o ip:10.103.191.36 start:2019-02-11 16:33:09 used:150ms
[ 13%] cached baidu/base/bvar/bvar/detail/baidu_base_bvar_bvar_lib_sampler.cpp.o ip:10.103.191.36 start:2019-02-11 16:33:09 used:295ms
[ 13%] cached baidu/base/common/base/baidu_base_common_base_lib_time.cpp.o ip:10.103.191.36 start:2019-02-11 16:33:10 used:145ms
[ 13%] cached baidu/base/bthread/bthread/baidu_base_bthread_bthread_lib_task_control.cpp.o ip:10.103.191.36 start:2019-02-11 16:33:09 used:344ms
[ 13%] cached baidu/base/baidu-rpc/src/baidu/rpc/policy/baidu_base_baidu-rpc_bdrpc_lib_baidu_naming_service.cpp.o ip:10.103.191.36 start:2019-02-11 16:33:09 used:344ms
[ 13%] cached baidu/base/baidu-rpc/src/baidu/rpc/builtin/baidu_base_baidu-rpc_bdrpc_lib_threads_service.cpp.o ip:10.103.191.36 start:2019-02-11 16:33:09 used:385ms
[ 13%] cached baidu/base/common/base/baidu_base_common_base_lib_callback_internal.cc.o ip:10.103.191.36 start:2019-02-11 16:33:09 used:313ms
[ 13%] cached baidu/base/common/base/baidu_base_common_base_lib_sequence_checker_impl.cc.o ip:10.103.191.36 start:2019-02-11 16:33:10 used:240ms
[ 14%] cached baidu/base/common/base/third_party/xdg_mime/baidu_base_common_base_lib_xdgmimealias.c.o ip:10.103.191.36 start:2019-02-11 16:33:10 used:106ms
[ 14%] cached baidu/base/baidu-rpc/src/baidu/rpc/builtin/baidu_base_baidu-rpc_bdrpc_lib_vlog_service.cpp.o ip:10.103.191.36 start:2019-02-11 16:33:09 used:458ms
[ 14%] cached baidu/base/common/base/debug/baidu_base_common_base_lib_stack_trace_posix.cc.o ip:10.103.191.36 start:2019-02-11 16:33:10 used:350ms
[ 14%] cached baidu/base/bthread/baidu_base_bthread_bthread_lib_bthread_id.cpp.o ip:10.103.191.36 start:2019-02-11 16:33:09 used:395ms
[ 14%] cached baidu/base/baidu-rpc/src/baidu/rpc/baidu_base_baidu-rpc_bdrpc_lib_selective_channel.cpp.o ip:10.103.191.36 start:2019-02-11 16:33:09 used:583ms
[ 14%] cached baidu/base/common/base/third_party/xdg_mime/baidu_base_common_base_lib_xdgmimeicon.c.o ip:10.103.191.36 start:2019-02-11 16:33:10 used:134ms
[ 14%] cached baidu/base/baidu-rpc/src/baidu/rpc/baidu_base_baidu-rpc_bdrpc_lib_global.cpp.o ip:10.103.191.36 start:2019-02-11 16:33:09 used:627ms
[ 14%] cached baidu/base/baidu-rpc/src/baidu/rpc/baidu_base_baidu-rpc_bdrpc_lib_server.cpp.o ip:10.103.191.36 start:2019-02-11 16:33:09 used:631ms
[ 15%] cached baidu/base/common/base/files/baidu_base_common_base_lib_file_path.cc.o ip:10.103.191.36 start:2019-02-11 16:33:10 used:305ms
[ 15%] cached baidu/base/common/base/baidu_base_common_base_lib_file_util_linux.cc.o ip:10.103.191.36 start:2019-02-11 16:33:10 used:316ms
[ 15%] cached baidu/base/baidu-rpc/src/baidu/rpc/policy/baidu_base_baidu-rpc_bdrpc_lib_sofa_pbrpc_protocol.cpp.o ip:10.103.191.36 start:2019-02-11 16:33:09 used:660ms
[ 15%] cached baidu/base/common/base/memory/baidu_base_common_base_lib_aligned_memory.cc.o ip:10.103.191.36 start:2019-02-11 16:33:10 used:282ms
[ 15%] cached baidu/base/common/base/files/baidu_base_common_base_lib_file_enumerator_posix.cc.o ip:10.103.191.36 start:2019-02-11 16:33:10 used:367ms
[ 15%] cached baidu/base/baidu-rpc/src/baidu/rpc/details/baidu_base_baidu-rpc_bdrpc_lib_http_message.cpp.o ip:10.103.191.36 start:2019-02-11 16:33:10 used:544ms
[ 15%] cached baidu/base/baidu-rpc/src/baidu/rpc/builtin/baidu_base_baidu-rpc_bdrpc_lib_bad_method_service.cpp.o ip:10.103.191.36 start:2019-02-11 16:33:09 used:719ms
[ 16%] cached baidu/base/common/base/json/baidu_base_common_base_lib_string_escape.cc.o ip:10.103.191.36 start:2019-02-11 16:33:10 used:343ms
[ 16%] cached baidu/base/common/base/third_party/snappy/baidu_base_common_base_lib_snappy.cc.o ip:10.103.191.36 start:2019-02-11 16:33:10 used:278ms
[ 16%] cached baidu/base/common/base/memory/baidu_base_common_base_lib_singleton.cc.o ip:10.103.191.36 start:2019-02-11 16:33:10 used:324ms
[ 16%] cached baidu/base/common/base/baidu_base_common_base_lib_guid_posix.cc.o ip:10.103.191.36 start:2019-02-11 16:33:10 used:237ms
[ 16%] cached baidu/base/common/base/baidu_base_common_base_lib_rand_util_posix.cc.o ip:10.103.191.36 start:2019-02-11 16:33:10 used:271ms
[ 16%] cached baidu/base/common/base/message_loop/baidu_base_common_base_lib_message_loop_proxy_impl.cc.o ip:10.103.191.36 start:2019-02-11 16:33:10 used:340ms
[ 16%] cached baidu/base/ullib/src/comlog/comempool/baidu_base_ullib_ullib_lib_mempool.cpp.o ip:10.103.191.36 start:2019-02-11 16:33:10 used:190ms
[ 17%] cached baidu/base/common/base/strings/baidu_base_common_base_lib_latin1_string_conversions.cc.o ip:10.103.191.36 start:2019-02-11 16:33:10 used:138ms
[ 17%] cached baidu/base/common/base/debug/baidu_base_common_base_lib_trace_event_synthetic_delay.cc.o ip:10.103.191.36 start:2019-02-11 16:33:10 used:297ms
[ 17%] cached baidu/base/common/base/baidu_base_common_base_lib_popen.cpp.o ip:10.103.191.36 start:2019-02-11 16:33:10 used:279ms
[ 17%] cached baidu/base/common/base/message_loop/baidu_base_common_base_lib_message_loop_proxy.cc.o ip:10.103.191.36 start:2019-02-11 16:33:10 used:386ms
[ 17%] cached baidu/base/common/base/process/baidu_base_common_base_lib_process_iterator.cc.o ip:10.103.191.36 start:2019-02-11 16:33:10 used:196ms
[ 17%] cached baidu/base/common/base/debug/baidu_base_common_base_lib_dump_without_crashing.cc.o ip:10.103.191.36 start:2019-02-11 16:33:10 used:295ms
[ 17%] cached baidu/base/common/base/process/baidu_base_common_base_lib_process_metrics.cc.o ip:10.103.191.36 start:2019-02-11 16:33:10 used:299ms
[ 18%] cached baidu/base/common/base/timer/baidu_base_common_base_lib_mock_timer.cc.o ip:10.103.191.36 start:2019-02-11 16:33:10 used:325ms
[ 18%] cached baidu/base/common/base/threading/baidu_base_common_base_lib_thread_checker_impl.cc.o ip:10.103.191.36 start:2019-02-11 16:33:10 used:127ms
[ 18%] cached baidu/base/ullib/src/comlog/appender/baidu_base_ullib_ullib_lib_appender.cpp.o ip:10.103.191.36 start:2019-02-11 16:33:10 used:195ms
[ 18%] cached baidu/base/common/base/debug/baidu_base_common_base_lib_trace_event_system_stats_monitor.cc.o ip:10.103.191.36 start:2019-02-11 16:33:10 used:435ms
[ 18%] cached baidu/base/common/base/process/baidu_base_common_base_lib_memory_linux.cc.o ip:10.103.191.36 start:2019-02-11 16:33:10 used:276ms
[ 18%] cached baidu/base/common/base/synchronization/baidu_base_common_base_lib_cancellation_flag.cc.o ip:10.103.191.36 start:2019-02-11 16:33:10 used:254ms
[ 18%] cached baidu/base/common/base/strings/baidu_base_common_base_lib_string_number_conversions.cc.o ip:10.103.191.36 start:2019-02-11 16:33:10 used:297ms
[ 19%] cached baidu/base/common/base/threading/baidu_base_common_base_lib_thread_collision_warner.cc.o ip:10.103.191.36 start:2019-02-11 16:33:10 used:235ms
[ 19%] cached baidu/base/common/base/process/baidu_base_common_base_lib_process_handle_linux.cc.o ip:10.103.191.36 start:2019-02-11 16:33:10 used:248ms
[ 19%] cached baidu/base/baidu-rpc/src/baidu/rpc/policy/baidu_base_baidu-rpc_bdrpc_lib_nova_pbrpc_protocol.cpp.o ip:10.103.191.36 start:2019-02-11 16:33:10 used:537ms
[ 19%] cached baidu/base/common/base/threading/baidu_base_common_base_lib_watchdog.cc.o ip:10.103.191.36 start:2019-02-11 16:33:10 used:223ms
[ 19%] cached baidu/base/common/base/timer/baidu_base_common_base_lib_timer.cc.o ip:10.103.191.36 start:2019-02-11 16:33:10 used:241ms
[ 19%] cached op/oped/noah/webfoot/naming-lib/src/op_oped_noah_webfoot_naming-lib_webfoot_naming_lib_webfoot_util.cpp.o ip:10.103.191.36 start:2019-02-11 16:33:10 used:190ms
[ 19%] cached public/configure/constraint/public_configure_config_lib_cc_default.cpp.o ip:10.103.191.36 start:2019-02-11 16:33:10 used:254ms
[ 20%] cached public/configure/reader/public_configure_config_lib_Reader.cpp.o ip:10.103.191.36 start:2019-02-11 16:33:11 used:218ms
[ 20%] cached public/noah/giano-lib/release/baas-lib-c/common/jsoncpp/public_noah_giano-lib_release_baas-lib-c_baas_interface_lib_json_reader.cpp.o ip:10.103.191.36 start:2019-02-11 16:33:11 used:168ms
[ 20%] cached public/noah/giano-lib/release/baas-lib-c/mock/public_noah_giano-lib_release_baas-lib-c_baas_interface_lib_mock_proof_verifier_impl.cpp.o ip:10.103.191.36 start:2019-02-11 16:33:11 used:224ms
[ 20%] cached baidu/base/common/base/synchronization/baidu_base_common_base_lib_waitable_event_posix.cc.o ip:10.103.191.36 start:2019-02-11 16:33:11 used:289ms
[ 20%] cached public/noah/giano-lib/release/baas-lib-c/common/jsoncpp/public_noah_giano-lib_release_baas-lib-c_baas_interface_lib_json_writer.cpp.o ip:10.103.191.36 start:2019-02-11 16:33:11 used:176ms
[ 20%] cached public/noah/giano-lib/release/baas-lib-c/shared_object/public_noah_giano-lib_release_baas-lib-c_baas_interface_lib_shared_object.cpp.o ip:10.103.191.36 start:2019-02-11 16:33:11 used:150ms
[ 20%] cached public/noah/giano-lib/release/baas-lib-c/public_noah_giano-lib_release_baas-lib-c_baas_interface_lib_delegation_params.cpp.o ip:10.103.191.36 start:2019-02-11 16:33:11 used:193ms
[ 21%] compile baidu/paddle-serving/predictor/proto/baidu_paddle-serving_predictor_pdserving_lib_msg_data.pb.cc.o ip:10.103.191.36 start:2019-02-11 16:33:11 used:901ms
[ 21%] cached baidu/base/common/base/baidu_base_common_base_lib_arena.cpp.o ip:10.103.191.37 start:2019-02-11 16:33:12 used:136ms
[ 21%] cached app/ecom/elib/ecommon-lib/app_ecom_elib_ecommon-lib_ellib_lib_el_tvec.cpp.o ip:10.103.191.32 start:2019-02-11 16:33:12 used:134ms
[ 21%] cached baidu/base/common/base/baidu_base_common_base_lib_guid.cc.o ip:10.103.191.31 start:2019-02-11 16:33:12 used:135ms
[ 21%] cached app/ecom/elib/ecommon-lib/app_ecom_elib_ecommon-lib_ellib_lib_el_bst.cpp.o ip:10.103.191.33 start:2019-02-11 16:33:12 used:151ms
[ 21%] cached baidu/base/baidu-rpc/src/baidu/rpc/builtin/baidu_base_baidu-rpc_bdrpc_lib_pprof_perl.cpp.o ip:10.103.191.34 start:2019-02-11 16:33:12 used:131ms
[ 21%] cached baidu/base/common/base/baidu_base_common_base_lib_base64.cc.o ip:10.103.191.31 start:2019-02-11 16:33:12 used:165ms
[ 21%] cached baidu/base/bthread/baidu_base_bthread_bthread_lib_bthread_errno.cpp.o ip:10.102.26.40 start:2019-02-11 16:33:12 used:135ms
[ 22%] cached baidu/base/baidu-rpc/src/baidu/rpc/baidu_base_baidu-rpc_bdrpc_lib_ssl_option.cpp.o ip:10.103.191.34 start:2019-02-11 16:33:12 used:139ms
[ 22%] cached app/ecom/elib/ecommon-lib/app_ecom_elib_ecommon-lib_ellib_lib_el_memorypool.cpp.o ip:10.103.191.32 start:2019-02-11 16:33:12 used:173ms
[ 22%] cached baidu/base/common/base/baidu_base_common_base_lib_scoped_native_library.cc.o ip:10.103.191.31 start:2019-02-11 16:33:12 used:171ms
[ 22%] cached baidu/base/common/base/allocator/baidu_base_common_base_lib_allocator_extension_thunks.cc.o ip:10.102.26.40 start:2019-02-11 16:33:12 used:147ms
[ 22%] cached app/ecom/elib/ecommon-lib/app_ecom_elib_ecommon-lib_ellib_lib_el_strpool.cpp.o ip:10.103.191.34 start:2019-02-11 16:33:12 used:177ms
[ 22%] cached app/ecom/elib/ecommon-lib/app_ecom_elib_ecommon-lib_ellib_lib_el_common.cpp.o ip:10.102.26.40 start:2019-02-11 16:33:12 used:164ms
[ 22%] cached baidu/base/baidu-rpc/src/baidu/rpc/baidu_base_baidu-rpc_bdrpc_lib_reloadable_flags.cpp.o ip:10.102.26.39 start:2019-02-11 16:33:12 used:162ms
[ 23%] cached baidu/base/baidu-rpc/baidu/rpc/policy/baidu_base_baidu-rpc_bdrpc_lib_nshead_meta.pb.cc.o ip:10.103.191.31 start:2019-02-11 16:33:12 used:205ms
[ 23%] cached baidu/base/baidu-rpc/baidu/rpc/policy/baidu_base_baidu-rpc_bdrpc_lib_streaming_rpc_meta.pb.cc.o ip:10.102.26.39 start:2019-02-11 16:33:12 used:173ms
[ 23%] cached baidu/base/common/base/allocator/baidu_base_common_base_lib_type_profiler_control.cc.o ip:10.102.26.38 start:2019-02-11 16:33:12 used:103ms
[ 23%] cached app/ecom/elib/ecommon-lib/app_ecom_elib_ecommon-lib_ellib_lib_el_container.cpp.o ip:10.102.26.38 start:2019-02-11 16:33:12 used:208ms
[ 23%] cached baidu/base/baidu-rpc/baidu/rpc/baidu_base_baidu-rpc_bdrpc_lib_errno.pb.cc.o ip:10.103.191.35 start:2019-02-11 16:33:12 used:172ms
[ 23%] cached baidu/base/baidu-rpc/baidu/rpc/baidu_base_baidu-rpc_bdrpc_lib_rpc_dump.pb.cc.o ip:10.103.191.35 start:2019-02-11 16:33:12 used:181ms
[ 23%] cached baidu/base/common/base/baidu_base_common_base_lib_md5.cc.o ip:10.103.191.31 start:2019-02-11 16:33:12 used:229ms
[ 24%] cached baidu/base/baidu-rpc/baidu/rpc/baidu_base_baidu-rpc_bdrpc_lib_rdma.pb.cc.o ip:10.103.191.34 start:2019-02-11 16:33:12 used:224ms
[ 24%] cached baidu/base/baidu-rpc/baidu/rpc/baidu_base_baidu-rpc_bdrpc_lib_get_favicon.pb.cc.o ip:10.102.26.38 start:2019-02-11 16:33:12 used:241ms
[ 24%] cached app/ecom/elib/ecommon-lib/app_ecom_elib_ecommon-lib_ellib_lib_el_memorypool2.cpp.o ip:10.102.26.40 start:2019-02-11 16:33:12 used:214ms
[ 24%] cached baidu/base/baidu-rpc/baidu/rpc/baidu_base_baidu-rpc_bdrpc_lib_span.pb.cc.o ip:10.102.26.38 start:2019-02-11 16:33:12 used:250ms
[ 24%] cached baidu/base/common/base/baidu_base_common_base_lib_callback_helpers.cc.o ip:10.103.191.32 start:2019-02-11 16:33:12 used:251ms
[ 24%] cached baidu/base/common/base/baidu_base_common_base_lib_atomicops_internals_x86_gcc.cc.o ip:10.102.26.39 start:2019-02-11 16:33:12 used:206ms
[ 24%] cached baidu/base/common/base/baidu_base_common_base_lib_sequenced_task_runner.cc.o ip:10.103.191.31 start:2019-02-11 16:33:12 used:255ms
[ 25%] cached baidu/base/common/base/baidu_base_common_base_lib_barrier_closure.cc.o ip:10.102.26.40 start:2019-02-11 16:33:12 used:236ms
[ 25%] cached app/ecom/elib/ecommon-lib/output/lib/libellib.a ip:10.103.191.36 start:2019-02-11 16:33:12 used:22ms
[ 25%] cached baidu/base/common/base/baidu_base_common_base_lib_big_endian.cc.o ip:10.103.191.31 start:2019-02-11 16:33:12 used:270ms
[ 25%] cached baidu/base/common/base/baidu_base_common_base_lib_pickle.cc.o ip:10.103.191.31 start:2019-02-11 16:33:12 used:264ms
[ 25%] cached baidu/base/baidu-rpc/src/baidu/rpc/baidu_base_baidu-rpc_bdrpc_lib_esp_message.cpp.o ip:10.103.191.33 start:2019-02-11 16:33:12 used:289ms
[ 25%] cached baidu/base/baidu-rpc/src/baidu/rpc/baidu_base_baidu-rpc_bdrpc_lib_adaptive_connection_type.cpp.o ip:10.102.26.39 start:2019-02-11 16:33:12 used:256ms
[ 25%] cached baidu/base/baidu-rpc/src/baidu/rpc/policy/baidu_base_baidu-rpc_bdrpc_lib_hasher.cpp.o ip:10.102.26.38 start:2019-02-11 16:33:12 used:251ms
[ 26%] cached baidu/base/baidu-rpc/baidu/rpc/baidu_base_baidu-rpc_bdrpc_lib_options.pb.cc.o ip:10.102.26.40 start:2019-02-11 16:33:12 used:269ms
[ 26%] cached baidu/base/common/base/baidu_base_common_base_lib_at_exit.cc.o ip:10.103.191.32 start:2019-02-11 16:33:12 used:303ms
[ 26%] cached baidu/base/baidu-rpc/baidu/rpc/policy/baidu_base_baidu-rpc_bdrpc_lib_mongo.pb.cc.o ip:10.103.191.37 start:2019-02-11 16:33:12 used:332ms
[ 26%] cached baidu/base/baidu-rpc/baidu/rpc/baidu_base_baidu-rpc_bdrpc_lib_rtmp.pb.cc.o ip:10.103.191.35 start:2019-02-11 16:33:12 used:260ms
[ 26%] cached baidu/base/common/base/baidu_base_common_base_lib_sys_info_linux.cc.o ip:10.103.191.31 start:2019-02-11 16:33:12 used:305ms
[ 26%] cached baidu/base/baidu-rpc/src/baidu/rpc/baidu_base_baidu-rpc_bdrpc_lib_http_method.cpp.o ip:10.102.26.39 start:2019-02-11 16:33:12 used:282ms
[ 26%] cached baidu/base/common/base/baidu_base_common_base_lib_base_paths.cc.o ip:10.102.26.40 start:2019-02-11 16:33:12 used:293ms
[ 27%] cached baidu/base/baidu-rpc/src/baidu/rpc/baidu_base_baidu-rpc_bdrpc_lib_compress.cpp.o ip:10.103.191.31 start:2019-02-11 16:33:12 used:328ms
[ 27%] cached baidu/base/baidu-rpc/baidu/rpc/baidu_base_baidu-rpc_bdrpc_lib_itp_header.pb.cc.o ip:10.102.26.38 start:2019-02-11 16:33:12 used:328ms
[ 27%] cached baidu/base/common/base/baidu_base_common_base_lib_base_paths_posix.cc.o ip:10.103.191.32 start:2019-02-11 16:33:12 used:329ms
[ 27%] cached baidu/base/baidu-rpc/src/baidu/rpc/details/baidu_base_baidu-rpc_bdrpc_lib_rtmp_utils.cpp.o ip:10.103.191.35 start:2019-02-11 16:33:12 used:115ms
[ 27%] cached baidu/base/baidu-rpc/src/baidu/rpc/details/baidu_base_baidu-rpc_bdrpc_lib_usercode_backup_pool.cpp.o ip:10.103.191.34 start:2019-02-11 16:33:12 used:328ms
[ 27%] cached baidu/base/bvar/bvar/detail/baidu_base_bvar_bvar_lib_percentile.cpp.o ip:10.103.191.37 start:2019-02-11 16:33:12 used:360ms
[ 27%] cached baidu/base/baidu-rpc/src/baidu/rpc/baidu_base_baidu-rpc_bdrpc_lib_rpc_dump.cpp.o ip:10.103.191.37 start:2019-02-11 16:33:12 used:368ms
[ 28%] cached baidu/base/baidu-rpc/baidu/rpc/baidu_base_baidu-rpc_bdrpc_lib_get_js.pb.cc.o ip:10.102.26.40 start:2019-02-11 16:33:12 used:324ms
[ 28%] cached baidu/base/baidu-rpc/src/baidu/rpc/baidu_base_baidu-rpc_bdrpc_lib_nshead_message.cpp.o ip:10.103.191.33 start:2019-02-11 16:33:12 used:359ms
[ 28%] cached baidu/base/baidu-rpc/baidu/rpc/policy/baidu_base_baidu-rpc_bdrpc_lib_sofa_pbrpc_meta.pb.cc.o ip:10.102.26.39 start:2019-02-11 16:33:12 used:320ms
[ 28%] cached baidu/base/common/base/baidu_base_common_base_lib_base_switches.cc.o ip:10.102.26.39 start:2019-02-11 16:33:12 used:64ms
[ 28%] cached baidu/base/baidu-rpc/src/baidu/rpc/baidu_base_baidu-rpc_bdrpc_lib_periodic_naming_service.cpp.o ip:10.102.26.38 start:2019-02-11 16:33:12 used:350ms
[ 28%] cached baidu/base/baidu-rpc/src/baidu/rpc/details/baidu_base_baidu-rpc_bdrpc_lib_hpack.cpp.o ip:10.102.26.40 start:2019-02-11 16:33:12 used:326ms
[ 28%] cached baidu/base/common/base/baidu_base_common_base_lib_build_time.cc.o ip:10.103.191.34 start:2019-02-11 16:33:12 used:300ms
[ 28%] cached baidu/base/baidu-rpc/src/baidu/rpc/baidu_base_baidu-rpc_bdrpc_lib_redis_reply.cpp.o ip:10.103.191.34 start:2019-02-11 16:33:12 used:359ms
[ 29%] cached baidu/base/common/base/metrics/baidu_base_common_base_lib_sparse_histogram.cc.o ip:10.103.191.31 start:2019-02-11 16:33:12 used:307ms
[ 29%] cached baidu/base/baidu-rpc/baidu/rpc/policy/baidu_base_baidu-rpc_bdrpc_lib_hulu_pbrpc_meta.pb.cc.o ip:10.103.191.35 start:2019-02-11 16:33:12 used:331ms
[ 29%] cached baidu/base/baidu-rpc/src/baidu/rpc/baidu_base_baidu-rpc_bdrpc_lib_amf.cpp.o ip:10.102.26.38 start:2019-02-11 16:33:12 used:385ms
[ 29%] cached baidu/base/bthread/baidu_base_bthread_bthread_lib_bthread_cond.cpp.o ip:10.102.26.39 start:2019-02-11 16:33:12 used:321ms
[ 29%] cached baidu/base/baidu-rpc/src/baidu/rpc/builtin/baidu_base_baidu-rpc_bdrpc_lib_sorttable_js.cpp.o ip:10.102.26.39 start:2019-02-11 16:33:12 used:329ms
[ 29%] cached baidu/base/baidu-rpc/src/baidu/rpc/baidu_base_baidu-rpc_bdrpc_lib_nshead_service.cpp.o ip:10.103.191.33 start:2019-02-11 16:33:12 used:388ms
[ 29%] cached baidu/base/baidu-rpc/src/baidu/rpc/policy/baidu_base_baidu-rpc_bdrpc_lib_file_naming_service.cpp.o ip:10.103.191.37 start:2019-02-11 16:33:12 used:418ms
[ 30%] cached baidu/base/baidu-rpc/src/baidu/rpc/builtin/baidu_base_baidu-rpc_bdrpc_lib_viz_min_js.cpp.o ip:10.103.191.31 start:2019-02-11 16:33:12 used:394ms
[ 30%] cached baidu/base/common/base/baidu_base_common_base_lib_logging.cc.o ip:10.103.191.31 start:2019-02-11 16:33:12 used:395ms
[ 30%] cached baidu/base/baidu-rpc/src/baidu/rpc/baidu_base_baidu-rpc_bdrpc_lib_trackme.cpp.o ip:10.103.191.32 start:2019-02-11 16:33:12 used:406ms
[ 30%] cached baidu/base/bthread/bthread/baidu_base_bthread_bthread_lib_interrupt_pthread.cpp.o ip:10.103.191.35 start:2019-02-11 16:33:12 used:95ms
[ 30%] cached baidu/base/baidu-rpc/src/baidu/rpc/baidu_base_baidu-rpc_bdrpc_lib_uri.cpp.o ip:10.103.191.35 start:2019-02-11 16:33:12 used:356ms
[ 30%] cached baidu/base/baidu-rpc/src/baidu/rpc/details/baidu_base_baidu-rpc_bdrpc_lib_method_status.cpp.o ip:10.103.191.37 start:2019-02-11 16:33:12 used:427ms
[ 30%] cached baidu/base/baidu-rpc/src/baidu/rpc/policy/baidu_base_baidu-rpc_bdrpc_lib_redis_authenticator.cpp.o ip:10.103.191.37 start:2019-02-11 16:33:12 used:414ms
[ 31%] cached baidu/base/baidu-rpc/src/baidu/rpc/builtin/baidu_base_baidu-rpc_bdrpc_lib_index_service.cpp.o ip:10.103.191.37 start:2019-02-11 16:33:12 used:434ms
[ 31%] cached baidu/base/baidu-rpc/src/baidu/rpc/builtin/baidu_base_baidu-rpc_bdrpc_lib_ids_service.cpp.o ip:10.102.26.40 start:2019-02-11 16:33:12 used:379ms
[ 31%] cached baidu/base/bthread/bthread/baidu_base_bthread_bthread_lib_countdown_event.cpp.o ip:10.103.191.32 start:2019-02-11 16:33:12 used:407ms
[ 31%] cached baidu/base/baidu-rpc/src/baidu/rpc/builtin/baidu_base_baidu-rpc_bdrpc_lib_get_favicon_service.cpp.o ip:10.103.191.32 start:2019-02-11 16:33:12 used:415ms
[ 31%] cached baidu/base/baidu-rpc/src/baidu/rpc/baidu_base_baidu-rpc_bdrpc_lib_restful.cpp.o ip:10.103.191.33 start:2019-02-11 16:33:12 used:415ms
[ 31%] cached baidu/base/common/base/timer/baidu_base_common_base_lib_elapsed_timer.cc.o ip:10.103.191.31 start:2019-02-11 16:33:12 used:152ms
[ 31%] cached baidu/base/baidu-rpc/src/baidu/rpc/builtin/baidu_base_baidu-rpc_bdrpc_lib_get_js_service.cpp.o ip:10.103.191.32 start:2019-02-11 16:33:12 used:430ms
[ 32%] cached baidu/base/common/base/baidu_base_common_base_lib_crc32c.cc.o ip:10.103.191.37 start:2019-02-11 16:33:12 used:115ms
[ 32%] cached baidu/base/bthread/baidu_base_bthread_bthread_lib_bthread_mutex.cpp.o ip:10.103.191.31 start:2019-02-11 16:33:12 used:432ms
[ 32%] cached baidu/base/common/base/baidu_base_common_base_lib_pending_task.cc.o ip:10.102.26.40 start:2019-02-11 16:33:12 used:268ms
[ 32%] cached baidu/base/common/base/baidu_base_common_base_lib_comlog_sink.cc.o ip:10.102.26.40 start:2019-02-11 16:33:12 used:384ms
[ 32%] cached baidu/base/baidu-rpc/src/baidu/rpc/details/baidu_base_baidu-rpc_bdrpc_lib_load_balancer_with_naming.cpp.o ip:10.102.26.38 start:2019-02-11 16:33:12 used:388ms
[ 32%] cached baidu/base/baidu-rpc/src/baidu/rpc/builtin/baidu_base_baidu-rpc_bdrpc_lib_jquery_min_js.cpp.o ip:10.102.26.39 start:2019-02-11 16:33:12 used:403ms
[ 32%] cached baidu/base/bvar/bvar/baidu_base_bvar_bvar_lib_collector.cpp.o ip:10.103.191.31 start:2019-02-11 16:33:12 used:440ms
[ 33%] cached baidu/base/baidu-rpc/baidu/rpc/baidu_base_baidu-rpc_bdrpc_lib_trackme.pb.cc.o ip:10.102.26.39 start:2019-02-11 16:33:12 used:404ms
[ 33%] cached baidu/base/baidu-rpc/src/baidu/rpc/baidu_base_baidu-rpc_bdrpc_lib_load_balancer.cpp.o ip:10.103.191.33 start:2019-02-11 16:33:12 used:433ms
[ 33%] cached baidu/base/baidu-rpc/src/baidu/rpc/details/baidu_base_baidu-rpc_bdrpc_lib_tcmalloc_extension.cpp.o ip:10.103.191.33 start:2019-02-11 16:33:12 used:159ms
[ 33%] cached baidu/base/baidu-rpc/src/baidu/rpc/baidu_base_baidu-rpc_bdrpc_lib_http_header.cpp.o ip:10.103.191.35 start:2019-02-11 16:33:12 used:399ms
[ 33%] cached baidu/base/bthread/baidu_base_bthread_bthread_lib_bthread_fd.cpp.o ip:10.103.191.34 start:2019-02-11 16:33:12 used:388ms
[ 33%] cached baidu/base/common/base/allocator/baidu_base_common_base_lib_allocator_extension.cc.o ip:10.103.191.34 start:2019-02-11 16:33:12 used:265ms
[ 33%] cached baidu/base/common/base/containers/baidu_base_common_base_lib_case_ignored_flat_map.cpp.o ip:10.102.26.38 start:2019-02-11 16:33:12 used:69ms
[ 34%] cached baidu/base/common/base/baidu_base_common_base_lib_sha1_portable.cc.o ip:10.103.191.32 start:2019-02-11 16:33:12 used:205ms
[ 34%] cached baidu/base/common/base/baidu_base_common_base_lib_fast_rand.cpp.o ip:10.103.191.37 start:2019-02-11 16:33:12 used:121ms
[ 34%] cached baidu/base/baidu-rpc/src/baidu/rpc/baidu_base_baidu-rpc_bdrpc_lib_retry_policy.cpp.o ip:10.103.191.35 start:2019-02-11 16:33:12 used:418ms
[ 34%] cached baidu/base/baidu-rpc/src/baidu/rpc/baidu_base_baidu-rpc_bdrpc_lib_serialized_request.cpp.o ip:10.103.191.33 start:2019-02-11 16:33:12 used:472ms
[ 34%] cached baidu/base/baidu-rpc/src/baidu/rpc/builtin/baidu_base_baidu-rpc_bdrpc_lib_sockets_service.cpp.o ip:10.103.191.31 start:2019-02-11 16:33:12 used:480ms
[ 34%] cached baidu/base/baidu-rpc/src/baidu/rpc/policy/baidu_base_baidu-rpc_bdrpc_lib_esp_authenticator.cpp.o ip:10.103.191.37 start:2019-02-11 16:33:12 used:484ms
[ 34%] cached baidu/base/common/base/power_monitor/baidu_base_common_base_lib_power_monitor_device_source_posix.cc.o ip:10.102.26.40 start:2019-02-11 16:33:12 used:285ms
[ 35%] cached baidu/base/baidu-rpc/src/baidu/rpc/baidu_base_baidu-rpc_bdrpc_lib_redis_command.cpp.o ip:10.102.26.39 start:2019-02-11 16:33:12 used:446ms
[ 35%] cached baidu/base/baidu-rpc/src/baidu/rpc/baidu_base_baidu-rpc_bdrpc_lib_memcache.cpp.o ip:10.102.26.39 start:2019-02-11 16:33:12 used:447ms
[ 35%] cached baidu/base/bthread/baidu_base_bthread_bthread_lib_bthread_key.cpp.o ip:10.103.191.34 start:2019-02-11 16:33:12 used:429ms
[ 35%] cached baidu/base/baidu-rpc/src/baidu/rpc/builtin/baidu_base_baidu-rpc_bdrpc_lib_flot_min_js.cpp.o ip:10.103.191.33 start:2019-02-11 16:33:12 used:421ms
[ 35%] cached baidu/base/common/base/metrics/baidu_base_common_base_lib_sample_vector.cc.o ip:10.103.191.32 start:2019-02-11 16:33:12 used:326ms
[ 35%] cached baidu/base/baidu-rpc/src/baidu/rpc/policy/baidu_base_baidu-rpc_bdrpc_lib_giano_authenticator.cpp.o ip:10.103.191.34 start:2019-02-11 16:33:12 used:484ms
[ 35%] cached baidu/base/common/base/baidu_base_common_base_lib_hash.cc.o ip:10.102.26.38 start:2019-02-11 16:33:12 used:257ms
[ 35%] cached baidu/base/baidu-rpc/src/baidu/rpc/baidu_base_baidu-rpc_bdrpc_lib_http_status_code.cpp.o ip:10.103.191.32 start:2019-02-11 16:33:12 used:499ms
[ 36%] cached baidu/base/baidu-rpc/src/baidu/rpc/builtin/baidu_base_baidu-rpc_bdrpc_lib_pprof_service.cpp.o ip:10.103.191.37 start:2019-02-11 16:33:12 used:531ms
[ 36%] cached baidu/base/baidu-rpc/src/baidu/rpc/policy/baidu_base_baidu-rpc_bdrpc_lib_snappy_compress.cpp.o ip:10.103.191.37 start:2019-02-11 16:33:12 used:517ms
[ 36%] cached baidu/base/common/base/debug/baidu_base_common_base_lib_debugger_posix.cc.o ip:10.102.26.40 start:2019-02-11 16:33:12 used:320ms
[ 36%] cached baidu/base/common/base/strings/baidu_base_common_base_lib_utf_string_conversion_utils.cc.o ip:10.102.26.40 start:2019-02-11 16:33:12 used:141ms
[ 36%] cached baidu/base/bthread/bthread/baidu_base_bthread_bthread_lib_task_group.cpp.o ip:10.103.191.32 start:2019-02-11 16:33:12 used:491ms
[ 36%] cached baidu/base/common/base/baidu_base_common_base_lib_fd_utility.cpp.o ip:10.103.191.35 start:2019-02-11 16:33:12 used:106ms
[ 36%] cached baidu/base/baidu-rpc/src/baidu/rpc/policy/baidu_base_baidu-rpc_bdrpc_lib_list_naming_service.cpp.o ip:10.103.191.35 start:2019-02-11 16:33:12 used:448ms
[ 37%] cached baidu/base/ullib/src/comlog/appender/baidu_base_ullib_ullib_lib_ullogappender.cpp.o ip:10.103.191.31 start:2019-02-11 16:33:12 used:180ms
[ 37%] cached baidu/base/bthread/bthread/baidu_base_bthread_bthread_lib_stack.cpp.o ip:10.103.191.37 start:2019-02-11 16:33:12 used:395ms
[ 37%] cached baidu/base/bthread/bthread/baidu_base_bthread_bthread_lib_context.cpp.o ip:10.103.191.33 start:2019-02-11 16:33:12 used:71ms
[ 37%] cached baidu/base/baidu-rpc/src/baidu/rpc/policy/baidu_base_baidu-rpc_bdrpc_lib_couchbase_authenticator.cpp.o ip:10.103.191.33 start:2019-02-11 16:33:12 used:369ms
[ 37%] cached baidu/base/baidu-rpc/src/baidu/rpc/builtin/baidu_base_baidu-rpc_bdrpc_lib_bthreads_service.cpp.o ip:10.102.26.39 start:2019-02-11 16:33:12 used:483ms
[ 37%] cached baidu/base/common/base/baidu_base_common_base_lib_async_socket_io_handler_posix.cc.o ip:10.102.26.39 start:2019-02-11 16:33:12 used:310ms
[ 37%] cached baidu/base/baidu-rpc/src/baidu/rpc/policy/baidu_base_baidu-rpc_bdrpc_lib_dynpart_load_balancer.cpp.o ip:10.103.191.34 start:2019-02-11 16:33:12 used:508ms
[ 38%] cached baidu/base/common/base/baidu_base_common_base_lib_location.cc.o ip:10.103.191.34 start:2019-02-11 16:33:12 used:179ms
[ 38%] cached baidu/base/bthread/bthread/baidu_base_bthread_bthread_lib_timer_thread.cpp.o ip:10.102.26.40 start:2019-02-11 16:33:12 used:469ms
[ 38%] cached baidu/base/common/base/metrics/baidu_base_common_base_lib_bucket_ranges.cc.o ip:10.103.191.31 start:2019-02-11 16:33:12 used:345ms
[ 38%] cached baidu/base/baidu-rpc/src/baidu/rpc/baidu_base_baidu-rpc_bdrpc_lib_redis.cpp.o ip:10.103.191.35 start:2019-02-11 16:33:12 used:458ms
[ 38%] cached baidu/base/baidu-rpc/src/baidu/rpc/policy/baidu_base_baidu-rpc_bdrpc_lib_dh.cpp.o ip:10.103.191.32 start:2019-02-11 16:33:12 used:531ms
[ 38%] cached baidu/base/baidu-rpc/src/baidu/rpc/baidu_base_baidu-rpc_bdrpc_lib_partition_channel.cpp.o ip:10.103.191.32 start:2019-02-11 16:33:12 used:546ms
[ 38%] cached baidu/base/common/base/files/baidu_base_common_base_lib_file_proxy.cc.o ip:10.103.191.31 start:2019-02-11 16:33:12 used:377ms
[ 39%] cached baidu/base/baidu-rpc/src/baidu/rpc/builtin/baidu_base_baidu-rpc_bdrpc_lib_common.cpp.o ip:10.102.26.38 start:2019-02-11 16:33:12 used:534ms
[ 39%] cached baidu/base/baidu-rpc/src/baidu/rpc/baidu_base_baidu-rpc_bdrpc_lib_event_dispatcher.cpp.o ip:10.103.191.33 start:2019-02-11 16:33:12 used:552ms
[ 39%] cached baidu/base/common/base/memory/baidu_base_common_base_lib_discardable_memory_emulated.cc.o ip:10.103.191.31 start:2019-02-11 16:33:12 used:345ms
[ 39%] cached baidu/base/common/base/baidu_base_common_base_lib_errno.cpp.o ip:10.103.191.32 start:2019-02-11 16:33:12 used:249ms
[ 39%] cached baidu/base/baidu-rpc/src/baidu/rpc/builtin/baidu_base_baidu-rpc_bdrpc_lib_version_service.cpp.o ip:10.103.191.37 start:2019-02-11 16:33:12 used:580ms
[ 39%] cached baidu/base/bvar/bvar/baidu_base_bvar_bvar_lib_variable.cpp.o ip:10.102.26.38 start:2019-02-11 16:33:12 used:345ms
[ 39%] cached baidu/base/common/base/time/baidu_base_common_base_lib_clock.cc.o ip:10.103.191.31 start:2019-02-11 16:33:12 used:162ms
[ 40%] cached baidu/base/baidu-rpc/src/baidu/rpc/baidu_base_baidu-rpc_bdrpc_lib_server_id.cpp.o ip:10.103.191.31 start:2019-02-11 16:33:12 used:415ms
[ 40%] cached baidu/base/bthread/bthread/baidu_base_bthread_bthread_lib_execution_queue.cpp.o ip:10.103.191.35 start:2019-02-11 16:33:12 used:340ms
[ 40%] cached baidu/base/baidu-rpc/src/baidu/rpc/baidu_base_baidu-rpc_bdrpc_lib_acceptor.cpp.o ip:10.103.191.37 start:2019-02-11 16:33:12 used:605ms
[ 40%] cached baidu/base/common/base/metrics/baidu_base_common_base_lib_stats_table.cc.o ip:10.103.191.37 start:2019-02-11 16:33:12 used:236ms
[ 40%] cached baidu/base/baidu-rpc/src/baidu/rpc/builtin/baidu_base_baidu-rpc_bdrpc_lib_health_service.cpp.o ip:10.103.191.33 start:2019-02-11 16:33:12 used:533ms
[ 40%] cached baidu/base/baidu-rpc/src/baidu/rpc/policy/baidu_base_baidu-rpc_bdrpc_lib_domain_naming_service.cpp.o ip:10.102.26.39 start:2019-02-11 16:33:12 used:521ms
[ 40%] cached baidu/base/common/base/power_monitor/baidu_base_common_base_lib_power_monitor.cc.o ip:10.102.26.40 start:2019-02-11 16:33:12 used:341ms
[ 41%] cached baidu/base/common/base/debug/baidu_base_common_base_lib_proc_maps_linux.cc.o ip:10.103.191.32 start:2019-02-11 16:33:12 used:257ms
[ 41%] cached baidu/base/baidu-rpc/src/baidu/rpc/policy/baidu_base_baidu-rpc_bdrpc_lib_baidu_rpc_protocol.cpp.o ip:10.103.191.37 start:2019-02-11 16:33:12 used:619ms
[ 41%] cached baidu/base/common/base/memory/baidu_base_common_base_lib_memory_pressure_listener.cc.o ip:10.103.191.31 start:2019-02-11 16:33:12 used:358ms
[ 41%] cached baidu/base/common/base/time/baidu_base_common_base_lib_time.cc.o ip:10.103.191.31 start:2019-02-11 16:33:12 used:329ms
[ 41%] cached baidu/base/common/base/baidu_base_common_base_lib_class_name.cpp.o ip:10.103.191.35 start:2019-02-11 16:33:12 used:197ms
[ 41%] cached baidu/base/common/base/baidu_base_common_base_lib_supports_user_data.cc.o ip:10.102.26.38 start:2019-02-11 16:33:12 used:304ms
[ 41%] cached baidu/base/common/base/baidu_base_common_base_lib_zero_copy_stream_as_streambuf.cpp.o ip:10.102.26.38 start:2019-02-11 16:33:12 used:168ms
[ 42%] cached baidu/base/baidu-rpc/src/baidu/rpc/baidu_base_baidu-rpc_bdrpc_lib_progressive_attachment.cpp.o ip:10.103.191.32 start:2019-02-11 16:33:12 used:615ms
[ 42%] cached baidu/base/baidu-rpc/src/baidu/rpc/policy/baidu_base_baidu-rpc_bdrpc_lib_randomized_load_balancer.cpp.o ip:10.103.191.32 start:2019-02-11 16:33:12 used:607ms
[ 42%] cached baidu/base/common/base/baidu_base_common_base_lib_bind_helpers.cc.o ip:10.102.26.39 start:2019-02-11 16:33:12 used:291ms
[ 42%] cached baidu/base/baidu-rpc/src/baidu/rpc/baidu_base_baidu-rpc_bdrpc_lib_input_messenger.cpp.o ip:10.102.26.38 start:2019-02-11 16:33:12 used:602ms
[ 42%] cached baidu/base/common/base/process/baidu_base_common_base_lib_process_posix.cc.o ip:10.103.191.31 start:2019-02-11 16:33:12 used:292ms
[ 42%] cached baidu/base/baidu-rpc/src/baidu/rpc/policy/baidu_base_baidu-rpc_bdrpc_lib_nshead_mcpack_protocol.cpp.o ip:10.103.191.34 start:2019-02-11 16:33:12 used:598ms
[ 42%] cached baidu/base/baidu-rpc/src/baidu/rpc/baidu_base_baidu-rpc_bdrpc_lib_protocol.cpp.o ip:10.103.191.34 start:2019-02-11 16:33:12 used:606ms
[ 42%] cached baidu/base/common/base/metrics/baidu_base_common_base_lib_stats_counters.cc.o ip:10.103.191.34 start:2019-02-11 16:33:12 used:253ms
[ 43%] cached baidu/base/bthread/baidu_base_bthread_bthread_lib_bthread.cpp.o ip:10.102.26.39 start:2019-02-11 16:33:12 used:423ms
[ 43%] cached baidu/base/common/base/third_party/murmurhash3/baidu_base_common_base_lib_murmurhash3.cpp.o ip:10.102.26.40 start:2019-02-11 16:33:12 used:145ms
[ 43%] cached baidu/base/common/base/baidu_base_common_base_lib_event_recorder_stubs.cc.o ip:10.103.191.33 start:2019-02-11 16:33:12 used:109ms
[ 43%] cached baidu/base/common/base/files/baidu_base_common_base_lib_memory_mapped_file.cc.o ip:10.102.26.40 start:2019-02-11 16:33:12 used:327ms
[ 43%] cached baidu/base/baidu-rpc/src/baidu/rpc/builtin/baidu_base_baidu-rpc_bdrpc_lib_rpcz_service.cpp.o ip:10.103.191.33 start:2019-02-11 16:33:12 used:577ms
[ 43%] cached baidu/base/common/base/baidu_base_common_base_lib_find_cstr.cpp.o ip:10.103.191.35 start:2019-02-11 16:33:12 used:181ms
[ 43%] cached baidu/base/baidu-rpc/src/baidu/rpc/baidu_base_baidu-rpc_bdrpc_lib_ts.cpp.o ip:10.103.191.35 start:2019-02-11 16:33:12 used:581ms
[ 44%] cached baidu/base/baidu-rpc/src/baidu/rpc/details/baidu_base_baidu-rpc_bdrpc_lib_http_parser.cpp.o ip:10.103.191.35 start:2019-02-11 16:33:12 used:116ms
[ 44%] cached baidu/base/baidu-rpc/src/baidu/rpc/baidu_base_baidu-rpc_bdrpc_lib_parallel_channel.cpp.o ip:10.103.191.35 start:2019-02-11 16:33:12 used:582ms
[ 44%] cached baidu/base/baidu-rpc/src/baidu/rpc/builtin/baidu_base_baidu-rpc_bdrpc_lib_protobufs_service.cpp.o ip:10.102.26.40 start:2019-02-11 16:33:12 used:608ms
[ 44%] cached baidu/base/baidu-rpc/src/baidu/rpc/builtin/baidu_base_baidu-rpc_bdrpc_lib_status_service.cpp.o ip:10.103.191.33 start:2019-02-11 16:33:12 used:592ms
[ 44%] cached baidu/base/baidu-rpc/src/baidu/rpc/builtin/baidu_base_baidu-rpc_bdrpc_lib_hotspots_service.cpp.o ip:10.102.26.38 start:2019-02-11 16:33:12 used:622ms
[ 44%] cached baidu/base/ullib/src/comlog/appender/baidu_base_ullib_ullib_lib_ttyappender.cpp.o ip:10.103.191.37 start:2019-02-11 16:33:12 used:178ms
[ 44%] cached baidu/base/common/base/strings/baidu_base_common_base_lib_sys_string_conversions_posix.cc.o ip:10.103.191.31 start:2019-02-11 16:33:12 used:249ms
[ 45%] cached baidu/base/common/base/message_loop/baidu_base_common_base_lib_incoming_task_queue.cc.o ip:10.103.191.31 start:2019-02-11 16:33:12 used:383ms
[ 45%] cached baidu/base/baidu-rpc/src/baidu/rpc/builtin/baidu_base_baidu-rpc_bdrpc_lib_flags_service.cpp.o ip:10.103.191.35 start:2019-02-11 16:33:12 used:583ms
[ 45%] cached baidu/base/common/base/baidu_base_common_base_lib_task_runner.cc.o ip:10.103.191.34 start:2019-02-11 16:33:12 used:266ms
[ 45%] cached baidu/base/bthread/bthread/baidu_base_bthread_bthread_lib_butex.cpp.o ip:10.103.191.34 start:2019-02-11 16:33:12 used:402ms
[ 45%] cached baidu/base/bvar/bvar/baidu_base_bvar_bvar_lib_latency_recorder.cpp.o ip:10.102.26.39 start:2019-02-11 16:33:12 used:391ms
[ 45%] cached baidu/base/baidu-rpc/src/baidu/rpc/baidu_base_baidu-rpc_bdrpc_lib_span.cpp.o ip:10.103.191.35 start:2019-02-11 16:33:12 used:584ms
[ 45%] cached baidu/base/common/base/files/baidu_base_common_base_lib_temp_file.cpp.o ip:10.102.26.39 start:2019-02-11 16:33:12 used:131ms
[ 46%] cached baidu/base/common/base/baidu_base_common_base_lib_cpu.cc.o ip:10.103.191.33 start:2019-02-11 16:33:12 used:167ms
[ 46%] cached baidu/base/common/base/third_party/xdg_mime/baidu_base_common_base_lib_xdgmimecache.c.o ip:10.102.26.40 start:2019-02-11 16:33:12 used:145ms
[ 46%] cached baidu/base/common/base/baidu_base_common_base_lib_sync_socket_posix.cc.o ip:10.102.26.40 start:2019-02-11 16:33:12 used:385ms
[ 46%] cached baidu/base/ullib/src/baidu_base_ullib_ullib_lib_ul_error.cpp.o ip:10.102.26.40 start:2019-02-11 16:33:12 used:115ms
[ 46%] cached baidu/base/common/base/timer/baidu_base_common_base_lib_hi_res_timer_manager_posix.cc.o ip:10.103.191.34 start:2019-02-11 16:33:12 used:133ms
[ 46%] cached baidu/base/baidu-rpc/src/baidu/rpc/details/baidu_base_baidu-rpc_bdrpc_lib_naming_service_thread.cpp.o ip:10.103.191.32 start:2019-02-11 16:33:12 used:656ms
[ 46%] cached baidu/base/common/base/baidu_base_common_base_lib_rand_util.cc.o ip:10.103.191.37 start:2019-02-11 16:33:12 used:266ms
[ 47%] cached baidu/base/common/base/baidu_base_common_base_lib_native_library_posix.cc.o ip:10.102.26.39 start:2019-02-11 16:33:12 used:295ms
[ 47%] cached baidu/base/baidu-rpc/src/baidu/rpc/policy/baidu_base_baidu-rpc_bdrpc_lib_consistent_hashing_load_balancer.cpp.o ip:10.103.191.32 start:2019-02-11 16:33:12 used:537ms
[ 47%] cached baidu/base/common/base/files/baidu_base_common_base_lib_file_enumerator.cc.o ip:10.103.191.32 start:2019-02-11 16:33:12 used:265ms
[ 47%] cached baidu/base/common/base/baidu_base_common_base_lib_unix_socket.cpp.o ip:10.103.191.37 start:2019-02-11 16:33:12 used:260ms
[ 47%] cached baidu/base/baidu-rpc/src/baidu/rpc/baidu_base_baidu-rpc_bdrpc_lib_channel.cpp.o ip:10.103.191.33 start:2019-02-11 16:33:12 used:674ms
[ 47%] cached baidu/base/common/base/memory/baidu_base_common_base_lib_discardable_memory_malloc.cc.o ip:10.102.26.40 start:2019-02-11 16:33:12 used:354ms
[ 47%] cached baidu/base/common/base/files/baidu_base_common_base_lib_scoped_file.cc.o ip:10.103.191.37 start:2019-02-11 16:33:12 used:257ms
[ 48%] cached baidu/base/baidu-rpc/src/baidu/rpc/builtin/baidu_base_baidu-rpc_bdrpc_lib_list_service.cpp.o ip:10.102.26.39 start:2019-02-11 16:33:12 used:617ms
[ 48%] cached baidu/base/baidu-rpc/src/baidu/rpc/policy/baidu_base_baidu-rpc_bdrpc_lib_gzip_compress.cpp.o ip:10.103.191.33 start:2019-02-11 16:33:12 used:325ms
[ 48%] cached baidu/base/baidu-rpc/src/baidu/rpc/baidu_base_baidu-rpc_bdrpc_lib_rtmp.cpp.o ip:10.103.191.33 start:2019-02-11 16:33:12 used:692ms
[ 48%] cached baidu/base/common/base/metrics/baidu_base_common_base_lib_sample_map.cc.o ip:10.102.26.38 start:2019-02-11 16:33:12 used:332ms
[ 48%] cached baidu/base/common/base/baidu_base_common_base_lib_file_util_posix.cc.o ip:10.102.26.38 start:2019-02-11 16:33:12 used:440ms
[ 48%] cached baidu/base/baidu-rpc/src/baidu/rpc/policy/baidu_base_baidu-rpc_bdrpc_lib_remote_file_naming_service.cpp.o ip:10.102.26.38 start:2019-02-11 16:33:12 used:595ms
[ 48%] cached baidu/base/bvar/bvar/baidu_base_bvar_bvar_lib_gflag.cpp.o ip:10.103.191.35 start:2019-02-11 16:33:12 used:316ms
[ 49%] cached baidu/base/ullib/src/comlog/baidu_base_ullib_ullib_lib_layout.cpp.o ip:10.103.191.31 start:2019-02-11 16:33:12 used:145ms
[ 49%] cached baidu/base/common/base/profiler/baidu_base_common_base_lib_scoped_profile.cc.o ip:10.103.191.31 start:2019-02-11 16:33:12 used:311ms
[ 49%] cached baidu/base/common/base/third_party/snappy/baidu_base_common_base_lib_snappy-stubs-internal.cc.o ip:10.103.191.31 start:2019-02-11 16:33:12 used:181ms
[ 49%] cached baidu/base/common/base/baidu_base_common_base_lib_value_conversions.cc.o ip:10.103.191.34 start:2019-02-11 16:33:12 used:236ms
[ 49%] cached baidu/base/baidu-rpc/src/baidu/rpc/policy/baidu_base_baidu-rpc_bdrpc_lib_http_rpc_protocol.cpp.o ip:10.103.191.34 start:2019-02-11 16:33:12 used:673ms
[ 49%] cached baidu/base/common/base/threading/baidu_base_common_base_lib_non_thread_safe_impl.cc.o ip:10.102.26.40 start:2019-02-11 16:33:12 used:347ms
[ 49%] cached baidu/base/common/base/synchronization/baidu_base_common_base_lib_condition_variable_posix.cc.o ip:10.103.191.31 start:2019-02-11 16:33:12 used:287ms
[ 50%] cached baidu/base/ullib/src/baidu_base_ullib_ullib_lib_ul_udpdns.cpp.o ip:10.102.26.40 start:2019-02-11 16:33:12 used:201ms
[ 50%] cached baidu/base/common/base/baidu_base_common_base_lib_sys_info.cc.o ip:10.102.26.39 start:2019-02-11 16:33:12 used:323ms
[ 50%] cached baidu/base/common/base/baidu_base_common_base_lib_version.cc.o ip:10.102.26.39 start:2019-02-11 16:33:12 used:325ms
[ 50%] cached baidu/base/baidu-rpc/src/baidu/rpc/baidu_base_baidu-rpc_bdrpc_lib_stream.cpp.o ip:10.103.191.37 start:2019-02-11 16:33:12 used:739ms
[ 50%] cached baidu/base/baidu-rpc/src/baidu/rpc/builtin/baidu_base_baidu-rpc_bdrpc_lib_vars_service.cpp.o ip:10.103.191.33 start:2019-02-11 16:33:12 used:624ms
[ 50%] cached baidu/base/common/base/profiler/baidu_base_common_base_lib_tracked_time.cc.o ip:10.103.191.32 start:2019-02-11 16:33:12 used:172ms
[ 50%] cached baidu/base/common/base/debug/baidu_base_common_base_lib_trace_event_memory.cc.o ip:10.103.191.32 start:2019-02-11 16:33:12 used:314ms
[ 50%] cached baidu/base/baidu-rpc/src/baidu/rpc/policy/baidu_base_baidu-rpc_bdrpc_lib_rtmp_protocol.cpp.o ip:10.103.191.34 start:2019-02-11 16:33:12 used:697ms
[ 51%] cached baidu/base/common/base/process/baidu_base_common_base_lib_process_iterator_linux.cc.o ip:10.103.191.37 start:2019-02-11 16:33:12 used:293ms
[ 51%] cached baidu/base/baidu-rpc/src/baidu/rpc/policy/baidu_base_baidu-rpc_bdrpc_lib_memcache_binary_protocol.cpp.o ip:10.103.191.37 start:2019-02-11 16:33:12 used:742ms
[ 51%] cached baidu/base/baidu-rpc/src/baidu/rpc/builtin/baidu_base_baidu-rpc_bdrpc_lib_dir_service.cpp.o ip:10.102.26.39 start:2019-02-11 16:33:12 used:671ms
[ 51%] cached baidu/base/common/base/baidu_base_common_base_lib_tracking_info.cc.o ip:10.102.26.38 start:2019-02-11 16:33:12 used:399ms
[ 51%] cached baidu/base/common/base/strings/baidu_base_common_base_lib_utf_string_conversions.cc.o ip:10.102.26.40 start:2019-02-11 16:33:12 used:301ms
[ 51%] cached baidu/base/common/base/third_party/nspr/baidu_base_common_base_lib_prtime.cc.o ip:10.103.191.31 start:2019-02-11 16:33:12 used:262ms
[ 51%] cached baidu/base/common/base/threading/baidu_base_common_base_lib_thread_local_storage.cc.o ip:10.103.191.31 start:2019-02-11 16:33:12 used:310ms
[ 52%] cached baidu/base/baidu-rpc/src/baidu/rpc/baidu_base_baidu-rpc_bdrpc_lib_controller.cpp.o ip:10.103.191.33 start:2019-02-11 16:33:12 used:746ms
[ 52%] cached baidu/base/baidu-rpc/src/baidu/rpc/builtin/baidu_base_baidu-rpc_bdrpc_lib_connections_service.cpp.o ip:10.103.191.35 start:2019-02-11 16:33:12 used:681ms
[ 52%] cached baidu/base/bvar/bvar/baidu_base_bvar_bvar_lib_default_variables.cpp.o ip:10.103.191.35 start:2019-02-11 16:33:12 used:408ms
[ 52%] cached baidu/base/baidu-rpc/src/baidu/rpc/details/baidu_base_baidu-rpc_bdrpc_lib_ssl_helper.cpp.o ip:10.103.191.35 start:2019-02-11 16:33:12 used:651ms
[ 52%] cached baidu/base/common/base/third_party/xdg_user_dirs/baidu_base_common_base_lib_xdg_user_dir_lookup.cc.o ip:10.103.191.32 start:2019-02-11 16:33:12 used:152ms
[ 52%] cached baidu/base/ullib/src/baidu_base_ullib_ullib_lib_ul_net2.cpp.o ip:10.102.26.40 start:2019-02-11 16:33:12 used:148ms
[ 52%] cached baidu/base/common/base/metrics/baidu_base_common_base_lib_statistics_recorder.cc.o ip:10.103.191.37 start:2019-02-11 16:33:12 used:338ms
[ 53%] cached baidu/base/common/base/baidu_base_common_base_lib_ini_parser.cc.o ip:10.103.191.35 start:2019-02-11 16:33:12 used:285ms
[ 53%] cached baidu/base/common/base/baidu_base_common_base_lib_path_service.cc.o ip:10.102.26.39 start:2019-02-11 16:33:12 used:387ms
[ 53%] cached baidu/base/common/base/json/baidu_base_common_base_lib_json_parser.cc.o ip:10.103.191.31 start:2019-02-11 16:33:12 used:316ms
[ 53%] cached baidu/base/common/base/threading/baidu_base_common_base_lib_worker_pool.cc.o ip:10.102.26.40 start:2019-02-11 16:33:12 used:316ms
[ 53%] cached baidu/base/ullib/src/comlog/baidu_base_ullib_ullib_lib_namemg.cpp.o ip:10.103.191.37 start:2019-02-11 16:33:12 used:182ms
[ 53%] cached baidu/base/common/base/time/baidu_base_common_base_lib_tick_clock.cc.o ip:10.103.191.34 start:2019-02-11 16:33:12 used:141ms
[ 53%] cached baidu/base/common/base/third_party/xdg_mime/baidu_base_common_base_lib_xdgmimeparent.c.o ip:10.103.191.32 start:2019-02-11 16:33:12 used:159ms
[ 54%] cached baidu/base/common/base/debug/baidu_base_common_base_lib_trace_event_impl.cc.o ip:10.103.191.32 start:2019-02-11 16:33:12 used:358ms
[ 54%] cached baidu/base/common/base/files/baidu_base_common_base_lib_file_path_constants.cc.o ip:10.102.26.38 start:2019-02-11 16:33:12 used:216ms
[ 54%] cached baidu/base/baidu-rpc/src/baidu/rpc/policy/baidu_base_baidu-rpc_bdrpc_lib_mongo_protocol.cpp.o ip:10.102.26.38 start:2019-02-11 16:33:12 used:693ms
[ 54%] cached baidu/base/common/base/json/baidu_base_common_base_lib_json_reader.cc.o ip:10.103.191.32 start:2019-02-11 16:33:12 used:316ms
[ 54%] cached baidu/base/baidu-rpc/src/baidu/rpc/policy/baidu_base_baidu-rpc_bdrpc_lib_streaming_rpc_protocol.cpp.o ip:10.103.191.34 start:2019-02-11 16:33:12 used:613ms
[ 54%] cached baidu/base/common/base/time/baidu_base_common_base_lib_time_posix.cc.o ip:10.103.191.37 start:2019-02-11 16:33:12 used:302ms
[ 54%] cached baidu/base/common/base/files/baidu_base_common_base_lib_file.cc.o ip:10.102.26.39 start:2019-02-11 16:33:12 used:334ms
[ 55%] cached baidu/base/common/base/debug/baidu_base_common_base_lib_alias.cc.o ip:10.103.191.33 start:2019-02-11 16:33:12 used:111ms
[ 55%] cached baidu/base/common/base/baidu_base_common_base_lib_thread_local.cpp.o ip:10.103.191.33 start:2019-02-11 16:33:12 used:130ms
[ 55%] cached baidu/base/common/base/debug/baidu_base_common_base_lib_profiler.cc.o ip:10.102.26.39 start:2019-02-11 16:33:12 used:342ms
[ 55%] cached baidu/base/common/base/files/baidu_base_common_base_lib_file_posix.cc.o ip:10.102.26.39 start:2019-02-11 16:33:12 used:301ms
[ 55%] cached baidu/base/common/base/baidu_base_common_base_lib_safe_strerror_posix.cc.o ip:10.103.191.33 start:2019-02-11 16:33:12 used:154ms
[ 55%] cached public/configure/compiler/src/public_configure_config_lib_idl.c.o ip:10.103.191.31 start:2019-02-11 16:33:12 used:139ms
[ 55%] cached baidu/base/ullib/src/comlog/sendsvr/baidu_base_ullib_ullib_lib_loghead.cpp.o ip:10.103.191.37 start:2019-02-11 16:33:12 used:208ms
[ 56%] cached baidu/base/ullib/src/comlog/sendsvr/baidu_base_ullib_ullib_lib_netappendersvr.cpp.o ip:10.103.191.37 start:2019-02-11 16:33:12 used:194ms
[ 56%] cached baidu/base/mcpack2pb/mcpack/baidu_base_mcpack2pb_mcpack2pb_lib_field_type.cpp.o ip:10.103.191.32 start:2019-02-11 16:33:12 used:112ms
[ 56%] cached baidu/base/common/base/files/baidu_base_common_base_lib_file_util_proxy.cc.o ip:10.103.191.32 start:2019-02-11 16:33:12 used:345ms
[ 56%] cached baidu/base/common/base/threading/baidu_base_common_base_lib_thread_local_storage_posix.cc.o ip:10.102.26.40 start:2019-02-11 16:33:12 used:389ms
[ 56%] cached baidu/base/baidu-rpc/src/baidu/rpc/policy/baidu_base_baidu-rpc_bdrpc_lib_nshead_protocol.cpp.o ip:10.103.191.35 start:2019-02-11 16:33:12 used:737ms
[ 56%] cached baidu/base/common/base/baidu_base_common_base_lib_sys_info_posix.cc.o ip:10.103.191.35 start:2019-02-11 16:33:12 used:269ms
[ 56%] cached baidu/base/common/base/baidu_base_common_base_lib_thread_task_runner_handle.cc.o ip:10.103.191.35 start:2019-02-11 16:33:12 used:294ms
[ 57%] cached baidu/base/common/base/baidu_base_common_base_lib_command_line.cc.o ip:10.103.191.33 start:2019-02-11 16:33:12 used:315ms
[ 57%] cached baidu/base/common/base/metrics/baidu_base_common_base_lib_histogram_samples.cc.o ip:10.102.26.38 start:2019-02-11 16:33:12 used:311ms
[ 57%] cached baidu/base/common/base/baidu_base_common_base_lib_lazy_instance.cc.o ip:10.102.26.38 start:2019-02-11 16:33:12 used:355ms
[ 57%] cached baidu/base/common/base/message_loop/baidu_base_common_base_lib_message_pump.cc.o ip:10.102.26.38 start:2019-02-11 16:33:12 used:198ms
[ 57%] cached baidu/base/baidu-rpc/src/baidu/rpc/policy/baidu_base_baidu-rpc_bdrpc_lib_public_pbrpc_protocol.cpp.o ip:10.102.26.38 start:2019-02-11 16:33:12 used:715ms
[ 57%] cached baidu/base/baidu-rpc/src/baidu/rpc/baidu_base_baidu-rpc_bdrpc_lib_socket_map.cpp.o ip:10.102.26.40 start:2019-02-11 16:33:12 used:733ms
[ 57%] cached public/noah/giano-lib/release/baas-lib-c/common/public_noah_giano-lib_release_baas-lib-c_baas_interface_lib_baas_lib_defines.cpp.o ip:10.103.191.31 start:2019-02-11 16:33:12 used:113ms
[ 57%] cached baidu/base/common/base/threading/baidu_base_common_base_lib_thread_id_name_manager.cc.o ip:10.103.191.37 start:2019-02-11 16:33:12 used:307ms
[ 58%] cached baidu/base/common/base/baidu_base_common_base_lib_linux_util.cc.o ip:10.102.26.39 start:2019-02-11 16:33:12 used:337ms
[ 58%] cached baidu/base/common/base/memory/baidu_base_common_base_lib_discardable_memory.cc.o ip:10.102.26.39 start:2019-02-11 16:33:12 used:239ms
[ 58%] cached baidu/base/baidu-rpc/src/baidu/rpc/policy/baidu_base_baidu-rpc_bdrpc_lib_itp.cpp.o ip:10.103.191.34 start:2019-02-11 16:33:12 used:769ms
[ 58%] cached baidu/base/common/base/system_monitor/baidu_base_common_base_lib_system_monitor.cc.o ip:10.103.191.37 start:2019-02-11 16:33:12 used:322ms
[ 58%] cached baidu/base/common/base/baidu_base_common_base_lib_string_printf.cpp.o ip:10.103.191.33 start:2019-02-11 16:33:12 used:208ms
[ 58%] cached baidu/base/common/base/baidu_base_common_base_lib_endpoint.cpp.o ip:10.103.191.33 start:2019-02-11 16:33:12 used:296ms
[ 58%] cached baidu/base/ullib/src/comlog/sendsvr/baidu_base_ullib_ullib_lib_checksvr.cpp.o ip:10.102.26.40 start:2019-02-11 16:33:12 used:191ms
[ 59%] cached baidu/base/baidu-rpc/src/baidu/rpc/policy/baidu_base_baidu-rpc_bdrpc_lib_ubrpc2pb_protocol.cpp.o ip:10.102.26.40 start:2019-02-11 16:33:12 used:816ms
[ 59%] cached baidu/base/baidu-rpc/src/baidu/rpc/baidu_base_baidu-rpc_bdrpc_lib_nshead_pb_service_adaptor.cpp.o ip:10.102.26.40 start:2019-02-11 16:33:12 used:816ms
[ 59%] cached baidu/base/common/base/memory/baidu_base_common_base_lib_ref_counted_memory.cc.o ip:10.103.191.32 start:2019-02-11 16:33:12 used:335ms
[ 59%] cached baidu/base/common/base/baidu_base_common_base_lib_tracked_objects.cc.o ip:10.103.191.35 start:2019-02-11 16:33:12 used:284ms
[ 59%] cached baidu/base/common/base/metrics/baidu_base_common_base_lib_histogram_snapshot_manager.cc.o ip:10.103.191.35 start:2019-02-11 16:33:12 used:250ms
[ 59%] cached baidu/base/common/base/task/baidu_base_common_base_lib_cancelable_task_tracker.cc.o ip:10.103.191.37 start:2019-02-11 16:33:12 used:298ms
[ 59%] cached baidu/base/common/base/baidu_base_common_base_lib_environment.cc.o ip:10.103.191.33 start:2019-02-11 16:33:12 used:270ms
[ 60%] cached baidu/base/mcpack2pb/baidu_base_mcpack2pb_mcpack2pb_lib_idl_options.pb.cc.o ip:10.103.191.32 start:2019-02-11 16:33:12 used:242ms
[ 60%] cached baidu/base/common/base/process/baidu_base_common_base_lib_process_linux.cc.o ip:10.103.191.32 start:2019-02-11 16:33:12 used:318ms
[ 60%] cached public/noah/giano-lib/release/baas-lib-c/public_noah_giano-lib_release_baas-lib-c_baas_interface_lib_client_utility.cpp.o ip:10.103.191.31 start:2019-02-11 16:33:12 used:145ms
[ 60%] cached baidu/base/common/base/threading/baidu_base_common_base_lib_worker_pool_posix.cc.o ip:10.103.191.37 start:2019-02-11 16:33:12 used:356ms
[ 60%] cached baidu/base/common/base/third_party/icu/baidu_base_common_base_lib_icu_utf.cc.o ip:10.103.191.34 start:2019-02-11 16:33:12 used:162ms
[ 60%] cached baidu/base/common/base/process/baidu_base_common_base_lib_launch.cc.o ip:10.102.26.39 start:2019-02-11 16:33:12 used:203ms
[ 60%] cached baidu/base/common/base/strings/baidu_base_common_base_lib_safe_sprintf.cc.o ip:10.102.26.39 start:2019-02-11 16:33:12 used:125ms
[ 61%] cached baidu/base/common/base/json/baidu_base_common_base_lib_json_string_value_serializer.cc.o ip:10.103.191.32 start:2019-02-11 16:33:12 used:376ms
[ 61%] cached baidu/base/ullib/src/dep/baidu_base_ullib_ullib_lib_dep_exlink.cpp.o ip:10.102.26.40 start:2019-02-11 16:33:12 used:173ms
[ 61%] cached baidu/base/common/base/baidu_base_common_base_lib_deferred_sequenced_task_runner.cc.o ip:10.103.191.33 start:2019-02-11 16:33:12 used:366ms
[ 61%] cached baidu/base/common/base/third_party/dmg_fp/baidu_base_common_base_lib_dtoa_wrapper.cc.o ip:10.103.191.32 start:2019-02-11 16:33:12 used:332ms
[ 61%] cached baidu/base/common/base/nix/baidu_base_common_base_lib_xdg_util.cc.o ip:10.103.191.32 start:2019-02-11 16:33:12 used:373ms
[ 61%] cached baidu/base/common/base/baidu_base_common_base_lib_values.cc.o ip:10.103.191.34 start:2019-02-11 16:33:12 used:400ms
[ 61%] cached baidu/base/common/base/time/baidu_base_common_base_lib_default_tick_clock.cc.o ip:10.103.191.34 start:2019-02-11 16:33:12 used:232ms
[ 62%] cached baidu/base/common/base/baidu_base_common_base_lib_file_util.cc.o ip:10.103.191.34 start:2019-02-11 16:33:12 used:442ms
[ 62%] cached public/configure/constraint/public_configure_config_lib_ConstraintFunction.cpp.o ip:10.103.191.31 start:2019-02-11 16:33:12 used:195ms
[ 62%] cached public/configure/public_configure_config_lib_ConfigUnit.cpp.o ip:10.103.191.31 start:2019-02-11 16:33:12 used:204ms
[ 62%] cached baidu/base/common/base/debug/baidu_base_common_base_lib_asan_invalid_access.cc.o ip:10.103.191.35 start:2019-02-11 16:33:12 used:272ms
[ 62%] cached baidu/base/protobuf-json/src/baidu_base_protobuf-json_json-pb_lib_protobuf_map.cpp.o ip:10.103.191.32 start:2019-02-11 16:33:12 used:186ms
[ 62%] cached baidu/base/common/base/metrics/baidu_base_common_base_lib_field_trial.cc.o ip:10.102.26.39 start:2019-02-11 16:33:12 used:384ms
[ 62%] cached baidu/base/baidu-rpc/src/baidu/rpc/policy/baidu_base_baidu-rpc_bdrpc_lib_redis_protocol.cpp.o ip:10.103.191.34 start:2019-02-11 16:33:12 used:749ms
[ 63%] cached op/oped/noah/webfoot/naming-lib/msg/op_oped_noah_webfoot_naming-lib_webfoot_naming_lib_naming.pb.cc.o ip:10.103.191.37 start:2019-02-11 16:33:12 used:161ms
[ 63%] cached baidu/base/ullib/src/comlog/appender/baidu_base_ullib_ullib_lib_quotachecker.cpp.o ip:10.103.191.34 start:2019-02-11 16:33:12 used:260ms
[ 63%] cached baidu/base/common/base/debug/baidu_base_common_base_lib_stack_trace.cc.o ip:10.103.191.33 start:2019-02-11 16:33:12 used:176ms
[ 63%] cached baidu/base/baidu-rpc/src/baidu/rpc/policy/baidu_base_baidu-rpc_bdrpc_lib_locality_aware_load_balancer.cpp.o ip:10.103.191.33 start:2019-02-11 16:33:12 used:527ms
[ 63%] cached baidu/base/common/base/debug/baidu_base_common_base_lib_crash_logging.cc.o ip:10.103.191.35 start:2019-02-11 16:33:12 used:258ms
[ 63%] cached baidu/base/common/base/posix/baidu_base_common_base_lib_unix_domain_socket_linux.cc.o ip:10.103.191.35 start:2019-02-11 16:33:12 used:274ms
[ 63%] cached baidu/base/common/base/third_party/snappy/baidu_base_common_base_lib_snappy-sinksource.cc.o ip:10.103.191.34 start:2019-02-11 16:33:12 used:156ms
[ 64%] cached public/configure/compiler/src/public_configure_config_lib_idl_gram.c.o ip:10.103.191.37 start:2019-02-11 16:33:12 used:168ms
[ 64%] cached baidu/im-common/mempool/src/baidu_im-common_mempool_mempool_lib_mempool.cpp.o ip:10.102.26.40 start:2019-02-11 16:33:12 used:279ms
[ 64%] cached baidu/base/common/base/baidu_base_common_base_lib_run_loop.cc.o ip:10.103.191.33 start:2019-02-11 16:33:12 used:291ms
[ 64%] cached baidu/base/baidu-rpc/src/baidu/rpc/policy/baidu_base_baidu-rpc_bdrpc_lib_hulu_pbrpc_protocol.cpp.o ip:10.102.26.38 start:2019-02-11 16:33:12 used:871ms
[ 64%] cached baidu/base/common/base/process/baidu_base_common_base_lib_memory.cc.o ip:10.103.191.34 start:2019-02-11 16:33:12 used:314ms
[ 64%] cached public/noah/giano-lib/release/baas-lib-c/public_noah_giano-lib_release_baas-lib-c_baas_interface_lib_credential_generator.cpp.o ip:10.103.191.37 start:2019-02-11 16:33:12 used:150ms
[ 64%] cached baidu/base/ullib/src/dep/baidu_base_ullib_ullib_lib_dep_http.cpp.o ip:10.103.191.32 start:2019-02-11 16:33:12 used:201ms
[ 64%] cached baidu/base/common/base/message_loop/baidu_base_common_base_lib_message_pump_libevent.cc.o ip:10.102.26.38 start:2019-02-11 16:33:12 used:400ms
[ 65%] cached baidu/base/common/base/third_party/symbolize/baidu_base_common_base_lib_demangle.cc.o ip:10.103.191.34 start:2019-02-11 16:33:12 used:173ms
[ 65%] cached baidu/base/common/base/metrics/baidu_base_common_base_lib_histogram.cc.o ip:10.103.191.34 start:2019-02-11 16:33:12 used:455ms
[ 65%] cached baidu/base/common/base/third_party/xdg_mime/baidu_base_common_base_lib_xdgmimeint.c.o ip:10.103.191.34 start:2019-02-11 16:33:12 used:227ms
[ 65%] cached baidu/base/common/base/files/baidu_base_common_base_lib_memory_mapped_file_posix.cc.o ip:10.103.191.34 start:2019-02-11 16:33:12 used:439ms
[ 65%] cached baidu/base/common/base/memory/baidu_base_common_base_lib_shared_memory_posix.cc.o ip:10.102.26.39 start:2019-02-11 16:33:12 used:330ms
[ 65%] cached baidu/base/common/base/message_loop/baidu_base_common_base_lib_message_pump_default.cc.o ip:10.102.26.39 start:2019-02-11 16:33:12 used:351ms
[ 65%] cached public/noah/giano-lib/release/baas-lib-c/common/base/public_noah_giano-lib_release_baas-lib-c_baas_interface_lib_check_error.cpp.o ip:10.102.26.40 start:2019-02-11 16:33:13 used:119ms
[ 66%] cached baidu/base/mcpack2pb/mcpack/baidu_base_mcpack2pb_mcpack2pb_lib_serializer.cpp.o ip:10.103.191.32 start:2019-02-11 16:33:12 used:303ms
[ 66%] cached baidu/base/common/base/debug/baidu_base_common_base_lib_trace_event_impl_constants.cc.o ip:10.103.191.35 start:2019-02-11 16:33:12 used:347ms
[ 66%] cached baidu/base/common/base/third_party/dynamic_annotations/baidu_base_common_base_lib_dynamic_annotations.c.o ip:10.102.26.38 start:2019-02-11 16:33:12 used:166ms
[ 66%] cached baidu/base/common/base/third_party/dmg_fp/baidu_base_common_base_lib_g_fmt.cc.o ip:10.102.26.38 start:2019-02-11 16:33:12 used:167ms
[ 66%] cached public/noah/giano-lib/release/baas-lib-c/mock/public_noah_giano-lib_release_baas-lib-c_baas_interface_lib_mock_credential_verifier_impl.cpp.o ip:10.103.191.37 start:2019-02-11 16:33:12 used:155ms
[ 66%] cached baidu/base/common/base/metrics/baidu_base_common_base_lib_histogram_base.cc.o ip:10.103.191.35 start:2019-02-11 16:33:12 used:343ms
[ 66%] cached public/noah/giano-lib/release/baas-lib-c/public_noah_giano-lib_release_baas-lib-c_baas_interface_lib_proof_generator.cpp.o ip:10.103.191.37 start:2019-02-11 16:33:12 used:157ms
[ 67%] cached baidu/base/common/base/memory/baidu_base_common_base_lib_ref_counted.cc.o ip:10.103.191.33 start:2019-02-11 16:33:12 used:209ms
[ 67%] cached baidu/base/baidu-rpc/src/baidu/rpc/policy/baidu_base_baidu-rpc_bdrpc_lib_round_robin_load_balancer.cpp.o ip:10.103.191.33 start:2019-02-11 16:33:12 used:581ms
[ 67%] cached baidu/base/common/base/debug/baidu_base_common_base_lib_debugger.cc.o ip:10.103.191.33 start:2019-02-11 16:33:12 used:304ms
[ 67%] cached baidu/base/mcpack2pb/mcpack/baidu_base_mcpack2pb_mcpack2pb_lib_mcpack2pb.cpp.o ip:10.103.191.32 start:2019-02-11 16:33:12 used:325ms
[ 67%] cached baidu/base/common/base/posix/baidu_base_common_base_lib_global_descriptors.cc.o ip:10.103.191.33 start:2019-02-11 16:33:12 used:305ms
[ 67%] cached public/noah/giano-lib/release/baas-lib-c/public_noah_giano-lib_release_baas-lib-c_baas_interface_lib_baas_resource.cpp.o ip:10.102.26.40 start:2019-02-11 16:33:13 used:151ms
[ 67%] cached op/oped/noah/webfoot/naming-lib/msg/op_oped_noah_webfoot_naming-lib_webfoot_naming_lib_service.pb.cc.o ip:10.102.26.40 start:2019-02-11 16:33:12 used:199ms
[ 68%] cached public/configure/compiler/src/public_configure_config_lib_idl_lex.c.o ip:10.102.26.40 start:2019-02-11 16:33:12 used:176ms
[ 68%] cached baidu/base/common/base/json/baidu_base_common_base_lib_json_writer.cc.o ip:10.102.26.39 start:2019-02-11 16:33:12 used:350ms
[ 68%] cached baidu/base/ullib/src/comlog/appender/baidu_base_ullib_ullib_lib_asyncfileappender.cpp.o ip:10.103.191.34 start:2019-02-11 16:33:12 used:303ms
[ 68%] cached baidu/base/common/base/process/baidu_base_common_base_lib_process_handle_posix.cc.o ip:10.102.26.38 start:2019-02-11 16:33:12 used:299ms
[ 68%] cached baidu/base/baidu-rpc/src/baidu/rpc/baidu_base_baidu-rpc_bdrpc_lib_socket.cpp.o ip:10.102.26.38 start:2019-02-11 16:33:12 used:794ms
[ 68%] cached public/configure/public_configure_config_lib_ConfigGroup.cpp.o ip:10.103.191.37 start:2019-02-11 16:33:12 used:211ms
[ 68%] cached public/configure/public_configure_config_lib_Configure.cpp.o ip:10.103.191.37 start:2019-02-11 16:33:12 used:224ms
[ 69%] cached baidu/base/ullib/src/comlog/appender/baidu_base_ullib_ullib_lib_netappender.cpp.o ip:10.103.191.35 start:2019-02-11 16:33:12 used:250ms
[ 69%] cached baidu/base/common/base/json/baidu_base_common_base_lib_json_file_value_serializer.cc.o ip:10.103.191.35 start:2019-02-11 16:33:12 used:309ms
[ 69%] cached baidu/base/common/base/threading/baidu_base_common_base_lib_thread_restrictions.cc.o ip:10.102.26.39 start:2019-02-11 16:33:13 used:133ms
[ 69%] cached public/noah/giano-lib/release/baas-lib-c/public_noah_giano-lib_release_baas-lib-c_baas_interface_lib_server_utility.cpp.o ip:10.103.191.37 start:2019-02-11 16:33:13 used:157ms
[ 69%] cached public/noah/giano-lib/release/baas-lib-c/public_noah_giano-lib_release_baas-lib-c_baas_interface_lib_proof_verifier.cpp.o ip:10.102.26.40 start:2019-02-11 16:33:13 used:159ms
[ 69%] cached baidu/base/common/base/time/baidu_base_common_base_lib_default_clock.cc.o ip:10.102.26.39 start:2019-02-11 16:33:12 used:183ms
[ 69%] cached baidu/base/common/base/baidu_base_common_base_lib_status.cpp.o ip:10.103.191.35 start:2019-02-11 16:33:12 used:260ms
[ 70%] cached baidu/base/ullib/src/baidu_base_ullib_ullib_lib_ul_thr.cpp.o ip:10.103.191.34 start:2019-02-11 16:33:13 used:187ms
[ 70%] cached baidu/base/ullib/src/comlog/comempool/baidu_base_ullib_ullib_lib_dlist.cpp.o ip:10.103.191.34 start:2019-02-11 16:33:13 used:115ms
[ 70%] cached baidu/base/protobuf-json/src/baidu_base_protobuf-json_json-pb_lib_encode_decode.cpp.o ip:10.103.191.34 start:2019-02-11 16:33:13 used:168ms
[ 70%] cached baidu/base/common/base/process/baidu_base_common_base_lib_kill.cc.o ip:10.103.191.35 start:2019-02-11 16:33:12 used:284ms
[ 70%] cached baidu/base/common/base/baidu_base_common_base_lib_file_watcher.cpp.o ip:10.102.26.38 start:2019-02-11 16:33:12 used:221ms
[ 70%] cached baidu/base/common/base/files/baidu_base_common_base_lib_scoped_temp_dir.cc.o ip:10.102.26.38 start:2019-02-11 16:33:12 used:431ms
[ 70%] cached baidu/base/common/base/process/baidu_base_common_base_lib_process_info_linux.cc.o ip:10.102.26.39 start:2019-02-11 16:33:12 used:360ms
[ 71%] cached baidu/base/common/base/metrics/baidu_base_common_base_lib_histogram_delta_serialization.cc.o ip:10.103.191.33 start:2019-02-11 16:33:12 used:333ms
[ 71%] cached baidu/base/baidu-rpc/src/baidu/rpc/policy/baidu_base_baidu-rpc_bdrpc_lib_esp_protocol.cpp.o ip:10.103.191.33 start:2019-02-11 16:33:12 used:595ms
[ 71%] cached baidu/base/common/base/strings/baidu_base_common_base_lib_string16.cc.o ip:10.103.191.33 start:2019-02-11 16:33:13 used:193ms
[ 71%] cached baidu/base/common/base/third_party/modp_b64/baidu_base_common_base_lib_modp_b64.cc.o ip:10.103.191.35 start:2019-02-11 16:33:13 used:189ms
[ 71%] cached baidu/base/common/base/files/baidu_base_common_base_lib_important_file_writer.cc.o ip:10.103.191.35 start:2019-02-11 16:33:12 used:399ms
[ 71%] cached baidu/base/common/base/strings/baidu_base_common_base_lib_nullable_string16.cc.o ip:10.103.191.32 start:2019-02-11 16:33:13 used:181ms
[ 71%] cached baidu/base/common/base/third_party/symbolize/baidu_base_common_base_lib_symbolize.cc.o ip:10.102.26.38 start:2019-02-11 16:33:12 used:251ms
[ 71%] cached baidu/base/common/base/memory/baidu_base_common_base_lib_discardable_memory_linux.cc.o ip:10.102.26.38 start:2019-02-11 16:33:12 used:435ms
[ 72%] cached baidu/base/common/base/profiler/baidu_base_common_base_lib_alternate_timer.cc.o ip:10.103.191.34 start:2019-02-11 16:33:12 used:439ms
[ 72%] cached baidu/base/ullib/src/baidu_base_ullib_ullib_lib_ul_pack.cpp.o ip:10.103.191.34 start:2019-02-11 16:33:13 used:179ms
[ 72%] cached baidu/base/baidu-rpc/output/lib/libbdrpc.a ip:10.103.191.36 start:2019-02-11 16:33:13 used:23ms
[ 72%] cached baidu/base/ullib/src/comlog/appender/baidu_base_ullib_ullib_lib_fileappender.cpp.o ip:10.102.26.39 start:2019-02-11 16:33:12 used:294ms
[ 72%] cached baidu/base/common/base/third_party/superfasthash/baidu_base_common_base_lib_superfasthash.c.o ip:10.102.26.39 start:2019-02-11 16:33:13 used:174ms
[ 72%] cached baidu/base/common/base/message_loop/baidu_base_common_base_lib_message_loop.cc.o ip:10.102.26.39 start:2019-02-11 16:33:12 used:427ms
[ 72%] cached baidu/base/common/base/strings/baidu_base_common_base_lib_string_util.cc.o ip:10.102.26.39 start:2019-02-11 16:33:12 used:335ms
[ 73%] cached baidu/base/ullib/src/baidu_base_ullib_ullib_lib_ul_url.cpp.o ip:10.103.191.35 start:2019-02-11 16:33:12 used:281ms
[ 73%] cached baidu/base/common/base/threading/baidu_base_common_base_lib_platform_thread_linux.cc.o ip:10.102.26.39 start:2019-02-11 16:33:12 used:374ms
[ 73%] cached baidu/base/common/base/memory/baidu_base_common_base_lib_discardable_memory_manager.cc.o ip:10.102.26.38 start:2019-02-11 16:33:12 used:451ms
[ 73%] cached baidu/base/common/base/strings/baidu_base_common_base_lib_string_piece.cc.o ip:10.102.26.38 start:2019-02-11 16:33:12 used:321ms
[ 73%] cached baidu/base/common/base/threading/baidu_base_common_base_lib_post_task_and_reply_impl.cc.o ip:10.102.26.39 start:2019-02-11 16:33:12 used:358ms
[ 73%] cached baidu/base/common/base/threading/baidu_base_common_base_lib_sequenced_worker_pool.cc.o ip:10.103.191.34 start:2019-02-11 16:33:12 used:431ms
[ 73%] cached baidu/base/common/base/process/baidu_base_common_base_lib_kill_posix.cc.o ip:10.103.191.35 start:2019-02-11 16:33:12 used:366ms
[ 74%] cached baidu/base/ullib/src/baidu_base_ullib_ullib_lib_ul_circle.cpp.o ip:10.103.191.35 start:2019-02-11 16:33:13 used:178ms
[ 74%] cached baidu/base/ullib/src/comlog/baidu_base_ullib_ullib_lib_category.cpp.o ip:10.103.191.34 start:2019-02-11 16:33:13 used:193ms
[ 74%] cached baidu/base/common/base/process/baidu_base_common_base_lib_internal_linux.cc.o ip:10.103.191.33 start:2019-02-11 16:33:12 used:304ms
[ 74%] cached baidu/base/common/base/third_party/xdg_mime/baidu_base_common_base_lib_xdgmimemagic.c.o ip:10.103.191.33 start:2019-02-11 16:33:13 used:126ms
[ 74%] cached baidu/base/common/base/memory/baidu_base_common_base_lib_weak_ptr.cc.o ip:10.103.191.33 start:2019-02-11 16:33:12 used:337ms
[ 74%] cached baidu/base/common/testing/baidu_base_common_base_lib_multiprocess_func_list.cc.o ip:10.102.26.39 start:2019-02-11 16:33:13 used:177ms
[ 74%] cached baidu/base/common/base/strings/baidu_base_common_base_lib_string_util_constants.cc.o ip:10.102.26.39 start:2019-02-11 16:33:12 used:363ms
[ 75%] cached public/noah/giano-lib/release/baas-lib-c/mock/public_noah_giano-lib_release_baas-lib-c_baas_interface_lib_mock_common.cpp.o ip:10.103.191.32 start:2019-02-11 16:33:13 used:163ms
[ 75%] cached baidu/base/ullib/src/comlog/appender/baidu_base_ullib_ullib_lib_appenderfactory.cpp.o ip:10.103.191.33 start:2019-02-11 16:33:13 used:243ms
[ 75%] cached baidu/base/common/base/third_party/xdg_mime/baidu_base_common_base_lib_xdgmime.c.o ip:10.102.26.38 start:2019-02-11 16:33:13 used:219ms
[ 75%] cached baidu/base/common/base/nix/baidu_base_common_base_lib_mime_util_xdg.cc.o ip:10.103.191.35 start:2019-02-11 16:33:12 used:359ms
[ 75%] cached baidu/base/ullib/src/baidu_base_ullib_ullib_lib_ul_conf.cpp.o ip:10.103.191.33 start:2019-02-11 16:33:13 used:161ms
[ 75%] cached baidu/base/common/base/process/baidu_base_common_base_lib_launch_posix.cc.o ip:10.103.191.33 start:2019-02-11 16:33:12 used:335ms
[ 75%] cached baidu/base/ullib/src/comlog/baidu_base_ullib_ullib_lib_xthread.cpp.o ip:10.102.26.38 start:2019-02-11 16:33:13 used:139ms
[ 76%] cached baidu/base/ullib/src/baidu_base_ullib_ullib_lib_basiclogadapter.cpp.o ip:10.103.191.35 start:2019-02-11 16:33:13 used:241ms
[ 76%] cached baidu/base/ullib/src/baidu_base_ullib_ullib_lib_ul_net1.cpp.o ip:10.103.191.35 start:2019-02-11 16:33:13 used:206ms
[ 76%] cached baidu/base/common/base/metrics/baidu_base_common_base_lib_user_metrics.cc.o ip:10.103.191.35 start:2019-02-11 16:33:13 used:329ms
[ 76%] compile baidu/paddle-serving/predictor/plugin/baidu_paddle-serving_predictor_pdcodegen_app_substitute.cc.o ip:10.102.26.40 start:2019-02-11 16:33:12 used:383ms
[ 76%] cached baidu/base/ullib/src/comlog/baidu_base_ullib_ullib_lib_comsig.cpp.o ip:10.103.191.35 start:2019-02-11 16:33:13 used:182ms
[ 76%] cached baidu/base/ullib/src/comlog/baidu_base_ullib_ullib_lib_comlog.cpp.o ip:10.102.26.39 start:2019-02-11 16:33:13 used:194ms
[ 76%] cached baidu/base/common/base/strings/baidu_base_common_base_lib_utf_offset_string_conversions.cc.o ip:10.102.26.39 start:2019-02-11 16:33:12 used:413ms
[ 77%] cached baidu/base/common/base/process/baidu_base_common_base_lib_process_metrics_posix.cc.o ip:10.103.191.33 start:2019-02-11 16:33:13 used:366ms
[ 77%] cached baidu/base/ullib/src/baidu_base_ullib_ullib_lib_ul_file.cpp.o ip:10.103.191.33 start:2019-02-11 16:33:13 used:194ms
[ 77%] cached baidu/base/common/base/strings/baidu_base_common_base_lib_string_split.cc.o ip:10.102.26.39 start:2019-02-11 16:33:12 used:385ms
[ 77%] cached public/configure/public_configure_config_lib_FileReloader.cpp.o ip:10.103.191.32 start:2019-02-11 16:33:13 used:270ms
[ 77%] cached public/configure/public_configure_config_lib_ConfigError.cpp.o ip:10.103.191.32 start:2019-02-11 16:33:13 used:314ms
[ 77%] cached public/noah/giano-lib/release/baas-lib-c/authorization/public_noah_giano-lib_release_baas-lib-c_baas_interface_lib_access_controller.cpp.o ip:10.103.191.34 start:2019-02-11 16:33:13 used:186ms
[ 77%] cached baidu/base/bthread/output/lib/libbthread.a ip:10.103.191.32 start:2019-02-11 16:33:13 used:71ms
[ 78%] cached baidu/base/ullib/src/comlog/baidu_base_ullib_ullib_lib_logstat.cpp.o ip:10.102.26.39 start:2019-02-11 16:33:13 used:207ms
[ 78%] cached baidu/base/ullib/src/dep/baidu_base_ullib_ullib_lib_dep_file.cpp.o ip:10.102.26.38 start:2019-02-11 16:33:13 used:163ms
[ 78%] cached baidu/base/common/base/strings/baidu_base_common_base_lib_stringprintf.cc.o ip:10.103.191.35 start:2019-02-11 16:33:12 used:426ms
[ 78%] cached public/noah/giano-lib/release/baas-lib-c/public_noah_giano-lib_release_baas-lib-c_baas_interface_lib_credential_context.cpp.o ip:10.103.191.34 start:2019-02-11 16:33:13 used:161ms
[ 78%] cached baidu/base/ullib/src/comlog/baidu_base_ullib_ullib_lib_event.cpp.o ip:10.102.26.38 start:2019-02-11 16:33:13 used:216ms
[ 78%] cached baidu/base/ullib/src/dep/baidu_base_ullib_ullib_lib_dep_udpdns.cpp.o ip:10.103.191.35 start:2019-02-11 16:33:13 used:205ms
[ 78%] cached baidu/base/ullib/src/dep/baidu_base_ullib_ullib_lib_dep_pack.cpp.o ip:10.103.191.35 start:2019-02-11 16:33:13 used:214ms
[ 78%] cached baidu/base/common/base/third_party/xdg_mime/baidu_base_common_base_lib_xdgmimeglob.c.o ip:10.103.191.35 start:2019-02-11 16:33:13 used:114ms
[ 79%] cached public/noah/giano-lib/release/baas-lib-c/common/public_noah_giano-lib_release_baas-lib-c_baas_interface_lib_baas_log.cpp.o ip:10.103.191.32 start:2019-02-11 16:33:13 used:175ms
[ 79%] cached baidu/base/common/base/threading/baidu_base_common_base_lib_simple_thread.cc.o ip:10.102.26.38 start:2019-02-11 16:33:12 used:458ms
[ 79%] cached baidu/base/common/base/threading/baidu_base_common_base_lib_platform_thread_posix.cc.o ip:10.102.26.38 start:2019-02-11 16:33:12 used:512ms
[ 79%] cached baidu/base/ullib/src/baidu_base_ullib_ullib_lib_ul_string.cpp.o ip:10.102.26.39 start:2019-02-11 16:33:13 used:168ms
[ 79%] cached public/noah/giano-lib/release/baas-lib-c/public_noah_giano-lib_release_baas-lib-c_baas_interface_lib_project_business.cpp.o ip:10.103.191.32 start:2019-02-11 16:33:13 used:129ms
[ 79%] cached public/noah/giano-lib/release/baas-lib-c/mock/public_noah_giano-lib_release_baas-lib-c_baas_interface_lib_mock_credential_context_impl.cpp.o ip:10.103.191.32 start:2019-02-11 16:33:13 used:178ms
[ 79%] cached baidu/base/protobuf-json/src/baidu_base_protobuf-json_json-pb_lib_json_to_pb.cpp.o ip:10.102.26.39 start:2019-02-11 16:33:13 used:283ms
[ 80%] cached baidu/base/common/base/synchronization/baidu_base_common_base_lib_waitable_event_watcher_posix.cc.o ip:10.102.26.39 start:2019-02-11 16:33:13 used:378ms
[ 80%] cached op/oped/noah/webfoot/naming-lib/msg/op_oped_noah_webfoot_naming-lib_webfoot_naming_lib_naminglib.pb.cc.o ip:10.103.191.34 start:2019-02-11 16:33:13 used:223ms
[ 80%] cached public/configure/public_configure_config_lib_CmdOption.cpp.o ip:10.103.191.34 start:2019-02-11 16:33:13 used:222ms
[ 80%] cached baidu/base/ullib/src/comlog/baidu_base_ullib_ullib_lib_xutils.cpp.o ip:10.103.191.33 start:2019-02-11 16:33:13 used:209ms
[ 80%] cached baidu/base/common/base/power_monitor/baidu_base_common_base_lib_power_monitor_source.cc.o ip:10.103.191.33 start:2019-02-11 16:33:12 used:475ms
[ 80%] cached baidu/im-common/mempool/output/lib/libmempool.a ip:10.103.191.32 start:2019-02-11 16:33:13 used:44ms
[ 80%] cached baidu/base/iobuf/base/baidu_base_iobuf_iobuf_lib_iobuf.cpp.o ip:10.103.191.35 start:2019-02-11 16:33:13 used:360ms
[ 81%] cached public/noah/giano-lib/release/baas-lib-c/public_noah_giano-lib_release_baas-lib-c_baas_interface_lib_baas_common.cpp.o ip:10.103.191.34 start:2019-02-11 16:33:13 used:193ms
[ 81%] cached baidu/base/common/base/threading/baidu_base_common_base_lib_thread_local_posix.cc.o ip:10.103.191.33 start:2019-02-11 16:33:13 used:354ms
[ 81%] cached baidu/base/common/base/process/baidu_base_common_base_lib_process_metrics_linux.cc.o ip:10.102.26.38 start:2019-02-11 16:33:12 used:567ms
[ 81%] cached baidu/base/ullib/src/dep/baidu_base_ullib_ullib_lib_dep_conf.cpp.o ip:10.102.26.38 start:2019-02-11 16:33:13 used:211ms
[ 81%] cached public/noah/giano-lib/release/baas-lib-c/public_noah_giano-lib_release_baas-lib-c_baas_interface_lib_giano_mock_helper.cpp.o ip:10.103.191.34 start:2019-02-11 16:33:13 used:177ms
[ 81%] cached baidu/base/common/base/threading/baidu_base_common_base_lib_thread.cc.o ip:10.103.191.33 start:2019-02-11 16:33:13 used:398ms
[ 81%] cached public/noah/giano-lib/release/baas-lib-c/shared_object/public_noah_giano-lib_release_baas-lib-c_baas_interface_lib_upgrade_helper.cpp.o ip:10.103.191.32 start:2019-02-11 16:33:13 used:144ms
[ 82%] cached baidu/base/common/base/posix/baidu_base_common_base_lib_file_descriptor_shuffle.cc.o ip:10.103.191.33 start:2019-02-11 16:33:13 used:350ms
[ 82%] cached baidu/base/ullib/src/dep/baidu_base_ullib_ullib_lib_dep_htmltag.cpp.o ip:10.103.191.33 start:2019-02-11 16:33:13 used:181ms
[ 82%] cached baidu/base/protobuf-json/src/baidu_base_protobuf-json_json-pb_lib_pb_to_json.cpp.o ip:10.103.191.33 start:2019-02-11 16:33:13 used:308ms
[ 82%] cached baidu/base/ullib/src/comlog/sendsvr/baidu_base_ullib_ullib_lib_sendsvr.cpp.o ip:10.102.26.39 start:2019-02-11 16:33:13 used:215ms
[ 82%] cached baidu/base/ullib/src/dep/baidu_base_ullib_ullib_lib_dep_net.cpp.o ip:10.103.191.33 start:2019-02-11 16:33:13 used:182ms
[ 82%] cached baidu/base/iobuf/output/lib/libiobuf.a ip:10.103.191.32 start:2019-02-11 16:33:13 used:23ms
[ 82%] cached baidu/base/ullib/src/baidu_base_ullib_ullib_lib_ul_func.cpp.o ip:10.102.26.38 start:2019-02-11 16:33:13 used:294ms
[ 83%] cached baidu/base/ullib/src/baidu_base_ullib_ullib_lib_ul_url2.cpp.o ip:10.102.26.38 start:2019-02-11 16:33:13 used:351ms
[ 83%] cached baidu/base/ullib/src/baidu_base_ullib_ullib_lib_ul_log.cpp.o ip:10.102.26.38 start:2019-02-11 16:33:13 used:317ms
[ 83%] cached public/noah/giano-lib/release/baas-lib-c/common/jsoncpp/public_noah_giano-lib_release_baas-lib-c_baas_interface_lib_json_value.cpp.o ip:10.103.191.35 start:2019-02-11 16:33:13 used:128ms
[ 83%] cached baidu/base/ullib/src/comlog/baidu_base_ullib_ullib_lib_logcore.cpp.o ip:10.102.26.38 start:2019-02-11 16:33:13 used:292ms
[ 83%] cached op/oped/noah/webfoot/naming-lib/src/op_oped_noah_webfoot_naming-lib_webfoot_naming_lib_webfoot_item.cpp.o ip:10.103.191.35 start:2019-02-11 16:33:13 used:146ms
[ 83%] cached baidu/base/bvar/output/lib/libbvar.a ip:10.102.26.39 start:2019-02-11 16:33:13 used:36ms
[ 83%] cached public/noah/giano-lib/release/baas-lib-c/public_noah_giano-lib_release_baas-lib-c_baas_interface_lib_proof_utility.cpp.o ip:10.103.191.35 start:2019-02-11 16:33:13 used:150ms
[ 84%] cached baidu/base/common/base/power_monitor/baidu_base_common_base_lib_power_monitor_device_source.cc.o ip:10.102.26.38 start:2019-02-11 16:33:12 used:651ms
[ 84%] cached baidu/base/ullib/output/lib/libullib.a ip:10.103.191.34 start:2019-02-11 16:33:13 used:22ms
[ 84%] cached baidu/base/protobuf-json/output/lib/libjson-pb.a ip:10.103.191.35 start:2019-02-11 16:33:13 used:29ms
[ 84%] cached op/oped/noah/webfoot/naming-lib/src/op_oped_noah_webfoot_naming-lib_webfoot_naming_lib_webfoot_naming.cpp.o ip:10.102.26.39 start:2019-02-11 16:33:13 used:245ms
[ 84%] cached baidu/base/mcpack2pb/mcpack/baidu_base_mcpack2pb_mcpack2pb_lib_parser.cpp.o ip:10.102.26.38 start:2019-02-11 16:33:13 used:384ms
[ 84%] cached baidu/base/common/output/lib/libbase.a ip:10.103.191.35 start:2019-02-11 16:33:13 used:24ms
[ 84%] cached public/spreg/public_spreg_spreg_lib_spreg.cpp.o ip:10.102.26.39 start:2019-02-11 16:33:13 used:107ms
[ 85%] cached public/spreg/output/lib/libspreg.a ip:10.103.191.31 start:2019-02-11 16:33:13 used:21ms
[ 85%] cached op/oped/noah/webfoot/naming-lib/output/lib/libwebfoot_naming.a ip:10.103.191.34 start:2019-02-11 16:33:13 used:22ms
[ 85%] cached public/configure/utils/public_configure_config_lib_init.cpp.o ip:10.102.26.38 start:2019-02-11 16:33:13 used:166ms
[ 85%] cached baidu/base/mcpack2pb/output/lib/libmcpack2pb.a ip:10.102.26.40 start:2019-02-11 16:33:13 used:22ms
[ 85%] cached public/configure/public_configure_config_lib_ConfigReloader.cpp.o ip:10.102.26.39 start:2019-02-11 16:33:13 used:258ms
[ 85%] cached public/noah/giano-lib/release/baas-lib-c/mock/public_noah_giano-lib_release_baas-lib-c_baas_interface_lib_mock_proof_generator_impl.cpp.o ip:10.102.26.39 start:2019-02-11 16:33:13 used:180ms
[ 85%] cached public/noah/giano-lib/release/baas-lib-c/public_noah_giano-lib_release_baas-lib-c_baas_interface_lib_delegation_params_ex.cpp.o ip:10.102.26.39 start:2019-02-11 16:33:13 used:210ms
[ 85%] cached public/configure/public_configure_config_lib_cfgext.cpp.o ip:10.102.26.39 start:2019-02-11 16:33:13 used:243ms
[ 86%] cached public/noah/giano-lib/release/baas-lib-c/public_noah_giano-lib_release_baas-lib-c_baas_interface_lib_credential_verifier.cpp.o ip:10.102.26.39 start:2019-02-11 16:33:13 used:225ms
[ 86%] cached public/noah/giano-lib/release/baas-lib-c/mock/public_noah_giano-lib_release_baas-lib-c_baas_interface_lib_mock_credential_generator_impl.cpp.o ip:10.102.26.38 start:2019-02-11 16:33:13 used:154ms
[ 86%] cached public/configure/public_configure_config_lib_cfgflag.cpp.o ip:10.102.26.38 start:2019-02-11 16:33:13 used:199ms
[ 86%] cached public/noah/giano-lib/release/baas-lib-c/public_noah_giano-lib_release_baas-lib-c_baas_interface_lib_role_business.cpp.o ip:10.102.26.39 start:2019-02-11 16:33:13 used:195ms
[ 86%] cached public/configure/reader/public_configure_config_lib_RawData.cpp.o ip:10.102.26.39 start:2019-02-11 16:33:13 used:255ms
[ 86%] cached public/configure/constraint/public_configure_config_lib_Constraint.cpp.o ip:10.102.26.38 start:2019-02-11 16:33:13 used:211ms
[ 86%] cached public/configure/utils/public_configure_config_lib_trans.cpp.o ip:10.103.191.33 start:2019-02-11 16:33:13 used:173ms
[ 87%] cached public/noah/giano-lib/release/baas-lib-c/public_noah_giano-lib_release_baas-lib-c_baas_interface_lib_resource_management.cpp.o ip:10.102.26.38 start:2019-02-11 16:33:13 used:185ms
[ 87%] cached public/configure/output/lib/libconfig.a ip:10.103.191.34 start:2019-02-11 16:33:13 used:22ms
[ 87%] cached baidu/paddle-serving/predictor/unittest/baidu_paddle-serving_predictor_test_pdserving_ut_app_test_main.cpp.o ip:10.102.26.40 start:2019-02-11 16:33:12 used:720ms
[ 87%] cached public/noah/giano-lib/release/baas-lib-c/output/lib/libbaas_interface.a ip:10.103.191.37 start:2019-02-11 16:33:13 used:20ms
[ 87%] cached baidu/paddle-serving/predictor/unittest/baidu_paddle-serving_predictor_test_pdserving_ut_app_test_manager.cpp.o ip:10.102.26.40 start:2019-02-11 16:33:12 used:724ms
[ 87%] compile baidu/paddle-serving/predictor/proto/baidu_paddle-serving_predictor_pdclient_app_xrecord_format.pb.cc.o ip:10.103.191.36 start:2019-02-11 16:33:11 used:2758ms
[ 87%] cached baidu/paddle-serving/predictor/unittest/baidu_paddle-serving_predictor_test_pdserving_ut_app_test_bsf.cpp.o ip:10.103.191.32 start:2019-02-11 16:33:13 used:864ms
[ 88%] compile baidu/paddle-serving/predictor/proto/baidu_paddle-serving_predictor_pdserving_lib_pds_option.pb.cc.o ip:10.103.191.31 start:2019-02-11 16:33:13 used:882ms
[ 88%] compile baidu/paddle-serving/predictor/proto/baidu_paddle-serving_predictor_pdclient_app_msg_data.pb.cc.o ip:10.102.26.40 start:2019-02-11 16:33:13 used:914ms
[ 88%] cached baidu/paddle-serving/predictor/unittest/baidu_paddle-serving_predictor_test_pdserving_ut_app_test_server_manager.cpp.o ip:10.103.191.35 start:2019-02-11 16:33:13 used:659ms
[ 88%] compile baidu/paddle-serving/predictor/proto/baidu_paddle-serving_predictor_pdserving_app_xrecord_format.pb.cc.o ip:10.103.191.36 start:2019-02-11 16:33:11 used:2753ms
[ 88%] cached baidu/paddle-serving/predictor/unittest/baidu_paddle-serving_predictor_test_pdserving_ut_app_test_message_op.cpp.o ip:10.103.191.33 start:2019-02-11 16:33:13 used:739ms
[ 88%] cached baidu/paddle-serving/predictor/unittest/baidu_paddle-serving_predictor_test_pdserving_ut_app_test_op.cpp.o ip:10.103.191.33 start:2019-02-11 16:33:13 used:734ms
[ 88%] compile baidu/paddle-serving/predictor/proto/baidu_paddle-serving_predictor_pdcodegen_app_pds_option.pb.cc.o ip:10.103.191.35 start:2019-02-11 16:33:13 used:901ms
[ 89%] compile baidu/paddle-serving/predictor/proto/baidu_paddle-serving_predictor_pdclient_app_pds_option.pb.cc.o ip:10.102.26.39 start:2019-02-11 16:33:13 used:906ms
[ 89%] compile baidu/paddle-serving/predictor/proto/baidu_paddle-serving_predictor_pdserving_app_pds_option.pb.cc.o ip:10.103.191.33 start:2019-02-11 16:33:13 used:917ms
[ 89%] compile baidu/paddle-serving/predictor/proto/baidu_paddle-serving_predictor_pdserving_app_msg_data.pb.cc.o ip:10.102.26.39 start:2019-02-11 16:33:13 used:942ms
[ 89%] compile baidu/paddle-serving/predictor/proto/baidu_paddle-serving_predictor_pdclient_app_builtin_format.pb.cc.o ip:10.102.26.40 start:2019-02-11 16:33:13 used:2342ms
[ 89%] compile baidu/paddle-serving/predictor/framework/baidu_paddle-serving_predictor_pdserving_lib_mc_cache.cpp.o ip:10.103.191.32 start:2019-02-11 16:33:13 used:2200ms
[ 89%] compile baidu/paddle-serving/predictor/framework/baidu_paddle-serving_predictor_pdserving_app_mc_cache.cpp.o ip:10.103.191.32 start:2019-02-11 16:33:13 used:2258ms
[ 89%] compile baidu/paddle-serving/predictor/framework/baidu_paddle-serving_predictor_pdclient_app_mc_cache.cpp.o ip:10.103.191.34 start:2019-02-11 16:33:13 used:2192ms
[ 90%] compile baidu/paddle-serving/predictor/proto/baidu_paddle-serving_predictor_pdserving_lib_builtin_format.pb.cc.o ip:10.103.191.32 start:2019-02-11 16:33:13 used:2322ms
[ 90%] compile baidu/paddle-serving/predictor/proto/baidu_paddle-serving_predictor_pdserving_lib_xrecord_format.pb.cc.o ip:10.102.26.40 start:2019-02-11 16:33:13 used:2653ms
[ 90%] compile baidu/paddle-serving/predictor/proto/baidu_paddle-serving_predictor_pdserving_app_builtin_format.pb.cc.o ip:10.102.26.38 start:2019-02-11 16:33:13 used:2346ms
[ 90%] compile baidu/paddle-serving/predictor/src/baidu_paddle-serving_predictor_pdcodegen_app_pdcodegen.cpp.o ip:10.102.26.38 start:2019-02-11 16:33:13 used:2766ms
[ 90%] compile baidu/paddle-serving/predictor/op/baidu_paddle-serving_predictor_pdserving_lib_common_echo_op.cpp.o ip:10.103.191.36 start:2019-02-11 16:33:11 used:5550ms
[ 90%] compile baidu/paddle-serving/predictor/proto/baidu_paddle-serving_predictor_pdclient_app_echo_service.pb.cc.o ip:10.103.191.36 start:2019-02-11 16:33:11 used:5413ms
[ 90%] compile baidu/paddle-serving/predictor/output/bin/pdcodegen ip:10.103.191.33 start:2019-02-11 16:33:16 used:290ms
[ 91%] compile baidu/paddle-serving/predictor/framework/baidu_paddle-serving_predictor_pdclient_app_predictor_metric.cpp.o ip:10.103.191.32 start:2019-02-11 16:33:13 used:3836ms
[ 91%] compile baidu/paddle-serving/predictor/framework/baidu_paddle-serving_predictor_pdserving_app_predictor_metric.cpp.o ip:10.102.26.39 start:2019-02-11 16:33:13 used:3690ms
[ 91%] compile baidu/paddle-serving/predictor/framework/baidu_paddle-serving_predictor_pdclient_app_dag_view.cpp.o ip:10.103.191.36 start:2019-02-11 16:33:11 used:5831ms
[ 91%] compile baidu/paddle-serving/predictor/proto/baidu_paddle-serving_predictor_pdserving_lib_dense_service.pb.cc.o ip:10.103.191.36 start:2019-02-11 16:33:11 used:5599ms
[ 91%] compile baidu/paddle-serving/predictor/framework/baidu_paddle-serving_predictor_pdserving_lib_predictor_metric.cpp.o ip:10.103.191.33 start:2019-02-11 16:33:13 used:3786ms
[ 91%] compile baidu/paddle-serving/predictor/proto/baidu_paddle-serving_predictor_pdserving_app_sparse_service.pb.cc.o ip:10.103.191.36 start:2019-02-11 16:33:11 used:5889ms
[ 91%] compile baidu/paddle-serving/predictor/proto/baidu_paddle-serving_predictor_pdserving_lib_int64tensor_service.pb.cc.o ip:10.103.191.36 start:2019-02-11 16:33:11 used:5840ms
[ 92%] compile baidu/paddle-serving/predictor/common/baidu_paddle-serving_predictor_pdclient_app_constant.cpp.o ip:10.102.26.40 start:2019-02-11 16:33:13 used:3999ms
[ 92%] compile baidu/paddle-serving/predictor/framework/baidu_paddle-serving_predictor_pdclient_app_resource.cpp.o ip:10.103.191.36 start:2019-02-11 16:33:11 used:6085ms
[ 92%] compile baidu/paddle-serving/predictor/framework/baidu_paddle-serving_predictor_pdserving_app_memory.cpp.o ip:10.103.191.37 start:2019-02-11 16:33:13 used:4172ms
[ 92%] compile baidu/paddle-serving/predictor/proto/baidu_paddle-serving_predictor_pdserving_lib_image_classification.pb.cc.o ip:10.103.191.36 start:2019-02-11 16:33:11 used:6059ms
[ 92%] compile baidu/paddle-serving/predictor/common/baidu_paddle-serving_predictor_pdserving_lib_constant.cpp.o ip:10.102.26.39 start:2019-02-11 16:33:13 used:4004ms
[ 92%] compile baidu/paddle-serving/predictor/framework/baidu_paddle-serving_predictor_pdserving_app_op_repository.cpp.o ip:10.103.191.37 start:2019-02-11 16:33:13 used:4503ms
[ 92%] compile baidu/paddle-serving/predictor/common/baidu_paddle-serving_predictor_pdserving_app_constant.cpp.o ip:10.103.191.33 start:2019-02-11 16:33:13 used:4191ms
[ 92%] compile baidu/paddle-serving/predictor/framework/baidu_paddle-serving_predictor_pdclient_app_memory.cpp.o ip:10.103.191.32 start:2019-02-11 16:33:13 used:4426ms
[ 93%] compile baidu/paddle-serving/predictor/framework/baidu_paddle-serving_predictor_pdserving_lib_memory.cpp.o ip:10.103.191.34 start:2019-02-11 16:33:13 used:4431ms
[ 93%] compile baidu/paddle-serving/predictor/framework/baidu_paddle-serving_predictor_pdclient_app_op_repository.cpp.o ip:10.103.191.34 start:2019-02-11 16:33:13 used:4511ms
[ 93%] compile baidu/paddle-serving/predictor/framework/baidu_paddle-serving_predictor_pdserving_lib_op_repository.cpp.o ip:10.102.26.38 start:2019-02-11 16:33:13 used:4522ms
[ 93%] compile baidu/paddle-serving/predictor/framework/baidu_paddle-serving_predictor_pdclient_app_server.cpp.o ip:10.103.191.31 start:2019-02-11 16:33:13 used:5264ms
[ 93%] compile baidu/paddle-serving/predictor/framework/baidu_paddle-serving_predictor_pdserving_app_server.cpp.o ip:10.103.191.31 start:2019-02-11 16:33:13 used:5258ms
[ 93%] compile baidu/paddle-serving/predictor/op/baidu_paddle-serving_predictor_pdserving_app_dense_echo_op.cpp.o ip:10.103.191.37 start:2019-02-11 16:33:13 used:5288ms
[ 93%] compile baidu/paddle-serving/predictor/framework/baidu_paddle-serving_predictor_pdserving_lib_server.cpp.o ip:10.102.26.40 start:2019-02-11 16:33:13 used:5321ms
[ 94%] compile baidu/paddle-serving/predictor/proto/baidu_paddle-serving_predictor_pdserving_app_echo_service.pb.cc.o ip:10.103.191.32 start:2019-02-11 16:33:13 used:5466ms
[ 94%] compile baidu/paddle-serving/predictor/op/baidu_paddle-serving_predictor_pdserving_app_write_json_op.cpp.o ip:10.102.26.40 start:2019-02-11 16:33:13 used:5587ms
[ 94%] compile baidu/paddle-serving/predictor/op/baidu_paddle-serving_predictor_pdserving_lib_sparse_echo_op.cpp.o ip:10.103.191.34 start:2019-02-11 16:33:13 used:5385ms
[ 94%] compile baidu/paddle-serving/predictor/op/baidu_paddle-serving_predictor_pdserving_app_sparse_echo_op.cpp.o ip:10.102.26.39 start:2019-02-11 16:33:13 used:5279ms
[ 94%] compile baidu/paddle-serving/predictor/proto/baidu_paddle-serving_predictor_pdserving_app_image_classification.pb.cc.o ip:10.103.191.31 start:2019-02-11 16:33:13 used:5775ms
[ 94%] compile baidu/paddle-serving/predictor/op/baidu_paddle-serving_predictor_pdserving_app_op.cpp.o ip:10.102.26.40 start:2019-02-11 16:33:13 used:5581ms
[ 94%] compile baidu/paddle-serving/predictor/op/baidu_paddle-serving_predictor_pdserving_app_common_echo_op.cpp.o ip:10.103.191.32 start:2019-02-11 16:33:13 used:5550ms
[ 95%] compile baidu/paddle-serving/predictor/framework/baidu_paddle-serving_predictor_pdserving_lib_dag_view.cpp.o ip:10.103.191.37 start:2019-02-11 16:33:13 used:5657ms
[ 95%] compile baidu/paddle-serving/predictor/proto/baidu_paddle-serving_predictor_pdserving_lib_sparse_service.pb.cc.o ip:10.103.191.37 start:2019-02-11 16:33:13 used:5651ms
[ 95%] compile baidu/paddle-serving/predictor/op/baidu_paddle-serving_predictor_pdclient_app_common_echo_op.cpp.o ip:10.102.26.38 start:2019-02-11 16:33:13 used:5302ms
[ 95%] compile baidu/paddle-serving/predictor/proto/baidu_paddle-serving_predictor_pdclient_app_image_classification.pb.cc.o ip:10.103.191.31 start:2019-02-11 16:33:13 used:5957ms
[ 95%] compile baidu/paddle-serving/predictor/op/baidu_paddle-serving_predictor_pdclient_app_op.cpp.o ip:10.103.191.32 start:2019-02-11 16:33:13 used:5510ms
[ 95%] compile baidu/paddle-serving/predictor/op/baidu_paddle-serving_predictor_pdserving_lib_op.cpp.o ip:10.103.191.33 start:2019-02-11 16:33:13 used:5559ms
[ 95%] compile baidu/paddle-serving/predictor/op/baidu_paddle-serving_predictor_pdserving_lib_dense_echo_op.cpp.o ip:10.103.191.35 start:2019-02-11 16:33:13 used:5679ms
[ 96%] compile baidu/paddle-serving/predictor/proto/baidu_paddle-serving_predictor_pdserving_app_dense_service.pb.cc.o ip:10.103.191.34 start:2019-02-11 16:33:13 used:5678ms
[ 96%] compile baidu/paddle-serving/predictor/proto/baidu_paddle-serving_predictor_pdclient_app_sparse_service.pb.cc.o ip:10.103.191.34 start:2019-02-11 16:33:13 used:5690ms
[ 96%] compile baidu/paddle-serving/predictor/src/baidu_paddle-serving_predictor_pdclient_app_pdclient.cpp.o ip:10.103.191.32 start:2019-02-11 16:33:13 used:5746ms
[ 96%] compile baidu/paddle-serving/predictor/op/baidu_paddle-serving_predictor_pdclient_app_sparse_echo_op.cpp.o ip:10.103.191.33 start:2019-02-11 16:33:14 used:5515ms
[ 96%] compile baidu/paddle-serving/predictor/op/baidu_paddle-serving_predictor_pdclient_app_dense_echo_op.cpp.o ip:10.103.191.34 start:2019-02-11 16:33:13 used:5739ms
[ 96%] compile baidu/paddle-serving/predictor/proto/baidu_paddle-serving_predictor_pdserving_lib_echo_service.pb.cc.o ip:10.103.191.35 start:2019-02-11 16:33:13 used:5777ms
[ 96%] compile baidu/paddle-serving/predictor/proto/baidu_paddle-serving_predictor_pdclient_app_int64tensor_service.pb.cc.o ip:10.102.26.38 start:2019-02-11 16:33:13 used:5849ms
[ 97%] compile baidu/paddle-serving/predictor/op/baidu_paddle-serving_predictor_pdclient_app_write_json_op.cpp.o ip:10.103.191.33 start:2019-02-11 16:33:14 used:5663ms
[ 97%] compile baidu/paddle-serving/predictor/op/baidu_paddle-serving_predictor_pdserving_lib_write_json_op.cpp.o ip:10.103.191.34 start:2019-02-11 16:33:13 used:6032ms
[ 97%] compile baidu/paddle-serving/predictor/proto/baidu_paddle-serving_predictor_pdclient_app_dense_service.pb.cc.o ip:10.103.191.33 start:2019-02-11 16:33:13 used:5902ms
[ 97%] compile baidu/paddle-serving/predictor/src/baidu_paddle-serving_predictor_pdserving_lib_pdserving.cpp.o ip:10.103.191.35 start:2019-02-11 16:33:13 used:5845ms
[ 97%] compile baidu/paddle-serving/predictor/src/baidu_paddle-serving_predictor_pdserving_app_pdserving.cpp.o ip:10.102.26.39 start:2019-02-11 16:33:13 used:5877ms
[ 97%] compile baidu/paddle-serving/predictor/framework/baidu_paddle-serving_predictor_pdclient_app_workflow.cpp.o ip:10.103.191.37 start:2019-02-11 16:33:13 used:6459ms
[ 97%] compile baidu/paddle-serving/predictor/proto/baidu_paddle-serving_predictor_pdserving_app_int64tensor_service.pb.cc.o ip:10.103.191.35 start:2019-02-11 16:33:13 used:6021ms
[ 98%] compile baidu/paddle-serving/predictor/framework/baidu_paddle-serving_predictor_pdserving_app_dag_view.cpp.o ip:10.103.191.33 start:2019-02-11 16:33:14 used:5873ms
[ 98%] compile baidu/paddle-serving/predictor/framework/baidu_paddle-serving_predictor_pdserving_app_resource.cpp.o ip:10.103.191.37 start:2019-02-11 16:33:13 used:6358ms
[ 98%] compile baidu/paddle-serving/predictor/framework/baidu_paddle-serving_predictor_pdserving_lib_workflow.cpp.o ip:10.103.191.31 start:2019-02-11 16:33:13 used:6576ms
[ 98%] compile baidu/paddle-serving/predictor/framework/baidu_paddle-serving_predictor_pdserving_lib_service.cpp.o ip:10.103.191.31 start:2019-02-11 16:33:13 used:6817ms
[ 98%] compile baidu/paddle-serving/predictor/framework/baidu_paddle-serving_predictor_pdserving_lib_resource.cpp.o ip:10.103.191.34 start:2019-02-11 16:33:13 used:6459ms
[ 98%] compile baidu/paddle-serving/predictor/framework/baidu_paddle-serving_predictor_pdclient_app_dag.cpp.o ip:10.103.191.31 start:2019-02-11 16:33:13 used:7047ms
[ 98%] compile baidu/paddle-serving/predictor/framework/baidu_paddle-serving_predictor_pdserving_app_workflow.cpp.o ip:10.103.191.35 start:2019-02-11 16:33:13 used:6674ms
[ 99%] compile baidu/paddle-serving/predictor/framework/baidu_paddle-serving_predictor_pdclient_app_service.cpp.o ip:10.103.191.32 start:2019-02-11 16:33:13 used:7013ms
[ 99%] compile baidu/paddle-serving/predictor/framework/baidu_paddle-serving_predictor_pdserving_app_service.cpp.o ip:10.103.191.32 start:2019-02-11 16:33:13 used:7189ms
[ 99%] compile baidu/paddle-serving/predictor/framework/baidu_paddle-serving_predictor_pdserving_lib_dag.cpp.o ip:10.103.191.35 start:2019-02-11 16:33:13 used:7061ms
[ 99%] compile baidu/paddle-serving/predictor/framework/baidu_paddle-serving_predictor_pdserving_app_dag.cpp.o ip:10.103.191.33 start:2019-02-11 16:33:13 used:7230ms
[ 99%] compile baidu/paddle-serving/predictor/output/lib/libpdserving.a ip:10.103.191.35 start:2019-02-11 16:33:21 used:348ms
[ 99%] compile baidu/paddle-serving/predictor/output/bin/pdclient ip:10.102.26.40 start:2019-02-11 16:33:20 used:2319ms
[ 99%] compile baidu/paddle-serving/predictor/output/bin/pdserving ip:10.103.191.35 start:2019-02-11 16:33:21 used:2358ms
[100%] compile baidu/paddle-serving/predictor/output/test/test_pdserving ip:10.102.26.40 start:2019-02-11 16:33:21 used:2233ms
[INFO] build complete: compile total 714, takes 15s
[INFO] build finished 2019-02-11 16:33:23
############################## Pulling artifacts to directory output ###############################
[OK] baidu/paddle-serving/predictor/output/bin/pdclient
[OK] baidu/paddle-serving/predictor/output/bin/pdcodegen
[OK] ffdfadb9c3392d9e09c573268e8c2c09_main.tar
[OK] baidu/paddle-serving/predictor/output/test/test_pdserving
[OK] baidu/paddle-serving/predictor/output/lib/libpdserving.a
[OK] baidu/paddle-serving/predictor/output/bin/pdserving
###################################################### build summary(total takes : 38s) success ######################################################
=> task info
- repo name : baidu/paddle-serving/predictor
- compile master : 10.103.191.36
- taskid : ffdfadb9c3392d9e09c573268e8c2c09
- user : wangguibao
- command : bcloud build
- client start : 2019-02-11 16:32:50
- client stop : 2019-02-11 16:33:28
=> analyze dependency
- repo count : 25
- time consuming : 3.849s
- repo check : 0.004s
- stable repo : 4 (16.0%)
- 3rd repo : 11 (44.0%)
- others repo : 10 (40.0%)
=> queue
- time consuming : 0.000s
=> create workspace
- time consuming : 2.541s
- master : 1.271s
- slave(max) : 1.270s
- dep repo hit cache : 100.0%
=> preprocess
- gen sources : 0.662s
- proto and idl : 0.572s
- publish header files : 4.054s
- pack result : 3.557s
=> compile
- time consuming : 15.917s
- target cache info
all : 628/714 (88.0%)
object file : 528/609 (86.7%)
static lib : 100/101 (99.0%)
bin : 0/3 (0.0%)
ut : 0/1 (0.0%)
=> run ut
- time consuming : 0.000s
=> client download artifacts
- download : 4.699s
- run release.bcloud : 0.000s
=> prodserver upload artifacts
- download : 0.000s
- run release.bcloud : 0.000s
- upload : 0.000s
=> More information refers to http://buildcloud.baidu.com/bcloud/5-best-practice#5_1_6_bcloud_build_time
######################################################################################################################################################
# #
# WARNING!!! #
# Stable branch was provided by following module(s), please change to stable dependency ASAP. #
# How to fix: http://ihelp.baidu.com/bcloud/119 #
# #
# ---------------------------------------------------------------------------------------------------------------------------------- #
# #
# CONFIGS('baidu/base/baidu-rpc@xxx') --> CONFIGS('baidu/base/baidu-rpc@stable') #
# CONFIGS('public/bthread@xxx') --> CONFIGS('baidu/base/bthread@stable') #
# CONFIGS('baidu/im-common/mempool@xxx') --> CONFIGS('baidu/im-common/mempool@stable') #
# #
# #
######################################################################################################################################################
Global:
  tool: bcloud
Default:
  profile: [change]
Profiles:
  - profile:
      name: change
      command: bcloud ut
      release: true
#include "common/constant.h"
namespace baidu {
namespace paddle_serving {
namespace predictor {
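// Default values for the predictor's command-line flags; the matching DECLAREs live in
// common/constant.h.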
DEFINE_bool(use_parallel_infer_service, false, "");
DEFINE_int32(el_log_level, 16, "");
DEFINE_int32(idle_timeout_s, 16, "");
DEFINE_int32(port, 8010, "");
DEFINE_string(workflow_path, "./conf", "");
DEFINE_string(workflow_file, "workflow.conf", "");
DEFINE_string(inferservice_path, "./conf", "");
DEFINE_string(inferservice_file, "service.conf", "");
DEFINE_string(logger_path, "./conf", "");
DEFINE_string(logger_file, "log.conf", "");
DEFINE_string(resource_path, "./conf", "");
DEFINE_string(resource_file, "resource.conf", "");
DEFINE_bool(enable_yacl, false, "enable yacl");
DEFINE_string(yacl_module_name, "predictor", "yacl module name");
DEFINE_string(yacl_param_dump_file, "./data/yacl_param_list.txt", "yacl param dump file path");
DEFINE_bool(enable_mc_cache, false, "enable mc cache");
DEFINE_bool(enable_nshead_protocol, false, "enable nshead protocol in server side");
DEFINE_string(nshead_protocol, "itp", "type of nshead protocol; supported: itp, nova_pbrpc, public_pbrpc, nshead_mcpack");
DEFINE_int32(max_concurrency, 0, "Limit of request processing in parallel, 0: unlimited");
DEFINE_int32(num_threads, 0, "Number of pthreads that the server runs on; unchanged if this value <= 0");
DEFINE_int32(reload_interval_s, 10, "");
DEFINE_bool(enable_model_toolkit, false, "enable model toolkit");
DEFINE_string(enable_protocol_list, "baidu_std nshead", "set protocol list");
} // predictor
} // paddle_serving
} // baidu
// Register the predictor's error codes with baidu-rpc so they map to readable messages.
BAIDU_REGISTER_ERRNO(baidu::paddle_serving::predictor::ERR_INTERNAL_FAILURE,
                     "Paddle Serving Framework Internal Error.");
BAIDU_REGISTER_ERRNO(baidu::paddle_serving::predictor::ERR_MEM_ALLOC_FAILURE,
                     "Paddle Serving Memory Alloc Error.");
BAIDU_REGISTER_ERRNO(baidu::paddle_serving::predictor::ERR_OVERFLOW_FAILURE,
                     "Paddle Serving Array Overflow Error.");
BAIDU_REGISTER_ERRNO(baidu::paddle_serving::predictor::ERR_OP_INFER_FAILURE,
                     "Paddle Serving Op Inference Error.");
#ifndef BAIDU_PADDLE_SERVING_PREDICTOR_CONSTANT_H
#define BAIDU_PADDLE_SERVING_PREDICTOR_CONSTANT_H
#include "common/inner_common.h"
namespace baidu {
namespace paddle_serving {
namespace predictor {
// GFLAGS Variables
DECLARE_bool(use_parallel_infer_service);
DECLARE_int32(el_log_level);
DECLARE_int32(idle_timeout_s);
DECLARE_int32(port);
DECLARE_string(workflow_path);
DECLARE_string(workflow_file);
DECLARE_string(inferservice_path);
DECLARE_string(inferservice_file);
DECLARE_string(logger_path);
DECLARE_string(logger_file);
DECLARE_string(resource_path);
DECLARE_string(resource_file);
DECLARE_bool(enable_mc_cache);
DECLARE_bool(enable_nshead_protocol);
DECLARE_string(nshead_protocol);
DECLARE_int32(max_concurrency);
DECLARE_int32(num_threads);
DECLARE_int32(reload_interval_s);
DECLARE_bool(enable_model_toolkit);
DECLARE_string(enable_protocol_list);
// STATIC Variables
static const char* START_OP_NAME = "startup_op";
// ERRORCODE
enum {
    // internal error
    ERR_INTERNAL_FAILURE = -5000,
    ERR_MEM_ALLOC_FAILURE = -5001,
    ERR_OVERFLOW_FAILURE = -5002,
    // op error
    ERR_OP_INFER_FAILURE = -5100,
    // no error
    ERR_OK = 0,
    // internal ignore
    ERR_IGNORE_FAILURE = 5000,
    // op ignore
    ERR_OP_IGNORE_FAILURE = 5100,
};
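// Negative codes are hard failures (framework/op errors), positive codes mark failures
// the framework may choose to ignore, and ERR_OK (0) means success.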
static const size_t MAX_WORKFLOW_NUM_IN_ONE_SERVICE = 20;
static const uint32_t DEFAULT_CACHE_CAPACITY = 10000;
static const uint32_t DEFAULT_CACHE_UNITSIZE = 8192;
} // predictor
} // paddle_serving
} // baidu
#endif
#ifndef BAIDU_PADDLE_SERVING_PREDICTOR_INNER_COMMON_H
#define BAIDU_PADDLE_SERVING_PREDICTOR_INNER_COMMON_H
#include <sys/types.h>
#include <unistd.h>
#include <stdlib.h>
#include <stdint.h>
#include <pthread.h>
#include <strings.h>
#include <getopt.h>
#include <google/protobuf/text_format.h>
#include <boost/unordered_map.hpp>
#include <boost/function.hpp>
#include <boost/algorithm/string.hpp> // for boost::split&trim
#include <baas-lib-c/baas.h>
#include <baas-lib-c/giano_mock_helper.h>
#include <gflags/gflags.h>
#include <base/logging.h>
#include <base/time.h>
#include <base/object_pool.h>
#include <baidu/rpc/channel.h>
#include <baidu/rpc/server.h>
#include <baidu/rpc/policy/giano_authenticator.h>
#include <bthread.h>
#include <error.h>
#include "Configure.h"
#include <comlog/comlog.h>
#include "common/utils.h"
#include "common/types.h"
#include "common/constant.h"
#endif
#ifndef BAIDU_PADDLE_SERVING_PREDICTOR_MACROS_H
#define BAIDU_PADDLE_SERVING_PREDICTOR_MACROS_H
#include "common/inner_common.h"
namespace baidu {
namespace paddle_serving {
namespace predictor {
#ifndef CATCH_ANY_AND_RET
#define CATCH_ANY_AND_RET(errno) \
    catch (...) { \
        LOG(FATAL) << "exception caught"; \
        return errno; \
    }
#endif
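// Thread-primitive abstraction: with -DUSE_PTHREAD the THREAD_* names map to native
// pthread types and functions; otherwise they map to baidu-rpc's bthread equivalents,
// so framework code can switch threading models without source changes.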
#ifdef USE_PTHREAD
#define THREAD_T pthread_t
#define THREAD_KEY_T pthread_key_t
#define THREAD_MUTEX_T pthread_mutex_t
#define THREAD_KEY_CREATE pthread_key_create
#define THREAD_SETSPECIFIC pthread_setspecific
#define THREAD_GETSPECIFIC pthread_getspecific
#define THREAD_CREATE pthread_create
#define THREAD_CANCEL pthread_cancel
#define THREAD_JOIN pthread_join
#define THREAD_KEY_DELETE pthread_key_delete
#define THREAD_MUTEX_INIT pthread_mutex_init
#define THREAD_MUTEX_LOCK pthread_mutex_lock
#define THREAD_MUTEX_UNLOCK pthread_mutex_unlock
#define THREAD_MUTEX_DESTROY pthread_mutex_destroy
#define THREAD_COND_T pthread_cond_t
#define THREAD_COND_INIT pthread_cond_init
#define THREAD_COND_SIGNAL pthread_cond_signal
#define THREAD_COND_WAIT pthread_cond_wait
#define THREAD_COND_DESTROY pthread_cond_destroy
#else
#define THREAD_T bthread_t
#define THREAD_KEY_T bthread_key_t
#define THREAD_MUTEX_T bthread_mutex_t
#define THREAD_KEY_CREATE bthread_key_create
#define THREAD_SETSPECIFIC bthread_setspecific
#define THREAD_GETSPECIFIC bthread_getspecific
#define THREAD_CREATE bthread_start_background
#define THREAD_CANCEL bthread_stop
#define THREAD_JOIN bthread_join
#define THREAD_KEY_DELETE bthread_key_delete
#define THREAD_MUTEX_INIT bthread_mutex_init
#define THREAD_MUTEX_LOCK bthread_mutex_lock
#define THREAD_MUTEX_UNLOCK bthread_mutex_unlock
#define THREAD_MUTEX_DESTROY bthread_mutex_destroy
#define THREAD_COND_T bthread_cond_t
#define THREAD_COND_INIT bthread_cond_init
#define THREAD_COND_SIGNAL bthread_cond_signal
#define THREAD_COND_WAIT bthread_cond_wait
#define THREAD_COND_DESTROY bthread_cond_destroy
#endif
} // predictor
} // paddle_serving
} // baidu
#endif
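// A minimal sketch of the THREAD_* abstraction defined above; whether pthread
// or bthread primitives are selected depends only on the USE_PTHREAD compile
// flag. The names g_demo_mut / demo_locked_increment are illustrative only.
#include "common/macros.h"

static THREAD_MUTEX_T g_demo_mut;
static int g_demo_counter = 0;

void demo_mutex_init() {
    THREAD_MUTEX_INIT(&g_demo_mut, NULL);
}

void demo_locked_increment() {
    THREAD_MUTEX_LOCK(&g_demo_mut);
    ++g_demo_counter;  // critical section guarded by either a pthread or a bthread mutex
    THREAD_MUTEX_UNLOCK(&g_demo_mut);
}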
#ifndef BAIDU_PADDLE_SERVING_PREDICTOR_TYPES_H
#define BAIDU_PADDLE_SERVING_PREDICTOR_TYPES_H
namespace baidu {
namespace paddle_serving {
namespace predictor {
typedef char* Byte;
typedef size_t Size;
typedef const char* ConstByte;
struct Sequence {
Byte data;
Size size;
};
} // predictor
} // paddle_serving
} // baidu
#endif // BAIDU_PADDLE_SERVING_PREDICTOR_TYPES_H
#ifndef BAIDU_PADDLE_SERVING_PREDICTOR_UTILS_H
#define BAIDU_PADDLE_SERVING_PREDICTOR_UTILS_H
#include "common/macros.h"
#include "common/inner_common.h"
namespace baidu {
namespace paddle_serving {
namespace predictor {
class TimerFlow {
public:
static const int MAX_SIZE = 1024;
TimerFlow() {
init();
}
void init() {
_csize = 0;
_name = NULL;
_started = false;
_auto = false;
}
TimerFlow(const char* name) : _csize(0), _name(name) {
_last = _start = base::cpuwide_time_us();
_auto = true;
_started = true;
}
void set_name(const char* name) {
_name = name;
}
void start() {
_last = _start = base::cpuwide_time_us();
_started = true;
}
bool check(const char* tag) {
if (!_started) {
LOG(WARNING) << "Timer not started yet!";
return false;
}
uint64_t now = base::cpuwide_time_us();
if (!appendf("%s:%lu|", tag, now - _last)) {
LOG(WARNING)
<< "Failed check timer: " << _name
<< ", value = [" << tag << ":"
<< (now - _last) << "]!" << noflush;
return false;
}
_last = now;
return true;
}
std::string info() {
return std::string(_buf);
}
void end() {
uint64_t now = base::cpuwide_time_us();
if (!appendf("total:%lu", now - _start)) {
LOG(WARNING) << "Failed dump time_info[" << _name << "]";
}
_started = false;
}
~TimerFlow() {
if (!_auto) {
return;
}
uint64_t now = base::cpuwide_time_us();
if (appendf("total:%lu,%s", now - _start, _name)) {
LOG(INFO)
<< " " << _name << "_tc=[" << _buf << "]";
} else {
LOG(WARNING) << "Failed dump time_info[" << _name << "]";
}
}
private:
bool appendf(const char* fmt, ...) {
va_list ap;
va_start(ap, fmt);
try {
int bytes = vsnprintf(_buf + _csize, MAX_SIZE - _csize, fmt, ap);
if (bytes >= MAX_SIZE - _csize || bytes < 0) {
LOG(WARNING) << "Overflow when appendf!" << noflush;
va_end(ap);  // release the va_list before the early return
return false;
}
_csize += bytes;
} CATCH_ANY_AND_RET(false);
va_end(ap);
return true;
}
private:
char _buf[1024];
int _csize;
uint64_t _start;
uint64_t _last;
const char* _name;
bool _started;
bool _auto;
};
template<bool flag>
struct derived_from_message {};
template<typename T, typename TBase>
class TIsDerivedFromB {
private:
static uint8_t check(TBase*) {
return 1;
}
static uint32_t check(void*) {
return 0;
}
public:
enum {
// function call cannot appear in a constant-expression
RESULT = (sizeof(uint8_t) == sizeof(check((T*)(NULL)))),
};
};
template<typename TBase>
class IsDerivedFrom {
private:
static bool check(TBase*) {
return true;
}
static bool check(void*) {
return false;
}
public:
template<typename T>
static bool yes(T* x) {
return check(x);
}
};
} // predictor
} // paddle_serving
} // baidu
#endif
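// A minimal usage sketch of the TimerFlow helper above; the timer and tag
// names are illustrative and the timed work is elided.
#include "common/utils.h"

void demo_profiled_call() {
    baidu::paddle_serving::predictor::TimerFlow timer("demo_op");
    // ... phase one work ...
    timer.check("phase1");  // appends "phase1:<microseconds since last mark>|"
    // ... phase two work ...
    timer.check("phase2");
    // the named constructor enables auto reporting, so the destructor appends
    // the total and logs "demo_op_tc=[...]"
}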
workflow_type: Sequence
[@Node]
name: dense_echo_op
type: DenseEchoOp
workflow_type: Sequence
[@Node]
name: echo_op
type: CommonEchoOp
workflow_type: Sequence
[@Node]
name: fluid_dense_op
type: FluidDenseOp
--port=8010
--noenable_rd_dict
workflow_type: Sequence
[@Node]
name: image_reader_op
type: ImageReaderOp
[@Node]
name: image_classify_op
type: ImageClassifyOp
[.@Depend]
name: image_reader_op
mode: RO
[@Node]
name: write_json_op
type: WriteJsonOp
[.@Depend]
name: image_classify_op
mode: RO
COMLOG_LEVEL : 16
COMLOG_DEVICE_NUM : 2
COMLOG_DEVICE0 : TRACE
COMLOG_DEVICE1 : WARNING
TRACE_OPEN : 1
TRACE_TYPE : FILE
TRACE_PATH : ./log
TRACE_NAME : pdserving.log
TRACE_SYSLEVEL : 0
TRACE_SELFLEVEL : NOTICE,TRACE,DEBUG
TRACE_SIZE : 4000
TRACE_SPLITE_TYPE : DATECUT
TRACE_DATA_CUTTIME : 60
TRACE_DATA_CRONOCUT : 1
TRACE_RESERVED1 : %Y%m%d%H
TRACE_LAYOUT : %L: %A %T %R
TRACE_QUOTA_DAY : 2
WARNING_OPEN : 1
WARNING_TYPE : FILE
WARNING_PATH : ./log
WARNING_NAME : pdserving.log.wf
WARNING_SYSLEVEL : 0
WARNING_SELFLEVEL : WARNING,FATAL
WARNING_SIZE : 4000
WARNING_SPLITE_TYPE : DATECUT
WARNING_DATA_CUTTIME : 60
WARNING_DATA_CRONOCUT : 1
WARNING_RESERVED1 : %Y%m%d%H
WARNING_LAYOUT : %L: %A %T %R
WARNING_QUOTA_DAY : 2
#[@MODEL]
#MODEL_TYPE : PADDLE_FLUID
#MODEL_NAME : fluid_model_test
#TRAINER_CONFIG: ./data/model/paddle/fluid/word2vec.config
#TRAINER_PARAMETER: ./data/model/paddle/fluid/word2vec.dict
#RELOAD_CHECK_FILE : ./data/model/paddle/fluid_reload_flag
#MODEL_TIME_FILE : ./data/model/paddle/fluid_time_file
[@MODEL]
MODEL_TYPE : PADDLE_FLUID
MODEL_NAME : image_classification_resnet
#TRAINER_PARAMETER: ./data/model/paddle/fluid/image_classification_resnet
#TRAINER_PARAMETER: ./data/model/paddle/fluid/se_resnext50
#TRAINER_PARAMETER: ./data/model/paddle/fluid/resnet_50
TRAINER_PARAMETER: ./data/model/paddle/fluid/SE_ResNeXt50_32x4d
RELOAD_CHECK_FILE : ./data/model/paddle/fluid_reload_flag
MODEL_TIME_FILE : ./data/model/paddle/fluid_time_file
[@rd_dict]
NODE_TYPE:SINGLE
DICT_NAME: demo
DICT_FILE: ./rd_dict.dict
[@rd_dict]
NODE_TYPE:SINGLE
DICT_NAME: demo1
DICT_FILE: ./rd_dict.dict
# model toolkit conf
model_manager_path: ./conf
model_manager_file: model_toolkit.conf
rd_dict_conf_path: ./conf
rd_dict_conf_file: rd_dict.conf
[@Service]
name: BuiltinDenseFormatService
@workflow: workflow1
# When enabled, the framework maps each request to the corresponding workflow according to the value of the request_field_key field in the request
# enable_map_request_to_workflow: 1
# request_field_key: cmd
# If the value of the request_field_key field in the request equals request_field_value, that workflow is executed
# request_field_value values must not be duplicated
# @workflow: workflow1
# @request_field_value: /titleq/wise/ctr
# @workflow: workflow2
# @request_field_value: /titleq/pc/ctr
# @workflow: workflow3
# @request_field_value: /titleq/xxx/ctr
[@Service]
name: BuiltinSparseFormatService
@workflow: workflow2
[@Service]
name: BuiltinTestEchoService
@workflow: workflow3
workflow_type: Sequence
[@Node]
name: sparse_echo_op
type: SparseEchoOp
[.@Depend]
name: startup_op
mode: RO
[@Engine]
Name : FCR_WISE_NONLINEAR_DNN_MODEL
[.@Version]
Type : ABACUS_DNN
VersionFile: ./data/abacus/version
VersionType: abacus_version
ReloadableMeta: ./data/abacus/join_model_nonlinear
ReloadableType: timestamp_ne
ModelDataPath: ./conf/cvm_model/dnn_nonlinear_model.conf
RuntimeThreadNum: 0
BatchInferSize: 0
EnableBatchAlign: 0
[.@Version]
Type : ABACUS_DNN
VersionFile: ./data/abacus/version
VersionType: abacus_version
ReloadableMeta: ./data/abacus/join_model_nonlinear
ReloadableType: timestamp_ne
ModelDataPath: ./conf/cvm_model/ubm_nonlinear_dnn_model.conf
RuntimeThreadNum: 0
BatchInferSize: 0
EnableBatchAlign: 0
[@Engine]
Name : FCR_NONLINEAR_DNN_MT_MODEL
[.@Version]
Type : ABACUS_DNN
VersionFile: ./data/abacus/version
VersionType: abacus_version
ReloadableMeta: ./data/abacus/join_model_nonlinear
ReloadableType: timestamp_ne
ModelDataPath: ./conf/cvm_model/ubm_mt_nonlinear_dnn_model.conf
RuntimeThreadNum: 0
BatchInferSize: 0
EnableBatchAlign: 0
[@Engine]
Name : FCR_MT_MODEL_NO_FPGA
Type : ABACUS_DNN
ReloadableMeta: ./data/abacus/join_model_nonlinear
ReloadableType: timestamp_ne
ModelDataPath: ./conf/cvm_model/ubm_mt_no_fpga_dnn_model.conf
RuntimeThreadNum: 0
BatchInferSize: 0
EnableBatchAlign: 0
[@Engine]
Name : FCR_NONLINEAR_DNN_AD_MODEL
Type : ABACUS_DNN
ReloadableMeta: ./data/abacus/join_model_nonlinear
ReloadableType: timestamp_ne
ModelDataPath: ./conf/cvm_model/ubm_ad_nonlinear_dnn_model.conf
RuntimeThreadNum: 0
BatchInferSize: 0
EnableBatchAlign: 0
[@Workflow]
name: workflow1
path: ./conf
file: dense_dag.conf
[@Workflow]
name: workflow2
path: ./conf
file: sparse_dag.conf
[@Workflow]
name: workflow3
path: ./conf
file: echo_dag.conf
#!/bin/bash
# starting path
start_path="$(pwd)"
sh build.sh stop
# change into the cts directory
cd "$(dirname "$0")"/
if [[ "x"$@ = x*--module_name=* ]]
then
all_arg=$@
tmp=${all_arg##*--module_name=}
mod_name=${tmp%% *}
sed -i "/^run_mod=/s/run_mod.*/run_mod=$mod_name/" install-all.conf
else
sed -i "/^run_mod=/s/run_mod.*/run_mod=lr_engine/" install-all.conf
fi
env_num=`grep env_num install-all.conf | awk -F '=' '{print $2}'`
# set environment variables
export PATH="$(pwd)"/frame/tools/python27/bin:$PATH
export PYTHONPATH="$(pwd)"
alias | grep "alias cp=" >/dev/null
if [ $? -eq 0 ];then
unalias cp
fi
# return to the starting path and run main.py
cd "$start_path"
mem_free=`free -m | awk '{print $4}'| head -3 | awk 'END{print}'`
let thread_max=$mem_free/5000
if [ $thread_max -eq 0 ];then
echo "系统内存不足, 不能运行任何case"
exit 1
fi
if [ $thread_max -lt $env_num ];then
env_num=$thread_max
echo "目前系统内存最多支持运行$env_num个线程"
fi
temp_args="--paral=$env_num"
python "$(dirname "$0")"/control/main.py $temp_args $@
ret=$?
sh build.sh stop
if [ $ret -ne 0 ]
then
exit 1
fi
#!/bin/bash
function cfont()
{
while (($#!=0))
do
case $1 in
-b)
echo -ne " ";
;;
-t)
echo -ne "\t";
;;
-n)
echo -ne "\n";
;;
-black)
echo -ne "\033[30m";
;;
-red)
echo -ne "\033[31m";
echo -ne "\033[1m";
;;
-green)
echo -ne "\033[32m";
echo -ne "\033[1m";
;;
-yellow)
echo -ne "\033[33m";
;;
-blue)
echo -ne "\033[34m";
echo -ne "\033[1m";
;;
-purple)
echo -ne "\033[35m";
;;
-cyan)
echo -ne "\033[36m";
echo -ne "\033[1m";
;;
-white|-gray)
echo -ne "\033[37m";
;;
-reset)
echo -ne "\033[0m";
;;
-h|-help|--help)
echo "Usage: cfont -color1 message1 -color2 message2 ...";
echo "eg: cfont -red [ -blue message1 message2 -red ]";
;;
*)
echo -ne "$1"
;;
esac
shift
done
echo -ne "\033[0m";
}
cur_path=`pwd`
work_root=${cur_path%%/baidu/*}
CITOOLS="${work_root}/baidu/fengchao-qa/citools"
if [ ! -e ${CITOOLS}/lib/localbuild_lib.sh ];then
cfont -blue "=============== localbuild_lib.sh is not exist, downloading ...================" -n
git clone ssh://git@icode.baidu.com:8235/baidu/fengchao-qa/citools $CITOOLS >/dev/null
fi
source ${CITOOLS}/lib/localbuild_lib.sh
function get_framework_baseenv()
{
onlineFtp="ftp://tc-orp-app2.tc.baidu.com/home/heqing"
wgetOptions="--tries=3 --retry-connrefused -r -l0 -nv --limit-rate=50m -nH"
cfont -blue "##################################################" -n ;
cfont -blue "### build pdserving_framework xts base env ###" -n ;
cfont -blue "##################################################" -n ;
cfont -reset;
run_path="$(grep "run_path" "./install-all.conf" | cut -d "=" -f 2)"
cd $run_path
wget $wgetOptions --cut-dirs=4 "$onlineFtp"/scmbak/pdserving/framework_tester -o wget.log
ret=$?
retry=0
while [[ $retry -lt 3 ]]; do
if [[ $ret -eq 0 ]];then
break;
fi
wget $wgetOptions --cut-dirs=4 "$onlineFtp"/scmbak/pdserving/framework_tester -o wget.log
ret=$?
((retry++))
done
[[ $ret -ne 0 ]] && return 1
cfont -blue "[XTS] " -green "[ finish download: pdserving-framework ]" -n
cd -
return 0
}
# set up the cts environment
function build_ctsenv()
{
# set up the cts environment
if [ -z $1 ]; then
ENV_NUM=0
else
ENV_NUM=$1
fi
# update installation config settings
hostname=$(uname -n)
username="$(echo "`whoami`" | awk '{print $1}')"
LIBPATH=${PWD}/lib
echo "libpath is : $LIBPATH"
# generate install-all.conf
{
echo "[config]"
echo "host=$hostname"
echo "user=$username"
echo "passwd=CAPHI2008"
echo "env_file=${PWD}/envfile"
echo "lib_path=$LIBPATH"
echo "run_path=${PWD}/run_env"
echo "env_num=$ENV_NUM"
} > ./install-all.conf
# install the cts environment
{
cfont -blue "============= predictor env install =============" -n
rm -rf run_env && mkdir -p run_env
echo "current path is :${cur_path}"
#get_framework_baseenv
#if [ $? -ne 0 ]; then
# echo "pdserving-framework is not ready!!!"
# exit 1
#fi
mkdir -p run_env/predictor/bin
mkdir -p run_env/predictor/conf
# copy pdserving into the environment
[[ -e ../output/bin/pdserving ]] && cp -rf ../output/bin/pdserving run_env/predictor/bin/predictor
[[ -e ../output/lib ]] && cp -rf ../output/lib/ run_env/predictor/
[[ -e ../conf ]] && cp -rf ../conf/* run_env/predictor/conf/
# build parallel environments
if [ $ENV_NUM -ne 0 ]; then
cfont -blue "=============== build multi env ===============" -n
mkdir -p ${PWD}/run_env/1
mv -f ${PWD}/run_env/framework_tester ${PWD}/run_env/1/framework_tester
mv -f ${PWD}/run_env/model ${PWD}/run_env/1/model
mv -f ${PWD}/run_env/dict ${PWD}/run_env/1/dict
for ((i=2; i<=$ENV_NUM; i=i+1))
do
cp -rf ${PWD}/run_env/1 ${PWD}/run_env/$i
done
fi
}
# install the XTS environment
{
echo "now pwd is :`pwd`"
cfont -blue "=============== XTS(cts) install ================" -n
svn co https://svn.baidu.com/general-test/trunk/xts/frame frame> /dev/null
svn co https://svn.baidu.com/general-test/trunk/xts/im/core/control control>/dev/null
echo "now dir list is :`ls`"
cd lib
svn co https://svn.baidu.com/general-test/trunk/xts/im/core/lib/commonlib commonlib>/dev/null
cd -
}
cfont -blue "[XTS] " -green "[ finish XTS(cts) install ]" -n
onlineFtp="ftp://tc-orp-app2.tc.baidu.com/home/heqing"
wgetOptions="--tries=3 --retry-connrefused -r -l0 -nv --limit-rate=50m -nH"
# install bidinfo and the base protolib
{
cd lib
[[ -e bidinfo ]] && rm -rf bidinfo
[[ -e protolib ]] && rm -rf protolib
[[ -e pluginlib ]] && rm -rf pluginlib
wget $wgetOptions --cut-dirs=5 "$onlineFtp"/scmbak/common_lib/pdserving_cts/bidinfo -o wget.log
wget $wgetOptions --cut-dirs=5 "$onlineFtp"/scmbak/common_lib/pdserving_cts/protolib -o wget.log
wget $wgetOptions --cut-dirs=6 "$onlineFtp"/scmbak/common_lib/pdserving_cts/framework/pluginlib -o wget.log
cd -
}
# install protolib
{
cfont -blue "============== protoc install ==================" -n
[[ -e protoc_tools ]] && rm -rf protoc_tools
wget $wgetOptions --cut-dirs=5 "$onlineFtp"/scmbak/common_lib/pdserving_cts/protoc_tools -o wget.log
[[ -e ../proto ]] && cp -rf ../proto/* ./protoc_tools/proto/
cd protoc_tools
chmod +x ./protobuf-2.4.1/bin/protoc
chmod +x ./protobuf-2.4.1/lib/*
[[ -e protolib ]] && rm -rf protolib
mkdir ./protolib
export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:`pwd`/protobuf-2.4.1/lib
./protobuf-2.4.1/bin/protoc -I=./proto --python_out=./protolib/ ./proto/*.proto
cd -
cp ./protoc_tools/protolib/*.py ./lib/protolib/
}
cfont -reset
return 0
}
function get_pid
{
local prog=$1
local user=$2
local prog_path=$3
local ret=-1
local trash_path="/home/$(echo "`whoami`" | awk '{print $1}')/.__trash/"
pids=`pgrep $prog -u $user`
for pid in $pids
do
tmp_path=`ls -l /proc/$pid/exe 2>/dev/null | awk '{print $NF}'`
if [ "$tmp_path" == "$prog_path" ] || [ ! -e $tmp_path ] || [ 0 == `echo $tmp_path | grep -qs $trash_path;echo $?` ]
then
echo $pid
ret=0
fi
done
return $ret
}
function kill_prog()
{
name=$1
username=$2
prog_path=$3
pids=`get_pid $name $username $prog_path`
echo $pids>/dev/null
if [ $? -eq 0 ] ;then
for pid in $pids
do
#echo "$name,$pid"
kill -9 $pid
done
fi
}
function kill_predictor_prog()
{
username="$(echo "`whoami`" | awk '{print $1}')"
if [ -f install-all.conf ]
then
env_num=`grep env_num= install-all.conf|awk -F '=' '{print $2}'`
else
env_num=0
fi
for ((i=0; i<=$env_num; i=i+1))
do
if [ $i -eq 0 ]
then
run_path="${PWD}/run_env"
else
run_path="${PWD}/run_env/$i"
fi
kill_prog predictor $username $run_path/framework_tester/bin/predictor
done
}
function clean_ctsenv()
{
rm -rf install-all.conf ccover
rm -rf run_env fail_env output log frame control lib/commonlib lib/protolib
return 0
}
if [ $# -eq 1 ] && [ $1 == "clean" ]
then
clean_ctsenv
exit 0
fi
if [ $# -eq 1 ] && [ $1 == "stop" ]
then
kill_predictor_prog
exit 0
fi
clean_ctsenv
build_ctsenv "$1"
exit $?
#!/usr/bin/env python
# -*- coding:gbk -*-
"""
case created by template
"""
import sys
sys.path.append(r'./lib/protolib')
print("sys path is : %s " % str(sys.path))
import os
import json
import commands
from lib.protolib.dense_service_pb2 import Request
from lib.protolib.dense_service_pb2 import Response
from lib.pluginlib.plugin_util import Util as ut
from lib.pluginlib.plugin_case import PluginCase
from lib.pluginlib.plugin_module import PluginModule
from lib.pluginlib.plugin_apistub import ApiStub
class TestDenseService(PluginCase):
"""test wtitleq case class"""
OWNER="zhangwenbo03"
quick=['ALL']
low=[]
daily=[]
ignorelist=[]
RESTART=True
def setUp(self):
"""setup something before run case"""
pass
def tearDown(self):
"""tear down after run case"""
self.t.stop()
print "stop finished"
pass
def testDemoCase(self):
"""demo case"""
req = Request()
denseIns = req.instances.add()
denseIns.features.append(10)
denseIns.features.append(13)
denseIns.features.append(200)
service = "BuiltinDenseFormatService"
type = "debug"
ut_obj = ut()
dict_val = ut_obj.pb2dict(req)
json_val = ut_obj.dict2json(dict_val)
self.t.restart()
self.t.tester.sendJsonData(json_val, service, type)
print "execute demo case"
"""plugin register """
from lib.plugin_tester import *
#!/usr/bin/env python
# -*- coding:gbk -*-
"""
Registration classes: RegxxxConfData, RegxxxReq, RegxxxXbox, RegxxxAd, where xxx is the component name
"""
from lib.pluginlib.plugin_common import ConfData
from lib.pluginlib.plugin_common import TreeConfData
from lib.pluginlib.plugin_common import CommonIndex
class RegpredictorConfData(object):
"""
Register the conf and data files of the wtitleq component
"""
def __init__(self, path):
self.path = path
self.conf = {}
self.data = {}
self.conf['ub'] = ConfData(path=self.path + "/conf/ub.conf", connect_flag=":")
self.data['lr_model'] = CommonIndex(path=self.path + \
'/data/lr-model/wtitleq_model_file.sign',
col_list=['key', 'value'],
format='B')
class RegpredictorReq(object):
"""
Register the default request of the wtitleq component
"""
def __init__(self):
self.plugin_term = {}
cmd_tag = 'cmd_tag0'
query_schema_list = []
query_value_list = []
pair_schema_list = ['query',
'wadptid',
'wbwsid',
'omit_buf',
'title',
'desc',
'cmatch',
'bidword',
'dynamic_new_title']
pair_value_list = ['鲜花',
'0',
'3',
'鲜花',
'鲜花%2C本地实体鲜花店100%25保证%21',
'鲜花品质100%25%2C主城最快2小时送到%2C全天24时在线订花%21市区内免费送花上门%21鲜%2E%2E',
'223',
'鲜花',
'美丽鲜花']
cmd_str = '/titleq/wise/ctr'
req_term = {"query_schema": query_schema_list,
"pair_schema": pair_schema_list,
"query_value": query_value_list,
"pair_value": pair_value_list,
"cmd": cmd_str}
self.plugin_term.update({cmd_tag: req_term})
self.plugin_list = self.plugin_term.keys()
class RegpredictorNewXbox(object):
"""
Register the xbox of the wtitleq component
"""
def __init__(self):
self.need_xbox = True
self.stub_conf = 'xboxstub.conf'
self.stub_name = 'xboxstub'
self.conf_list = ['xbox-wtitleq_pegasus.conf']
class RegpredictorAd(object):
"""
Register whether the wtitleq component needs to construct an ad library
"""
def __init__(self):
self.need_adstub = False
#pragma once
#include <errno.h>
#include <vector>
#include <deque>
#include <base/atomicops.h>
#include <comlog/comlog.h>
#include "common/inner_common.h"
#include "framework/infer_data.h"
#include "framework/memory.h"
#include <boost/function.hpp>
namespace im {
namespace bsf {
template<>
struct Task<baidu::paddle_serving::predictor::Tensor,
baidu::paddle_serving::predictor::Tensor> {
typedef Task<baidu::paddle_serving::predictor::Tensor,
baidu::paddle_serving::predictor::Tensor> TaskT;
typedef baidu::paddle_serving::predictor::Tensor Tensor;
typedef baidu::paddle_serving::predictor::Tensor InType;
typedef baidu::paddle_serving::predictor::Tensor OutType;
typedef baidu::paddle_serving::predictor::BatchTensor BatchTensor;
typedef baidu::paddle_serving::predictor::BatchTensor InArrayT;
typedef baidu::paddle_serving::predictor::BatchTensor OutArrayT;
struct Segment {
Segment(void* p, size_t b, size_t s)
: ptr(p), begin(b), size(s) {}
void* ptr;
size_t begin;
size_t size;
};
int read_fd;
int write_fd;
pid_t owner_tid;
const InArrayT* in;
OutArrayT* out;
size_t rem;
size_t size;
base::atomic<size_t> index;
const BatchTensor* get(bool is_in) const {
if (is_in) {
return in;
} else {
return out;
}
}
BatchTensor* get(bool is_in) {
if (is_in) {
return const_cast<BatchTensor*>(in);
} else {
return out;
}
}
Task() {
read_fd = -1;
write_fd = -1;
owner_tid = -1;
in = NULL;
out = NULL;
rem = -1;
size = -1;
index.store(0, base::memory_order_relaxed);
}
};
template<>
class BatchTasks<Task<
baidu::paddle_serving::predictor::Tensor,
baidu::paddle_serving::predictor::Tensor> > {
public:
typedef baidu::paddle_serving::predictor::Tensor Tensor;
typedef baidu::paddle_serving::predictor::Tensor InType;
typedef baidu::paddle_serving::predictor::Tensor OutType;
typedef baidu::paddle_serving::predictor::DataBuf DataBuf;
typedef baidu::paddle_serving::predictor::MempoolWrapper MempoolWrapper;
typedef Task<baidu::paddle_serving::predictor::Tensor,
baidu::paddle_serving::predictor::Tensor> TaskT;
typedef TaskMeta<TaskT> TaskMetaT;
typedef TaskT::InArrayT InArrayT;
typedef TaskT::OutArrayT OutArrayT;
BatchTasks(size_t batch_size, bool batch_align = false)
: _batch_size(batch_size)
, _rem_size(batch_size)
, _batch_align(batch_align) {
_batch_in.clear();
_batch_out.clear();
_tasks.clear();
}
~BatchTasks() {
_batch_in.clear();
_batch_out.clear();
_tasks.clear();
}
static bool check_valid(
const InArrayT& in, OutArrayT& out, bool align) {
if (align) {
if (out.count() <= 0 || out.size() <= 0) {
CFATAL_LOG("Out tensor is empty, when aligned");
return false;
}
if (out.size() != in.size()) {
CFATAL_LOG("In/Out tensor size not eq: %ld!=%ld",
out.size(), in.size());
return false;
}
for (size_t fi = 0, shape0 = 0; fi < out.count(); ++fi) {
if (!out[fi].valid()) {
CFATAL_LOG("Out[%ld] tensor not valid", fi);
return false;
}
if (out.size() != out[fi].shape0()) {
CFATAL_LOG("Shape0 not consistency, %ld!=%ld, %ld",
out.size(), out[fi].shape0(), fi);
return false;
}
}
}
return true;
}
size_t append_task(TaskT* task) {
size_t add = std::min(task->rem, _rem_size);
if (!_batch_align) {
add = task->rem;
}
TaskMetaT tm(task, task->in->size() - task->rem, add);
_tasks.push_back(tm);
task->rem -= add;
_rem_size -= add;
return _rem_size;
}
void merge_tasks() {
merge_input();
merge_output();
}
void merge_input() {
if (_tasks.size() <= 0 || _tasks[0].task->in->count() <= 0) {
return ;
}
if (_tasks.size() == 1 && !_batch_align) {
TaskMetaT& tm = _tasks[0];
_batch_in = *(tm.task->in);
return ;
}
merge_tensor(true);
}
void merge_output() {
if (_batch_align) {
if (_tasks.size() <= 0 || _tasks[0].task->out->count() <= 0) {
return ;
}
}
if (_tasks.size() <= 0 || _tasks[0].task->out->count() <= 0) {
return ;
}
TaskMetaT& tm = _tasks[0];
if (_tasks.size() == 1 && !_batch_align) {
_batch_out = *(tm.task->out);
return ;
}
if (tm.task->out->size() <= 0) {
// shape is empty
_batch_out = *(tm.task->out);
return ;
}
if ((*tm.task->out)[0].data.data() == 0
|| (*tm.task->out)[0].data.size() == 0) {
_batch_out = *(tm.task->out);
return ;
}
merge_tensor(false);
}
void merge_tensor(bool is_in) {
// accumulate batch size from fetched tasks
size_t batch_size = 0;
for (size_t ti = 0; ti < _tasks.size(); ++ti) {
TaskMetaT& tm = _tasks[ti];
size_t add = tm.end - tm.begin;
batch_size += add;
}
// merge all instances in each tensor data
size_t tensor_count = _tasks[0].task->get(is_in)->count();
for (size_t fi = 0; fi < tensor_count; ++fi) {
const Tensor& head = (*(_tasks[0].task->get(is_in)))[fi];
Tensor batch_tensor;
batch_tensor.name = head.name;
batch_tensor.type = head.type;
batch_tensor.shape.push_back(batch_size);
size_t ins_ele_count = 1;
for (size_t si = 1; si < head.shape.size(); ++si) {
batch_tensor.shape.push_back(head.shape[si]);
ins_ele_count *= head.shape[si];
}
size_t tensor_ele_count = ins_ele_count * batch_size;
size_t ins_byte = ins_ele_count * head.ele_byte();
size_t tensor_byte = tensor_ele_count * head.ele_byte();
void* data_buf
= MempoolWrapper::instance().malloc(tensor_byte);
if (!data_buf) {
CFATAL_LOG("Malloc failed, size: %ld", tensor_byte);
return ;
}
size_t data_byte = 0;
for (size_t ti = 0; ti < _tasks.size(); ++ti) {
TaskMetaT& tm = _tasks[ti];
size_t acc_byte = ins_byte * (tm.end - tm.begin);
if (data_byte + acc_byte > tensor_byte) {
CFATAL_LOG("Invalid bytes: %ld + %ld >= %ld",
data_byte, acc_byte, tensor_byte);
return ;
}
const Tensor& tensor = (*(tm.task->get(is_in)))[fi];
memcpy(data_buf + data_byte,
tensor.data.data() + tm.begin * ins_byte,
acc_byte);
data_byte += acc_byte;
}
if (data_byte != tensor_byte) {
CFATAL_LOG("Invalid tensor byte: %ld != %ld",
data_byte, tensor_byte);
return ;
}
batch_tensor.data = DataBuf(data_buf, tensor_byte);
if (is_in) {
_batch_in.push_back(batch_tensor);
} else {
_batch_out.push_back(batch_tensor);
}
}
LOG(TRACE) << "merge input(" << is_in << ") samples: "
<< batch_size << " from " << _tasks.size() << " pvs";
}
void notify_tasks() {
if (_batch_out.size() != _batch_in.size()) {
CFATAL_LOG("batch size not consistency: %ld != %ld",
_batch_out.size(), _batch_in.size());
return ;
}
size_t tensor_count = _batch_out.count();
size_t batch_size = _batch_out.size();
for (size_t fi = 0; fi < tensor_count; ++fi) {
const Tensor& tensor = _batch_out[fi];
size_t ins_byte = tensor.ele_byte();
for (size_t si = 1; si < tensor.shape.size(); ++si) {
ins_byte *= tensor.shape[si];
}
for (size_t ti = 0, bi = 0, add = 0;
ti < _tasks.size(); ++ti, bi += add) {
OutArrayT* dst = _tasks[ti].task->out;
add = _tasks[ti].end - _tasks[ti].begin;
size_t offset_src = ins_byte * bi;
size_t add_byte = add * ins_byte;
if (_batch_align) { // merge all batches
size_t offset_dst = ins_byte * _tasks[ti].begin;
void* ptr = const_cast<void*>((*dst)[fi].data.data());
memcpy(ptr + offset_dst,
_batch_out[fi].data.data() + offset_src, add_byte);
} else { // overwrite
if (dst->count() <= 0) {
dst->push_back(_batch_out[fi]);
} else {
(*dst)[fi] = _batch_out[fi];
}
(*dst)[fi].shape[0] = add;
(*dst)[fi].data = DataBuf(
_batch_out[fi].data.data() + offset_src, add_byte);
}
}
}
for (size_t ti = 0; ti < _tasks.size(); ++ti) {
TaskT* task = _tasks[ti].task;
size_t begin = _tasks[ti].begin;
size_t end = _tasks[ti].end;
size_t add = end - begin;
size_t index = task->index.fetch_add(add);
if ((index + add) >= task->in->size()) {
char c = 0;
while (write(task->write_fd, &c, 1) != 1 && errno == EINTR) {
;
}
base::return_object(task);
}
}
}
const typename TaskT::InArrayT& in() const {
return _batch_in;
}
typename TaskT::OutArrayT& out() {
return _batch_out;
}
size_t task_size() {
return _tasks.size();
}
private:
std::vector<TaskMetaT> _tasks;
InArrayT _batch_in;
OutArrayT _batch_out;
size_t _rem_size;
size_t _batch_size;
bool _batch_align;
};
} // namespace bsf
} // namespace im
#pragma once
#include <boost/bind.hpp>
#include <base/atomicops.h>
#include <comlog/comlog.h>
#include "common/inner_common.h"
#include <sys/syscall.h>
namespace im {
namespace bsf {
template<typename TaskT>
void* TaskExecutor<TaskT>::thread_entry(void* args) {
ComlogGuard logging_guard;
ThreadContext<TaskT>* context = static_cast<ThreadContext<TaskT>*>(args);
TaskExecutor<TaskT>* executor = static_cast<TaskExecutor<TaskT>*>(context->executor);
executor->work(context);
return NULL;
}
template<typename TaskT>
int TaskExecutor<TaskT>::start(uint32_t thread_num, uint32_t init_timeout_sec) {
_stop = false;
if (!_thread_contexts.empty()) {
CWARNING_LOG("BSF has started");
return 0;
}
if (thread_num == 0) {
CFATAL_LOG("cannot init BSF with zero thread");
return -1;
}
ThreadContext<TaskT>* contexts = new ThreadContext<TaskT>[thread_num];
for (uint32_t i = 0; i < thread_num; ++i) {
contexts[i].executor = this;
if (_user_thread_contexts != NULL) {
contexts[i].user_thread_context = _user_thread_contexts[i];
}
int rc = THREAD_CREATE(
&contexts[i].tid, NULL, &TaskExecutor::thread_entry, &contexts[i]);
if (rc != 0) {
CFATAL_LOG("failed to create BSF worker thread: index=%u, rc=%d, errno=%d:%m",
i, rc, errno);
return -1;
}
_thread_contexts.push_back(&contexts[i]);
}
int init_timeout = init_timeout_sec * 1000 * 1000;
bool has_error = false;
bool has_timeout = true;
if (init_timeout == 0) {
has_timeout = false;
}
while (!has_timeout || init_timeout > 0) {
bool done = true;
for (size_t i = 0; i < _thread_contexts.size(); ++i) {
if (_thread_contexts[i]->init_status < 0) {
has_error = true;
break;
}
if (_thread_contexts[i]->init_status == 0) {
done = false;
}
}
if (has_error) {
CFATAL_LOG("BSF thread init error");
return -1;
}
if (done) {
CDEBUG_LOG("BSF thread init done");
return 0;
}
// 100ms
const int sleep_interval = 100 * 1000;
usleep(sleep_interval);
init_timeout -= sleep_interval;
}
CFATAL_LOG("BSF thread init timed out");
return -1;
}
template<typename TaskT>
void TaskExecutor<TaskT>::stop() {
_stop = true;
for (size_t i = 0; i < _thread_contexts.size(); ++i) {
THREAD_CANCEL(_thread_contexts[i]->tid);
}
for (size_t i = 0; i < _thread_contexts.size(); ++i) {
THREAD_JOIN(_thread_contexts[i]->tid, NULL);
}
_thread_contexts.clear();
}
template<typename TaskT>
TaskHandler<TaskT> TaskExecutor<TaskT>::schedule(
const InArrayT& in, OutArrayT& out) {
TaskT* task = base::get_object<TaskT>();
if (!task) {
LOG(FATAL) << "Failed get TaskT from object pool";
return TaskHandler<TaskT>::valid_handle();
}
if (!BatchTasks<TaskT>::check_valid(in, out, _batch_align)) {
LOG(FATAL) << "Invalid input & output";
return TaskHandler<TaskT>::valid_handle();
}
int fds[2];
int rc = pipe(fds);
if (rc != 0) {
CFATAL_LOG("call pipe() failed, errno=%d:%m", errno);
return TaskHandler<TaskT>::valid_handle();
}
task->read_fd = fds[0];
task->write_fd = fds[1];
task->owner_tid = ::syscall(SYS_gettid);
task->in = &in;
task->out = &out;
task->rem = in.size();
task->size = in.size();
task->index.store(0, base::memory_order_relaxed);
AutoMutex lock(_mut);
_task_queue.push_back(task);
THREAD_COND_SIGNAL(&_cond);
return TaskHandler<TaskT>(*task);
}
template<typename TaskT>
bool TaskExecutor<TaskT>::fetch_batch(BatchTasks<TaskT>& batch) {
AutoMutex lock(_mut);
while (_task_queue.empty()) {
THREAD_COND_WAIT(&_cond, &_mut);
}
if (_task_queue.empty()) {
CFATAL_LOG("invalid task queue!");
return false;
}
while (!_task_queue.empty()) {
TaskT* task = _task_queue.front();
size_t rem = batch.append_task(task);
if (task->rem <= 0) {
_task_queue.pop_front();
}
if (rem <= 0) break;
}
return true;
}
template<typename TaskT>
int TaskExecutor<TaskT>::work(ThreadContext<TaskT>* context) {
if (_thread_init_fn != NULL) {
if (_thread_init_fn(context->user_thread_context) != 0) {
CFATAL_LOG("execute thread init thunk failed, BSF thread will exit");
context->init_status = -1;
return -1;
} else {
CDEBUG_LOG("execute thread init thunk succeed");
}
}
context->init_status = 1;
while (!_stop) {
if (_thread_reset_fn != NULL) {
if (_thread_reset_fn(context->user_thread_context) != 0) {
CFATAL_LOG("execute user thread reset failed");
}
}
BatchTasks<TaskT> batch(_batch_size, _batch_align);
if (fetch_batch(batch)) {
batch.merge_tasks();
_fn(batch.in(), batch.out());
batch.notify_tasks();
}
}
return 0;
}
template<typename InItemT, typename OutItemT>
bool TaskManager<InItemT, OutItemT>::schedule(const InArrayT& in,
OutArrayT& out) {
TaskHandler<TaskT> handler = _executor.schedule(in, out);
if (handler.valid()) {
_task_owned = handler;
return true;
} else {
CFATAL_LOG("failed to schedule task");
return false;
}
}
template<typename InItemT, typename OutItemT>
void TaskManager<InItemT, OutItemT>::wait() {
char buffer[128];
while (read(_task_owned.read_fd, buffer, sizeof(buffer)) < 0
&& errno == EINTR) {
;
}
close(_task_owned.read_fd);
close(_task_owned.write_fd);
_task_owned.read_fd = -1;
_task_owned.write_fd = -1;
return;
}
}
}
#ifndef BAIDU_PADDLE_SERVING_PREDICTOR_BSF_H
#define BAIDU_PADDLE_SERVING_PREDICTOR_BSF_H
#include <errno.h>
#include <vector>
#include <deque>
#include <base/atomicops.h>
#include <comlog/comlog.h>
#include "common/inner_common.h"
#include <boost/function.hpp>
namespace im {
namespace bsf {
static const size_t DEFAULT_BATCH_SIZE = 100;
template<typename InItemT, typename OutItemT>
struct Task {
typedef std::vector<InItemT> InArrayT;
typedef std::vector<OutItemT> OutArrayT;
typedef InItemT InType;
typedef OutItemT OutType;
typedef Task<InItemT, OutItemT> TaskT;
int read_fd;
int write_fd;
pid_t owner_tid;
const InArrayT* in;
OutArrayT* out;
size_t rem;
size_t size;
size_t batch_size() {
return in->size();
}
base::atomic<size_t> index;
Task() {
read_fd = -1;
write_fd = -1;
owner_tid = -1;
in = NULL;
out = NULL;
rem = -1;
size = -1;
index.store(0, base::memory_order_relaxed);
}
};
template<typename TaskT>
struct TaskMeta {
TaskMeta(TaskT* ptr, size_t start, size_t add)
: task(ptr)
, begin(start)
, end(start + add) {}
TaskT* task;
size_t begin;
size_t end;
};
template<typename TaskT>
class BatchTasks {
public:
typedef typename TaskT::InType InType;
typedef typename TaskT::OutType OutType;
typedef TaskMeta<TaskT> TaskMetaT;
BatchTasks(size_t batch_size, bool batch_align = true)
: _batch_size(batch_size)
, _rem_size(batch_size)
, _batch_align(batch_align) {
_batch_in.clear();
_batch_out.clear();
_tasks.clear();
}
~BatchTasks() {
_batch_in.clear();
_batch_out.clear();
_tasks.clear();
}
// synchronized operation
size_t append_task(TaskT* task) {
size_t add = std::min(task->rem, _rem_size);
if (!_batch_align) {
add = task->rem;
}
TaskMetaT tm(task, task->in->size() - task->rem, add);
_tasks.push_back(tm);
task->rem -= add;
_rem_size -= add;
return _rem_size;
}
static bool check_valid(
const typename TaskT::InArrayT& in,
typename TaskT::OutArrayT& out, bool align) {
(void)in;
(void)out;
(void)align;
return true;
}
void merge_tasks() {
for (size_t ti = 0; ti < _tasks.size(); ++ti) {
TaskMetaT& tm = _tasks[ti];
for (size_t vi = tm.begin; vi < tm.end; ++vi) {
_batch_in.push_back((*tm.task->in)[vi]);
_batch_out.push_back((*tm.task->out)[vi]);
}
}
}
void notify_tasks() {
if (_batch_out.size() != _batch_in.size()) {
CFATAL_LOG("batch size not consistency: %ld != %ld",
_batch_out.size(), _batch_in.size());
return ;
}
for (size_t ti = 0, bi = 0; ti < _tasks.size(); ++ti) {
TaskT* task = _tasks[ti].task;
size_t begin = _tasks[ti].begin;
size_t end = _tasks[ti].end;
size_t add = end - begin;
for (size_t oi = begin; oi < end; ++oi, ++bi) {
if (bi >= _batch_in.size()) {
CFATAL_LOG("batch index overflow: %d > %d",
bi, _batch_in.size());
return ;
}
(*task->out)[oi] = _batch_out[bi];
}
size_t index = task->index.fetch_add(add);
if ((index + add) >= task->in->size()) {
char c = 0;
while (write(task->write_fd, &c, 1) != 1 && errno == EINTR) {
;
}
base::return_object(task);
}
}
}
const typename TaskT::InArrayT& in() const {
return _batch_in;
}
typename TaskT::OutArrayT& out() {
return _batch_out;
}
size_t task_size() {
return _tasks.size();
}
private:
std::vector<TaskMetaT> _tasks;
typename TaskT::InArrayT _batch_in;
typename TaskT::OutArrayT _batch_out;
size_t _rem_size;
size_t _batch_size;
bool _batch_align;
};
// BSF task handle, used to specify which task to wait on
template<typename TaskT>
struct TaskHandler {
int read_fd;
int write_fd;
TaskHandler()
: read_fd(-1), write_fd(-1) {
// do nothing
}
TaskHandler(TaskT const& task)
: read_fd(task.read_fd)
, write_fd(task.write_fd) {
// do nothing
}
inline bool valid() const {
return read_fd >= 0 && write_fd >= 0;
}
static TaskHandler<TaskT>& valid_handle() {
static TaskHandler<TaskT> vhandle;
return vhandle;
}
};
template<typename TaskT>
class TaskExecutor;
template<typename InItemT, typename OutItemT>
class TaskManager;
template<typename TaskT>
struct ThreadContext {
TaskExecutor<TaskT>* executor;
void* user_thread_context;
THREAD_T tid;
int init_status;
ThreadContext()
: executor(NULL)
, user_thread_context(NULL)
, tid(-1), init_status(0) {
// do nothing
}
~ThreadContext() {
tid = -1;
executor = NULL;
user_thread_context = NULL;
init_status = 0;
}
};
template<typename TaskT>
class TaskExecutor {
public:
typedef typename TaskT::InType InType;
typedef typename TaskT::OutType OutType;
typedef typename TaskT::InArrayT InArrayT;
typedef typename TaskT::OutArrayT OutArrayT;
typedef std::vector<TaskT> TaskArrayT;
TaskExecutor()
: _stop(false)
, _thread_init_fn(NULL)
, _thread_reset_fn(NULL)
, _user_thread_contexts(NULL)
, _batch_size(DEFAULT_BATCH_SIZE)
, _batch_align(false)
, _fn(NULL) {
THREAD_MUTEX_INIT(&_mut, NULL);
THREAD_COND_INIT(&_cond, NULL);
_task_queue.clear();
}
~TaskExecutor() {
THREAD_MUTEX_DESTROY(&_mut);
THREAD_COND_DESTROY(&_cond);
}
static TaskExecutor<TaskT>* instance() {
static TaskExecutor<TaskT> singleton;
return &singleton;
}
void set_batch_size(size_t batch_size) {
_batch_size = batch_size;
}
void set_batch_align(size_t batch_align) {
_batch_align = batch_align;
}
void set_thread_init_fn(boost::function<int(void*)> init_fn, void** contexts = NULL) {
_thread_init_fn = init_fn;
_user_thread_contexts = contexts;
}
void set_thread_reset_fn(boost::function<int(void*)> reset_fn) {
_thread_reset_fn = reset_fn;
}
void set_thread_callback_fn(boost::function<void(const InArrayT&, OutArrayT&)> cb) {
_fn = cb;
}
int start(uint32_t thread_num, uint32_t init_timeout_sec = 0);
void stop();
static void* thread_entry(void* args);
private:
TaskExecutor(TaskExecutor<TaskT> const& other);
TaskExecutor* operator=(TaskExecutor<TaskT> const& other);
int work(ThreadContext<TaskT>* context);
TaskHandler<TaskT> schedule(const InArrayT&, OutArrayT&);
bool fetch_batch(BatchTasks<TaskT>& batch);
bool _stop;
// can't use boost::mutex here because of a macro conflict
THREAD_MUTEX_T _mut;
THREAD_COND_T _cond;
std::deque<TaskT*> _task_queue;
boost::function<int(void*)> _thread_init_fn;
boost::function<int(void*)> _thread_reset_fn;
void** _user_thread_contexts;
std::vector<ThreadContext<TaskT>*> _thread_contexts;
friend class TaskManager<InType, OutType>;
boost::function<void(const InArrayT&, OutArrayT&)> _fn;
size_t _batch_size;
bool _batch_align;
};
template<typename InItemT, typename OutItemT>
class TaskManager {
public:
typedef Task<InItemT, OutItemT> TaskT;
typedef typename TaskT::InArrayT InArrayT;
typedef typename TaskT::OutArrayT OutArrayT;
explicit TaskManager(TaskExecutor<TaskT>& exe, size_t batch_size) : _executor(exe) {
}
TaskManager()
: _executor(*TaskExecutor<TaskT>::instance()) {
}
~TaskManager() {
wait();
}
bool schedule(const InArrayT& in, OutArrayT& out);
void wait();
inline void clear() {
wait();
}
private:
TaskExecutor<TaskT>& _executor;
TaskHandler<TaskT> _task_owned;
}; // class TaskManager
struct ComlogGuard {
ComlogGuard() {
com_openlog_r();
}
~ComlogGuard() {
com_closelog_r();
}
};
class AutoMutex {
public:
AutoMutex(THREAD_MUTEX_T& mut)
: _mut(mut) {
THREAD_MUTEX_LOCK(&_mut);
}
~AutoMutex() {
THREAD_MUTEX_UNLOCK(&_mut);
}
private:
THREAD_MUTEX_T& _mut;
};
} // namespace bsf
} // namespace im
#include "bsf-inl.h"
#include "bsf-inl-tensor.h"
#endif //BAIDU_PADDLE_SERVING_PREDICTOR_BSF_H
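// A minimal usage sketch of the BSF batching interfaces above, assuming the
// header is installed as framework/bsf.h and using a trivial callback that
// copies the merged inputs to the merged outputs. The names echo_callback and
// demo_bsf are illustrative and error handling is abbreviated.
#include "framework/bsf.h"
#include <vector>

static void echo_callback(const std::vector<int>& in, std::vector<int>& out) {
    out = in;  // runs inside a BSF worker thread on a merged batch
}

int demo_bsf() {
    typedef im::bsf::Task<int, int> TaskT;
    im::bsf::TaskExecutor<TaskT>* executor = im::bsf::TaskExecutor<TaskT>::instance();
    executor->set_batch_size(8);
    executor->set_thread_callback_fn(&echo_callback);
    if (executor->start(2) != 0) {  // two worker threads, no init timeout
        return -1;
    }
    std::vector<int> in(3, 1);
    std::vector<int> out(3, 0);  // pre-sized: notify_tasks() writes results back per item
    im::bsf::TaskManager<int, int> mgr;
    if (!mgr.schedule(in, out)) {
        return -1;
    }
    mgr.wait();  // blocks until every item of this task has been processed
    executor->stop();
    return 0;
}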
#ifndef BAIDU_PADDLE_SERVING_PREDICTOR_CHANNEL_H
#define BAIDU_PADDLE_SERVING_PREDICTOR_CHANNEL_H
#include "common/inner_common.h"
namespace baidu {
namespace paddle_serving {
namespace predictor {
class Channel;
class Bus {
public:
Bus() {
clear();
}
int regist(const std::string& op, Channel* channel) {
std::pair<boost::unordered_map<std::string, Channel*>::iterator, bool> r
= _op_channels.insert(std::make_pair(op, channel));
if (!r.second) {
LOG(ERROR) << "Failed insert op&channel into bus:" << op;
return -1;
}
return 0;
}
Channel* channel_by_name(const std::string& op_name) {
boost::unordered_map<std::string, Channel*>::iterator it
= _op_channels.find(op_name);
if (it == _op_channels.end()) {
LOG(WARNING)
<< "Not found channel in bus, op_name:"
<< op_name << ".";
return NULL;
}
return it->second;
}
void clear() {
_op_channels.clear();
}
size_t size() const {
return _op_channels.size();
}
private:
boost::unordered_map<std::string, Channel*> _op_channels;
};
class Channel {
public:
Channel() {}
void init(uint32_t id, const char* op) {
_id = id;
_op = std::string(op);
clear_data();
}
void deinit() {
clear_data();
}
uint32_t id() const {
return _id;
}
const std::string& op() {
return _op;
}
int share_to_bus(Bus* bus) {
if (bus->regist(_op, this) != 0) {
LOG(FATAL)
<< "Failed regist channel[" << _op
<< "] to bus!" << noflush;
return -1;
}
return 0;
}
virtual void clear_data() = 0;
virtual void* param() = 0;
virtual const void* param() const = 0;
virtual google::protobuf::Message* message() = 0;
virtual const google::protobuf::Message* message() const = 0;
virtual Channel& operator=(const Channel& channel) = 0;
virtual std::string debug_string() const = 0;
private:
uint32_t _id;
std::string _op;
};
template<typename T>
class OpChannel : public Channel {
public:
OpChannel() {
}
void clear_data() {
_data.Clear();
}
void* param() {
return &_data;
}
const void* param() const {
return &_data;
}
google::protobuf::Message* message() {
return message_impl(derived_from_message<
TIsDerivedFromB<T, google::protobuf::Message>::RESULT>());
}
google::protobuf::Message* message_impl(derived_from_message<true>) {
return dynamic_cast<google::protobuf::Message*>(&_data);
}
google::protobuf::Message* message_impl(derived_from_message<false>) {
LOG(FATAL) << "Current type: " << typeid(T).name()
<< " is not derived from protobuf.";
return NULL;
}
const google::protobuf::Message* message() const {
return message_impl(derived_from_message<
TIsDerivedFromB<T, google::protobuf::Message>::RESULT>());
}
const google::protobuf::Message* message_impl(derived_from_message<true>) const {
return dynamic_cast<const google::protobuf::Message*>(&_data);
}
const google::protobuf::Message* message_impl(derived_from_message<false>) const {
LOG(FATAL) << "Current type: " << typeid(T).name()
<< " is not derived from protobuf.";
return NULL;
}
Channel& operator=(const Channel& channel) {
_data = *(dynamic_cast<const OpChannel<T>&>(channel)).data();
return *this;
}
std::string debug_string() const {
return _data.ShortDebugString();
}
// functions of derived class
T* data() {
return &_data;
}
const T* data() const {
return &_data;
}
Channel& operator=(const T& obj) {
_data = obj;
return *this;
}
private:
T _data;
};
template<>
class OpChannel<google::protobuf::Message> : public Channel {
public:
OpChannel<google::protobuf::Message>() : _data(NULL) {
}
virtual ~OpChannel<google::protobuf::Message>() {
_data = NULL;
}
void clear_data() {
_data = NULL;
}
void* param() {
return const_cast<void*>((const void*)_data);
}
const void* param() const {
return _data;
}
google::protobuf::Message* message() {
return const_cast<google::protobuf::Message*>(_data);
}
const google::protobuf::Message* message() const {
return _data;
}
Channel& operator=(const Channel& channel) {
_data = channel.message();
return *this;
}
std::string debug_string() const {
if (_data) {
return _data->ShortDebugString();
} else {
return "{\"Error\": \"Null Message Ptr\"}";
}
}
// derived-class function implementations
google::protobuf::Message* data() {
return const_cast<google::protobuf::Message*>(_data);
}
const google::protobuf::Message* data() const {
return _data;
}
OpChannel<google::protobuf::Message>& operator=(
google::protobuf::Message* message) {
_data = message;
return *this;
}
OpChannel<google::protobuf::Message>& operator=(
const google::protobuf::Message* message) {
_data = message;
return *this;
}
private:
const google::protobuf::Message* _data;
};
} // predictor
} // paddle_serving
} // baidu
#endif
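// A minimal sketch of wiring a request message onto a Bus through the
// google::protobuf::Message specialization above. The op name startup_op
// mirrors the START_OP_NAME constant; the function name is illustrative.
#include "framework/channel.h"

int demo_wire_request(google::protobuf::Message* request_msg) {
    using baidu::paddle_serving::predictor::Bus;
    using baidu::paddle_serving::predictor::OpChannel;
    Bus bus;
    OpChannel<google::protobuf::Message> request_channel;
    request_channel.init(0, "startup_op");  // id 0 is reserved for the starting op
    request_channel = request_msg;          // the channel only borrows the pointer
    if (request_channel.share_to_bus(&bus) != 0) {
        return -1;
    }
    return bus.channel_by_name("startup_op") != NULL ? 0 : -1;
}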
#include "common/inner_common.h"
#include "framework/dag.h"
#include "op/op.h"
#include "framework/predictor_metric.h" // PredictorMetric
namespace baidu {
namespace paddle_serving {
namespace predictor {
Dag::Dag() {
_index_nodes.clear();
_name_nodes.clear();
_stages.clear();
}
Dag::~Dag() {
deinit();
}
int Dag::deinit() {
for (std::vector<DagStage*>::iterator iter = _stages.begin(); iter != _stages.end(); ++iter) {
if (*iter != NULL) {
delete *iter;
}
}
_stages.clear();
for (std::vector<DagNode*>::iterator iter = _index_nodes.begin();
iter != _index_nodes.end();
++iter) {
DagNode* node = *iter;
if (node != NULL) {
void* conf = node->conf;
if (conf != NULL) {
Op* op = OpRepository::instance().get_op(node->type);
if (op == NULL) {
LOG(FATAL) << "Failed to get_op, op type[" << node->type << "]";
return -1;
}
op->delete_config(conf);
OpRepository::instance().return_op(node->type, op);
}
delete node;
}
}
_index_nodes.clear();
_name_nodes.clear();
return 0;
}
EdgeMode Dag::parse_mode(std::string& mode) {
if (mode == "RO") {
return RO;
} else if (mode == "RW") {
return RW;
} else {
return UNKNOWN;
}
}
// [@Node]
// name: preprocess
// type: ProcessorOp
// [.@Depend]
// name: StartupOp
// mode: RO
// [@Node]
// name: discret_extractor
// type: DiscretExtractOp
// [.@Depend]
// name: StartupOp
// mode: RO
// [.@Depend]
// name: preprocess
// mode: RW
// [@Node]
// name: dnn_inference
// type: PaddleV2InferenceOp
// [.@Depend]
// name: discret_extractor
// mode: RO
// [@Node]
// name: postprocess
// type: PostProcessOp
// [.@Depend]
// name: dnn_inference
// mode: RO
int Dag::init(const char* path, const char* file, const std::string& name) {
comcfg::Configure conf;
if (conf.load(path, file) != 0) {
LOG(FATAL) << "Failed load conf from"
<< path << "/" << file << " in dag: "
<< name;
return ERR_INTERNAL_FAILURE;
}
return init(conf, name);
}
int Dag::init(const comcfg::Configure& conf, const std::string& name) {
_dag_name = name;
_index_nodes.clear();
_name_nodes.clear();
for (uint32_t i = 0; i < conf["Node"].size(); i++) {
DagNode* node = new (std::nothrow) DagNode();
if (node == NULL) {
LOG(ERROR) << "Failed create new dag node";
return ERR_MEM_ALLOC_FAILURE;
}
node->id = i + 1; // 0 is reserved for the starting op
node->name = conf["Node"][i]["name"].to_cstr();
node->type = conf["Node"][i]["type"].to_cstr();
uint32_t depend_size = conf["Node"][i]["Depend"].size();
for (uint32_t j = 0; j < depend_size; j++) {
const comcfg::ConfigUnit& depend =
conf["Node"][i]["Depend"][j];
std::string name = depend["name"].to_cstr();
std::string mode = depend["mode"].to_cstr();
node->depends.insert(
std::make_pair(name, parse_mode(mode)));
}
Op* op = OpRepository::instance().get_op(node->type);
if (op == NULL) {
LOG(FATAL) << "Failed to get_op, op type[" << node->type << "]";
return ERR_INTERNAL_FAILURE;
}
// node->conf could be NULL
node->conf = op->create_config(conf["Node"][i]);
OpRepository::instance().return_op(node->type, op);
_name_nodes.insert(std::make_pair(node->name, node));
_index_nodes.push_back(node);
}
if (topo_sort() != 0) {
LOG(FATAL) << "Topo sort dag[" << _dag_name << "] failed!";
return ERR_INTERNAL_FAILURE;
}
if (FLAGS_el_log_level == 16) {
LOG(DEBUG) << "DAG: " << _dag_name << noflush;
LOG(DEBUG) << ", Op Num: " << _index_nodes.size();
for (uint32_t nid = 0; nid < _index_nodes.size(); nid++) {
DagNode* node = _index_nodes[nid];
LOG(DEBUG)
<< ", OP-" << node->id << "-" << node->name << "-"
<< node->type << noflush;
LOG(DEBUG) << " depends: " << node->depends.size() << noflush;
boost::unordered_map<std::string, EdgeMode>::iterator it;
for (it = node->depends.begin(); it != node->depends.end(); it++) {
LOG(DEBUG) << " " << it->first << " " << it->second << noflush;
}
}
LOG(DEBUG) << "";
}
return ERR_OK;
}
uint32_t Dag::nodes_size() {
return _index_nodes.size();
}
const DagNode* Dag::node_by_id(uint32_t id) {
return _index_nodes[id];
}
const DagNode* Dag::node_by_id(uint32_t id) const {
return _index_nodes[id];
}
const DagNode* Dag::node_by_name(std::string& name) {
return _name_nodes[name];
}
const DagNode* Dag::node_by_name(const std::string& name) const {
boost::unordered_map<std::string, DagNode*>::const_iterator it;
it = _name_nodes.find(name);
if (it == _name_nodes.end()) {
LOG(WARNING) << "Not found op by name:" << name;
return NULL;
}
return it->second;
}
uint32_t Dag::stage_size() {
return _stages.size();
}
const DagStage* Dag::stage_by_index(uint32_t index) {
return _stages[index];
}
int Dag::topo_sort() {
// TODO
std::stringstream ss;
for (uint32_t nid = 0; nid < _index_nodes.size(); nid++) {
DagStage* stage = new (std::nothrow) DagStage();
if (stage == NULL) {
LOG(ERROR) << "Invalid stage!";
return ERR_MEM_ALLOC_FAILURE;
}
stage->nodes.push_back(_index_nodes[nid]);
ss.str("");
ss << _stages.size();
stage->name = ss.str();
stage->full_name = full_name() + NAME_DELIMITER + stage->name;
_stages.push_back(stage);
// assign stage number after stage created
_index_nodes[nid]->stage = nid;
// assign dag node full name after stage created
_index_nodes[nid]->full_name = stage->full_name + NAME_DELIMITER + _index_nodes[nid]->name;
}
return ERR_OK;
}
void Dag::regist_metric(const std::string& service_name) {
for (int stage_idx = 0; stage_idx < _stages.size(); ++stage_idx) {
DagStage* stage = _stages[stage_idx];
PredictorMetric::GetInstance()->regist_latency_metric(
STAGE_METRIC_PREFIX + service_name + NAME_DELIMITER + stage->full_name);
for (int node_idx = 0; node_idx < stage->nodes.size(); ++node_idx) {
DagNode* node = stage->nodes[node_idx];
PredictorMetric::GetInstance()->regist_latency_metric(
OP_METRIC_PREFIX + service_name + NAME_DELIMITER + node->full_name);
Op* op = OpRepository::instance().get_op(node->type);
if (op == NULL) {
LOG(FATAL) << "Failed to get_op, op type[" << node->type << "]";
return;
}
op->set_full_name(service_name + NAME_DELIMITER + node->full_name);
op->set_config(node->conf);
op->regist_metric();
OpRepository::instance().return_op(node->type, op);
}
}
}
} // predictor
} // paddle_serving
} // baidu
#ifndef BAIDU_PADDLE_SERVING_PREDICTOR_DAG_H
#define BAIDU_PADDLE_SERVING_PREDICTOR_DAG_H
#include "common/inner_common.h"
namespace baidu {
namespace paddle_serving {
namespace predictor {
enum EdgeMode {
RO = 0,
RW = 1,
UNKNOWN
};
struct DagNode {
uint32_t id;
uint32_t stage;
std::string name; // opname
std::string full_name; // workflow_stageindex_opname
std::string type;
void* conf;
boost::unordered_map<std::string, EdgeMode> depends;
};
struct DagStage {
std::vector<DagNode*> nodes;
std::string name; // stageindex
std::string full_name; // workflow_stageindex
};
class Dag {
public:
Dag();
virtual ~Dag();
EdgeMode parse_mode(std::string& mode);
int init(const char* path, const char* file, const std::string& name);
int init(const comcfg::Configure& conf, const std::string& name);
int deinit();
uint32_t nodes_size();
const DagNode* node_by_id(uint32_t id);
const DagNode* node_by_id(uint32_t id) const;
const DagNode* node_by_name(std::string& name);
const DagNode* node_by_name(const std::string& name) const;
uint32_t stage_size();
const DagStage* stage_by_index(uint32_t index);
const std::string& name() const {
return _dag_name;
}
const std::string& full_name() const {
return _dag_name;
}
void regist_metric(const std::string& service_name);
private:
int topo_sort();
private:
std::string _dag_name;
boost::unordered_map<std::string, DagNode*> _name_nodes;
std::vector<DagNode*> _index_nodes;
std::vector<DagStage*> _stages;
};
} // predictor
} // paddle_serving
} // baidu
#endif // BAIDU_PADDLE_SERVING_PREDICTOR_DAG_H
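// A minimal sketch of loading a workflow DAG with the class above, assuming a
// conf directory laid out like the dense_dag.conf example earlier in this
// change; the path, file and workflow names are illustrative.
#include "framework/dag.h"

int demo_load_dag() {
    baidu::paddle_serving::predictor::Dag dag;
    if (dag.init("./conf", "dense_dag.conf", "workflow1") != 0) {
        return -1;  // configure load, node parsing or topo sort failed
    }
    // the current topo_sort() puts every node into its own stage,
    // so the two counts are expected to match
    return dag.nodes_size() == dag.stage_size() ? 0 : -1;
}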
#include "framework/dag_view.h"
#include <baidu/rpc/traceprintf.h> // TRACEPRINTF
#include "common/inner_common.h"
#include "framework/op_repository.h"
namespace baidu {
namespace paddle_serving {
namespace predictor {
int DagView::init(Dag* dag, const std::string& service_name) {
_name = dag->name();
_full_name = service_name + NAME_DELIMITER + dag->name();
_bus = base::get_object<Bus>();
_bus->clear();
uint32_t stage_size = dag->stage_size();
// create tls stage view
for (uint32_t si = 0; si < stage_size; si++) {
const DagStage* stage = dag->stage_by_index(si);
if (stage == NULL) {
LOG(FATAL) << "Failed get stage by index:" << si;
return ERR_INTERNAL_FAILURE;
}
ViewStage* vstage = base::get_object<ViewStage>();
if (vstage == NULL) {
LOG(FATAL)
<< "Failed get vstage from object pool"
<< "at:" << si;
return ERR_MEM_ALLOC_FAILURE;
}
vstage->full_name = service_name + NAME_DELIMITER + stage->full_name;
uint32_t node_size = stage->nodes.size();
// create tls view node
for (uint32_t ni = 0; ni < node_size; ni++) {
DagNode* node = stage->nodes[ni];
ViewNode* vnode = base::get_object<ViewNode>();
if (vnode == NULL) {
LOG(FATAL) << "Failed get vnode at:" << ni;
return ERR_MEM_ALLOC_FAILURE;
}
// factory type
Op* op = OpRepository::instance().get_op(node->type);
if (op == NULL) {
LOG(FATAL) << "Failed get op with type:"
<< node->type;
return ERR_INTERNAL_FAILURE;
}
// initialize a TLS op object
if (op->init(_bus, dag, node->id, node->name, node->type, node->conf) != 0) {
LOG(WARNING) << "Failed init op, type:" << node->type;
return ERR_INTERNAL_FAILURE;
}
op->set_full_name(service_name + NAME_DELIMITER + node->full_name);
vnode->conf = node;
vnode->op = op;
vstage->nodes.push_back(vnode);
}
_view.push_back(vstage);
}
return ERR_OK;
}
int DagView::deinit() {
uint32_t stage_size = _view.size();
for (uint32_t si = 0; si < stage_size; si++) {
ViewStage* vstage = _view[si];
uint32_t node_size = vstage->nodes.size();
for (uint32_t ni = 0; ni < node_size; ni++) {
ViewNode* vnode = vstage->nodes[ni];
vnode->op->deinit();
OpRepository::instance().return_op(vnode->op);
vnode->reset();
// clear item
base::return_object(vnode);
}
// clear vector
vstage->nodes.clear();
base::return_object(vstage);
}
_view.clear();
_bus->clear();
base::return_object(_bus);
return ERR_OK;
}
int DagView::execute(base::IOBufBuilder* debug_os) {
uint32_t stage_size = _view.size();
for (uint32_t si = 0; si < stage_size; si++) {
TRACEPRINTF("start to execute stage[%u]", si);
int errcode = execute_one_stage(_view[si], debug_os);
TRACEPRINTF("finish to execute stage[%u]", si);
if (errcode < 0) {
LOG(FATAL)
<< "failed execute stage["
<< _view[si]->debug() << "]";
return errcode;
}
}
return ERR_OK;
}
// The default execution strategy is sequential.
// You can derive a subclass to override this function;
// ParallelDagView may be the one you want.
int DagView::execute_one_stage(ViewStage* vstage,
base::IOBufBuilder* debug_os) {
base::Timer stage_time(base::Timer::STARTED);
uint32_t node_size = vstage->nodes.size();
for (uint32_t ni = 0; ni < node_size; ni++) {
ViewNode* vnode = vstage->nodes[ni];
DagNode* conf = vnode->conf;
Op* op = vnode->op;
TRACEPRINTF("start to execute op[%s]", op->name());
int errcode = op->process(debug_os != NULL);
TRACEPRINTF("finish to execute op[%s]", op->name());
if (errcode < 0) {
LOG(FATAL)
<< "Execute failed, Op:" << op->debug_string();
return errcode;
}
if (errcode > 0) {
LOG(TRACE)
<< "Execute ignore, Op:" << op->debug_string();
continue;
}
if (debug_os) {
(*debug_os)
<< "{\"op_name\": \"" << op->name()
<< "\", \"debug_str:\": \""
<< op->debug_string()
<< "\", \"time_info\": \"" << op->time_info() << "\"}";
}
//LOG(DEBUG) << "Execute succ, Op:" << op->debug_string();
}
stage_time.stop();
PredictorMetric::GetInstance()->update_latency_metric(
STAGE_METRIC_PREFIX + vstage->full_name, stage_time.u_elapsed());
return ERR_OK;
}
int DagView::set_request_channel(Channel& request) {
// Each workflow should receive the very beginning
// request (channel) and commit it to the bus, so that
// the first-stage ops can consume it.
request.share_to_bus(_bus);
return ERR_OK;
}
const Channel* DagView::get_response_channel() const {
// The caller obtains the response channel from the bus and
// writes it to the rpc response (protobuf/json)
if (_view.size() < 1) {
LOG(FATAL) << "invalid empty view stage!" << noflush;
return NULL;
}
ViewStage* last_stage = _view[_view.size() - 1];
if (last_stage->nodes.size() != 1
|| last_stage->nodes[0] == NULL) {
LOG(FATAL) << "Invalid last stage, size["
<< last_stage->nodes.size()
<< "] != 1" << noflush;
return NULL;
}
Op* last_op = last_stage->nodes[0]->op;
if (last_op == NULL) {
LOG(FATAL) << "Last op is NULL";
return NULL;
}
return last_op->mutable_channel();
}
} // predictor
} // paddle_serving
} // baidu
#ifndef BAIDU_PADDLE_SERVING_PREDICTOR_DAG_VIEW_H
#define BAIDU_PADDLE_SERVING_PREDICTOR_DAG_VIEW_H
#include "op/op.h"
#include "common/inner_common.h"
#include "framework/channel.h"
#include "framework/dag.h"
namespace baidu {
namespace paddle_serving {
namespace predictor {
class Op;
struct ViewNode {
Op* op; // op->full_name == service_workflow_stageindex_opname
DagNode* conf;
void reset() {
op = NULL;
conf = NULL;
}
};
struct ViewStage {
std::vector<ViewNode*> nodes;
std::string full_name; // service_workflow_stageindex
std::string debug() {
return "TOBE IMPLEMENTED!";
}
};
class DagView {
public:
DagView() : _bus(NULL) {
_view.clear();
}
~DagView() {}
int init(Dag* dag, const std::string& service_name);
int deinit();
int execute(base::IOBufBuilder* debug_os);
// The default execution strategy is sequential.
// You can derive a subclass to override this function;
// ParallelDagView may be the one you want.
virtual int execute_one_stage(ViewStage* vstage,
base::IOBufBuilder* debug_os);
int set_request_channel(Channel& request);
const Channel* get_response_channel() const;
const std::string& name() const {
return _name;
}
const std::string& full_name() const {
return _full_name;
}
private:
std::string _name;
std::string _full_name;
std::vector<ViewStage*> _view;
Bus* _bus;
};
// The derived DagView supports a parallel execution
// strategy by implementing execute_one_stage().
class ParallelDagView : public DagView {
public:
int execute_one_stage(ViewStage* vstage, base::IOBufBuilder*) {
return 0;
}
};
} // predictor
} // paddle_serving
} // baidu
#endif // BAIDU_PADDLE_SERVING_PREDICTOR_DAG_VIEW_H
/***************************************************************************
*
* Copyright (c) 2018 Baidu.com, Inc. All Rights Reserved
*
**************************************************************************/
/**
* @file include/factory.h
* @author wanlijin01(wanlijin01@baidu.com)
* @date 2018/07/10 22:09:57
* @brief
*
**/
#ifndef BAIDU_PADDLE_SERVING_PREDICTOR_FACTORY_H
#define BAIDU_PADDLE_SERVING_PREDICTOR_FACTORY_H
#include "common/inner_common.h"
namespace baidu {
namespace paddle_serving {
namespace predictor {
//////////////// DECLARE INTERFACE ////////////////
#define DECLARE_FACTORY_OBJECT(D, B) \
static int regist(const std::string& tag) { \
FactoryDerive<D, B>* factory = \
new (std::nothrow) FactoryDerive<D, B>();\
if (factory == NULL \
|| FactoryPool<B>::instance().register_factory(\
tag, factory) != 0) { \
LOG(FATAL) << "Failed regist factory:"\
<< #D << " in macro!"; \
return -1; \
} \
return 0; \
}
#define PDS_STR_CAT(a, b) PDS_STR_CAT_I(a, b)
#define PDS_STR_CAT_I(a, b) a ## b
#define DEFINE_FACTORY_OBJECT(D) \
__attribute__((constructor)) static void PDS_STR_CAT(GlobalRegistObject, __LINE__)(void) \
{ \
D::regist(#D); \
}
//////////////// REGISTER INTERFACE ////////////////
#define REGIST_FACTORY_OBJECT_IMPL(D, B) \
__attribute__((constructor)) static void PDS_STR_CAT(GlobalRegistObject, __LINE__)(void) \
{ \
::baidu::paddle_serving::predictor::FactoryDerive<D, B>* factory =\
new (::std::nothrow) ::baidu::paddle_serving::predictor::FactoryDerive<D, B>();\
if (factory == NULL \
|| ::baidu::paddle_serving::predictor::FactoryPool<B>::instance().register_factory(\
#D, factory) != 0) { \
LOG(FATAL) << "Failed regist factory:" \
<< #D << "->" << #B << " in macro!";\
return ; \
} \
return ; \
}
#define REGIST_FACTORY_OBJECT_IMPL_WITH_NAME(D, B, N) \
__attribute__((constructor)) static void PDS_STR_CAT(GlobalRegistObject, __LINE__)(void) \
{ \
::baidu::paddle_serving::predictor::FactoryDerive<D, B>* factory =\
new (::std::nothrow) ::baidu::paddle_serving::predictor::FactoryDerive<D, B>();\
if (factory == NULL \
|| ::baidu::paddle_serving::predictor::FactoryPool<B>::instance().register_factory(\
N, factory) != 0) { \
LOG(FATAL) << "Failed regist factory:" \
<< #D << "->" << #B << ", tag: " \
<< N << " in macro!"; \
return ; \
} \
LOG(WARNING) << "Succ regist factory:" \
<< #D << "->" << #B << ", tag: " \
<< N << " in macro!"; \
return ; \
}
template<typename B>
class FactoryBase {
public:
virtual B* gen() = 0;
virtual void del(B* obj) = 0;
};
template<typename D, typename B>
class FactoryDerive : public FactoryBase<B> {
public:
B* gen() {
return new(std::nothrow) D();
}
void del(B* obj) {
delete dynamic_cast<D*>(obj);
}
};
template<typename B>
class FactoryPool {
public:
static FactoryPool<B>& instance() {
static FactoryPool<B> singleton;
return singleton;
}
int register_factory(const std::string& tag,
FactoryBase<B>* factory) {
typename std::map<std::string, FactoryBase<B>*>::iterator it
= _pool.find(tag);
if (it != _pool.end()) {
LOG(FATAL) << "Insert duplicate with tag: "
<< tag;
return -1;
}
std::pair<
typename std::map<std::string, FactoryBase<B>*>::iterator,
bool> r = _pool.insert(std::make_pair(tag, factory));
if (!r.second) {
LOG(FATAL) << "Failed insert new factory with:"
<< tag;
return -1;
}
LOG(TRACE) << "Succ insert one factory, tag: " << tag
<< ", base type: " << typeid(B).name();
return 0;
}
B* generate_object(const std::string& tag) {
typename std::map<std::string, FactoryBase<B>*>::iterator it
= _pool.find(tag);
if (it == _pool.end() || it->second == NULL) {
LOG(FATAL) << "Not found factory pool, tag:"
<< tag << ", pool size: " << _pool.size();
return NULL;
}
return it->second->gen();
}
template<typename D>
void return_object(B* object) {
FactoryDerive<D, B> factory;
factory.del(object);
}
private:
std::map<std::string, FactoryBase<B>*> _pool;
};
} // predictor
} // paddle_serving
} // baidu
#endif //BAIDU_PADDLE_SERVING_PREDICTOR_FACTORY_H
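// Usage sketch (illustrative only): how a derived type is registered with
// FactoryPool through the macros above and later generated by tag. The
// DemoEngineBase/DemoEngineImpl names are hypothetical and exist only for
// this example.
#include "framework/factory.h"
namespace baidu {
namespace paddle_serving {
namespace predictor {
class DemoEngineBase {
public:
    virtual ~DemoEngineBase() {}
    virtual int run() = 0;
};
class DemoEngineImpl : public DemoEngineBase {
public:
    int run() { return 0; }
};
// Runs before main() and registers DemoEngineImpl under the tag "DemoEngineImpl".
REGIST_FACTORY_OBJECT_IMPL(DemoEngineImpl, DemoEngineBase);
inline int demo_generate_and_return() {
    DemoEngineBase* engine = FactoryPool<DemoEngineBase>::instance()
            .generate_object("DemoEngineImpl");
    if (engine == NULL) {
        return -1;
    }
    int ret = engine->run();
    // Give the object back through the pool so the matching deleter runs.
    FactoryPool<DemoEngineBase>::instance().return_object<DemoEngineImpl>(engine);
    return ret;
}
} // predictor
} // paddle_serving
} // baidu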
/* vim: set expandtab ts=4 sw=4 sts=4 tw=100: */
#ifndef BAIDU_PADDLE_SERVING_PREDICTOR_INFER_H
#define BAIDU_PADDLE_SERVING_PREDICTOR_INFER_H
#include "common/inner_common.h"
#include "framework/infer_data.h"
#include "framework/factory.h"
#include "framework/bsf.h"
namespace baidu {
namespace paddle_serving {
namespace predictor {
class InferEngine {
public:
virtual ~InferEngine() {}
virtual int proc_initialize(const comcfg::ConfigUnit& conf, bool version) {
return proc_initialize_impl(conf, version);
}
virtual int proc_finalize() {
return proc_finalize_impl();
}
virtual int thrd_initialize() {
return thrd_initialize_impl();
}
virtual int thrd_clear() {
return thrd_clear_impl();
}
virtual int thrd_finalize() {
return thrd_finalize_impl();
}
virtual int infer(const void* in, void* out, uint32_t batch_size = -1) {
return infer_impl1(in, out, batch_size);
}
virtual int reload() = 0;
virtual uint64_t version() const = 0;
// begin: framework inner call
virtual int proc_initialize_impl(
const comcfg::ConfigUnit& conf, bool version) = 0;
virtual int thrd_initialize_impl() = 0;
virtual int thrd_finalize_impl() = 0;
virtual int thrd_clear_impl() = 0;
virtual int proc_finalize_impl() = 0;
virtual int infer_impl1(
const void* in, void* out, uint32_t batch_size = -1) = 0;
virtual int infer_impl2(const BatchTensor& in, BatchTensor& out) = 0;
// end: framework inner call
};
class ReloadableInferEngine : public InferEngine {
public:
virtual ~ReloadableInferEngine() {}
union last_check_status {
time_t last_timestamp;
uint64_t last_md5sum;
uint64_t last_revision;
};
typedef im::bsf::Task<Tensor, Tensor> TaskT;
virtual int load(const std::string& data_path) = 0;
int proc_initialize_impl(const comcfg::ConfigUnit& conf, bool version) {
_reload_tag_file = conf["ReloadableMeta"].to_cstr();
_reload_mode_tag = conf["ReloadableType"].to_cstr();
_model_data_path = conf["ModelDataPath"].to_cstr();
_infer_thread_num = conf["RuntimeThreadNum"].to_uint32();
_infer_batch_size = conf["BatchInferSize"].to_uint32();
_infer_batch_align = conf["EnableBatchAlign"].to_uint32();
if (!check_need_reload() || load(_model_data_path) != 0) {
LOG(FATAL) << "Failed load model_data_path" << _model_data_path;
return -1;
}
if (parse_version_info(conf, version) != 0) {
LOG(FATAL) << "Failed parse version info";
return -1;
}
LOG(WARNING) << "Succ load model_data_path" << _model_data_path;
return 0;
}
int proc_initialize(const comcfg::ConfigUnit& conf, bool version) {
if (proc_initialize_impl(conf, version) != 0) {
LOG(FATAL) << "Failed proc initialize impl";
return -1;
}
// init bsf framework
if (_infer_thread_num <= 0) {
return 0;
}
im::bsf::TaskExecutor<TaskT>::instance()->set_thread_init_fn(
boost::bind(&InferEngine::thrd_initialize_impl, this));
im::bsf::TaskExecutor<TaskT>::instance()->set_thread_reset_fn(
boost::bind(&InferEngine::thrd_clear_impl, this));
im::bsf::TaskExecutor<TaskT>::instance()->set_thread_callback_fn(
boost::bind(&InferEngine::infer_impl2, this, _1, _2));
im::bsf::TaskExecutor<TaskT>::instance()->set_batch_size(_infer_batch_size);
im::bsf::TaskExecutor<TaskT>::instance()->set_batch_align(_infer_batch_align);
if (im::bsf::TaskExecutor<TaskT>::instance()->start(_infer_thread_num)
!= 0) {
LOG(FATAL) << "Failed start bsf executor, threads:" << _infer_thread_num;
return -1;
}
LOG(WARNING) << "Enable batch schedule framework, thread_num:"
<< _infer_thread_num << ", batch_size:" << _infer_batch_size
<< ", enable_batch_align:" << _infer_batch_align;
return 0;
}
int infer(const void* in, void* out, uint32_t batch_size = -1) {
if (_infer_thread_num <= 0) {
return infer_impl1(in, out, batch_size);
}
im::bsf::TaskManager<Tensor, Tensor> task_manager;
task_manager.schedule(*(const BatchTensor*)in, *(BatchTensor*)out);
task_manager.wait();
return 0;
}
int thrd_initialize() {
if (_infer_thread_num > 0) {
return 0;
}
return thrd_initialize_impl();
}
int thrd_clear() {
if (_infer_thread_num > 0) {
return 0;
}
return thrd_clear_impl();
}
int proc_finalize() {
if (proc_finalize_impl() != 0) {
LOG(FATAL) << "Failed proc finalize impl";
return -1;
}
if (_infer_thread_num > 0) {
im::bsf::TaskExecutor<TaskT>::instance()->stop();
}
return 0;
}
int reload() {
if (check_need_reload()) {
LOG(WARNING) << "begin reload model[" << _model_data_path << "].";
return load(_model_data_path);
}
return 0;
}
uint64_t version() const {
return _version;
}
uint32_t thread_num() const {
return _infer_thread_num;
}
private:
int parse_version_info(const comcfg::ConfigUnit& config, bool version) {
try {
std::string version_file = config["VersionFile"].to_cstr();
std::string version_type = config["VersionType"].to_cstr();
if (version_type == "abacus_version") {
if (parse_abacus_version(version_file) != 0) {
LOG(FATAL)
<< "Failed parse abacus version: " << version_file;
return -1;
}
} else if (version_type == "corece_uint64") {
if (parse_corece_uint64(version_file) != 0) {
LOG(FATAL)
<< "Failed parse corece_uint64: " << version_file;
return -1;
}
} else {
LOG(FATAL) << "Not supported version_type: " << version_type;
return -1;
}
        } catch (const comcfg::ConfigException& e) { // no version file
if (version) {
LOG(FATAL) << "Cannot parse version engine, err:"
<< e.what();
return -1;
}
LOG(WARNING) << "Consistency with non-versioned configure";
_version = uint64_t(-1);
}
return 0;
}
int parse_abacus_version(const std::string& version_file) {
FILE* fp = fopen(version_file.c_str(), "r");
if (!fp) {
LOG(FATAL) << "Failed open version file:" << version_file;
return -1;
}
bool has_parsed = false;
char buffer[1024] = {0};
while (fgets(buffer, sizeof(buffer), fp)) {
char* begin = NULL;
if (strncmp(buffer, "version:", 8) == 0 ||
strncmp(buffer, "Version:", 8) == 0) {
begin = buffer + 8;
} else if (strncmp(buffer, "version :", 9) == 0 ||
strncmp(buffer, "Version :", 9) == 0) {
begin = buffer + 9;
} else {
LOG(WARNING) << "Not version line: " << buffer;
continue;
}
std::string vstr = begin;
boost::algorithm::trim_if(
vstr, boost::algorithm::is_any_of("\n\r "));
char* endptr = NULL;
_version = strtoull(vstr.c_str(), &endptr, 10);
if (endptr == vstr.c_str()) {
LOG(FATAL)
<< "Invalid version: [" << buffer << "], end: ["
<< endptr << "]" << ", vstr: [" << vstr << "]";
fclose(fp);
return -1;
}
has_parsed = true;
}
if (!has_parsed) {
LOG(FATAL) << "Failed parse abacus version: " << version_file;
fclose(fp);
return -1;
}
LOG(WARNING) << "Succ parse abacus version: " << _version
<< " from: " << version_file;
fclose(fp);
return 0;
}
int parse_corece_uint64(const std::string& version_file) {
FILE* fp = fopen(version_file.c_str(), "r");
if (!fp) {
LOG(FATAL) << "Failed open version file:" << version_file;
return -1;
}
bool has_parsed = false;
char buffer[1024] = {0};
if (fgets(buffer, sizeof(buffer), fp)) {
char* endptr = NULL;
_version = strtoull(buffer, &endptr, 10);
if (endptr == buffer) {
LOG(FATAL) << "Invalid version: " << buffer;
fclose(fp);
return -1;
}
has_parsed = true;
}
if (!has_parsed) {
LOG(FATAL) << "Failed parse abacus version: " << version_file;
fclose(fp);
return -1;
}
LOG(WARNING) << "Succ parse corece version: " << _version
<< " from: " << version_file;
fclose(fp);
return 0;
}
bool check_need_reload() {
if (_reload_mode_tag == "timestamp_ne") {
return check_timestamp_ne();
} else if (_reload_mode_tag == "timestamp_gt") {
return check_timestamp_gt();
} else if (_reload_mode_tag == "md5sum") {
return check_md5sum();
} else if (_reload_mode_tag == "revision") {
return check_revision();
} else if (_reload_mode_tag == "none") {
return false;
} else {
LOG(FATAL) << "Not support check type: "
<< _reload_mode_tag;
return false;
}
}
bool check_timestamp_ne() {
struct stat st;
if (stat(_reload_tag_file.c_str(), &st) != 0) {
LOG(FATAL) << "Failed stat config file:"
<< _reload_tag_file;
return false;
}
if ((st.st_mode & S_IFREG) &&
st.st_mtime != _last_status.last_timestamp) {
_last_status.last_timestamp = st.st_mtime;
return true;
}
return false;
}
bool check_timestamp_gt() {
struct stat st;
if (stat(_reload_tag_file.c_str(), &st) != 0) {
LOG(FATAL) << "Failed stat config file:"
<< _reload_tag_file;
return false;
}
if ((st.st_mode & S_IFREG) &&
st.st_mtime > _last_status.last_timestamp) {
_last_status.last_timestamp = st.st_mtime;
return true;
}
return false;
}
bool check_md5sum() {
return false;
}
bool check_revision() {
return false;
}
protected:
std::string _model_data_path;
private:
std::string _reload_tag_file;
std::string _reload_mode_tag;
last_check_status _last_status;
uint32_t _infer_thread_num;
uint32_t _infer_batch_size;
bool _infer_batch_align;
uint64_t _version;
};
template<typename EngineCore>
struct ModelData {
ModelData() : current_idx(1) {
cores[0] = NULL;
cores[1] = NULL;
}
~ModelData() {
delete cores[0];
delete cores[1];
}
EngineCore* cores[2];
uint32_t current_idx;
};
template<typename EngineCore>
class DBReloadableInferEngine : public ReloadableInferEngine {
public:
virtual ~DBReloadableInferEngine() {}
int proc_initialize(const comcfg::ConfigUnit& conf, bool version) {
THREAD_KEY_CREATE(&_skey, NULL);
THREAD_MUTEX_INIT(&_mutex, NULL);
return ReloadableInferEngine::proc_initialize(conf, version);
}
virtual int load(const std::string& model_data_dir) {
if (_reload_vec.empty()) {
return 0;
}
for (uint32_t ti = 0; ti < _reload_vec.size(); ++ti) {
if (load_data(_reload_vec[ti], model_data_dir) != 0) {
LOG(FATAL) << "Failed reload engine model: " << ti;
return -1;
}
}
LOG(WARNING) << "Succ load engine, path: " << model_data_dir;
return 0;
}
int load_data(ModelData<EngineCore>* md, const std::string& data_path) {
uint32_t next_idx = (md->current_idx + 1) % 2;
if (md->cores[next_idx]) {
delete md->cores[next_idx];
}
md->cores[next_idx] = new (std::nothrow) EngineCore;
if (!md->cores[next_idx]
|| md->cores[next_idx]->create(data_path) != 0) {
LOG(FATAL) << "Failed create model, path: " << data_path;
return -1;
}
md->current_idx = next_idx;
return 0;
}
virtual int thrd_initialize_impl() {
// memory pool to be inited in non-serving-threads
if (MempoolWrapper::instance().thread_initialize() != 0) {
LOG(FATAL) << "Failed thread initialize mempool";
return -1;
}
ModelData<EngineCore>* md = new(std::nothrow) ModelData<EngineCore>;
if (!md || load_data(md, _model_data_path) != 0) {
LOG(FATAL) << "Failed create thread data from " << _model_data_path;
return -1;
}
THREAD_SETSPECIFIC(_skey, md);
im::bsf::AutoMutex lock(_mutex);
_reload_vec.push_back(md);
return 0;
}
int thrd_clear_impl() {
// for non-serving-threads
if (MempoolWrapper::instance().thread_clear() != 0) {
LOG(FATAL) << "Failed thread clear mempool";
return -1;
}
return 0;
}
int thrd_finalize_impl() {
return 0;
}
int proc_finalize_impl() {
THREAD_KEY_DELETE(_skey);
THREAD_MUTEX_DESTROY(&_mutex);
return 0;
}
EngineCore* get_core() {
ModelData<EngineCore>* md = (ModelData<EngineCore>*)THREAD_GETSPECIFIC(_skey);
if (!md) {
LOG(FATAL) << "Failed get thread specific data";
return NULL;
}
return md->cores[md->current_idx];
}
protected:
THREAD_KEY_T _skey;
THREAD_MUTEX_T _mutex;
std::vector<ModelData<EngineCore>*> _reload_vec;
private:
};
// Multiple EngineCore instances share the same model data
template<typename EngineCore>
class CloneDBReloadableInferEngine : public DBReloadableInferEngine<EngineCore> {
public:
virtual ~CloneDBReloadableInferEngine() {}
virtual int proc_initialize(const comcfg::ConfigUnit& conf, bool version) {
_pd = new (std::nothrow) ModelData<EngineCore>;
if (!_pd) {
LOG(FATAL) << "Failed to allocate for ProcData";
return -1;
}
return DBReloadableInferEngine<EngineCore>::proc_initialize(
conf, version);
}
virtual int load(const std::string& model_data_dir) {
        // load process-level model data
if (!_pd || DBReloadableInferEngine<EngineCore>::load_data(
_pd, model_data_dir) != 0) {
LOG(FATAL)
<< "Failed to create common model from ["
<< model_data_dir << "].";
return -1;
}
LOG(WARNING)
<< "Succ load common model[" << _pd->cores[_pd->current_idx]
<< "], path[" << model_data_dir << "].";
if (DBReloadableInferEngine<EngineCore>::_reload_vec.empty()) {
return 0;
}
for (uint32_t ti = 0; ti < DBReloadableInferEngine<EngineCore>::_reload_vec.size(); ++ti) {
if (load_data(DBReloadableInferEngine<EngineCore>::_reload_vec[ti],
_pd->cores[_pd->current_idx]) != 0) {
LOG(FATAL) << "Failed reload engine model: " << ti;
return -1;
}
}
LOG(WARNING) << "Succ load clone model, path[" << model_data_dir << "]";
return 0;
}
    // Load thread-level objects; multiple thread-level objects share the model data held by pd_core
int load_data(
ModelData<EngineCore>* td,
EngineCore* pd_core) {
uint32_t next_idx = (td->current_idx + 1) % 2;
if (td->cores[next_idx]) {
delete td->cores[next_idx];
}
td->cores[next_idx] = new (std::nothrow) EngineCore;
if (!td->cores[next_idx]
|| td->cores[next_idx]->clone(pd_core->get()) != 0) {
LOG(FATAL) << "Failed clone model from pd_core[ " << pd_core
<< "], idx[" << next_idx << "]";
return -1;
}
td->current_idx = next_idx;
LOG(WARNING)
<< "td_core[" << td->cores[td->current_idx]
<< "] clone model from pd_core["
<< pd_core << "] succ, cur_idx[" << td->current_idx << "].";
return 0;
}
virtual int thrd_initialize_impl() {
// memory pool to be inited in non-serving-threads
if (MempoolWrapper::instance().thread_initialize() != 0) {
LOG(FATAL) << "Failed thread initialize mempool";
return -1;
}
ModelData<EngineCore>* md = new(std::nothrow) ModelData<EngineCore>;
if (!md || load_data(md, _pd->cores[_pd->current_idx]) != 0) {
LOG(FATAL) << "Failed clone thread data, origin_core["
<< _pd->cores[_pd->current_idx] << "].";
return -1;
}
THREAD_SETSPECIFIC(DBReloadableInferEngine<EngineCore>::_skey, md);
im::bsf::AutoMutex lock(DBReloadableInferEngine<EngineCore>::_mutex);
DBReloadableInferEngine<EngineCore>::_reload_vec.push_back(md);
return 0;
}
protected:
    ModelData<EngineCore>* _pd; // process-level EngineCore; thread-level EngineCores share its model data
};
template<typename FluidFamilyCore>
class FluidInferEngine : public DBReloadableInferEngine<FluidFamilyCore> {
public:
FluidInferEngine() {}
~FluidInferEngine() {}
int infer_impl1(const void* in, void* out, uint32_t batch_size = -1) {
FluidFamilyCore* core
= DBReloadableInferEngine<FluidFamilyCore>::get_core();
if (!core || !core->get()) {
LOG(FATAL) << "Failed get fluid core in infer_impl()";
return -1;
}
if (!core->Run(in, out)) {
LOG(FATAL) << "Failed run fluid family core";
return -1;
}
return 0;
}
int infer_impl2(const BatchTensor& in, BatchTensor& out) {
return infer_impl1(&in, &out);
}
};
template<typename TensorrtFamilyCore>
class TensorrtInferEngine : public DBReloadableInferEngine<TensorrtFamilyCore> {
public:
TensorrtInferEngine() {}
~TensorrtInferEngine() {}
int infer_impl1(const void* in, void* out, uint32_t batch_size) {
TensorrtFamilyCore* core
= DBReloadableInferEngine<TensorrtFamilyCore>::get_core();
if (!core || !core->get()) {
LOG(FATAL) << "Failed get fluid core in infer_impl()";
return -1;
}
if (!core->Run(in, out, batch_size)) {
LOG(FATAL) << "Failed run fluid family core";
return -1;
}
return 0;
}
int infer_impl2(const BatchTensor& in, BatchTensor& out) {
LOG(FATAL) << "Tensortrt donot supports infer_impl2 yet!";
return -1;
}
};
template<typename AbacusFamilyCore>
class AbacusInferEngine : public CloneDBReloadableInferEngine<AbacusFamilyCore> {
public:
AbacusInferEngine() {}
~AbacusInferEngine() {}
int infer_impl1(const void* in, void* out, uint32_t batch_size = -1) {
LOG(FATAL) << "Abacus dnn engine must use predict interface";
return -1;
}
int infer_impl2(const BatchTensor& in, BatchTensor& out) {
LOG(FATAL) << "Abacus dnn engine must use predict interface";
return -1;
}
// Abacus special interface
int predict(uint32_t ins_num) {
AbacusFamilyCore* core
= CloneDBReloadableInferEngine<AbacusFamilyCore>::get_core();
if (!core || !core->get()) {
LOG(FATAL) << "Failed get abacus core in predict()";
return -1;
}
return core->predict(ins_num);
}
int set_use_fpga(bool use_fpga) {
AbacusFamilyCore* core
= CloneDBReloadableInferEngine<AbacusFamilyCore>::get_core();
if (!core || !core->get()) {
LOG(FATAL) << "Failed get abacus core in predict()";
return -1;
}
return core->set_use_fpga(use_fpga);
}
int debug() {
AbacusFamilyCore* core
= CloneDBReloadableInferEngine<AbacusFamilyCore>::get_core();
if (!core || !core->get()) {
LOG(FATAL) << "Failed get abacus core in debug()";
return -1;
}
return core->debug();
}
int set_search_id(uint64_t sid) {
AbacusFamilyCore* core
= CloneDBReloadableInferEngine<AbacusFamilyCore>::get_core();
if (!core || !core->get()) {
LOG(FATAL) << "Failed get abacus core in set_serach_id()";
return -1;
}
return core->set_search_id(sid);
}
int set_hidden_layer_dim(uint32_t dim) {
AbacusFamilyCore* core
= CloneDBReloadableInferEngine<AbacusFamilyCore>::get_core();
if (!core || !core->get()) {
LOG(FATAL) << "Failed get abacus core in set_layer_dim()";
return -1;
}
return core->set_hidden_layer_dim(dim);
}
int get_input(
uint32_t ins_idx, uint32_t* fea_num, void* in) {
AbacusFamilyCore* core
= CloneDBReloadableInferEngine<AbacusFamilyCore>::get_core();
if (!core || !core->get()) {
LOG(FATAL) << "Failed get abacus core in get_input()";
return -1;
}
return core->get_input(ins_idx, fea_num, in);
}
int get_layer_value(const std::string& name,
uint32_t ins_num, uint32_t fea_dim, void* out) {
AbacusFamilyCore* core
= CloneDBReloadableInferEngine<AbacusFamilyCore>::get_core();
if (!core || !core->get()) {
LOG(FATAL) << "Failed get abacus core in get_layer_value()";
return -1;
}
return core->get_layer_value(name, ins_num, fea_dim, out);
}
void set_position_idx(void* input, uint64_t fea, uint32_t ins_idx) {
AbacusFamilyCore* core
= CloneDBReloadableInferEngine<AbacusFamilyCore>::get_core();
if (!core || !core->get()) {
LOG(FATAL) << "Failed get abacus core in set_position_idx()";
return;
}
core->set_position_idx(input, fea, ins_idx);
return;
}
};
template<typename PaddleV2FamilyCore>
class PaddleV2InferEngine : public CloneDBReloadableInferEngine<PaddleV2FamilyCore> {
public:
PaddleV2InferEngine() {}
~PaddleV2InferEngine() {}
int infer_impl1(const void* in, void* out, uint32_t batch_size = -1) {
LOG(FATAL) << "Paddle V2 engine must use predict interface";
return -1;
}
int infer_impl2(const BatchTensor& in, BatchTensor& out) {
LOG(FATAL) << "Paddle V2 engine must use predict interface";
return -1;
}
};
typedef FactoryPool<InferEngine> StaticInferFactory;
class VersionedInferEngine : public InferEngine {
public:
VersionedInferEngine() {
_versions.clear();
}
~VersionedInferEngine() {}
int proc_initialize(const comcfg::ConfigUnit& conf) {
size_t version_num = conf["Version"].size();
for (size_t vi = 0; vi < version_num; ++vi) {
if (proc_initialize(conf["Version"][vi], true) != 0) {
LOG(FATAL) << "Failed proc initialize version: "
<< vi << ", model: " << conf["Name"].to_cstr();
return -1;
}
}
if (version_num == 0) {
if (proc_initialize(conf, false) != 0) {
LOG(FATAL) << "Failed proc intialize engine: "
<< conf["Name"].to_cstr();
return -1;
}
}
LOG(WARNING)
<< "Succ proc initialize engine: " << conf["Name"].to_cstr();
return 0;
}
int proc_initialize(const comcfg::ConfigUnit& conf, bool version) {
std::string engine_type = conf["Type"].to_cstr();
InferEngine* engine
= StaticInferFactory::instance().generate_object(
engine_type);
if (!engine) {
LOG(FATAL) << "Failed generate engine with type:"
<< engine_type;
return -1;
}
if (engine->proc_initialize(conf, version) != 0) {
LOG(FATAL) << "Failed initialize engine, type:"
<< engine_type;
return -1;
}
auto r = _versions.insert(std::make_pair(engine->version(), engine));
if (!r.second) {
LOG(FATAL) << "Failed insert item: " << engine->version()
<< ", type: " << engine_type;
return -1;
}
LOG(WARNING)
<< "Succ proc initialize version engine: " << engine->version();
return 0;
}
int proc_finalize() {
for (auto iter = _versions.begin(); iter != _versions.end(); ++iter) {
if (iter->second->proc_finalize() != 0) {
LOG(FATAL) << "Failed proc finalize version engine: " <<
iter->first;
}
LOG(WARNING)
<< "Succ proc finalize version engine: " << iter->first;
}
return 0;
}
int thrd_initialize() {
for (auto iter = _versions.begin(); iter != _versions.end(); ++iter) {
if (iter->second->thrd_initialize() != 0) {
LOG(FATAL) << "Failed thrd initialize version engine: " <<
iter->first;
return -1;
}
LOG(WARNING)
<< "Succ thrd initialize version engine: " << iter->first;
}
return 0;
}
int thrd_clear() {
for (auto iter = _versions.begin(); iter != _versions.end(); ++iter) {
if (iter->second->thrd_clear() != 0) {
LOG(FATAL) << "Failed thrd clear version engine: " <<
iter->first;
return -1;
}
LOG(DEBUG) << "Succ thrd clear version engine: " << iter->first;
}
return 0;
}
int thrd_finalize() {
for (auto iter = _versions.begin(); iter != _versions.end(); ++iter) {
if (iter->second->thrd_finalize() != 0) {
LOG(FATAL) << "Failed thrd finalize version engine: " <<
iter->first;
return -1;
}
LOG(WARNING) << "Succ thrd finalize version engine: " << iter->first;
}
return 0;
}
int reload() {
for (auto iter = _versions.begin(); iter != _versions.end(); ++iter) {
if (iter->second->reload() != 0) {
LOG(FATAL) << "Failed reload version engine: " <<
iter->first;
return -1;
}
LOG(WARNING) << "Succ reload version engine: " << iter->first;
}
return 0;
}
uint64_t version() const {
InferEngine* engine = default_engine();
if (engine) {
return engine->version();
} else {
return uint64_t(-1);
}
}
// inference interface
InferEngine* default_engine() const {
if (_versions.size() != 1) {
LOG(FATAL) << "Ambiguous default engine version:"
<< _versions.size();
return NULL;
}
return _versions.begin()->second;
}
int infer(const void* in, void* out, uint32_t batch_size) {
InferEngine* engine = default_engine();
if (!engine) {
LOG(WARNING) << "fail to get default engine";
            return -1;
}
return engine->infer(in, out, batch_size);
}
template<typename T>
T* get_core() {
InferEngine* engine = default_engine();
if (!engine) {
LOG(WARNING) << "fail to get core";
return NULL;
}
auto db_engine = dynamic_cast<DBReloadableInferEngine<T>*>(engine);
if (db_engine) {
return db_engine->get_core();
}
LOG(WARNING) << "fail to get core";
return NULL;
}
// versioned inference interface
int infer(
const void* in, void* out, uint32_t batch_size, uint64_t version) {
auto iter = _versions.find(version);
if (iter == _versions.end()) {
LOG(FATAL) << "Not found version engine: " << version;
return -1;
}
return iter->second->infer(in, out, batch_size);
}
template<typename T>
T* get_core(uint64_t version) {
auto iter = _versions.find(version);
if (iter == _versions.end()) {
LOG(FATAL) << "Not found version engine: " << version;
return NULL;
}
auto db_engine = dynamic_cast<DBReloadableInferEngine<T>*>(iter->second);
if (db_engine) {
return db_engine->get_core();
}
LOG(WARNING) << "fail to get core for " << version;
return NULL;
}
// --
int proc_initialize_impl(const comcfg::ConfigUnit& conf, bool) { return -1; }
int thrd_initialize_impl() { return -1; }
int thrd_finalize_impl() { return -1; }
int thrd_clear_impl() { return -1; }
int proc_finalize_impl() { return -1; }
int infer_impl1(const void* in, void* out, uint32_t batch_size = -1) { return -1; }
int infer_impl2(const BatchTensor& in, BatchTensor& out) { return -1; }
private:
boost::unordered_map<uint64_t, InferEngine*> _versions;
};
class InferManager {
public:
static InferManager& instance() {
static InferManager ins;
return ins;
}
int proc_initialize(const char* path, const char* file) {
comcfg::Configure conf;
if (conf.load(path, file) != 0) {
LOG(FATAL) << "failed load infer config, path:"
<< path << "/" << file;
return -1;
}
size_t engine_num = conf["Engine"].size();
for (size_t ei = 0; ei < engine_num; ++ei) {
std::string engine_name = conf["Engine"][ei]["Name"].to_cstr();
VersionedInferEngine* engine = new (std::nothrow) VersionedInferEngine();
if (!engine) {
LOG(FATAL) << "Failed generate versioned engine: " << engine_name;
return -1;
}
if (engine->proc_initialize(conf["Engine"][ei]) != 0) {
LOG(FATAL) << "Failed initialize version engine, name:"
<< engine_name;
return -1;
}
auto r = _map.insert(std::make_pair(engine_name, engine));
if (!r.second) {
LOG(FATAL) << "Failed insert item: " << engine_name;
return -1;
}
LOG(WARNING) << "Succ proc initialize engine: " << engine_name;
}
return 0;
}
int thrd_initialize() {
for (auto it = _map.begin(); it != _map.end(); ++it) {
if (it->second->thrd_initialize() != 0) {
LOG(FATAL) << "Failed thrd initialize engine, name: "
<< it->first;
return -1;
}
LOG(WARNING) << "Succ thrd initialize engine, name: "
<< it->first;
}
return 0;
}
int thrd_clear() {
for (auto it = _map.begin(); it != _map.end(); ++it) {
if (it->second->thrd_clear() != 0) {
LOG(FATAL) << "Failed thrd clear engine, name: "
<< it->first;
return -1;
}
}
return 0;
}
int reload() {
for (auto it = _map.begin(); it != _map.end(); ++it) {
if (it->second->reload() != 0) {
LOG(FATAL) << "Failed reload engine, name: "
<< it->first;
return -1;
}
}
return 0;
}
int thrd_finalize() {
for (auto it = _map.begin(); it != _map.end(); ++it) {
if (it->second->thrd_finalize() != 0) {
LOG(FATAL) << "Failed thrd finalize engine, name: "
<< it->first;
return -1;
}
LOG(WARNING) << "Succ thrd finalize engine, name: "
<< it->first;
}
return 0;
}
int proc_finalize() {
for (auto it = _map.begin(); it != _map.end(); ++it) {
if (it->second->proc_finalize() != 0) {
LOG(FATAL) << "Failed proc finalize engine, name: "
<< it->first;
return -1;
}
LOG(WARNING) << "Succ proc finalize engine, name: "
<< it->first;
}
return 0;
}
// Inference interface
int infer(const char* model_name, const void* in, void* out, uint32_t batch_size = -1) {
auto it = _map.find(model_name);
if (it == _map.end()) {
LOG(WARNING) << "Cannot find engine in map, model name:"
<< model_name;
return -1;
}
return it->second->infer(in, out, batch_size);
}
template<typename T>
T* get_core(const char* model_name) {
auto it = _map.find(model_name);
if (it == _map.end()) {
LOG(WARNING) << "Cannot find engine in map, model name:"
<< model_name;
return NULL;
}
auto infer_engine = dynamic_cast<DBReloadableInferEngine<T>*>(
it->second->default_engine());
if (infer_engine) {
return infer_engine->get_core();
}
LOG(WARNING) << "fail to get core for " << model_name;
return NULL;
}
// Versioned inference interface
int infer(const char* model_name, const void* in, void* out,
uint32_t batch_size, uint64_t version) {
auto it = _map.find(model_name);
if (it == _map.end()) {
LOG(WARNING) << "Cannot find engine in map, model name:"
<< model_name;
return -1;
}
return it->second->infer(in, out, batch_size, version);
}
template<typename T>
T* get_core(const char* model_name, uint64_t version) {
auto it = _map.find(model_name);
if (it == _map.end()) {
LOG(WARNING) << "Cannot find engine in map, model name:"
<< model_name;
return NULL;
}
return it->second->get_core<T>(version);
}
int query_version(const std::string& model, uint64_t& version) {
auto it = _map.find(model);
if (it == _map.end()) {
LOG(WARNING) << "Cannot find engine in map, model name:"
<< model;
return -1;
}
auto infer_engine = it->second->default_engine();
if (!infer_engine) {
LOG(WARNING) << "Cannot get default engine for model:"
<< model;
return -1;
}
version = infer_engine->version();
LOG(DEBUG) << "Succ get version: " << version << " for model: "
<< model;
return 0;
}
private:
boost::unordered_map<std::string, VersionedInferEngine*> _map;
};
} // predictor
} // paddle_serving
} // baidu
#endif // BAIDU_PADDLE_SERVING_PREDICTOR_INFER_H
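// Usage sketch (illustrative only): the typical InferManager lifecycle that
// Resource drives further below: process-level init, per-thread init,
// inference by engine name, then teardown. The "./conf"/"model_toolkit.conf"
// paths and the "demo_model" engine name are hypothetical placeholders.
#include "framework/infer.h"
inline int demo_infer_manager() {
    using baidu::paddle_serving::predictor::BatchTensor;
    using baidu::paddle_serving::predictor::InferManager;
    InferManager& mgr = InferManager::instance();
    if (mgr.proc_initialize("./conf", "model_toolkit.conf") != 0) {
        return -1;
    }
    if (mgr.thrd_initialize() != 0) {   // once in every serving thread
        return -1;
    }
    BatchTensor in;
    BatchTensor out;
    // Engines scheduled through bsf take BatchTensor pointers as in/out.
    int ret = mgr.infer("demo_model", &in, &out);
    mgr.thrd_clear();                   // after each request
    mgr.thrd_finalize();
    mgr.proc_finalize();
    return ret;
}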
#ifndef BAIDU_PADDLE_SERVING_PREDICTOR_INFER_DATA_H
#define BAIDU_PADDLE_SERVING_PREDICTOR_INFER_DATA_H
#include "common/inner_common.h"
namespace baidu {
namespace paddle_serving {
namespace predictor {
enum DataType {
FLOAT32,
INT64
};
class DataBuf {
public:
DataBuf() : _data(NULL), _size(0), _owned(true) {}
DataBuf(size_t size)
: _data(new char[size]), _size(size), _owned(true) {}
DataBuf(void* data, size_t size)
: _data(data), _size(size), _owned(false) {}
DataBuf(void* data, size_t size, bool owned)
: _data(data), _size(size), _owned(owned) {}
void* data() const {
return _data;
}
size_t size() const {
return _size;
}
void free() {
_size = 0;
if (_owned) {
delete[] (char*)_data;
}
}
~DataBuf() {
free();
}
private:
void* _data;
size_t _size;
bool _owned;
};
struct Tensor {
Tensor() {
shape.clear();
        for (size_t li = 0; li < lod.size(); ++li) {
lod[li].clear();
}
lod.clear();
}
Tensor(const Tensor& tensor) {
name = tensor.name;
data = tensor.data;
type = tensor.type;
shape.assign(tensor.shape.begin(), tensor.shape.end());
        for (size_t li = 0; li < tensor.lod.size(); ++li) {
std::vector<size_t> l;
l.assign(tensor.lod[li].begin(), tensor.lod[li].end());
lod.push_back(l);
}
}
~Tensor() {
shape.clear();
}
size_t ele_byte() const {
if (type == INT64) {
return sizeof(int64_t);
} else {
return sizeof(float);
}
}
bool valid() const {
if (shape.empty()) {
if (data.data() || data.size()) {
CFATAL_LOG("data should be empty");
return false;
}
return true;
}
if (!data.data() || !data.size()) {
CFATAL_LOG("data cannot empty");
return false;
}
size_t byte_size = 1;
for (size_t si = 0; si < shape.size(); ++si) {
byte_size *= shape[si];
}
if (byte_size * ele_byte() != data.size()) {
CFATAL_LOG("wrong data size: %ld vs. %ld",
byte_size * ele_byte(), data.size());
return false;
        }
        return true;
    }
size_t shape0() {
if (shape.empty()) {
return 0;
}
return shape[0];
}
std::string name;
std::vector<int> shape;
DataBuf data;
DataType type;
std::vector<std::vector<size_t> > lod;
};
class BatchTensor {
public:
BatchTensor() {}
~BatchTensor() {
_features.clear();
}
BatchTensor(const BatchTensor& tv) {
_features.assign(
tv.features().begin(), tv.features().end());
}
Tensor& operator[](int index) {
return _features[index];
}
const Tensor& operator[](int index) const {
return _features[index];
}
void push_back(const Tensor& tensor) {
_features.push_back(tensor);
}
size_t count() const {
return _features.size();
}
size_t size() const {
// shape0 indicates batch_size
if (count() <= 0 || _features[0].shape.size() <= 0) {
return 0;
}
return _features[0].shape[0];
}
const std::vector<Tensor>& features() const {
return _features;
}
void clear() {
_features.clear();
}
private:
std::vector<Tensor> _features;
};
} // predictor
} // paddle_serving
} // baidu
#endif // BAIDU_PADDLE_SERVING_PREDICTOR_INFER_DATA_H
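// Usage sketch (illustrative only): building a FLOAT32 Tensor whose DataBuf
// size matches its shape, then batching it. The feature name and the static
// buffer are placeholders for this example.
#include "framework/infer_data.h"
inline baidu::paddle_serving::predictor::BatchTensor demo_build_batch() {
    using namespace baidu::paddle_serving::predictor;
    static float buf[2 * 3] = {0.0f};  // 2 instances with 3 float features each
    Tensor t;
    t.name = "demo_feature";
    t.type = FLOAT32;
    t.shape.push_back(2);
    t.shape.push_back(3);
    // Non-owning DataBuf of 2 * 3 * sizeof(float) bytes, consistent with the
    // shape, so t.valid() holds and copies of t never free the buffer.
    t.data = DataBuf(buf, sizeof(buf));
    BatchTensor batch;
    batch.push_back(t);                // batch.size() == t.shape[0] == 2
    return batch;
}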
#ifndef BAIDU_PADDLE_SERVING_PREDICTOR_LOGGER_H
#define BAIDU_PADDLE_SERVING_PREDICTOR_LOGGER_H
#include <base/comlog_sink.h>
#include "common/inner_common.h"
namespace baidu {
namespace paddle_serving {
namespace predictor {
class LoggerWrapper {
public:
static LoggerWrapper& instance() {
static LoggerWrapper lw;
return lw;
}
int initialize(const std::string& path, const std::string& name) {
if (com_loadlog(path.c_str(), name.c_str()) != 0) {
LOG(ERROR) << "Fail to com_loadlog from: "
<< path << "/" << name;
return -1;
}
return 0;
}
int finalize() {
return 0;
}
};
}
}
}
#endif
#ifndef BAIDU_PADDLE_SERVING_PREDICTOR_MANAGER_H
#define BAIDU_PADDLE_SERVING_PREDICTOR_MANAGER_H
#include "common/inner_common.h"
#include "framework/workflow.h"
#include "common/constant.h"
#include "framework/service.h"
namespace baidu {
namespace paddle_serving {
namespace predictor {
class Workflow;
//class InferService;
//class ParallelInferService;
template<typename I>
I* create_item_impl() {
return new (std::nothrow) I();
}
template<>
inline InferService* create_item_impl<InferService>() {
if (FLAGS_use_parallel_infer_service) {
return new (std::nothrow) ParallelInferService();
} else {
return new (std::nothrow) InferService();
}
}
template<typename T>
class Manager {
public:
static Manager<T>& instance() {
static Manager<T> mgr;
return mgr;
}
int initialize(const std::string path, const std::string file) {
comcfg::Configure conf;
if (conf.load(path.c_str(), file.c_str()) != 0) {
LOG(FATAL)
<< "Failed load manager<" << typeid(T).name()
<< "> configure!";
return -1;
}
try {
uint32_t item_size = conf[T::tag()].size();
for (uint32_t ii = 0; ii < item_size; ii++) {
std::string name = conf[T::tag()][ii]["name"].to_cstr();
T* item = new (std::nothrow) T();
if (item == NULL) {
LOG(FATAL) << "Failed create " << T::tag() << " for: " << name;
return -1;
}
if (item->init(conf[T::tag()][ii]) != 0) {
LOG(FATAL)
<< "Failed init item: " << name << " at:"
<< ii << "!";
return -1;
}
std::pair<
typename boost::unordered_map<std::string, T*>::iterator, bool>
r = _item_map.insert(std::make_pair(name, item));
if (!r.second) {
LOG(FATAL)
<< "Failed insert item:" << name << " at:"
<< ii << "!";
return -1;
}
LOG(TRACE)
<< "Succ init item:" << name << " from conf:"
<< path << "/" << file << ", at:" << ii << "!";
}
        } catch (const comcfg::ConfigException& e) {
LOG(FATAL)
<< "Config[" << path << "/" << file << "] format "
<< "invalid, err: " << e.what();
return -1;
} catch (...) {
LOG(FATAL)
<< "Config[" << path << "/" << file << "] format "
<< "invalid, load failed";
return -1;
}
return 0;
}
T* create_item() {
return create_item_impl<T>();
}
T* item(const std::string& name) {
typename boost::unordered_map<std::string, T*>::iterator it;
it = _item_map.find(name);
if (it == _item_map.end()) {
LOG(WARNING) << "Not found item: " << name << "!";
return NULL;
}
return it->second;
}
T& operator[](const std::string& name) {
T* i = item(name);
if (i == NULL) {
std::string err = "Not found item in manager for:";
err += name;
throw std::overflow_error(err);
}
return *i;
}
int reload() {
int ret = 0;
typename boost::unordered_map<std::string, T*>::iterator it
= _item_map.begin();
for (; it != _item_map.end(); ++it) {
if (it->second->reload() != 0) {
LOG(WARNING) << "failed reload item: " << it->first << "!";
ret = -1;
}
}
LOG(TRACE) << "Finish reload "
<< _item_map.size()
<< " " << T::tag() << "(s)";
return ret;
}
int finalize() {
return 0;
}
private:
Manager<T>() {}
private:
boost::unordered_map<std::string, T*> _item_map;
};
typedef Manager<InferService> InferServiceManager;
typedef Manager<Workflow> WorkflowManager;
} // predictor
} // paddle_serving
} // baidu
#endif
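// Usage sketch (illustrative only): loading workflow definitions through the
// Manager template above and looking one up by name. The framework/manager.h
// include path, the "./conf"/"workflow.conf" locations and the "workflow1"
// item name are assumed placeholders.
#include "framework/manager.h"
inline int demo_workflow_manager() {
    using baidu::paddle_serving::predictor::Workflow;
    using baidu::paddle_serving::predictor::WorkflowManager;
    if (WorkflowManager::instance().initialize("./conf", "workflow.conf") != 0) {
        return -1;
    }
    Workflow* wf = WorkflowManager::instance().item("workflow1");
    if (wf == NULL) {
        return -1;
    }
    // reload() walks every registered item and calls its reload() hook.
    return WorkflowManager::instance().reload();
}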
#include "mc_cache.h"
#include <bvar/bvar.h> // bvar
namespace baidu {
namespace paddle_serving {
namespace predictor {
::bvar::Adder<int> g_mc_cache_seek_error_count("mc_cache_seek_error_count"); // number of failed cache seeks
::bvar::Window<::bvar::Adder<int> > g_mc_cache_seek_error_window(
"mc_cache_seek_error_window", &g_mc_cache_seek_error_count,
::bvar::FLAGS_bvar_dump_interval);
::bvar::Adder<int> g_mc_cache_seek_count("mc_cache_seek_count"); // total number of cache seeks
::bvar::Window<::bvar::Adder<int> > g_mc_cache_seek_window(
"mc_cache_seek_window", &g_mc_cache_seek_count,
::bvar::FLAGS_bvar_dump_interval);
float get_mc_cache_seek_error_percent(void*) {
if (g_mc_cache_seek_window.get_value() <= 0) {
return 0;
}
return g_mc_cache_seek_error_window.get_value()
/ (float) g_mc_cache_seek_window.get_value();
}
::bvar::PassiveStatus<float> g_mc_cache_seek_error_percent("mc_cache_seek_error_percent",
get_mc_cache_seek_error_percent, NULL);
McCache::McCache() {
_pcache = NULL;
_cache_unitsize = 0;
}
int McCache::initialize(uint32_t cache_capacity, uint32_t unitsize) {
_pcache = mc_creat_cache(cache_capacity, unitsize);
if (_pcache == NULL) {
LOG(ERROR) << "create mc_cache capacity[" << cache_capacity
<< "], unitsize[" << unitsize << "] failed.";
return -1;
}
_cache_unitsize = unitsize;
return 0;
}
int McCache::finalize() {
    // destroy the cache structure
if (mc_destroy_cache(_pcache) == RT_NOTICE_NONE_PROCESSED) {
LOG(ERROR) << "input pcache[" << _pcache << "] destroy failed";
return -1;
}
return 0;
}
int McCache::add_item(uint32_t* sign, void* pdata) {
int ret = 0;
{
BAIDU_SCOPED_LOCK(_mutex);
ret = mc_additem(_pcache, sign, pdata, _cache_unitsize);
}
return (ret - 1);
}
int McCache::add_item(uint32_t* sign, void* pdata, uint32_t unitsize) {
    CHECK_GT(_cache_unitsize, unitsize) << "input unitsize should be less than _cache_unitsize";
int ret = 0;
{
BAIDU_SCOPED_LOCK(_mutex);
ret = mc_additem(_pcache, sign, pdata, unitsize);
}
return (ret - 1);
}
int McCache::seek_item(uint32_t* sign, void* pdata) const {
int ret = 0;
{
BAIDU_SCOPED_LOCK(_mutex);
ret = mc_seekitem(_pcache, sign, pdata, _cache_unitsize);
}
g_mc_cache_seek_count << 1;
if (ret != RT_NOTICE_PROCESSED) {
g_mc_cache_seek_error_count << 1;
}
return (ret - 1);
}
int McCache::remove_item(uint32_t* sign) {
int ret = 0;
{
BAIDU_SCOPED_LOCK(_mutex);
ret = mc_removeitem(_pcache, sign);
}
if (ret != RT_NOTICE_PROCESSED) {
LOG(WARNING) << "remove item from cache failed, errno[" << ret
<< "], sign[" << *sign << "].";
return -1;
}
return 0;
}
const uint32_t McCache::get_cache_unitsize() {
return _cache_unitsize;
}
} // namespace predictor
} // namespace paddle_serving
} // namespace baidu
#ifndef BAIDU_PADDLE_SERVING_PREDICTOR_FRAMEWORK_MC_CACHE_H
#define BAIDU_PADDLE_SERVING_PREDICTOR_FRAMEWORK_MC_CACHE_H
#include <stdint.h>
#include <mc_cache.h> // mc_creat_cache
#include <base/scoped_lock.h> // BAIDU_SCOPED_LOCK
namespace baidu {
namespace paddle_serving {
namespace predictor {
class McCache {
public:
McCache();
int initialize(uint32_t cache_capacity, uint32_t unitsize);
int finalize();
int add_item(uint32_t* sign, void* pdata);
int add_item(uint32_t* sign, void* pdata, uint32_t unitsize);
int seek_item(uint32_t* sign, void* pdata) const;
int remove_item(uint32_t* sign);
const uint32_t get_cache_unitsize();
private:
    mc_cache* _pcache; // cache pointer
    uint32_t _cache_unitsize; // cache unit size
mutable base::Mutex _mutex;
};
} // namespace predictor
} // namespace paddle_serving
} // namespace baidu
#endif // BAIDU_PADDLE_SERVING_PREDICTOR_FRAMEWORK_MC_CACHE_H
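// Usage sketch (illustrative only): creating a cache, inserting one record
// and seeking it back. The record layout, the two-word signature and the
// assumption that add_item/seek_item return 0 on success (the (ret - 1)
// mapping above) are illustrative; records must not exceed the configured
// unit size.
#include "framework/mc_cache.h"
#include <string.h>
struct DemoCacheRecord {
    float score;
    char payload[60];
};
inline int demo_mc_cache() {
    using baidu::paddle_serving::predictor::McCache;
    McCache cache;
    if (cache.initialize(1024, sizeof(DemoCacheRecord)) != 0) {
        return -1;
    }
    uint32_t sign[2] = {0x12345678u, 0x9abcdef0u};  // placeholder signature
    DemoCacheRecord rec;
    memset(&rec, 0, sizeof(rec));
    rec.score = 0.5f;
    if (cache.add_item(sign, &rec) != 0) {
        cache.finalize();
        return -1;
    }
    DemoCacheRecord hit;
    memset(&hit, 0, sizeof(hit));
    int ret = cache.seek_item(sign, &hit);          // fills hit on a cache hit
    cache.finalize();
    return ret;
}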
#include "common/inner_common.h"
#include "framework/memory.h"
namespace baidu {
namespace paddle_serving {
namespace predictor {
int MempoolWrapper::initialize() {
if (THREAD_KEY_CREATE(&_bspec_key, NULL) != 0) {
LOG(ERROR) << "unable to create thread_key of thrd_data";
return -1;
}
if (THREAD_SETSPECIFIC(_bspec_key, NULL) != 0) {
LOG(ERROR) << "failed initialize bsepecific key to null";
return -1;
}
return 0;
}
int MempoolWrapper::thread_initialize() {
_region.init();
im::Mempool* p_mempool = new (std::nothrow) im::Mempool(&_region);
if (p_mempool == NULL) {
LOG(ERROR) << "Failed create thread mempool";
return -1;
}
if (THREAD_SETSPECIFIC(_bspec_key, p_mempool) != 0) {
LOG(ERROR) << "unable to set the thrd_data";
delete p_mempool;
return -1;
}
LOG(WARNING) << "Succ thread initialize mempool wrapper";
return 0;
}
int MempoolWrapper::thread_clear() {
im::Mempool* p_mempool = (im::Mempool*) THREAD_GETSPECIFIC(
_bspec_key);
if (p_mempool) {
p_mempool->release_block();
_region.reset();
}
return 0;
}
void* MempoolWrapper::malloc(size_t size) {
im::Mempool* p_mempool = (im::Mempool*) THREAD_GETSPECIFIC(
_bspec_key);
if (!p_mempool) {
LOG(WARNING) << "Cannot malloc memory:" << size
<< ", since mempool is not thread initialized";
return NULL;
}
return p_mempool->malloc(size);
}
} // predictor
} // paddle_serving
} // baidu
#ifndef BAIDU_PADDLE_SERVING_PREDICTOR_MEMORY_H
#define BAIDU_PADDLE_SERVING_PREDICTOR_MEMORY_H
#include "common/inner_common.h"
#include "mempool.h"
namespace baidu {
namespace paddle_serving {
namespace predictor {
class MempoolWrapper {
public:
MempoolWrapper() {}
static MempoolWrapper& instance() {
static MempoolWrapper mempool;
return mempool;
}
int initialize();
int thread_initialize();
int thread_clear();
void* malloc(size_t size);
private:
im::fugue::memory::Region _region;
THREAD_KEY_T _bspec_key;
};
} // predictor
} // paddle_serving
} // baidu
#endif
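// Usage sketch (illustrative only): the per-thread mempool lifecycle. There is
// no per-allocation free; thread_clear() releases everything the thread has
// allocated since the previous clear.
#include "framework/memory.h"
inline int demo_mempool() {
    using baidu::paddle_serving::predictor::MempoolWrapper;
    if (MempoolWrapper::instance().initialize() != 0) {          // once per process
        return -1;
    }
    if (MempoolWrapper::instance().thread_initialize() != 0) {   // once per thread
        return -1;
    }
    void* buf = MempoolWrapper::instance().malloc(1024);
    if (buf == NULL) {
        return -1;
    }
    // ... use buf while serving the current request ...
    return MempoolWrapper::instance().thread_clear();            // after each request
}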
#pragma once
#include "common/inner_common.h"
namespace baidu {
namespace paddle_serving {
namespace predictor {
class IMerger {
public:
virtual bool merge(const google::protobuf::Message*, google::protobuf::Message*) = 0;
};
class DefaultMerger : public IMerger {
public:
bool merge(
const google::protobuf::Message* s, google::protobuf::Message* d) {
if (!s || !d) {
return false;
}
d->MergeFrom(*s);
return true;
}
};
template<typename T>
class Singleton {
public:
static T* instance() {
static T ins;
return &ins;
}
};
class MergerManager {
public:
typedef IMerger MergerT;
static MergerManager& instance() {
static MergerManager ins;
return ins;
}
bool set(std::string name, MergerT* merger) {
if (_mergers.find(name) != _mergers.end()) {
LOG(ERROR) << "Duplicated merger: " << name;
return false;
}
_mergers[name] = merger;
return true;
}
bool get(const std::string& name, MergerT*& merger) {
std::map<std::string, MergerT*>::iterator iter =
_mergers.find(name);
if (iter == _mergers.end()) {
return false;
}
merger = iter->second;
return true;
}
private:
MergerManager() {
set("default", Singleton<DefaultMerger>::instance());
}
private:
std::map<std::string, MergerT*> _mergers;
};
#define DECLARE_MERGER(M) \
static bool regist_self() {\
if (!baidu::paddle_serving::predictor::MergerManager::instance().set(\
#M, baidu::paddle_serving::predictor::Singleton<M>::instance())) {\
LOG(ERROR) << "Failed regist merger: " << #M;\
return false;\
}\
LOG(INFO) << "Succ regist merger: " << #M;\
return true;\
}
#define PDS_STR_CAT(a, b) PDS_STR_CAT_I(a, b)
#define PDS_STR_CAT_I(a, b) a ## b
#define DEFINE_MERGER(M)\
__attribute__((constructor)) static void PDS_STR_CAT(GlobalRegistObject, __LINE__)(void)\
{\
M::regist_self();\
}
} // predictor
} // paddle_serving
} // baidu
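// Usage sketch (illustrative only): declaring and registering a custom merger
// with the macros above. DemoMerger is a hypothetical name; it simply
// delegates to MergeFrom like DefaultMerger, but a real merger could combine
// partial responses differently.
class DemoMerger : public baidu::paddle_serving::predictor::IMerger {
public:
    bool merge(const google::protobuf::Message* s,
               google::protobuf::Message* d) {
        if (!s || !d) {
            return false;
        }
        d->MergeFrom(*s);
        return true;
    }
    DECLARE_MERGER(DemoMerger);
};
DEFINE_MERGER(DemoMerger);
// Lookup by name when a response is assembled, e.g.:
//   baidu::paddle_serving::predictor::IMerger* merger = NULL;
//   if (baidu::paddle_serving::predictor::MergerManager::instance().get(
//           "DemoMerger", merger)) {
//       merger->merge(&partial_response, &final_response);
//   }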
#include "framework/op_repository.h"
#include "op/op.h"
namespace baidu {
namespace paddle_serving {
namespace predictor {
Op* OpRepository::get_op(std::string op_type) {
ManagerMap::iterator iter = _repository.find(op_type);
Op* op = NULL;
if (iter != _repository.end()) {
op = (iter->second)->get_op();
} else {
LOG(FATAL) << "Try to create unknown op[" << op_type << "]";
}
return op;
}
void OpRepository::return_op(Op* op) {
if (op == NULL) {
LOG(FATAL) << "Try to return NULL op";
return;
}
ManagerMap::iterator iter = _repository.find(op->type());
if (iter != _repository.end()) {
iter->second->return_op(op);
} else {
LOG(FATAL) << "Try to return unknown op[" << op << "], op_type["
<< op->type() << "].";
}
}
void OpRepository::return_op(const std::string& op_type, Op* op) {
if (op == NULL) {
LOG(FATAL) << "Try to return NULL op";
return;
}
ManagerMap::iterator iter = _repository.find(op_type);
if (iter != _repository.end()) {
iter->second->return_op(op);
} else {
LOG(FATAL) << "Try to return unknown op[" << op << "], op_type["
<< op_type << "].";
}
}
} // namespace predictor
} // namespace paddle_serving
} // namespace baidu
#ifndef BAIDU_PADDLE_SERVING_PREDICTOR_OP_REPOSITORY_H
#define BAIDU_PADDLE_SERVING_PREDICTOR_OP_REPOSITORY_H
#include "common/inner_common.h"
namespace baidu {
namespace paddle_serving {
namespace predictor {
#define REGISTER_OP(op) \
::baidu::paddle_serving::predictor::OpRepository::instance().regist_op<op>(#op)
class Op;
class Factory {
public:
virtual Op* get_op() = 0;
virtual void return_op(Op* op) = 0;
};
template<typename OP_TYPE>
class OpFactory : public Factory {
public:
Op* get_op() {
return base::get_object<OP_TYPE>();
}
void return_op(Op* op) {
base::return_object<OP_TYPE>(dynamic_cast<OP_TYPE*>(op));
}
static OpFactory<OP_TYPE>& instance() {
static OpFactory<OP_TYPE> ins;
return ins;
}
};
class OpRepository {
public:
typedef boost::unordered_map<std::string, Factory*> ManagerMap;
OpRepository() {}
~OpRepository() {}
static OpRepository& instance() {
static OpRepository repo;
return repo;
}
template<typename OP_TYPE>
void regist_op(std::string op_type) {
_repository[op_type] = &OpFactory<OP_TYPE>::instance();
LOG(TRACE) << "Succ regist op: " << op_type << "!";
}
Op* get_op(std::string op_type);
void return_op(Op* op);
void return_op(const std::string& op_type, Op* op);
private:
ManagerMap _repository;
};
} // predictor
} // paddle_serving
} // baidu
#endif
#include "predictor_metric.h"
#include "base/memory/singleton.h"
namespace baidu {
namespace paddle_serving {
namespace predictor {
PredictorMetric* PredictorMetric::GetInstance() {
return Singleton<PredictorMetric>::get();
}
} // namespace predictor
} // namespace paddle_serving
} // namespace baidu
#ifndef BAIDU_PADDLE_SERVING_PREDICTOR_FRAMEWORK_PREDICTOR_METRIC_H
#define BAIDU_PADDLE_SERVING_PREDICTOR_FRAMEWORK_PREDICTOR_METRIC_H
#include <bvar/bvar.h> // bvar
#include <base/scoped_lock.h> // BAIDU_SCOPED_LOCK
#include <base/containers/flat_map.h> // FlatMap
#include <base/memory/singleton.h> // DefaultSingletonTraits
namespace baidu {
namespace paddle_serving {
namespace predictor {
static const std::string WORKFLOW_METRIC_PREFIX = "workflow_";
static const std::string STAGE_METRIC_PREFIX = "stage_";
static const std::string OP_METRIC_PREFIX = "op_";
static const std::string NAME_DELIMITER = "_";
typedef ::bvar::Window<::bvar::Adder<int> > AdderWindow;
typedef ::bvar::Window<::bvar::IntRecorder> RecorderWindow;
class AdderWindowMetric {
public:
AdderWindowMetric() :
sum_window(&sum, ::bvar::FLAGS_bvar_dump_interval) {
}
AdderWindowMetric(const std::string& name) :
sum_window(name + "_sum_window", &sum, ::bvar::FLAGS_bvar_dump_interval) {
}
inline AdderWindowMetric& operator<<(int count) {
        sum << count;
        return *this;
    }
public:
::bvar::Adder<int> sum;
AdderWindow sum_window;
};
static float g_get_rate(void* arg);
class RateBaseMetric {
public:
RateBaseMetric(const std::string& name) :
rate_value(name + "_rate", g_get_rate, this) {
}
void update_lhs(int count) { lhs.sum << count; }
void update_rhs(int count) { rhs.sum << count; }
public:
::bvar::PassiveStatus<float> rate_value;
AdderWindowMetric lhs;
AdderWindowMetric rhs;
};
static float g_get_rate(void* arg) {
RateBaseMetric* rate_metric = static_cast<RateBaseMetric*>(arg);
if (rate_metric->rhs.sum_window.get_value() <= 0) {
return 0;
}
return rate_metric->lhs.sum_window.get_value() * 100
/ (float) rate_metric->rhs.sum_window.get_value();
}
// rounds to an integer when computing the average
class AvgWindowMetric {
public:
AvgWindowMetric() :
avg_window(&avg, ::bvar::FLAGS_bvar_dump_interval) {
}
AvgWindowMetric(const std::string& name) :
avg_window(name + "_avg_window", &avg, ::bvar::FLAGS_bvar_dump_interval) {
}
inline AvgWindowMetric& operator<<(int64_t value) {
        avg << value;
        return *this;
    }
public:
::bvar::IntRecorder avg;
RecorderWindow avg_window;
};
// does not round when computing the average
static double g_get_double_avg(void* arg);
class AvgDoubleWindowMetric {
public:
AvgDoubleWindowMetric(const std::string& name) :
avg_value(name + "_avg_double_window", g_get_double_avg, this) {
}
inline AvgDoubleWindowMetric& operator<<(int64_t value) {
        recorder << value;
        return *this;
    }
public:
::bvar::PassiveStatus<double> avg_value;
AvgWindowMetric recorder;
};
static double g_get_double_avg(void* arg) {
AvgDoubleWindowMetric* avg_metric = static_cast<AvgDoubleWindowMetric*>(arg);
return avg_metric->recorder.avg_window.get_value().get_average_double();
}
class PredictorMetric {
public:
static PredictorMetric* GetInstance();
~PredictorMetric() {
for (::base::FlatMap<std::string, bvar::LatencyRecorder*>::iterator iter
= latency_recorder_map.begin();
iter != latency_recorder_map.end();
++iter) {
delete iter->second;
}
for (::base::FlatMap<std::string, AdderWindowMetric*>::iterator iter
= adder_window_map.begin();
iter != adder_window_map.end();
++iter) {
delete iter->second;
}
for (::base::FlatMap<std::string, AvgWindowMetric*>::iterator iter
= avg_window_map.begin();
iter != avg_window_map.end();
++iter) {
delete iter->second;
}
for (::base::FlatMap<std::string, AvgDoubleWindowMetric*>::iterator iter
= avg_double_window_map.begin();
iter != avg_double_window_map.end();
++iter) {
delete iter->second;
}
for (::base::FlatMap<std::string, RateBaseMetric*>::iterator iter
= rate_map.begin();
iter != rate_map.end();
++iter) {
delete iter->second;
}
}
void regist_latency_metric(const std::string& metric_name) {
{
BAIDU_SCOPED_LOCK(_mutex);
LOG(INFO) << "try to regist latency metric[" << metric_name << "].";
if (latency_recorder_map.seek(metric_name) == NULL) {
bvar::LatencyRecorder* metric = new (std::nothrow) bvar::LatencyRecorder(metric_name);
latency_recorder_map.insert(metric_name, metric);
LOG(INFO) << "succ to regist latency metric[" << metric_name << "].";
}
}
}
void regist_adder_window_metric(const std::string& metric_name) {
{
BAIDU_SCOPED_LOCK(_mutex);
LOG(INFO) << "try to regist adder window metric[" << metric_name << "].";
if (adder_window_map.seek(metric_name) == NULL) {
AdderWindowMetric* metric = new (std::nothrow) AdderWindowMetric(metric_name);
adder_window_map.insert(metric_name, metric);
LOG(INFO) << "succ to regist adder window metric[" << metric_name << "].";
}
}
}
void regist_avg_window_metric(const std::string& metric_name) {
{
BAIDU_SCOPED_LOCK(_mutex);
LOG(INFO) << "try to regist avg window metric[" << metric_name << "].";
if (avg_window_map.seek(metric_name) == NULL) {
AvgWindowMetric* metric = new (std::nothrow) AvgWindowMetric(metric_name);
avg_window_map.insert(metric_name, metric);
LOG(INFO) << "succ to regist avg window metric[" << metric_name << "].";
}
}
}
void regist_avg_double_window_metric(const std::string& metric_name) {
{
BAIDU_SCOPED_LOCK(_mutex);
LOG(INFO) << "try to regist avg double window metric[" << metric_name << "].";
if (avg_double_window_map.seek(metric_name) == NULL) {
AvgDoubleWindowMetric* metric = new (std::nothrow) AvgDoubleWindowMetric(metric_name);
avg_double_window_map.insert(metric_name, metric);
LOG(INFO) << "succ to regist avg double window metric[" << metric_name << "].";
}
}
}
void regist_rate_metric(const std::string& metric_name) {
{
BAIDU_SCOPED_LOCK(_mutex);
LOG(INFO) << "try to regist rate metric[" << metric_name << "].";
if (rate_map.seek(metric_name) == NULL) {
RateBaseMetric* metric = new (std::nothrow) RateBaseMetric(metric_name);
rate_map.insert(metric_name, metric);
LOG(INFO) << "succ to regist rate metric[" << metric_name << "].";
}
}
}
inline void update_latency_metric(const std::string& metric_name, int64_t latency) {
bvar::LatencyRecorder** metric = latency_recorder_map.seek(metric_name);
if (metric != NULL) {
**metric << latency;
} else {
LOG(FATAL) << "impossible, check if you regist[" << metric_name << "].";
}
}
inline void update_adder_window_metric(const std::string& metric_name, int count) {
AdderWindowMetric** metric = adder_window_map.seek(metric_name);
if (metric != NULL) {
**metric << count;
} else {
LOG(FATAL) << "impossible, check if you regist[" << metric_name << "].";
}
}
inline void update_avg_window_metric(const std::string& metric_name, int64_t value) {
AvgWindowMetric** metric = avg_window_map.seek(metric_name);
if (metric != NULL) {
**metric << value;
} else {
LOG(FATAL) << "impossible, check if you regist[" << metric_name << "].";
}
}
inline void update_avg_double_window_metric(const std::string& metric_name, int64_t value) {
AvgDoubleWindowMetric** metric = avg_double_window_map.seek(metric_name);
if (metric != NULL) {
**metric << value;
} else {
LOG(FATAL) << "impossible, check if you regist[" << metric_name << "].";
}
}
inline void update_rate_metric_lhs(const std::string& name, int count) {
RateBaseMetric** metric = rate_map.seek(name);
if (metric != NULL) {
(*metric)->update_lhs(count);
} else {
LOG(FATAL) << "impossible, check if you regist[" << name << "].";
}
}
inline void update_rate_metric_rhs(const std::string& name, int count) {
RateBaseMetric** metric = rate_map.seek(name);
if (metric != NULL) {
(*metric)->update_rhs(count);
} else {
LOG(FATAL) << "impossible, check if you regist[" << name << "].";
}
}
private:
PredictorMetric() :
bucket_count(300) {
latency_recorder_map.init(bucket_count);
adder_window_map.init(bucket_count);
avg_window_map.init(bucket_count);
avg_double_window_map.init(bucket_count);
rate_map.init(bucket_count);
}
private:
const size_t bucket_count;
::base::FlatMap<std::string, bvar::LatencyRecorder*> latency_recorder_map;
::base::FlatMap<std::string, AdderWindowMetric*> adder_window_map;
::base::FlatMap<std::string, AvgWindowMetric*> avg_window_map;
::base::FlatMap<std::string, AvgDoubleWindowMetric*> avg_double_window_map;
::base::FlatMap<std::string, RateBaseMetric*> rate_map;
friend struct DefaultSingletonTraits<PredictorMetric>;
mutable base::Mutex _mutex;
DISALLOW_COPY_AND_ASSIGN(PredictorMetric);
};
} // namespace predictor
} // namespace paddle_serving
} // namespace baidu
#endif // BAIDU_PADDLE_SERVING_PREDICTOR_FRAMEWORK_PREDICTOR_METRIC_H
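// Usage sketch (illustrative only): registering a latency metric once and
// feeding a measured latency into it (in microseconds, as produced by
// base::Timer::u_elapsed() in DagView::execute_one_stage()). The metric name
// is a placeholder.
#include "framework/predictor_metric.h"
#include <stdint.h>
inline void demo_report_latency(int64_t latency_us) {
    using baidu::paddle_serving::predictor::PredictorMetric;
    using baidu::paddle_serving::predictor::STAGE_METRIC_PREFIX;
    const std::string metric_name = STAGE_METRIC_PREFIX + "demo_workflow_0";
    // Registration is guarded by a lock and is a no-op if the name exists.
    PredictorMetric::GetInstance()->regist_latency_metric(metric_name);
    PredictorMetric::GetInstance()->update_latency_metric(metric_name, latency_us);
}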
#include "common/inner_common.h"
#include "framework/resource.h"
#include "framework/infer.h"
namespace baidu {
namespace paddle_serving {
namespace predictor {
// __thread bool p_thread_initialized = false;
static void dynamic_resource_deleter(void* d) {
delete static_cast<DynamicResource*>(d);
}
DynamicResource::DynamicResource() {}
DynamicResource::~DynamicResource() {}
int DynamicResource::initialize() {
return 0;
}
int DynamicResource::clear() {
return 0;
}
int Resource::initialize(const std::string& path, const std::string& file) {
comcfg::Configure conf;
if (conf.load(path.c_str(), file.c_str()) != 0) {
LOG(ERROR) << "Failed initialize resource from: "
<< path << "/" << file;
return -1;
}
// mempool
if (MempoolWrapper::instance().initialize() != 0) {
LOG(ERROR) << "Failed proc initialized mempool wrapper";
return -1;
}
LOG(WARNING) << "Successfully proc initialized mempool wrapper";
if (FLAGS_enable_mc_cache) {
_mc_cache = new (std::nothrow) McCache();
CHECK(_mc_cache != nullptr) << "failed to new McCache";
uint32_t cache_capacity = 0;
conf["cache_capacity"].get_uint32(&cache_capacity, DEFAULT_CACHE_CAPACITY);
LOG(INFO) << "cache_capacity[" << cache_capacity << "].";
uint32_t cache_unitsize = 0;
conf["cache_unitsize"].get_uint32(&cache_unitsize, DEFAULT_CACHE_UNITSIZE);
LOG(INFO) << "cache_unitsize[" << cache_unitsize << "].";
if (_mc_cache->initialize(cache_capacity, cache_unitsize) != 0) {
LOG(ERROR) << "init mc cache failed";
return -1;
}
LOG(INFO) << "mc cache proc_init success.";
}
if (FLAGS_enable_model_toolkit) {
int err = 0;
std::string model_toolkit_path = conf["model_toolkit_path"].to_cstr(&err);
if (err != 0) {
LOG(ERROR) << "read model_toolkit_path failed, path["
<< path << "], file[" << file << "]";
return -1;
}
std::string model_toolkit_file = conf["model_toolkit_file"].to_cstr(&err);
if (err != 0) {
LOG(ERROR) << "read model_toolkit_file failed, path["
<< path << "], file[" << file << "]";
return -1;
}
if (InferManager::instance().proc_initialize(
model_toolkit_path.c_str(), model_toolkit_file.c_str()) != 0) {
LOG(ERROR) << "failed proc initialize modeltoolkit, config: "
<< model_toolkit_path << "/" << model_toolkit_file;
return -1;
}
}
if (THREAD_KEY_CREATE(&_tls_bspec_key, dynamic_resource_deleter) != 0) {
LOG(ERROR) << "unable to create tls_bthread_key of thrd_data";
return -1;
}
THREAD_SETSPECIFIC(_tls_bspec_key, NULL);
return 0;
}
int Resource::thread_initialize() {
// mempool
if (MempoolWrapper::instance().thread_initialize() != 0) {
LOG(ERROR) << "Failed thread initialized mempool wrapper";
return -1;
}
LOG(WARNING) << "Successfully thread initialized mempool wrapper";
// infer manager
if (FLAGS_enable_model_toolkit && InferManager::instance().thrd_initialize() != 0) {
LOG(FATAL) << "Failed thrd initialized infer manager";
return -1;
}
DynamicResource* p_dynamic_resource = (DynamicResource*) THREAD_GETSPECIFIC(_tls_bspec_key);
if (p_dynamic_resource == NULL) {
p_dynamic_resource = new (std::nothrow) DynamicResource;
if (p_dynamic_resource == NULL) {
LOG(FATAL) << "failed to create tls DynamicResource";
return -1;
}
if (p_dynamic_resource->initialize() != 0) {
LOG(FATAL) << "DynamicResource initialize failed.";
delete p_dynamic_resource;
p_dynamic_resource = NULL;
return -1;
}
if (THREAD_SETSPECIFIC(_tls_bspec_key, p_dynamic_resource) != 0) {
LOG(FATAL) << "unable to set tls DynamicResource";
delete p_dynamic_resource;
p_dynamic_resource = NULL;
return -1;
}
}
LOG(INFO) << "Successfully thread initialized dynamic resource";
return 0;
}
int Resource::thread_clear() {
// mempool
if (MempoolWrapper::instance().thread_clear() != 0) {
LOG(ERROR) << "Failed thread clear mempool wrapper";
return -1;
}
// infer manager
if (FLAGS_enable_model_toolkit && InferManager::instance().thrd_clear() != 0) {
LOG(FATAL) << "Failed thrd clear infer manager";
return -1;
}
DynamicResource* p_dynamic_resource = (DynamicResource*) THREAD_GETSPECIFIC(_tls_bspec_key);
if (p_dynamic_resource == NULL) {
LOG(FATAL) << "tls dynamic resource shouldn't be null after thread_initialize";
return -1;
}
if (p_dynamic_resource->clear() != 0) {
LOG(FATAL) << "Failed to invoke dynamic resource clear";
return -1;
}
// ...
return 0;
}
int Resource::reload() {
if (FLAGS_enable_model_toolkit && InferManager::instance().reload() != 0) {
LOG(FATAL) << "Failed reload infer manager";
return -1;
}
// other resource reload here...
return 0;
}
int Resource::finalize() {
if (FLAGS_enable_mc_cache && _mc_cache != NULL) {
if (_mc_cache->finalize() != 0) {
LOG(ERROR) << "failed to finalize mc cache";
delete _mc_cache;
_mc_cache = NULL;
return -1;
}
delete _mc_cache;
_mc_cache = NULL;
LOG(INFO) << "mc_cache finalize success";
}
if (FLAGS_enable_model_toolkit && InferManager::instance().proc_finalize() != 0) {
LOG(FATAL) << "Failed proc finalize infer manager";
return -1;
}
THREAD_KEY_DELETE(_tls_bspec_key);
return 0;
}
} // predictor
} // paddle_serving
} // baidu
#ifndef BAIDU_PADDLE_SERVING_PREDICTOR_RESOURCE_H
#define BAIDU_PADDLE_SERVING_PREDICTOR_RESOURCE_H
#include "common/inner_common.h"
#include "framework/memory.h"
#include "framework/mc_cache.h" // McCache
namespace baidu {
namespace paddle_serving {
namespace predictor {
class BaseRdDict;
struct DynamicResource {
DynamicResource();
~DynamicResource();
int initialize();
int clear();
};
class Resource {
public:
Resource() :
_mc_cache(NULL) {
}
~Resource() { finalize(); }
static Resource& instance() {
static Resource ins;
return ins;
}
int initialize(const std::string& path, const std::string& file);
int thread_initialize();
int thread_clear();
int reload();
int finalize();
DynamicResource* get_dynamic_resource() {
return (DynamicResource*) THREAD_GETSPECIFIC(_tls_bspec_key);
}
McCache* get_mc_cache() {
return _mc_cache;
}
private:
int thread_finalize() {
return 0;
}
THREAD_KEY_T _tls_bspec_key;
McCache* _mc_cache;
};
} // predictor
} // paddle_serving
} // baidu
#endif
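// ---------------------------------------------------------------------------
// Editor's usage sketch (not part of the original commit): the Resource
// lifecycle implied by the implementation above -- proc-level initialize()
// once, thread_initialize() in every worker thread, thread_clear() before
// each request, reload() driven periodically, finalize() at shutdown. The
// conf path/file names are hypothetical.
// ---------------------------------------------------------------------------
#include "framework/resource.h"
namespace baidu {
namespace paddle_serving {
namespace predictor {
inline int demo_resource_lifecycle() {
    Resource& rc = Resource::instance();
    if (rc.initialize("./conf", "resource.conf") != 0) {  // hypothetical conf
        return -1;
    }
    if (rc.thread_initialize() != 0) {  // once in every worker thread
        return -1;
    }
    if (rc.thread_clear() != 0) {       // before handling each request
        return -1;
    }
    if (rc.reload() != 0) {             // normally called by the reload thread
        return -1;
    }
    return rc.finalize();               // at process shutdown
}
} // predictor
} // paddle_serving
} // baidu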
#include <baidu/rpc/policy/itp.h> // ItpAdaptor
#include <baidu/rpc/policy/nova_pbrpc_protocol.h> // NovaServiceAdaptor
#include <baidu/rpc/policy/public_pbrpc_protocol.h> // PublicPbrpcServiceAdaptor
#include <baidu/rpc/policy/nshead_mcpack_protocol.h> // NsheadMcpackAdaptor
#include "common/inner_common.h"
#include "framework/server.h"
#include "framework/service_manager.h"
#include "framework/resource.h"
#include "framework/manager.h"
namespace baidu {
namespace paddle_serving {
namespace predictor {
volatile bool ServerManager::_s_reload_starting = true;
bool ServerManager::_compare_string_piece_without_case(
const base::StringPiece& s1, const char* s2) {
if (strlen(s2) != s1.size()) {
return false;
}
return strncasecmp(s1.data(), s2, s1.size()) == 0;
}
ServerManager::ServerManager() {
_format_services.clear();
_options.idle_timeout_sec = FLAGS_idle_timeout_s;
if (FLAGS_enable_nshead_protocol) {
LOG(INFO) << "FLAGS_enable_nshead_protocol on, try to set FLAGS_nshead_protocol["
<< FLAGS_nshead_protocol << "] in server side";
_set_server_option_by_protocol(FLAGS_nshead_protocol);
}
_options.max_concurrency = FLAGS_max_concurrency;
_options.num_threads = FLAGS_num_threads;
}
int ServerManager::add_service_by_format(const std::string& format) {
Service* service =
FormatServiceManager::instance().get_service(format);
if (service == NULL) {
LOG(FATAL) << "Not found service by format:" << format << "!";
return -1;
}
if (_format_services.find(format) != _format_services.end()) {
LOG(FATAL) << "Cannot insert duplicated service by format:"
<< format << "!";
return -1;
}
std::pair<boost::unordered_map<std::string, Service*>::iterator, bool> it
= _format_services.insert(std::make_pair(format, service));
if (!it.second) {
LOG(FATAL) << "Failed insert service by format:"
<< format << "!";
return -1;
}
return 0;
}
int ServerManager::start_and_wait() {
if (_start_reloader() != 0) {
LOG(ERROR) << "Failed start reloader";
return -1;
}
boost::unordered_map<std::string, Service*>::iterator it;
for (it = _format_services.begin(); it != _format_services.end();
it++) {
if (_server.AddService(it->second, baidu::rpc::SERVER_DOESNT_OWN_SERVICE)
!= 0) {
LOG(ERROR) << "Failed to add service of format:"
<< it->first << "!";
return -1;
}
}
if (_server.Start(FLAGS_port, &_options) != 0) {
LOG(ERROR) << "Failed to start Paddle Inference Server" ;
return -1;
}
_server.RunUntilAskedToQuit();
if (_wait_reloader() != 0) {
LOG(ERROR) << "Failed start reloader";
return -1;
}
return 0;
}
void ServerManager::_set_server_option_by_protocol(
const ::base::StringPiece& protocol_type) {
std::string enabled_protocols = FLAGS_enable_protocol_list;
if (_compare_string_piece_without_case(protocol_type, "itp")) {
_options.nshead_service = new ::baidu::rpc::policy::ItpAdaptor;
} else if (_compare_string_piece_without_case(protocol_type, "nova_pbrpc")) {
_options.nshead_service = new ::baidu::rpc::policy::NovaServiceAdaptor;
} else if (_compare_string_piece_without_case(protocol_type, "public_pbrpc")) {
_options.nshead_service = new ::baidu::rpc::policy::PublicPbrpcServiceAdaptor;
} else if (_compare_string_piece_without_case(protocol_type, "nshead_mcpack")) {
_options.nshead_service = new ::baidu::rpc::policy::NsheadMcpackAdaptor;
} else {
LOG(ERROR) << "fail to set nshead protocol, protocol_type[" << protocol_type << "].";
return;
}
_options.enabled_protocols = enabled_protocols;
LOG(INFO) << "success to set nshead protocol, protocol_type[" << protocol_type << "].";
}
int ServerManager::_start_reloader() {
int ret = THREAD_CREATE(
&_reload_thread, NULL,
ServerManager::_reload_worker,
NULL);
if (ret != 0) {
LOG(ERROR) << "Failed start reload thread, ret:" << ret;
return -1;
}
return 0;
}
int ServerManager::_wait_reloader() {
THREAD_JOIN(_reload_thread, NULL);
return 0;
}
void* ServerManager::_reload_worker(void* args) {
LOG(TRACE) << "Entrence reload worker, "
<< "interval_s: " << FLAGS_reload_interval_s;
while (ServerManager::reload_starting()) {
LOG(TRACE) << "Begin reload framework...";
if (Resource::instance().reload() != 0) {
LOG(FATAL) << "Failed reload resource!";
}
if (WorkflowManager::instance().reload() != 0) {
LOG(FATAL) << "Failed reload workflows";
}
usleep(FLAGS_reload_interval_s * 1000000);
}
LOG(TRACE) << "Exit reload worker!";
return NULL;
}
} // predictor
} // paddle_serving
} // baidu
#ifndef BAIDU_PADDLE_SERVING_PREDICTOR_SERVER_H
#define BAIDU_PADDLE_SERVING_PREDICTOR_SERVER_H
#include "common/inner_common.h"
namespace baidu {
namespace paddle_serving {
namespace predictor {
class ServerManager {
public:
typedef google::protobuf::Service Service;
ServerManager();
static ServerManager& instance() {
static ServerManager server;
return server;
}
static bool reload_starting() {
return _s_reload_starting;
}
static void stop_reloader() {
_s_reload_starting = false;
}
int add_service_by_format(const std::string& format);
int start_and_wait();
private:
int _start_reloader();
int _wait_reloader();
static void* _reload_worker(void* args);
bool _compare_string_piece_without_case(
const base::StringPiece& s1, const char* s2);
void _set_server_option_by_protocol(const ::base::StringPiece& protocol_type);
baidu::rpc::ServerOptions _options;
baidu::rpc::Server _server;
boost::unordered_map<std::string, Service*> _format_services;
THREAD_T _reload_thread;
static volatile bool _s_reload_starting;
};
} // predictor
} // paddle_serving
} // baidu
#endif
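// ---------------------------------------------------------------------------
// Editor's usage sketch (not part of the original commit): minimal startup
// path through ServerManager. It assumes the named format was already
// registered with FormatServiceManager (see REGIST_FORMAT_SERVICE below);
// the format name here is hypothetical.
// ---------------------------------------------------------------------------
#include "framework/server.h"
namespace baidu {
namespace paddle_serving {
namespace predictor {
inline int demo_server_startup() {
    ServerManager& svr_mgr = ServerManager::instance();
    if (svr_mgr.add_service_by_format("DemoFormatService") != 0) {  // hypothetical
        return -1;
    }
    // Starts the reload thread, adds all format services to the rpc server,
    // and blocks until the server is asked to quit.
    return svr_mgr.start_and_wait();
}
} // predictor
} // paddle_serving
} // baidu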
#include "common/inner_common.h"
#include "framework/channel.h"
#include "common/constant.h"
#include "framework/service.h"
#include <base/time.h> // base::Timer
#include "framework/server.h"
#include "framework/dag_view.h"
#include "framework/manager.h"
#include "framework/resource.h"
#include "framework/predictor_metric.h" // PredictorMetric
namespace baidu {
namespace paddle_serving {
namespace predictor {
int InferService::init(const comcfg::ConfigUnit& conf) {
_infer_service_format = conf["name"].to_cstr();
char merger[256];
conf["merger"].get_cstr(merger, sizeof(merger), "default");
if (!MergerManager::instance().get(merger, _merger)) {
LOG(ERROR) << "Failed get merger: " << merger;
return ERR_INTERNAL_FAILURE;
} else {
LOG(WARNING) << "Succ get merger: " << merger <<
" for service: " << _infer_service_format;
}
ServerManager& svr_mgr = ServerManager::instance();
if (svr_mgr.add_service_by_format(_infer_service_format) != 0) {
LOG(FATAL)
<< "Not found service by format name:"
<< _infer_service_format << "!";
return ERR_INTERNAL_FAILURE;
}
uint32_t default_value = 0;
conf["enable_map_request_to_workflow"].get_uint32(&default_value, 0);
_enable_map_request_to_workflow = (default_value != 0);
LOG(INFO) << "service[" << _infer_service_format
<< "], enable_map_request_to_workflow["
<< _enable_map_request_to_workflow << "].";
uint32_t flow_size = conf["workflow"].size();
if (_enable_map_request_to_workflow) {
if (_request_to_workflow_map.init(
MAX_WORKFLOW_NUM_IN_ONE_SERVICE/*load_factor=80*/) != 0) {
LOG(FATAL)
<< "init request to workflow map failed, bucket_count["
<< MAX_WORKFLOW_NUM_IN_ONE_SERVICE << "].";
return ERR_INTERNAL_FAILURE;
}
int err = 0;
const char* pchar = conf["request_field_key"].to_cstr(&err);
if (err != 0) {
LOG(FATAL)
<< "read request_field_key failed, err_code["
<< err << "].";
return ERR_INTERNAL_FAILURE;
}
_request_field_key = std::string(pchar);
LOG(INFO)
<< "service[" << _infer_service_format
<< "], request_field_key["
<< _request_field_key << "].";
uint32_t request_field_value_size = conf["request_field_value"].size();
if (request_field_value_size != flow_size) {
LOG(FATAL)
<< "flow_size[" << flow_size
<< "] not equal request_field_value_size["
<< request_field_value_size << "].";
return ERR_INTERNAL_FAILURE;
}
for (uint32_t fi = 0; fi < flow_size; fi++) {
std::vector<std::string> tokens;
std::vector<Workflow*> workflows;
std::string list = conf["workflow"][fi].to_cstr();
boost::split(tokens, list, boost::is_any_of(","));
uint32_t tsize = tokens.size();
for (uint32_t ti = 0; ti < tsize; ++ti) {
boost::trim_if(tokens[ti], boost::is_any_of(" "));
Workflow* workflow =
WorkflowManager::instance().item(tokens[ti]);
if (workflow == NULL) {
LOG(FATAL)
<< "Failed get workflow by name:"
<< tokens[ti] << ", ti: " << ti;
return ERR_INTERNAL_FAILURE;
}
workflow->regist_metric(full_name());
workflows.push_back(workflow);
}
const std::string& request_field_value = conf["request_field_value"][fi].to_cstr();
if (_request_to_workflow_map.insert(request_field_value, workflows) == NULL) {
LOG(FATAL)
<< "insert [" << request_field_value << ","
<< list << "] to _request_to_workflow_map failed.";
return ERR_INTERNAL_FAILURE;
}
LOG(INFO) << "workflow[" << list
<< "], request_field_value[" << request_field_value << "].";
}
} else {
for (uint32_t fi = 0; fi < flow_size; fi++) {
const std::string& workflow_name =
conf["workflow"][fi].to_cstr();
Workflow* workflow =
WorkflowManager::instance().item(workflow_name);
if (workflow == NULL) {
LOG(FATAL)
<< "Failed get workflow by name:"
<< workflow_name;
return ERR_INTERNAL_FAILURE;
}
workflow->regist_metric(full_name());
_flows.push_back(workflow);
}
}
LOG(TRACE)
<< "Succ load infer_service: "
<< _infer_service_format << "!";
return ERR_OK;
}
int InferService::reload() {
return ERR_OK;
}
const std::string& InferService::name() const {
return _infer_service_format;
}
// Execute each workflow serially
int InferService::inference(
const google::protobuf::Message* request,
google::protobuf::Message* response,
base::IOBufBuilder* debug_os) {
TRACEPRINTF("start to inference");
// When a function call begins, the framework resets
// thread-local variables & resources automatically.
if (Resource::instance().thread_clear() != 0) {
LOG(ERROR) << "Failed thread clear whole resource";
return ERR_INTERNAL_FAILURE;
}
TRACEPRINTF("finish to thread clear");
if (_enable_map_request_to_workflow) {
std::vector<Workflow*>* workflows = _map_request_to_workflow(request);
if (!workflows || workflows->size() == 0) {
LOG(ERROR) << "Failed to map request to workflow";
return ERR_INTERNAL_FAILURE;
}
size_t fsize = workflows->size();
for (size_t fi = 0; fi < fsize; ++fi) {
Workflow* workflow = (*workflows)[fi];
if (workflow == NULL) {
LOG(ERROR) << "Failed to get valid workflow at: " << fi;
return ERR_INTERNAL_FAILURE;
}
TRACEPRINTF("start to execute workflow[%s]", workflow->name().c_str());
int errcode = _execute_workflow(workflow, request, response, debug_os);
TRACEPRINTF("finish to execute workflow[%s]", workflow->name().c_str());
if (errcode < 0) {
LOG(FATAL) << "Failed execute workflow[" << workflow->name()
<< "] in:" << name();
return errcode;
}
}
} else {
TRACEPRINTF("start to execute one workflow");
size_t fsize = _flows.size();
for (size_t fi = 0; fi < fsize; ++fi) {
TRACEPRINTF("start to execute one workflow-%lu", fi);
int errcode = execute_one_workflow(fi, request, response, debug_os);
TRACEPRINTF("finish to execute one workflow-%lu", fi);
if (errcode < 0) {
LOG(FATAL) << "Failed execute 0-th workflow in:" << name();
return errcode;
}
}
}
return ERR_OK;
}
int InferService::debug(
const google::protobuf::Message* request,
google::protobuf::Message* response,
base::IOBufBuilder* debug_os) {
return inference(request, response, debug_os);
}
int InferService::execute_one_workflow(
uint32_t index,
const google::protobuf::Message* request,
google::protobuf::Message* response,
base::IOBufBuilder* debug_os) {
if (index >= _flows.size()) {
LOG(FATAL) << "Faield execute workflow, index: "
<< index << " >= max:" << _flows.size();
return ERR_OVERFLOW_FAILURE;
}
Workflow* workflow = _flows[index];
return _execute_workflow(workflow, request, response, debug_os);
}
int InferService::_execute_workflow(
Workflow* workflow,
const google::protobuf::Message* request,
google::protobuf::Message* response,
base::IOBufBuilder* debug_os) {
base::Timer workflow_time(base::Timer::STARTED);
// create and submit the starting (start-op) channel
BuiltinChannel req_channel;
req_channel.init(0, START_OP_NAME);
req_channel = request;
DagView* dv = workflow->fetch_dag_view(full_name());
dv->set_request_channel(req_channel);
// call actual inference interface
int errcode = dv->execute(debug_os);
if (errcode < 0) {
LOG(FATAL) << "Failed execute dag for workflow:"
<< workflow->name();
return errcode;
}
TRACEPRINTF("finish to dv execute");
// fetch the ending (response) channel and merge it into the response
const Channel* res_channel = dv->get_response_channel();
if (!_merger || !_merger->merge(res_channel->message(), response)) {
LOG(FATAL) << "Failed merge channel res to response";
return ERR_INTERNAL_FAILURE;
}
TRACEPRINTF("finish to copy from");
workflow_time.stop();
PredictorMetric::GetInstance()->update_latency_metric(
WORKFLOW_METRIC_PREFIX + dv->full_name(), workflow_time.u_elapsed());
// return tls data to object pool
workflow->return_dag_view(dv);
TRACEPRINTF("finish to return dag view");
return ERR_OK;
}
std::vector<Workflow*>* InferService::_map_request_to_workflow(
const google::protobuf::Message* request) {
const google::protobuf::Descriptor* desc = request->GetDescriptor();
const google::protobuf::FieldDescriptor* field = desc->FindFieldByName(_request_field_key);
if (field == NULL) {
LOG(ERROR) << "No field[" << _request_field_key << "] in [" << desc->full_name() << "].";
return NULL;
}
if (field->is_repeated()) {
LOG(ERROR) << "field[" << desc->full_name() << "."
<< _request_field_key << "] is repeated.";
return NULL;
}
if (field->cpp_type() != google::protobuf::FieldDescriptor::CPPTYPE_STRING) {
LOG(ERROR) << "field[" << desc->full_name() << "."
<< _request_field_key << "] should be string";
return NULL;
}
const std::string& field_value = request->GetReflection()->GetString(*request, field);
std::vector<Workflow*>* p_workflow = _request_to_workflow_map.seek(field_value);
if (p_workflow == NULL) {
LOG(ERROR) << "cannot find key[" << field_value << "] in _request_to_workflow_map";
return NULL;
}
return p_workflow;
}
} // predictor
} // paddle_serving
} // baidu
#ifndef BAIDU_PADDLE_SERVING_PREDICTOR_SERVICE_H
#define BAIDU_PADDLE_SERVING_PREDICTOR_SERVICE_H
#include "common/inner_common.h"
#include "framework/workflow.h"
#include "framework/merger.h"
namespace baidu {
namespace paddle_serving {
namespace predictor {
class InferService {
public:
typedef OpChannel<google::protobuf::Message> BuiltinChannel;
static const char* tag() {
return "Service";
}
InferService() :
_last_change_timestamp(0),
_enable_map_request_to_workflow(false),
_request_field_key(""),
_merger(NULL) {
_flows.clear();
_request_to_workflow_map.clear();
}
int init(const comcfg::ConfigUnit& conf);
int deinit() { return 0; }
int reload();
const std::string& name() const;
const std::string& full_name() const {
return _infer_service_format;
}
// Execute each workflow serially
virtual int inference(
const google::protobuf::Message* request,
google::protobuf::Message* response,
base::IOBufBuilder* debug_os = NULL);
int debug(
const google::protobuf::Message* request,
google::protobuf::Message* response,
base::IOBufBuilder* debug_os);
int execute_one_workflow(
uint32_t index,
const google::protobuf::Message* request,
google::protobuf::Message* response,
base::IOBufBuilder* debug_os);
private:
int _execute_workflow(
Workflow* workflow,
const google::protobuf::Message* request,
google::protobuf::Message* response,
base::IOBufBuilder* debug_os);
std::vector<Workflow*>* _map_request_to_workflow(const google::protobuf::Message* request);
private:
std::vector<Workflow*> _flows;
std::string _infer_service_format;
uint64_t _last_change_timestamp;
bool _enable_map_request_to_workflow;
std::string _request_field_key;
::base::FlatMap<std::string, std::vector<Workflow*> > _request_to_workflow_map;
IMerger* _merger;
};
class ParallelInferService : public InferService {
public:
// Execute each workflow in parallel
int inference(
const google::protobuf::Message* request,
google::protobuf::Message* response,
base::IOBufBuilder* debug_os) {
return 0;
}
};
} // predictor
} // paddle_serving
} // baidu
#endif // BAIDU_PADDLE_SERVING_PREDICTOR_INFERSERVICE_H
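// ---------------------------------------------------------------------------
// Editor's usage sketch (not part of the original commit): how a caller is
// expected to drive InferService once init() has loaded its workflows. The
// request/response arguments are whatever protobuf messages the configured
// format service defines; debug_os may be omitted (defaults to NULL) as in
// the declaration above.
// ---------------------------------------------------------------------------
#include "framework/service.h"
namespace baidu {
namespace paddle_serving {
namespace predictor {
inline int demo_infer_call(InferService* service,
                           const google::protobuf::Message* request,
                           google::protobuf::Message* response) {
    // inference() clears thread-local resources itself, then executes either
    // the request-mapped workflows or every configured workflow in order.
    return service->inference(request, response);
}
} // predictor
} // paddle_serving
} // baidu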
#ifndef BAIDU_PADDLE_SERVING_PREDICTOR_FORMAT_MANAGER_H
#define BAIDU_PADDLE_SERVING_PREDICTOR_FORMAT_MANAGER_H
#include "common/inner_common.h"
namespace baidu {
namespace paddle_serving {
namespace predictor {
#define REGIST_FORMAT_SERVICE(svr_name, svr) \
do { \
int ret = ::baidu::paddle_serving::predictor::FormatServiceManager::instance().regist_service(\
svr_name, svr); \
if (ret != 0) { \
LOG(FATAL) \
<< "Failed regist service[" \
<< svr_name << "]" << "[" \
<< typeid(svr).name() << "]" \
<< "!"; \
} else { \
LOG(INFO) \
<< "Success regist service[" \
<< svr_name << "][" \
<< typeid(svr).name() << "]" \
<< "!"; \
} \
} while (0)
class FormatServiceManager {
public:
typedef google::protobuf::Service Service;
int regist_service(const std::string& svr_name, Service* svr) {
if (_service_map.find(svr_name) != _service_map.end()) {
LOG(FATAL)
<< "Service[" << svr_name << "]["
<< typeid(svr).name() << "]"
<< " already exist!";
return -1;
}
std::pair<boost::unordered_map<std::string, Service*>::iterator, bool> ret;
ret = _service_map.insert(std::make_pair(svr_name, svr));
if (ret.second == false) {
LOG(FATAL)
<< "Service[" << svr_name << "]["
<< typeid(svr).name() << "]"
<< " insert failed!";
return -1;
}
LOG(INFO)
<< "Service[" << svr_name << "] insert successfully!";
return 0;
}
Service* get_service(const std::string& svr_name) {
boost::unordered_map<std::string, Service*>::iterator res;
if ((res = _service_map.find(svr_name)) == _service_map.end()) {
LOG(WARNING)
<< "Service[" << svr_name << "] "
<< "not found in service manager"
<< "!";
return NULL;
}
return (*res).second;
}
static FormatServiceManager& instance() {
static FormatServiceManager service_;
return service_;
}
private:
boost::unordered_map<std::string, Service*> _service_map;
};
} // predictor
} // paddle_serving
} // baidu
#endif
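// ---------------------------------------------------------------------------
// Editor's usage sketch (not part of the original commit): registering a
// concrete google::protobuf::Service so that ServerManager can later look it
// up via add_service_by_format(). The format name and service pointer are
// hypothetical; the header name matches the include used in server.cpp above.
// ---------------------------------------------------------------------------
#include "framework/service_manager.h"
namespace baidu {
namespace paddle_serving {
namespace predictor {
inline void demo_regist_format_service(google::protobuf::Service* svr) {
    // Usually done once at startup for each generated service implementation;
    // the same name is then passed to ServerManager::add_service_by_format().
    REGIST_FORMAT_SERVICE("DemoFormatService", svr);
}
} // predictor
} // paddle_serving
} // baidu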
#include "common/inner_common.h"
#include "framework/workflow.h"
#include "framework/predictor_metric.h" // PredictorMetric
namespace baidu {
namespace paddle_serving {
namespace predictor {
int Workflow::init(const comcfg::ConfigUnit& conf) {
const std::string& name = conf["name"].to_cstr();
const std::string& path = conf["path"].to_cstr();
const std::string& file = conf["file"].to_cstr();
comcfg::Configure wf_conf;
if (wf_conf.load(path.c_str(), file.c_str()) != 0) {
LOG(ERROR)
<< "Failed load workflow, conf:"
<< path << "/" << file << "!";
return -1;
}
_type = wf_conf["workflow_type"].to_cstr();
_name = name;
if (_dag.init(wf_conf, name) != 0) {
LOG(ERROR) << "Failed initialize dag: " << _name;
return -1;
}
return 0;
}
DagView* Workflow::fetch_dag_view(const std::string& service_name) {
DagView* view = NULL;
if (_type == "Sequence") {
view = base::get_object<DagView>();
} else if (_type == "Parallel") {
view = base::get_object<ParallelDagView>();
} else {
LOG(FATAL)
<< "Unknown dag type:" << _type << "!";
return NULL;
}
if (view == NULL) {
LOG(FATAL) << "create dag view from pool failed!";
return NULL;
}
view->init(&_dag, service_name);
return view;
}
void Workflow::return_dag_view(DagView* view) {
view->deinit();
if (_type == "Sequence") {
base::return_object<DagView>(view);
} else if (_type == "Parallel") {
base::return_object<ParallelDagView>(
dynamic_cast<ParallelDagView*>(view));
} else {
LOG(FATAL)
<< "Unknown dag type:" << _type << "!";
return ;
}
}
int Workflow::reload() {
// reload op's config here...
return 0;
}
void Workflow::regist_metric(const std::string& service_name) {
PredictorMetric::GetInstance()->regist_latency_metric(
WORKFLOW_METRIC_PREFIX + service_name + NAME_DELIMITER + full_name());
_dag.regist_metric(service_name);
}
} // predictor
} // paddle_serving
} // baidu
#ifndef BAIDU_PADDLE_SERVING_PREDICTOR_WORKFLOW_H
#define BAIDU_PADDLE_SERVING_PREDICTOR_WORKFLOW_H
#include "common/inner_common.h"
#include "framework/dag.h"
#include "framework/dag_view.h"
namespace baidu {
namespace paddle_serving {
namespace predictor {
template<typename T>
class Manager;
class Workflow {
public:
Workflow() {}
static const char* tag() {
return "Workflow";
}
// Each workflow object corresponds to an independent
// configure file, so you can share the object between
// different apps.
int init(const comcfg::ConfigUnit& conf);
DagView* fetch_dag_view(const std::string& service_name);
int deinit() { return 0; }
void return_dag_view(DagView* view);
int reload();
const std::string& name() {
return _name;
}
const std::string& full_name() {
return _name;
}
void regist_metric(const std::string& service_name);
private:
Dag _dag;
std::string _type;
std::string _name;
};
} // predictor
} // paddle_serving
} // baidu
#endif
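// ---------------------------------------------------------------------------
// Editor's usage sketch (not part of the original commit): fetch_dag_view()
// and return_dag_view() must be paired so views go back to the object pool,
// as InferService::_execute_workflow() does above. Setting the request
// channel on the view (set_request_channel) is omitted here.
// ---------------------------------------------------------------------------
#include "framework/workflow.h"
namespace baidu {
namespace paddle_serving {
namespace predictor {
inline int demo_run_workflow(Workflow* workflow,
                             const std::string& service_name,
                             base::IOBufBuilder* debug_os) {
    DagView* view = workflow->fetch_dag_view(service_name);
    if (view == NULL) {
        return -1;
    }
    int errcode = view->execute(debug_os);  // run the DAG
    workflow->return_dag_view(view);        // always return the view to the pool
    return errcode;
}
} // predictor
} // paddle_serving
} // baidu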
#include "op/common_echo_op.h"
namespace baidu {
namespace paddle_serving {
namespace predictor {
DEFINE_OP(CommonEchoOp);
} // predictor
} // paddle_serving
} // baidu
#ifndef BAIDU_PREDICTOR_PREDICTOR_COMMON_ECHO_OP_H
#define BAIDU_PREDICTOR_PREDICTOR_COMMON_ECHO_OP_H
#include "echo_service.pb.h"
#include "common/inner_common.h"
#include "op/op.h"
#include "framework/channel.h"
#include "framework/op_repository.h"
namespace baidu {
namespace paddle_serving {
namespace predictor {
class CommonEchoOp : public OpWithChannel<
baidu::paddle_serving::predictor::echo_service::RequestAndResponse> {
public:
typedef baidu::paddle_serving::predictor::echo_service::RequestAndResponse
RequestAndResponse;
DECLARE_OP(CommonEchoOp);
int inference() {
const RequestAndResponse* req = dynamic_cast<const RequestAndResponse*>(
get_request_message());
RequestAndResponse* data = mutable_data<RequestAndResponse>();
data->CopyFrom(*req);
return 0;
}
};
} // predictor
} // paddle_serving
} // baidu
#endif
#include "op/dense_echo_op.h"
namespace baidu {
namespace paddle_serving {
namespace predictor {
using baidu::paddle_serving::predictor::format::DensePrediction;
using baidu::paddle_serving::predictor::dense_service::Request;
using baidu::paddle_serving::predictor::dense_service::Response;
int DenseEchoOp::inference() {
const Request* req =
dynamic_cast<const Request*>(get_request_message());
Response* res = mutable_data<Response>();
LOG(DEBUG) << "Receive request in dense service:"
<< req->ShortDebugString();
uint32_t sample_size = req->instances_size();
for (uint32_t si = 0; si < sample_size; si++) {
DensePrediction* dense_res =
res->mutable_predictions()->Add();
dense_res->add_categories(100.0 + si * 0.1);
dense_res->add_categories(200.0 + si * 0.1);
}
return 0;
}
DEFINE_OP(DenseEchoOp);
} // predictor
} // paddle_serving
} // baidu
#ifndef BAIDU_PADDLE_SSERVER_PREDICTOR_OP_DENSE_ECHO_OP_H
#define BAIDU_PADDLE_SSERVER_PREDICTOR_OP_DENSE_ECHO_OP_H
#include "dense_service.pb.h"
#include "common/inner_common.h"
#include "op/op.h"
#include "framework/channel.h"
#include "framework/op_repository.h"
namespace baidu {
namespace paddle_serving {
namespace predictor {
class DenseEchoOp : public OpWithChannel<
baidu::paddle_serving::predictor::dense_service::Response> {
public:
DECLARE_OP(DenseEchoOp);
int inference();
};
} // predictor
} // paddle_serving
} // baidu
#endif
#include "op/op.h"
#include <base/time.h> // base::Timer
#include "common/utils.h"
#include "common/constant.h"
#include "framework/channel.h"
#include "framework/dag.h"
namespace baidu {
namespace paddle_serving {
namespace predictor {
int Op::init(Bus* bus, Dag* dag, uint32_t id, const std::string& name,
const std::string& type, void* conf) {
_bus = bus;
_dag = dag;
_id = id;
_name = name;
_type = type;
set_config(conf);
_timer = base::get_object<TimerFlow>();
if (!_timer) {
LOG(FATAL) << "Invalid timerflow in op:"
<< this->name();
return -1;
}
_timer->init();
_has_calc = false;
_has_init = true;
Channel* channel = mutable_channel();
if (channel == NULL) {
LOG(FATAL)
<< "Failed mutable channel in op: "
<< this->id() << ", " << this->name() << "!";
return -1;
}
return custom_init();
}
int Op::deinit() {
if (_timer) {
base::return_object(_timer);
}
_bus = NULL;
_dag = NULL;
_timer = NULL;
if (release_channel() != 0) {
LOG(FATAL) << "Failed release channel in op:"
<< this->id() << ", " << this->name() << "!";
return -1;
}
return custom_deinit();
}
int Op::check_time(const char* tag) {
if (!_timer) {
LOG(FATAL) << "Invalid timer in op";
return -1;
}
if (!_timer->check(tag)) {
LOG(FATAL) << "Failed check timer:" << tag;
return -1;
}
return 0;
}
int Op::process(bool debug) {
base::Timer op_time(base::Timer::STARTED);
if (debug && _timer) {
_timer->start();
}
if (!_has_init) {
LOG(FATAL)
<< "Make sure op has been init before inference";
return ERR_INTERNAL_FAILURE;
}
if (_has_calc) {
LOG(DEBUG)
<< "Op: " << _name << " already processed before";
return ERR_OK;
}
// 1. dependency inference
/*
DagNode* node = _dag->node_by_name(this->name());
if (node == NULL) {
LOG(FATAL) << "Failed get node of op:" << this->name();
return -1;
}
boost::unordered_map<std::string, EdgeMode>& depends =
node->depends;
boost::unordered_map<std::string, EdgeMode>::iterator it;
for (it = depends.begin(); it != depends.end(); it++) {
Op* depend_op = view->find(it->first);
if (depend_op->process() != 0) {
LOG(WARNING) << "Op: " << _name << " processed failed!";
return -1;
}
}*/
if (debug && _timer) {
_timer->check("depend");
}
// 2. current inference
if (inference() != 0) {
return ERR_OP_INFER_FAILURE;
}
if (debug && _timer) {
_timer->check("infer");
}
// 3. share output to bus
Channel* channel = mutable_channel();
channel->share_to_bus(_bus);
// 4. mark has calculated
_has_calc = true;
if (debug && _timer) {
_timer->check("share");
_timer->end();
}
op_time.stop();
PredictorMetric::GetInstance()->update_latency_metric(
OP_METRIC_PREFIX + full_name(), op_time.u_elapsed());
LOG(NOTICE) << " " << name() << "_time=[" << op_time.u_elapsed() << "]" << noflush;
return ERR_OK;
}
std::string Op::time_info() {
if (_timer) {
return _timer->info();
} else {
return "Invalid Timer!";
}
}
bool Op::is_mutable(const std::string& op) {
if (op == START_OP_NAME) {
return false;
}
DagNode* node = const_cast<DagNode*>(_dag->node_by_name(_name));
if (node->depends.find(op) == node->depends.end()) {
LOG(WARNING)
<< "op: " << _name << " doesnot depend on"
<< "op: " << op << "!";
return false;
}
if (node->depends[op] != RW) {
LOG(WARNING)
<< "op: " << _name << " has no RW access"
<< "ot op: " << op << ", mode: " << node->depends[op]
<< ", please use get_argment() instead.";
return false;
}
return true;
}
bool Op::is_mutable(const std::string& op) const {
if (op == START_OP_NAME) {
return false;
}
DagNode* node = const_cast<DagNode*>(
_dag->node_by_name(_name));
if (node->depends.find(op) == node->depends.end()) {
LOG(WARNING)
<< "op: " << _name << " doesnot depend on"
<< "op: " << op << "!";
return false;
}
if (node->depends[op] != RW) {
LOG(WARNING)
<< "op: " << _name << " has no RW access"
<< "ot op: " << op << ", mode: " << node->depends[op]
<< ", please use get_argment() instead.";
return false;
}
return true;
}
bool Op::is_readable(const std::string& op) {
if (op == START_OP_NAME) {
return true;
}
DagNode* node = const_cast<DagNode*>(_dag->node_by_name(_name));
if (node->depends.find(op) == node->depends.end()) {
LOG(WARNING)
<< "op: " << _name << " doesnot depend on"
<< "op: " << op << "!";
return false;
}
if (node->depends[op] != RW && node->depends[op] != RO) {
LOG(WARNING)
<< "op: " << _name << " has no RO access"
<< "ot op: " << op << ", mode: " << node->depends[op]
<< ", please check your configuration.";
return false;
}
return true;
}
bool Op::is_readable(const std::string& op) const {
if (op == START_OP_NAME) {
return true;
}
DagNode* node = const_cast<DagNode*>(_dag->node_by_name(_name));
if (node->depends.find(op) == node->depends.end()) {
LOG(WARNING)
<< "op: " << _name << " doesnot depend on "
<< "op: " << op << "!";
return false;
}
if (node->depends[op] != RW && node->depends[op] != RO) {
LOG(WARNING)
<< "op: " << _name << " has no RO access"
<< "ot op: " << op << ", mode: " << node->depends[op]
<< ", please check your configuration.";
return false;
}
return true;
}
// Get the Channel object of a depended op
Channel* Op::mutable_depend_channel(const std::string& op) {
if (!is_mutable(op)) {
LOG(WARNING)
<< "Op: " << _name << " cannot mutable op: "
<< op << "!";
return NULL;
}
// Fetch the depended op's channel from the bus
return _bus->channel_by_name(op);
}
// Get the (read-only) Channel object of a depended op
const Channel* Op::get_depend_channel(const std::string& op) const {
// Look up the depended op's access mode in the dag
if (!is_readable(op)) {
LOG(WARNING)
<< "op: " << _name << " doesnot depend on op: "
<< op << "!";
return NULL;
}
// Fetch the depended op's channel from the bus
return _bus->channel_by_name(op);
}
google::protobuf::Message* Op::mutable_message() {
return mutable_channel()->message();
}
const google::protobuf::Message* Op::get_message() const {
return get_channel()->message();
}
bool Op::has_calc() { return _has_calc; }
const char* Op::name() const {
return _name.c_str();
}
const std::string& Op::type() const {
return _type;
}
uint32_t Op::id() const {
return _id;
}
const std::string Op::debug_string() {
const Channel* channel = get_channel();
if (!channel) {
LOG(FATAL) << "Invalid channel!";
return "Invalid channel in OP";
}
return channel->debug_string();
}
const google::protobuf::Message* Op::get_request_message() {
return _bus->channel_by_name(START_OP_NAME)->message();
}
} // predictor
} // paddle_serving
} // baidu
#ifndef BAIDU_PADDLE_SERVING_PREDICTOR_OP_H
#define BAIDU_PADDLE_SERVING_PREDICTOR_OP_H
#include <bvar/bvar.h> // bvar::LatencyRecorder
#include "common/inner_common.h"
#include "framework/channel.h"
#include "framework/op_repository.h"
#include "framework/predictor_metric.h" // PredictorMetric
namespace baidu {
namespace paddle_serving {
namespace predictor {
class Dag;
class Op {
public:
Op() : _bus(NULL),
_dag(NULL),
_has_calc(false),
_has_init(false),
_timer(NULL) {}
virtual ~Op() {}
// ------ Accessors for depended OPs' Channel/Data/Message ------
// Get the Channel object of a depended op
Channel* mutable_depend_channel(const std::string& op);
// Get the (read-only) Channel object of a depended op
const Channel* get_depend_channel(const std::string& op) const;
template<typename T>
T* mutable_depend_argument(const std::string& op) {
Channel* channel = mutable_depend_channel(op);
if (channel == NULL) {
LOG(WARNING) << "cannot mutable channel of " << op
<< " in " << _name;
return NULL;
}
OpChannel<T>* op_channel =
dynamic_cast<OpChannel<T>*>(channel);
if (!op_channel) {
LOG(FATAL) << "Cannot dynamic cast channel of op:"
<< this->name() << " to type: " << typeid(T).name();
return NULL;
}
return op_channel->data();
}
template<typename T>
const T* get_depend_argument(const std::string& op) const {
const Channel* channel = get_depend_channel(op);
if (channel == NULL) {
LOG(WARNING) << "cannot get read-only channel of " << op
<< " in " << _name;
return NULL;
}
const OpChannel<T>* op_channel =
dynamic_cast<const OpChannel<T>*>(channel);
if (!op_channel) {
LOG(FATAL) << "Cannot dynamic cast channel of op:"
<< this->name() << " to type: " << typeid(T).name();
return NULL;
}
return op_channel->data();
}
// ----- Accessors for this OP's own Channel/Data/Message -----
// Get this OP's protobuf message pointer
google::protobuf::Message* mutable_message();
// Get this OP's protobuf message pointer (read-only)
const google::protobuf::Message* get_message() const;
// Get this OP's typed data object
template<typename T>
T* mutable_data() {
Channel* channel = mutable_channel();
return (dynamic_cast<OpChannel<T>*>(channel))->data();
}
// Get this OP's typed data object (read-only)
template<typename T>
const T* get_data() const {
const Channel* channel = get_channel();
return (dynamic_cast<const OpChannel<T>*>(channel))->data();
}
// ---------------- Other base-class member functions ----------------
int init(Bus* bus, Dag* dag, uint32_t id, const std::string& name,
const std::string& type, void* conf);
int deinit();
int check_time(const char* tag);
int process(bool debug);
std::string time_info();
// Get the input (request) message
const google::protobuf::Message* get_request_message();
bool has_calc();
const char* name() const;
const std::string& full_name() const {
return _full_name;
}
void set_full_name(const std::string& full_name) {
_full_name = full_name;
}
const std::string& type() const;
uint32_t id() const;
// --------------- Default implements ----------------
virtual int custom_init() { return 0; }
virtual int custom_deinit() { return 0; }
virtual const std::string debug_string();
// ------------------ OP Interface -------------------
// Get this Op's Channel-derived object
virtual Channel* mutable_channel() = 0;
// Get this Op's Channel-derived object (read-only)
virtual const Channel* get_channel() const = 0;
// Release this Op's Channel-derived object
virtual int release_channel() = 0;
// This Op's custom inference interface
virtual int inference() = 0;
// ------------------ Conf Interface -------------------
virtual void* create_config(const comcfg::ConfigUnit& conf) { return NULL; }
virtual void delete_config(void* conf) { }
virtual void set_config(void* conf) { return; }
// ------------------ Metric Interface -------------------
virtual void regist_metric() { return; }
private:
bool is_mutable(const std::string& op);
bool is_mutable(const std::string& op) const;
bool is_readable(const std::string& op);
bool is_readable(const std::string& op) const;
private:
Bus* _bus;
Dag* _dag;
uint32_t _id;
std::string _name;
std::string _full_name; // service_workflow_stageindex_opname
std::string _type;
bool _has_calc;
bool _has_init;
TimerFlow* _timer;
};
template<typename T>
class OpWithChannel : public Op {
public:
typedef T DataType;
typedef OpChannel<T> ChannelType;
OpWithChannel() : _channel(NULL) {}
virtual ~OpWithChannel() {}
// ---------- Implements ----------
Channel* mutable_channel() {
if (_channel != NULL) {
return _channel;
}
_channel = base::get_object<ChannelType>();
if (!_channel) {
LOG(FATAL)
<< "Failed mutable channel of type:"
<< typeid(T).name();
return NULL;
}
_channel->init(this->id(), this->name());
return _channel;
}
const Channel* get_channel() const {
return _channel;
}
int release_channel() {
if (_channel) {
_channel->deinit();
base::return_object<ChannelType>(_channel);
}
_channel = NULL;
return 0;
}
// ------------- Interface -------------
// Op-specific custom inference interface
virtual int inference() = 0;
private:
ChannelType* _channel;
};
template<typename T, typename C>
class OpWithChannelAndConf : public OpWithChannel<T> {
public:
void set_config(void* conf) {
_conf = static_cast<C*>(conf);
}
C* get_self_config() { return _conf; }
virtual void delete_config(void* conf) { delete static_cast<C*>(conf); }
private:
C* _conf;
};
#define DECLARE_OP(OP_TYPE) \
OP_TYPE() { \
REGISTER_OP(OP_TYPE); \
} \
static OP_TYPE _s_##OP_TYPE
#define DEFINE_OP(OP_TYPE) \
OP_TYPE OP_TYPE::_s_##OP_TYPE
} // predictor
} // paddle_serving
} // baidu
#endif
#ifndef BAIDU_PADDLE_SEVING_PREDICTOR_OP_STRUCT_DEMO_H
#define BAIDU_PADDLE_SEVING_PREDICTOR_OP_STRUCT_DEMO_H
#include "common/inner_common.h"
#include "op/op.h"
#include "framework/channel.h"
namespace baidu {
namespace paddle_serving {
namespace predictor {
struct DemoData {
boost::unordered_map<std::string, int> name_id;
int data;
};
class StructOp : public OpWithChannel<DemoData> {
public:
DECLARE_OP(StructOp);
int inference() {
DemoData* data = mutable_data<DemoData>();
data->data = 1;
return 0;
}
};
DEFINE_OP(StructOp);
} // predictor
} // paddle_serving
} // baidu
#endif
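// ---------------------------------------------------------------------------
// Editor's sketch (not part of the original commit): StructOp above covers
// OpWithChannel; this variant covers OpWithChannelAndConf, whose
// create_config()/get_self_config() hooks let an op carry its own parsed
// configuration. DemoConfData, DemoConf and the threshold field are
// hypothetical; real comcfg field parsing is intentionally left out.
// ---------------------------------------------------------------------------
#include "common/inner_common.h"
#include "op/op.h"
namespace baidu {
namespace paddle_serving {
namespace predictor {
struct DemoConfData {
    int value;
};
struct DemoConf {
    int threshold;
};
class StructConfOp : public OpWithChannelAndConf<DemoConfData, DemoConf> {
public:
    DECLARE_OP(StructConfOp);
    // The framework owns the object returned here and hands it back through
    // set_config(); delete_config() in the base class frees it.
    void* create_config(const comcfg::ConfigUnit& conf) {
        DemoConf* c = new (std::nothrow) DemoConf();
        if (c != NULL) {
            c->threshold = 0;  // comcfg parsing omitted in this sketch
        }
        return c;
    }
    int inference() {
        DemoConfData* data = mutable_data<DemoConfData>();
        DemoConf* c = get_self_config();
        data->value = (c != NULL) ? c->threshold : 0;
        return 0;
    }
};
DEFINE_OP(StructConfOp);
} // predictor
} // paddle_serving
} // baidu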
#include "op/sparse_echo_op.h"
namespace baidu {
namespace paddle_serving {
namespace predictor {
int SparseEchoOp::inference() {
// Every op can obtain request message by:
// get_request_message()
const Request* req =
dynamic_cast<const Request*>(get_request_message());
// Each op can obtain self-writable-data by:
// mutable_data()
Response* res = mutable_data<Response>();
// You can get the channel/data of depended ops by:
// get/mutable_depend_argument()
// ...
LOG(DEBUG)
<< "Receive request in sparse service:"
<< req->ShortDebugString();
uint32_t sample_size = req->instances_size();
for (uint32_t si = 0; si < sample_size; si++) {
SparsePrediction* sparse_res =
res->mutable_predictions()->Add();
sparse_res->add_categories(100.0 + si * 0.1);
sparse_res->add_categories(200.0 + si * 0.1);
}
return 0;
}
DEFINE_OP(SparseEchoOp);
} // predictor
} // paddle_serving
} // baidu
#ifndef BAIDU_PADDLE_SERVING_PREDICTOR_OP_SPARSE_ECHO_OP_H
#define BAIDU_PADDLE_SERVING_PREDICTOR_OP_SPARSE_ECHO_OP_H
#include "sparse_service.pb.h"
#include "common/inner_common.h"
#include "op/op.h"
#include "framework/channel.h"
#include "framework/op_repository.h"
namespace baidu {
namespace paddle_serving {
namespace predictor {
class SparseEchoOp : public OpWithChannel<
baidu::paddle_serving::predictor::sparse_service::Response> {
public:
DECLARE_OP(SparseEchoOp);
typedef baidu::paddle_serving::predictor::sparse_service::Request Request;
typedef baidu::paddle_serving::predictor::sparse_service::Response Response;
typedef baidu::paddle_serving::predictor::format::SparsePrediction
SparsePrediction;
int inference();
};
} // predictor
} // paddle_serving
} // baidu
#endif
#include "pb_to_json.h"
#include <google/protobuf/text_format.h>
#include "op/write_json_op.h"
#include "framework/memory.h"
namespace baidu {
namespace paddle_serving {
namespace predictor {
using baidu::paddle_serving::predictor::format::XImageResInstance;
using baidu::paddle_serving::predictor::image_classification::ClassifyResponse;
using baidu::paddle_serving::predictor::image_classification::Response;
int WriteJsonOp::inference() {
const ClassifyResponse* classify_out =
get_depend_argument<ClassifyResponse>("image_classify_op");
if (!classify_out) {
LOG(ERROR) << "Failed mutable depended argument, op:"
<< "image_classify_op";
return -1;
}
Response* res = mutable_data<Response>();
if (!res) {
LOG(ERROR) << "Failed mutable output response in op:"
<< "WriteJsonOp";
return -1;
}
// transfer classify output message into json format
std::string err_string;
uint32_t sample_size = classify_out->predictions_size();
for (uint32_t si = 0; si < sample_size; si++) {
XImageResInstance* ins = res->add_predictions();
if (!ins) {
LOG(ERROR) << "Failed add one prediction ins";
return -1;
}
std::string* text = ins->mutable_response_json();
if (!ProtoMessageToJson(classify_out->predictions(si),
text, &err_string)) {
LOG(ERROR) << "Failed convert message["
<< classify_out->predictions(si).ShortDebugString()
<< "], err: " << err_string;
return -1;
}
}
LOG(TRACE) << "Succ write json:"
<< classify_out->ShortDebugString();
return 0;
}
DEFINE_OP(WriteJsonOp);
} // predictor
} // paddle_serving
} // baidu
#ifndef BAIDU_PADDLE_SERVING_PREDICTOR_OP_WRITE_JSON_OP_H
#define BAIDU_PADDLE_SERVING_PREDICTOR_OP_WRITE_JSON_OP_H
#include "builtin_format.pb.h"
#include "image_classification.pb.h"
#include "common/inner_common.h"
#include "op/op.h"
#include "framework/channel.h"
#include "framework/op_repository.h"
namespace baidu {
namespace paddle_serving {
namespace predictor {
class WriteJsonOp : public OpWithChannel<
baidu::paddle_serving::predictor::image_classification::Response> {
public:
DECLARE_OP(WriteJsonOp);
int inference();
};
} // predictor
} // paddle_serving
} // baidu
#endif
// Protocol Buffers - Google's data interchange format
// Copyright 2008 Google Inc. All rights reserved.
// http://code.google.com/p/protobuf/
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// from google3/util/gtl/stl_util-inl.h
#ifndef GOOGLE_PROTOBUF_STUBS_STL_UTIL_INL_H__
#define GOOGLE_PROTOBUF_STUBS_STL_UTIL_INL_H__
#include <google/protobuf/stubs/common.h>
namespace google {
namespace protobuf {
// STLDeleteContainerPointers()
// For a range within a container of pointers, calls delete
// (non-array version) on these pointers.
// NOTE: for these three functions, we could just implement a DeleteObject
// functor and then call for_each() on the range and functor, but this
// requires us to pull in all of algorithm.h, which seems expensive.
// For hash_[multi]set, it is important that this deletes behind the iterator
// because the hash_set may call the hash function on the iterator when it is
// advanced, which could result in the hash function trying to dereference a
// stale pointer.
template <class ForwardIterator>
void STLDeleteContainerPointers(ForwardIterator begin,
ForwardIterator end) {
while (begin != end) {
ForwardIterator temp = begin;
++begin;
delete *temp;
}
}
// Inside Google, this function implements a horrible, disgusting hack in which
// we reach into the string's private implementation and resize it without
// initializing the new bytes. In some cases doing this can significantly
// improve performance. However, since it's totally non-portable it has no
// place in open source code. Feel free to fill this function in with your
// own disgusting hack if you want the perf boost.
inline void STLStringResizeUninitialized(string* s, size_t new_size) {
s->resize(new_size);
}
// Return a mutable char* pointing to a string's internal buffer,
// which may not be null-terminated. Writing through this pointer will
// modify the string.
//
// string_as_array(&str)[i] is valid for 0 <= i < str.size() until the
// next call to a string method that invalidates iterators.
//
// As of 2006-04, there is no standard-blessed way of getting a
// mutable reference to a string's internal buffer. However, issue 530
// (http://www.open-std.org/JTC1/SC22/WG21/docs/lwg-active.html#530)
// proposes this as the method. According to Matt Austern, this should
// already work on all current implementations.
inline char* string_as_array(string* str) {
// DO NOT USE const_cast<char*>(str->data())! See the unittest for why.
return str->empty() ? NULL : &*str->begin();
}
// STLDeleteElements() deletes all the elements in an STL container and clears
// the container. This function is suitable for use with a vector, set,
// hash_set, or any other STL container which defines sensible begin(), end(),
// and clear() methods.
//
// If container is NULL, this function is a no-op.
//
// As an alternative to calling STLDeleteElements() directly, consider
// ElementDeleter (defined below), which ensures that your container's elements
// are deleted when the ElementDeleter goes out of scope.
template <class T>
void STLDeleteElements(T *container) {
if (!container) return;
STLDeleteContainerPointers(container->begin(), container->end());
container->clear();
}
// Given an STL container consisting of (key, value) pairs, STLDeleteValues
// deletes all the "value" components and clears the container. Does nothing
// in the case it's given a NULL pointer.
template <class T>
void STLDeleteValues(T *v) {
if (!v) return;
for (typename T::iterator i = v->begin(); i != v->end(); ++i) {
delete i->second;
}
v->clear();
}
} // namespace protobuf
} // namespace google
#endif // GOOGLE_PROTOBUF_STUBS_STL_UTIL_INL_H__
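// ---------------------------------------------------------------------------
// Editor's usage sketch (not part of the original file): STLDeleteElements()
// and STLDeleteValues() both delete the pointed-to objects and then clear the
// container, as documented above. The include path is the protobuf 2.x stubs
// location assumed for this copy.
// ---------------------------------------------------------------------------
#include <map>
#include <string>
#include <vector>
#include <google/protobuf/stubs/stl_util-inl.h>
inline void demo_stl_delete_helpers() {
    std::vector<int*> owned;
    owned.push_back(new int(1));
    owned.push_back(new int(2));
    google::protobuf::STLDeleteElements(&owned);  // deletes both ints, clears the vector
    std::map<std::string, int*> table;
    table["answer"] = new int(42);
    google::protobuf::STLDeleteValues(&table);    // deletes the values, clears the map
}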
// Protocol Buffers - Google's data interchange format
// Copyright 2008 Google Inc. All rights reserved.
// http://code.google.com/p/protobuf/
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// from google3/strings/strutil.h
#ifndef GOOGLE_PROTOBUF_STUBS_STRUTIL_H__
#define GOOGLE_PROTOBUF_STUBS_STRUTIL_H__
#include <stdlib.h>
#include <vector>
#include <google/protobuf/stubs/common.h>
namespace google {
namespace protobuf {
#ifdef _MSC_VER
#define strtoll _strtoi64
#define strtoull _strtoui64
#elif defined(__DECCXX) && defined(__osf__)
// HP C++ on Tru64 does not have strtoll, but strtol is already 64-bit.
#define strtoll strtol
#define strtoull strtoul
#endif
// ----------------------------------------------------------------------
// ascii_isalnum()
// Check if an ASCII character is alphanumeric. We can't use ctype's
// isalnum() because it is affected by locale. This function is applied
// to identifiers in the protocol buffer language, not to natural-language
// strings, so locale should not be taken into account.
// ascii_isdigit()
// Like above, but only accepts digits.
// ----------------------------------------------------------------------
inline bool ascii_isalnum(char c) {
return ('a' <= c && c <= 'z') ||
('A' <= c && c <= 'Z') ||
('0' <= c && c <= '9');
}
inline bool ascii_isdigit(char c) {
return ('0' <= c && c <= '9');
}
// ----------------------------------------------------------------------
// HasPrefixString()
// Check if a string begins with a given prefix.
// StripPrefixString()
// Given a string and a putative prefix, returns the string minus the
// prefix string if the prefix matches, otherwise the original
// string.
// ----------------------------------------------------------------------
inline bool HasPrefixString(const string& str,
const string& prefix) {
return str.size() >= prefix.size() &&
str.compare(0, prefix.size(), prefix) == 0;
}
inline string StripPrefixString(const string& str, const string& prefix) {
if (HasPrefixString(str, prefix)) {
return str.substr(prefix.size());
} else {
return str;
}
}
// ----------------------------------------------------------------------
// HasSuffixString()
// Return true if str ends in suffix.
// StripSuffixString()
// Given a string and a putative suffix, returns the string minus the
// suffix string if the suffix matches, otherwise the original
// string.
// ----------------------------------------------------------------------
inline bool HasSuffixString(const string& str,
const string& suffix) {
return str.size() >= suffix.size() &&
str.compare(str.size() - suffix.size(), suffix.size(), suffix) == 0;
}
inline string StripSuffixString(const string& str, const string& suffix) {
if (HasSuffixString(str, suffix)) {
return str.substr(0, str.size() - suffix.size());
} else {
return str;
}
}
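// ---------------------------------------------------------------------------
// Editor's usage sketch (not part of the original file): the prefix/suffix
// helpers defined above return the original string unchanged when there is
// no match. The file name and prefixes below are purely illustrative.
// ---------------------------------------------------------------------------
inline bool demo_prefix_suffix_helpers() {
    const string conf_file = "resource.conf";                        // illustrative
    const bool is_conf = HasSuffixString(conf_file, ".conf");        // true
    const string base_name = StripSuffixString(conf_file, ".conf");  // "resource"
    return is_conf && HasPrefixString(base_name, "res") &&
           StripPrefixString(base_name, "res") == "ource";
}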
// ----------------------------------------------------------------------
// StripString
// Replaces any occurrence of the character 'remove' (or the characters
// in 'remove') with the character 'replacewith'.
// Good for keeping html characters or protocol characters (\t) out
// of places where they might cause a problem.
// ----------------------------------------------------------------------
LIBPROTOBUF_EXPORT void StripString(string* s, const char* remove,
char replacewith);
// ----------------------------------------------------------------------
// LowerString()
// UpperString()
// Convert the characters in "s" to lowercase or uppercase. ASCII-only:
// these functions intentionally ignore locale because they are applied to
// identifiers used in the Protocol Buffer language, not to natural-language
// strings.
// ----------------------------------------------------------------------
inline void LowerString(string * s) {
string::iterator end = s->end();
for (string::iterator i = s->begin(); i != end; ++i) {
// tolower() changes based on locale. We don't want this!
if ('A' <= *i && *i <= 'Z') *i += 'a' - 'A';
}
}
inline void UpperString(string * s) {
string::iterator end = s->end();
for (string::iterator i = s->begin(); i != end; ++i) {
// toupper() changes based on locale. We don't want this!
if ('a' <= *i && *i <= 'z') *i += 'A' - 'a';
}
}
// ----------------------------------------------------------------------
// StringReplace()
// Give me a string and two patterns "old" and "new", and I replace
// the first instance of "old" in the string with "new", if it
// exists. RETURN a new string, regardless of whether the replacement
// happened or not.
// ----------------------------------------------------------------------
LIBPROTOBUF_EXPORT string StringReplace(const string& s, const string& oldsub,
const string& newsub, bool replace_all);
// ----------------------------------------------------------------------
// SplitStringUsing()
// Split a string using a character delimiter. Append the components
// to 'result'. If there are consecutive delimiters, this function skips
// over all of them.
// ----------------------------------------------------------------------
LIBPROTOBUF_EXPORT void SplitStringUsing(const string& full, const char* delim,
vector<string>* res);
// ----------------------------------------------------------------------
// JoinStrings()
// These methods concatenate a vector of strings into a C++ string, using
// the C-string "delim" as a separator between components. There are two
// flavors of the function, one flavor returns the concatenated string,
// another takes a pointer to the target string. In the latter case the
// target string is cleared and overwritten.
// ----------------------------------------------------------------------
LIBPROTOBUF_EXPORT void JoinStrings(const vector<string>& components,
const char* delim, string* result);
inline string JoinStrings(const vector<string>& components,
const char* delim) {
string result;
JoinStrings(components, delim, &result);
return result;
}
// ----------------------------------------------------------------------
// UnescapeCEscapeSequences()
// Copies "source" to "dest", rewriting C-style escape sequences
// -- '\n', '\r', '\\', '\ooo', etc -- to their ASCII
// equivalents. "dest" must be sufficiently large to hold all
// the characters in the rewritten string (i.e. at least as large
// as strlen(source) + 1 should be safe, since the replacements
// are always shorter than the original escaped sequences). It's
// safe for source and dest to be the same. RETURNS the length
// of dest.
//
// It allows hex sequences \xhh, or generally \xhhhhh with an
// arbitrary number of hex digits, but all of them together must
// specify a value of a single byte (e.g. \x0045 is equivalent
// to \x45, and \x1234 is erroneous).
//
// It also allows escape sequences of the form \uhhhh (exactly four
// hex digits, upper or lower case) or \Uhhhhhhhh (exactly eight
// hex digits, upper or lower case) to specify a Unicode code
// point. The dest array will contain the UTF8-encoded version of
// that code-point (e.g., if source contains \u2019, then dest will
// contain the three bytes 0xE2, 0x80, and 0x99).
//
// Errors: In the first form of the call, errors are reported with
// LOG(ERROR). The same is true for the second form of the call if
// the pointer to the string vector is NULL; otherwise, error
// messages are stored in the vector. In either case, the effect on
// the dest array is not defined, but rest of the source will be
// processed.
// ----------------------------------------------------------------------
LIBPROTOBUF_EXPORT int UnescapeCEscapeSequences(const char* source, char* dest);
LIBPROTOBUF_EXPORT int UnescapeCEscapeSequences(const char* source, char* dest,
vector<string> *errors);
// ----------------------------------------------------------------------
// UnescapeCEscapeString()
// This does the same thing as UnescapeCEscapeSequences, but creates
// a new string. The caller does not need to worry about allocating
// a dest buffer. This should be used for non performance critical
// tasks such as printing debug messages. It is safe for src and dest
// to be the same.
//
// The second call stores its errors in a supplied string vector.
// If the string vector pointer is NULL, it reports the errors with LOG().
//
// In the first and second calls, the length of dest is returned. In the
// third call, the new string is returned.
// ----------------------------------------------------------------------
LIBPROTOBUF_EXPORT int UnescapeCEscapeString(const string& src, string* dest);
LIBPROTOBUF_EXPORT int UnescapeCEscapeString(const string& src, string* dest,
vector<string> *errors);
LIBPROTOBUF_EXPORT string UnescapeCEscapeString(const string& src);
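//
// Example (illustrative only):
//   // The C++ literal below contains the two characters '\' and 'n'.
//   string out = UnescapeCEscapeString("line1\\nline2");
//   // out is "line1", then a real newline byte, then "line2".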
// ----------------------------------------------------------------------
// CEscapeString()
// Copies 'src' to 'dest', escaping dangerous characters using
// C-style escape sequences. This is very useful for preparing query
// flags. 'src' and 'dest' should not overlap.
// Returns the number of bytes written to 'dest' (not including the \0)
// or -1 if there was insufficient space.
//
// Currently only \n, \r, \t, ", ', \ and !isprint() chars are escaped.
// ----------------------------------------------------------------------
LIBPROTOBUF_EXPORT int CEscapeString(const char* src, int src_len,
char* dest, int dest_len);
// ----------------------------------------------------------------------
// CEscape()
// More convenient form of CEscapeString: returns result as a "string".
// This version is slower than CEscapeString() because it does more
// allocation. However, it is much more convenient to use in
// non-speed-critical code like logging messages etc.
// ----------------------------------------------------------------------
LIBPROTOBUF_EXPORT string CEscape(const string& src);
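//
// Example (illustrative only):
//   string s("tab\there");        // contains a real tab byte
//   string quoted = CEscape(s);
//   // quoted replaces the tab byte with the two characters '\' and 't'.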
namespace strings {
// Like CEscape() but does not escape bytes with the upper bit set.
LIBPROTOBUF_EXPORT string Utf8SafeCEscape(const string& src);
// Like CEscape() but uses hex (\x) escapes instead of octals.
LIBPROTOBUF_EXPORT string CHexEscape(const string& src);
} // namespace strings
// ----------------------------------------------------------------------
// strto32()
// strtou32()
// strto64()
// strtou64()
// Architecture-neutral plug compatible replacements for strtol() and
// strtoul(). Long's have different lengths on ILP-32 and LP-64
// platforms, so using these is safer, from the point of view of
// overflow behavior, than using the standard libc functions.
// ----------------------------------------------------------------------
LIBPROTOBUF_EXPORT int32 strto32_adaptor(const char *nptr, char **endptr,
int base);
LIBPROTOBUF_EXPORT uint32 strtou32_adaptor(const char *nptr, char **endptr,
int base);
inline int32 strto32(const char *nptr, char **endptr, int base) {
if (sizeof(int32) == sizeof(long))
return strtol(nptr, endptr, base);
else
return strto32_adaptor(nptr, endptr, base);
}
inline uint32 strtou32(const char *nptr, char **endptr, int base) {
if (sizeof(uint32) == sizeof(unsigned long))
return strtoul(nptr, endptr, base);
else
return strtou32_adaptor(nptr, endptr, base);
}
// For now, long long is 64-bit on all the platforms we care about, so these
// functions can simply pass the call to strto[u]ll.
inline int64 strto64(const char *nptr, char **endptr, int base) {
GOOGLE_COMPILE_ASSERT(sizeof(int64) == sizeof(long long),
sizeof_int64_is_not_sizeof_long_long);
return strtoll(nptr, endptr, base);
}
inline uint64 strtou64(const char *nptr, char **endptr, int base) {
GOOGLE_COMPILE_ASSERT(sizeof(uint64) == sizeof(unsigned long long),
sizeof_uint64_is_not_sizeof_long_long);
return strtoull(nptr, endptr, base);
}
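//
// Example (illustrative only):
//   char* end = NULL;
//   int32 v = strto32("123abc", &end, 10);   // v == 123, end points at "abc"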
// ----------------------------------------------------------------------
// FastIntToBuffer()
// FastHexToBuffer()
// FastHex64ToBuffer()
// FastHex32ToBuffer()
// FastTimeToBuffer()
// These are intended for speed. FastIntToBuffer() assumes the
// integer is non-negative. FastHexToBuffer() puts output in
// hex rather than decimal. FastTimeToBuffer() puts the output
// into RFC822 format.
//
// FastHex64ToBuffer() puts a 64-bit unsigned value in hex-format,
// padded to exactly 16 bytes (plus one byte for '\0')
//
// FastHex32ToBuffer() puts a 32-bit unsigned value in hex-format,
// padded to exactly 8 bytes (plus one byte for '\0')
//
// All functions take the output buffer as an arg.
// They all return a pointer to the beginning of the output,
// which may not be the beginning of the input buffer.
// ----------------------------------------------------------------------
// Suggested buffer size for FastToBuffer functions. Also works with
// DoubleToBuffer() and FloatToBuffer().
static const int kFastToBufferSize = 32;
LIBPROTOBUF_EXPORT char* FastInt32ToBuffer(int32 i, char* buffer);
LIBPROTOBUF_EXPORT char* FastInt64ToBuffer(int64 i, char* buffer);
char* FastUInt32ToBuffer(uint32 i, char* buffer); // inline below
char* FastUInt64ToBuffer(uint64 i, char* buffer); // inline below
LIBPROTOBUF_EXPORT char* FastHexToBuffer(int i, char* buffer);
LIBPROTOBUF_EXPORT char* FastHex64ToBuffer(uint64 i, char* buffer);
LIBPROTOBUF_EXPORT char* FastHex32ToBuffer(uint32 i, char* buffer);
// at least 22 bytes long
inline char* FastIntToBuffer(int i, char* buffer) {
return (sizeof(i) == 4 ?
FastInt32ToBuffer(i, buffer) : FastInt64ToBuffer(i, buffer));
}
inline char* FastUIntToBuffer(unsigned int i, char* buffer) {
return (sizeof(i) == 4 ?
FastUInt32ToBuffer(i, buffer) : FastUInt64ToBuffer(i, buffer));
}
inline char* FastLongToBuffer(long i, char* buffer) {
return (sizeof(i) == 4 ?
FastInt32ToBuffer(i, buffer) : FastInt64ToBuffer(i, buffer));
}
inline char* FastULongToBuffer(unsigned long i, char* buffer) {
return (sizeof(i) == 4 ?
FastUInt32ToBuffer(i, buffer) : FastUInt64ToBuffer(i, buffer));
}
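//
// Example (illustrative only):
//   char buf[kFastToBufferSize];
//   const char* p = FastInt32ToBuffer(-42, buf);
//   // p points at the text "-42"; note p may not equal buf.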
// ----------------------------------------------------------------------
// FastInt32ToBufferLeft()
// FastUInt32ToBufferLeft()
// FastInt64ToBufferLeft()
// FastUInt64ToBufferLeft()
//
// Like the Fast*ToBuffer() functions above, these are intended for speed.
// Unlike the Fast*ToBuffer() functions, however, these functions write
// their output to the beginning of the buffer (hence the name, as the
// output is left-aligned). The caller is responsible for ensuring that
// the buffer has enough space to hold the output.
//
// Returns a pointer to the end of the string (i.e. the null character
// terminating the string).
// ----------------------------------------------------------------------
LIBPROTOBUF_EXPORT char* FastInt32ToBufferLeft(int32 i, char* buffer);
LIBPROTOBUF_EXPORT char* FastUInt32ToBufferLeft(uint32 i, char* buffer);
LIBPROTOBUF_EXPORT char* FastInt64ToBufferLeft(int64 i, char* buffer);
LIBPROTOBUF_EXPORT char* FastUInt64ToBufferLeft(uint64 i, char* buffer);
// Just define these in terms of the above.
inline char* FastUInt32ToBuffer(uint32 i, char* buffer) {
FastUInt32ToBufferLeft(i, buffer);
return buffer;
}
inline char* FastUInt64ToBuffer(uint64 i, char* buffer) {
FastUInt64ToBufferLeft(i, buffer);
return buffer;
}
// ----------------------------------------------------------------------
// SimpleItoa()
// Description: converts an integer to a string.
//
// Return value: string
// ----------------------------------------------------------------------
LIBPROTOBUF_EXPORT string SimpleItoa(int i);
LIBPROTOBUF_EXPORT string SimpleItoa(unsigned int i);
LIBPROTOBUF_EXPORT string SimpleItoa(long i);
LIBPROTOBUF_EXPORT string SimpleItoa(unsigned long i);
LIBPROTOBUF_EXPORT string SimpleItoa(long long i);
LIBPROTOBUF_EXPORT string SimpleItoa(unsigned long long i);
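//
// Example (illustrative only):
//   string s = SimpleItoa(12345);   // s == "12345"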
// ----------------------------------------------------------------------
// SimpleDtoa()
// SimpleFtoa()
// DoubleToBuffer()
// FloatToBuffer()
// Description: converts a double or float to a string which, if
// passed to NoLocaleStrtod(), will produce the exact same original double
// (except in case of NaN; all NaNs are considered the same value).
// We try to keep the string short but it's not guaranteed to be as
// short as possible.
//
// DoubleToBuffer() and FloatToBuffer() write the text to the given
// buffer and return it. The buffer must be at least
// kDoubleToBufferSize bytes for doubles and kFloatToBufferSize
// bytes for floats. kFastToBufferSize is also guaranteed to be large
// enough to hold either.
//
// Return value: string
// ----------------------------------------------------------------------
LIBPROTOBUF_EXPORT string SimpleDtoa(double value);
LIBPROTOBUF_EXPORT string SimpleFtoa(float value);
LIBPROTOBUF_EXPORT char* DoubleToBuffer(double i, char* buffer);
LIBPROTOBUF_EXPORT char* FloatToBuffer(float i, char* buffer);
// In practice, doubles should never need more than 24 bytes and floats
// should never need more than 14 (including null terminators), but we
// overestimate to be safe.
static const int kDoubleToBufferSize = 32;
static const int kFloatToBufferSize = 24;
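//
// Example (illustrative only):
//   char buf[kDoubleToBufferSize];
//   const char* p = DoubleToBuffer(0.1, buf);   // text that round-trips via NoLocaleStrtod()
//   string s = SimpleDtoa(0.1);                 // the same text, returned as a string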
// ----------------------------------------------------------------------
// NoLocaleStrtod()
// Exactly like strtod(), except it always behaves as if in the "C"
// locale (i.e. decimal points must be '.'s).
// ----------------------------------------------------------------------
LIBPROTOBUF_EXPORT double NoLocaleStrtod(const char* text, char** endptr);
} // namespace protobuf
} // namespace google
#endif // GOOGLE_PROTOBUF_STUBS_STRUTIL_H__
// Protocol Buffers - Google's data interchange format
// Copyright 2008 Google Inc. All rights reserved.
// http://code.google.com/p/protobuf/
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// Author: kenton@google.com (Kenton Varda)
//#include <google/protobuf/stubs/strutil.h>
#include "plugin/strutil.h"
#include "plugin/substitute.h"
#include "plugin/stl_util-inl.h"
namespace google {
namespace protobuf {
namespace strings {
using internal::SubstituteArg;
// Returns the number of args in arg_array which were passed explicitly
// to Substitute().
static int CountSubstituteArgs(const SubstituteArg* const* args_array) {
int count = 0;
while (args_array[count] != NULL && args_array[count]->size() != -1) {
++count;
}
return count;
}
string Substitute(
const char* format,
const SubstituteArg& arg0, const SubstituteArg& arg1,
const SubstituteArg& arg2, const SubstituteArg& arg3,
const SubstituteArg& arg4, const SubstituteArg& arg5,
const SubstituteArg& arg6, const SubstituteArg& arg7,
const SubstituteArg& arg8, const SubstituteArg& arg9) {
string result;
SubstituteAndAppend(&result, format, arg0, arg1, arg2, arg3, arg4,
arg5, arg6, arg7, arg8, arg9);
return result;
}
void SubstituteAndAppend(
string* output, const char* format,
const SubstituteArg& arg0, const SubstituteArg& arg1,
const SubstituteArg& arg2, const SubstituteArg& arg3,
const SubstituteArg& arg4, const SubstituteArg& arg5,
const SubstituteArg& arg6, const SubstituteArg& arg7,
const SubstituteArg& arg8, const SubstituteArg& arg9) {
const SubstituteArg* const args_array[] = {
&arg0, &arg1, &arg2, &arg3, &arg4, &arg5, &arg6, &arg7, &arg8, &arg9, NULL
};
// Determine total size needed.
int size = 0;
for (int i = 0; format[i] != '\0'; i++) {
if (format[i] == '$') {
if (ascii_isdigit(format[i+1])) {
int index = format[i+1] - '0';
if (args_array[index]->size() == -1) {
GOOGLE_LOG(DFATAL)
<< "strings::Substitute format string invalid: asked for \"$"
<< index << "\", but only " << CountSubstituteArgs(args_array)
<< " args were given. Full format string was: \""
<< CEscape(format) << "\".";
return;
}
size += args_array[index]->size();
++i; // Skip next char.
} else if (format[i+1] == '$') {
++size;
++i; // Skip next char.
} else {
GOOGLE_LOG(DFATAL)
<< "Invalid strings::Substitute() format string: \""
<< CEscape(format) << "\".";
return;
}
} else {
++size;
}
}
if (size == 0) return;
// Build the string.
int original_size = output->size();
STLStringResizeUninitialized(output, original_size + size);
char* target = string_as_array(output) + original_size;
for (int i = 0; format[i] != '\0'; i++) {
if (format[i] == '$') {
if (ascii_isdigit(format[i+1])) {
const SubstituteArg* src = args_array[format[i+1] - '0'];
memcpy(target, src->data(), src->size());
target += src->size();
++i; // Skip next char.
} else if (format[i+1] == '$') {
*target++ = '$';
++i; // Skip next char.
}
} else {
*target++ = format[i];
}
}
GOOGLE_DCHECK_EQ(target - output->data(), output->size());
}
} // namespace strings
} // namespace protobuf
} // namespace google
// Protocol Buffers - Google's data interchange format
// Copyright 2008 Google Inc. All rights reserved.
// http://code.google.com/p/protobuf/
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// Author: kenton@google.com (Kenton Varda)
// from google3/strings/substitute.h
#include <string>
#include <google/protobuf/stubs/common.h>
// hmmm...
//#include <google/protobuf/stubs/strutil.h>
#include "plugin/strutil.h"
#ifndef GOOGLE_PROTOBUF_STUBS_SUBSTITUTE_H_
#define GOOGLE_PROTOBUF_STUBS_SUBSTITUTE_H_
namespace google {
namespace protobuf {
namespace strings {
// ----------------------------------------------------------------------
// strings::Substitute()
// strings::SubstituteAndAppend()
// Kind of like StringPrintf, but different.
//
// Example:
// string GetMessage(string first_name, string last_name, int age) {
// return strings::Substitute("My name is $0 $1 and I am $2 years old.",
// first_name, last_name, age);
// }
//
// Differences from StringPrintf:
// * The format string does not identify the types of arguments.
// Instead, the magic of C++ deals with this for us. See below
// for a list of accepted types.
// * Substitutions in the format string are identified by a '$'
// followed by a digit. So, you can use arguments out-of-order and
// use the same argument multiple times.
// * It's much faster than StringPrintf.
//
// Supported types:
// * Strings (const char*, const string&)
// * Note that this means you do not have to add .c_str() to all of
// your strings. In fact, you shouldn't; it will be slower.
// * int32, int64, uint32, uint64: Formatted using SimpleItoa().
// * float, double: Formatted using SimpleFtoa() and SimpleDtoa().
// * bool: Printed as "true" or "false".
//
// SubstituteAndAppend() is like Substitute() but appends the result to
// *output. Example:
//
// string str;
// strings::SubstituteAndAppend(&str,
// "My name is $0 $1 and I am $2 years old.",
// first_name, last_name, age);
//
// Substitute() is significantly faster than StringPrintf(). For very
// large strings, it may be orders of magnitude faster.
// ----------------------------------------------------------------------
namespace internal { // Implementation details.
class SubstituteArg {
public:
inline SubstituteArg(const char* value)
: text_(value), size_(strlen(text_)) {}
inline SubstituteArg(const string& value)
: text_(value.data()), size_(value.size()) {}
// Indicates that no argument was given.
inline explicit SubstituteArg()
: text_(NULL), size_(-1) {}
// Primitives
// We don't overload for signed and unsigned char because if people are
// explicitly declaring their chars as signed or unsigned then they are
// probably actually using them as 8-bit integers and would probably
// prefer an integer representation. But, we don't really know. So, we
// make the caller decide what to do.
inline SubstituteArg(char value)
: text_(scratch_), size_(1) { scratch_[0] = value; }
inline SubstituteArg(short value)
: text_(FastInt32ToBuffer(value, scratch_)), size_(strlen(text_)) {}
inline SubstituteArg(unsigned short value)
: text_(FastUInt32ToBuffer(value, scratch_)), size_(strlen(text_)) {}
inline SubstituteArg(int value)
: text_(FastInt32ToBuffer(value, scratch_)), size_(strlen(text_)) {}
inline SubstituteArg(unsigned int value)
: text_(FastUInt32ToBuffer(value, scratch_)), size_(strlen(text_)) {}
inline SubstituteArg(long value)
: text_(FastLongToBuffer(value, scratch_)), size_(strlen(text_)) {}
inline SubstituteArg(unsigned long value)
: text_(FastULongToBuffer(value, scratch_)), size_(strlen(text_)) {}
inline SubstituteArg(long long value)
: text_(FastInt64ToBuffer(value, scratch_)), size_(strlen(text_)) {}
inline SubstituteArg(unsigned long long value)
: text_(FastUInt64ToBuffer(value, scratch_)), size_(strlen(text_)) {}
inline SubstituteArg(float value)
: text_(FloatToBuffer(value, scratch_)), size_(strlen(text_)) {}
inline SubstituteArg(double value)
: text_(DoubleToBuffer(value, scratch_)), size_(strlen(text_)) {}
inline SubstituteArg(bool value)
: text_(value ? "true" : "false"), size_(strlen(text_)) {}
inline const char* data() const { return text_; }
inline int size() const { return size_; }
private:
const char* text_;
int size_;
char scratch_[kFastToBufferSize];
};
} // namespace internal
LIBPROTOBUF_EXPORT string Substitute(
const char* format,
const internal::SubstituteArg& arg0 = internal::SubstituteArg(),
const internal::SubstituteArg& arg1 = internal::SubstituteArg(),
const internal::SubstituteArg& arg2 = internal::SubstituteArg(),
const internal::SubstituteArg& arg3 = internal::SubstituteArg(),
const internal::SubstituteArg& arg4 = internal::SubstituteArg(),
const internal::SubstituteArg& arg5 = internal::SubstituteArg(),
const internal::SubstituteArg& arg6 = internal::SubstituteArg(),
const internal::SubstituteArg& arg7 = internal::SubstituteArg(),
const internal::SubstituteArg& arg8 = internal::SubstituteArg(),
const internal::SubstituteArg& arg9 = internal::SubstituteArg());
LIBPROTOBUF_EXPORT void SubstituteAndAppend(
string* output, const char* format,
const internal::SubstituteArg& arg0 = internal::SubstituteArg(),
const internal::SubstituteArg& arg1 = internal::SubstituteArg(),
const internal::SubstituteArg& arg2 = internal::SubstituteArg(),
const internal::SubstituteArg& arg3 = internal::SubstituteArg(),
const internal::SubstituteArg& arg4 = internal::SubstituteArg(),
const internal::SubstituteArg& arg5 = internal::SubstituteArg(),
const internal::SubstituteArg& arg6 = internal::SubstituteArg(),
const internal::SubstituteArg& arg7 = internal::SubstituteArg(),
const internal::SubstituteArg& arg8 = internal::SubstituteArg(),
const internal::SubstituteArg& arg9 = internal::SubstituteArg());
} // namespace strings
} // namespace protobuf
} // namespace google
#endif // GOOGLE_PROTOBUF_STUBS_SUBSTITUTE_H_
syntax="proto2";
import "pds_option.proto";
package baidu.paddle_serving.predictor.format;
// dense format
message DenseInstance {
repeated float features = 1;
};
message DensePrediction {
repeated float categories = 1;
};
// sparse format
message SparseInstance {
repeated uint32 keys = 1;
repeated uint32 shape = 2;
repeated float values = 3;
};
message SparsePrediction {
repeated float categories = 1;
};
// int64-tensor format
message Int64TensorInstance {
repeated int64 data = 1;
repeated uint32 shape = 2;
};
message Float32TensorPredictor {
repeated float data = 1;
repeated uint32 shape = 2;
};
// x-image format
message XImageReqInstance {
required bytes image_binary = 1;
required uint32 image_length = 2;
};
message XImageResInstance {
required string response_json = 1;
};
// x-record format
message XRecordInstance {
// TODO
required bytes data = 1;
};
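// Illustrative note (not part of the original proto): the generated C++ API mirrors
// these fields; assuming the generated header builtin_format.pb.h, a request instance
// is filled like this (the RPC client further below does the same for real traffic):
//   baidu::paddle_serving::predictor::format::DenseInstance dense;
//   dense.add_features(1.5);                     // repeated float features = 1
//   baidu::paddle_serving::predictor::format::SparseInstance sparse;
//   sparse.add_keys(26);                         // feature id
//   sparse.add_shape(2000);                      // feature space size
//   sparse.add_values(1.0);                      // one value per key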
syntax="proto2";
import "pds_option.proto";
import "builtin_format.proto";
package baidu.paddle_serving.predictor.dense_service;
option cc_generic_services = true;
message Request {
repeated baidu.paddle_serving.predictor.format.DenseInstance instances = 1;
};
message Response {
repeated baidu.paddle_serving.predictor.format.DensePrediction predictions = 1;
};
service BuiltinDenseFormatService {
rpc inference(Request) returns (Response);
rpc debug(Request) returns (Response);
option (pds.options).generate_impl = true;
};
syntax="proto2";
import "pds_option.proto";
package baidu.paddle_serving.predictor.echo_service;
option cc_generic_services = true;
message RequestAndResponse {
required int32 a = 1;
required float b = 2;
};
service BuiltinTestEchoService {
rpc inference(RequestAndResponse) returns (RequestAndResponse);
rpc debug(RequestAndResponse) returns (RequestAndResponse);
option (pds.options).generate_impl = true;
};
syntax="proto2";
import "pds_option.proto";
import "builtin_format.proto";
package baidu.paddle_serving.predictor.image_classification;
option cc_generic_services = true;
message ClassifyResponse {
repeated baidu.paddle_serving.predictor.format.DensePrediction predictions = 1;
};
message Request {
repeated baidu.paddle_serving.predictor.format.XImageReqInstance instances = 1;
};
message Response {
// Each json string is serialized from ClassifyResponse predictions
repeated baidu.paddle_serving.predictor.format.XImageResInstance predictions = 1;
};
service ImageClassifyService {
rpc inference(Request) returns (Response);
rpc debug(Request) returns (Response);
option (pds.options).generate_impl = true;
};
syntax="proto2";
import "pds_option.proto";
import "builtin_format.proto";
package baidu.paddle_serving.predictor.int64tensor_service;
option cc_generic_services = true;
message Request {
repeated baidu.paddle_serving.predictor.format.Int64TensorInstance
instances = 1;
};
message Response {
repeated baidu.paddle_serving.predictor.format.Float32TensorPredictor
predictions = 2;
};
service BuiltinFluidService {
rpc inference(Request) returns (Response);
rpc debug(Request) returns (Response);
option (pds.options).generate_impl = true;
};
//syntax="proto2";
package pds.ut;
message OpMessageData {
optional int32 a = 1 [default=33];
optional float b = 2 [default=4.4];
};
syntax="proto2";
import "google/protobuf/descriptor.proto";
package pds;
extend google.protobuf.FieldOptions {
optional bool pack_on = 70000 [default=false];
};
extend google.protobuf.ServiceOptions {
optional PaddleServiceOption options = 80000;
};
message PaddleServiceOption {
optional bool generate_impl = 1 [default = false];
optional bool generate_stub = 2 [default = false];
};
syntax="proto2";
import "pds_option.proto";
import "builtin_format.proto";
package baidu.paddle_serving.predictor.sparse_service;
option cc_generic_services = true;
message Request {
repeated baidu.paddle_serving.predictor.format.SparseInstance instances = 1;
};
message Response {
repeated baidu.paddle_serving.predictor.format.SparsePrediction predictions = 1;
};
service BuiltinSparseFormatService {
rpc inference(Request) returns (Response);
rpc debug(Request) returns (Response);
option (pds.options).generate_impl = true;
};
syntax="proto2";
package aialgs.data;
message Float32Tensor {
repeated float values = 1;
repeated uint64 keys = 2;
repeated uint64 shape = 3;
};
message Float64Tensor {
repeated double values = 1;
repeated uint64 keys = 2;
repeated uint64 shape = 3;
};
message Int32Tensor {
repeated int32 values = 1;
repeated uint64 keys = 2;
repeated uint64 shape = 3;
};
message Bytes {
repeated bytes value = 1;
optional string content_type = 2;
};
message Value {
optional Float32Tensor float32_tensor = 2;
optional Float64Tensor float64_tensor = 3;
optional Int32Tensor int32_tensor = 7;
optional Bytes bytes = 9;
};
message Record {
message FeaturesEntry {
optional string key = 1;
optional Value value = 2;
};
message LabelEntry {
optional string key = 1;
optional Value value = 2;
};
repeated FeaturesEntry features = 1;
repeated LabelEntry label = 2;
optional string uid = 3;
optional string metadata = 4;
optional string configuration = 5;
};
// Copyright (c) 2014 baidu-rpc authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// A client that sends requests to the server every 1 second.
//
#include <fstream>
#include "dense_service.pb.h"
#include "image_classification.pb.h"
#include "sparse_service.pb.h"
#include "int64tensor_service.pb.h"
#include "common/utils.h"
#include "common/inner_common.h"
#include "common/constant.h"
#include "framework/logger.h"
DEFINE_string(attachment, "foo", "Carry this along with requests");
DEFINE_bool(auth, false, "Enable Giano authentication");
DEFINE_string(auth_group, "g_guest", "Giano Group");
DEFINE_string(protocol, "baidu_std", "Protocol type. Defined in protocol/baidu/rpc/options.proto");
DEFINE_bool(compress, true, "Enable compression");
//DEFINE_string(protocol, "http", "Protocol type. Defined in protocol/baidu/rpc/options.proto");
DEFINE_string(connection_type, "", "Connection type. Available values: single, pooled, short");
DEFINE_string(server, "0.0.0.0:8010", "IP Address of server");
DEFINE_string(load_balancer, "", "The algorithm for load balancing");
DEFINE_int32(timeout_ms, 100, "RPC timeout in milliseconds");
DEFINE_int32(max_retry, 3, "Max retries(not including the first RPC)");
DEFINE_int32(interval_ms, 1000, "Milliseconds between consecutive requests");
DEFINE_string(http_content_type, "application/json", "Content type of http request");
using baidu::paddle_serving::predictor::FLAGS_logger_path;
using baidu::paddle_serving::predictor::FLAGS_logger_file;
using baidu::paddle_serving::predictor::LoggerWrapper;
namespace dense_format {
using baidu::paddle_serving::predictor::dense_service::BuiltinDenseFormatService_Stub;
using baidu::paddle_serving::predictor::dense_service::Request;
using baidu::paddle_serving::predictor::dense_service::Response;
using baidu::paddle_serving::predictor::format::DenseInstance;
using baidu::paddle_serving::predictor::format::DensePrediction;
void send_dense_format(BuiltinDenseFormatService_Stub& stub, int log_id) {
baidu::rpc::Controller cntl;
// We will receive response synchronously, safe to put variables
// on stack.
baidu::paddle_serving::predictor::TimerFlow timer("dense");
Request dense_request;
Response dense_response;
// set request header
DenseInstance* ins = NULL;
ins = dense_request.mutable_instances()->Add();
ins->add_features(1.5);
ins->add_features(16.0);
ins->add_features(14.0);
ins->add_features(23.0);
timer.check("fill");
cntl.set_log_id(log_id ++); // set by user
if (FLAGS_protocol != "http" && FLAGS_protocol != "h2c") {
// Set attachment which is wired to network directly instead of
// being serialized into protobuf messages.
cntl.request_attachment().append(FLAGS_attachment);
} else {
cntl.http_request().set_content_type(FLAGS_http_content_type);
}
if (FLAGS_compress) {
cntl.set_request_compress_type(baidu::rpc::COMPRESS_TYPE_SNAPPY);
}
timer.check("compress");
// Because `done' (the last parameter) is NULL, this function waits until
// the response comes back or an error occurs (including timeout).
stub.debug(&cntl, &dense_request, &dense_response, NULL);
timer.check("inference");
if (!cntl.Failed()) {
if (cntl.response_attachment().empty()) {
LOG(INFO) << "Received response from " << cntl.remote_side()
<< " to " << cntl.local_side()
<< ": " << dense_response.ShortDebugString()
<< " latency=" << cntl.latency_us() << "us" << noflush;
} else {
LOG(INFO) << "Received response from " << cntl.remote_side()
<< " to " << cntl.local_side()
<< ": " << dense_response.ShortDebugString()
<< " (attached=" << cntl.response_attachment() << ")"
<< " latency=" << cntl.latency_us() << "us " << noflush;
}
} else {
LOG(WARNING) << cntl.ErrorText();
}
timer.check("dump");
}
} // namespace dense_format
namespace sparse_format {
using baidu::paddle_serving::predictor::sparse_service::BuiltinSparseFormatService_Stub;
using baidu::paddle_serving::predictor::sparse_service::Request;
using baidu::paddle_serving::predictor::sparse_service::Response;
using baidu::paddle_serving::predictor::format::SparseInstance;
using baidu::paddle_serving::predictor::format::SparsePrediction;
void send_sparse_format(BuiltinSparseFormatService_Stub& stub, int log_id) {
baidu::rpc::Controller cntl;
// We will receive response synchronously, safe to put variables
// on stack.
baidu::paddle_serving::predictor::TimerFlow timer("sparse");
Request sparse_request;
Response sparse_response;
// set request body
SparseInstance* ins = NULL;
ins = sparse_request.mutable_instances()->Add();
ins->add_keys(26);
ins->add_keys(182);
ins->add_keys(232);
ins->add_keys(243);
ins->add_keys(431);
ins->add_shape(2000);
ins->add_values(1);
ins->add_values(1);
ins->add_values(1);
ins->add_values(4);
ins->add_values(14);
ins = sparse_request.mutable_instances()->Add();
ins->add_keys(0);
ins->add_keys(182);
ins->add_keys(232);
ins->add_keys(243);
ins->add_keys(431);
ins->add_shape(2000);
ins->add_values(13);
ins->add_values(1);
ins->add_values(1);
ins->add_values(4);
ins->add_values(1);
timer.check("fill");
cntl.set_log_id(log_id ++); // set by user
if (FLAGS_protocol != "http" && FLAGS_protocol != "h2c") {
// Set attachment which is wired to network directly instead of
// being serialized into protobuf messages.
cntl.request_attachment().append(FLAGS_attachment);
} else {
cntl.http_request().set_content_type(FLAGS_http_content_type);
}
if (FLAGS_compress) {
cntl.set_request_compress_type(baidu::rpc::COMPRESS_TYPE_SNAPPY);
}
timer.check("compress");
// Because `done' (the last parameter) is NULL, this function waits until
// the response comes back or an error occurs (including timeout).
stub.inference(&cntl, &sparse_request, &sparse_response, NULL);
timer.check("inference");
if (!cntl.Failed()) {
if (cntl.response_attachment().empty()) {
LOG(INFO) << "Received response from " << cntl.remote_side()
<< " to " << cntl.local_side()
<< ": " << sparse_response.ShortDebugString()
<< " latency=" << cntl.latency_us() << "us" << noflush;
} else {
LOG(INFO) << "Received response from " << cntl.remote_side()
<< " to " << cntl.local_side()
<< ": " << sparse_response.ShortDebugString()
<< " (attached=" << cntl.response_attachment() << ")"
<< " latency=" << cntl.latency_us() << "us" << noflush;
}
} else {
LOG(WARNING) << cntl.ErrorText();
}
timer.check("dump");
}
}
namespace fluid_format {
using baidu::paddle_serving::predictor::int64tensor_service::BuiltinFluidService_Stub;
using baidu::paddle_serving::predictor::int64tensor_service::Request;
using baidu::paddle_serving::predictor::int64tensor_service::Response;
using baidu::paddle_serving::predictor::format::Int64TensorInstance;
using baidu::paddle_serving::predictor::format::Float32TensorPredictor;
void send_fluid_format(BuiltinFluidService_Stub& stub, int log_id) {
baidu::rpc::Controller cntl;
// We will receive response synchronously, safe to put variables
// on stack.
baidu::paddle_serving::predictor::TimerFlow timer("fluid");
Request fluid_request;
Response fluid_response;
// set request header
Int64TensorInstance* ins = NULL;
ins = fluid_request.mutable_instances()->Add();
ins->add_data(15);
ins->add_data(160);
ins->add_data(14);
ins->add_data(23);
ins->add_data(18);
ins->add_data(39);
ins->add_shape(2);
ins->add_shape(3);
timer.check("fill");
cntl.set_log_id(log_id); // set by user
if (FLAGS_protocol != "http" && FLAGS_protocol != "h2c") {
// Set attachment which is wired to network directly instead of
// being serialized into protobuf messages.
cntl.request_attachment().append(FLAGS_attachment);
} else {
cntl.http_request().set_content_type(FLAGS_http_content_type);
}
if (FLAGS_compress) {
cntl.set_request_compress_type(baidu::rpc::COMPRESS_TYPE_SNAPPY);
}
timer.check("compress");
// Because `done' (the last parameter) is NULL, this function waits until
// the response comes back or an error occurs (including timeout).
stub.debug(&cntl, &fluid_request, &fluid_response, NULL);
timer.check("inference");
if (!cntl.Failed()) {
if (cntl.response_attachment().empty()) {
LOG(INFO) << "Received response from " << cntl.remote_side()
<< " to " << cntl.local_side()
<< ": " << fluid_response.ShortDebugString()
<< " latency=" << cntl.latency_us() << "us" << noflush;
} else {
LOG(INFO) << "Received response from " << cntl.remote_side()
<< " to " << cntl.local_side()
<< ": " << fluid_response.ShortDebugString()
<< " (attached=" << cntl.response_attachment() << ")"
<< " latency=" << cntl.latency_us() << "us " << noflush;
}
} else {
LOG(WARNING) << cntl.ErrorText();
}
timer.check("dump");
}
} // namespace fluid_format
namespace ximage_format {
char* g_image_buffer = NULL;
size_t g_image_size = 0;
std::string g_image_path = "./data/images/what.jpg";
using baidu::paddle_serving::predictor::image_classification::ImageClassifyService_Stub;
using baidu::paddle_serving::predictor::image_classification::Request;
using baidu::paddle_serving::predictor::image_classification::Response;
using baidu::paddle_serving::predictor::format::XImageReqInstance;
using baidu::paddle_serving::predictor::format::XImageResInstance;
void send_ximage_format(ImageClassifyService_Stub& stub, int log_id) {
baidu::rpc::Controller cntl;
// We will receive response synchronously, safe to put variables
// on stack.
baidu::paddle_serving::predictor::TimerFlow timer("ximage");
Request ximage_request;
Response ximage_response;
// set request header
std::ifstream fin(g_image_path.c_str(), std::ios::binary);
if (!fin) {
LOG(WARNING) << "Failed to open image file: " << g_image_path;
return;
}
fin.seekg(0, std::ios::end);
size_t isize = fin.tellg();
if (!g_image_buffer || g_image_size < isize) {
g_image_buffer = new (std::nothrow) char[isize];
g_image_size = isize;
}
fin.seekg(0, std::ios::beg);
fin.read(g_image_buffer, sizeof(char) * isize);
fin.close();
timer.check("read");
XImageReqInstance* ins = ximage_request.mutable_instances()->Add();
ins->set_image_binary(g_image_buffer, isize);
ins->set_image_length(isize);
timer.check("fill");
cntl.set_log_id(log_id ++); // set by user
if (FLAGS_protocol != "http" && FLAGS_protocol != "h2c") {
// Set attachment which is wired to network directly instead of
// being serialized into protobuf messages.
cntl.request_attachment().append(FLAGS_attachment);
} else {
cntl.http_request().set_content_type(FLAGS_http_content_type);
}
if (FLAGS_compress) {
cntl.set_request_compress_type(baidu::rpc::COMPRESS_TYPE_SNAPPY);
}
timer.check("compress");
// Because `done' (the last parameter) is NULL, this function waits until
// the response comes back or an error occurs (including timeout).
stub.inference(&cntl, &ximage_request, &ximage_response, NULL);
timer.check("inference");
if (!cntl.Failed()) {
if (cntl.response_attachment().empty()) {
LOG(INFO) << "Received response from " << cntl.remote_side()
<< " to " << cntl.local_side()
<< ": " << ximage_response.ShortDebugString()
<< " latency=" << cntl.latency_us() << "us" << noflush;
} else {
LOG(INFO) << "Received response from " << cntl.remote_side()
<< " to " << cntl.local_side()
<< ": " << ximage_response.ShortDebugString()
<< " (attached=" << cntl.response_attachment() << ")"
<< " latency=" << cntl.latency_us() << "us " << noflush;
}
} else {
LOG(WARNING) << cntl.ErrorText();
}
timer.check("dump");
if (g_image_buffer) {
delete[] g_image_buffer;
g_image_buffer = NULL;
}
}
} // namespace ximage_format
int main(int argc, char* argv[]) {
// Parse gflags. We recommend you to use gflags as well.
google::ParseCommandLineFlags(&argc, &argv, true);
// initialize logger instance
if (LoggerWrapper::instance().initialize(
FLAGS_logger_path, FLAGS_logger_file) != 0) {
LOG(ERROR) << "Failed initialize logger, conf:"
<< FLAGS_logger_path << "/" << FLAGS_logger_file;
return -1;
}
// Login to get `CredentialGenerator' (see baas-lib-c/baas.h for more
// information) and then pass it to `GianoAuthenticator'.
std::unique_ptr<baidu::rpc::policy::GianoAuthenticator> auth;
if (FLAGS_auth) {
if (baas::BAAS_Init() != 0) {
LOG(ERROR) << "Fail to init BAAS";
return -1;
}
baas::CredentialGenerator gen = baas::ClientUtility::Login(FLAGS_auth_group);
auth.reset(new baidu::rpc::policy::GianoAuthenticator(&gen, NULL));
}
// A Channel represents a communication line to a Server. Notice that
// Channel is thread-safe and can be shared by all threads in your program.
baidu::rpc::Channel channel;
// Initialize the channel, NULL means using default options.
baidu::rpc::ChannelOptions options;
options.protocol = FLAGS_protocol;
options.connection_type = FLAGS_connection_type;
options.auth = auth.get();
options.timeout_ms = FLAGS_timeout_ms/*milliseconds*/;
options.max_retry = FLAGS_max_retry;
if (channel.Init(FLAGS_server.c_str(), FLAGS_load_balancer.c_str(), &options) != 0) {
LOG(ERROR) << "Fail to initialize channel";
return -1;
}
// Normally, you should not call a Channel directly, but instead construct
// a stub Service wrapping it. stub can be shared by all threads as well.
baidu::paddle_serving::predictor::sparse_service::BuiltinSparseFormatService_Stub
stub1(&channel);
baidu::paddle_serving::predictor::dense_service::BuiltinDenseFormatService_Stub
stub2(&channel);
baidu::paddle_serving::predictor::int64tensor_service::BuiltinFluidService_Stub
stub3(&channel);
baidu::paddle_serving::predictor::image_classification::ImageClassifyService_Stub
stub4(&channel);
// Send a request and wait for the response every 1 second.
int log_id = 0;
while (!baidu::rpc::IsAskedToQuit()) {
// We will receive response synchronously, safe to put variables
// on stack.
log_id++;
sparse_format::send_sparse_format(stub1, log_id);
usleep(FLAGS_interval_ms * 1000L);
log_id++;
dense_format::send_dense_format(stub2, log_id);
usleep(FLAGS_interval_ms * 1000L);
//log_id++;
//fluid_format::send_fluid_format(stub3, log_id);
//usleep(FLAGS_interval_ms * 1000L);
log_id++;
ximage_format::send_ximage_format(stub4, log_id);
usleep(FLAGS_interval_ms * 1000L);
}
LOG(INFO) << "Pdserving Client is going to quit";
return 0;
}
#include <boost/algorithm/string.hpp>
#include <boost/scoped_ptr.hpp>
#include <list>
#include <google/protobuf/descriptor.h>
#include <google/protobuf/compiler/plugin.h>
#include <google/protobuf/compiler/code_generator.h>
#include <google/protobuf/io/printer.h>
#include <google/protobuf/io/zero_copy_stream.h>
#include "plugin/strutil.h"
#include "plugin/substitute.h"
#include "pds_option.pb.h"
using std::string;
using google::protobuf::Descriptor;
using google::protobuf::FileDescriptor;
using google::protobuf::FieldDescriptor;
using google::protobuf::MethodDescriptor;
using google::protobuf::ServiceDescriptor;
using google::protobuf::compiler::CodeGenerator;
using google::protobuf::compiler::GeneratorContext;
using google::protobuf::HasSuffixString;
using google::protobuf::StripSuffixString;
namespace google {
namespace protobuf {
string dots_to_colons(const string& name) {
return StringReplace(name, ".", "::", true);
}
string full_class_name(const Descriptor* descriptor) {
// Find "outer", the descriptor of the top-level message in which
// "descriptor" is embedded.
const Descriptor* outer = descriptor;
while (outer->containing_type() != NULL) {
outer = outer->containing_type();
}
return outer->full_name();
}
}
}
string strip_proto(const string& filename) {
if (HasSuffixString(filename, ".protolevel")) {
return StripSuffixString(filename, ".protolevel");
} else {
return StripSuffixString(filename, ".proto");
}
}
void string_format(std::string& source) {
size_t len = source.length();
std::string sep = "_";
for (int i = 0; i < len; i++) {
if (source[i] >= 'A' && source[i] <= 'Z') {
source[i] += 32;
if (i == 0) {
continue;
}
source.insert(i, sep);
i++;
len++;
}
}
}
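// Example (illustrative only): string_format() converts a CamelCase class name into
// a lower_snake_case variable name, e.g.
//   std::string name = "BuiltinDenseFormatService";
//   string_format(name);   // name == "builtin_dense_format_service"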
bool valid_service_method(const std::vector<const MethodDescriptor*>& methods) {
if (methods.size() != 2) {
return false;
}
if (methods[0]->name() == "inference" && methods[1]->name() == "debug") {
return true;
}
if (methods[1]->name() == "inference" && methods[0]->name() == "debug") {
return true;
}
return false;
}
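// Consequently, every serving proto in this repo (e.g. dense_service.proto above)
// declares exactly these two methods:
//   rpc inference(Request) returns (Response);
//   rpc debug(Request) returns (Response);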
class PdsCodeGenerator : public CodeGenerator {
public:
virtual bool Generate(
const FileDescriptor* file,
const string& parameter,
GeneratorContext* context,
std::string* error) const {
const string header = strip_proto(file->name()) + ".pb.h";
const string body = strip_proto(file->name()) + ".pb.cc";
bool include_inserted = false;
for (int i = 0; i < file->service_count(); ++i) {
const ServiceDescriptor* descriptor = file->service(i);
if (!descriptor) {
*error = "get descriptor failed";
return false;
}
pds::PaddleServiceOption options
= descriptor->options().GetExtension(pds::options);
bool generate_impl = options.generate_impl();
bool generate_stub = options.generate_stub();
if (!generate_impl && !generate_stub) {
return true;
}
if (!include_inserted) {
boost::scoped_ptr<google::protobuf::io::ZeroCopyOutputStream> output(
context->OpenForInsert(header, "includes"));
google::protobuf::io::Printer printer(output.get(), '$');
if (generate_impl) {
printer.Print("#include \"common/inner_common.h\"\n");
printer.Print("#include \"framework/service.h\"\n");
printer.Print("#include \"framework/manager.h\"\n");
printer.Print("#include \"framework/service_manager.h\"\n");
}
if (generate_stub) {
printer.Print("#include <baidu/rpc/parallel_channel.h>\n");
printer.Print("#include \"factory.h\"\n");
printer.Print("#include \"stub.h\"\n");
printer.Print("#include \"stub_impl.h\"\n");
}
include_inserted = true;
}
const std::string& class_name = descriptor->name();
const std::string& service_name = descriptor->name();
// xxx.pb.h
{
if (generate_impl) {
// service scope
// namespace scope
boost::scoped_ptr<google::protobuf::io::ZeroCopyOutputStream>
output(context->OpenForInsert(header, "namespace_scope"));
google::protobuf::io::Printer printer(output.get(), '$');
if (!generate_paddle_serving_head(&printer, descriptor, error,
service_name, class_name)) {
return false;
}
}
if (generate_stub) {
// service class scope
// namespace scope
{
boost::scoped_ptr<google::protobuf::io::ZeroCopyOutputStream>
output(context->OpenForInsert(header, "namespace_scope"));
google::protobuf::io::Printer printer(output.get(), '$');
if (!generate_paddle_serving_stub_head(&printer, descriptor, error,
service_name, class_name)) {
return false;
}
}
}
}
// xxx.pb.cc
{
if (generate_impl) {
// service scope
// namespace scope
boost::scoped_ptr<google::protobuf::io::ZeroCopyOutputStream>
output(context->OpenForInsert(body, "namespace_scope"));
google::protobuf::io::Printer printer(output.get(), '$');
if (!generate_paddle_serving_body(&printer, descriptor, error,
service_name, class_name)) {
return false;
}
}
if (generate_stub) {
// service class scope
{
}
// namespace scope
{
boost::scoped_ptr<google::protobuf::io::ZeroCopyOutputStream>
output(context->OpenForInsert(body, "namespace_scope"));
google::protobuf::io::Printer printer(output.get(), '$');
if (!generate_paddle_serving_stub_body(&printer, descriptor, error,
service_name, class_name)) {
return false;
}
}
}
}
}
return true;
}
private:
bool generate_paddle_serving_head(
google::protobuf::io::Printer* printer,
const ServiceDescriptor* descriptor,
string *error,
const std::string& service_name,
const std::string& class_name) const {
std::vector<const MethodDescriptor*> methods;
for (int i = 0; i < descriptor->method_count(); ++i) {
methods.push_back(descriptor->method(i));
}
if (!valid_service_method(methods)) {
*error = "Service can only contains two methods: inferend, debug";
return false;
}
std::string variable_name = class_name;
string_format(variable_name);
printer->Print(
"class $name$Impl : public $name$ {\n"
"public:\n"
" virtual ~$name$Impl() {}\n"
" static $name$Impl& instance() {\n"
" return _s_$variable_name$_impl;\n"
" }\n\n"
" $name$Impl(const std::string& service_name) {\n"
" REGIST_FORMAT_SERVICE(\n"
" service_name, &$name$Impl::instance());\n"
" }\n\n",
"name", class_name, "variable_name", variable_name);
for (int i = 0; i < methods.size(); i++) {
const MethodDescriptor* m = methods[i];
printer->Print(
" virtual void $name$(google::protobuf::RpcController* cntl_base,\n"
" const $input_name$* request,\n"
" $output_name$* response,\n"
" google::protobuf::Closure* done);\n\n",
"name", m->name(),
"input_name", google::protobuf::dots_to_colons(m->input_type()->full_name()),
"output_name", google::protobuf::dots_to_colons(m->output_type()->full_name()));
}
printer->Print(
" static $name$Impl _s_$variable_name$_impl;\n"
"};", "name", class_name, "variable_name", variable_name);
return true;
}
bool generate_paddle_serving_body(
google::protobuf::io::Printer* printer,
const ServiceDescriptor* descriptor,
string *error,
const std::string& service_name,
const std::string& class_name) const {
std::vector<const MethodDescriptor*> methods;
for (int i = 0; i < descriptor->method_count(); ++i) {
methods.push_back(descriptor->method(i));
}
if (!valid_service_method(methods)) {
*error = "Service can only contains two methods: inferend, debug";
return false;
}
std::string variable_name = class_name;
string_format(variable_name);
for (int i = 0; i < methods.size(); i++) {
const MethodDescriptor* m = methods[i];
printer->Print(
"void $name$Impl::$method$(\n",
"name", class_name, "method", m->name());
printer->Print(
" google::protobuf::RpcController* cntl_base,\n"
" const $input_name$* request,\n"
" $output_name$* response,\n"
" google::protobuf::Closure* done) {\n"
" struct timeval tv;\n"
" gettimeofday(&tv, NULL);"
" long start = tv.tv_sec * 1000000 + tv.tv_usec;",
"input_name", google::protobuf::dots_to_colons(m->input_type()->full_name()),
"output_name", google::protobuf::dots_to_colons(m->output_type()->full_name()));
if (m->name() == "inference") {
printer->Print(
" baidu::rpc::ClosureGuard done_guard(done);\n"
" baidu::rpc::Controller* cntl = \n"
" static_cast<baidu::rpc::Controller*>(cntl_base);\n"
" ::baidu::paddle_serving::predictor::InferService* svr = \n"
" ::baidu::paddle_serving::predictor::InferServiceManager::instance().item(\"$service$\");\n"
" if (svr == NULL) {\n"
" LOG(ERROR) << \"Not found service: $service$\";\n"
" cntl->SetFailed(404, \"Not found service: $service$\");\n"
" return ;\n"
" }\n"
" LOG(NOTICE) << \" remote_side=\[\" << cntl->remote_side() << \"\]\" << noflush;\n"
" LOG(NOTICE) << \" local_side=\[\" << cntl->local_side() << \"\]\" << noflush;\n"
" LOG(NOTICE) << \" service_name=\[\" << \"$name$\" << \"\]\" << noflush;\n"
" LOG(NOTICE) << \" log_id=\[\" << cntl->log_id() << \"\]\" << noflush;\n"
" int err_code = svr->inference(request, response);\n"
" if (err_code != 0) {\n"
" LOG(WARNING)\n"
" << \"Failed call inferservice[$name$], name[$service$]\"\n"
" << \", error_code: \" << err_code;\n"
" cntl->SetFailed(err_code, \"InferService inference failed!\");\n"
" }\n"
" gettimeofday(&tv, NULL);\n"
" long end = tv.tv_sec * 1000000 + tv.tv_usec;\n"
" // flush notice log\n"
" LOG(NOTICE) << \" tc=\[\" << (end - start) << \"\]\";\n",
"name", class_name, "service", service_name);
}
if (m->name() == "debug") {
printer->Print(
" baidu::rpc::ClosureGuard done_guard(done);\n"
" baidu::rpc::Controller* cntl = \n"
" static_cast<baidu::rpc::Controller*>(cntl_base);\n"
" ::baidu::paddle_serving::predictor::InferService* svr = \n"
" ::baidu::paddle_serving::predictor::InferServiceManager::instance().item(\"$service$\");\n"
" if (svr == NULL) {\n"
" LOG(ERROR) << \"Not found service: $service$\";\n"
" cntl->SetFailed(404, \"Not found service: $service$\");\n"
" return ;\n"
" }\n"
" LOG(NOTICE) << \" remote_side=\[\" << cntl->remote_side() << \"\]\" << noflush;\n"
" LOG(NOTICE) << \" local_side=\[\" << cntl->local_side() << \"\]\" << noflush;\n"
" LOG(NOTICE) << \" service_name=\[\" << \"$name$\" << \"\]\" << noflush;\n"
" LOG(NOTICE) << \" log_id=\[\" << cntl->log_id() << \"\]\" << noflush;\n"
" base::IOBufBuilder debug_os;\n"
" int err_code = svr->inference(request, response, &debug_os);\n"
" if (err_code != 0) {\n"
" LOG(WARNING)\n"
" << \"Failed call inferservice[$name$], name[$service$]\"\n"
" << \", error_code: \" << err_code;\n"
" cntl->SetFailed(err_code, \"InferService inference failed!\");\n"
" }\n"
" debug_os.move_to(cntl->response_attachment());\n"
" gettimeofday(&tv, NULL);\n"
" long end = tv.tv_sec * 1000000 + tv.tv_usec;\n"
" // flush notice log\n"
" LOG(NOTICE) << \" tc=\[\" << (end - start) << \"\]\";\n"
" LOG(INFO)\n"
" << \"TC=[\" << (end - start) << \"] Received debug request[log_id=\" << cntl->log_id()\n"
" << \"] from \" << cntl->remote_side()\n"
" << \" to \" << cntl->local_side();\n",
"name", class_name, "service", service_name);
}
printer->Print("}\n");
}
printer->Print(
"$name$Impl $name$Impl::_s_$variable_name$_impl(\"$service$\");\n",
"name", class_name,
"variable_name", variable_name,
"service", service_name);
return true;
}
bool generate_paddle_serving_stub_head(
google::protobuf::io::Printer* printer,
const ServiceDescriptor* descriptor,
string *error,
const std::string& service_name,
const std::string& class_name) const {
printer->Print(
"class $name$_StubCallMapper : public baidu::rpc::CallMapper {\n"
"private:\n"
" uint32_t _package_size;\n"
" baidu::paddle_serving::sdk_cpp::Stub* _stub_handler;\n"
"public:\n", "name", class_name);
printer->Indent();
printer->Print(
"$name$_StubCallMapper(uint32_t package_size, baidu::paddle_serving::sdk_cpp::Stub* stub) {\n"
" _package_size = package_size;\n"
" _stub_handler = stub;\n"
"}\n", "name", class_name);
printer->Print(
"baidu::rpc::SubCall default_map(\n"
" int channel_index,\n"
" const google::protobuf::MethodDescriptor* method,\n"
" const google::protobuf::Message* request,\n"
" google::protobuf::Message* response) {\n"
" baidu::paddle_serving::sdk_cpp::TracePackScope scope(\"default_map\", channel_index);",
"name", class_name);
printer->Indent();
if (!generate_paddle_serving_stub_default_map(printer, descriptor, error,
service_name, class_name)) {
return false;
}
printer->Outdent();
printer->Print(
"}\n");
printer->Print(
"baidu::rpc::SubCall sub_package_map(\n"
" int channel_index,\n"
" const google::protobuf::MethodDescriptor* method,\n"
" const google::protobuf::Message* request,\n"
" google::protobuf::Message* response) {\n"
" baidu::paddle_serving::sdk_cpp::TracePackScope scope(\"sub_map\", channel_index);",
"name", class_name);
printer->Indent();
std::vector<const FieldDescriptor*> in_shared_fields;
std::vector<const FieldDescriptor*> in_item_fields;
const MethodDescriptor* md = descriptor->FindMethodByName("inference");
if (!md) {
*error = "not found inference method!";
return false;
}
for (int i = 0; i < md->input_type()->field_count(); ++i) {
const FieldDescriptor* fd = md->input_type()->field(i);
if (!fd) {
*error = "invalid fd at: " + i;
return false;
}
bool pack_on = fd->options().GetExtension(pds::pack_on);
if (pack_on && !fd->is_repeated()) {
*error = "Pack fields must be repeated, field: " + fd->name();
return false;
}
if (pack_on) {
in_item_fields.push_back(fd);
} else {
in_shared_fields.push_back(fd);
}
}
if (!generate_paddle_serving_stub_package_map(printer, descriptor, error,
service_name, class_name, in_shared_fields, in_item_fields)) {
return false;
}
printer->Outdent();
printer->Print(
"}\n");
printer->Print(
"baidu::rpc::SubCall Map(\n"
" int channel_index,\n"
" const google::protobuf::MethodDescriptor* method,\n"
" const google::protobuf::Message* request,\n"
" google::protobuf::Message* response) {\n",
"name", class_name);
printer->Indent();
if (in_item_fields.size() <= 0) {
printer->Print(
"// No packed items found in proto file, use default map method\n"
"return default_map(channel_index, method, request, response);\n");
} else {
printer->Print(
"base::Timer tt(base::Timer::STARTED);\n"
"baidu::rpc::SubCall ret;\n"
"if (_package_size == 0) {\n"
" ret = default_map(channel_index, method, request, response);\n"
"} else {\n"
" ret = sub_package_map(channel_index, method, request, response);\n"
"}\n"
"tt.stop();\n"
"if (ret.flags != baidu::rpc::SKIP_SUB_CHANNEL && ret.method != NULL) {\n"
" _stub_handler->update_latency(tt.u_elapsed(), \"pack_map\");\n"
"}\n"
"return ret;\n");
}
printer->Outdent();
printer->Print("}\n");
printer->Outdent();
printer->Print("};\n");
////////////////////////////////////////////////////////////////
printer->Print(
"class $name$_StubResponseMerger : public baidu::rpc::ResponseMerger {\n"
"private:\n"
" uint32_t _package_size;\n"
" baidu::paddle_serving::sdk_cpp::Stub* _stub_handler;\n"
"public:\n", "name", class_name);
printer->Indent();
printer->Print(
"$name$_StubResponseMerger(uint32_t package_size, baidu::paddle_serving::sdk_cpp::Stub* stub) {\n"
" _package_size = package_size;\n"
" _stub_handler = stub;\n"
"}\n", "name", class_name);
printer->Print(
"baidu::rpc::ResponseMerger::Result default_merge(\n"
" google::protobuf::Message* response,\n"
" const google::protobuf::Message* sub_response) {\n"
" baidu::paddle_serving::sdk_cpp::TracePackScope scope(\"default_merge\");",
"name", class_name);
printer->Indent();
if (!generate_paddle_serving_stub_default_merger(printer, descriptor, error,
service_name, class_name)) {
return false;
}
printer->Outdent();
printer->Print(
"}\n");
printer->Print(
"baidu::rpc::ResponseMerger::Result sub_package_merge(\n"
" google::protobuf::Message* response,\n"
" const google::protobuf::Message* sub_response) {\n"
" baidu::paddle_serving::sdk_cpp::TracePackScope scope(\"sub_merge\");",
"name", class_name);
printer->Indent();
if (!generate_paddle_serving_stub_package_merger(printer, descriptor, error,
service_name, class_name)) {
return false;
}
printer->Outdent();
printer->Print(
"}\n");
printer->Print(
"baidu::rpc::ResponseMerger::Result Merge(\n"
" google::protobuf::Message* response,\n"
" const google::protobuf::Message* sub_response) {\n",
"name", class_name);
printer->Indent();
printer->Print(
"base::Timer tt(base::Timer::STARTED);\n"
"baidu::rpc::ResponseMerger::Result ret;"
"if (_package_size <= 0) {\n"
" ret = default_merge(response, sub_response);\n"
"} else {\n"
" ret = sub_package_merge(response, sub_response);\n"
"}\n"
"tt.stop();\n"
"if (ret != baidu::rpc::ResponseMerger::FAIL) {\n"
" _stub_handler->update_latency(tt.u_elapsed(), \"pack_merge\");\n"
"}\n"
"return ret;\n");
printer->Outdent();
printer->Print("}\n");
printer->Outdent();
printer->Print("};\n");
return true;
}
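// Emits default_map(): only sub channel 0 handles the full request and all
// other channels are skipped; the response object is fetched from the stub
// handler, or newly allocated (and owned by the sub call) if none is cached.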
bool generate_paddle_serving_stub_default_map(
google::protobuf::io::Printer* printer,
const ServiceDescriptor* descriptor,
string *error,
const std::string& service_name,
const std::string& class_name) const {
printer->Print(
"if (channel_index > 0) { \n"
" return baidu::rpc::SubCall::Skip();\n"
"}\n");
printer->Print(
"google::protobuf::Message* cur_res = _stub_handler->fetch_response();\n"
"if (cur_res == NULL) {\n"
" LOG(TRACE) << \"Failed fetch response from stub handler, new it\";\n"
" cur_res = response->New();\n"
" if (cur_res == NULL) {\n"
" LOG(FATAL) << \"Failed new response item!\";\n"
" _stub_handler->update_average(1, \"pack_fail\");\n"
" return baidu::rpc::SubCall::Bad();\n"
" }\n"
" return baidu::rpc::SubCall(method, request, cur_res, baidu::rpc::DELETE_RESPONSE);\n"
"}\n");
"LOG(DEBUG) \n"
" << \"[default] Succ map, channel_index: \" << channel_index;\n";
printer->Print(
"return baidu::rpc::SubCall(method, request, cur_res, 0);\n"
);
return true;
}
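// Emits default_merge(): MergeFrom() the sub response into the aggregated
// response, returning MERGED on success and FAIL on exception.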
bool generate_paddle_serving_stub_default_merger(
google::protobuf::io::Printer* printer,
const ServiceDescriptor* descriptor,
string *error,
const std::string& service_name,
const std::string& class_name) const {
printer->Print(
"try {\n"
" response->MergeFrom(*sub_response);\n"
" return baidu::rpc::ResponseMerger::MERGED;\n"
"} catch (const std::exception& e) {\n"
" LOG(FATAL) << \"Merge failed.\";\n"
" _stub_handler->update_average(1, \"pack_fail\");\n"
" return baidu::rpc::ResponseMerger::FAIL;\n"
"}\n");
return true;
}
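// Emits sub_package_map(): slices each repeated pack_on field into a
// per-channel sub request of at most _package_size items, and copies the
// shared (non-packed) fields over unchanged.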
bool generate_paddle_serving_stub_package_map(
google::protobuf::io::Printer* printer,
const ServiceDescriptor* descriptor,
string *error,
const std::string& service_name,
const std::string& class_name,
std::vector<const FieldDescriptor*>& in_shared_fields,
std::vector<const FieldDescriptor*>& in_item_fields) const {
const MethodDescriptor* md = descriptor->FindMethodByName("inference");
if (!md) {
*error = "not found inference method!";
return false;
}
printer->Print(
"const $req_type$* req \n"
" = dynamic_cast<const $req_type$*>(request);\n"
"$req_type$* sub_req = NULL;",
"req_type", google::protobuf::dots_to_colons(
md->input_type()->full_name()));
// 1. For each pack field, compute its index range and copy the values from req to sub_req
printer->Print("\n// 1. Sample fields (must be repeated) are copied by the specified indices\n");
for (uint32_t ii = 0; ii < in_item_fields.size(); ii++) {
const FieldDescriptor* fd = in_item_fields[ii];
std::string field_name = fd->name();
printer->Print("\n/////$field_name$\n", "field_name", field_name);
if (ii == 0) {
printer->Print(
"uint32_t total_size = req->$field_name$_size();\n"
"if (channel_index == 0) {\n"
" _stub_handler->update_average(total_size, \"item_size\");\n"
"}\n", "field_name", field_name);
printer->Print(
"int start = _package_size * channel_index;\n"
"if (start >= total_size) {\n"
" return baidu::rpc::SubCall::Skip();\n"
"}\n"
"int end = _package_size * (channel_index + 1);\n"
"if (end > total_size) {\n"
" end = total_size;\n"
"}\n");
printer->Print(
"sub_req = dynamic_cast<$req_type$*>(_stub_handler->fetch_request());\n"
"if (sub_req == NULL) {\n"
" LOG(FATAL) << \"failed fetch sub_req from stub.\";\n"
" _stub_handler->update_average(1, \"pack_fail\");\n"
" return baidu::rpc::SubCall::Bad();\n"
"}\n",
"name", class_name, "req_type", google::protobuf::dots_to_colons(
md->input_type()->full_name()));
} else {
printer->Print(
"if (req->$field_name$_size() != total_size) {\n"
" LOG(FATAL) << \"pack field size not consistency: \"\n"
" << total_size << \"!=\" << req->$field_name$_size()\n"
" << \", field: $field_name$.\";\n"
" _stub_handler->update_average(1, \"pack_fail\");\n"
" return baidu::rpc::SubCall::Bad();\n"
"}\n", "field_name", field_name);
}
printer->Print("for (uint32_t i = start; i < end; ++i) {\n");
printer->Indent();
if (fd->cpp_type() == google::protobuf::FieldDescriptor::CPPTYPE_MESSAGE) {
printer->Print(
"sub_req->add_$field_name$()->CopyFrom(req->$field_name$(i));\n",
"field_name", field_name);
} else {
printer->Print(
"sub_req->add_$field_name$(req->$field_name$(i));\n",
"field_name", field_name);
}
printer->Outdent();
printer->Print("}\n");
}
// 2. Copy the shared fields from req to sub_req one by one
printer->Print("\n// 2. Shared fields, copied one by one from req to sub_req\n");
if (in_item_fields.size() == 0) {
printer->Print(
"if (sub_req == NULL) { // no packed items\n"
" sub_req = dynamic_cast<$req_type$*>(_stub_handler->fetch_request());\n"
" if (!sub_req) {\n"
" LOG(FATAL) << \"failed fetch sub_req from stub handler.\";\n"
" _stub_handler->update_average(1, \"pack_fail\");\n"
" return baidu::rpc::SubCall::Bad();\n"
" }\n"
"}\n", "req_type", google::protobuf::dots_to_colons(
md->input_type()->full_name()));
}
for (uint32_t si = 0; si < in_shared_fields.size(); si++) {
const FieldDescriptor* fd = in_shared_fields[si];
std::string field_name = fd->name();
printer->Print("\n/////$field_name$\n", "field_name", field_name);
if (fd->is_optional()) {
printer->Print(
"if (req->has_$field_name$()) {\n", "field_name", field_name);
printer->Indent();
}
if (fd->cpp_type() == google::protobuf::FieldDescriptor::CPPTYPE_MESSAGE || fd->is_repeated()) {
printer->Print(
"sub_req->mutable_$field_name$()->CopyFrom(req->$field_name$());\n",
"field_name", field_name);
} else {
printer->Print(
"sub_req->set_$field_name$(req->$field_name$());\n",
"field_name", field_name);
}
if (fd->is_optional()) {
printer->Outdent();
printer->Print("}\n");
}
}
printer->Print(
"LOG(DEBUG)\n"
" << \"[pack] Succ map req at: \"\n"
" << channel_index;\n");
printer->Print(
"google::protobuf::Message* sub_res = _stub_handler->fetch_response();\n"
"if (sub_res == NULL) {\n"
" LOG(FATAL) << \"failed create sub_res from res.\";\n"
" _stub_handler->update_average(1, \"pack_fail\");\n"
" return baidu::rpc::SubCall::Bad();\n"
"}\n"
"return baidu::rpc::SubCall(method, sub_req, sub_res, 0);\n");
return true;
}
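// Packed responses are currently merged the same way as the default case.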
bool generate_paddle_serving_stub_package_merger(
google::protobuf::io::Printer* printer,
const ServiceDescriptor* descriptor,
string *error,
const std::string& service_name,
const std::string& class_name) const {
return generate_paddle_serving_stub_default_merger(
printer, descriptor, error, service_name, class_name);
}
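// Emits the registration macro that binds the generated stub, call mapper and
// response merger classes to the service's full name.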
bool generate_paddle_serving_stub_body(
google::protobuf::io::Printer* printer,
const ServiceDescriptor* descriptor,
string *error,
const std::string& service_name,
const std::string& class_name) const {
std::vector<const MethodDescriptor*> methods;
for (int i = 0; i < descriptor->method_count(); ++i) {
methods.push_back(descriptor->method(i));
}
if (!valid_service_method(methods)) {
*error = "Service can only contains two methods: inferend, debug";
return false;
}
const MethodDescriptor* md = methods[0];
std::map<string, string> variables;
variables["name"] = class_name;
variables["req_type"] = google::protobuf::dots_to_colons(md->input_type()->full_name());
variables["res_type"] = google::protobuf::dots_to_colons(md->output_type()->full_name());
variables["fullname"] = descriptor->full_name();
printer->Print(variables,
"REGIST_STUB_OBJECT_WITH_TAG(\n"
" $name$_Stub,\n"
" $name$_StubCallMapper,\n"
" $name$_StubResponseMerger,\n"
" $req_type$,\n"
" $res_type$,\n"
" \"$fullname$\");\n");
variables.clear();
return true;
}
};
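// protoc plugin entry point for pdcodegen.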
int main(int argc, char** argv) {
PdsCodeGenerator generator;
return google::protobuf::compiler::PluginMain(argc, argv, &generator);
}
#include <iostream>
#include <fstream>
#include <bthread_unstable.h> // bthread_set_worker_startfn
#include "common/inner_common.h"
#include "framework/workflow.h"
#include "framework/service.h"
#include "framework/manager.h"
#include "framework/server.h"
#include "framework/logger.h"
#include "framework/resource.h"
#include "common/constant.h"
using baidu::paddle_serving::predictor::ServerManager;
using baidu::paddle_serving::predictor::WorkflowManager;
using baidu::paddle_serving::predictor::LoggerWrapper;
using baidu::paddle_serving::predictor::InferServiceManager;
using baidu::paddle_serving::predictor::Resource;
using baidu::paddle_serving::predictor::FLAGS_workflow_path;
using baidu::paddle_serving::predictor::FLAGS_workflow_file;
using baidu::paddle_serving::predictor::FLAGS_inferservice_path;
using baidu::paddle_serving::predictor::FLAGS_inferservice_file;
using baidu::paddle_serving::predictor::FLAGS_logger_path;
using baidu::paddle_serving::predictor::FLAGS_logger_file;
using baidu::paddle_serving::predictor::FLAGS_resource_path;
using baidu::paddle_serving::predictor::FLAGS_resource_file;
using baidu::paddle_serving::predictor::FLAGS_reload_interval_s;
using baidu::paddle_serving::predictor::FLAGS_port;
void print_revision(std::ostream& os, void*) {
#if defined(PDSERVING_VERSION)
os << PDSERVING_VERSION;
#else
os << "undefined";
#endif
#if defined(PDSERVING_BUILDTIME)
os << ", BuildAt: " << PDSERVING_BUILDTIME;
#endif
}
static bvar::PassiveStatus<std::string> s_predictor_revision(
"predictor_revision", print_revision, NULL);
DEFINE_bool(v, false, "print version, bool");
DEFINE_bool(g, false, "user defined gflag path");
DECLARE_string(flagfile);
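// Worker start hook registered with bthread: runs per-thread resource initialization.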
void pthread_worker_start_fn() {
Resource::instance().thread_initialize();
}
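// If the infer-service configure file specifies a port, let it override the --port flag.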
static void g_change_server_port() {
comcfg::Configure conf;
if (conf.load(FLAGS_inferservice_path.c_str(), FLAGS_inferservice_file.c_str()) != 0) {
LOG(WARNING) << "failed to load configure[" << FLAGS_inferservice_path
<< "," << FLAGS_inferservice_file << "].";
return;
}
uint32_t port = 0;
int err = conf["port"].get_uint32(&port, 0);
if (err == 0) {
FLAGS_port = port;
LOG(INFO) << "use configure[" << FLAGS_inferservice_path << "/"
<< FLAGS_inferservice_file << "] port[" << port << "] instead of flags";
}
return;
}
#ifdef UNIT_TEST
int ut_main(int argc, char** argv) {
#else
int main(int argc, char** argv) {
#endif
google::ParseCommandLineFlags(&argc, &argv, true);
if (FLAGS_v) {
print_revision(std::cout, NULL);
std::cout << std::flush;
return 0;
}
if (!FLAGS_g) {
google::SetCommandLineOption("flagfile", "conf/gflags.conf");
}
google::ParseCommandLineFlags(&argc, &argv, true);
g_change_server_port();
// initialize logger instance
if (LoggerWrapper::instance().initialize(
FLAGS_logger_path, FLAGS_logger_file) != 0) {
LOG(ERROR) << "Failed initialize logger, conf:"
<< FLAGS_logger_path << "/" << FLAGS_logger_file;
return -1;
}
LOG(TRACE) << "Succ initialize logger";
// initialize resource manager
if (Resource::instance().initialize(
FLAGS_resource_path, FLAGS_resource_file) != 0) {
LOG(ERROR) << "Failed initialize resource, conf:"
<< FLAGS_resource_path << "/" << FLAGS_resource_file;
return -1;
}
LOG(TRACE) << "Succ initialize resource";
// initialize workflow manager
if (WorkflowManager::instance().initialize(
FLAGS_workflow_path, FLAGS_workflow_file) != 0) {
LOG(ERROR) << "Failed initialize workflow manager, conf:"
<< FLAGS_workflow_path << "/" << FLAGS_workflow_file;
return -1;
}
LOG(TRACE) << "Succ initialize workflow";
// initialize service manager
if (InferServiceManager::instance().initialize(
FLAGS_inferservice_path, FLAGS_inferservice_file) != 0) {
LOG(ERROR)
<< "Failed initialize infer service manager, conf:"
<< FLAGS_inferservice_path << "/" << FLAGS_inferservice_file;
return -1;
}
LOG(TRACE) << "Succ initialize inferservice";
int errcode = bthread_set_worker_startfn(pthread_worker_start_fn);
if (errcode != 0) {
LOG(FATAL) << "Failed call pthread worker start function, error_code[" << errcode << "]";
return -1;
}
LOG(INFO) << "Succ call pthread worker start function";
if (ServerManager::instance().start_and_wait() != 0) {
LOG(ERROR) << "Failed start server and wait!";
return -1;
}
LOG(TRACE) << "Succ start service manager";
if (InferServiceManager::instance().finalize() != 0) {
LOG(ERROR) << "Failed finalize infer service manager.";
}
if (WorkflowManager::instance().finalize() != 0) {
LOG(ERROR) << "Failed finalize workflow manager";
}
if (Resource::instance().finalize() != 0) {
LOG(ERROR) << "Failed finalize resource manager";
}
if (LoggerWrapper::instance().finalize() != 0) {
LOG(ERROR) << "Failed finalize logger wrapper";
}
LOG(INFO) << "Paddle Inference Server exit successfully!";
return 0;
}
/***************************************************************************
*
* Copyright (c) 2018 Baidu.com, Inc. All Rights Reserved
*
**************************************************************************/
/**
* @file test_bsf.cpp
* @author root(com@baidu.com)
* @date 2018/09/20 13:54:52
* @brief
*
**/
#include "test_bsf.h"
namespace baidu {
namespace paddle_serving {
namespace unittest {
base::atomic<size_t> global_id;
void TestItem::auto_gen() {
id = global_id.fetch_add(1);
char buf[128];
snprintf(buf, sizeof(buf), "test-%d", id);
text = buf;
printf("id:%d,text:%s\n", id, text.c_str());
}
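// Batch callback for TaskExecutor: copies every input item to the output slot,
// sleeping briefly to simulate per-item work.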
void work(const std::vector<TestItem>& in, std::vector<TestItem>& out) {
for (size_t i = 0; i < in.size(); ++i) {
out[i] = in[i];
usleep(50);
}
}
TEST_F(TestBsf, test_single_thread) {
// initialize TaskExecutor
global_id.store(0, base::memory_order_relaxed);
im::bsf::TaskExecutor<im::bsf::Task<TestItem, TestItem> >::instance()->set_thread_callback_fn(
boost::bind(&work, _1, _2));
EXPECT_EQ((im::bsf::TaskExecutor<im::bsf::Task<TestItem, TestItem> >::instance()->start(1)), 0);
std::vector<TestItem> in;
std::vector<TestItem> out;
TestItem::create(in, out, 5);
im::bsf::TaskManager<TestItem, TestItem> task_manager;
task_manager.schedule(in, out);
printf("wait for bsf finish...\n");
task_manager.wait();
printf("bsf executed finished\n");
ASSERT_EQ(out.size(), 5);
for (size_t i = 0; i < out.size(); i++) {
char temp[128];
snprintf(temp, sizeof(temp), "test-%d", i);
EXPECT_EQ(i, in[i].id);
EXPECT_EQ(i, out[i].id);
EXPECT_STREQ(temp, in[i].text.c_str());
EXPECT_STREQ(temp, out[i].text.c_str());
}
im::bsf::TaskExecutor<im::bsf::Task<TestItem, TestItem> >::instance()->stop();
}
TEST_F(TestBsf, test_multi_thread) {
// initialize TaskExecutor
global_id.store(0, base::memory_order_relaxed);
im::bsf::TaskExecutor<im::bsf::Task<TestItem, TestItem> >::instance()->set_thread_callback_fn(
boost::bind(&work, _1, _2));
im::bsf::TaskExecutor<im::bsf::Task<TestItem, TestItem> >::instance()->set_batch_size(100);
EXPECT_EQ((im::bsf::TaskExecutor<im::bsf::Task<TestItem, TestItem> >::instance()->start(3)), 0);
size_t psize = 5;
pthread_t pid[psize];
for (size_t i = 0; i < psize; ++i) {
pthread_create(&pid[i], NULL, &TestBsf::task_trigger, NULL);
}
for (size_t i = 0; i < psize; ++i) {
pthread_join(pid[i], NULL);
}
im::bsf::TaskExecutor<im::bsf::Task<TestItem, TestItem> >::instance()->stop();
}
}
}
}
/* vim: set expandtab ts=4 sw=4 sts=4 tw=100: */
/***************************************************************************
*
* Copyright (c) 2018 Baidu.com, Inc. All Rights Reserved
*
**************************************************************************/
/**
* @file test_bsf.h
* @author root(com@baidu.com)
* @date 2018/09/20 13:53:01
* @brief
*
**/
#ifndef BAIDU_PADDLE_SERVING_PREDICTOR_TEST_BSF_H
#define BAIDU_PADDLE_SERVING_PREDICTOR_TEST_BSF_H
#include <gtest/gtest.h>
#include "common/inner_common.h"
#include "framework/bsf.h"
namespace baidu {
namespace paddle_serving {
namespace unittest {
#ifndef DEFINE_UP_DOWN
#define DEFINE_UP_DOWN \
void SetUp() {} \
void TearDown() {}
struct TestItem {
void auto_gen();
bool operator==(const TestItem& other) {
return text == other.text && id == other.id;
}
static void create(std::vector<TestItem>& in, std::vector<TestItem>& out, size_t size) {
in.clear();
out.clear();
for (size_t i = 0; i < size; i++) {
TestItem item;
item.auto_gen();
in.push_back(item);
item.id += 1000000;
out.push_back(item);
}
}
std::string text;
size_t id;
};
class TestBsf : public ::testing::Test {
public:
TestBsf() {}
virtual ~TestBsf() {}
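// Each trigger thread schedules 100 random-sized batches and checks that the
// outputs echo the inputs.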
static void* task_trigger(void* arg) {
for (size_t i = 0; i < 100; i++) {
std::vector<TestItem> in;
std::vector<TestItem> out;
size_t count = rand() % 10 + 1;
TestItem::create(in, out, count);
im::bsf::TaskManager<TestItem, TestItem> task_manager;
task_manager.schedule(in, out);
printf("wait for bsf finish..., count:%d, first:%d\n", count, in[0].id);
task_manager.wait();
printf("bsf executed finished, count:%d, first:%d\n", count, in[0].id);
EXPECT_EQ(out.size(), count);
for (size_t i = 0; i < out.size(); i++) {
EXPECT_EQ(in[i].id, out[i].id);
char temp[128];
snprintf(temp, sizeof(temp), "test-%d", in[i].id);
EXPECT_STREQ(temp, in[i].text.c_str());
EXPECT_STREQ(temp, out[i].text.c_str());
}
}
return NULL;
}
DEFINE_UP_DOWN
};
#undef DEFINE_UP_DOWN
#endif
}
}
}
#endif // BAIDU_PADDLE_SERVING_PREDICTOR_TEST_BSF_H
/* vim: set expandtab ts=4 sw=4 sts=4 tw=100: */
#include "common/inner_common.h"
#include <gtest/gtest.h>
int main(int argc, char** argv) {
LOG(INFO) << "Start running all ut cases...";
testing::InitGoogleTest(&argc, argv);
return RUN_ALL_TESTS();
}
#include "test_tool.h"
#include "test_manager.h"
#include "framework/manager.h"
#include "framework/service.h"
namespace baidu {
namespace paddle_serving {
namespace unittest {
using baidu::paddle_serving::predictor::Manager;
using baidu::paddle_serving::predictor::InferService;
using baidu::paddle_serving::predictor::ParallelInferService;
using baidu::paddle_serving::predictor::FLAGS_use_parallel_infer_service;
using baidu::paddle_serving::predictor::InferServiceManager;
struct ManagerItem {
int a;
float b;
int init(const comcfg::ConfigUnit& c) {
return 0;
}
static const char* tag() {
return "Item";
}
};
TEST_F(TestManager, test_manager_instance) {
ManagerItem* item = Manager<ManagerItem>::instance().create_item();
EXPECT_FALSE(item == NULL);
item->a = 1;
item->b = 2.0;
}
TEST_F(TestManager, test_infer_service_create) {
InferService seq;
ParallelInferService par;
FLAGS_use_parallel_infer_service = false;
EXPECT_EQ(typeid(seq),
typeid(*InferServiceManager::instance().create_item()));
FLAGS_use_parallel_infer_service = true;
EXPECT_EQ(typeid(par),
typeid(*InferServiceManager::instance().create_item()));
}
TEST_F(TestManager, test_conf_success) {
const char* conf_content =
"[@Item]\n\
name: item1\n\
a:b\n\
[@Item]\n\
name: item2\n\
c:d";
AutoTempFile file(conf_content);
typedef Manager<ManagerItem> mgr;
EXPECT_EQ(mgr::instance().initialize("./", file.name()), 0);
ManagerItem* item11 = mgr::instance().item("item1");
ManagerItem* item12 = &mgr::instance()["item1"];
EXPECT_EQ(item11, item12);
ManagerItem* item21 = mgr::instance().item("item2");
ManagerItem* item22 = &mgr::instance()["item2"];
EXPECT_EQ(item21, item22);
}
TEST_F(TestManager, test_conf_success_item_not_found) {
const char* conf_content =
"[@Item1]\n\
name: item1\n\
a:b\n\
[@Item2]\n\
name: item2\n\
c:d";
AutoTempFile file(conf_content);
typedef Manager<ManagerItem> mgr;
EXPECT_EQ(mgr::instance().initialize("./", file.name()), 0);
}
TEST_F(TestManager, test_conf_failed_name_not_found) {
const char* conf_content =
"[@Item]\n\
name2: item1\n\
a:b\n\
[@Item]\n\
name: item2\n\
c:d";
AutoTempFile file(conf_content);
typedef Manager<ManagerItem> mgr;
EXPECT_EQ(mgr::instance().initialize("./", file.name()), -1);
}
}
}
}
#ifndef BAIDU_PADDLE_SERVING_PREDICTOR_TEST_MANAGER_H
#define BAIDU_PADDLE_SERVING_PREDICTOR_TEST_MANAGER_H
#include <gtest/gtest.h>
namespace baidu {
namespace paddle_serving {
namespace unittest {
#ifndef DEFINE_UP_DOWN
#define DEFINE_UP_DOWN \
void SetUp() {} \
void TearDown() {}
class TestManager : public ::testing::Test {
public:
TestManager() {}
virtual ~TestManager() {}
DEFINE_UP_DOWN
};
#undef DEFINE_UP_DOWN
#endif
}
}
}
#endif
#include "test_tool.h"
#include "test_message_op.h"
#include "framework/manager.h"
#include "framework/service.h"
#include "framework/dag.h"
namespace baidu {
namespace paddle_serving {
namespace unittest {
using baidu::paddle_serving::predictor::Manager;
using baidu::paddle_serving::predictor::InferService;
using baidu::paddle_serving::predictor::ParallelInferService;
using baidu::paddle_serving::predictor::FLAGS_use_parallel_infer_service;
using baidu::paddle_serving::predictor::InferServiceManager;
using baidu::paddle_serving::predictor::Bus;
using baidu::paddle_serving::predictor::Dag;
using baidu::paddle_serving::predictor::Channel;
using pds::ut::OpMessageData;
TEST_F(TestMSGOP, test_init) {
Bus* bus = new Bus();
ASSERT_NE(bus, NULL);
Dag* dag = NULL;
MsgOP op;
std::string op_name = "TestMSGOp";
std::string op_type = "TestMSGOp";
EXPECT_EQ(0, op.init(bus, dag, (uint32_t)9999, op_name, op_type, NULL));
EXPECT_FALSE(op.has_calc());
EXPECT_EQ(9999, op.id());
EXPECT_STREQ("TestMSGOp", op.name());
EXPECT_STREQ("", op.debug_string().c_str());
EXPECT_NE(op._timer, NULL);
EXPECT_EQ(bus, op._bus);
OpMessageData* ab = op.mutable_data<OpMessageData>();
EXPECT_EQ(33, ab->a());
EXPECT_FLOAT_EQ(4.4, ab->b());
Channel* chn = op.mutable_channel();
EXPECT_EQ(chn->id(), 9999);
EXPECT_STREQ(chn->op().c_str(), "TestMSGOp");
EXPECT_EQ(ab, chn->param());
EXPECT_EQ(0, bus->size());
Channel* chn2 = bus->channel_by_name("TestOp");
EXPECT_EQ(NULL, chn2);
// Message OP can obtain data via message()
EXPECT_EQ(ab, chn->message());
}
}
}
}
#ifndef BAIDU_PADDLE_SERVING_PREDICTOR_TEST_MESSAGE_OP_H
#define BAIDU_PADDLE_SERVING_PREDICTOR_TEST_MESSAGE_OP_H
#include <gtest/gtest.h>
#include "op/op.h"
#include "msg_data.pb.h"
#include "framework/channel.h"
namespace baidu {
namespace paddle_serving {
namespace unittest {
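// Test op backed by a protobuf message channel; inference() fills fixed values.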
class MsgOP : public baidu::paddle_serving::predictor::OpWithChannel<
pds::ut::OpMessageData> {
public:
int inference() {
pds::ut::OpMessageData* msg = mutable_data<pds::ut::OpMessageData>();
msg->set_a(11);
msg->set_b(22.2);
return 0;
}
};
#ifndef DEFINE_UP_DOWN
#define DEFINE_UP_DOWN \
void SetUp() {} \
void TearDown() {}
class TestMSGOP : public ::testing::Test {
public:
TestMSGOP() {}
virtual ~TestMSGOP() {}
DEFINE_UP_DOWN
};
#undef DEFINE_UP_DOWN
#endif
}
}
}
#endif
#include <base/files/temp_file.h>
#include "framework/manager.h"
#include "framework/service.h"
#include "framework/dag.h"
#include "framework/dag_view.h"
#include "test_tool.h"
#include "test_op.h"
#include "test_message_op.h"
namespace baidu {
namespace paddle_serving {
namespace unittest {
using baidu::paddle_serving::predictor::Manager;
using baidu::paddle_serving::predictor::InferService;
using baidu::paddle_serving::predictor::ParallelInferService;
using baidu::paddle_serving::predictor::FLAGS_use_parallel_infer_service;
using baidu::paddle_serving::predictor::InferServiceManager;
using baidu::paddle_serving::predictor::Bus;
using baidu::paddle_serving::predictor::Dag;
using baidu::paddle_serving::predictor::Channel;
using baidu::paddle_serving::predictor::Dag;
using baidu::paddle_serving::predictor::DagView;
using baidu::paddle_serving::predictor::ViewStage;
using baidu::paddle_serving::predictor::ViewNode;
using pds::ut::OpMessageData;
TEST_F(TestOP, test_init) {
Bus* bus = new Bus();
ASSERT_NE(bus, NULL);
Dag* dag = NULL;
ABOP op;
std::string op_name = "TestOp";
std::string op_type = "TestOp";
EXPECT_EQ(0, op.init(bus, dag, (uint32_t)999, op_name, op_type, NULL));
EXPECT_FALSE(op.has_calc());
EXPECT_EQ(999, op.id());
EXPECT_STREQ("TestOp", op.name());
EXPECT_STREQ("{\"a\": 3, \"b\": 4}", op.debug_string().c_str());
EXPECT_NE(op._timer, NULL);
EXPECT_EQ(bus, op._bus);
AB* ab = op.mutable_data<AB>();
EXPECT_EQ(3, ab->a);
EXPECT_FLOAT_EQ(4.0, ab->b);
Channel* chn = op.mutable_channel();
EXPECT_EQ(chn->id(), 999);
EXPECT_STREQ(chn->op().c_str(), "TestOp");
EXPECT_EQ(ab, chn->param());
EXPECT_EQ(NULL, chn->message());
EXPECT_EQ(0, bus->size());
Channel* chn2 = bus->channel_by_name("TestOp");
EXPECT_EQ(NULL, chn2);
}
TEST_F(TestOP, test_depend_argment) {
Bus bus;
Dag dag;
AutoTempFile file(
"[@Node]\n\
name: node1\n\
type: ABOP\n\
[@Node]\n\
name: node2\n\
type: ABOP\n\
[.@Depend]\n\
name: node1\n\
mode: RO\n\
[@Node]\n\
name: node3\n\
type: ABOP\n\
[.@Depend]\n\
name: node1\n\
mode: RO\n\
[@Node]\n\
name: node4\n\
type: ABOP\n\
[.@Depend]\n\
name: node2\n\
mode: RW\n\
[.@Depend]\n\
name: node3\n\
mode: RO");
std::string dag_name = "DagTest";
EXPECT_EQ(0, dag.init("./", file.name(), dag_name));
ABOP op;
std::string op_name = "node4";
std::string op_type = "ABOP";
EXPECT_EQ(0, op.init(&bus, &dag, (uint32_t)888, op_name, op_type, NULL));
EXPECT_FALSE(op.is_readable("node1"));
EXPECT_FALSE(op.is_mutable("node1"));
EXPECT_TRUE(op.is_readable("node2"));
EXPECT_TRUE(op.is_mutable("node2"));
EXPECT_TRUE(op.is_readable("node3"));
EXPECT_FALSE(op.is_mutable("node3"));
// process() is not called, channel has not been
// committed to bus yet!
EXPECT_TRUE(NULL == op.get_depend_channel("node1"));
EXPECT_TRUE(NULL == op.get_depend_channel("node2"));
EXPECT_TRUE(NULL == op.get_depend_channel("node3"));
EXPECT_TRUE(NULL == op.mutable_depend_channel("node1"));
EXPECT_TRUE(NULL == op.mutable_depend_channel("node2"));
EXPECT_TRUE(NULL == op.mutable_depend_channel("node3"));
}
TEST_F(TestOP, test_inference) {
Bus bus;
Dag dag;
AutoTempFile file(
"[@Node]\n\
name: node1\n\
type: ABOP\n\
[@Node]\n\
name: node2\n\
type: ABOP\n\
[.@Depend]\n\
name: node1\n\
mode: RO\n\
[@Node]\n\
name: node3\n\
type: ABOP\n\
[.@Depend]\n\
name: node1\n\
mode: RO\n\
[@Node]\n\
name: node4\n\
type: ABOP\n\
[.@Depend]\n\
name: node2\n\
mode: RW\n\
[.@Depend]\n\
name: node3\n\
mode: RO");
std::string dag_name = "DagTest";
EXPECT_EQ(0, dag.init("./", file.name(), dag_name));
ABOP op1;
std::string op1_name = "node1";
std::string op_type = "ABOP";
EXPECT_EQ(0, op1.init(&bus, &dag, (uint32_t)888, op1_name, op_type, NULL));
ABOP op2;
std::string op2_name = "node2";
EXPECT_EQ(0, op2.init(&bus, &dag, (uint32_t)888, op2_name, op_type, NULL));
MsgOP op3;
std::string op3_name = "node3";
EXPECT_EQ(0, op3.init(&bus, &dag, (uint32_t)888, op3_name, op_type, NULL));
ABOP op4;
std::string op4_name = "node4";
EXPECT_EQ(0, op4.init(&bus, &dag, (uint32_t)888, op4_name, op_type, NULL));
EXPECT_TRUE(op2.is_readable("node1"));
EXPECT_FALSE(op2.is_mutable("node1"));
EXPECT_FALSE(op2.is_readable("node3"));
EXPECT_FALSE(op2.is_mutable("node3"));
EXPECT_FALSE(op2.is_readable("node4"));
EXPECT_FALSE(op2.is_mutable("node4"));
EXPECT_TRUE(op3.is_readable("node1"));
EXPECT_FALSE(op3.is_mutable("node1"));
EXPECT_FALSE(op3.is_readable("node2"));
EXPECT_FALSE(op3.is_mutable("node2"));
EXPECT_FALSE(op3.is_readable("node4"));
EXPECT_FALSE(op3.is_mutable("node4"));
EXPECT_FALSE(op4.is_readable("node1"));
EXPECT_FALSE(op4.is_mutable("node1"));
EXPECT_TRUE(op4.is_readable("node2"));
EXPECT_TRUE(op4.is_mutable("node2"));
EXPECT_TRUE(op4.is_readable("node3"));
EXPECT_FALSE(op4.is_mutable("node3"));
EXPECT_EQ(0, op1.process(false));
EXPECT_EQ(0, op2.process(false));
EXPECT_EQ(0, op3.process(false));
EXPECT_EQ(0, op4.process(true));
EXPECT_TRUE(NULL == op4.get_depend_channel("node1"));
EXPECT_FALSE(NULL == op4.get_depend_channel("node2"));
EXPECT_FALSE(NULL == op4.get_depend_channel("node3"));
EXPECT_TRUE(NULL == op4.mutable_depend_channel("node1"));
EXPECT_FALSE(NULL == op4.mutable_depend_channel("node2"));
EXPECT_TRUE(NULL == op4.mutable_depend_channel("node3"));
const AB* dop1
= op4.get_depend_argument<AB>("node1");
const AB* dop21
= op4.get_depend_argument<AB>("node2");
const google::protobuf::Message* dop22
= op4.get_depend_channel("node2")->message();
const google::protobuf::Message* dop23
= op4.get_depend_argument<google::protobuf::Message>("node2");
const OpMessageData* dop31
= op4.get_depend_argument<OpMessageData>("node3");
const google::protobuf::Message* dop32
= op4.get_depend_channel("node3")->message();
const google::protobuf::Message* dop33
= op4.get_depend_argument<google::protobuf::Message>("node3");
EXPECT_EQ(NULL, dop1);
EXPECT_NE(NULL, dop21);
EXPECT_EQ(NULL, dop22);
EXPECT_EQ(NULL, dop23);
EXPECT_NE(NULL, dop31);
EXPECT_NE(NULL, dop32);
EXPECT_EQ(NULL, dop33);
EXPECT_EQ(dop31, dop32);
const OpMessageData* dop322 = dynamic_cast<const OpMessageData*>(dop32);
EXPECT_EQ(1, dop21->a);
EXPECT_FLOAT_EQ(2.2, dop21->b);
EXPECT_EQ(11, dop31->a());
EXPECT_FLOAT_EQ(22.2, dop31->b());
EXPECT_EQ(11, dop322->a());
EXPECT_FLOAT_EQ(22.2, dop322->b());
}
TEST_F(TestOP, test_op_with_channel_and_conf) {
Dag dag;
std::string op_name = "test_op";
std::string name_in_conf = "test_name_in_conf";
base::TempFile dag_conf;
dag_conf.save_format(
"[@Node]\n"
"name: %s\n"
"type: OpWithConf\n"
"name_in_conf: %s\n", op_name.c_str(), name_in_conf.c_str());
std::string dag_name = "DagTest";
ASSERT_EQ(0, dag.init("./", dag_conf.fname(), dag_name));
DagView view;
view.init(&dag, "service_name");
ASSERT_EQ(0, view.execute(NULL));
const std::vector<ViewStage*>& view_stage_vec = view._view;
uint32_t stage_size = view_stage_vec.size();
for (uint32_t si = 0; si < stage_size; si++) {
ViewStage* vstage = view_stage_vec[si];
uint32_t node_size = vstage->nodes.size();
for (uint32_t ni = 0; ni < node_size; ni++) {
ViewNode* vnode = vstage->nodes[ni];
OpWithConf* op = dynamic_cast<OpWithConf*>(vnode->op);
ASSERT_NE(NULL, op);
EXPECT_STREQ(op->name(), op_name.c_str());
EXPECT_STREQ(
op->get_self_config()->name_in_conf.c_str(),
name_in_conf.c_str());
EXPECT_STREQ(
op->mutable_data<OpOutput>()->name_for_output.c_str(),
name_in_conf.c_str());
}
}
}
}
}
}
#ifndef BAIDU_PADDLE_SERVING_PREDICTOR_TEST_OP_H
#define BAIDU_PADDLE_SERVING_PREDICTOR_TEST_OP_H
#include <gtest/gtest.h>
#include "op/op.h"
#include "framework/channel.h"
namespace baidu {
namespace paddle_serving {
namespace unittest {
struct AB {
int a;
float b;
int Clear() {
a = 3;
b = 4.0;
return 0;
}
std::string ShortDebugString() const {
std::ostringstream oss;
oss << "{\"a\": ";
oss << a;
oss << ", \"b\": ";
oss << b;
oss << "}";
return oss.str();
}
};
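// Test op backed by a plain AB channel; inference() fills fixed values.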
class ABOP : public baidu::paddle_serving::predictor::OpWithChannel<AB> {
public:
int inference() {
AB* ab = mutable_data<AB>();
ab->a = 1;
ab->b = 2.2;
return 0;
}
DECLARE_OP(ABOP);
};
DEFINE_OP(ABOP);
struct OpConf {
std::string name_in_conf;
};
struct OpOutput {
std::string name_for_output;
void Clear() { name_for_output.clear(); }
std::string ShortDebugString() const { return name_for_output; }
};
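// Op whose node configure carries name_in_conf; inference() copies it into the
// output channel so tests can verify per-node configuration plumbing.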
class OpWithConf : public baidu::paddle_serving::predictor::OpWithChannelAndConf<
OpOutput, OpConf> {
public:
DECLARE_OP(OpWithConf);
void* create_config(const comcfg::ConfigUnit& conf) {
OpConf* op_conf = new (std::nothrow) OpConf();
int err = 0;
op_conf->name_in_conf = conf["name_in_conf"].to_cstr(&err);
if (err != 0) {
return NULL;
}
return op_conf;
}
void delete_config(void* conf) { delete static_cast<OpConf*>(conf); }
int inference() {
OpConf* op_conf = get_self_config();
OpOutput* op_output = mutable_data<OpOutput>();
op_output->name_for_output = op_conf->name_in_conf;
return 0;
}
};
DEFINE_OP(OpWithConf);
#ifndef DEFINE_UP_DOWN
#define DEFINE_UP_DOWN \
void SetUp() {} \
void TearDown() {}
class TestOP : public ::testing::Test {
public:
TestOP() {}
virtual ~TestOP() {}
DEFINE_UP_DOWN
};
#undef DEFINE_UP_DOWN
#endif
}
}
}
#endif
#include "test_server_manager.h" // TestServerManager
#include <gflags/gflags.h> // FLAGS
#include "framework/server.h" // ServerManager
namespace baidu {
namespace paddle_serving {
namespace unittest {
using baidu::paddle_serving::predictor::ServerManager;
using baidu::paddle_serving::predictor::FLAGS_enable_nshead_protocol;
using baidu::paddle_serving::predictor::FLAGS_nshead_protocol;
TEST_F(TestServerManager, test_nshead_protocol) {
ASSERT_EQ(FLAGS_enable_nshead_protocol, false);
ServerManager server_manager1;
EXPECT_EQ(server_manager1._options.nshead_service, NULL);
google::SetCommandLineOption("enable_nshead_protocol", "true");
ASSERT_EQ(FLAGS_enable_nshead_protocol, true);
ASSERT_STREQ(FLAGS_nshead_protocol.c_str(), "itp");
ServerManager server_manager2;
EXPECT_NE(server_manager2._options.nshead_service, NULL);
std::string protocol = "nova_pbrpc";
google::SetCommandLineOption("enable_nshead_protocol", "true");
google::SetCommandLineOption("nshead_protocol", protocol.c_str());
ASSERT_EQ(FLAGS_enable_nshead_protocol, true);
ASSERT_STREQ(FLAGS_nshead_protocol.c_str(), protocol.c_str());
ServerManager server_manager3;
EXPECT_NE(server_manager3._options.nshead_service, NULL);
protocol = "public_pbrpc";
google::SetCommandLineOption("enable_nshead_protocol", "true");
google::SetCommandLineOption("nshead_protocol", protocol.c_str());
ASSERT_EQ(FLAGS_enable_nshead_protocol, true);
ASSERT_STREQ(FLAGS_nshead_protocol.c_str(), protocol.c_str());
ServerManager server_manager4;
EXPECT_NE(server_manager4._options.nshead_service, NULL);
protocol = "nshead_mcpack";
google::SetCommandLineOption("enable_nshead_protocol", "true");
google::SetCommandLineOption("nshead_protocol", protocol.c_str());
ASSERT_EQ(FLAGS_enable_nshead_protocol, true);
ASSERT_STREQ(FLAGS_nshead_protocol.c_str(), protocol.c_str());
ServerManager server_manager5;
EXPECT_NE(server_manager5._options.nshead_service, NULL);
protocol = "nshead_wrong_protocol";
google::SetCommandLineOption("enable_nshead_protocol", "true");
google::SetCommandLineOption("nshead_protocol", protocol.c_str());
ASSERT_EQ(FLAGS_enable_nshead_protocol, true);
ASSERT_STREQ(FLAGS_nshead_protocol.c_str(), protocol.c_str());
ServerManager server_manager6;
EXPECT_EQ(server_manager6._options.nshead_service, NULL);
}
} // namespace unittest
} // namespace paddle_serving
} // namespace baidu
#ifndef BAIDU_PADDLE_SERVING_PREDICTOR_TEST_SERVER_MANAGER_H
#define BAIDU_PADDLE_SERVING_PREDICTOR_TEST_SERVER_MANAGER_H
#include <gtest/gtest.h>
namespace baidu {
namespace paddle_serving {
namespace unittest {
class TestServerManager : public ::testing::Test {
public:
void SetUp() { }
void TearDown() { }
};
} // namespace unittest
} // namespace paddle_serving
} // namespace baidu
#endif // BAIDU_PADDLE_SERVING_PREDICTOR_TEST_SERVER_MANAGER_H
#ifndef BAIDU_PADDLE_SERVING_PREDICTOR_TEST_TOOL_H
#define BAIDU_PADDLE_SERVING_PREDICTOR_TEST_TOOL_H
#include <time.h>
#include <stdlib.h>
#include <stdio.h>
#include <fcntl.h>
#include<unistd.h>
#include <sys/time.h>
#include <gtest/gtest.h>
namespace baidu {
namespace paddle_serving {
namespace unittest {
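// Writes the given content to a uniquely named temporary .conf file and removes
// it on destruction.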
class AutoTempFile {
public:
AutoTempFile(const char* content) {
_need_del = false;
_name = generate_temp_name();
FILE* fd = fopen(_name.c_str(), "w");
if (!fd) {
return ;
}
fprintf(fd, "%s", content);
fclose(fd);
_need_del = true;
}
~AutoTempFile() {
if (_need_del) {
remove(_name.c_str());
}
}
const char* name() {
return _name.c_str();
}
private:
std::string generate_temp_name() {
timeval tv;
srand(time(0));
gettimeofday(&tv, NULL);
std::ostringstream oss;
oss << "uttest_temp_";
oss << tv.tv_sec * 1000 + tv.tv_usec / 1000;
oss << "_";
oss << (int)getpid();
oss << "_";
oss << rand();
oss << ".conf";
return oss.str();
}
private:
std::string _name;
bool _need_del;
};
}
}
}
#endif