Commit 6ed1e5d9 authored by wangguibao

Remove dependency pcre; LOG(FATAL)->LOG(ERROR)

Change-Id: I27786f118f9a30d79e569323143ca6b230404aad
Parent 849a8492
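
For context on the LOG(FATAL)->LOG(ERROR) change: under glog, LOG(FATAL) writes the message and then aborts the whole process, while LOG(ERROR) only records it, leaving error handling to the caller's return code, which is the pattern the hunks below converge on. A minimal sketch of that pattern (the helper name and file path are illustrative, not taken from the repository):

#include <cstdio>
#include <glog/logging.h>

// Illustrative helper, not part of the commit: open a data file and report
// failure without bringing down the serving process.
static int load_data_file(const char* file_name) {
    FILE* fp = fopen(file_name, "r");
    if (!fp) {
        // LOG(FATAL) would log and then abort() the entire process;
        // LOG(ERROR) logs and lets the caller decide what to do with -1.
        LOG(ERROR) << "Failed open data file: " << file_name;
        return -1;
    }
    fclose(fp);
    return 0;
}
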
......@@ -69,7 +69,6 @@ include(external/leveldb)
include(external/protobuf)
include(external/snappy)
include(external/brpc)
include(external/pcre)
include(external/boost)
include(flags)
include(generic)
......@@ -83,7 +82,6 @@ set(EXTERNAL_LIBS
glog
protobuf
paddlepaddle
pcre
brpc
)
......
# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
INCLUDE(ExternalProject)
SET(PCRE_SOURCES_DIR ${THIRD_PARTY_PATH}/pcre)
SET(PCRE_INSTALL_DIR ${THIRD_PARTY_PATH}/install/pcre)
SET(PCRE_ROOT ${PCRE_INSTALL_DIR} CACHE FILEPATH "pcre root directory." FORCE)
SET(PCRE_INCLUDE_DIR "${PCRE_INSTALL_DIR}/include" CACHE PATH "pcre include directory." FORCE)
INCLUDE_DIRECTORIES(${PCRE_INCLUDE_DIR}) # For pcre code to include its own headers.
INCLUDE_DIRECTORIES(${THIRD_PARTY_PATH}/install) # For Paddle code to include pcre.h.
ExternalProject_Add(
extern_pcre
${EXTERNAL_PROJECT_LOG_ARGS}
SVN_REPOSITORY "svn://vcs.exim.org/pcre/code/tags/pcre-7.7"
PREFIX ${PCRE_SOURCES_DIR}
UPDATE_COMMAND ""
PATCH_COMMAND sh autogen.sh
CONFIGURE_COMMAND ../extern_pcre/configure --prefix=${PCRE_INSTALL_DIR} --disable-shared --with-pic
BUILD_COMMAND make
INSTALL_COMMAND make install
CMAKE_CACHE_ARGS -DCMAKE_INSTALL_PREFIX:PATH=${PCRE_INSTALL_DIR}
-DCMAKE_POSITION_INDEPENDENT_CODE:BOOL=ON
-DCMAKE_BUILD_TYPE:STRING=${THIRD_PARTY_BUILD_TYPE}
)
IF(WIN32)
IF(NOT EXISTS "${PCRE_INSTALL_DIR}/lib/libz.lib")
add_custom_command(TARGET extern_pcre POST_BUILD
COMMAND cmake -E copy ${PCRE_INSTALL_DIR}/lib/pcrestatic.lib ${PCRE_INSTALL_DIR}/lib/libpcre.lib
)
ENDIF()
SET(PCRE_LIBRARIES "${PCRE_INSTALL_DIR}/lib/libpcre.lib" CACHE FILEPATH "pcre library." FORCE)
ELSE(WIN32)
SET(PCRE_LIBRARIES "${PCRE_INSTALL_DIR}/lib/libpcre.a" CACHE FILEPATH "pcre library." FORCE)
ENDIF(WIN32)
ADD_LIBRARY(pcre STATIC IMPORTED GLOBAL)
SET_PROPERTY(TARGET pcre PROPERTY IMPORTED_LOCATION ${PCRE_LIBRARIES})
ADD_DEPENDENCIES(pcre extern_pcre)
LIST(APPEND external_project_dependencies pcre)
#coding:gbk
COMPILER('gcc482')
#Working directory.
WORKROOT('../../../')
# version info
## module name
repo_module = REPO_PATH()
## git branch name (master/rb)
repo_name = REPO_BRANCH()
## last changed version
repo_version = REPO_LAST_CHANGED_REV()
version = repo_module + '_' + repo_name.split('/')[-1] + ',' + repo_version
build_time = os.popen('date +%Y-%m-%d_%H:%M:%S').read().strip()
#Preprocessor flags.
CPPFLAGS(r'-DPDSERVING_VERSION=\"%s\"' % (version))
CPPFLAGS(r'-DPDSERVING_BUILDTIME=\"%s\"' % (build_time))
#Copy using hard links.
#CopyUsingHardLink(True)
#C++ compiler flags.
#CXXFLAGS('-fsanitize=address -g -DNDEBUG -O2 -pipe -W -Wall -fPIC -fno-omit-frame-pointer -Wno-deprecated \
# -Wno-unused-parameter -Wno-unused-variable -Wno-unused-local-typedefs -Wno-sign-compare \
# -std=c++11')
CXXFLAGS('-g -O2 -pipe -W -Wall -fPIC -fno-omit-frame-pointer -Wno-deprecated \
-Wno-unused-parameter -Wno-unused-variable -Wno-unused-local-typedefs -Wno-sign-compare \
-std=c++11')
#for profiler
#CPPFLAGS('-D__const__= -Dtypeof=__typeof__ -DUSE_PTHREAD -DUSE_XBOX -DBAIDU_RPC_ENABLE_CPU_PROFILER -DBAIDU_RPC_ENABLE_HEAP_PROFILER')
#Compiling idl under C++11 reports "error: ‘typeof’ was not declared in this scope"; -Dtypeof=__typeof__ is required
CPPFLAGS('-D__const__= -Dtypeof=__typeof__ -DUSE_PTHREAD')
#Linker flags.
LDFLAGS('-lpthread -lcrypto -lrt -lssl -ldl -lz')
#Dependency modules
CONFIGS('baidu/base/baidu-rpc@ci-base')
CONFIGS('baidu/im-common/mempool@mempool_1-0-1_BRANCH@git_branch')
CONFIGS('baidu/third-party/opencv@master@git_branch')
CONFIGS('public/configure@configure_1-2-17-0_PD_BL')
#CONFIGS('lib2-64/ullib@ullib_3-1-135-21782_PD_BL')
CONFIGS('third-64/boost@boost_1-63-0-101_PD_BL')
CONFIGS('public/bthread@ci-base')
CONFIGS('third-64/protobuf@protobuf_2-4-1-1100_PD_BL')
#CONFIGS('third-64/protobuf@protobuf_3-1-0-6209_PD_BL')
# for ut
CONFIGS('third-64/gtest@1.7.2.0')
# for profiler
#CONFIGS('thirdsrc/tcmalloc@2.5.0.5977', Libraries('libtcmalloc_and_profiler.a'))
# McCache
CONFIGS('baidu/base/cache@cache_3-1-7-21784_PD_BL')
INCPATHS('$OUT/include/')
PROTOFLAGS('--proto_path=.')
#ServiceGenerator
HEADERS(GLOB_GEN_SRCS('./proto/*.h'), '$INC/')
HEADERS(GLOB('./plugin/*.h'), '$INC/plugin')
Application('pdcodegen', Sources(GLOB('plugin/*.cc'), 'proto/pds_option.proto',
'src/pdcodegen.cpp', IncludePaths('. ./proto/ $OUT/include')))
HEADERS(GLOB_GEN_SRCS('./proto/*.h'), '$INC/')
HEADERS(GLOB('./proto/*.proto'), '$INC/proto')
HEADERS(GLOB('./common/*.h'), '$INC/common')
HEADERS(GLOB('./op/*.h'), '$INC/op')
HEADERS(GLOB('./framework/*.h'), '$INC/framework')
cpp_source_dirs = []
cpp_source_dirs.append('common/*.cpp')
cpp_source_dirs.append('op/*.cpp')
cpp_source_dirs.append('framework/*.cpp')
cpp_source_dirs.append('proto/*.proto')
#Allow .proto files as sources
PROTOFLAGS('--plugin=protoc-gen-pdcodegen=plugin/pdcodegen --pdcodegen_out proto --proto_path=.')
#StaticLib
StaticLibrary('pdserving',
Sources(GLOB(' '.join(cpp_source_dirs)), 'src/pdserving.cpp'),
LinkFlags('-lpthread -lcrypto -lm -lrt -lssl -ldl -lz'))
#Executables
Application('pdserving',
Sources(GLOB(' '.join(cpp_source_dirs)), 'src/pdserving.cpp'),
LinkFlags('-lpthread -lcrypto -lm -lrt -lssl -ldl -lz '))
Application('pdclient',
Sources(GLOB(' '.join(cpp_source_dirs)), 'src/pdclient.cpp'),
LinkFlags('-lpthread -lcrypto -lm -lrt -lssl -ldl -lz'))
#Unit tests
ut_include='./include ./unittest $OUT/include'
ut_sources=GLOB('./unittest/*.cpp')
ut_cppflag='-Dprivate=public -Dprotected=public -DUNIT_TEST -std=c++11 -usercode_in_pthread -DUSE_PTHREAD'
ut_ldflags='-lpthread -lssl -lcrypto -lrt -ldl -lz -std=c++11 -usercode_in_pthread '
ut_gdbflag='-O0 -g -fpermissive -std=c++11 -usercode_in_pthread'
UTApplication(
'test_pdserving',
Sources(
ut_sources,
IncludePaths(ut_include),
CppFlags(ut_cppflag),
CxxFlags(ut_gdbflag)
),
Libraries('$OUT/lib/libpdserving.a'),
LinkFlags(ut_ldflags),
UTOnServer(True))
OUTPUT('./conf', '$OUT/')
OUTPUT('./data', '$OUT/')
OUTPUT('./scripts/images', '$OUT/data/')
OUTPUT('./scripts/start.sh', '$OUT/bin/')
#
# bcloud default makefile
# more details please refer to ./.bcloud.cache/makefile.*
#
.SILENT:
#
#makefile-self location, must be always on the top!!!
#
MAKEFILE_PATH_DEFAULT := $(word $(words $(MAKEFILE_LIST)), $(MAKEFILE_LIST))
MAKEFILE_DIR_DEFAULT := $(shell cd $(dir $(MAKEFILE_PATH_DEFAULT)) && pwd)
MAKEFILE_DEAULT := 'imported'
#
#Global Configs
#
WORK_ROOT := $(shell cd $(MAKEFILE_DIR_DEFAULT)/../../.. && pwd)
GCC_PATH := /opt/compiler/gcc-4.8.2/bin
#
#import global configs
#
ifneq ($(MAKEFILE_CONFIG), 'imported')
include $(MAKEFILE_DIR_DEFAULT)/.bcloud.cache/makefile.config
endif
.PHONY: all clean distclean cleanall help
all: main
clean:
echo removing $(WORK_ROOT)/bc_out/baidu/paddle-serving/predictor/\*.o ...
find $(WORK_ROOT)/bc_out/baidu/paddle-serving/predictor -name "*.o" | xargs rm -rf
echo removing $(WORK_ROOT)/bc_out/baidu/paddle-serving/predictor/output/bin ...
rm -rf $(WORK_ROOT)/bc_out/baidu/paddle-serving/predictor/output/bin
echo removing $(WORK_ROOT)/bc_out/baidu/paddle-serving/predictor/output/lib ...
rm -rf $(WORK_ROOT)/bc_out/baidu/paddle-serving/predictor/output/lib
echo removing $(WORK_ROOT)/bc_out/baidu/paddle-serving/predictor/output/so ...
rm -rf $(WORK_ROOT)/bc_out/baidu/paddle-serving/predictor/output/so
echo removing $(WORK_ROOT)/bc_out/baidu/paddle-serving/predictor/output/test ...
rm -rf $(WORK_ROOT)/bc_out/baidu/paddle-serving/predictor/output/test
echo removing $(WORK_ROOT)/baidu/paddle-serving/predictor/output ...
rm -rf $(WORK_ROOT)/baidu/paddle-serving/predictor/output
distclean:
echo removing $(WORK_ROOT)/baidu/paddle-serving/predictor/Makefile ...
rm -rf $(WORK_ROOT)/baidu/paddle-serving/predictor/Makefile
echo removing $(WORK_ROOT)/baidu/paddle-serving/predictor/.bcloud.cache/makefile.baidu_paddle-serving_predictor ...
rm -rf $(WORK_ROOT)/baidu/paddle-serving/predictor/.bcloud.cache/makefile.baidu_paddle-serving_predictor
echo removing $(WORK_ROOT)/bc_out/baidu/paddle-serving/predictor ...
rm -rf $(WORK_ROOT)/bc_out/baidu/paddle-serving/predictor
echo removing $(WORK_ROOT)/baidu/paddle-serving/predictor/output ...
rm -rf $(WORK_ROOT)/baidu/paddle-serving/predictor/output
echo -e '\033[32m[NOTICE]\033[0m'
echo -e \\tplease run \'bcloud local -M\' before next \'make\'!!!
cleanall:
echo removing $(WORK_ROOT)/bc_out ...
rm -rf $(WORK_ROOT)/bc_out
echo removing $(WORK_ROOT)/baidu/paddle-serving/predictor/output ...
rm -rf $(WORK_ROOT)/baidu/paddle-serving/predictor/output
echo removing $(WORK_ROOT)/baidu/paddle-serving/predictor/Makefile ...
rm -rf $(WORK_ROOT)/baidu/paddle-serving/predictor/Makefile
echo removing $(WORK_ROOT)/baidu/paddle-serving/predictor/.bcloud.cache/makefile\* ...
rm -rf $(WORK_ROOT)/baidu/paddle-serving/predictor/.bcloud.cache/makefile*
echo -e '\033[32m[NOTICE]\033[0m'
echo -e \\tplease run \'bcloud local or bcloud local -M ALL\' before next \'make\'!!!
app/ecom/elib/ecommon-lib: app/ecom/elib/ecommon-lib_deps
baidu/base/protobuf-json: baidu/base/protobuf-json_deps
baidu/base/common: baidu/base/common_deps
third-64/gtest: third-64/gtest_deps
baidu/base/bvar: baidu/base/bvar_deps
public/protobuf-json: public/protobuf-json_deps
baidu/base/cache: baidu/base/cache_deps
baidu/base/spreg: baidu/base/spreg_deps
baidu/bns/naming-lib: baidu/bns/naming-lib_deps
lib2-64/bsl: lib2-64/bsl_deps
third-64/gflags: third-64/gflags_deps
third-64/pcre: third-64/pcre_deps
lib2-64/cache: lib2-64/cache_deps
baidu/base/bthread: baidu/base/bthread_deps
third-64/leveldb: third-64/leveldb_deps
lib2-64/ullib: lib2-64/ullib_deps
public/common: public/common_deps
public/bvar: public/bvar_deps
baidu/base/bsl: baidu/base/bsl_deps
baidu/base/configure: baidu/base/configure_deps
public/spreg: public/spreg_deps
public/configure: public/configure_deps
baidu/base/ullib: baidu/base/ullib_deps
baidu/base/baidu-rpc: baidu/base/baidu-rpc_deps
third-64/libevent: third-64/libevent_deps
baidu/third-party/opencv: baidu/third-party/opencv_deps
baidu/base/dict: baidu/base/dict_deps
op/oped/noah/webfoot/naming-lib: op/oped/noah/webfoot/naming-lib_deps
baidu/elib/ecommon-lib: baidu/elib/ecommon-lib_deps
public/bthread: public/bthread_deps
public/noah/giano-lib/release/baas-lib-c: public/noah/giano-lib/release/baas-lib-c_deps
app/ecom/im/mempool: app/ecom/im/mempool_deps
baidu/base/mcpack2pb: baidu/base/mcpack2pb_deps
third-64/boost: third-64/boost_deps
public/baidu-rpc: public/baidu-rpc_deps
public/mcpack2pb: public/mcpack2pb_deps
baidu/base/iobuf: baidu/base/iobuf_deps
public/iobuf: public/iobuf_deps
baidu/im-common/mempool: baidu/im-common/mempool_deps
lib2-64/dict: lib2-64/dict_deps
help:
echo -e 'all available targets of make($(MAKE_VERSION)):\n'
echo ' all : will compile all targets and run release.bcloud'
echo ' no-release.bcloud: will compile all targets and not run release.bcloud'
echo ' no-ut : will compile all targets without ut and run release.bcloud'
echo ' test : will compile all targets, run ut and run release.bcloud'
echo ' clean : will only cleanup Intermediate files(such as .o, .so, .a, ut and bin) of main module:'
echo ' $(WORK_ROOT)/bc_out/baidu/paddle-serving/predictor/output/bin'
echo ' $(WORK_ROOT)/bc_out/baidu/paddle-serving/predictor/output/lib'
echo ' $(WORK_ROOT)/bc_out/baidu/paddle-serving/predictor/output/so'
echo ' $(WORK_ROOT)/bc_out/baidu/paddle-serving/predictor/\*.o'
echo ' $(WORK_ROOT)/bc_out/baidu/paddle-serving/predictor/output/test'
echo ' $(WORK_ROOT)/baidu/paddle-serving/predictor/output'
echo ' distclean : will cleanup makefile, intermediate files(such as .o, .so, .a, ut and bin) and pb.cc/pb.h idl.h/idl.cpp of main module:'
echo ' $(WORK_ROOT)/baidu/paddle-serving/predictor/Makefile'
echo ' $(WORK_ROOT)/baidu/paddle-serving/predictor/.bcloud.cache/makefile.baidu_paddle-serving_predictor'
echo ' $(WORK_ROOT)/bc_out/baidu/paddle-serving/predictor'
echo ' $(WORK_ROOT)/baidu/paddle-serving/predictor/output'
echo ' cleanall : will cleanup makefiles, intermediate files(such as .o, .so, .a, ut and bin) and pb.cc/pb.h idl.h/idl.cpp of all modules:'
echo ' $(WORK_ROOT)/bc_out'
echo ' $(WORK_ROOT)/baidu/paddle-serving/predictor/output'
echo ' $(WORK_ROOT)/baidu/paddle-serving/predictor/Makefile'
echo ' $(WORK_ROOT)/baidu/paddle-serving/predictor/.bcloud.cache/makefile*'
echo ' help : list all available make targets'
echo -e '\ntargets for the compile of dependency module:'
echo ' app/ecom/elib/ecommon-lib'
echo ' app/ecom/im/mempool'
echo ' baidu/base/baidu-rpc'
echo ' baidu/base/bsl'
echo ' baidu/base/bthread'
echo ' baidu/base/bvar'
echo ' baidu/base/cache'
echo ' baidu/base/common'
echo ' baidu/base/configure'
echo ' baidu/base/dict'
echo ' baidu/base/iobuf'
echo ' baidu/base/mcpack2pb'
echo ' baidu/base/protobuf-json'
echo ' baidu/base/spreg'
echo ' baidu/base/ullib'
echo ' baidu/bns/naming-lib'
echo ' baidu/elib/ecommon-lib'
echo ' baidu/im-common/mempool'
echo ' baidu/third-party/opencv'
echo ' lib2-64/bsl'
echo ' lib2-64/cache'
echo ' lib2-64/dict'
echo ' lib2-64/ullib'
echo ' op/oped/noah/webfoot/naming-lib'
echo ' public/baidu-rpc'
echo ' public/bthread'
echo ' public/bvar'
echo ' public/common'
echo ' public/configure'
echo ' public/iobuf'
echo ' public/mcpack2pb'
echo ' public/noah/giano-lib/release/baas-lib-c'
echo ' public/protobuf-json'
echo ' public/spreg'
echo ' third-64/boost'
echo ' third-64/gflags'
echo ' third-64/gtest'
echo ' third-64/leveldb'
echo ' third-64/libevent'
echo ' third-64/pcre'
#
#import dependency modules
#
ifneq ($(MAKEFILE_BAIDU_PADDLE-SERVING_PREDICTOR), 'imported')
include $(MAKEFILE_DIR_DEFAULT)/.bcloud.cache/makefile.baidu_paddle-serving_predictor
endif
This diff is collapsed.
Global:
tool: bcloud
Default:
profile: [change]
Profiles:
- profile:
name: change
command: bcloud ut
release: true
......@@ -36,7 +36,7 @@ public:
bthread_mutex_init(&_mutex, NULL);
FILE* fp = fopen(file_name.c_str(), "r");
if (!fp) {
LOG(FATAL) << "Failed open data file: "
LOG(ERROR) << "Failed open data file: "
<< file_name;
return -1;
}
......@@ -58,7 +58,7 @@ public:
for (size_t ri = 0; ri < buf_size; ri++) {
SparseRequest* req = new SparseRequest();
if (generate_one_req(*req, batch_size) != 0) {
LOG(FATAL) << "Failed generate req at: " << ri;
LOG(ERROR) << "Failed generate req at: " << ri;
fclose(fp);
return -1;
}
......@@ -255,7 +255,7 @@ void* work(void* p) {
InputData* input = arg->input;
PredictorApi* api = arg->api;
if (api->thrd_initialize() != 0) {
LOG(FATAL) << "Failed init api in thrd:" << bthread_self();
LOG(ERROR) << "Failed init api in thrd:" << bthread_self();
return NULL;
}
Response res;
......@@ -264,7 +264,7 @@ void* work(void* p) {
api->thrd_clear();
Predictor* predictor = api->fetch_predictor("sparse_cnn");
if (!predictor) {
LOG(FATAL) << "Failed fetch predictor: sparse_cnn";
LOG(ERROR) << "Failed fetch predictor: sparse_cnn";
continue;
}
SparseRequest* req = input->next_req();
......@@ -272,7 +272,7 @@ void* work(void* p) {
timeval start;
gettimeofday(&start, NULL);
if (predictor->inference(req, &res) != 0) {
LOG(FATAL) << "failed call predictor with req:"
LOG(ERROR) << "failed call predictor with req:"
<< req->ShortDebugString();
api->free_predictor(predictor);
continue;
......@@ -298,20 +298,20 @@ int main(int argc, char** argv) {
int qps = atoi(argv[4]);
PredictorApi api;
if (api.create("./conf", "predictors.conf") != 0) {
LOG(FATAL) << "Failed create predictors api!";
LOG(ERROR) << "Failed create predictors api!";
return -1;
}
InputData data;
if (data.create(
//"./data/feature", req_buffer, batch_size, qps) != 0) {
"./data/pure_feature", req_buffer, batch_size, qps) != 0) {
LOG(FATAL) << "Failed create inputdata!";
LOG(ERROR) << "Failed create inputdata!";
return -1;
}
Arg arg = {&api, &data};
bthread_t* threads = new bthread_t[thread_num];
if (!threads) {
LOG(FATAL) << "Failed create threads, num:" << thread_num;
LOG(ERROR) << "Failed create threads, num:" << thread_num;
return -1;
}
for (int i = 0; i < thread_num; ++i) {
......
......@@ -318,7 +318,7 @@ public:
for (size_t ri = 0; ri < buf_size; ri++) {
Request* req = new Request();
if (generate_one_req(*req, batch_size) != 0) {
LOG(FATAL) << "Failed generate req at: " << ri;
LOG(ERROR) << "Failed generate req at: " << ri;
//fclose(fp);
return -1;
}
......@@ -469,19 +469,19 @@ void* work(void* p) {
Arg* arg = (Arg*) p;
InputData* input = arg->input;
if (PredictorApi::instance().thrd_initialize() != 0) {
LOG(FATAL) << "Failed create bthread local predictor";
LOG(ERROR) << "Failed create bthread local predictor";
return NULL;
}
Response res;
LOG(WARNING) << "Thread entry!";
while (true) {
if (PredictorApi::instance().thrd_clear() != 0) {
LOG(FATAL) << "Failed thrd clear predictor";
LOG(ERROR) << "Failed thrd clear predictor";
return NULL;
}
Predictor* predictor = PredictorApi::instance().fetch_predictor("wasq");
if (!predictor) {
LOG(FATAL) << "Failed fetch predictor: wasq";
LOG(ERROR) << "Failed fetch predictor: wasq";
return NULL;
}
Request* req = input->next_req();
......@@ -489,7 +489,7 @@ void* work(void* p) {
timeval start;
gettimeofday(&start, NULL);
if (predictor->inference(req, &res) != 0) {
LOG(FATAL) << "failed call predictor with req:"
LOG(ERROR) << "failed call predictor with req:"
<< req->ShortDebugString();
return NULL;
}
......@@ -505,7 +505,7 @@ void* work(void* p) {
//printf("done\n");
}
if (PredictorApi::instance().thrd_finalize() != 0) {
LOG(FATAL) << "Failed thrd finalize predictor api";
LOG(ERROR) << "Failed thrd finalize predictor api";
}
LOG(WARNING) << "Thread exit!";
return NULL;
......@@ -520,19 +520,19 @@ int main(int argc, char** argv) {
int thread_num = atoi(argv[3]);
int qps = atoi(argv[4]);
if (PredictorApi::instance().create("./conf", "predictors.conf") != 0) {
LOG(FATAL) << "Failed create predictors api!";
LOG(ERROR) << "Failed create predictors api!";
return -1;
}
InputData data;
if (data.create(
"./data/test_features_sys", req_buffer, batch_size, qps) != 0) {
LOG(FATAL) << "Failed create inputdata!";
LOG(ERROR) << "Failed create inputdata!";
return -1;
}
Arg arg = {NULL, &data};
bthread_t* threads = new bthread_t[thread_num];
if (!threads) {
LOG(FATAL) << "Failed create threads, num:" << thread_num;
LOG(ERROR) << "Failed create threads, num:" << thread_num;
return -1;
}
for (int i = 0; i < thread_num; ++i) {
......
......@@ -30,7 +30,7 @@ int create_req(Request& req) {
FILE* fp = fopen(TEST_IMAGE_PATH, "rb");
if (!fp) {
LOG(FATAL) << "Failed open image: " << TEST_IMAGE_PATH;
LOG(ERROR) << "Failed open image: " << TEST_IMAGE_PATH;
return -1;
}
......@@ -38,7 +38,7 @@ int create_req(Request& req) {
size_t isize = ftell(fp);
char* ibuf = new(std::nothrow) char[isize];
if (!ibuf) {
LOG(FATAL) << "Failed malloc image buffer";
LOG(ERROR) << "Failed malloc image buffer";
fclose(fp);
return -1;
}
......@@ -47,7 +47,7 @@ int create_req(Request& req) {
fread(ibuf, sizeof(ibuf[0]), isize, fp);
XImageReqInstance* ins = req.add_instances();
if (!ins) {
LOG(FATAL) << "Failed create req instance";
LOG(ERROR) << "Failed create req instance";
delete[] ibuf;
fclose(fp);
return -1;
......@@ -87,13 +87,13 @@ void print_res(
buf.append(json);
butil::IOBufAsZeroCopyInputStream wrapper(buf);
if (!json2pb::JsonToProtoMessage(&wrapper, &json_msg, &err_string)) {
LOG(FATAL) << "Failed parse json from str:" << json;
LOG(ERROR) << "Failed parse json from str:" << json;
return ;
}
uint32_t csize = json_msg.categories_size();
if (csize <= 0) {
LOG(FATAL) << "sample-" << si << "has no"
LOG(ERROR) << "sample-" << si << "has no"
<< "categories props";
continue;
}
......@@ -119,7 +119,7 @@ int main(int argc, char** argv) {
PredictorApi api;
if (api.create("./conf", "predictors.prototxt") != 0) {
LOG(FATAL) << "Failed create predictors api!";
LOG(ERROR) << "Failed create predictors api!";
return -1;
}
......@@ -136,7 +136,7 @@ int main(int argc, char** argv) {
Predictor* predictor = api.fetch_predictor("ximage");
if (!predictor) {
LOG(FATAL) << "Failed fetch predictor: wasq";
LOG(ERROR) << "Failed fetch predictor: wasq";
return -1;
}
......@@ -149,7 +149,7 @@ int main(int argc, char** argv) {
butil::IOBufBuilder debug_os;
if (predictor->debug(&req, &res, &debug_os) != 0) {
LOG(FATAL) << "failed call predictor with req:"
LOG(ERROR) << "failed call predictor with req:"
<< req.ShortDebugString();
return -1;
}
......
......@@ -37,7 +37,7 @@ namespace sdk_cpp {
#define ASSIGN_CONF_ITEM(dest, src, fail) \
do { \
if (!src.init) { \
LOG(FATAL) << "Cannot assign an unintialized item: " \
LOG(ERROR) << "Cannot assign an unintialized item: " \
<< #src << " to dest: " << #dest; \
return fail; \
} \
......
......@@ -27,7 +27,7 @@ namespace sdk_cpp {
do { \
param = butil::get_object<T>(); \
if (!param) { \
LOG(FATAL) << "Failed get object from pool" \
LOG(ERROR) << "Failed get object from pool" \
<< ", arg:" << #param << "type: " \
<< #T; \
return err; \
......
......@@ -50,7 +50,7 @@ public:
std::map<std::string, Endpoint*>::iterator it
= _endpoints.find(ep_name);
if (it == _endpoints.end() || !it->second) {
LOG(FATAL) << "Failed fetch predictor:"
LOG(ERROR) << "Failed fetch predictor:"
<< ", ep_name: " << ep_name;
return NULL;
}
......@@ -62,7 +62,7 @@ public:
std::map<std::string, Endpoint*>::iterator it
= _endpoints.find(ep_name);
if (it == _endpoints.end() || !it->second) {
LOG(FATAL) << "Failed fetch predictor:"
LOG(ERROR) << "Failed fetch predictor:"
<< ", ep_name: " << ep_name;
return NULL;
}
......@@ -72,7 +72,7 @@ public:
int free_predictor(Predictor* predictor) {
const Stub* stub = predictor->stub();
if (!stub || stub->return_predictor(predictor) != 0) {
LOG(FATAL) << "Failed return predictor via stub";
LOG(ERROR) << "Failed return predictor via stub";
return -1;
}
......
......@@ -105,7 +105,7 @@ public:
std::string::size_type kv_delim_pos = kv_pair_str.find(KV_DELIM, 0);
if (kv_delim_pos == std::string::npos) {
LOG(FATAL) << "invalid kv pair: " << kv_pair_str.c_str();
LOG(ERROR) << "invalid kv pair: " << kv_pair_str.c_str();
continue;
}
......@@ -167,7 +167,7 @@ public:
}
void update_average(int64_t acc) {
LOG(FATAL) << "Cannot update average to a LatencyRecorder";
LOG(ERROR) << "Cannot update average to a LatencyRecorder";
}
private:
......@@ -181,7 +181,7 @@ public:
}
void update_latency(int64_t acc) {
LOG(FATAL) << "Cannot update latency to a AverageWrapper";
LOG(ERROR) << "Cannot update latency to a AverageWrapper";
}
void update_average(int64_t acc) {
......@@ -307,7 +307,7 @@ public:
std::map<std::string, BvarWrapper*>::iterator iter =
_avg_bvars.find(AVG_PREFIX + name);
if (iter == _avg_bvars.end()) {
LOG(FATAL) << "Not found average record:avg_" << name;
LOG(ERROR) << "Not found average record:avg_" << name;
return ;
}
......@@ -318,7 +318,7 @@ public:
std::map<std::string, BvarWrapper*>::iterator iter =
_ltc_bvars.find(LTC_PREFIX + name);
if (iter == _ltc_bvars.end()) {
LOG(FATAL) << "Not found latency record:ltc_" << name;
LOG(ERROR) << "Not found latency record:ltc_" << name;
return ;
}
......
......@@ -31,7 +31,7 @@ int WeightedRandomRender::initialize(
std::vector<std::string> splits;
if (str_split(weights, WEIGHT_SEPERATOR, &splits) != 0) {
LOG(FATAL) << "Failed split string:" <<
LOG(ERROR) << "Failed split string:" <<
weights;
return -1;
}
......@@ -43,7 +43,7 @@ int WeightedRandomRender::initialize(
uint32_t ratio = strtoul(
splits[wi].c_str(), &end_pos, 10);
if (end_pos == splits[wi].c_str()) {
LOG(FATAL) << "Error ratio(uint32) format:"
LOG(ERROR) << "Error ratio(uint32) format:"
<< splits[wi] << " at " << wi;
return -1;
}
......@@ -53,7 +53,7 @@ int WeightedRandomRender::initialize(
}
if (_normalized_sum <= 0) {
LOG(FATAL) << "Zero normalized weight sum";
LOG(ERROR) << "Zero normalized weight sum";
return -1;
}
......@@ -61,11 +61,11 @@ int WeightedRandomRender::initialize(
<< ", count: " << _variant_weight_list.size()
<< ", normalized: " << _normalized_sum;
} catch (std::bad_cast& e) {
LOG(FATAL) << "Failed init WeightedRandomRender"
LOG(ERROR) << "Failed init WeightedRandomRender"
<< "from configure, err:" << e.what();
return -1;
} catch (...) {
LOG(FATAL) << "Failed init WeightedRandomRender"
LOG(ERROR) << "Failed init WeightedRandomRender"
<< "from configure, err message is unkown.";
return -1;
}
......@@ -82,7 +82,7 @@ Variant* WeightedRandomRender::route(
Variant* WeightedRandomRender::route(
const VariantList& variants) {
if (variants.size() != _variant_weight_list.size()) {
LOG(FATAL) << "#(Weights) is not equal #(Stubs)"
LOG(ERROR) << "#(Weights) is not equal #(Stubs)"
<< ", size: " << _variant_weight_list.size()
<< " vs. " << variants.size();
return NULL;
......@@ -101,7 +101,7 @@ Variant* WeightedRandomRender::route(
}
}
LOG(FATAL) << "Errors accurs in sampling, sample:"
LOG(ERROR) << "Errors accurs in sampling, sample:"
<< sample << ", total: " << _normalized_sum;
return NULL;
......
......@@ -26,7 +26,7 @@ int EndpointConfigManager::create(const char* path, const char* file) {
_endpoint_config_file = file;
if (load() != 0) {
LOG(FATAL) << "Failed reload endpoint config";
LOG(ERROR) << "Failed reload endpoint config";
return -1;
}
......@@ -40,7 +40,7 @@ int EndpointConfigManager::load() {
_endpoint_config_path.c_str(),
_endpoint_config_file.c_str(),
&sdk_conf) != 0) {
LOG(FATAL)
LOG(ERROR)
<< "Failed initialize endpoint list"
<< ", config: " << _endpoint_config_path
<< "/" << _endpoint_config_file;
......@@ -50,7 +50,7 @@ int EndpointConfigManager::load() {
VariantInfo default_var;
if (init_one_variant(sdk_conf.default_variant_conf(),
default_var) != 0) {
LOG(FATAL) << "Failed read default var conf";
LOG(ERROR) << "Failed read default var conf";
return -1;
}
......@@ -62,14 +62,14 @@ int EndpointConfigManager::load() {
EndpointInfo ep;
if (init_one_endpoint(sdk_conf.predictors(ei), ep,
default_var) != 0) {
LOG(FATAL) << "Failed read endpoint info at: "
LOG(ERROR) << "Failed read endpoint info at: "
<< ei;
return -1;
}
std::map<std::string, EndpointInfo>::iterator it;
if (_ep_map.find(ep.endpoint_name) != _ep_map.end()) {
LOG(FATAL) << "Cannot insert duplicated endpoint"
LOG(ERROR) << "Cannot insert duplicated endpoint"
<< ", ep name: " << ep.endpoint_name;
}
......@@ -77,14 +77,14 @@ int EndpointConfigManager::load() {
std::string, EndpointInfo>::iterator, bool> r
= _ep_map.insert(std::make_pair(ep.endpoint_name, ep));
if (!r.second) {
LOG(FATAL) << "Failed insert endpoint, name"
LOG(ERROR) << "Failed insert endpoint, name"
<< ep.endpoint_name;
return -1;
}
}
} catch (std::exception& e) {
LOG(FATAL) << "Failed load configure" << e.what();
LOG(ERROR) << "Failed load configure" << e.what();
return -1;
}
LOG(INFO)
......@@ -109,7 +109,7 @@ int EndpointConfigManager::init_one_endpoint(
PARSE_CONF_ITEM(conf, ep_router, endpoint_router, -1);
if (ep_router.init) {
if (ep_router.value != "WeightedRandomRender") {
LOG(FATAL) << "endpointer_router unrecognized " << ep_router.value;
LOG(ERROR) << "endpointer_router unrecognized " << ep_router.value;
return -1;
}
......@@ -120,7 +120,7 @@ int EndpointConfigManager::init_one_endpoint(
const configure::WeightedRandomRenderConf &router_conf =
conf.weighted_random_render_conf();
if (!router || router->initialize(router_conf) != 0) {
LOG(FATAL) << "Failed fetch valid ab test strategy"
LOG(ERROR) << "Failed fetch valid ab test strategy"
<< ", name:" << ep_router.value;
return -1;
}
......@@ -136,7 +136,7 @@ int EndpointConfigManager::init_one_endpoint(
VariantInfo var;
if (merge_variant(dft_var, conf.variants(vi),
var) != 0) {
LOG(FATAL) << "Failed merge variant info at: "
LOG(ERROR) << "Failed merge variant info at: "
<< vi;
return -1;
}
......@@ -145,7 +145,7 @@ int EndpointConfigManager::init_one_endpoint(
}
if (ep.vars.size() > 1 && ep.ab_test == NULL) {
LOG(FATAL) << "EndpointRouter must be configured, when"
LOG(ERROR) << "EndpointRouter must be configured, when"
<< " #Variants > 1.";
return -1;
}
......@@ -155,7 +155,7 @@ int EndpointConfigManager::init_one_endpoint(
<< ", count of variants: " << ep.vars.size() << ".";
} catch (std::exception& e) {
LOG(FATAL) << "Exception acccurs when load endpoint conf"
LOG(ERROR) << "Exception acccurs when load endpoint conf"
<< ", message: " << e.what();
return -1;
}
......@@ -215,7 +215,7 @@ int EndpointConfigManager::init_one_variant(
PARSE_CONF_ITEM(splits, var.splitinfo.tag_cands_str,
tag_candidates, -1);
if (parse_tag_values(var.splitinfo) != 0) {
LOG(FATAL) << "Failed parse tag_values:" <<
LOG(ERROR) << "Failed parse tag_values:" <<
var.splitinfo.tag_cands_str.value;
return -1;
}
......@@ -225,7 +225,7 @@ int EndpointConfigManager::init_one_variant(
tag, -1);
} catch (...) {
LOG(FATAL) << "Failed load variant from configure unit";
LOG(ERROR) << "Failed load variant from configure unit";
return -1;
}
......
......@@ -29,7 +29,7 @@ int Endpoint::initialize(const EndpointInfo& ep_info) {
const VariantInfo& var_info = ep_info.vars[vi];
Variant* var = new (std::nothrow) Variant;
if (!var || var->initialize(ep_info, var_info) != 0) {
LOG(FATAL) << "Failed initialize variant, tag:"
LOG(ERROR) << "Failed initialize variant, tag:"
<< var_info.parameters.route_tag.value
<< ", endpoint: " << ep_info.endpoint_name
<< ", var index: " << vi;
......@@ -48,7 +48,7 @@ int Endpoint::thrd_initialize() {
for (uint32_t vi = 0; vi < var_size; ++vi) {
Variant* var = _variant_list[vi];
if (!var || var->thrd_initialize()) {
LOG(FATAL) << "Failed thrd initialize var: " << vi;
LOG(ERROR) << "Failed thrd initialize var: " << vi;
return -1;
}
}
......@@ -61,7 +61,7 @@ int Endpoint::thrd_clear() {
for (uint32_t vi = 0; vi < var_size; ++vi) {
Variant* var = _variant_list[vi];
if (!var || var->thrd_clear()) {
LOG(FATAL) << "Failed thrd clear var: " << vi;
LOG(ERROR) << "Failed thrd clear var: " << vi;
return -1;
}
}
......@@ -74,7 +74,7 @@ int Endpoint::thrd_finalize() {
for (uint32_t vi = 0; vi < var_size; ++vi) {
Variant* var = _variant_list[vi];
if (!var || var->thrd_finalize()) {
LOG(FATAL) << "Failed thrd finalize var: " << vi;
LOG(ERROR) << "Failed thrd finalize var: " << vi;
return -1;
}
}
......@@ -91,7 +91,7 @@ Predictor* Endpoint::get_predictor(
}
if (!var) {
LOG(FATAL) << "get null var from endpoint.";
LOG(ERROR) << "get null var from endpoint.";
return NULL;
}
......@@ -104,7 +104,7 @@ Predictor* Endpoint::get_predictor() {
#endif
if (_variant_list.size() == 1) {
if (_variant_list[0] == NULL) {
LOG(FATAL) << "Not valid variant info";
LOG(ERROR) << "Not valid variant info";
return NULL;
}
return _variant_list[0]->get_predictor();
......@@ -117,7 +117,7 @@ int Endpoint::ret_predictor(Predictor* predictor) {
const Stub* stub = predictor->stub();
if (!stub || stub->return_predictor(
predictor) != 0) {
LOG(FATAL) << "Failed return predictor to pool";
LOG(ERROR) << "Failed return predictor to pool";
return -1;
}
......
......@@ -20,7 +20,7 @@ namespace sdk_cpp {
int PredictorApi::register_all() {
if (WeightedRandomRender::register_self() != 0) {
LOG(FATAL) << "Failed register WeightedRandomRender";
LOG(ERROR) << "Failed register WeightedRandomRender";
return -1;
}
......@@ -31,12 +31,12 @@ int PredictorApi::register_all() {
int PredictorApi::create(const char* path, const char* file) {
if (register_all() != 0) {
LOG(FATAL) << "Failed do register all!";
LOG(ERROR) << "Failed do register all!";
return -1;
}
if (_config_manager.create(path, file) != 0) {
LOG(FATAL) << "Failed create config manager from conf:"
LOG(ERROR) << "Failed create config manager from conf:"
<< path << "/" << file;
return -1;
}
......@@ -48,14 +48,14 @@ int PredictorApi::create(const char* path, const char* file) {
const EndpointInfo& ep_info = it->second;
Endpoint* ep = new (std::nothrow) Endpoint();
if (ep->initialize(ep_info) != 0) {
LOG(FATAL) << "Failed intialize endpoint:"
LOG(ERROR) << "Failed intialize endpoint:"
<< ep_info.endpoint_name;
return -1;
}
if (_endpoints.find(
ep_info.endpoint_name) != _endpoints.end()) {
LOG(FATAL) << "Cannot insert duplicated endpoint:"
LOG(ERROR) << "Cannot insert duplicated endpoint:"
<< ep_info.endpoint_name;
return -1;
}
......@@ -64,7 +64,7 @@ int PredictorApi::create(const char* path, const char* file) {
= _endpoints.insert(std::make_pair(
ep_info.endpoint_name, ep));
if (!r.second) {
LOG(FATAL) << "Failed insert endpoint:"
LOG(ERROR) << "Failed insert endpoint:"
<< ep_info.endpoint_name;
return -1;
}
......@@ -81,7 +81,7 @@ int PredictorApi::thrd_initialize() {
for (it = _endpoints.begin(); it != _endpoints.end(); ++it) {
Endpoint* ep = it->second;
if (ep->thrd_initialize() != 0) {
LOG(FATAL) << "Failed thrd initialize endpoint:"
LOG(ERROR) << "Failed thrd initialize endpoint:"
<< it->first;
return -1;
}
......@@ -97,7 +97,7 @@ int PredictorApi::thrd_clear() {
for (it = _endpoints.begin(); it != _endpoints.end(); ++it) {
Endpoint* ep = it->second;
if (ep->thrd_clear() != 0) {
LOG(FATAL) << "Failed thrd clear endpoint:"
LOG(ERROR) << "Failed thrd clear endpoint:"
<< it->first;
return -1;
}
......@@ -113,7 +113,7 @@ int PredictorApi::thrd_finalize() {
for (it = _endpoints.begin(); it != _endpoints.end(); ++it) {
Endpoint* ep = it->second;
if (ep->thrd_finalize() != 0) {
LOG(FATAL) << "Failed thrd finalize endpoint:"
LOG(ERROR) << "Failed thrd finalize endpoint:"
<< it->first;
return -1;
}
......
......@@ -38,7 +38,7 @@ int Variant::initialize(const EndpointInfo& ep_info,
const std::string& tag_value = split_info.tag_values[ti];
if (!stub || stub->initialize(var_info, ep_info.endpoint_name,
&split_info.split_tag.value, &tag_value) != 0) {
LOG(FATAL) << "Failed init stub from factory"
LOG(ERROR) << "Failed init stub from factory"
<< ", stub name: " << ep_info.stub_service
<< ", filter tag: " << tag_value;
return -1;
......@@ -48,7 +48,7 @@ int Variant::initialize(const EndpointInfo& ep_info,
std::map<std::string, Stub*>::iterator iter =
_stub_map.find(tag_value);
if (iter != _stub_map.end()) {
LOG(FATAL) << "duplicated tag value: "
LOG(ERROR) << "duplicated tag value: "
<< tag_value;
return -1;
}
......@@ -65,7 +65,7 @@ int Variant::initialize(const EndpointInfo& ep_info,
ep_info.stub_service);
if (!stub || stub->initialize(
var_info, _endpoint_name, NULL, NULL) != 0) {
LOG(FATAL) << "Failed init stub from factory"
LOG(ERROR) << "Failed init stub from factory"
<< ", stub name: " << ep_info.stub_service;
return -1;
}
......@@ -84,7 +84,7 @@ int Variant::thrd_initialize() {
for (iter = _stub_map.begin(); iter != _stub_map.end(); ++iter) {
Stub* stub = iter->second;
if (!stub || stub->thrd_initialize() != 0) {
LOG(FATAL) << "Failed thrd initialize stub: " << iter->first;
LOG(ERROR) << "Failed thrd initialize stub: " << iter->first;
return -1;
}
LOG(INFO) << "Succ thrd initialize stub:" << iter->first;
......@@ -103,7 +103,7 @@ int Variant::thrd_clear() {
for (iter = _stub_map.begin(); iter != _stub_map.end(); ++iter) {
Stub* stub = iter->second;
if (!stub || stub->thrd_clear() != 0) {
LOG(FATAL) << "Failed thrd clear stub: " << iter->first;
LOG(ERROR) << "Failed thrd clear stub: " << iter->first;
return -1;
}
}
......@@ -119,7 +119,7 @@ int Variant::thrd_finalize() {
for (iter = _stub_map.begin(); iter != _stub_map.end(); ++iter) {
Stub* stub = iter->second;
if (!stub || stub->thrd_finalize() != 0) {
LOG(FATAL) << "Failed thrd finalize stub: " << iter->first;
LOG(ERROR) << "Failed thrd finalize stub: " << iter->first;
return -1;
}
}
......