Commit f8acbeb1 authored by wangguibao

sdk-cpp

Change-Id: I5845dc156899b3c9ee6291c0498a52fcb0b3d47f
Parent: 7eb4fb03
@@ -75,6 +75,7 @@ include(flags)
 include(configure)
 include(generic)
 include(paddlepaddle)
+include(external/opencv)
 include_directories("${PADDLE_SERVING_SOURCE_DIR}")
@@ -104,3 +105,4 @@ add_subdirectory(mempool)
 add_subdirectory(predictor)
 add_subdirectory(inferencer-fluid-cpu)
 add_subdirectory(serving)
+add_subdirectory(sdk-cpp)
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
include (ExternalProject)
# NOTE: opencv is needed when linking with paddle-serving example
set(OPENCV_SOURCES_DIR ${THIRD_PARTY_PATH}/opencv)
set(OPENCV_INSTALL_DIR ${THIRD_PARTY_PATH}/install/opencv)
set(OPENCV_INCLUDE_DIR "${OPENCV_INSTALL_DIR}/include" CACHE PATH "Opencv include directory." FORCE)
INCLUDE_DIRECTORIES(${OPENCV_INCLUDE_DIR})
ExternalProject_Add(
extern_opencv
GIT_REPOSITORY "https://github.com/opencv/opencv"
GIT_TAG "3.2.0"
PREFIX ${OPENCV_SOURCES_DIR}
UPDATE_COMMAND ""
PATCH_COMMAND cmake -E copy ${CMAKE_SOURCE_DIR}/cmake/patch/opencv_ippicv_downloader.cmake ${OPENCV_SOURCES_DIR}/src/extern_opencv/3rdparty/ippicv/downloader.cmake
CMAKE_ARGS -DCMAKE_CXX_COMPILER=${CMAKE_CXX_COMPILER}
-DCMAKE_C_COMPILER=${CMAKE_C_COMPILER}
-DCMAKE_C_FLAGS=${CMAKE_C_FLAGS}
-DCMAKE_C_FLAGS_DEBUG=${CMAKE_C_FLAGS_DEBUG}
-DCMAKE_C_FLAGS_RELEASE=${CMAKE_C_FLAGS_RELEASE}
-DCMAKE_CXX_FLAGS=${OPENCV_CMAKE_CXX_FLAGS}
-DCMAKE_CXX_FLAGS_RELEASE=${CMAKE_CXX_FLAGS_RELEASE}
-DCMAKE_CXX_FLAGS_DEBUG=${CMAKE_CXX_FLAGS_DEBUG}
-DCMAKE_INSTALL_PREFIX=${OPENCV_INSTALL_DIR}
-DCMAKE_INSTALL_LIBDIR=${OPENCV_INSTALL_DIR}/lib
-DCMAKE_POSITION_INDEPENDENT_CODE=ON
-DBUILD_TESTS=OFF
-DBUILD_PERF_TESTS=OFF
-DCMAKE_BUILD_TYPE=${THIRD_PARTY_BUILD_TYPE}
-DWITH_EIGEN=OFF
-DWITH_JPEG=OFF
-DWITH_PNG=OFF
-DWITH_TIFF=OFF
-DBUILD_SHARED_LIBS=OFF
${EXTERNAL_OPTIONAL_ARGS}
CMAKE_CACHE_ARGS -DCMAKE_INSTALL_PREFIX:PATH=${OPENCV_INSTALL_DIR}
-DCMAKE_INSTALL_LIBDIR:PATH=${OPENCV_INSTALL_DIR}/lib
-DCMAKE_POSITION_INDEPENDENT_CODE:BOOL=ON
-DCMAKE_BUILD_TYPE:STRING=${THIRD_PARTY_BUILD_TYPE}
)
IF(WIN32)
IF(NOT EXISTS "${OPENCV_INSTALL_DIR}/lib/libopencv_core.lib")
add_custom_command(TARGET extern_opencv POST_BUILD
COMMAND cmake -E copy ${OPENCV_INSTALL_DIR}/lib/opencv_core.lib ${OPENCV_INSTALL_DIR}/lib/libopencv_core.lib
)
ENDIF()
IF(NOT EXISTS "${OPENCV_INSTALL_DIR}/lib/libopencv_imgcodecs.lib")
add_custom_command(TARGET extern_opencv POST_BUILD
COMMAND cmake -E copy ${OPENCV_INSTALL_DIR}/lib/opencv_imgcodecs.lib ${OPENCV_INSTALL_DIR}/lib/libopencv_imgcodecs.lib
)
ENDIF()
IF(NOT EXISTS "${OPENCV_INSTALL_DIR}/lib/libopencv_imgproc.lib")
add_custom_command(TARGET extern_opencv POST_BUILD
COMMAND cmake -E copy ${OPENCV_INSTALL_DIR}/lib/opencv_imgproc.lib ${OPENCV_INSTALL_DIR}/lib/libopencv_imgproc.lib
)
ENDIF()
set(OPENCV_CORE_LIB "${OPENCV_INSTALL_DIR}/lib/libopencv_core.lib")
set(OPENCV_IMGCODECS_LIB "${OPENCV_INSTALL_DIR}/lib/libopencv_imgcodecs.lib")
set(OPENCV_IMGPROC_LIB "${OPENCV_INSTALL_DIR}/lib/libopencv_imgproc.lib")
else(WIN32)
set(OPENCV_CORE_LIB "${OPENCV_INSTALL_DIR}/lib/libopencv_core.a")
set(OPENCV_IMGCODECS_LIB "${OPENCV_INSTALL_DIR}/lib/libopencv_imgcodecs.a")
set(OPENCV_IMGPROC_LIB "${OPENCV_INSTALL_DIR}/lib/libopencv_imgproc.a")
endif (WIN32)
add_library(opencv_core STATIC IMPORTED GLOBAL)
set_property(TARGET opencv_core PROPERTY IMPORTED_LOCATION ${OPENCV_CORE_LIB})
add_library(opencv_imgcodecs STATIC IMPORTED GLOBAL)
set_property(TARGET opencv_imgcodecs PROPERTY IMPORTED_LOCATION ${OPENCV_IMGCODECS_LIB})
add_library(opencv_imgproc STATIC IMPORTED GLOBAL)
set_property(TARGET opencv_imgproc PROPERTY IMPORTED_LOCATION ${OPENCV_IMGPROC_LIB})
include_directories(${OPENCV_INCLUDE_DIR})
add_dependencies(opencv_core extern_opencv)
ExternalProject_Get_Property(extern_opencv BINARY_DIR)
ADD_LIBRARY(ippicv STATIC IMPORTED GLOBAL)
SET_PROPERTY(TARGET ippicv PROPERTY IMPORTED_LOCATION ${BINARY_DIR}/3rdparty/ippicv/ippicv_lnx/lib/intel64/libippicv.a)
ADD_LIBRARY(IlmImf STATIC IMPORTED GLOBAL)
SET_PROPERTY(TARGET IlmImf PROPERTY IMPORTED_LOCATION ${BINARY_DIR}/3rdparty/lib/libIlmImf.a)
ADD_LIBRARY(libjasper STATIC IMPORTED GLOBAL)
SET_PROPERTY(TARGET libjasper PROPERTY IMPORTED_LOCATION ${BINARY_DIR}/3rdparty/lib/liblibjasper.a)
ADD_LIBRARY(libwebp STATIC IMPORTED GLOBAL)
SET_PROPERTY(TARGET libwebp PROPERTY IMPORTED_LOCATION ${BINARY_DIR}/3rdparty/lib/liblibwebp.a)
#ADD_LIBRARY(zlib STATIC IMPORTED GLOBAL)
#SET_PROPERTY(TARGET zlib PROPERTY IMPORTED_LOCATION ${BINARY_DIR}/3rdparty/lib/libzlib.a)
LIST(APPEND opencv_depend_libs
opencv_imgproc
opencv_core
ippicv
IlmImf
libjasper
libwebp
zlib)
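The imported static targets above (opencv_core, opencv_imgproc, opencv_imgcodecs) are what the sdk-cpp demos link against. A minimal sketch of the kind of usage this dependency enables — note the build above disables JPEG/PNG/TIFF support, so only in-memory Mat operations are shown; the snippet is illustrative and not part of this commit:

#include <opencv2/core.hpp>
#include <opencv2/imgproc.hpp>
#include <cstdio>

int main() {
    // Build a solid-color BGR image in memory, then resize it the way an
    // image-classification client would before serializing a request.
    cv::Mat img(480, 640, CV_8UC3, cv::Scalar(0, 0, 255));
    cv::Mat resized;
    cv::resize(img, resized, cv::Size(224, 224));  // 224x224 is an assumption
    printf("resized %dx%d -> %dx%d\n", img.cols, img.rows,
           resized.cols, resized.rows);
    return 0;
}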
@@ -114,6 +114,7 @@ SET(CMAKE_EXTRA_INCLUDE_FILES "")
 # https://github.com/PaddlePaddle/Paddle/issues/12773
 if (NOT WIN32)
 set(COMMON_FLAGS
+    -D__const__=
     -fPIC
     -fno-omit-frame-pointer
     -Wall
@@ -839,7 +839,7 @@ function(PROTOBUF_GENERATE_SERVING_CPP SRCS HDRS)
       ARGS --cpp_out=${CMAKE_CURRENT_BINARY_DIR}
            --pdcodegen_out=${CMAKE_CURRENT_BINARY_DIR}
            --plugin=protoc-gen-pdcodegen=${CMAKE_BINARY_DIR}/predictor/pdcodegen
-           --proto_path=${CMAKE_SOURCE_DIR}/predictor/proto
+           # --proto_path=${CMAKE_SOURCE_DIR}/predictor/proto
       ${_protobuf_include_path} ${ABS_FIL}
       DEPENDS ${ABS_FIL} ${Protobuf_PROTOC_EXECUTABLE}
       COMMENT "Running Paddle-serving C++ protocol buffer compiler on ${FIL}"
#
# The script downloads ICV package
#
# On return this will define:
# OPENCV_ICV_PATH - path to unpacked downloaded package
#
function(_icv_downloader)
# Commit SHA in the opencv_3rdparty repo
set(IPPICV_BINARIES_COMMIT "81a676001ca8075ada498583e4166079e5744668")
# Define actual ICV versions
if(APPLE)
set(OPENCV_ICV_PACKAGE_NAME "ippicv_macosx_20151201.tgz")
set(OPENCV_ICV_PACKAGE_HASH "4ff1fde9a7cfdfe7250bfcd8334e0f2f")
set(OPENCV_ICV_PLATFORM "macosx")
set(OPENCV_ICV_PACKAGE_SUBDIR "/ippicv_osx")
elseif(UNIX)
if(ANDROID AND NOT (ANDROID_ABI STREQUAL x86 OR ANDROID_ABI STREQUAL x86_64))
return()
endif()
set(OPENCV_ICV_PACKAGE_NAME "ippicv_linux_20151201.tgz")
set(OPENCV_ICV_PACKAGE_HASH "808b791a6eac9ed78d32a7666804320e")
set(OPENCV_ICV_PLATFORM "linux")
set(OPENCV_ICV_PACKAGE_SUBDIR "/ippicv_lnx")
elseif(WIN32 AND NOT ARM)
set(OPENCV_ICV_PACKAGE_NAME "ippicv_windows_20151201.zip")
set(OPENCV_ICV_PACKAGE_HASH "04e81ce5d0e329c3fbc606ae32cad44d")
set(OPENCV_ICV_PLATFORM "windows")
set(OPENCV_ICV_PACKAGE_SUBDIR "/ippicv_win")
else()
return() # Not supported
endif()
set(OPENCV_ICV_UNPACK_PATH "${CMAKE_BINARY_DIR}/3rdparty/ippicv")
set(OPENCV_ICV_PATH "${OPENCV_ICV_UNPACK_PATH}${OPENCV_ICV_PACKAGE_SUBDIR}")
if(DEFINED OPENCV_ICV_PACKAGE_DOWNLOADED
AND OPENCV_ICV_PACKAGE_DOWNLOADED STREQUAL OPENCV_ICV_PACKAGE_HASH
AND EXISTS ${OPENCV_ICV_PATH})
# Package has been downloaded and checked by the previous build
set(OPENCV_ICV_PATH "${OPENCV_ICV_PATH}" PARENT_SCOPE)
return()
else()
if(EXISTS ${OPENCV_ICV_UNPACK_PATH})
message(STATUS "ICV: Removing previous unpacked package: ${OPENCV_ICV_UNPACK_PATH}")
file(REMOVE_RECURSE ${OPENCV_ICV_UNPACK_PATH})
endif()
endif()
unset(OPENCV_ICV_PACKAGE_DOWNLOADED CACHE)
set(OPENCV_ICV_PACKAGE_ARCHIVE "${CMAKE_CURRENT_LIST_DIR}/downloads/${OPENCV_ICV_PLATFORM}-${OPENCV_ICV_PACKAGE_HASH}/${OPENCV_ICV_PACKAGE_NAME}")
get_filename_component(OPENCV_ICV_PACKAGE_ARCHIVE_DIR "${OPENCV_ICV_PACKAGE_ARCHIVE}" PATH)
if(EXISTS "${OPENCV_ICV_PACKAGE_ARCHIVE}")
file(MD5 "${OPENCV_ICV_PACKAGE_ARCHIVE}" archive_md5)
if(NOT archive_md5 STREQUAL OPENCV_ICV_PACKAGE_HASH)
message(WARNING "ICV: Local copy of ICV package has invalid MD5 hash: ${archive_md5} (expected: ${OPENCV_ICV_PACKAGE_HASH})")
file(REMOVE "${OPENCV_ICV_PACKAGE_ARCHIVE}")
file(REMOVE_RECURSE "${OPENCV_ICV_PACKAGE_ARCHIVE_DIR}")
endif()
endif()
if(NOT EXISTS "${OPENCV_ICV_PACKAGE_ARCHIVE}")
if(NOT DEFINED OPENCV_ICV_URL)
if(DEFINED ENV{OPENCV_ICV_URL})
set(OPENCV_ICV_URL $ENV{OPENCV_ICV_URL})
else()
set(OPENCV_ICV_URL "https://raw.githubusercontent.com/opencv/opencv_3rdparty/${IPPICV_BINARIES_COMMIT}/ippicv")
endif()
endif()
file(MAKE_DIRECTORY ${OPENCV_ICV_PACKAGE_ARCHIVE_DIR})
message(STATUS "ICV: Downloading ${OPENCV_ICV_PACKAGE_NAME}...")
execute_process(COMMAND wget --no-check-certificate -O "${OPENCV_ICV_PACKAGE_ARCHIVE}" "${OPENCV_ICV_URL}/${OPENCV_ICV_PACKAGE_NAME}" RESULT_VARIABLE __status)
if(NOT __status EQUAL 0)
message(FATAL_ERROR "ICV: Failed to download ICV package: ${OPENCV_ICV_PACKAGE_NAME}. Status=${__status}")
else()
# Don't remove this code, because EXPECTED_MD5 parameter doesn't fail "file(DOWNLOAD)" step
# on wrong hash
file(MD5 "${OPENCV_ICV_PACKAGE_ARCHIVE}" archive_md5)
if(NOT archive_md5 STREQUAL OPENCV_ICV_PACKAGE_HASH)
message(FATAL_ERROR "ICV: Downloaded copy of ICV package has invalid MD5 hash: ${archive_md5} (expected: ${OPENCV_ICV_PACKAGE_HASH})")
endif()
endif()
endif()
ocv_assert(EXISTS "${OPENCV_ICV_PACKAGE_ARCHIVE}")
ocv_assert(NOT EXISTS "${OPENCV_ICV_UNPACK_PATH}")
file(MAKE_DIRECTORY ${OPENCV_ICV_UNPACK_PATH})
ocv_assert(EXISTS "${OPENCV_ICV_UNPACK_PATH}")
message(STATUS "ICV: Unpacking ${OPENCV_ICV_PACKAGE_NAME} to ${OPENCV_ICV_UNPACK_PATH}...")
execute_process(COMMAND ${CMAKE_COMMAND} -E tar xz "${OPENCV_ICV_PACKAGE_ARCHIVE}"
WORKING_DIRECTORY "${OPENCV_ICV_UNPACK_PATH}"
RESULT_VARIABLE __result)
if(NOT __result EQUAL 0)
message(FATAL_ERROR "ICV: Failed to unpack ICV package from ${OPENCV_ICV_PACKAGE_ARCHIVE} to ${OPENCV_ICV_UNPACK_PATH} with error ${__result}")
endif()
ocv_assert(EXISTS "${OPENCV_ICV_PATH}")
set(OPENCV_ICV_PACKAGE_DOWNLOADED "${OPENCV_ICV_PACKAGE_HASH}" CACHE INTERNAL "ICV package hash")
message(STATUS "ICV: Package successfully downloaded")
set(OPENCV_ICV_PATH "${OPENCV_ICV_PATH}" PARENT_SCOPE)
endfunction()
_icv_downloader()
@@ -9,6 +9,9 @@ namespace predictor {
 // __thread bool p_thread_initialized = false;
 static void dynamic_resource_deleter(void* d) {
+#if 1
+    LOG(INFO) << "dynamic_resource_delete on " << bthread_self();
+#endif
     delete static_cast<DynamicResource*>(d);
 }
@@ -105,8 +108,12 @@ int Resource::thread_initialize() {
         }
     }
+#if 0
     LOG(INFO) << "Successfully thread initialized dynamic resource";
+#else
+    LOG(INFO) << bthread_self() << ": Successfully thread initialized dynamic resource " << p_dynamic_resource;
+#endif
     return 0;
 }
@@ -125,7 +132,11 @@ int Resource::thread_clear() {
     DynamicResource* p_dynamic_resource = (DynamicResource*) THREAD_GETSPECIFIC(_tls_bspec_key);
     if (p_dynamic_resource == NULL) {
+#if 0
         LOG(FATAL) << "tls dynamic resource shouldn't be null after thread_initialize";
+#else
+        LOG(FATAL) << bthread_self() << ": tls dynamic resource shouldn't be null after thread_initialize";
+#endif
         return -1;
     }
     if (p_dynamic_resource->clear() != 0) {
@@ -133,6 +144,7 @@ int Resource::thread_clear() {
         return -1;
     }
+    LOG(INFO) << bthread_self() << "Resource::thread_clear success";
     // ...
     return 0;
 }
include(src/CMakeLists.txt)
include(proto/CMakeLists.txt)
add_library(sdk-cpp ${sdk_cpp_srcs})
add_dependencies(sdk-cpp configure)
target_include_directories(sdk-cpp PUBLIC
${CMAKE_CURRENT_LIST_DIR}/include
${CMAKE_CURRENT_BINARY_DIR}/
${CMAKE_CURRENT_LIST_DIR}/../configure
${CMAKE_CURRENT_LIST_DIR}/../ullib/include
${CMAKE_CURRENT_BINARY_DIR}/../bsl/include
)
target_link_libraries(sdk-cpp brpc configure protobuf leveldb)
add_executable(ximage ${CMAKE_CURRENT_LIST_DIR}/demo/ximage.cpp)
target_include_directories(ximage PUBLIC
${CMAKE_CURRENT_LIST_DIR}/include
${CMAKE_CURRENT_BINARY_DIR}
${CMAKE_CURRENT_LIST_DIR}/../configure
${CMAKE_CURRENT_LIST_DIR}/../ullib/include
${CMAKE_CURRENT_BINARY_DIR}/../bsl/include)
target_link_libraries(ximage sdk-cpp -lpthread -lcrypto -lm -lrt -lssl -ldl
-lz)
add_executable(mapcnn_dense ${CMAKE_CURRENT_LIST_DIR}/demo/mapcnn_dense.cpp)
target_include_directories(mapcnn_dense PUBLIC
${CMAKE_CURRENT_LIST_DIR}/include
${CMAKE_CURRENT_BINARY_DIR}/
${CMAKE_CURRENT_LIST_DIR}/../configure
${CMAKE_CURRENT_LIST_DIR}/../ullib/include
${CMAKE_CURRENT_BINARY_DIR}/../bsl/include)
target_link_libraries(mapcnn_dense sdk-cpp -lpthread -lcrypto -lm -lrt -lssl
-ldl -lz)
add_executable(mapcnn_sparse ${CMAKE_CURRENT_LIST_DIR}/demo/mapcnn_sparse.cpp)
target_include_directories(mapcnn_sparse PUBLIC
${CMAKE_CURRENT_LIST_DIR}/include
${CMAKE_CURRENT_BINARY_DIR}/
${CMAKE_CURRENT_LIST_DIR}/../configure
${CMAKE_CURRENT_LIST_DIR}/../ullib/include
${CMAKE_CURRENT_BINARY_DIR}/../bsl/include)
target_link_libraries(mapcnn_sparse sdk-cpp -lpthread -lcrypto -lm -lrt -lssl
-ldl -lz)
[DefaultVariantInfo]
Tag : default
[.Connection]
ConnectTimeoutMilliSec: 2000
RpcTimeoutMilliSec: 20000
ConnectRetryCount : 2
MaxConnectionPerHost : 100
HedgeRequestTimeoutMilliSec: -1
HedgeFetchRetryCount : 2
BnsReloadIntervalSeconds : 10
ConnectionType : pooled
[.NamingInfo]
ClusterFilterStrategy : Default
LoadBalanceStrategy : la
[.RpcParameter]
# 0-NONE, 1-SNAPPY, 2-GZIP, 3-ZLIB, 4-LZ4
CompressType : 0
PackageSize : 20
Protocol : baidu_std
MaxChannelPerRequest : 3
[@Predictor]
name : ximage
service_name : baidu.paddle_serving.predictor.image_classification.ImageClassifyService
endpoint_router : WeightedRandomRender
[.WeightedRandomRender]
VariantWeightList : 50
[.@VariantInfo]
Tag : var1
[..NamingInfo]
Cluster : list://127.0.0.1:8010
#Cluster : list://10.88.158.21:8010
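In this conf format, [@Predictor] declares an endpoint whose name key is what clients pass to PredictorApi::fetch_predictor(), and the [.@VariantInfo] blocks weighted by VariantWeightList are chosen by the WeightedRandomRender router. A minimal client sketch, assuming the sdk-cpp API exactly as used in the demos later in this commit:

#include "common.h"
#include "predictor_sdk.h"

using baidu::paddle_serving::sdk_cpp::Predictor;
using baidu::paddle_serving::sdk_cpp::PredictorApi;

int main() {
    PredictorApi api;
    // Directory and file name match the layout the demos use.
    if (api.create("./conf", "predictors.conf") != 0) {
        return -1;
    }
    // "ximage" matches the [@Predictor] name declared above.
    Predictor* predictor = api.fetch_predictor("ximage");
    if (!predictor) {
        return -1;
    }
    // ... fill a service Request and call predictor->inference(req, &res) ...
    api.free_predictor(predictor);
    return 0;
}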
[DefaultVariantInfo]
Tag : default
[.Connection]
ConnectTimeoutMilliSec: 2000
RpcTimeoutMilliSec: 20000
ConnectRetryCount : 2
MaxConnectionPerHost : 100
HedgeRequestTimeoutMilliSec: -1
HedgeFetchRetryCount : 2
BnsReloadIntervalSeconds : 10
ConnectionType : pooled
[.NamingInfo]
ClusterFilterStrategy : Default
LoadBalanceStrategy : la
[.RpcParameter]
# 0-NONE, 1-SNAPPY, 2-GZIP, 3-ZLIB, 4-LZ4
CompressType : 0
PackageSize : 20
Protocol : baidu_std
MaxChannelPerRequest : 3
[@Predictor]
name : ximage
service_name : baidu.paddle_serving.predictor.image_classification.ImageClassifyService
endpoint_router : WeightedRandomRender
[.WeightedRandomRender]
VariantWeightList : 50
[.@VariantInfo]
Tag : var1
[..NamingInfo]
Cluster : list://127.0.0.1:8010
#Cluster : list://10.88.158.21:8010
[@Predictor]
name : dense_cnn
service_name : baidu.paddle_serving.fluid_engine.DefaultDenseService
endpoint_router : WeightedRandomRender
[.WeightedRandomRender]
VariantWeightList : 25
[.@VariantInfo]
Tag : var1
[..NamingInfo]
Cluster : list://10.194.83.21:8010
#Cluster : bns://opera-ps-mapcnn-000-nj03.MAP.nj03
[..Connection]
[@Predictor]
name : sparse_cnn
service_name : baidu.paddle_serving.fluid_engine.DefaultSparseService
endpoint_router : WeightedRandomRender
[.WeightedRandomRender]
VariantWeightList : 25
[.@VariantInfo]
Tag : var1
[..NamingInfo]
Cluster : list://10.194.83.21:8010
#Cluster : bns://opera-ps-mapcnn-000-nj03.MAP.nj03
[..Connection]
[@Predictor]
name : wasq
service_name : baidu.infinite.map_rnn.MapRnnService
endpoint_router : WeightedRandomRender
[.WeightedRandomRender]
VariantWeightList : 25
[.@VariantInfo]
Tag : var1
[..NamingInfo]
Cluster : list://127.0.0.1:8010
#Cluster : bns://opera-ps-mapcnn-000-nj03.MAP.nj03
[..Connection]
n01440764,丁鲷
n01443537,金鱼
n01484850,大白鲨
n01491361,虎鲨
n01494475,锤头鲨
n01496331,电鳐
n01498041,黄貂鱼
n01514668,公鸡
n01514859,母鸡
n01518878,鸵鸟
n01530575,燕雀
n01531178,金翅雀
n01532829,家朱雀
n01534433,灯芯草雀
n01537544,靛蓝雀,靛蓝鸟
n01558993,蓝鹀
n01560419,夜莺
n01580077,松鸦
n01582220,喜鹊
n01592084,山雀
n01601694,河鸟
n01608432,鸢(猛禽)
n01614925,秃头鹰
n01616318,秃鹫
n01622779,大灰猫头鹰
n01629819,欧洲火蝾螈
n01630670,普通蝾螈
n01631663,水蜥
n01632458,斑点蝾螈
n01632777,蝾螈,泥狗
n01641577,牛蛙
n01644373,树蛙
n01644900,尾蛙,铃蟾蜍,肋蟾蜍,尾蟾蜍
n01664065,红海龟
n01665541,皮革龟
n01667114,泥龟
n01667778,淡水龟
n01669191,箱龟
n01675722,带状壁虎
n01677366,普通鬣蜥
n01682714,美国变色龙
n01685808,鞭尾蜥蜴
n01687978,飞龙科蜥蜴
n01688243,褶边蜥蜴
n01689811,鳄鱼蜥蜴
n01692333,毒蜥
n01693334,绿蜥蜴
n01694178,非洲变色龙
n01695060,科莫多蜥蜴
n01697457,非洲鳄,尼罗河鳄鱼
n01698640,美国鳄鱼,鳄鱼
n01704323,三角龙
n01728572,雷蛇,蠕虫蛇
n01728920,环蛇,环颈蛇
n01729322,希腊蛇
n01729977,绿蛇,草蛇
n01734418,国王蛇
n01735189,袜带蛇,草蛇
n01737021,水蛇
n01739381,藤蛇
n01740131,夜蛇
n01742172,大蟒蛇
n01744401,岩石蟒蛇,岩蛇,蟒蛇
n01748264,印度眼镜蛇
n01749939,绿曼巴
n01751748,海蛇
n01753488,角腹蛇
n01755581,菱纹响尾蛇
n01756291,角响尾蛇
n01768244,三叶虫
n01770081,盲蜘蛛
n01770393,蝎子
n01773157,黑金花园蜘蛛
n01773549,谷仓蜘蛛
n01773797,花园蜘蛛
n01774384,黑寡妇蜘蛛
n01774750,狼蛛
n01775062,狼蜘蛛,狩猎蜘蛛
n01776313,壁虱
n01784675,蜈蚣
n01795545,黑松鸡
n01796340,松鸡,雷鸟
n01797886,披肩鸡,披肩榛鸡
n01798484,草原鸡,草原松鸡
n01806143,孔雀
n01806567,鹌鹑
n01807496,鹧鸪
n01817953,非洲灰鹦鹉
n01818515,金刚鹦鹉
n01819313,硫冠鹦鹉
n01820546,短尾鹦鹉
n01824575,褐翅鸦鹃
n01828970,蜜蜂
n01829413,犀鸟
n01833805,蜂鸟
n01843065,鹟䴕
n01843383,犀鸟
n01847000,野鸭
n01855032,红胸秋沙鸭
n01855672,鹅
n01860187,黑天鹅
n01871265,大象
n01872401,针鼹鼠
n01873310,鸭嘴兽
n01877812,沙袋鼠
n01882714,考拉,考拉熊
n01883070,袋熊
n01910747,水母
n01914609,海葵
n01917289,脑珊瑚
n01924916,扁形虫扁虫
n01930112,线虫,蛔虫
n01943899,海螺
n01944390,蜗牛
n01945685,鼻涕虫
n01950731,海参
n01955084,石鳖
n01968897,鹦鹉螺
n01978287,珍宝蟹
n01978455,石蟹
n01980166,招潮蟹
n01981276,帝王蟹,阿拉斯加蟹,阿拉斯加帝王蟹
n01983481,美国龙虾,缅因州龙虾
n01984695,大螯虾
n01985128,小龙虾
n01986214,寄居蟹
n01990800,等足目动物(明虾和螃蟹近亲)
n02002556,白鹳
n02002724,黑鹳
n02006656,鹭
n02007558,火烈鸟
n02009229,小蓝鹭
n02009912,美国鹭,大白鹭
n02011460,麻鸦
n02012849,鹤
n02013706,秧鹤
n02017213,欧洲水鸡,紫水鸡
n02018207,沼泽泥母鸡,水母鸡
n02018795,鸨
n02025239,红翻石鹬
n02027492,红背鹬,黑腹滨鹬
n02028035,红脚鹬
n02033041,半蹼鹬
n02037110,蛎鹬
n02051845,鹈鹕
n02056570,国王企鹅
n02058221,信天翁,大海鸟
n02066245,灰鲸
n02071294,杀人鲸,逆戟鲸,虎鲸
n02074367,海牛
n02077923,海狮
n02085620,奇瓦瓦
n02085782,日本猎犬
n02085936,马尔济斯犬
n02086079,狮子狗
n02086240,西施犬
n02086646,布莱尼姆猎犬
n02086910,巴比狗
n02087046,玩具犬
n02087394,罗得西亚长背猎狗
n02088094,阿富汗猎犬
n02088238,猎犬
n02088364,比格犬,猎兔犬
n02088466,侦探犬
n02088632,蓝色快狗
n02089078,黑褐猎浣熊犬
n02089867,沃克猎犬
n02089973,英国猎狐犬
n02090379,美洲赤狗
n02090622,俄罗斯猎狼犬
n02090721,爱尔兰猎狼犬
n02091032,意大利灰狗
n02091134,惠比特犬
n02091244,依比沙猎犬
n02091467,挪威猎犬
n02091635,奥达猎犬,水獭猎犬
n02091831,沙克犬,瞪羚猎犬
n02092002,苏格兰猎鹿犬,猎鹿犬
n02092339,威玛猎犬
n02093256,斯塔福德郡牛头梗,斯塔福德郡斗牛梗
n02093428,美国斯塔福德郡梗,美国比特斗牛梗,斗牛梗
n02093647,贝德灵顿梗
n02093754,边境梗
n02093859,凯丽蓝梗
n02093991,爱尔兰梗
n02094114,诺福克梗
n02094258,诺维奇梗
n02094433,约克郡梗
n02095314,刚毛猎狐梗
n02095570,莱克兰梗
n02095889,锡利哈姆梗
n02096051,艾尔谷犬
n02096177,凯恩梗
n02096294,澳大利亚梗
n02096437,丹迪丁蒙梗
n02096585,波士顿梗
n02097047,迷你雪纳瑞犬
n02097130,巨型雪纳瑞犬
n02097209,标准雪纳瑞犬
n02097298,苏格兰梗
n02097474,西藏梗,菊花狗
n02097658,丝毛梗
n02098105,软毛麦色梗
n02098286,西高地白梗
n02098413,拉萨阿普索犬
n02099267,平毛寻回犬
n02099429,卷毛寻回犬
n02099601,金毛猎犬
n02099712,拉布拉多猎犬
n02099849,乞沙比克猎犬
n02100236,德国短毛猎犬
n02100583,维兹拉犬
n02100735,英国谍犬
n02100877,爱尔兰雪达犬,红色猎犬
n02101006,戈登雪达犬
n02101388,布列塔尼犬猎犬
n02101556,黄毛,黄毛猎犬
n02102040,英国史宾格犬
n02102177,威尔士史宾格犬
n02102318,可卡犬,英国可卡犬
n02102480,萨塞克斯猎犬
n02102973,爱尔兰水猎犬
n02104029,哥威斯犬
n02104365,舒柏奇犬
n02105056,比利时牧羊犬
n02105162,马里努阿犬
n02105251,伯瑞犬
n02105412,凯尔皮犬
n02105505,匈牙利牧羊犬
n02105641,老英国牧羊犬
n02105855,喜乐蒂牧羊犬
n02106030,牧羊犬
n02106166,边境牧羊犬
n02106382,法兰德斯牧牛狗
n02106550,罗特韦尔犬
n02106662,德国牧羊犬,德国警犬,阿尔萨斯
n02107142,多伯曼犬,杜宾犬
n02107312,迷你杜宾犬
n02107574,大瑞士山地犬
n02107683,伯恩山犬
n02107908,Appenzeller狗
n02108000,EntleBucher狗
n02108089,拳师狗
n02108422,斗牛獒
n02108551,藏獒
n02108915,法国斗牛犬
n02109047,大丹犬
n02109525,圣伯纳德狗
n02109961,爱斯基摩犬,哈士奇
n02110063,雪橇犬,阿拉斯加爱斯基摩狗
n02110185,哈士奇
n02110341,达尔马提亚,教练车狗
n02110627,狮毛狗
n02110806,巴辛吉狗
n02110958,哈巴狗,狮子狗
n02111129,莱昂贝格狗
n02111277,纽芬兰岛狗
n02111500,大白熊犬
n02111889,萨摩耶犬
n02112018,博美犬
n02112137,松狮,松狮
n02112350,荷兰卷尾狮毛狗
n02112706,布鲁塞尔格林芬犬
n02113023,彭布洛克威尔士科基犬
n02113186,威尔士柯基犬
n02113624,玩具贵宾犬
n02113712,迷你贵宾犬
n02113799,标准贵宾犬
n02113978,墨西哥无毛犬
n02114367,灰狼
n02114548,白狼,北极狼
n02114712,红太狼,鬃狼,犬犬鲁弗斯
n02114855,狼,草原狼,刷狼,郊狼
n02115641,澳洲野狗,澳大利亚野犬
n02115913,豺
n02116738,非洲猎犬,土狼犬
n02117135,鬣狗
n02119022,红狐狸
n02119789,沙狐
n02120079,北极狐狸,白狐狸
n02120505,灰狐狸
n02123045,虎斑猫
n02123159,山猫,虎猫
n02123394,波斯猫
n02123597,暹罗暹罗猫,
n02124075,埃及猫
n02125311,美洲狮,美洲豹
n02127052,猞猁,山猫
n02128385,豹子
n02128757,雪豹
n02128925,美洲虎
n02129165,狮子
n02129604,老虎
n02130308,猎豹
n02132136,棕熊
n02133161,美洲黑熊
n02134084,冰熊,北极熊
n02134418,懒熊
n02137549,猫鼬
n02138441,猫鼬,海猫
n02165105,虎甲虫
n02165456,瓢虫
n02167151,土鳖虫
n02168699,天牛
n02169497,龟甲虫
n02172182,粪甲虫
n02174001,犀牛甲虫
n02177972,象甲
n02190166,苍蝇
n02206856,蜜蜂
n02219486,蚂蚁
n02226429,蚱蜢
n02229544,蟋蟀
n02231487,竹节虫
n02233338,蟑螂
n02236044,螳螂
n02256656,蝉
n02259212,叶蝉
n02264363,草蜻蛉
n02268443,蜻蜓
n02268853,豆娘,蜻蛉
n02276258,优红蛱蝶
n02277742,小环蝴蝶
n02279972,君主蝴蝶,大斑蝶
n02280649,菜粉蝶
n02281406,白蝴蝶
n02281787,灰蝶
n02317335,海星
n02319095,海胆
n02321529,海参,海黄瓜
n02325366,野兔
n02326432,兔
n02328150,安哥拉兔
n02342885,仓鼠
n02346627,刺猬,豪猪,
n02356798,黑松鼠
n02361337,土拨鼠
n02363005,海狸
n02364673,豚鼠,豚鼠
n02389026,栗色马
n02391049,斑马
n02395406,猪
n02396427,野猪
n02397096,疣猪
n02398521,河马
n02403003,牛
n02408429,水牛,亚洲水牛
n02410509,野牛
n02412080,公羊
n02415577,大角羊,洛矶山大角羊
n02417914,山羊
n02422106,狷羚
n02422699,黑斑羚
n02423022,瞪羚
n02437312,阿拉伯单峰骆驼,骆驼
n02437616,骆驼
n02441942,黄鼠狼
n02442845,水貂
n02443114,臭猫
n02443484,黑足鼬
n02444819,水獭
n02445715,臭鼬,木猫
n02447366,獾
n02454379,犰狳
n02457408,树懒
n02480495,猩猩,婆罗洲猩猩
n02480855,大猩猩
n02481823,黑猩猩
n02483362,长臂猿
n02483708,合趾猿长臂猿,合趾猿
n02484975,长尾猴
n02486261,赤猴
n02486410,狒狒
n02487347,恒河猴,猕猴
n02488291,白头叶猴
n02488702,疣猴
n02489166,长鼻猴
n02490219,狨(美洲产小型长尾猴)
n02492035,卷尾猴
n02492660,吼猴
n02493509,伶猴
n02493793,蜘蛛猴
n02494079,松鼠猴
n02497673,马达加斯加环尾狐猴,鼠狐猴
n02500267,大狐猴,马达加斯加大狐猴
n02504013,印度大象,亚洲象
n02504458,非洲象,非洲象
n02509815,小熊猫
n02510455,大熊猫
n02514041,杖鱼
n02526121,鳗鱼
n02536864,银鲑,银鲑鱼
n02606052,三色刺蝶鱼
n02607072,海葵鱼
n02640242,鲟鱼
n02641379,雀鳝
n02643566,狮子鱼
n02655020,河豚
n02666196,算盘
n02667093,长袍
n02669723,学位袍
n02672831,手风琴
n02676566,原声吉他
n02687172,航空母舰
n02690373,客机
n02692877,飞艇
n02699494,祭坛
n02701002,救护车
n02704792,水陆两用车
n02708093,模拟时钟
n02727426,蜂房
n02730930,围裙
n02747177,垃圾桶
n02749479,攻击步枪,枪
n02769748,背包
n02776631,面包店,面包铺,
n02777292,平衡木
n02782093,热气球
n02783161,圆珠笔
n02786058,创可贴
n02787622,班卓琴
n02788148,栏杆,楼梯扶手
n02790996,杠铃
n02791124,理发师的椅子
n02791270,理发店
n02793495,牲口棚
n02794156,晴雨表
n02795169,圆筒
n02797295,园地小车,手推车
n02799071,棒球
n02802426,篮球
n02804414,婴儿床
n02804610,巴松管,低音管
n02807133,游泳帽
n02808304,沐浴毛巾
n02808440,浴缸,澡盆
n02814533,沙滩车,旅行车
n02814860,灯塔
n02815834,高脚杯
n02817516,熊皮高帽
n02823428,啤酒瓶
n02823750,啤酒杯
n02825657,钟塔
n02834397,(小儿用的)围嘴
n02835271,串联自行车,
n02837789,比基尼
n02840245,装订册
n02841315,双筒望远镜
n02843684,鸟舍
n02859443,船库
n02860847,雪橇
n02865351,饰扣式领带
n02869837,阔边女帽
n02870880,书橱
n02871525,书店,书摊
n02877765,瓶盖
n02879718,弓箭
n02883205,蝴蝶结领结
n02892201,铜制牌位
n02892767,奶罩
n02894605,防波堤,海堤
n02895154,铠甲
n02906734,扫帚
n02909870,桶
n02910353,扣环
n02916936,防弹背心
n02917067,动车,子弹头列车
n02927161,肉铺,肉菜市场
n02930766,出租车
n02939185,大锅
n02948072,蜡烛
n02950826,大炮
n02951358,独木舟
n02951585,开瓶器,开罐器
n02963159,开衫
n02965783,车镜
n02966193,旋转木马
n02966687,木匠的工具包,工具包
n02971356,纸箱
n02974003,车轮
n02977058,取款机,自动取款机
n02978881,盒式录音带
n02979186,卡带播放器
n02980441,城堡
n02981792,双体船
n02988304,CD播放器
n02992211,大提琴
n02992529,移动电话,手机
n02999410,铁链
n03000134,围栏
n03000247,链甲
n03000684,电锯,油锯
n03014705,箱子
n03016953,衣柜,洗脸台
n03017168,编钟,钟,锣
n03018349,中国橱柜
n03026506,圣诞袜
n03028079,教堂,教堂建筑
n03032252,电影院,剧场
n03041632,切肉刀,菜刀
n03042490,悬崖屋
n03045698,斗篷
n03047690,木屐,木鞋
n03062245,鸡尾酒调酒器
n03063599,咖啡杯
n03063689,咖啡壶
n03065424,螺旋结构(楼梯)
n03075370,组合锁
n03085013,电脑键盘,键盘
n03089624,糖果,糖果店
n03095699,集装箱船
n03100240,敞篷车
n03109150,开瓶器,瓶螺杆
n03110669,短号,喇叭
n03124043,牛仔靴
n03124170,牛仔帽
n03125729,摇篮
n03126707,起重机
n03127747,头盔
n03127925,板条箱
n03131574,小儿床
n03133878,砂锅
n03134739,槌球
n03141823,拐杖
n03146219,胸甲
n03160309,大坝,堤防
n03179701,书桌
n03180011,台式电脑
n03187595,有线电话
n03188531,尿布湿
n03196217,数字时钟
n03197337,数字手表
n03201208,餐桌板
n03207743,抹布
n03207941,洗碗机,洗碟机
n03208938,盘式制动器
n03216828,码头,船坞,码头设施
n03218198,狗拉雪橇
n03220513,圆顶
n03223299,门垫,垫子
n03240683,钻井平台,海上钻井
n03249569,鼓,乐器,鼓膜
n03250847,鼓槌
n03255030,哑铃
n03259280,荷兰烤箱
n03271574,电风扇,鼓风机
n03272010,电吉他
n03272562,电力机车
n03290653,电视,电视柜
n03291819,信封
n03297495,浓缩咖啡机
n03314780,扑面粉
n03325584,女用长围巾
n03337140,文件,文件柜,档案柜
n03344393,消防船
n03345487,消防车
n03347037,火炉栏
n03355925,旗杆
n03372029,长笛
n03376595,折叠椅
n03379051,橄榄球头盔
n03384352,叉车
n03388043,喷泉
n03388183,钢笔
n03388549,有四根帷柱的床
n03393912,运货车厢
n03394916,圆号,喇叭
n03400231,煎锅
n03404251,裘皮大衣
n03417042,垃圾车
n03424325,防毒面具,呼吸器
n03425413,汽油泵
n03443371,高脚杯
n03444034,卡丁车
n03445777,高尔夫球
n03445924,高尔夫球车
n03447447,狭长小船
n03447721,锣
n03450230,礼服
n03452741,钢琴
n03457902,温室,苗圃
n03459775,散热器格栅
n03461385,杂货店,食品市场
n03467068,断头台
n03476684,小发夹
n03476991,头发喷雾
n03478589,半履带装甲车
n03481172,锤子
n03482405,大篮子
n03483316,手摇鼓风机,吹风机
n03485407,手提电脑
n03485794,手帕
n03492542,硬盘
n03494278,口琴,口风琴
n03495258,竖琴
n03496892,收割机
n03498962,斧头
n03527444,手枪皮套
n03529860,家庭影院
n03530642,蜂窝
n03532672,钩爪
n03534580,衬裙
n03535780,单杠
n03538406,马车
n03544143,沙漏
n03584254,iPod
n03584829,熨斗
n03590841,南瓜灯笼
n03594734,牛仔裤,蓝色牛仔裤
n03594945,吉普车
n03595614,运动衫,T恤
n03598930,拼图
n03599486,人力车
n03602883,操纵杆
n03617480,和服
n03623198,护膝
n03627232,蝴蝶结
n03630383,大褂,实验室外套
n03633091,长柄勺
n03637318,灯罩
n03642806,笔记本电脑
n03649909,割草机
n03657121,镜头盖
n03658185,开信刀,裁纸刀
n03661043,图书馆
n03662601,救生艇
n03666591,点火器,打火机
n03670208,豪华轿车
n03673027,远洋班轮
n03676483,唇膏,口红
n03680355,平底便鞋
n03690938,洗剂
n03691459,扬声器
n03692522,放大镜
n03697007,锯木厂
n03706229,磁罗盘
n03709823,邮袋
n03710193,信箱
n03710637,女游泳衣
n03710721,有肩带浴衣
n03717622,窨井盖
n03720891,沙球(一种打击乐器)
n03721384,马林巴木琴
n03724870,面膜
n03729826,火柴
n03733131,花柱
n03733281,迷宫
n03733805,量杯
n03742115,药箱
n03743016,巨石,巨石结构
n03759954,麦克风
n03761084,微波炉
n03763968,军装
n03764736,奶桶
n03769881,迷你巴士
n03770439,迷你裙
n03770679,面包车
n03773504,导弹
n03775071,连指手套
n03775546,搅拌钵
n03776460,活动房屋(由汽车拖拉的)
n03777568,T型发动机小汽车
n03777754,调制解调器
n03781244,修道院
n03782006,显示器
n03785016,电瓶车
n03786901,砂浆
n03787032,学士
n03788195,清真寺
n03788365,蚊帐
n03791053,摩托车
n03792782,山地自行车
n03792972,登山帐
n03793489,鼠标,电脑鼠标
n03794056,捕鼠器
n03796401,搬家车
n03803284,口套
n03804744,钉子
n03814639,颈托
n03814906,项链
n03825788,乳头(瓶)
n03832673,笔记本,笔记本电脑
n03837869,方尖碑
n03838899,双簧管
n03840681,陶笛,卵形笛
n03841143,里程表
n03843555,滤油器
n03854065,风琴,管风琴
n03857828,示波器
n03866082,罩裙
n03868242,牛车
n03868863,氧气面罩
n03871628,包装
n03873416,船桨
n03874293,明轮,桨轮
n03874599,挂锁,扣锁
n03876231,画笔
n03877472,睡衣
n03877845,宫殿
n03884397,排箫,鸣管
n03887697,纸巾
n03888257,降落伞
n03888605,双杠
n03891251,公园长椅
n03891332,停车收费表,停车计时器
n03895866,客车,教练车
n03899768,露台,阳台
n03902125,付费电话
n03903868,基座,基脚
n03908618,铅笔盒
n03908714,卷笔刀
n03916031,香水(瓶)
n03920288,培养皿
n03924679,复印机
n03929660,拨弦片,拨子
n03929855,尖顶头盔
n03930313,栅栏,栅栏
n03930630,皮卡,皮卡车
n03933933,桥墩
n03935335,存钱罐
n03937543,药瓶
n03938244,枕头
n03942813,乒乓球
n03944341,风车
n03947888,海盗船
n03950228,水罐
n03954731,木工刨
n03956157,天文馆
n03958227,塑料袋
n03961711,板架
n03967562,犁型铲雪机
n03970156,手压皮碗泵
n03976467,宝丽来相机
n03976657,电线杆
n03977966,警车,巡逻车
n03980874,雨披
n03982430,台球桌
n03983396,充气饮料瓶
n03991062,花盆
n03992509,陶工旋盘
n03995372,电钻
n03998194,祈祷垫,地毯
n04004767,打印机
n04005630,监狱
n04008634,炮弹,导弹
n04009552,投影仪
n04019541,冰球
n04023962,沙包,吊球
n04026417,钱包
n04033901,羽管笔
n04033995,被子
n04037443,赛车
n04039381,球拍
n04040759,散热器
n04041544,收音机
n04044716,射电望远镜,无线电反射器
n04049303,雨桶
n04065272,休闲车,房车
n04067472,卷轴,卷筒
n04069434,反射式照相机
n04070727,冰箱,冰柜
n04074963,遥控器
n04081281,餐厅,饮食店,食堂
n04086273,左轮手枪
n04090263,步枪
n04099969,摇椅
n04111531,电转烤肉架
n04116512,橡皮
n04118538,橄榄球
n04118776,直尺
n04120489,跑步鞋
n04125021,保险柜
n04127249,安全别针
n04131690,盐瓶(调味用)
n04133789,凉鞋
n04136333,纱笼,围裙
n04141076,萨克斯管
n04141327,剑鞘
n04141975,秤,称重机
n04146614,校车
n04147183,帆船
n04149813,记分牌
n04152593,屏幕
n04153751,螺丝
n04154565,螺丝刀
n04162706,安全带
n04179913,缝纫机
n04192698,盾牌,盾牌
n04200800,皮鞋店,鞋店
n04201297,障子
n04204238,购物篮
n04204347,购物车
n04208210,铁锹
n04209133,浴帽
n04209239,浴帘
n04228054,滑雪板
n04229816,滑雪面罩
n04235860,睡袋
n04238763,滑尺
n04239074,滑动门
n04243546,角子老虎机
n04251144,潜水通气管
n04252077,雪橇
n04252225,扫雪机,扫雪机
n04254120,皂液器
n04254680,足球
n04254777,袜子
n04258138,碟式太阳能,太阳能集热器,太阳能炉
n04259630,宽边帽
n04263257,汤碗
n04264628,空格键
n04265275,空间加热器
n04266014,航天飞机
n04270147,铲(搅拌或涂敷用的)
n04273569,快艇
n04275548,蜘蛛网
n04277352,纺锤,纱锭
n04285008,跑车
n04286575,聚光灯
n04296562,舞台
n04310018,蒸汽机车
n04311004,钢拱桥
n04311174,钢滚筒
n04317175,听诊器
n04325704,女用披肩
n04326547,石头墙
n04328186,秒表
n04330267,火炉
n04332243,过滤器
n04335435,有轨电车,电车
n04336792,担架
n04344873,沙发床
n04346328,佛塔
n04347754,潜艇,潜水艇
n04350905,套装,衣服
n04355338,日晷
n04355933,太阳镜
n04356056,太阳镜,墨镜
n04357314,防晒霜,防晒剂
n04366367,悬索桥
n04367480,拖把
n04370456,运动衫
n04371430,游泳裤
n04371774,秋千
n04372370,开关,电器开关
n04376876,注射器
n04380533,台灯
n04389033,坦克,装甲战车,装甲战斗车辆
n04392985,磁带播放器
n04398044,茶壶
n04399382,泰迪,泰迪熊
n04404412,电视
n04409515,网球
n04417672,茅草,茅草屋顶
n04418357,幕布,剧院的帷幕
n04423845,顶针
n04428191,脱粒机
n04429376,宝座
n04435653,瓦屋顶
n04442312,烤面包机
n04443257,烟草店,烟草
n04447861,马桶
n04456115,火炬
n04458633,图腾柱
n04461696,拖车,牵引车,清障车
n04462240,玩具店
n04465501,拖拉机
n04467665,拖车,铰接式卡车
n04476259,托盘
n04479046,风衣
n04482393,三轮车
n04483307,三体船
n04485082,三脚架
n04486054,凯旋门
n04487081,无轨电车
n04487394,长号
n04493381,浴盆,浴缸
n04501370,旋转式栅门
n04505470,打字机键盘
n04507155,伞
n04509417,独轮车
n04515003,直立式钢琴
n04517823,真空吸尘器
n04522168,花瓶
n04523525,拱顶
n04525038,天鹅绒
n04525305,自动售货机
n04532106,祭服
n04532670,高架桥
n04536866,小提琴,小提琴
n04540053,排球
n04542943,松饼机
n04548280,挂钟
n04548362,钱包,皮夹
n04550184,衣柜,壁橱
n04552348,军用飞机
n04553703,洗脸盆,洗手盆
n04554684,洗衣机,自动洗衣机
n04557648,水瓶
n04560804,水壶
n04562935,水塔
n04579145,威士忌壶
n04579432,哨子
n04584207,假发
n04589890,纱窗
n04590129,百叶窗
n04591157,温莎领带
n04591713,葡萄酒瓶
n04592741,飞机翅膀,飞机
n04596742,炒菜锅
n04597913,木制的勺子
n04599235,毛织品,羊绒
n04604644,栅栏,围栏
n04606251,沉船
n04612504,双桅船
n04613696,蒙古包
n06359193,网站,互联网网站
n06596364,漫画
n06785654,纵横字谜
n06794110,路标
n06874185,交通信号灯
n07248320,防尘罩,书皮
n07565083,菜单
n07579787,盘子
n07583066,鳄梨酱
n07584110,清汤
n07590611,罐焖土豆烧肉
n07613480,蛋糕
n07614500,冰淇淋
n07615774,雪糕,冰棍,冰棒
n07684084,法式面包
n07693725,百吉饼
n07695742,椒盐脆饼
n07697313,芝士汉堡
n07697537,热狗
n07711569,土豆泥
n07714571,结球甘蓝
n07714990,西兰花
n07715103,菜花
n07716358,绿皮密生西葫芦
n07716906,西葫芦
n07717410,小青南瓜
n07717556,南瓜
n07718472,黄瓜
n07718747,朝鲜蓟
n07720875,甜椒
n07730033,刺棘蓟
n07734744,蘑菇
n07742313,绿苹果
n07745940,草莓
n07747607,橘子
n07749582,柠檬
n07753113,无花果
n07753275,菠萝
n07753592,香蕉
n07754684,菠萝蜜
n07760859,蛋奶冻苹果
n07768694,石榴
n07802026,干草
n07831146,烤面条加干酪沙司
n07836838,巧克力酱,巧克力糖浆
n07860988,面团
n07871810,瑞士肉包,肉饼
n07873807,披萨,披萨饼
n07875152,馅饼
n07880968,卷饼
n07892512,红葡萄酒
n07920052,意大利浓咖啡
n07930864,杯子
n07932039,蛋酒
n09193705,高山
n09229709,泡泡
n09246464,悬崖
n09256479,珊瑚礁
n09288635,间歇泉
n09332890,湖边,湖岸
n09399592,海角
n09421951,沙洲,沙坝
n09428293,海滨,海岸
n09468604,峡谷
n09472597,火山
n09835506,棒球,棒球运动员
n10148035,新郎
n10565667,潜水员
n11879895,油菜
n11939491,雏菊
n12057211,杓兰
n12144580,玉米
n12267677,橡子
n12620546,玫瑰果
n12768682,七叶树果实
n12985857,珊瑚菌
n12998815,木耳
n13037406,鹿花菌
n13040303,鬼笔菌
n13044778,地星
n13052670,多叶奇果菌
n13054560,牛肝菌
n13133613,玉米穗
n15075141,卫生纸
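The list above is a data file mapping ImageNet synset IDs to Chinese class labels, one id,label pair per line (a label may contain further comma-separated synonyms). A hedged sketch of how a classification client might load it; this loader is illustrative and not part of the commit:

#include <fstream>
#include <map>
#include <string>

std::map<std::string, std::string> load_labels(const std::string& path) {
    std::map<std::string, std::string> labels;
    std::ifstream in(path);
    std::string line;
    while (std::getline(in, line)) {
        size_t comma = line.find(',');
        if (comma == std::string::npos) continue;
        // Key: synset id (e.g. "n01440764"). Value: everything after the
        // first comma, kept whole because synonyms are comma-separated too.
        labels[line.substr(0, comma)] = line.substr(comma + 1);
    }
    return labels;
}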
/***************************************************************************
*
* Copyright (c) 2018 Baidu.com, Inc. All Rights Reserved
*
**************************************************************************/
/**
* @file demo.cpp
* @author root(root@baidu.com)
* @date 2018/07/09 20:12:44
* @brief
*
**/
#include <fstream>
#include <unistd.h>
#include <stdlib.h>
#include <pthread.h>
#include "common.h"
#include "predictor_sdk.h"
#include "map_cnn.pb.h"
using baidu::paddle_serving::sdk_cpp::Predictor;
using baidu::paddle_serving::sdk_cpp::PredictorApi;
using baidu::infinite::map_model::SparseTensor;
using baidu::infinite::map_model::SparseInstance;
using baidu::infinite::map_model::DensePrediction;
using baidu::infinite::map_model::Request;
using baidu::infinite::map_model::Response;
using baidu::infinite::map_model::MapCnnService;
static const uint32_t SELECT_VALID_UNIT = 1000;
class InputData {
public:
InputData() {}
~InputData() {}
int create(const std::string file_name, size_t buf_size,
size_t batch_size, int qps) {
pthread_mutex_init(&_mutex, NULL);
FILE* fp = fopen(file_name.c_str(), "r");
if (!fp) {
LOG(FATAL) << "Failed open data file: "
<< file_name;
return -1;
}
_data.clear();
char buffer[2048000];
std::vector<std::string> tokens;
while (fgets(buffer, sizeof(buffer), fp) != NULL) {
tokens.clear();
baidu::paddle_serving::sdk_cpp::str_split(
buffer, ",", &tokens);
std::vector<float> feature_one;
for (size_t i = 0; i < tokens.size(); i++){
feature_one.push_back(
strtof(tokens[i].c_str(), NULL));
}
_data.push_back(feature_one);
}
printf("succ load data, size:%ld\n", _data.size());
for (size_t ri = 0; ri < buf_size; ri++) {
Request* req = new Request();
if (generate_one_req(*req, batch_size) != 0) {
LOG(FATAL) << "Failed generate req at: " << ri;
fclose(fp);
return -1;
}
_req_list.push_back(req);
}
fclose(fp);
_current = 0;
_waitingtm = 0;
_lasttm.tv_sec = _lasttm.tv_usec = 0;
if (qps == 0) {
_interval = 0;
} else if (qps < 1) {
_interval = 1000 * 1000;
} else {
_interval = 1000 * 1000 / qps;
}
LOG(INFO) << "Succ create req, size: " << buf_size
<< ", batch_size: " << batch_size;
return 0;
}
void destroy() {
size_t ds = _data.size();
for (size_t di = 0; di < ds; di++) {
_data[di].clear();
}
_data.clear();
size_t rs = _req_list.size();
for (size_t ri = 0; ri < rs; ri++) {
delete _req_list[ri];
}
_req_list.clear();
}
Request* next_req() {
pthread_mutex_lock(&_mutex);
if (_interval != 0)
{
if (_lasttm.tv_sec == 0 && _lasttm.tv_usec == 0)
{
gettimeofday(&_lasttm, NULL);
}
else
{
timeval curtm;
gettimeofday(&curtm, NULL);
long elapse =
((curtm.tv_sec - _lasttm.tv_sec) * 1000*1000 +
(curtm.tv_usec - _lasttm.tv_usec));
_waitingtm += _interval - elapse;
_lasttm = curtm;
if (_waitingtm >= SELECT_VALID_UNIT) // smallest interval select() can honor
{
long tm_unit
= _waitingtm / SELECT_VALID_UNIT * SELECT_VALID_UNIT;
timeval tmp_tm = {tm_unit / 1000000, tm_unit % 1000000};
select(1, NULL, NULL, NULL, &tmp_tm); // sleep to throttle request rate
}
else if (_waitingtm <= SELECT_VALID_UNIT * (-2))
{
_waitingtm = -SELECT_VALID_UNIT;
}
}
}
size_t rs = _req_list.size();
Request* req = _req_list[(_current++) % rs];
pthread_mutex_unlock(&_mutex);
return req;
}
int generate_one_req(Request& req, int batch) {
int batch_size = batch;
std::vector<std::vector<int> > shapes;
shapes.clear();
int p_shape[] = {batch_size, 37, 1, 1};
std::vector<int> shape(p_shape, p_shape + 4);
shapes.push_back(shape);
int p_shape1[] = {batch_size, 1, 50, 12};
std::vector<int> shape1(p_shape1, p_shape1 + 4);
shapes.push_back(shape1);
int p_shape2[] = {batch_size, 1, 50, 19};
std::vector<int> shape2(p_shape2, p_shape2 + 4);
shapes.push_back(shape2);
int p_shape3[] = {batch_size, 1, 50, 1};
std::vector<int> shape3(p_shape3, p_shape3 + 4);
shapes.push_back(shape3);
int p_shape4[] = {batch_size, 4, 50, 1};
std::vector<int> shape4(p_shape4, p_shape4 + 4);
shapes.push_back(shape4);
int p_shape5[] = {batch_size, 1, 50, 1};
std::vector<int> shape5(p_shape5, p_shape5 + 4);
shapes.push_back(shape5);
int p_shape6[] = {batch_size, 5, 50, 1};
std::vector<int> shape6(p_shape6, p_shape6 + 4);
shapes.push_back(shape6);
int p_shape7[] = {batch_size, 7, 50, 1};
std::vector<int> shape7(p_shape7, p_shape7 + 4);
shapes.push_back(shape7);
int p_shape8[] = {batch_size, 3, 50, 1};
std::vector<int> shape8(p_shape8, p_shape8 + 4);
shapes.push_back(shape8);
int p_shape9[] = {batch_size, 32, 50, 1}; // added
std::vector<int> shape9(p_shape9, p_shape9 + 4);
shapes.push_back(shape9);
std::vector<std::string> tensor_names;
tensor_names.push_back("input_0");
tensor_names.push_back("input_1");
tensor_names.push_back("input_2");
tensor_names.push_back("input_3");
tensor_names.push_back("input_4");
tensor_names.push_back("input_5");
tensor_names.push_back("input_6");
tensor_names.push_back("input_7");
tensor_names.push_back("input_8");
tensor_names.push_back("input_9");
SparseInstance* ins = req.add_instances();
for (int fi = 0; fi < _data.size(); ++fi) {
SparseTensor* tensor = ins->add_tensors();
tensor->set_name(tensor_names[fi]);
int len = 1;
for (int si = 0; si < shapes[fi].size(); ++si) {
len *= shapes[fi][si];
}
for (int si = 0; si < shapes[fi].size(); ++si) {
tensor->add_shape(shapes[fi][si]);
}
tensor->set_features(&(_data[fi][0]), len * sizeof(float));
}
return 0;
}
private:
std::vector<std::vector<float> > _data;
std::vector<Request*> _req_list;
pthread_mutex_t _mutex;
long _waitingtm;
long _interval;
timeval _lasttm;
int _current;
};
void print_res(
const Request* req,
const Response& res,
std::string route_tag,
uint64_t elapse_ms) {
uint32_t sample_size = res.predictions_size();
for (int i = 0; i < sample_size; ++i) {
const ::baidu::infinite::map_model::DensePrediction& prediction = res.predictions(i);
int cat_size = prediction.categories_size();
for (int j = 0; j < cat_size; ++j) {
//LOG(INFO) << "categories:" << prediction.categories(j);
}
}
LOG(INFO)
<< "Succ call predictor[wasq], res sample size: "
<< sample_size << ", the tag is: "
<< route_tag << ", elapse_ms: " << elapse_ms;
}
struct Arg {
PredictorApi* api;
InputData* input;
};
void* work(void* p) {
Arg* arg = (Arg*) p;
InputData* input = arg->input;
PredictorApi* api = arg->api;
Response res;
LOG(WARNING) << "Thread entry!";
while (true) {
Predictor* predictor = api->fetch_predictor("mapcnn");
if (!predictor) {
LOG(FATAL) << "Failed fetch predictor: wasq";
return NULL;
}
Request* req = input->next_req();
res.Clear();
timeval start;
gettimeofday(&start, NULL);
if (predictor->inference(req, &res) != 0) {
LOG(FATAL) << "failed call predictor with req:"
<< req->ShortDebugString();
return NULL;
}
timeval end;
gettimeofday(&end, NULL);
uint64_t elapse_ms = (end.tv_sec * 1000 + end.tv_usec / 1000)
- (start.tv_sec * 1000 + start.tv_usec / 1000);
print_res(req, res, predictor->tag(), elapse_ms);
if (api->free_predictor(predictor) != 0) {
printf("failed free predictor\n");
}
}
LOG(WARNING) << "Thread exit!";
return NULL;
}
int main(int argc, char** argv) {
if (argc != 5) {
printf("Usage: demo req_buf_size batch_size threads qps\n");
return -1;
}
int req_buffer = atoi(argv[1]);
int batch_size = atoi(argv[2]);
int thread_num = atoi(argv[3]);
int qps = atoi(argv[4]);
PredictorApi api;
if (api.create("./conf", "predictors.conf") != 0) {
LOG(FATAL) << "Failed create predictors api!";
return -1;
}
InputData data;
if (data.create(
"./data/pure_feature", req_buffer, batch_size, qps) != 0) {
LOG(FATAL) << "Failed create inputdata!";
return -1;
}
Arg arg = {&api, &data};
pthread_t* threads = new pthread_t[thread_num];
if (!threads) {
LOG(FATAL) << "Failed create threads, num:" << thread_num;
return -1;
}
for (int i = 0; i < thread_num; ++i) {
pthread_create(threads + i, NULL, work, &arg);
}
for (int i = 0; i < thread_num; ++i) {
pthread_join(threads[i], NULL);
}
delete[] threads;
data.destroy();
return 0;
}
/* vim: set expandtab ts=4 sw=4 sts=4 tw=100: */
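InputData::next_req() above paces callers to the requested QPS: _interval = 1000 * 1000 / qps microseconds per request, and _waitingtm accumulates the deficit between that target and the observed elapsed time, sleeping it off via select() in whole SELECT_VALID_UNIT (1 ms) steps. A standalone sketch of one pacing step, with the qps and elapse values chosen purely for illustration:

#include <sys/select.h>
#include <sys/time.h>
#include <cstdio>

static const long SELECT_VALID_UNIT = 1000;  // smallest sleep select() honors, in us

int main() {
    int qps = 200;
    long interval = 1000 * 1000 / qps;   // 5000 us target gap between requests
    long waitingtm = 0;
    long elapse = 3200;                  // pretend only 3200 us passed since last call
    waitingtm += interval - elapse;      // accumulated deficit: 1800 us
    if (waitingtm >= SELECT_VALID_UNIT) {
        // Round the deficit down to whole milliseconds and sleep it off; the
        // sleep time shows up in the next round's elapse, self-correcting.
        long tm_unit = waitingtm / SELECT_VALID_UNIT * SELECT_VALID_UNIT;
        timeval tmp_tm = {tm_unit / 1000000, tm_unit % 1000000};
        select(1, NULL, NULL, NULL, &tmp_tm);
    }
    printf("interval=%ld us, deficit carried to next round=%ld us\n",
           interval, waitingtm);
    return 0;
}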
/***************************************************************************
*
* Copyright (c) 2018 Baidu.com, Inc. All Rights Reserved
*
**************************************************************************/
/**
* @file demo.cpp
* @author root(root@baidu.com)
* @date 2018/07/09 20:12:44
* @brief
*
**/
#include <fstream>
#include <unistd.h>
#include <stdlib.h>
#include <bthread/bthread.h>
#include "common.h"
#include "predictor_sdk.h"
#include "default_schema.pb.h"
using baidu::paddle_serving::sdk_cpp::Predictor;
using baidu::paddle_serving::sdk_cpp::PredictorApi;
using baidu::paddle_serving::fluid_engine::SparseTensor;
using baidu::paddle_serving::fluid_engine::SparseInstance;
using baidu::paddle_serving::fluid_engine::Prediction;
using baidu::paddle_serving::fluid_engine::SparseRequest;
using baidu::paddle_serving::fluid_engine::Response;
static const uint32_t SELECT_VALID_UNIT = 1000;
class InputData {
public:
InputData() {}
~InputData() {}
int create(const std::string file_name, size_t buf_size,
size_t batch_size, int qps) {
bthread_mutex_init(&_mutex, NULL);
FILE* fp = fopen(file_name.c_str(), "r");
if (!fp) {
LOG(FATAL) << "Failed open data file: "
<< file_name;
return -1;
}
_data.clear();
char buffer[2048000];
std::vector<std::string> tokens;
while (fgets(buffer, sizeof(buffer), fp) != NULL) {
tokens.clear();
baidu::paddle_serving::sdk_cpp::str_split(
buffer, ",", &tokens);
std::vector<float> feature_one;
for (size_t i = 0; i < tokens.size(); i++){
feature_one.push_back(
strtof(tokens[i].c_str(), NULL));
}
_data.push_back(feature_one);
}
printf("succ load data, size:%ld\n", _data.size());
for (size_t ri = 0; ri < buf_size; ri++) {
SparseRequest* req = new SparseRequest();
if (generate_one_req(*req, batch_size) != 0) {
LOG(FATAL) << "Failed generate req at: " << ri;
fclose(fp);
return -1;
}
_req_list.push_back(req);
}
fclose(fp);
_current = 0;
_waitingtm = 0;
_lasttm.tv_sec = _lasttm.tv_usec = 0;
if (qps == 0) {
_interval = 0;
} else if (qps < 1) {
_interval = 1000 * 1000;
} else {
_interval = 1000 * 1000 / qps;
}
LOG(INFO) << "Succ create req, size: " << buf_size
<< ", batch_size: " << batch_size;
return 0;
}
void destroy() {
size_t ds = _data.size();
for (size_t di = 0; di < ds; di++) {
_data[di].clear();
}
_data.clear();
size_t rs = _req_list.size();
for (size_t ri = 0; ri < rs; ri++) {
delete _req_list[ri];
}
_req_list.clear();
}
SparseRequest* next_req() {
bthread_mutex_lock(&_mutex);
/*
if (_interval != 0)
{
if (_lasttm.tv_sec == 0 && _lasttm.tv_usec == 0)
{
gettimeofday(&_lasttm, NULL);
}
else
{
timeval curtm;
gettimeofday(&curtm, NULL);
long elapse =
((curtm.tv_sec - _lasttm.tv_sec) * 1000*1000 +
(curtm.tv_usec - _lasttm.tv_usec));
_waitingtm += _interval - elapse;
_lasttm = curtm;
if (_waitingtm >= SELECT_VALID_UNIT) // smallest interval select() can honor
{
long tm_unit
= _waitingtm / SELECT_VALID_UNIT * SELECT_VALID_UNIT;
timeval tmp_tm = {tm_unit / 1000000, tm_unit % 1000000};
select(1, NULL, NULL, NULL, &tmp_tm); // sleep to throttle request rate
}
else if (_waitingtm <= SELECT_VALID_UNIT * (-2))
{
_waitingtm = -SELECT_VALID_UNIT;
}
}
}*/
size_t rs = _req_list.size();
SparseRequest* req = _req_list[(_current++) % rs];
bthread_mutex_unlock(&_mutex);
return req;
}
int generate_one_req(SparseRequest& req, int batch) {
int batch_size = batch;
std::vector<std::vector<int> > shapes;
shapes.clear();
int p_shape[] = {batch_size, 37, 1, 1};
std::vector<int> shape(p_shape, p_shape + 4);
shapes.push_back(shape);
int p_shape1[] = {batch_size, 1, 50, 12};
std::vector<int> shape1(p_shape1, p_shape1 + 4);
shapes.push_back(shape1);
int p_shape2[] = {batch_size, 1, 50, 19};
std::vector<int> shape2(p_shape2, p_shape2 + 4);
shapes.push_back(shape2);
int p_shape3[] = {batch_size, 1, 50, 1};
std::vector<int> shape3(p_shape3, p_shape3 + 4);
shapes.push_back(shape3);
int p_shape4[] = {batch_size, 4, 50, 1};
std::vector<int> shape4(p_shape4, p_shape4 + 4);
shapes.push_back(shape4);
int p_shape5[] = {batch_size, 1, 50, 1};
std::vector<int> shape5(p_shape5, p_shape5 + 4);
shapes.push_back(shape5);
int p_shape6[] = {batch_size, 5, 50, 1};
std::vector<int> shape6(p_shape6, p_shape6 + 4);
shapes.push_back(shape6);
int p_shape7[] = {batch_size, 7, 50, 1};
std::vector<int> shape7(p_shape7, p_shape7 + 4);
shapes.push_back(shape7);
int p_shape8[] = {batch_size, 3, 50, 1};
std::vector<int> shape8(p_shape8, p_shape8 + 4);
shapes.push_back(shape8);
int p_shape9[] = {batch_size, 32, 50, 1}; // added
std::vector<int> shape9(p_shape9, p_shape9 + 4);
shapes.push_back(shape9);
std::vector<std::string> tensor_names;
/*
tensor_names.push_back("input_0");
tensor_names.push_back("input_1");
tensor_names.push_back("input_2");
tensor_names.push_back("input_3");
tensor_names.push_back("input_4");
tensor_names.push_back("input_5");
tensor_names.push_back("input_6");
tensor_names.push_back("input_7");
tensor_names.push_back("input_8");
tensor_names.push_back("input_9");
*/
tensor_names.push_back("attr_f");
tensor_names.push_back("realtime_f");
tensor_names.push_back("static_f");
tensor_names.push_back("eta_cost_f");
tensor_names.push_back("lukuang_f");
tensor_names.push_back("length_f");
tensor_names.push_back("path_f");
tensor_names.push_back("speed_f");
tensor_names.push_back("lane_f");
tensor_names.push_back("roadid_f");
std::vector<float> tensor_values;
SparseInstance* ins = req.add_instances();
for (int fi = 0; fi < _data.size(); ++fi) {
SparseTensor* tensor = ins->add_tensors();
tensor->set_name(tensor_names[fi]);
int len = 1;
for (int si = 0; si < shapes[fi].size(); ++si) {
len *= shapes[fi][si];
tensor->add_shape(shapes[fi][si]);
}
tensor_values.clear();
for (int vi = 0; vi < len; ++vi) {
if (std::abs(_data[fi][vi]) > 0.000001f) {
tensor_values.push_back(_data[fi][vi]);
tensor->add_keys(vi);
}
}
tensor->set_features(
&tensor_values[0], tensor_values.size() * sizeof(float));
}
tensor_values.clear();
return 0;
}
private:
std::vector<std::vector<float> > _data;
std::vector<SparseRequest*> _req_list;
bthread_mutex_t _mutex;
long _waitingtm;
long _interval;
timeval _lasttm;
int _current;
};
void print_res(
const SparseRequest* req,
const Response& res,
std::string route_tag,
uint64_t elapse_ms) {
uint32_t feature_size = res.predictions_size();
size_t sample_size = 0;
for (int i = 0; i < feature_size; ++i) {
const ::baidu::paddle_serving::fluid_engine::Prediction& prediction = res.predictions(i);
if (i == 0) {
sample_size = prediction.categories_size();
}
for (int j = 0; j < sample_size; ++j) {
//LOG(TRACE) << "categories:" << prediction.categories(j);
}
}
LOG(INFO)
<< "Succ call predictor[sparse_cnn], res sample size: "
<< sample_size << ", the tag is: "
<< route_tag << ", elapse_ms: " << elapse_ms;
}
struct Arg {
PredictorApi* api;
InputData* input;
};
void* work(void* p) {
Arg* arg = (Arg*) p;
InputData* input = arg->input;
PredictorApi* api = arg->api;
if (api->thrd_initialize() != 0) {
LOG(FATAL) << "Failed init api in thrd:" << bthread_self();
return NULL;
}
Response res;
LOG(WARNING) << "Thread entry!";
while (true) {
api->thrd_clear();
Predictor* predictor = api->fetch_predictor("sparse_cnn");
if (!predictor) {
LOG(FATAL) << "Failed fetch predictor: sparse_cnn";
continue;
}
SparseRequest* req = input->next_req();
res.Clear();
timeval start;
gettimeofday(&start, NULL);
if (predictor->inference(req, &res) != 0) {
LOG(FATAL) << "failed call predictor with req:"
<< req->ShortDebugString();
api->free_predictor(predictor);
continue;
}
timeval end;
gettimeofday(&end, NULL);
uint64_t elapse_ms = (end.tv_sec * 1000 + end.tv_usec / 1000)
- (start.tv_sec * 1000 + start.tv_usec / 1000);
print_res(req, res, predictor->tag(), elapse_ms);
}
api->thrd_finalize();
LOG(WARNING) << "Thread exit!";
return NULL;
}
int main(int argc, char** argv) {
if (argc != 5) {
printf("Usage: demo req_buf_size batch_size threads qps\n");
return -1;
}
int req_buffer = atoi(argv[1]);
int batch_size = atoi(argv[2]);
int thread_num = atoi(argv[3]);
int qps = atoi(argv[4]);
PredictorApi api;
if (api.create("./conf", "predictors.conf") != 0) {
LOG(FATAL) << "Failed create predictors api!";
return -1;
}
InputData data;
if (data.create(
//"./data/feature", req_buffer, batch_size, qps) != 0) {
"./data/pure_feature", req_buffer, batch_size, qps) != 0) {
LOG(FATAL) << "Failed create inputdata!";
return -1;
}
Arg arg = {&api, &data};
bthread_t* threads = new bthread_t[thread_num];
if (!threads) {
LOG(FATAL) << "Failed create threads, num:" << thread_num;
return -1;
}
for (int i = 0; i < thread_num; ++i) {
bthread_start_background(threads + i, NULL, work, &arg);
}
for (int i = 0; i < thread_num; ++i) {
bthread_join(threads[i], NULL);
}
delete[] threads;
data.destroy();
return 0;
}
/* vim: set expandtab ts=4 sw=4 sts=4 tw=100: */
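Relative to the first demo, this client swaps POSIX threads for brpc bthreads (bthread_start_background/bthread_join) and brackets each worker with api->thrd_initialize()/thrd_clear()/thrd_finalize() so the SDK's thread-local resources follow the bthread lifecycle. A minimal sketch of that worker pattern, independent of the serving API:

#include <bthread/bthread.h>
#include <cstdio>

void* worker(void* arg) {
    // bthread_self() identifies the current bthread, as used in the logs above.
    printf("running on bthread %llu\n", (unsigned long long)bthread_self());
    return NULL;
}

int main() {
    bthread_t tid;
    if (bthread_start_background(&tid, NULL, worker, NULL) != 0) {
        return -1;
    }
    bthread_join(tid, NULL);
    return 0;
}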
/***************************************************************************
*
* Copyright (c) 2018 Baidu.com, Inc. All Rights Reserved
*
**************************************************************************/
/**
* @file demo.cpp
* @author root(root@baidu.com)
* @date 2018/07/09 20:12:44
* @brief
*
**/
#include <fstream>
#include <unistd.h>
#include <stdlib.h>
#include <bthread.h>
#include "common.h"
#include "predictor_sdk.h"
#include "map_rnn.pb.h"
using baidu::paddle_serving::sdk_cpp::Predictor;
using baidu::paddle_serving::sdk_cpp::PredictorApi;
//using baidu::infinite::map_rnn::Tensor;
using baidu::infinite::map_rnn::DenseInstance;
using baidu::infinite::map_rnn::DensePrediction;
using baidu::infinite::map_rnn::Request;
using baidu::infinite::map_rnn::Response;
using baidu::infinite::map_rnn::MapRnnService;
static const uint32_t SELECT_VALID_UNIT = 1000;
int split(std::string source, char spliter, std::vector<std::string>& result)
{
result.clear();
std::string::size_type pos;
std::string::size_type start = 0;
while ((pos = source.find(spliter, start)) != std::string::npos)
{
result.insert(result.end(), source.substr(start, pos-start));
start = pos+1;
}
result.insert(result.end(), source.substr(start));
return (int)result.size();
}
int load_data(std::string data_file_name, std::vector<std::vector<float> >& data){
std::ifstream data_file;
std::vector<std::string> token;
data_file.open(data_file_name, std::ios::in);
std::string input_line;
while (std::getline(data_file, input_line)) {
split(input_line, ',', token);
std::vector<float> feature_one;
for (size_t i = 0; i < token.size(); i++){
feature_one.push_back(std::stof(token[i]));
}
data.push_back(feature_one);
}
return 0;
}
void split(const std::string &str, char sep, std::vector<std::string> *pieces) {
pieces->clear();
if (str.empty()) {
return;
}
size_t pos = 0;
size_t next = str.find(sep, pos);
while (next != std::string::npos) {
pieces->push_back(str.substr(pos, next - pos));
pos = next + 1;
next = str.find(sep, pos);
}
if (!str.substr(pos).empty()) {
pieces->push_back(str.substr(pos));
}
}
void split_to_float(const std::string &str, char sep, std::vector<float> *fs) {
std::vector<std::string> pieces;
split(str, sep, &pieces);
std::transform(pieces.begin(), pieces.end(), std::back_inserter(*fs),
[](const std::string &v) {
return std::stof(v);
});
}
// clang-format off
/*void TensorAssignData(paddle::PaddleTensor *tensor, const std::vector<std::vector<float>> &data) {
// Assign buffer
int dim = std::accumulate(tensor->shape.begin(), tensor->shape.end(), 1, [](int a, int b) { return a * b; });
tensor->data.Resize(sizeof(float) * dim);
int c = 0;
for (const auto &f : data) {
for (float v : f) { static_cast<float *>(tensor->data.data())[c++] = v; }
}
}*/
// clang-format on
struct DataRecord {
std::vector<std::vector<std::vector<float>>> link_step_data_all;
std::vector<std::vector<float>> week_data_all, minute_data_all;
std::vector<std::vector<std::vector<char>>> ch_link_step_data_all;
std::vector<std::vector<char>> ch_week_data_all, ch_minute_data_all;
std::vector<size_t> lod1, lod2, lod3;
std::vector<std::vector<float>> rnn_link_data, rnn_week_datas,
rnn_minute_datas;
size_t batch_iter{0};
size_t batch_size{1};
DataRecord() = default;
DataRecord(const std::string &path, int batch_size = 1)
: batch_size(batch_size) {
Load(path);
for (std::vector<std::vector<std::vector<float>>>::iterator it1 = link_step_data_all.begin();
it1 != link_step_data_all.end(); ++it1) {
std::vector<std::vector<char>> links;
for (std::vector<std::vector<float>>::iterator it2 = it1->begin(); it2 != it1->end(); ++it2) {
int len = it2->size() * sizeof(float);
char* ch = (char*)malloc(len);
memcpy(ch, it2->data(), len);
std::vector<char> tmp(ch, ch + len);
links.push_back(tmp);
free(ch);
}
ch_link_step_data_all.push_back(links);
}
for (std::vector<std::vector<float>>::iterator it1 = week_data_all.begin(); it1 != week_data_all.end(); ++it1) {
int len = it1->size() * sizeof(float);
char* ch = (char*)malloc(len);
memcpy(ch, it1->data(), len);
std::vector<char> tmp(ch, ch + len);
ch_week_data_all.push_back(tmp);
free(ch);
}
for (std::vector<std::vector<float>>::iterator it1 = minute_data_all.begin(); it1 != minute_data_all.end(); ++it1) {
int len = it1->size() * sizeof(float);
char* ch = (char*)malloc(len);
memcpy(ch, it1->data(), len);
std::vector<char> tmp(ch, ch + len);
ch_minute_data_all.push_back(tmp);
free(ch);
}
}
DataRecord NextBatch() {
DataRecord data;
size_t batch_end = batch_iter + batch_size;
// NOTE skip the final batch, if no enough data is provided.
if (batch_end <= link_step_data_all.size()) {
data.link_step_data_all.assign(link_step_data_all.begin() + batch_iter,
link_step_data_all.begin() + batch_end);
data.week_data_all.assign(week_data_all.begin() + batch_iter,
week_data_all.begin() + batch_end);
data.minute_data_all.assign(minute_data_all.begin() + batch_iter,
minute_data_all.begin() + batch_end);
// Prepare LoDs
data.lod1.emplace_back(0);
data.lod2.emplace_back(0);
data.lod3.emplace_back(0);
//CHECK(!data.link_step_data_all.empty()) << "empty";
//CHECK(!data.week_data_all.empty());
//CHECK(!data.minute_data_all.empty());
//CHECK_EQ(data.link_step_data_all.size(), data.week_data_all.size());
//CHECK_EQ(data.minute_data_all.size(), data.link_step_data_all.size());
for (size_t j = 0; j < data.link_step_data_all.size(); j++) {
for (const auto &d : data.link_step_data_all[j]) {
data.rnn_link_data.push_back(d);
}
data.rnn_week_datas.push_back(data.week_data_all[j]);
data.rnn_minute_datas.push_back(data.minute_data_all[j]);
// calculate lod
data.lod1.push_back(data.lod1.back() +
data.link_step_data_all[j].size());
data.lod3.push_back(data.lod3.back() + 1);
for (size_t i = 1; i < data.link_step_data_all[j].size() + 1; i++) {
data.lod2.push_back(data.lod2.back() +
data.link_step_data_all[j].size());
}
}
}
batch_iter += batch_size;
return data;
}
void Load(const std::string &path) {
std::ifstream file(path);
std::string line;
int num_lines = 0;
while (std::getline(file, line)) {
num_lines++;
std::vector<std::string> data;
split(line, ':', &data);
std::vector<std::vector<float>> link_step_data;
std::vector<std::string> link_datas;
split(data[0], '|', &link_datas);
for (auto &step_data : link_datas) {
std::vector<float> tmp;
split_to_float(step_data, ',', &tmp);
link_step_data.emplace_back(tmp);
}
// load week data
std::vector<float> week_data;
split_to_float(data[2], ',', &week_data);
// load minute data
std::vector<float> minute_data;
split_to_float(data[1], ',', &minute_data);
link_step_data_all.emplace_back(std::move(link_step_data));
week_data_all.emplace_back(std::move(week_data));
minute_data_all.emplace_back(std::move(minute_data));
}
}
};
/*void PrepareInputs(std::vector<paddle::PaddleTensor> *input_slots, DataRecord *data,
int batch_size) {
// DataRecord data(FLAGS_datapath, batch_size);
paddle::PaddleTensor lod_attention_tensor, init_zero_tensor, lod_tensor_tensor,
week_tensor, minute_tensor;
lod_attention_tensor.name = "lod_attention";
init_zero_tensor.name = "init_zero";
lod_tensor_tensor.name = "lod_tensor";
week_tensor.name = "week";
minute_tensor.name = "minute";
auto one_batch = data->NextBatch();
printf("rnn_link_data.size:%lu,\n", one_batch.rnn_link_data.size());
printf("rnn_link_data.front().size:%lu\n", one_batch.rnn_link_data.front().size());
// clang-format off
std::vector<int> rnn_link_data_shape
({static_cast<int>(one_batch.rnn_link_data.size()), static_cast<int>(one_batch.rnn_link_data.front().size())});
//LOG(INFO) << "set 1";
lod_attention_tensor.shape.assign({1, 2});
lod_attention_tensor.lod.assign({one_batch.lod1, one_batch.lod2});
//LOG(INFO) << "set 1";
init_zero_tensor.shape.assign({batch_size, 15});
init_zero_tensor.lod.assign({one_batch.lod3});
//LOG(INFO) << "set 1";
lod_tensor_tensor.shape = rnn_link_data_shape;
lod_tensor_tensor.lod.assign({one_batch.lod1});
//LOG(INFO) << "set 1";
week_tensor.shape.assign({(int) one_batch.rnn_week_datas.size(), (int) one_batch.rnn_week_datas.front().size()});
week_tensor.lod.assign({one_batch.lod3});
//LOG(INFO) << "set 1";
minute_tensor.shape.assign({(int) one_batch.rnn_minute_datas.size(),
(int) one_batch.rnn_minute_datas.front().size()});
minute_tensor.lod.assign({one_batch.lod3});
// assign data
TensorAssignData(&lod_attention_tensor, std::vector<std::vector<float>>({{0, 0}}));
std::vector<float> tmp_zeros(batch_size * 15, 0.);
TensorAssignData(&init_zero_tensor, {tmp_zeros});
TensorAssignData(&lod_tensor_tensor, one_batch.rnn_link_data);
TensorAssignData(&week_tensor, one_batch.rnn_week_datas);
TensorAssignData(&minute_tensor, one_batch.rnn_minute_datas);
// clang-format on
input_slots->assign({lod_tensor_tensor, lod_attention_tensor,
init_zero_tensor, init_zero_tensor, week_tensor,
minute_tensor});
for (auto &tensor : *input_slots) {
tensor.dtype = paddle::PaddleDType::FLOAT32;
// LOG(INFO) << DescribeTensor(tensor);
}
}*/
class InputData {
public:
InputData() {}
~InputData() {}
int create(const std::string file_name, size_t buf_size,
size_t batch_size, int qps) {
bthread_mutex_init(&_mutex, NULL);
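// NOTE: file_name is currently unused; requests are built from the fixed
// dataset path below (see the commented-out loader further down).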
std::string datapath = "./data/test_features_sys";
DataRecord data(datapath, batch_size);
_data_record = data;
/*FILE* fp = fopen(file_name.c_str(), "r");
if (!fp) {
LOG(FATAL) << "Failed open data file: "
<< file_name;
return -1;
}
_data.clear();
char buffer[2048000];
std::vector<std::string> tokens;
while (fgets(buffer, sizeof(buffer), fp) != NULL) {
tokens.clear();
baidu::paddle_serving::sdk_cpp::str_split(
buffer, ",", &tokens);
std::vector<float> feature_one;
for (size_t i = 0; i < tokens.size(); i++){
feature_one.push_back(
strtof(tokens[i].c_str(), NULL));
}
_data.push_back(feature_one);
}
printf("succ load data, size:%ld\n", _data.size());
*/
for (size_t ri = 0; ri < buf_size; ri++) {
Request* req = new Request();
if (generate_one_req(*req, batch_size) != 0) {
LOG(FATAL) << "Failed generate req at: " << ri;
//fclose(fp);
return -1;
}
_req_list.push_back(req);
}
//fclose(fp);
_current = 0;
_waitingtm = 0;
_lasttm.tv_sec = _lasttm.tv_usec = 0;
if (qps == 0) {
_interval = 0;
} else if (qps < 1) {
_interval = 1000 * 1000;
} else {
_interval = 1000 * 1000 / qps;
}
LOG(INFO) << "Succ create req, size: " << buf_size
<< ", batch_size: " << batch_size;
return 0;
}
void destroy() {
size_t ds = _data.size();
for (size_t di = 0; di < ds; di++) {
_data[di].clear();
}
_data.clear();
size_t rs = _req_list.size();
for (size_t ri = 0; ri < rs; ri++) {
delete _req_list[ri];
}
_req_list.clear();
}
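// Returns the next prepared request, pacing callers so that aggregate
// throughput approximates the configured QPS: _waitingtm accumulates the
// gap (in us) between the target interval and the actually elapsed time,
// sleeps it off via select() once it exceeds one select unit, and is
// clamped on the negative side so a slow caller cannot burst to catch up.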
Request* next_req() {
bthread_mutex_lock(&_mutex);
if (_interval != 0)
{
if (_lasttm.tv_sec == 0 && _lasttm.tv_usec == 0)
{
gettimeofday(&_lasttm, NULL);
}
else
{
timeval curtm;
gettimeofday(&curtm, NULL);
long elapse =
((curtm.tv_sec - _lasttm.tv_sec) * 1000*1000 +
(curtm.tv_usec - _lasttm.tv_usec));
_waitingtm += _interval - elapse;
_lasttm = curtm;
if (_waitingtm >= SELECT_VALID_UNIT) // the smallest unit select() can honor
{
long tm_unit
= _waitingtm / SELECT_VALID_UNIT * SELECT_VALID_UNIT;
timeval tmp_tm = {tm_unit / 1000000, tm_unit % 1000000};
select(1, NULL, NULL, NULL, &tmp_tm); // sleep to throttle the request rate
}
else if (_waitingtm <= SELECT_VALID_UNIT * (-2))
{
_waitingtm = -SELECT_VALID_UNIT;
}
}
}
size_t rs = _req_list.size();
Request* req = _req_list[(_current++) % rs];
bthread_mutex_unlock(&_mutex);
return req;
}
int generate_one_req(Request& req, int batch) {
int batch_size = batch;
int i = 0;
DenseInstance* ins = req.add_instances();
ins->set_batch_size(batch_size);
for (std::vector<std::vector<std::vector<char>>>::iterator it1 = _data_record.ch_link_step_data_all.begin();
it1 != _data_record.ch_link_step_data_all.end(); ++it1) {
::baidu::infinite::map_rnn::Lines* step_data = ins->add_step_data();
for (std::vector<std::vector<char>>::iterator it2 = it1->begin(); it2 != it1->end(); ++it2) {
::baidu::infinite::map_rnn::Line* line = step_data->add_line();
line->set_value(it2->data(), it2->size());
}
if (++i == batch_size) {
break;
}
}
i = 0;
::baidu::infinite::map_rnn::Lines* week_data = ins->mutable_week_data();
for (std::vector<std::vector<char>>::iterator it1 = _data_record.ch_week_data_all.begin();
it1 != _data_record.ch_week_data_all.end(); ++it1) {
::baidu::infinite::map_rnn::Line* line = week_data->add_line();
line->set_value(it1->data(), it1->size());
if (++i == batch_size) {
break;
}
}
i = 0;
::baidu::infinite::map_rnn::Lines* minute_data = ins->mutable_minute_data();
for (std::vector<std::vector<char>>::iterator it1 = _data_record.ch_minute_data_all.begin();
it1 != _data_record.ch_minute_data_all.end(); ++it1) {
::baidu::infinite::map_rnn::Line* line = minute_data->add_line();
line->set_value(it1->data(), it1->size());
if (++i == batch_size) {
break;
}
}
/*for (int fi = 0; fi < _data.size(); ++fi) {
Tensor* tensor = ins->add_tensors();
tensor->set_name(tensor_names[fi]);
int len = 1;
for (int si = 0; si < shapes[fi].size(); ++si) {
len *= shapes[fi][si];
}
for (int si = 0; si < shapes[fi].size(); ++si) {
tensor->add_shape(shapes[fi][si]);
}
tensor->set_features(&(_data[fi][0]), len * sizeof(float));
}*/
return 0;
}
private:
DataRecord _data_record;
std::vector<std::vector<float> > _data;
std::vector<Request*> _req_list;
bthread_mutex_t _mutex;
long _waitingtm;
long _interval;
timeval _lasttm;
int _current;
};
void print_res(
const Request* req,
const Response& res,
std::string route_tag,
uint64_t elapse_ms) {
uint32_t sample_size = res.predictions_size();
LOG(INFO)
<< "Succ call predictor[wasq], res sample size: "
<< sample_size << ", the tag is: "
<< route_tag << ", elapse_ms: " << elapse_ms;
}
struct Arg {
PredictorApi* api;
InputData* input;
};
void* work(void* p) {
Arg* arg = (Arg*) p;
InputData* input = arg->input;
if (PredictorApi::instance().thrd_initialize() != 0) {
LOG(FATAL) << "Failed create bthread local predictor";
return NULL;
}
Response res;
LOG(WARNING) << "Thread entry!";
while (true) {
if (PredictorApi::instance().thrd_clear() != 0) {
LOG(FATAL) << "Failed thrd clear predictor";
return NULL;
}
Predictor* predictor = PredictorApi::instance().fetch_predictor("wasq");
if (!predictor) {
LOG(FATAL) << "Failed fetch predictor: wasq";
return NULL;
}
Request* req = input->next_req();
res.Clear();
timeval start;
gettimeofday(&start, NULL);
if (predictor->inference(req, &res) != 0) {
LOG(FATAL) << "failed call predictor with req:"
<< req->ShortDebugString();
return NULL;
}
timeval end;
gettimeofday(&end, NULL);
uint64_t elapse_ms = (end.tv_sec * 1000 + end.tv_usec / 1000)
- (start.tv_sec * 1000 + start.tv_usec / 1000);
print_res(req, res, predictor->tag(), elapse_ms);
if (PredictorApi::instance().free_predictor(predictor) != 0) {
printf("failed free predictor\n");
}
//break;
//printf("done\n");
}
if (PredictorApi::instance().thrd_finalize() != 0) {
LOG(FATAL) << "Failed thrd finalize predictor api";
}
LOG(WARNING) << "Thread exit!";
return NULL;
}
int main(int argc, char** argv) {
if (argc != 5) {
printf("Usage: demo req_buf_size batch_size threads qps\n");
return -1;
}
int req_buffer = atoi(argv[1]);
int batch_size = atoi(argv[2]);
int thread_num = atoi(argv[3]);
int qps = atoi(argv[4]);
if (PredictorApi::instance().create("./conf", "predictors.conf") != 0) {
LOG(FATAL) << "Failed create predictors api!";
return -1;
}
InputData data;
if (data.create(
"./data/test_features_sys", req_buffer, batch_size, qps) != 0) {
LOG(FATAL) << "Failed create inputdata!";
return -1;
}
Arg arg = {NULL, &data};
bthread_t* threads = new (std::nothrow) bthread_t[thread_num];
if (!threads) {
LOG(FATAL) << "Failed create threads, num:" << thread_num;
return -1;
}
for (int i = 0; i < thread_num; ++i) {
bthread_start_background(threads + i, NULL, work, &arg);
}
for (int i = 0; i < thread_num; ++i) {
bthread_join(threads[i], NULL);
}
delete[] threads;
data.destroy();
return 0;
}
/* vim: set expandtab ts=4 sw=4 sts=4 tw=100: */
/***************************************************************************
*
* Copyright (c) 2018 Baidu.com, Inc. All Rights Reserved
*
**************************************************************************/
/**
* @file demo.cpp
* @author wanlijin01(wanlijin01@baidu.com)
* @date 2018/07/09 20:12:44
* @brief
*
**/
#include "common.h"
#include <fstream>
#include "predictor_sdk.h"
#include "image_class.pb.h"
#include "builtin_format.pb.h"
using baidu::paddle_serving::sdk_cpp::Predictor;
using baidu::paddle_serving::sdk_cpp::PredictorApi;
using baidu::paddle_serving::predictor::format::XImageReqInstance;
using baidu::paddle_serving::predictor::format::DensePrediction;
using baidu::paddle_serving::predictor::image_classification::Request;
using baidu::paddle_serving::predictor::image_classification::Response;
int create_req(Request& req) {
static const char* TEST_IMAGE_PATH = "./data/images/what.jpg";
FILE* fp = fopen(TEST_IMAGE_PATH, "rb");
if (!fp) {
LOG(FATAL) << "Failed open image: " << TEST_IMAGE_PATH;
return -1;
}
fseek(fp, 0L, SEEK_END);
size_t isize = ftell(fp);
char* ibuf = new(std::nothrow) char[isize];
if (!ibuf) {
LOG(FATAL) << "Failed malloc image buffer";
fclose(fp);
return -1;
}
fseek(fp, 0, SEEK_SET);
size_t read_size = fread(ibuf, sizeof(ibuf[0]), isize, fp);
if (read_size != isize) {
    LOG(FATAL) << "Failed read image, expect: " << isize
            << " bytes, actually read: " << read_size;
    delete[] ibuf;
    fclose(fp);
    return -1;
}
XImageReqInstance* ins = req.add_instances();
if (!ins) {
LOG(FATAL) << "Failed create req instance";
delete[] ibuf;
fclose(fp);
return -1;
}
ins->set_image_binary(ibuf, isize);
ins->set_image_length(isize);
delete[] ibuf;
fclose(fp);
return 0;
}
void print_res(
const Request& req,
const Response& res,
std::string route_tag,
uint64_t elapse_ms) {
static const char* GT_TEXT_PATH
= "./data/images/groundtruth.txt";
std::vector<std::string> gt_labels;
std::ifstream file(GT_TEXT_PATH);
std::string temp_str;
while (std::getline(file, temp_str)) {
gt_labels.push_back(temp_str);
}
DensePrediction json_msg;
uint32_t sample_size = res.predictions_size();
std::string err_string;
for (uint32_t si = 0; si < sample_size; ++si) {
std::string json = res.predictions(si).response_json();
butil::IOBuf buf;
buf.append(json);
butil::IOBufAsZeroCopyInputStream wrapper(buf);
if (!json2pb::JsonToProtoMessage(&wrapper, &json_msg, &err_string)) {
LOG(FATAL) << "Failed parse json from str:" << json;
return ;
}
uint32_t csize = json_msg.categories_size();
if (csize <= 0) {
LOG(FATAL) << "sample-" << si << "has no"
<< "categories props";
continue;
}
float max_prop = json_msg.categories(0);
uint32_t max_idx = 0;
for (uint32_t ci = 1; ci < csize; ++ci) {
if (json_msg.categories(ci) > max_prop) {
max_prop = json_msg.categories(ci);
max_idx = ci;
}
}
LOG(INFO) << "sample-" << si << "'s classify result: "
<< gt_labels[max_idx] << ", prop: " << max_prop;
}
LOG(INFO)
<< "Succ call predictor[ximage], the tag is: "
<< route_tag << ", elapse_ms: " << elapse_ms;
}
int main(int argc, char** argv) {
PredictorApi api;
if (api.create("./conf", "predictors.conf") != 0) {
LOG(FATAL) << "Failed create predictors api!";
return -1;
}
Request req;
Response res;
api.thrd_initialize();
while (true) {
timeval start;
gettimeofday(&start, NULL);
api.thrd_clear();
Predictor* predictor = api.fetch_predictor("ximage");
if (!predictor) {
LOG(FATAL) << "Failed fetch predictor: wasq";
return -1;
}
req.Clear();
res.Clear();
if (create_req(req) != 0) {
return -1;
}
butil::IOBufBuilder debug_os;
if (predictor->debug(&req, &res, &debug_os) != 0) {
LOG(FATAL) << "failed call predictor with req:"
<< req.ShortDebugString();
return -1;
}
butil::IOBuf debug_buf;
debug_os.move_to(debug_buf);
LOG(INFO) << "Debug string: " << debug_buf;
timeval end;
gettimeofday(&end, NULL);
uint64_t elapse_ms = (end.tv_sec * 1000 + end.tv_usec / 1000)
- (start.tv_sec * 1000 + start.tv_usec / 1000);
print_res(req, res, predictor->tag(), elapse_ms);
res.Clear();
usleep(50);
} // while (true)
api.thrd_finalize();
api.destroy();
return 0;
}
/* vim: set expandtab ts=4 sw=4 sts=4 tw=100: */
/***************************************************************************
*
* Copyright (c) 2018 Baidu.com, Inc. All Rights Reserved
*
**************************************************************************/
/**
* @file abtest.h
* @author wanlijin01(wanlijin01@baidu.com)
* @date 2018/07/06 17:11:38
* @brief
*
**/
#ifndef BAIDU_PADDLE_SERVING_SDK_CPP_ABTEST_H
#define BAIDU_PADDLE_SERVING_SDK_CPP_ABTEST_H
#include "stub.h"
#include "common.h"
#include "factory.h"
namespace baidu {
namespace paddle_serving {
namespace sdk_cpp {
class Stub;
class Variant;
static const std::string WEIGHT_SEPERATOR = "|";
class EndpointRouterBase {
public:
typedef std::vector<Variant*> VariantList;
virtual ~EndpointRouterBase() {}
virtual int initialize(
const comcfg::ConfigUnit& conf) = 0;
virtual Variant* route(const VariantList&) = 0;
virtual Variant* route(
const VariantList&,
const void*) = 0;
};
class WeightedRandomRender : public EndpointRouterBase {
public:
static int register_self() {
INLINE_REGIST_OBJECT(WeightedRandomRender, EndpointRouterBase, -1);
return 0;
}
WeightedRandomRender() : _normalized_sum(0) {}
~WeightedRandomRender() {}
int initialize(
const comcfg::ConfigUnit& conf);
Variant* route(const VariantList&);
Variant* route(
const VariantList&,
const void*);
private:
std::vector<uint32_t> _variant_weight_list;
uint32_t _normalized_sum;
};
class VariantRouterBase {
public:
typedef std::map<std::string, Stub*> StubMap;
virtual ~VariantRouterBase() {}
virtual int initialize(
const comcfg::ConfigUnit& conf) = 0;
virtual Stub* route(const StubMap&) = 0;
virtual Stub* route(
const StubMap&,
const void*) = 0;
};
} // sdk_cpp
} // paddle_serving
} // baidu
#endif //BAIDU_PADDLE_SERVING_SDK_CPP_ABTEST_H
/* vim: set expandtab ts=4 sw=4 sts=4 tw=100: */
/***************************************************************************
*
* Copyright (c) 2018 Baidu.com, Inc. All Rights Reserved
*
**************************************************************************/
/**
* @file common.h
* @author wanlijin01(wanlijin01@baidu.com)
* @date 2018/07/09 20:24:19
* @brief
*
**/
#ifndef BAIDU_PADDLE_SERVING_CPP_SDK_COMMON_H
#define BAIDU_PADDLE_SERVING_CPP_SDK_COMMON_H
#include <sys/types.h>
#include <unistd.h>
#include <stdlib.h>
#include <stdint.h>
#include <pthread.h>
#include <strings.h>
#include <getopt.h>
#include <exception>
#include <google/protobuf/message.h>
#include <boost/unordered_map.hpp>
#include <gflags/gflags.h>
#include <bvar/bvar.h>
#include <butil/logging.h>
#include <butil/time.h>
#include <butil/object_pool.h>
#include <brpc/channel.h>
#include <brpc/parallel_channel.h>
#include <brpc/traceprintf.h>
#include <bthread/bthread.h>
#include <error.h>
#include <json2pb/json_to_pb.h>
#include "Configure.h"
#include "utils.h"
#endif //BAIDU_PADDLE_SERVING_CPP_SDK_COMMON_H
/* vim: set expandtab ts=4 sw=4 sts=4 tw=100: */
/***************************************************************************
*
* Copyright (c) 2018 Baidu.com, Inc. All Rights Reserved
*
**************************************************************************/
/**
* @file config_manager.h
* @author wanlijin01(wanlijin01@baidu.com)
* @date 2018/07/09 15:28:43
* @brief
*
**/
#ifndef BAIDU_PADDLE_SERVING_SDK_CPP_CONFIG_MANAGER_H
#define BAIDU_PADDLE_SERVING_SDK_CPP_CONFIG_MANAGER_H
#include "common.h"
#include "endpoint_config.h"
namespace baidu {
namespace paddle_serving {
namespace sdk_cpp {
class EndpointConfigManager {
public:
static EndpointConfigManager& instance() {
static EndpointConfigManager singleton;
return singleton;
}
EndpointConfigManager()
: _last_update_timestamp(0),
_current_endpointmap_id(1) {}
int create(const char* path, const char* file);
int load();
bool need_reload() {
return false;
}
int reload() {
if (!need_reload()) {
LOG(INFO) << "Noneed reload endpoin config";
return 0;
}
return load();
}
const std::map<std::string, EndpointInfo>& config() {
return _ep_map;
}
const std::map<std::string, EndpointInfo>& config() const {
return _ep_map;
}
private:
int init_one_variant(
const comcfg::ConfigUnit& conf,
VariantInfo& var);
int init_one_endpoint(
const comcfg::ConfigUnit& conf,
EndpointInfo& ep,
const VariantInfo& default_var);
int merge_variant(
const VariantInfo& default_var,
const comcfg::ConfigUnit& conf,
VariantInfo& merged_var);
int parse_tag_values(
SplitParameters& split);
private:
std::map<std::string, EndpointInfo> _ep_map;
std::string _endpoint_config_path;
std::string _endpoint_config_file;
uint32_t _last_update_timestamp;
uint32_t _current_endpointmap_id;
};
} // sdk_cpp
} // paddle_serving
} // baidu
#endif //BAIDU_PADDLE_SERVING_SDK_CPP_CONFIG_MANAGER_H
/* vim: set expandtab ts=4 sw=4 sts=4 tw=100: */
/***************************************************************************
*
* Copyright (c) 2018 Baidu.com, Inc. All Rights Reserved
*
**************************************************************************/
/**
* @file include/resource.h
* @author wanlijin(wanlijin01@baidu.com)
* @date 2018/07/06 17:06:25
* @brief
*
**/
#ifndef BAIDU_PADDLE_SERVING_SDK_ENDPOINT_H
#define BAIDU_PADDLE_SERVING_SDK_ENDPOINT_H
#include "common.h"
#include "endpoint_config.h"
#include "stub.h"
#include "variant.h"
#include "predictor.h"
namespace baidu {
namespace paddle_serving {
namespace sdk_cpp {
class Endpoint {
friend class EndpointRouterBase;
public:
virtual ~Endpoint() {}
Endpoint() {
_variant_list.clear();
}
int initialize(const EndpointInfo& ep_info);
int thrd_initialize();
int thrd_clear();
int thrd_finalize();
Predictor* get_predictor(const void* params);
Predictor* get_predictor();
int ret_predictor(Predictor* predictor);
const std::string& endpoint_name() const {
return _endpoint_name;
}
private:
int initialize_variant(
const VariantInfo& var_info,
const std::string& service,
const std::string& ep_name,
std::vector<Stub*>& stubs);
private:
std::string _endpoint_name;
std::vector<Variant*> _variant_list;
};
} // sdk_cpp
} // paddle_serving
} // baidu
#endif //BAIDU_PADDLE_SERVING_SDK_ENDPOINT_H
/* vim: set expandtab ts=4 sw=4 sts=4 tw=100: */
/***************************************************************************
*
* Copyright (c) 2018 Baidu.com, Inc. All Rights Reserved
*
**************************************************************************/
/**
* @file endpoint_config.h
* @author wanlijin01(wanlijin01@baidu.com)
* @date 2018/07/12 15:17:56
* @brief
*
**/
#include "common.h"
#ifndef BAIDU_PADDLE_SERVING_SDK_CPP_ENDPOINT_CONFIG_H
#define BAIDU_PADDLE_SERVING_SDK_CPP_ENDPOINT_CONFIG_H
namespace baidu {
namespace paddle_serving {
namespace sdk_cpp {
#define PARSE_CONF_ITEM(conf, item, name, fail) \
do { \
try { \
item.set(conf[name]); \
} catch (comcfg::NoSuchKeyException& e) { \
LOG(INFO) << "Not found key in configue: " << name;\
} catch (comcfg::ConfigException& e) { \
LOG(FATAL) << "Error config, key: " << name; \
return fail; \
} catch (...) { \
LOG(FATAL) << "Unkown error accurs when load config";\
return fail; \
} \
} while (0)
#define ASSIGN_CONF_ITEM(dest, src, fail) \
do { \
if (!src.init) { \
LOG(FATAL) << "Cannot assign an unintialized item: " \
<< #src << " to dest: " << #dest; \
return fail; \
} \
dest = src.value; \
} while (0)
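// Illustrative usage of the two macros above (a sketch; the key name
// "ConnectTimeoutMilliSec" is hypothetical, real call sites live
// elsewhere in the SDK, e.g. init_channel() in stub_impl.hpp):
//
//   ConfigItem<int32_t> tmo_conn;
//   PARSE_CONF_ITEM(conf, tmo_conn, "ConnectTimeoutMilliSec", -1);
//   int32_t timeout_ms = 0;
//   ASSIGN_CONF_ITEM(timeout_ms, tmo_conn, -1); // fails if never parsed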
template<typename T> struct type_traits {
static type_traits<T> tag;
};
template<typename T> struct ConfigItem {
T value;
bool init;
ConfigItem() : init(false) {}
void set(const comcfg::ConfigUnit& unit) {
set_impl(type_traits<T>::tag, unit);
init = true;
}
void set_impl(type_traits<int16_t>&,
const comcfg::ConfigUnit& unit) {
value = unit.to_int16();
}
void set_impl(type_traits<int32_t>&,
const comcfg::ConfigUnit& unit) {
value = unit.to_int32();
}
void set_impl(type_traits<int64_t>&,
const comcfg::ConfigUnit& unit) {
value = unit.to_int64();
}
void set_impl(type_traits<uint16_t>&,
const comcfg::ConfigUnit& unit) {
value = unit.to_uint16();
}
void set_impl(type_traits<uint32_t>&,
const comcfg::ConfigUnit& unit) {
value = unit.to_uint32();
}
void set_impl(type_traits<uint64_t>&,
const comcfg::ConfigUnit& unit) {
value = unit.to_uint64();
}
void set_impl(type_traits<float>&,
const comcfg::ConfigUnit& unit) {
value = unit.to_float();
}
void set_impl(type_traits<double>&,
const comcfg::ConfigUnit& unit) {
value = unit.to_double();
}
void set_impl(type_traits<std::string>&,
const comcfg::ConfigUnit& unit) {
value = unit.to_cstr();
}
};
struct Connection {
ConfigItem<int32_t> tmo_conn;
ConfigItem<int32_t> tmo_rpc;
ConfigItem<int32_t> tmo_hedge;
ConfigItem<uint32_t> cnt_retry_conn;
ConfigItem<uint32_t> cnt_retry_hedge;
ConfigItem<uint32_t> cnt_maxconn_per_host;
ConfigItem<std::string> type_conn;
};
struct NamingInfo {
ConfigItem<std::string> cluster_naming;
ConfigItem<std::string> load_balancer;
ConfigItem<std::string> cluster_filter;
};
struct RpcParameters {
ConfigItem<std::string> protocol;
ConfigItem<int32_t> compress_type;
ConfigItem<uint32_t> package_size;
ConfigItem<std::string> route_tag;
ConfigItem<uint32_t> max_channel;
};
struct SplitParameters {
ConfigItem<std::string> split_tag;
ConfigItem<std::string> tag_cands_str;
std::vector<std::string> tag_values;
};
struct VariantInfo {
VariantInfo() : ab_test(NULL) {}
Connection connection;
NamingInfo naminginfo;
RpcParameters parameters;
SplitParameters splitinfo;
void* ab_test;
};
struct EndpointInfo {
EndpointInfo() : ab_test(NULL) {}
std::string endpoint_name;
std::string stub_service;
std::vector<VariantInfo> vars;
void* ab_test;
};
} // sdk_cpp
} // paddle_serving
} // baidu
#endif //BAIDU_PADDLE_SERVING_SDK_CPP_ENDPOINT_CONFIG_H
/* vim: set expandtab ts=4 sw=4 sts=4 tw=100: */
/***************************************************************************
*
* Copyright (c) 2018 Baidu.com, Inc. All Rights Reserved
*
**************************************************************************/
/**
* @file include/factory.h
* @author wanlijin01(wanlijin01@baidu.com)
* @date 2018/07/10 22:09:57
* @brief
*
**/
#ifndef BAIDU_PADDLE_SERVING_SDK_CPP_FACTORY_H
#define BAIDU_PADDLE_SERVING_SDK_CPP_FACTORY_H
#include "common.h"
#include "stub_impl.h"
#include "glog/raw_logging.h"
namespace baidu {
namespace paddle_serving {
namespace sdk_cpp {
#define INLINE_REGIST_OBJECT(D, B, E) \
do { \
Factory<D, B>* factory = \
new (std::nothrow) Factory<D, B>(); \
if (factory == NULL \
|| FactoryPool<B>::instance().register_factory(\
#D, factory) != 0) { \
RAW_LOG_FATAL("Failed regist factory: %s->%s in macro!", #D, #B); \
return E; \
} \
} while (0)
#define DECLARE_FACTORY_OBJECT(D, B) \
static int regist(const std::string& tag) { \
Factory<D, B>* factory = \
new (std::nothrow) Factory<D, B>();\
if (factory == NULL \
|| FactoryPool<B>::instance().register_factory(\
tag, factory) != 0) { \
RAW_LOG_FATAL("Failed regist factory: %s in macro!", #D);\
return -1; \
} \
return 0; \
}
#define PDS_STR_CAT(a, b) PDS_STR_CAT_I(a, b)
#define PDS_STR_CAT_I(a, b) a ## b
#define DEFINE_FACTORY_OBJECT(D) \
__attribute__((constructor)) static void PDS_STR_CAT(GlobalRegistObject, __LINE__)(void) \
{ \
D::regist(#D); \
}
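// A minimal sketch of how the declare/define pair above is intended to be
// used (MyRouter is a hypothetical type, not part of the SDK):
//
//   class MyRouter : public EndpointRouterBase {
//   public:
//       DECLARE_FACTORY_OBJECT(MyRouter, EndpointRouterBase)
//       ...
//   };
//   DEFINE_FACTORY_OBJECT(MyRouter); // registers tag "MyRouter" at load time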
#define REGIST_FACTORY_OBJECT_IMPL(D, B) \
__attribute__((constructor)) static void PDS_STR_CAT(GlobalRegistObject, __LINE__)(void) \
{ \
::baidu::paddle_serving::sdk_cpp::Factory<D, B>* factory =\
new (::std::nothrow) ::baidu::paddle_serving::sdk_cpp::Factory<D, B>();\
if (factory == NULL \
|| ::baidu::paddle_serving::sdk_cpp::FactoryPool<B>::instance().register_factory(\
#D, factory) != 0) { \
RAW_LOG_FATAL("Failed regist factory: %s->%s in macro!", #D, #B); \
return ; \
} \
return ; \
}
#define REGIST_FACTORY_OBJECT_IMPL_WITH_TAG(D, B, T)\
__attribute__((constructor)) static void PDS_STR_CAT(GlobalRegistObject, __LINE__)(void) \
{ \
::baidu::paddle_serving::sdk_cpp::Factory<D, B>* factory =\
new (::std::nothrow) ::baidu::paddle_serving::sdk_cpp::Factory<D, B>();\
if (factory == NULL \
|| ::baidu::paddle_serving::sdk_cpp::FactoryPool<B>::instance().register_factory(\
T, factory) != 0) { \
RAW_LOG_FATAL("Failed regist factory: %s->%s, tag %s in macro!", #D, #B, T); \
return ; \
} \
return ; \
}
#define REGIST_ABTEST_OBJECT(D) \
REGIST_FACTORY_OBJECT_IMPL( \
D, \
::baidu::paddle_serving::sdk_cpp::ABTestRouterBase)
#define REGIST_ABTEST_OBJECT_WITH_TAG(D, T) \
REGIST_FACTORY_OBJECT_IMPL_WITH_TAG( \
D, \
::baidu::paddle_serving::sdk_cpp::ABTestRouterBase,\
T)
#define REGIST_STUB_OBJECT_WITH_TAG(D, C, R, I, O, T) \
__attribute__((constructor)) static void PDS_STR_CAT(GlobalRegistObject, __LINE__)(void) \
{ \
::baidu::paddle_serving::sdk_cpp::Factory< \
::baidu::paddle_serving::sdk_cpp::StubImpl<D, C, R, I, O>,\
::baidu::paddle_serving::sdk_cpp::Stub>* factory = \
new (::std::nothrow) ::baidu::paddle_serving::sdk_cpp::Factory< \
::baidu::paddle_serving::sdk_cpp::StubImpl<D, C, R, I, O>,\
::baidu::paddle_serving::sdk_cpp::Stub>(); \
if (factory == NULL \
|| ::baidu::paddle_serving::sdk_cpp::FactoryPool< \
::baidu::paddle_serving::sdk_cpp::Stub>::instance().register_factory(\
T, factory) != 0) { \
RAW_LOG_FATAL("Failed regist factory: %s->Stub, tag: %s in macro!", #D, T); \
return ; \
} \
return ; \
}
class Stub;
class EndpointRouterBase;
class VariantRouterBase;
template<typename B>
class FactoryBase {
public:
virtual B* gen() = 0;
virtual void del(B* obj) = 0;
};
template<typename D, typename B>
class Factory : public FactoryBase<B> {
public:
B* gen() {
return new(std::nothrow) D();
}
void del(B* obj) {
delete dynamic_cast<D*>(obj);
}
};
template<typename B>
class FactoryPool {
public:
static FactoryPool<B>& instance() {
static FactoryPool<B> singleton;
return singleton;
}
int register_factory(const std::string& tag,
FactoryBase<B>* factory) {
typename std::map<std::string, FactoryBase<B>*>::iterator it
= _pool.find(tag);
if (it != _pool.end()) {
RAW_LOG_FATAL("Insert duplicate with tag: %s", tag.c_str());
return -1;
}
std::pair<
typename std::map<std::string, FactoryBase<B>*>::iterator,
bool> r = _pool.insert(std::make_pair(tag, factory));
if (!r.second) {
RAW_LOG_FATAL("Failed insert new factory with: %s", tag.c_str());
return -1;
}
RAW_LOG_INFO("Succ insert one factory, tag: %s, base type %s", tag.c_str(), typeid(B).name());
return 0;
}
B* generate_object(const std::string& tag) {
typename std::map<std::string, FactoryBase<B>*>::iterator it
= _pool.find(tag);
if (it == _pool.end() || it->second == NULL) {
RAW_LOG_FATAL("Not found factory pool, tag: %s, pool size: %u", tag.c_str(), _pool.size());
return NULL;
}
return it->second->gen();
}
template<typename D>
void return_object(B* object) {
Factory<D, B> factory;
factory.del(object);
}
private:
std::map<std::string, FactoryBase<B>*> _pool;
};
typedef FactoryPool<Stub> StubFactory;
typedef FactoryPool<brpc::CallMapper> CallMapperFactory;
typedef FactoryPool<brpc::ResponseMerger> ResponseMergerFactory;
typedef FactoryPool<EndpointRouterBase> EndpointRouterFactory;
typedef FactoryPool<VariantRouterBase> VariantRouterFactory;
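// Example (sketch): once WeightedRandomRender::register_self() has run,
// a router can be produced by tag through one of the pool typedefs above:
//
//   EndpointRouterBase* router =
//       EndpointRouterFactory::instance().generate_object(
//               "WeightedRandomRender");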
} // sdk_cpp
} // paddle_serving
} // baidu
#endif //BAIDU_PADDLE_SERVING_SDK_CPP_FACTORY_H
/* vim: set expandtab ts=4 sw=4 sts=4 tw=100: */
/***************************************************************************
*
* Copyright (c) 2018 Baidu.com, Inc. All Rights Reserved
*
**************************************************************************/
/**
* @file include/predictor.h
* @author wanlijin01(com@baidu.com)
* @date 2018/07/05 16:53:43
* @brief
*
**/
#ifndef BAIDU_PADDLE_SERVING_SDK_CPP_PREDICTOR_H
#define BAIDU_PADDLE_SERVING_SDK_CPP_PREDICTOR_H
#include "stub.h"
#include "common.h"
#include "endpoint_config.h"
namespace baidu {
namespace paddle_serving {
namespace sdk_cpp {
#define GET_OBJECT_FROM_POOL(param, T, err) \
do { \
param = butil::get_object<T>(); \
if (!param) { \
LOG(FATAL) << "Failed get object from pool" \
<< ", arg:" << #param << "type: " \
<< #T; \
return err; \
} \
} while (0)
static const brpc::CompressType compress_types[] = {
brpc::COMPRESS_TYPE_NONE,
brpc::COMPRESS_TYPE_SNAPPY,
brpc::COMPRESS_TYPE_GZIP,
brpc::COMPRESS_TYPE_ZLIB,
brpc::COMPRESS_TYPE_LZ4};
typedef void (*DoneType) (google::protobuf::Message* res,
brpc::Controller* controller);
template<typename Arg1, typename Arg2>
class FunctionClosure : public ::google::protobuf::Closure {
public:
typedef void (*FunctionType)(Arg1* arg1, Arg2* arg2);
FunctionClosure() {}
~FunctionClosure() {}
int init(FunctionType function, bool self_deleting,
bool arg1_deleting, bool arg2_deleting,
Arg1* arg1 = NULL, Arg2* arg2 = NULL);
void Run();
private:
FunctionType _function;
Arg1* _arg1;
Arg2* _arg2;
bool _self_deleting;
bool _arg1_deleting;
bool _arg2_deleting;
};
class InterfaceAdaptor {
public:
typedef google::protobuf::Message RequestMessage;
typedef google::protobuf::Message ResponseMessage;
virtual int partition(RequestMessage& request, std::vector<RequestMessage*>& out) = 0;
virtual int merge(std::vector<ResponseMessage*>& response, ResponseMessage& out) = 0;
};
class EchoAdaptor : public InterfaceAdaptor {
public:
typedef google::protobuf::Message RequestMessage;
typedef google::protobuf::Message ResponseMessage;
int partition(RequestMessage& request, std::vector<RequestMessage*>& out) {
return 0;
}
int merge(std::vector<ResponseMessage*>& response, ResponseMessage& out) {
return 0;
}
};
class Predictor {
public:
// synchronize interface
virtual int inference(
google::protobuf::Message* req,
google::protobuf::Message* res) = 0;
// asynchronize interface
virtual int inference(
google::protobuf::Message* req,
google::protobuf::Message* res,
DoneType done,
brpc::CallId* cid = NULL) = 0;
// synchronize interface
virtual int debug(
google::protobuf::Message* req,
google::protobuf::Message* res,
butil::IOBufBuilder* debug_os) = 0;
// un-blocked interface
virtual int send_inference(
google::protobuf::Message* req,
google::protobuf::Message* res) = 0;
virtual int recv_inference() = 0;
virtual void cancel_inference() = 0;
virtual const char* tag() = 0;
virtual const google::protobuf::Service* service() = 0;
virtual const brpc::Controller* controller() = 0;
virtual const google::protobuf::RpcChannel* channel() = 0;
virtual const Stub* stub() = 0;
virtual bool is_inited() = 0;
};
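// Illustrative asynchronous usage (a sketch; on_infer_done is a
// hypothetical callback matching DoneType, and res/cntl are recycled
// by the closure machinery after it returns):
//
//   static void on_infer_done(google::protobuf::Message* res,
//                             brpc::Controller* cntl) {
//       // consume *res here
//   }
//   ...
//   brpc::CallId cid;
//   predictor->inference(req, res, on_infer_done, &cid);
//   // brpc::Join(cid); // optionally block until the callback has run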
template<typename T>
class PredictorImpl : public Predictor {
public:
typedef google::protobuf::MethodDescriptor MethodDescriptor;
PredictorImpl() : _service(NULL), _stub(NULL), _infer(NULL),
_debug(NULL), _channel(NULL), _inited(false) {
// _inferid = 0;
}
~PredictorImpl() {}
int init(
google::protobuf::RpcChannel* chnl,
T* service,
const MethodDescriptor* infer,
const MethodDescriptor* debug,
const RpcParameters& options,
Stub* stub,
const std::string& tag);
int reset(
const RpcParameters& options,
brpc::Controller& cntl);
int deinit();
bool is_inited() {
return _inited;
}
// synchronous interface
int inference(
google::protobuf::Message* req,
google::protobuf::Message* res);
// asynchronous interface
int inference(
google::protobuf::Message* req,
google::protobuf::Message* res,
DoneType done,
brpc::CallId* cid = NULL);
// synchronous debug interface
int debug(
google::protobuf::Message* req,
google::protobuf::Message* res,
butil::IOBufBuilder* debug_os);
// semi-synchronous (non-blocking) interface
int send_inference(
google::protobuf::Message* req,
google::protobuf::Message* res);
// semi-synchronous (non-blocking) interface
int recv_inference();
// semi-synchronous (non-blocking) interface
void cancel_inference();
const char* tag();
const google::protobuf::Service* service() {
return _service;
}
const brpc::Controller* controller() {
return &_cntl;
}
const google::protobuf::RpcChannel* channel() {
return _channel;
}
const Stub* stub() {
return _stub;
}
private:
T* _service;
Stub* _stub;
const MethodDescriptor* _infer;
const MethodDescriptor* _debug;
google::protobuf::RpcChannel* _channel;
brpc::Controller _cntl;
brpc::CallId _inferid;
RpcParameters _options;
std::string _tag;
bool _inited;
};
} // sdk_cpp
} // paddle_serving
} // baidu
#include "predictor.hpp"
#endif //BAIDU_PADDLE_SERVING_SDK_CPP_PREDICTOR_H
/* vim: set expandtab ts=4 sw=4 sts=4 tw=100: */
#ifndef BAIDU_PADDLE_SERVING_SDK_CPP_PREDICTOR_HPP
#define BAIDU_PADDLE_SERVING_SDK_CPP_PREDICTOR_HPP
namespace baidu {
namespace paddle_serving {
namespace sdk_cpp {
class MetricScope;
class Stub;
template<typename T, typename C, typename R, typename I, typename O>
class StubImpl;
template<typename Arg1, typename Arg2>
inline ::google::protobuf::Closure* NewClosure(
void (*function)(Arg1*, Arg2*),
Arg1* arg1 = NULL, Arg2* arg2 = NULL) {
FunctionClosure<Arg1, Arg2>* closure = butil::get_object<
FunctionClosure<Arg1, Arg2> >();
if (closure) {
if (closure->init(function, true, false, true,
arg1, arg2) != 0) {
LOG(FATAL) << "Failed create closure objects";
return NULL;
}
}
return closure;
}
template<typename Arg1, typename Arg2>
int FunctionClosure<Arg1, Arg2>::init(
FunctionType function, bool self_deleting,
bool arg1_deleting, bool arg2_deleting,
Arg1* arg1, Arg2* arg2) {
_function = function;
// capture the arguments: Run() invokes _function(_arg1, _arg2)
_arg1 = arg1;
_arg2 = arg2;
_self_deleting = self_deleting;
_arg1_deleting = arg1_deleting;
_arg2_deleting = arg2_deleting;
if (arg2 == NULL) {
GET_OBJECT_FROM_POOL(_arg2, Arg2, -1);
_arg2_deleting = true;
}
return 0;
}
template<typename Arg1, typename Arg2>
void FunctionClosure<Arg1, Arg2>::Run() {
// capture flags before running: this closure may be returned to the
// object pool below (arg1 stays owned by the caller in this SDK)
bool self_delete = _self_deleting;
bool arg2_delete = _arg2_deleting;
_function(_arg1, _arg2);
if (self_delete) {
butil::return_object(this);
}
if (arg2_delete) {
butil::return_object(_arg2);
}
}
template<typename T> int PredictorImpl<T>::init(
google::protobuf::RpcChannel* chnl,
T* service,
const MethodDescriptor* infer,
const MethodDescriptor* debug,
const RpcParameters& options,
Stub* stub,
const std::string& tag) {
MetricScope metric(stub, "rpc_init");
butil::Timer tt(butil::Timer::STARTED);
_service = service;
_channel = chnl;
_infer = infer;
_debug = debug;
_options = options;
_stub = stub;
_tag = tag;
reset(_options, _cntl);
_inited = true;
return 0;
}
template<typename T> int PredictorImpl<T>::reset(
const RpcParameters& options,
brpc::Controller& cntl) {
cntl.Reset();
if (options.compress_type.init) {
    // guard against an out-of-range compress type from config
    int32_t ct = options.compress_type.value;
    if (ct < 0 || ct >= static_cast<int32_t>(
            sizeof(compress_types) / sizeof(compress_types[0]))) {
        LOG(FATAL) << "Invalid compress_type: " << ct;
        return -1;
    }
    cntl.set_request_compress_type(compress_types[ct]);
}
return 0;
}
template<typename T> int PredictorImpl<T>::deinit() {
// do nothing
_inited = false;
return 0;
}
template<typename T> int PredictorImpl<T>::inference(
google::protobuf::Message* req,
google::protobuf::Message* res) {
MetricScope metric(_stub, "infer_sync");
_service->CallMethod(_infer, &_cntl, req, res, NULL);
if (_cntl.Failed()) {
LOG(WARNING)
<< "inference call failed, message: "
<< _cntl.ErrorText();
_stub->update_average(1, "failure");
return -1;
}
return 0;
}
template<typename T> int PredictorImpl<T>::inference(
google::protobuf::Message* req,
google::protobuf::Message* res,
DoneType done,
brpc::CallId* cid) {
MetricScope metric(_stub, "infer_async");
// The async interface must not use this predictor's own controller
// member; instead a standalone controller is acquired from the object
// pool, and it may only be released after the async callback has
// finished. That release is managed automatically by NewClosure, so
// callers need not care about it.
brpc::Controller* cntl
= butil::get_object<brpc::Controller>();
if (!cntl || reset(_options, *cntl) != 0) {
LOG(FATAL) << "Failed get controller from object pool,"
<< "cntl is null: " << (cntl == NULL);
_stub->update_average(1, "failure");
return -1;
}
if (cid != NULL) { // you can join this rpc with cid
*cid = cntl->call_id();
}
_service->CallMethod(_infer, cntl, req, res, NewClosure(
done, res, cntl));
return 0;
}
template<typename T> int PredictorImpl<T>::debug(
google::protobuf::Message* req,
google::protobuf::Message* res,
butil::IOBufBuilder* debug_os) {
MetricScope metric(_stub, "debug");
_service->CallMethod(_debug, &_cntl, req, res, NULL);
if (_cntl.Failed()) {
LOG(WARNING)
<< "inference call failed, message: "
<< _cntl.ErrorText();
_stub->update_average(1, "failure");
return -1;
}
// copy debug info from response attachment
(*debug_os) << _cntl.response_attachment();
return 0;
}
template<typename T> int PredictorImpl<T>::send_inference(
google::protobuf::Message* req,
google::protobuf::Message* res) {
MetricScope metric(_stub, "infer_send");
_inferid = _cntl.call_id();
_service->CallMethod(
_infer, &_cntl, req, res, brpc::DoNothing());
return 0;
}
template<typename T> int PredictorImpl<T>::recv_inference() {
// waiting for callback done
MetricScope metric(_stub, "infer_recv");
brpc::Join(_inferid);
if (_cntl.Failed()) {
LOG(WARNING) << "Failed recv response from rpc"
<< ", err: " << _cntl.ErrorText();
_stub->update_average(1, "failure");
return -1;
}
return 0;
}
template<typename T> void PredictorImpl<T>::cancel_inference() {
MetricScope metric(_stub, "infer_cancel");
brpc::StartCancel(_inferid);
}
template<typename T> const char* PredictorImpl<T>::tag() {
return _tag.c_str();
}
} // sdk_cpp
} // paddle_serving
} // baidu
#endif //BAIDU_PADDLE_SERVING_SDK_CPP_PREDICTOR_HPP
/***************************************************************************
*
* Copyright (c) 2018 Baidu.com, Inc. All Rights Reserved
*
**************************************************************************/
/**
* @file include/predictor_api.h
* @author wanlijin01(wanlijin01@baidu.com)
* @date 2018/07/09 17:33:59
* @brief
*
**/
#ifndef BAIDU_PADDLE_SERVING_SDK_CPP_PREDICTOR_SDK_H
#define BAIDU_PADDLE_SERVING_SDK_CPP_PREDICTOR_SDK_H
#include "stub.h"
#include "predictor.h"
#include "endpoint_config.h"
#include "endpoint.h"
#include "config_manager.h"
namespace baidu {
namespace paddle_serving {
namespace sdk_cpp {
class PredictorApi {
public:
PredictorApi() {}
int register_all();
int create(const char* path, const char* file);
int thrd_initialize();
int thrd_clear();
int thrd_finalize();
void destroy();
static PredictorApi& instance() {
static PredictorApi api;
return api;
}
Predictor* fetch_predictor(std::string ep_name) {
std::map<std::string, Endpoint*>::iterator it
= _endpoints.find(ep_name);
if (it == _endpoints.end() || !it->second) {
LOG(FATAL) << "Failed fetch predictor:"
<< ", ep_name: " << ep_name;
return NULL;
}
return it->second->get_predictor();
}
Predictor* fetch_predictor(std::string ep_name,
const void* params) {
std::map<std::string, Endpoint*>::iterator it
= _endpoints.find(ep_name);
if (it == _endpoints.end() || !it->second) {
LOG(FATAL) << "Failed fetch predictor:"
<< ", ep_name: " << ep_name;
return NULL;
}
return it->second->get_predictor(params);
}
int free_predictor(Predictor* predictor) {
const Stub* stub = predictor->stub();
if (!stub || stub->return_predictor(predictor) != 0) {
LOG(FATAL) << "Failed return predictor via stub";
return -1;
}
return 0;
}
private:
EndpointConfigManager _config_manager;
std::map<std::string, Endpoint*> _endpoints;
};
} // sdk_cpp
} // paddle_serving
} // baidu
#endif //BAIDU_PADDLE_SERVING_SDK_CPP_PREDICTOR_SDK_H
/* vim: set expandtab ts=4 sw=4 sts=4 tw=100: */
/***************************************************************************
*
* Copyright (c) 2018 Baidu.com, Inc. All Rights Reserved
*
**************************************************************************/
/**
* @file include/stub.h
* @author wanlijin(wanlijin01@baidu.com)
* @date 2018/12/04 16:42:29
* @brief
*
**/
#ifndef BAIDU_PADDLE_SERVING_SDK_CPP_STUB_H
#define BAIDU_PADDLE_SERVING_SDK_CPP_STUB_H
#include "common.h"
namespace baidu {
namespace paddle_serving {
namespace sdk_cpp {
class Predictor;
struct VariantInfo;
class Stub {
public:
typedef google::protobuf::Message Message;
virtual ~Stub() {}
virtual int initialize(const VariantInfo& var, const std::string& ep,
const std::string* tag, const std::string* tag_value) = 0;
// predictor
virtual Predictor* fetch_predictor() = 0;
virtual int return_predictor(Predictor* predictor) = 0;
virtual int return_predictor(Predictor* predictor) const = 0;
// request
virtual Message* fetch_request() = 0;
virtual int return_request(Message* request) = 0;
virtual int return_request(Message* request) const = 0;
// response
virtual Message* fetch_response() = 0;
virtual int return_response(Message* response) = 0;
virtual int return_response(Message* response) const = 0;
virtual const std::string& which_endpoint() const = 0;
// control logic for tls
virtual int thrd_initialize() = 0;
virtual int thrd_clear() = 0;
virtual int thrd_finalize() = 0;
virtual void update_average(int64_t acc, const char* name) = 0;
virtual void update_latency(int64_t acc, const char* name) = 0;
};
} // sdk_cpp
} // paddle_serving
} // baidu
#endif //BAIDU_PADDLE_SERVING_SDK_CPP_STUB_H
/* vim: set expandtab ts=4 sw=4 sts=4 tw=100: */
/***************************************************************************
*
* Copyright (c) 2018 Baidu.com, Inc. All Rights Reserved
*
**************************************************************************/
/**
* @file include/stub_impl.h
* @author wanlijin(wanlijin01@baidu.com)
* @date 2018/07/04 16:42:29
* @brief
*
**/
#ifndef BAIDU_PADDLE_SERVING_SDK_CPP_STUB_IMPL_H
#define BAIDU_PADDLE_SERVING_SDK_CPP_STUB_IMPL_H
#include "common.h"
#include "predictor.h"
#include "stub.h"
#include "endpoint_config.h"
namespace baidu {
namespace paddle_serving {
namespace sdk_cpp {
static const std::string AVG_PREFIX = "avg_";
static const std::string LTC_PREFIX = "ltc_";
class Predictor;
template<typename T>
class PredictorImpl;
static const char* INFERENCE_METHOD_NAME = "inference";
static const char* DEBUG_METHOD_NAME = "debug";
class MetricScope {
public:
MetricScope(Stub* stub, const char* routine) :
_stub(stub), _tt(butil::Timer::STARTED), _routine(routine) {
TRACEPRINTF("enter %s", routine);
}
~MetricScope() {
TRACEPRINTF("exit %s", _routine.c_str());
_tt.stop();
_stub->update_latency(_tt.u_elapsed(), _routine.c_str());
}
private:
Stub* _stub;
butil::Timer _tt;
std::string _routine;
};
class TracePackScope {
public:
TracePackScope(const char* routine) :
_routine(routine), _index(-1) {
TRACEPRINTF("start pack: %s", routine);
}
TracePackScope(const char* routine, int index) :
_routine(routine), _index(index) {
TRACEPRINTF("start pack: %s, index: %d", routine, index);
}
~TracePackScope() {
if (_index >= 0) {
TRACEPRINTF("finish pack: %s, index: %d", _routine.c_str(), _index);
} else {
TRACEPRINTF("finish pack: %s", _routine.c_str());
}
}
private:
std::string _routine;
int _index;
};
class TagFilter : public brpc::NamingServiceFilter {
public:
class TagHelper {
public:
TagHelper(const std::string& kv_str) {
if (kv_str.compare("") == 0) {
return;
}
static const char TAG_DELIM = ',';
static const char KV_DELIM = ':';
std::string::size_type start_pos = 0;
std::string::size_type end_pos;
do {
end_pos = kv_str.find(TAG_DELIM, start_pos);
std::string kv_pair_str;
if (end_pos == std::string::npos) {
kv_pair_str = kv_str.substr(start_pos);
} else {
kv_pair_str = kv_str.substr(start_pos, end_pos - start_pos);
start_pos = end_pos + 1;
}
std::string::size_type kv_delim_pos = kv_pair_str.find(KV_DELIM, 0);
if (kv_delim_pos == std::string::npos) {
LOG(FATAL) << "invalid kv pair: " << kv_pair_str.c_str();
continue;
}
std::string key = kv_pair_str.substr(0, kv_delim_pos);
std::string value = kv_pair_str.substr(kv_delim_pos + 1);
_kv_map.insert(std::pair<std::string, std::string>(key, value));
} while (end_pos != std::string::npos);
}
bool container(const std::string& k, const std::string& v) const {
std::map<std::string, std::string>::const_iterator found
= _kv_map.find(k);
if (found == _kv_map.end()) {
// key not found
return false;
}
if (v.compare(found->second) != 0) {
// value not equals
return false;
}
return true;
}
private:
std::map<std::string, std::string> _kv_map;
};
TagFilter(const std::string& key, const std::string& val) {
_key = key;
_value = val;
}
bool Accept(const brpc::ServerNode& server) const {
TagHelper helper(server.tag);
return helper.container(_key, _value);
}
private:
std::string _key;
std::string _value;
};
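// Example (sketch): accept only naming-service nodes whose tag string
// contains the pair "abtest:on" (the key/value here are hypothetical).
// StubImpl wires such a filter into brpc::ChannelOptions::ns_filter in
// init_channel() (see stub_impl.hpp).
//
//   brpc::NamingServiceFilter* filter = new TagFilter("abtest", "on");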
class BvarWrapper {
public:
virtual void update_latency(int64_t acc) = 0;
virtual void update_average(int64_t acc) = 0;
};
class LatencyWrapper : public BvarWrapper {
public:
LatencyWrapper(const std::string& name) :
_ltc(name + "_ltc") {
}
void update_latency(int64_t acc) {
_ltc << acc;
}
void update_average(int64_t acc) {
LOG(FATAL) << "Cannot update average to a LatencyRecorder";
}
private:
bvar::LatencyRecorder _ltc;
};
class AverageWrapper : public BvarWrapper {
public:
AverageWrapper(const std::string& name) :
_win(name + "_avg", &_avg, ::bvar::FLAGS_bvar_dump_interval) {
}
void update_latency(int64_t acc) {
LOG(FATAL) << "Cannot update latency to a AverageWrapper";
}
void update_average(int64_t acc) {
_avg << acc;
}
private:
bvar::IntRecorder _avg;
bvar::Window<bvar::IntRecorder> _win;
};
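// Per-bthread scratch pools: every predictor, request and response fetched
// during one request cycle is tracked here and handed back to the object
// pool by thrd_clear().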
struct StubTLS {
StubTLS() {
predictor_pools.clear();
request_pools.clear();
response_pools.clear();
}
std::vector<Predictor*> predictor_pools;
std::vector<google::protobuf::Message*> request_pools;
std::vector<google::protobuf::Message*> response_pools;
};
template<typename T, typename C, typename R, typename I, typename O>
class StubImpl : public Stub {
public:
typedef google::protobuf::Message Message;
StubImpl()
: _channel(NULL), _pchannel(NULL), _gchannel(NULL),
_service_stub(NULL), _infer(NULL), _debug(NULL) {}
~StubImpl() {}
int initialize(const VariantInfo& var, const std::string& ep,
const std::string* tag, const std::string* tag_value);
Predictor* fetch_predictor();
int return_predictor(Predictor* predictor);
int return_predictor(Predictor* predictor) const;
Message* fetch_request();
int return_request(Message* request);
int return_request(Message* request) const;
Message* fetch_response();
int return_response(Message* response);
int return_response(Message* response) const;
int thrd_initialize();
int thrd_clear();
int thrd_finalize();
const std::string& which_endpoint() const {
return _endpoint;
}
private:
google::protobuf::RpcChannel* init_channel(
const VariantInfo& var,
brpc::NamingServiceFilter* filter = NULL);
brpc::ParallelChannel* init_pchannel(
brpc::Channel* sub_channel, uint32_t channel_count,
uint32_t package_size, const brpc::ChannelOptions& options);
StubTLS* get_tls() {
return static_cast<StubTLS*>(bthread_getspecific(_bthread_key));
}
private:
brpc::Channel* _channel;
brpc::ParallelChannel* _pchannel;
google::protobuf::RpcChannel* _gchannel;
T* _service_stub;
const google::protobuf::MethodDescriptor* _infer;
const google::protobuf::MethodDescriptor* _debug;
std::string _endpoint;
RpcParameters _options;
std::string _tag;
uint32_t _max_channel;
uint32_t _package_size;
// tls handlers
bthread_key_t _bthread_key;
// bvar variables
std::map<std::string, BvarWrapper*> _ltc_bvars;
std::map<std::string, BvarWrapper*> _avg_bvars;
mutable butil::Mutex _bvar_mutex;
#ifndef DECLARE_LATENCY
#define DECLARE_LATENCY(item) \
LatencyWrapper* _ltc_##item;
#endif
DECLARE_LATENCY(infer_sync); // synchronous requests
DECLARE_LATENCY(infer_async); // asynchronous requests
DECLARE_LATENCY(infer_send); // semi-synchronous send
DECLARE_LATENCY(infer_recv); // semi-synchronous recv
DECLARE_LATENCY(infer_cancel); // semi-synchronous cancel
DECLARE_LATENCY(debug); // debug requests
DECLARE_LATENCY(rpc_init); // rpc reset
DECLARE_LATENCY(thrd_clear); // thread-local clear
DECLARE_LATENCY(pack_map); // request sub-package mapping
DECLARE_LATENCY(pack_merge); // sub-package response merging
#undef DECLARE_LATENCY
#ifndef DECLARE_AVERAGE
#define DECLARE_AVERAGE(item) \
AverageWrapper* _avg_##item;
#endif
DECLARE_AVERAGE(failure); // number of failed requests
DECLARE_AVERAGE(item_size); // number of items per request
DECLARE_AVERAGE(pack); // number of sub-packages per request
DECLARE_AVERAGE(pack_fail); // number of failed sub-packages per request
#undef DECLARE_AVERAGE
public:
void update_average(int64_t acc, const char* name) {
std::map<std::string, BvarWrapper*>::iterator iter =
_avg_bvars.find(AVG_PREFIX + name);
if (iter == _avg_bvars.end()) {
LOG(FATAL) << "Not found average record:avg_" << name;
return ;
}
iter->second->update_average(acc);
}
void update_latency(int64_t acc, const char* name) {
std::map<std::string, BvarWrapper*>::iterator iter =
_ltc_bvars.find(LTC_PREFIX + name);
if (iter == _ltc_bvars.end()) {
LOG(FATAL) << "Not found latency record:ltc_" << name;
return ;
}
iter->second->update_latency(acc);
}
};
} // sdk_cpp
} // paddle_serving
} // baidu
#include "stub_impl.hpp"
#endif //BAIDU_PADDLE_SERVING_SDK_CPP_STUB_IMPL_H
/* vim: set expandtab ts=4 sw=4 sts=4 tw=100: */
#ifndef BAIDU_PADDLE_SERVING_SDK_CPP_STUB_IMPL_HPP
#define BAIDU_PADDLE_SERVING_SDK_CPP_STUB_IMPL_HPP
namespace baidu {
namespace paddle_serving {
namespace sdk_cpp {
template<typename T, typename C, typename R, typename I, typename O>
int StubImpl<T, C, R, I, O>::initialize(
const VariantInfo& var, const std::string& ep,
const std::string* tag, const std::string* tag_value) {
if (tag != NULL && tag_value != NULL) {
TagFilter* filter = new (std::nothrow) TagFilter(
*tag, *tag_value);
if (!filter) {
LOG(FATAL) << "Failed create tag filter, key: " << tag
<< ", value: " << tag_value;
return -1;
}
_gchannel = init_channel(var, filter);
LOG(INFO)
<< "Create stub with tag: " << *tag
<< ", " << *tag_value << ", ep: " << ep;
} else {
_gchannel = init_channel(var, NULL);
LOG(INFO) << "Create stub without tag, ep " << ep;
}
if (!_gchannel) {
LOG(FATAL) << "Failed init channel via var_info";
return -1;
}
_service_stub = new (std::nothrow) T(_gchannel);
if (!_service_stub) {
LOG(FATAL) << "Failed create stub with channel";
return -1;
}
_infer = _service_stub->GetDescriptor()->FindMethodByName(
INFERENCE_METHOD_NAME);
if (!_infer) {
LOG(FATAL) << "Failed get inference method, "
<< "method name: " << INFERENCE_METHOD_NAME;
return -1;
}
_debug = _service_stub->GetDescriptor()->FindMethodByName(
DEBUG_METHOD_NAME);
if (!_debug) {
LOG(FATAL) << "Failed get debug method, "
<< "method name: " << DEBUG_METHOD_NAME;
return -1;
}
_endpoint = ep;
if (bthread_key_create(&_bthread_key, NULL) != 0) {
LOG(FATAL) << "Failed create key for stub tls";
return -1;
}
const std::string& name
= _endpoint + "_" + _service_stub->GetDescriptor()->full_name() + "_" + _tag;
_ltc_bvars.clear();
_avg_bvars.clear();
BAIDU_SCOPED_LOCK(_bvar_mutex);
#ifndef DEFINE_LATENCY
#define DEFINE_LATENCY(item) \
do { \
_ltc_##item = new (std::nothrow) LatencyWrapper(name + "_"#item);\
if (!_ltc_##item) { \
LOG(FATAL) << "Failed create latency recorder:" \
<< name + "_"#item; \
return -1; \
} \
_ltc_bvars["ltc_"#item] = _ltc_##item; \
} while(0)
#endif
DEFINE_LATENCY(infer_sync);
DEFINE_LATENCY(infer_async);
DEFINE_LATENCY(infer_send);
DEFINE_LATENCY(infer_recv);
DEFINE_LATENCY(infer_cancel);
DEFINE_LATENCY(debug);
DEFINE_LATENCY(rpc_init);
DEFINE_LATENCY(thrd_clear);
DEFINE_LATENCY(pack_map);
DEFINE_LATENCY(pack_merge);
#undef DEFINE_LATENCY
#ifndef DEFINE_AVERAGE
#define DEFINE_AVERAGE(item) \
do { \
_avg_##item = new(std::nothrow) AverageWrapper(name + "_"#item);\
if (!_avg_##item) { \
LOG(FATAL) << "Failed create average recorder:" \
<< name + "_"#item; \
return -1; \
} \
_avg_bvars["avg_"#item] = _avg_##item; \
} while(0)
#endif
DEFINE_AVERAGE(failure);
DEFINE_AVERAGE(pack);
DEFINE_AVERAGE(item_size);
DEFINE_AVERAGE(pack_fail);
#undef DEFINE_AVERAGE
return 0;
}
template<typename T, typename C, typename R, typename I, typename O>
int StubImpl<T, C, R, I, O>::thrd_initialize() {
if (bthread_getspecific(_bthread_key) != NULL) {
LOG(WARNING) << "Already thread initialized for stub";
return 0;
}
StubTLS* tls = new (std::nothrow) StubTLS();
if (!tls || bthread_setspecific(_bthread_key, tls) != 0) {
LOG(FATAL) << "Failed binding tls data to bthread_key";
return -1;
}
LOG(WARNING) << "Succ thread initialize stub impl!";
return 0;
}
template<typename T, typename C, typename R, typename I, typename O>
int StubImpl<T, C, R, I, O>::thrd_clear() {
MetricScope metric(this, "thrd_clear");
StubTLS* tls = get_tls();
if (!tls) {
LOG(FATAL) << "Failed get tls stub object";
return -1;
}
// clear predictor
size_t ps = tls->predictor_pools.size();
for (size_t pi = 0; pi < ps; ++pi) {
Predictor* p = tls->predictor_pools[pi];
if (p && p->is_inited() && return_predictor(p) != 0) {
LOG(FATAL) << "Failed return predictor: " << pi;
return -1;
}
}
tls->predictor_pools.clear();
// clear request
size_t is = tls->request_pools.size();
for (size_t ii = 0; ii < is; ++ii) {
if (return_request(tls->request_pools[ii]) != 0) {
LOG(FATAL) << "Failed return request: " << ii;
return -1;
}
}
tls->request_pools.clear();
// clear response
size_t os = tls->response_pools.size();
for (size_t oi = 0; oi < os; ++oi) {
if (return_response(tls->response_pools[oi]) != 0) {
LOG(FATAL) << "Failed return response: " << oi;
return -1;
}
}
tls->response_pools.clear();
return 0;
}
template<typename T, typename C, typename R, typename I, typename O>
int StubImpl<T, C, R, I, O>::thrd_finalize() {
StubTLS* tls = get_tls();
if (!tls || thrd_clear() != 0) {
LOG(FATAL) << "Failed clreate tls in thrd finalize";
return -1;
}
delete tls;
return 0;
}
template<typename T, typename C, typename R, typename I, typename O>
Predictor* StubImpl<T, C, R, I, O>::fetch_predictor() {
StubTLS* tls = get_tls();
if (!tls) {
LOG(FATAL) << "Failed get tls data when fetching predictor";
return NULL;
}
PredictorImpl<T>* predictor = butil::get_object<PredictorImpl<T> >();
if (!predictor) {
LOG(FATAL) << "Failed fetch predictor";
return NULL;
}
if (predictor->init(_gchannel, _service_stub, _infer, _debug, _options,
this, _tag) != 0) {
LOG(FATAL) << "Failed init fetched predictor";
return NULL;
}
tls->predictor_pools.push_back(predictor);
return predictor;
}
template<typename T, typename C, typename R, typename I, typename O>
int StubImpl<T, C, R, I, O>::return_predictor(Predictor* predictor) {
if ((dynamic_cast<PredictorImpl<T>*>(predictor))->deinit() != 0) {
LOG(FATAL) << "Failed deinit fetched predictor";
return -1;
}
butil::return_object(dynamic_cast<PredictorImpl<T>*>(predictor));
return 0;
}
template<typename T, typename C, typename R, typename I, typename O>
int StubImpl<T, C, R, I, O>::return_predictor(Predictor* predictor) const {
if ((dynamic_cast<PredictorImpl<T>*>(predictor))->deinit() != 0) {
LOG(FATAL) << "Failed deinit fetched predictor";
return -1;
}
butil::return_object(dynamic_cast<PredictorImpl<T>*>(predictor));
return 0;
}
template<typename T, typename C, typename R, typename I, typename O>
google::protobuf::Message* StubImpl<T, C, R, I, O>::fetch_request() {
StubTLS* tls = get_tls();
if (!tls) {
LOG(FATAL) << "Failed get tls data when fetching request";
return NULL;
}
I* req = butil::get_object<I>();
if (!req) {
LOG(FATAL) << "Failed get tls request item, type: " << typeid(I).name();
return NULL;
}
req->Clear();
tls->request_pools.push_back(req);
return req;
}
template<typename T, typename C, typename R, typename I, typename O>
int StubImpl<T, C, R, I, O>::return_request(
google::protobuf::Message* request) const {
request->Clear();
butil::return_object(dynamic_cast<I*>(request));
return 0;
}
template<typename T, typename C, typename R, typename I, typename O>
int StubImpl<T, C, R, I, O>::return_request(
google::protobuf::Message* request) {
request->Clear();
butil::return_object(dynamic_cast<I*>(request));
return 0;
}
template<typename T, typename C, typename R, typename I, typename O>
google::protobuf::Message* StubImpl<T, C, R, I, O>::fetch_response() {
StubTLS* tls = get_tls();
if (!tls) {
LOG(FATAL) << "Failed get tls data when fetching response";
return NULL;
}
O* res = butil::get_object<O>();
if (!res) {
LOG(FATAL) << "Failed get tls response item, type: " << typeid(O).name();
return NULL;
}
res->Clear();
tls->response_pools.push_back(res);
return res;
}
template<typename T, typename C, typename R, typename I, typename O>
int StubImpl<T, C, R, I, O>::return_response(
google::protobuf::Message* response) const {
response->Clear();
butil::return_object(dynamic_cast<O*>(response));
return 0;
}
template<typename T, typename C, typename R, typename I, typename O>
int StubImpl<T, C, R, I, O>::return_response(
google::protobuf::Message* response) {
response->Clear();
butil::return_object(dynamic_cast<O*>(response));
return 0;
}
template<typename T, typename C, typename R, typename I, typename O>
google::protobuf::RpcChannel* StubImpl<T, C, R, I, O>::init_channel(
const VariantInfo& var, brpc::NamingServiceFilter* filter) {
brpc::ChannelOptions chn_options;
chn_options.ns_filter = filter;
// parameters
ASSIGN_CONF_ITEM(chn_options.protocol, var.parameters.protocol, NULL);
ASSIGN_CONF_ITEM(_tag, var.parameters.route_tag, NULL);
ASSIGN_CONF_ITEM(_max_channel, var.parameters.max_channel, NULL);
ASSIGN_CONF_ITEM(_package_size, var.parameters.package_size, NULL);
if (_max_channel < 1) {
LOG(ERROR) << "Invalid MaxChannelPerRequest: " << _max_channel;
return NULL;
}
// connection
ASSIGN_CONF_ITEM(chn_options.max_retry, var.connection.cnt_retry_conn, NULL);
ASSIGN_CONF_ITEM(chn_options.connect_timeout_ms, var.connection.tmo_conn, NULL);
ASSIGN_CONF_ITEM(chn_options.timeout_ms, var.connection.tmo_rpc, NULL);
ASSIGN_CONF_ITEM(chn_options.backup_request_ms, var.connection.tmo_hedge, NULL);
// connection type
std::string conn_type_str;
ASSIGN_CONF_ITEM(conn_type_str, var.connection.type_conn, NULL);
chn_options.connection_type
= brpc::StringToConnectionType(conn_type_str);
// naminginfo
std::string cluster_naming_info;
std::string cluster_loadbalancer;
ASSIGN_CONF_ITEM(cluster_naming_info, var.naminginfo.cluster_naming, NULL);
ASSIGN_CONF_ITEM(cluster_loadbalancer, var.naminginfo.load_balancer, NULL);
// brpc single channel
_channel = butil::get_object<brpc::Channel>();
if (!_channel) {
LOG(FATAL) << "Failed get channel object from butil::pool";
return NULL;
}
if (_channel->Init(
cluster_naming_info.c_str(),
cluster_loadbalancer.c_str(),
&chn_options) != 0) {
LOG(ERROR)
<< "Failed to initialize channel, path: "
<< cluster_naming_info;
return NULL;
}
// brpc parallel channel
_pchannel = init_pchannel(
_channel, _max_channel, _package_size, chn_options);
if (_pchannel) {
LOG(INFO) << "Succ create parallel channel, count: "
<< _max_channel;
return _pchannel;
}
return _channel;
}
template<typename T, typename C, typename R, typename I, typename O>
brpc::ParallelChannel* StubImpl<T, C, R, I, O>::init_pchannel(
brpc::Channel* sub_channel, uint32_t channel_count,
uint32_t package_size, const brpc::ChannelOptions& options) {
if (channel_count <= 1) { // no need to use parallel channel
LOG(INFO) << "channel count <= 1, no need to use pchannel.";
return NULL;
}
_pchannel = butil::get_object<brpc::ParallelChannel>();
if (!_pchannel) {
LOG(FATAL) << "Failed get pchannel from object pool";
return NULL;
}
brpc::ParallelChannelOptions pchan_options;
pchan_options.timeout_ms = options.timeout_ms;
if (_pchannel->Init(&pchan_options) != 0) {
LOG(FATAL) << "Failed init parallel channel with tmo_us: "
<< pchan_options.timeout_ms;
return NULL;
}
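// Add the same sub channel channel_count times; C and R are the brpc
// CallMapper/ResponseMerger pair that partition each request into
// package_size-sized sub requests and merge the partial responses.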
for (uint32_t si = 0; si < channel_count; ++si) {
if (_pchannel->AddChannel(
sub_channel,
brpc::DOESNT_OWN_CHANNEL,
new C(package_size, this),
new R(package_size, this)) != 0) {
LOG(FATAL) << "Failed add channel at: " << si
<< ", package_size:" << package_size;
return NULL;
}
}
return _pchannel;
}
} // sdk_cpp
} // paddle_serving
} // baidu
#endif //BAIDU_PADDLE_SERVING_SDK_CPP_STUB_IMPL_HPP
/***************************************************************************
*
* Copyright (c) 2018 Baidu.com, Inc. All Rights Reserved
*
**************************************************************************/
/**
* @file utils.h
* @author root(com@baidu.com)
* @date 2018/07/09 19:43:36
* @brief
*
**/
#ifndef BAIDU_PADDLE_SERVING_SDK_CPP_UTILS_H
#define BAIDU_PADDLE_SERVING_SDK_CPP_UTILS_H
#include "common.h"
namespace baidu {
namespace paddle_serving {
namespace sdk_cpp {
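// Split `source` by `delim` into *vector_spliter. Consecutive delimiters
// yield empty fields; a single trailing delimiter yields no empty last field.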
inline int str_split(
const std::string& source,
const std::string& delim,
std::vector<std::string>* vector_spliter) {
int delim_length = delim.length();
int total_length = source.length();
int last = 0;
if (delim_length == 0) {
vector_spliter->push_back(source);
return 0;
}
if (delim_length == 1) {
size_t index = source.find_first_of(delim, last);
while (index != std::string::npos) {
vector_spliter->push_back(source.substr(last,
index - last));
last = index + delim_length;
index = source.find_first_of(delim, last);
}
} else {
size_t index = source.find(delim, last);
while (index != std::string::npos) {
vector_spliter->push_back(source.substr(last,
index - last));
last = index + delim_length;
index = source.find(delim, last);
}
}
if (last < total_length) {
vector_spliter->push_back(source.substr(last,
total_length - last));
}
return 0;
}
} // sdk_cpp
} // paddle_serving
} // baidu
#endif //BAIDU_PADDLE_SERVING_SDK_CPP_UTILS_H
/* vim: set expandtab ts=4 sw=4 sts=4 tw=100: */
/***************************************************************************
*
* Copyright (c) 2018 Baidu.com, Inc. All Rights Reserved
*
**************************************************************************/
/**
* @file include/variant.h
* @author wanlijin01(wanlijin01@baidu.com)
* @date 2018/07/27 17:37:31
* @brief
*
**/
#ifndef BAIDU_PADDLE_SERVING_CPP_SDK_VARIANT_H
#define BAIDU_PADDLE_SERVING_CPP_SDK_VARIANT_H
#include "common.h"
#include "endpoint_config.h"
#include "stub.h"
#include "predictor.h"
namespace baidu {
namespace paddle_serving {
namespace sdk_cpp {
class Variant {
friend class VariantRouterBase;
public:
virtual ~Variant() {}
Variant() : _default_stub(NULL) {
_stub_map.clear();
}
int initialize(
const EndpointInfo& ep_info,
const VariantInfo& var_info);
int thrd_initialize();
int thrd_clear();
int thrd_finalize();
Predictor* get_predictor(
const void* params);
Predictor* get_predictor();
int ret_predictor(Predictor* predictor);
const std::string& variant_tag() const {
return _variant_tag;
}
private:
std::string _endpoint_name;
std::string _stub_service;
std::string _variant_tag;
std::map<std::string, Stub*> _stub_map;
Stub* _default_stub;
};
} // sdk_cpp
} // paddle_serving
} // baidu
#endif //BAIDU_PADDLE_SERVING_CPP_SDK_VARIANT_H
/* vim: set expandtab ts=4 sw=4 sts=4 tw=100: */
FILE(GLOB protos ${CMAKE_CURRENT_LIST_DIR}/*.proto)
PROTOBUF_GENERATE_SERVING_CPP(PROTO_SRCS PROTO_HDRS ${protos})
LIST(APPEND sdk_cpp_srcs ${PROTO_SRCS})
syntax="proto2";
package baidu.paddle_serving.predictor.format;
// dense format
message DenseInstance {
repeated float features = 1;
};
message DensePrediction {
repeated float categories = 1;
};
// sparse format
message SparseInstance {
repeated uint32 keys = 1;
repeated uint32 shape = 2;
repeated float values = 3;
};
message SparsePrediction {
repeated float categories = 1;
};
// int64-tensor format
message Int64TensorInstance {
repeated int64 data = 1;
repeated uint32 shape = 2;
};
message Float32TensorPredictor {
repeated float data = 1;
repeated uint32 shape = 2;
};
// x-image format
message XImageReqInstance {
required bytes image_binary = 1;
required uint32 image_length = 2;
};
message XImageResInstance {
required string response_json = 1;
};
// x-record format
message XRecordInstance {
// TODO
required bytes data = 1;
};
syntax="proto2";
import "pds_option.proto";
package baidu.paddle_serving.fluid_engine;
option cc_generic_services = true;
// default dense request
message DenseTensor {
required string name = 1;
repeated uint32 shape = 2;
required bytes features = 3;
};
message DenseInstance {
repeated DenseTensor tensors = 1;
};
message DenseRequest {
repeated DenseInstance instances = 1;
};
// default sparse request
message SparseTensor {
required string name = 1;
repeated uint32 keys = 2;
repeated uint32 shape = 3;
required bytes features = 4;
};
message SparseInstance {
repeated SparseTensor tensors = 1;
};
message SparseRequest {
repeated SparseInstance instances = 1;
};
// default response
message Prediction {
repeated float categories = 1;
};
message Response {
repeated Prediction predictions = 1;
};
service DefaultSparseService {
rpc inference(SparseRequest) returns (Response);
rpc debug(SparseRequest) returns (Response);
option (pds.options).generate_stub = true;
};
service DefaultDenseService {
rpc inference(DenseRequest) returns (Response);
rpc debug(DenseRequest) returns (Response);
option (pds.options).generate_stub = true;
};
syntax="proto2";
import "pds_option.proto";
import "builtin_format.proto";
package baidu.paddle_serving.predictor.image_classification;
option cc_generic_services = true;
message ClassifyResponse {
repeated baidu.paddle_serving.predictor.format.DensePrediction predictions = 1;
};
message Request {
repeated baidu.paddle_serving.predictor.format.XImageReqInstance instances = 1;
};
message Response {
// Each json string is serialized from ClassifyResponse predictions
repeated baidu.paddle_serving.predictor.format.XImageResInstance predictions = 1;
};
service ImageClassifyService {
rpc inference(Request) returns (Response);
rpc debug(Request) returns (Response);
option (pds.options).generate_stub = true;
};
syntax="proto2";
import "pds_option.proto";
package baidu.infinite.map_model;
option cc_generic_services = true;
message Tensor {
required string name = 1;
repeated uint32 shape = 2;
required bytes features = 3;
};
message SparseTensor {
required string name = 1;
repeated uint32 keys = 2;
repeated uint32 shape = 3;
required bytes features = 4;
};
message DenseInstance {
repeated Tensor tensors = 1;
};
message SparseInstance {
repeated SparseTensor tensors = 1;
};
message DenseRequest {
repeated DenseInstance instances = 1;
};
message Request {
repeated SparseInstance instances = 1;
};
message DensePrediction {
repeated float categories = 1;
};
message Response {
repeated DensePrediction predictions = 1;
};
service MapCnnService {
rpc inference(Request) returns (Response);
rpc debug(Request) returns (Response);
option (pds.options).generate_stub = true;
};
service MapDenseCnnService {
rpc inference(DenseRequest) returns (Response);
rpc debug(DenseRequest) returns (Response);
option (pds.options).generate_stub = true;
};
syntax="proto2";
import "pds_option.proto";
package baidu.infinite.map_rnn;
//package baidu.paddle_serving.predictor.map_rnn;
option cc_generic_services = true;
/*message Tensor {
required string name = 1;
repeated uint32 shape = 2;
required bytes features = 3;
};*/
message Line {
required bytes value = 1;
};
message Lines {
repeated Line line = 1;
};
message DenseInstance {
repeated Lines step_data = 1;
required Lines week_data = 2;
required Lines minute_data = 3;
required uint32 batch_size = 4;
};
message Request {
repeated DenseInstance instances = 1;
};
message DensePrediction {
repeated float categories = 1;
};
message Response {
repeated DensePrediction predictions = 1;
};
service MapRnnService {
rpc inference(Request) returns (Response);
rpc debug(Request) returns (Response);
//option (pds.options).generate_impl = true;
option (pds.options).generate_stub = true;
};
syntax="proto2";
import "google/protobuf/descriptor.proto";
package pds;
extend google.protobuf.FieldOptions {
optional bool pack_on = 70000 [default=false];
};
extend google.protobuf.ServiceOptions {
optional PaddleServiceOption options = 80000;
};
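// Codegen switches consumed by pdcodegen: generate_impl emits the
// server-side service implementation, generate_stub the client-side stub
// (the sdk-cpp services in this commit all set generate_stub).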
message PaddleServiceOption {
optional bool generate_impl = 1 [default = false];
optional bool generate_stub = 2 [default = false];
};
FILE(GLOB srcs ${CMAKE_CURRENT_LIST_DIR}/*.cpp)
LIST(APPEND sdk_cpp_srcs ${srcs})
/***************************************************************************
*
* Copyright (c) 2018 Baidu.com, Inc. All Rights Reserved
*
**************************************************************************/
/**
* @file ../src/abtest.cpp
* @author wanlijin01(wanlijin01@baidu.com)
* @date 2018/07/09 17:41:27
* @brief
*
**/
#include "abtest.h"
namespace baidu {
namespace paddle_serving {
namespace sdk_cpp {
int WeightedRandomRender::initialize(
const comcfg::ConfigUnit& conf) {
srand((unsigned)time(NULL));
try {
std::string weights
= conf["VariantWeightList"].to_cstr();
std::vector<std::string> splits;
if (str_split(weights, WEIGHT_SEPERATOR, &splits) != 0) {
LOG(FATAL) << "Failed split string:" <<
weights;
return -1;
}
uint32_t weight_size = splits.size();
_normalized_sum = 0;
for (uint32_t wi = 0; wi < weight_size; ++wi) {
char* end_pos = NULL;
uint32_t ratio = strtoul(
splits[wi].c_str(), &end_pos, 10);
if (end_pos == splits[wi].c_str()) {
LOG(FATAL) << "Error ratio(uint32) format:"
<< splits[wi] << " at " << wi;
return -1;
}
_variant_weight_list.push_back(ratio);
_normalized_sum += ratio;
}
if (_normalized_sum <= 0) {
LOG(FATAL) << "Zero normalized weight sum";
return -1;
}
LOG(INFO) << "Succ read weights list: " << weights
<< ", count: " << _variant_weight_list.size()
<< ", normalized: " << _normalized_sum;
} catch (comcfg::ConfigException& e) {
LOG(FATAL) << "Failed init WeightedRandomRender"
<< "from configure, err:" << e.what();
return -1;
} catch (...) {
LOG(FATAL) << "Failed init WeightedRandomRender"
<< "from configure, err message is unkown.";
return -1;
}
return 0;
}
Variant* WeightedRandomRender::route(
const VariantList& variants,
const void* params) {
return route(variants);
}
Variant* WeightedRandomRender::route(
const VariantList& variants) {
if (variants.size() != _variant_weight_list.size()) {
LOG(FATAL) << "#(Weights) is not equal #(Stubs)"
<< ", size: " << _variant_weight_list.size()
<< " vs. " << variants.size();
return NULL;
}
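// Weighted sampling: draw sample uniformly from [0, _normalized_sum) and
// walk the cumulative weights; variant ci is chosen with probability
// _variant_weight_list[ci] / _normalized_sum.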
uint32_t sample = rand() % _normalized_sum;
uint32_t cand_size = _variant_weight_list.size();
uint32_t cur_total = 0;
for (uint32_t ci = 0; ci < cand_size; ++ci) {
cur_total += _variant_weight_list[ci];
if (sample < cur_total) {
LOG(INFO) << "Sample " << sample << " on " << ci
<< ", _normalized: " << _normalized_sum
<< ", weight: " << _variant_weight_list[ci];
return variants[ci];
}
}
LOG(FATAL) << "Errors accurs in sampling, sample:"
<< sample << ", total: " << _normalized_sum;
return NULL;
}
} // sdk_cpp
} // paddle_serving
} // baidu
/* vim: set expandtab ts=4 sw=4 sts=4 tw=100: */
/***************************************************************************
*
* Copyright (c) 2018 Baidu.com, Inc. All Rights Reserved
*
**************************************************************************/
/**
* @file endpoint_config.cpp
* @author wanlijin01(com@baidu.com)
* @date 2018/07/09 15:30:09
* @brief
*
**/
#include "abtest.h"
#include "config_manager.h"
#include <brpc/server.h>
namespace baidu {
namespace paddle_serving {
namespace sdk_cpp {
int EndpointConfigManager::create(const char* path, const char* file) {
_endpoint_config_path = path;
_endpoint_config_file = file;
if (load() != 0) {
LOG(FATAL) << "Failed reload endpoint config";
return -1;
}
return 0;
}
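// Expected config layout: one DefaultVariantInfo section plus a [Predictor]
// section per endpoint; each endpoint's VariantInfo entries are merged over
// the defaults via merge_variant().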
int EndpointConfigManager::load() {
try {
comcfg::Configure conf;
if (conf.load(
_endpoint_config_path.c_str(),
_endpoint_config_file.c_str()) != 0) {
LOG(FATAL)
<< "Failed initialize endpoint list"
<< ", config: " << _endpoint_config_path
<< "/" << _endpoint_config_file;
return -1;
}
VariantInfo default_var;
if (init_one_variant(conf["DefaultVariantInfo"],
default_var) != 0) {
LOG(FATAL) << "Failed read default var conf";
return -1;
}
uint32_t ep_size = conf["Predictor"].size();
for (uint32_t ei = 0; ei < ep_size; ++ei) {
EndpointInfo ep;
if (init_one_endpoint(conf["Predictor"][ei], ep,
default_var) != 0) {
LOG(FATAL) << "Failed read endpoint info at: "
<< ei;
return -1;
}
if (_ep_map.find(ep.endpoint_name) != _ep_map.end()) {
LOG(FATAL) << "Cannot insert duplicated endpoint"
<< ", ep name: " << ep.endpoint_name;
return -1;
}
std::pair<std::map<
std::string, EndpointInfo>::iterator, bool> r
= _ep_map.insert(std::make_pair(ep.endpoint_name, ep));
if (!r.second) {
LOG(FATAL) << "Failed insert endpoint, name"
<< ep.endpoint_name;
return -1;
}
}
} catch (bsl::Exception& e) {
LOG(FATAL) << "Failed load configure, err: " << e.what();
return -1;
}
LOG(INFO)
<< "Success reload endpoint config file, id: "
<< _current_endpointmap_id;
return 0;
}
int EndpointConfigManager::init_one_endpoint(
const comcfg::ConfigUnit& conf, EndpointInfo& ep,
const VariantInfo& dft_var) {
try {
// name
ep.endpoint_name = conf["name"].to_cstr();
// stub
ep.stub_service = conf["service_name"].to_cstr();
// abtest
ConfigItem<std::string> ep_router;
PARSE_CONF_ITEM(conf, ep_router, "endpoint_router", -1);
if (ep_router.init) {
std::string endpoint_router_info
= conf["endpoint_router"].to_cstr();
EndpointRouterBase* router
= EndpointRouterFactory::instance().generate_object(
ep_router.value);
if (!router || router->initialize(
conf[endpoint_router_info.c_str()]) != 0) {
LOG(FATAL) << "Failed fetch valid ab test strategy"
<< ", name:" << endpoint_router_info;
return -1;
}
ep.ab_test = router;
}
// varlist
uint32_t var_size = conf["VariantInfo"].size();
for (uint32_t vi = 0; vi < var_size; ++vi) {
VariantInfo var;
if (merge_variant(dft_var, conf["VariantInfo"][vi],
var) != 0) {
LOG(FATAL) << "Failed merge variant info at: "
<< vi;
return -1;
}
ep.vars.push_back(var);
}
if (ep.vars.size() > 1 && ep.ab_test == NULL) {
LOG(FATAL) << "EndpointRouter must be configured, when"
<< " #Variants > 1.";
return -1;
}
LOG(INFO)
<< "Succ load one endpoint, name: " << ep.endpoint_name
<< ", count of variants: " << ep.vars.size() << ".";
} catch (bsl::Exception& e) {
LOG(FATAL) << "Exception acccurs when load endpoint conf"
<< ", message: " << e.what();
return -1;
}
return 0;
}
int EndpointConfigManager::init_one_variant(
const comcfg::ConfigUnit& conf, VariantInfo& var) {
try {
// Connect
const comcfg::ConfigUnit& conn = conf["Connection"];
PARSE_CONF_ITEM(conn, var.connection.tmo_conn,
"ConnectTimeoutMilliSec", -1);
PARSE_CONF_ITEM(conn, var.connection.tmo_rpc,
"RpcTimeoutMilliSec", -1);
PARSE_CONF_ITEM(conn, var.connection.tmo_hedge,
"HedgeRequestTimeoutMilliSec", -1);
PARSE_CONF_ITEM(conn, var.connection.cnt_retry_conn,
"ConnectRetryCount", -1);
PARSE_CONF_ITEM(conn, var.connection.cnt_retry_hedge,
"HedgeFetchRetryCount", -1);
PARSE_CONF_ITEM(conn, var.connection.cnt_maxconn_per_host,
"MaxConnectionPerHost", -1);
PARSE_CONF_ITEM(conn, var.connection.type_conn,
"ConnectionType", -1);
// Naming
const comcfg::ConfigUnit& name = conf["NamingInfo"];
PARSE_CONF_ITEM(name, var.naminginfo.cluster_naming,
"Cluster", -1);
PARSE_CONF_ITEM(name, var.naminginfo.load_balancer,
"LoadBalanceStrategy", -1);
PARSE_CONF_ITEM(name, var.naminginfo.cluster_filter,
"ClusterFilterStrategy", -1);
// Rpc
const comcfg::ConfigUnit& params = conf["RpcParameter"];
PARSE_CONF_ITEM(params, var.parameters.protocol,
"Protocol", -1);
PARSE_CONF_ITEM(params, var.parameters.compress_type,
"CompressType", -1);
PARSE_CONF_ITEM(params, var.parameters.package_size,
"PackageSize", -1);
PARSE_CONF_ITEM(params, var.parameters.max_channel,
"MaxChannelPerRequest", -1);
// Split
const comcfg::ConfigUnit& splits = conf["SplitInfo"];
PARSE_CONF_ITEM(splits, var.splitinfo.split_tag,
"split_tag_name", -1);
PARSE_CONF_ITEM(splits, var.splitinfo.tag_cands_str,
"tag_candidates", -1);
if (parse_tag_values(var.splitinfo) != 0) {
LOG(FATAL) << "Failed parse tag_values:" <<
var.splitinfo.tag_cands_str.value;
return -1;
}
// tag
PARSE_CONF_ITEM(conf, var.parameters.route_tag,
"Tag", -1);
// router
ConfigItem<std::string> var_router;
PARSE_CONF_ITEM(conf, var_router, "variant_router", -1);
if (var_router.init) {
VariantRouterBase* router
= VariantRouterFactory::instance().generate_object(
var_router.value);
if (!router || router->initialize(
conf[var_router.value.c_str()]) != 0) {
LOG(FATAL) << "Failed fetch valid variant router"
<< ", name:" << var_router.value;
return -1;
}
var.ab_test = router;
}
} catch (...) {
LOG(FATAL) << "Failed load variant from configure unit";
return -1;
}
return 0;
}
int EndpointConfigManager::merge_variant(
const VariantInfo& default_var,
const comcfg::ConfigUnit& conf,
VariantInfo& merged_var) {
merged_var = default_var;
// VariantRouter cannot be merged!
merged_var.ab_test = NULL;
return init_one_variant(conf, merged_var);
}
int EndpointConfigManager::parse_tag_values(
SplitParameters& split) {
split.tag_values.clear();
if (!split.split_tag.init || !split.tag_cands_str.init) {
LOG(WARNING) << "split info not set, skip...";
return 0;
}
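// Parse the comma-separated tag_candidates string into tag_values; empty
// segments (including one after a trailing comma) are kept as empty values.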
static const char SPLIT_DELIM = ',';
const std::string& tag_str = split.tag_cands_str.value;
std::string::size_type start_pos = 0;
std::string::size_type end_pos;
do {
end_pos = tag_str.find(SPLIT_DELIM, start_pos);
std::string tag_value_str;
if (end_pos == std::string::npos) {
tag_value_str = tag_str.substr(start_pos);
} else {
tag_value_str = tag_str.substr(
start_pos, end_pos - start_pos);
start_pos = end_pos + 1;
}
split.tag_values.push_back(tag_value_str);
} while (end_pos != std::string::npos);
return 0;
}
} // sdk_cpp
} // paddle_serving
} // baidu
/* vim: set expandtab ts=4 sw=4 sts=4 tw=100: */
/***************************************************************************
*
* Copyright (c) 2018 Baidu.com, Inc. All Rights Reserved
*
**************************************************************************/
/**
* @file endpoint.cpp
* @author wanlijin01(wanlijin01@baidu.com)
* @date 2018/07/09 14:10:44
* @brief
*
**/
#include "endpoint.h"
#include "factory.h"
namespace baidu {
namespace paddle_serving {
namespace sdk_cpp {
int Endpoint::initialize(const EndpointInfo& ep_info) {
_variant_list.clear();
_endpoint_name = ep_info.endpoint_name;
uint32_t var_size = ep_info.vars.size();
for (uint32_t vi = 0; vi < var_size; ++vi) {
const VariantInfo& var_info = ep_info.vars[vi];
Variant* var = new (std::nothrow) Variant;
if (!var || var->initialize(ep_info, var_info) != 0) {
LOG(FATAL) << "Failed initialize variant, tag:"
<< var_info.parameters.route_tag.value
<< ", endpoint: " << ep_info.endpoint_name
<< ", var index: " << vi;
return -1;
}
_variant_list.push_back(var);
LOG(INFO) << "Succ create variant: " << vi << ", endpoint:"
<< _endpoint_name;
}
return 0;
}
int Endpoint::thrd_initialize() {
uint32_t var_size = _variant_list.size();
for (uint32_t vi = 0; vi < var_size; ++vi) {
Variant* var = _variant_list[vi];
if (!var || var->thrd_initialize()) {
LOG(FATAL) << "Failed thrd initialize var: " << vi;
return -1;
}
}
LOG(WARNING) << "Succ thrd initialize all vars: " << var_size;
return 0;
}
int Endpoint::thrd_clear() {
uint32_t var_size = _variant_list.size();
for (uint32_t vi = 0; vi < var_size; ++vi) {
Variant* var = _variant_list[vi];
if (!var || var->thrd_clear()) {
LOG(FATAL) << "Failed thrd clear var: " << vi;
return -1;
}
}
LOG(INFO) << "Succ thrd clear all vars: " << var_size;
return 0;
}
int Endpoint::thrd_finalize() {
uint32_t var_size = _variant_list.size();
for (uint32_t vi = 0; vi < var_size; ++vi) {
Variant* var = _variant_list[vi];
if (!var || var->thrd_finalize()) {
LOG(FATAL) << "Failed thrd finalize var: " << vi;
return -1;
}
}
LOG(INFO) << "Succ thrd finalize all vars: " << var_size;
return 0;
}
// Routing info for full-traffic layered experiments
Predictor* Endpoint::get_predictor(
const void* params) {
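// NOTE: only the single-variant case is handled below; ab_test-driven
// routing across multiple variants is not wired up yet.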
Variant* var = NULL;
if (_variant_list.size() == 1) {
var = _variant_list[0];
}
if (!var) {
LOG(FATAL) << "get null var from endpoint.";
return NULL;
}
return var->get_predictor(params);
}
Predictor* Endpoint::get_predictor() {
if (_variant_list.size() == 1) {
if (_variant_list[0] == NULL) {
LOG(FATAL) << "Not valid variant info";
return NULL;
}
return _variant_list[0]->get_predictor();
}
return NULL;
}
int Endpoint::ret_predictor(Predictor* predictor) {
const Stub* stub = predictor->stub();
if (!stub || stub->return_predictor(
predictor) != 0) {
LOG(FATAL) << "Failed return predictor to pool";
return -1;
}
return 0;
}
} // sdk_cpp
} // paddle_serving
} // baidu
/* vim: set expandtab ts=4 sw=4 sts=4 tw=100: */
/***************************************************************************
*
* Copyright (c) 2018 Baidu.com, Inc. All Rights Reserved
*
**************************************************************************/
/**
* @file src/predictor_api.cpp
* @author wanlijin01(wanlijin01@baidu.com)
* @date 2018/07/09 17:36:13
* @brief
*
**/
#include "abtest.h"
#include "predictor_sdk.h"
namespace baidu {
namespace paddle_serving {
namespace sdk_cpp {
int PredictorApi::register_all() {
if (WeightedRandomRender::register_self() != 0) {
LOG(FATAL) << "Failed register WeightedRandomRender";
return -1;
}
LOG(WARNING) << "Succ register all components!";
return 0;
}
int PredictorApi::create(const char* path, const char* file) {
if (register_all() != 0) {
LOG(FATAL) << "Failed do register all!";
return -1;
}
if (_config_manager.create(path, file) != 0) {
LOG(FATAL) << "Failed create config manager from conf:"
<< path << "/" << file;
return -1;
}
const std::map<std::string, EndpointInfo>& map
= _config_manager.config();
std::map<std::string, EndpointInfo>::const_iterator it;
for (it = map.begin(); it != map.end(); ++it) {
const EndpointInfo& ep_info = it->second;
Endpoint* ep = new (std::nothrow) Endpoint();
if (ep->initialize(ep_info) != 0) {
LOG(FATAL) << "Failed intialize endpoint:"
<< ep_info.endpoint_name;
return -1;
}
if (_endpoints.find(
ep_info.endpoint_name) != _endpoints.end()) {
LOG(FATAL) << "Cannot insert duplicated endpoint:"
<< ep_info.endpoint_name;
return -1;
}
std::pair<std::map<std::string, Endpoint*>::iterator, bool> r
= _endpoints.insert(std::make_pair(
ep_info.endpoint_name, ep));
if (!r.second) {
LOG(FATAL) << "Failed insert endpoint:"
<< ep_info.endpoint_name;
return -1;
}
LOG(INFO) << "Succ create endpoint instance with name: "
<< ep_info.endpoint_name;
}
return 0;
}
int PredictorApi::thrd_initialize() {
std::map<std::string, Endpoint*>::const_iterator it;
for (it = _endpoints.begin(); it != _endpoints.end(); ++it) {
Endpoint* ep = it->second;
if (ep->thrd_initialize() != 0) {
LOG(FATAL) << "Failed thrd initialize endpoint:"
<< it->first;
return -1;
}
LOG(WARNING) << "Succ thrd initialize endpoint:"
<< it->first;
}
return 0;
}
int PredictorApi::thrd_clear() {
std::map<std::string, Endpoint*>::const_iterator it;
for (it = _endpoints.begin(); it != _endpoints.end(); ++it) {
Endpoint* ep = it->second;
if (ep->thrd_clear() != 0) {
LOG(FATAL) << "Failed thrd clear endpoint:"
<< it->first;
return -1;
}
LOG(INFO) << "Succ thrd clear endpoint:"
<< it->first;
}
return 0;
}
int PredictorApi::thrd_finalize() {
std::map<std::string, Endpoint*>::const_iterator it;
for (it = _endpoints.begin(); it != _endpoints.end(); ++it) {
Endpoint* ep = it->second;
if (ep->thrd_finalize() != 0) {
LOG(FATAL) << "Failed thrd finalize endpoint:"
<< it->first;
return -1;
}
LOG(INFO) << "Succ thrd finalize endpoint:"
<< it->first;
}
return 0;
}
void PredictorApi::destroy() {
// TODO
return;
}
} // sdk_cpp
} // paddle_serving
} // baidu
/* vim: set expandtab ts=4 sw=4 sts=4 tw=100: */
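For orientation, a minimal usage sketch of the client lifecycle exposed by PredictorApi above (create / thrd_initialize / thrd_clear / thrd_finalize / destroy). This is a hypothetical illustration, not part of the commit: the conf path "./conf" and file name "predictors.conf" are made-up examples, and fetching a Predictor from a concrete endpoint is elided because that accessor is not shown in this diff.
// Hypothetical sketch: conf path/file names are examples only.
#include "predictor_sdk.h"
using baidu::paddle_serving::sdk_cpp::PredictorApi;
int main() {
PredictorApi api;
// Process-level init: registers ab-test components and loads endpoint conf.
if (api.create("./conf", "predictors.conf") != 0) {
return -1;
}
// Per worker thread: initialize once, clear between requests, finalize on exit.
if (api.thrd_initialize() != 0) {
return -1;
}
// ... obtain a Predictor from an endpoint and issue RPCs here ...
api.thrd_clear();
api.thrd_finalize();
api.destroy();
return 0;
}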
/***************************************************************************
*
* Copyright (c) 2018 Baidu.com, Inc. All Rights Reserved
*
**************************************************************************/
/**
* @file src/variant.cpp
* @author wanlijin01(wanlijin01@baidu.com)
* @date 2018/07/27 17:42:21
* @brief
*
**/
#include "variant.h"
#include "factory.h"
namespace baidu {
namespace paddle_serving {
namespace sdk_cpp {
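// A Variant owns one Stub per split tag value when SplitInfo is configured,
// otherwise a single default stub that serves the whole variant.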
int Variant::initialize(const EndpointInfo& ep_info,
const VariantInfo& var_info) {
_endpoint_name = ep_info.endpoint_name;
_stub_service = ep_info.stub_service;
_variant_tag = var_info.parameters.route_tag.value;
_stub_map.clear();
const SplitParameters& split_info = var_info.splitinfo;
uint32_t tag_size = split_info.tag_values.size();
for (uint32_t ti = 0; ti < tag_size; ++ti) { // split
Stub* stub = StubFactory::instance().generate_object(
_stub_service);
const std::string& tag_value = split_info.tag_values[ti];
if (!stub || stub->initialize(var_info, ep_info.endpoint_name,
&split_info.split_tag.value, &tag_value) != 0) {
LOG(FATAL) << "Failed init stub from factory"
<< ", stub name: " << ep_info.stub_service
<< ", filter tag: " << tag_value;
return -1;
}
// Check for duplicate tag values
std::map<std::string, Stub*>::iterator iter =
_stub_map.find(tag_value);
if (iter != _stub_map.end()) {
LOG(FATAL) << "duplicated tag value: "
<< tag_value;
return -1;
}
_stub_map[tag_value] = stub;
}
if (_stub_map.size() > 0) {
LOG(INFO) << "Initialize variants from VariantInfo"
<< ", stubs count: " << _stub_map.size();
return 0;
}
Stub* stub = StubFactory::instance().generate_object(
ep_info.stub_service);
if (!stub || stub->initialize(
var_info, _endpoint_name, NULL, NULL) != 0) {
LOG(FATAL) << "Failed init stub from factory"
<< ", stub name: " << ep_info.stub_service;
return -1;
}
_default_stub = stub;
LOG(INFO) << "Succ create default debug";
return 0;
}
int Variant::thrd_initialize() {
if (_stub_map.size() <= 0) {
return _default_stub->thrd_initialize();
}
std::map<std::string, Stub*>::iterator iter;
for (iter = _stub_map.begin(); iter != _stub_map.end(); ++iter) {
Stub* stub = iter->second;
if (!stub || stub->thrd_initialize() != 0) {
LOG(FATAL) << "Failed thrd initialize stub: " << iter->first;
return -1;
}
LOG(INFO) << "Succ thrd initialize stub:" << iter->first;
}
LOG(WARNING) << "Succ thrd initialize all stubs";
return 0;
}
int Variant::thrd_clear() {
if (_stub_map.size() <= 0) {
return _default_stub->thrd_clear();
}
std::map<std::string, Stub*>::iterator iter;
for (iter = _stub_map.begin(); iter != _stub_map.end(); ++iter) {
Stub* stub = iter->second;
if (!stub || stub->thrd_clear() != 0) {
LOG(FATAL) << "Failed thrd clear stub: " << iter->first;
return -1;
}
}
return 0;
}
int Variant::thrd_finalize() {
if (_stub_map.size() <= 0) {
return _default_stub->thrd_finalize();
}
std::map<std::string, Stub*>::iterator iter;
for (iter = _stub_map.begin(); iter != _stub_map.end(); ++iter) {
Stub* stub = iter->second;
if (!stub || stub->thrd_finalize() != 0) {
LOG(FATAL) << "Failed thrd finalize stub: " << iter->first;
return -1;
}
}
return 0;
}
Predictor* Variant::get_predictor() {
if (_default_stub) {
return _default_stub->fetch_predictor();
}
return NULL;
}
Predictor* Variant::get_predictor(
const void* params) {
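// NOTE: params are currently ignored; the request falls through to the
// default stub.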
if (_default_stub) {
return _default_stub->fetch_predictor();
}
return NULL;
}
} // sdk_cpp
} // paddle_serving
} // baidu
/* vim: set expandtab ts=4 sw=4 sts=4 tw=100: */
...@@ -2,8 +2,13 @@ find_library(MKLML_LIBS NAMES libmklml_intel.so libiomp5.so)
 include(op/CMakeLists.txt)
 include(proto/CMakeLists.txt)
 add_executable(image_class ${serving_srcs})
-add_dependencies(image_class pdcodegen fluid_cpu_engine pdserving paddle_fluid)
-target_link_libraries(image_class -Wl,--whole-archive fluid_cpu_engine
+add_dependencies(image_class pdcodegen fluid_cpu_engine pdserving paddle_fluid
+        opencv_imgcodecs)
+target_include_directories(image_class PUBLIC
+        ${CMAKE_CURRENT_LIST_DIR}/
+        ${CMAKE_CURRENT_BINARY_DIR}/
+        )
+target_link_libraries(image_class opencv_imgcodecs
+        ${opencv_depend_libs} -Wl,--whole-archive fluid_cpu_engine
 -Wl,--no-whole-archive pdserving paddle_fluid ${paddle_depend_libs}
-${MKLML_LIB} ${MKLML_IOMP_LIB} -lpthread -lcrypto -lm -lrt -lssl -ldl
--lz)
+${MKLML_LIB} ${MKLML_IOMP_LIB} -lpthread -lcrypto -lm -lrt -lssl -ldl -lz)
-COMLOG_LEVEL : 2
+COMLOG_LEVEL : 16
 COMLOG_DEVICE_NUM : 2
 COMLOG_DEVICE0 : TRACE
 COMLOG_DEVICE1 : WARNING
......
 FILE(GLOB op_srcs ${CMAKE_CURRENT_LIST_DIR}/*.cpp)
-LIST(APPEND serving_srcs ${framework_srcs})
+LIST(APPEND serving_srcs ${op_srcs})
...@@ -23,14 +23,14 @@ int ClassifyOp::inference() {
 const TensorVector* in = &reader_out->tensors;
 uint32_t sample_size = in->size();
-TensorVector* out = base::get_object<TensorVector>();
+TensorVector* out = butil::get_object<TensorVector>();
 if (!out) {
 LOG(ERROR) << "Failed get tls output object failed";
 return -1;
 }
 if (sample_size <= 0) {
-LOG(TRACE) << "No samples need to to predicted";
+LOG(INFO) << "No samples need to to predicted";
 return 0;
 }
...@@ -80,9 +80,9 @@ int ClassifyOp::inference() {
 (*out)[oi].shape.clear();
 }
 out->clear();
-base::return_object<TensorVector>(out);
-LOG(DEBUG) << "Response in image classification:"
+butil::return_object<TensorVector>(out);
+LOG(INFO) << "Response in image classification:"
 << "length:" << res->ByteSize() << ","
 << "data:" << res->ShortDebugString();
......
#include "base/iobuf.h" // #include "base/iobuf.h"
#include "op/dense_op.h" #include "op/dense_op.h"
#include "framework/memory.h" #include "framework/memory.h"
#include "framework/infer.h" #include "framework/infer.h"
...@@ -69,7 +69,7 @@ int DenseOp::inference() { ...@@ -69,7 +69,7 @@ int DenseOp::inference() {
uint32_t tensor_size = req->tensors_size(); uint32_t tensor_size = req->tensors_size();
if (tensor_size <= 0) { if (tensor_size <= 0) {
LOG(TRACE) << "No samples need to to predicted"; LOG(INFO) << "No samples need to to predicted";
return -1; return -1;
} }
for (uint32_t ti = 0; ti < tensor_size; ++ti) { for (uint32_t ti = 0; ti < tensor_size; ++ti) {
...@@ -139,7 +139,7 @@ int DenseOp::inference() { ...@@ -139,7 +139,7 @@ int DenseOp::inference() {
} }
} }
LOG(DEBUG) << "Response in builtin dense format:" LOG(INFO) << "Response in builtin dense format:"
<< "length:" << res->ByteSize() << "," << "length:" << res->ByteSize() << ","
<< "data:" << res->ShortDebugString() << "," << "data:" << res->ShortDebugString() << ","
<< "in: " << _in.size() << "," << "in: " << _in.size() << ","
......
...@@ -12,7 +12,7 @@ using baidu::paddle_serving::predictor::image_classification::Request;
 int ReaderOp::inference() {
 const Request* req =
 dynamic_cast<const Request*>(get_request_message());
-LOG(DEBUG) << "Receive request in dense service:"
+LOG(INFO) << "Receive request in dense service:"
 << req->ShortDebugString();
 ReaderOutput* res = mutable_data<ReaderOutput>();
...@@ -80,7 +80,7 @@ int ReaderOp::inference() {
 _image_8u_tmp = resize_image;
 }
-LOG(TRACE) << "Succ crop one image[CHW="
+LOG(INFO) << "Succ crop one image[CHW="
 << _image_8u_tmp.channels() << ", "
 << _image_8u_tmp.cols << ", "
 << _image_8u_tmp.rows << "]"
...@@ -109,7 +109,7 @@ int ReaderOp::inference() {
 in_tensor.shape.push_back(W);
 in_tensor.shape.push_back(H);
-LOG(TRACE) << "Succ read one image, C: " << C
+LOG(INFO) << "Succ read one image, C: " << C
 << ", W: " << W << ", H: " << H;
 // tls resource assignment
......
#include "pb_to_json.h" #include "json2pb/pb_to_json.h"
#include <google/protobuf/text_format.h> #include <google/protobuf/text_format.h>
#include "op/write_op.h" #include "op/write_op.h"
...@@ -38,7 +38,7 @@ int WriteOp::inference() { ...@@ -38,7 +38,7 @@ int WriteOp::inference() {
return -1; return -1;
} }
std::string* text = ins->mutable_response_json(); std::string* text = ins->mutable_response_json();
if (!ProtoMessageToJson(classify_out->predictions(si), if (!json2pb::ProtoMessageToJson(classify_out->predictions(si),
text, &err_string)) { text, &err_string)) {
LOG(ERROR) << "Failed convert message[" LOG(ERROR) << "Failed convert message["
<< classify_out->predictions(si).ShortDebugString() << classify_out->predictions(si).ShortDebugString()
...@@ -47,7 +47,7 @@ int WriteOp::inference() { ...@@ -47,7 +47,7 @@ int WriteOp::inference() {
} }
} }
LOG(TRACE) << "Succ write json:" LOG(INFO) << "Succ write json:"
<< classify_out->ShortDebugString(); << classify_out->ShortDebugString();
return 0; return 0;
......
...@@ -4,4 +4,4 @@ LIST(APPEND protofiles
 )
 PROTOBUF_GENERATE_SERVING_CPP(PROTO_SRCS PROTO_HDRS ${protofiles})
-LIST(APPEND serving_srcs ${PROTO_SRCS} ${pdcodegen_proto_srcs})
+LIST(APPEND serving_srcs ${PROTO_SRCS})
syntax="proto2";
import "pds_option.proto";
package baidu.paddle_serving.predictor.format;
// dense format
message DenseInstance {
repeated float features = 1;
};
message DensePrediction {
repeated float categories = 1;
};
// sparse format
message SparseInstance {
repeated uint32 keys = 1;
repeated uint32 shape = 2;
repeated float values = 3;
};
message SparsePrediction {
repeated float categories = 1;
};
// int64-tensor format
message Int64TensorInstance {
repeated int64 data = 1;
repeated uint32 shape = 2;
};
message Float32TensorPredictor {
repeated float data = 1;
repeated uint32 shape = 2;
};
// x-image format
message XImageReqInstance {
required bytes image_binary = 1;
required uint32 image_length = 2;
};
message XImageResInstance {
required string response_json = 1;
};
// x-record format
message XRecordInstance {
// TODO
required bytes data = 1;
};
syntax="proto2";
import "google/protobuf/descriptor.proto";
package pds;
extend google.protobuf.FieldOptions {
optional bool pack_on = 70000 [default=false];
};
extend google.protobuf.ServiceOptions {
optional PaddleServiceOption options = 80000;
};
message PaddleServiceOption {
optional bool generate_impl = 1 [default = false];
optional bool generate_stub = 2 [default = false];
};