PaddlePaddle / Serving
Commit 50da611b, authored Mar 10, 2020 by barrierye

    add code style check

Parent: 25d07270
Showing 114 changed files with 913 additions and 612 deletions (+913 -612)
.travis.yml (+2 -1)
core/configure/CMakeLists.txt (+0 -1)
core/configure/include/configure_parser.h (+10 -10)
core/cube/cube-server/src/server.cpp (+1 -1)
core/general-client/src/general_model_main.cpp (+11 -10)
core/general-server/op/general_copy_op.cpp (+1 -2)
core/general-server/op/general_copy_op.h (+4 -5)
core/general-server/op/general_infer_op.h (+0 -1)
core/general-server/op/general_reader_op.cpp (+2 -4)
core/general-server/op/general_reader_op.h (+4 -5)
core/general-server/op/general_response_op.cpp (+4 -6)
core/general-server/op/general_response_op.h (+0 -1)
core/general-server/op/general_text_reader_op.cpp (+5 -10)
core/general-server/op/general_text_reader_op.h (+4 -4)
core/general-server/op/general_text_response_op.h (+0 -1)
core/general-server/proto/general_model_service.proto (+2 -6)
core/kvdb/include/kvdb/kvdb_impl.h (+8 -7)
core/kvdb/include/kvdb/paddle_rocksdb.h (+2 -1)
core/kvdb/src/mock_param_dict_impl.cpp (+7 -6)
core/kvdb/src/paddle_rocksdb.cpp (+1 -1)
core/kvdb/src/rockskvdb_impl.cpp (+2 -6)
core/pdcodegen/src/pdcodegen.cpp (+7 -5)
core/predictor/common/inner_common.h (+1 -1)
core/predictor/framework/dag_view.cpp (+4 -4)
core/predictor/framework/factory.h (+1 -1)
core/predictor/framework/resource.cpp (+3 -6)
core/predictor/framework/resource.h (+9 -9)
core/predictor/src/pdserving.cpp (+3 -4)
core/predictor/unittest/test_message_op.h (+1 -1)
core/predictor/unittest/test_server_manager.cpp (+1 -1)
core/sdk-cpp/include/common.h (+1 -1)
core/sdk-cpp/include/config_manager.h (+2 -2)
core/sdk-cpp/include/factory.h (+1 -1)
core/sdk-cpp/include/predictor_sdk.h (+1 -1)
core/sdk-cpp/proto/general_model_service.proto (+2 -6)
core/sdk-cpp/src/endpoint.cpp (+1 -2)
core/sdk-cpp/src/predictor_sdk.cpp (+1 -1)
core/sdk-cpp/src/variant.cpp (+1 -1)
core/util/CMakeLists.txt (+0 -1)
core/util/include/timer.h (+0 -1)
core/util/src/CMakeLists.txt (+0 -1)
doc/COMPILE.md (+0 -1)
doc/CONTRIBUTE.md (+0 -1)
doc/IMDB_GO_CLIENT.md (+0 -3)
doc/NEW_OPERATOR.md (+0 -3)
doc/SERVER_DAG.md (+0 -7)
paddle_inference/inferencer-fluid-cpu/include/fluid_cpu_engine.h (+4 -4)
paddle_inference/inferencer-fluid-gpu/include/fluid_gpu_engine.h (+3 -3)
python/examples/bert/benchmark.py (+14 -9)
python/examples/bert/bert_client.py (+8 -4)
python/examples/bert/bert_reader.py (+21 -4)
python/examples/bert/bert_web_service.py (+4 -3)
python/examples/bert/prepare_model.py (+12 -9)
python/examples/bert/tokenization.py (+10 -10)
python/examples/criteo_ctr/README.md (+0 -1)
python/examples/criteo_ctr/args.py (+102 -87)
python/examples/criteo_ctr/criteo_reader.py (+29 -6)
python/examples/criteo_ctr/local_train.py (+30 -13)
python/examples/criteo_ctr/network_conf.py (+44 -16)
python/examples/criteo_ctr/test_client.py (+21 -4)
python/examples/criteo_ctr/test_server.py (+15 -0)
python/examples/fit_a_line/README.md (+1 -1)
python/examples/fit_a_line/benchmark.py (+15 -6)
python/examples/fit_a_line/local_train.py (+29 -12)
python/examples/fit_a_line/test_client.py (+19 -3)
python/examples/fit_a_line/test_server.py (+15 -0)
python/examples/imdb/benchmark.py (+9 -4)
python/examples/imdb/imdb_reader.py (+9 -3)
python/examples/imdb/imdb_web_service_demo.sh (+0 -1)
python/examples/imdb/local_train.py (+1 -0)
python/examples/imdb/nets.py (+23 -14)
python/examples/imdb/test_client.py (+1 -1)
python/examples/imdb/test_client_batch.py (+1 -0)
python/examples/imdb/text_classify_service.py (+6 -2)
python/examples/util/get_acc.py (+15 -0)
python/paddle_serving_client/io/__init__.py (+17 -11)
python/paddle_serving_client/metric/acc.py (+2 -2)
python/paddle_serving_client/metric/auc.py (+14 -12)
python/paddle_serving_client/utils/__init__.py (+11 -3)
python/paddle_serving_server/serve.py (+18 -7)
python/paddle_serving_server/web_serve.py (+20 -8)
python/paddle_serving_server/web_service.py (+15 -6)
python/paddle_serving_server_gpu/__init__.py (+4 -2)
python/paddle_serving_server_gpu/serve.py (+10 -5)
python/paddle_serving_server_gpu/web_service.py (+21 -14)
python/setup.py.in (+0 -1)
python/setup.py.server.in (+0 -1)
tools/Dockerfile.ci (+19 -8)
tools/cpp_examples/demo-client/paddle_serving_client.egg-info/SOURCES.txt (+1 -1)
tools/cpp_examples/demo-client/paddle_serving_client.egg-info/dependency_links.txt (+0 -1)
tools/cpp_examples/demo-client/src/general_model.cpp (+30 -33)
tools/cpp_examples/demo-client/src/general_model.h (+18 -22)
tools/cpp_examples/demo-client/src/general_model_main.cpp (+12 -11)
tools/cpp_examples/demo-client/src/load_general_model.cpp (+3 -3)
tools/cpp_examples/demo-client/src/pybind_general_model.cpp (+32 -16)
tools/cpp_examples/demo-serving/op/bert_service_op.h (+2 -2)
tools/cpp_examples/demo-serving/op/classify_op.cpp (+1 -1)
tools/cpp_examples/demo-serving/op/classify_op.h (+1 -1)
tools/cpp_examples/demo-serving/op/ctr_prediction_op.h (+1 -1)
tools/cpp_examples/demo-serving/op/general_model_op.h (+1 -2)
tools/cpp_examples/demo-serving/op/kvdb_echo_op.h (+1 -1)
tools/cpp_examples/demo-serving/op/load_general_model_conf_op.h (+6 -8)
tools/cpp_examples/demo-serving/op/reader_op.h (+1 -1)
tools/cpp_examples/demo-serving/op/text_classification_op.h (+1 -1)
tools/cpp_examples/demo-serving/op/write_json_op.cpp (+1 -1)
tools/cpp_examples/demo-serving/op/write_json_op.h (+1 -1)
tools/cpp_examples/demo-serving/op/write_op.cpp (+2 -2)
tools/cpp_examples/demo-serving/op/write_op.h (+1 -1)
tools/cpp_examples/demo-serving/proto/general_model_service.proto (+4 -12)
tools/cpp_examples/elastic-ctr/client/demo/elastic_ctr.py (+19 -19)
tools/cpp_examples/elastic-ctr/serving/op/elastic_ctr_prediction_op.cpp (+12 -15)
tools/cpp_examples/elastic-ctr/serving/op/elastic_ctr_prediction_op.h (+2 -2)
tools/serving_build.sh (+0 -0)
tools/serving_check_style.sh (+38 -0)
.travis.yml
@@ -10,4 +10,5 @@ services:
 before_install:
   - docker build -f ${DOCKERFILE_CPU} -t serving-img:${COMPILE_TYPE} .
 install:
-  - docker run -it -v $PWD:/Serving serving-img:${COMPILE_TYPE} /bin/bash Serving/tools/serving-build.sh $COMPILE_TYPE
+  - if [ $COMPILE_TYPE == "CPU" ]; then docker run -it -v $PWD:/Serving serving-img:${COMPILE_TYPE} /bin/bash Serving/tools/serving_check_style.sh ; fi;
+  - docker run -it -v $PWD:/Serving serving-img:${COMPILE_TYPE} /bin/bash Serving/tools/serving_build.sh $COMPILE_TYPE
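The style gate itself, tools/serving_check_style.sh (+38 lines), is not expanded on this page. As a rough illustration only, a gate like this usually runs formatters in diff mode and fails the job if they print anything; the following Python sketch is a hypothetical equivalent, not the script's actual contents, and the formatter choice and paths are assumptions:

```python
# Hypothetical sketch only: the real check lives in
# tools/serving_check_style.sh, whose contents are not shown on this page.
import subprocess
import sys


def formatter_is_clean(cmd):
    # Formatters run in diff mode print nothing when every file is clean.
    result = subprocess.run(cmd, capture_output=True, text=True)
    if result.stdout.strip():
        print(result.stdout)
        return False
    return True


ok = formatter_is_clean(["yapf", "--diff", "--recursive", "python/"])
sys.exit(0 if ok else 1)
```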
core/configure/CMakeLists.txt
@@ -87,4 +87,3 @@ add_custom_command(TARGET general_model_config_py_proto POST_BUILD
     WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR})
 endif()
 endif()
(one trailing blank line removed)
core/configure/include/configure_parser.h
@@ -20,14 +20,14 @@ namespace baidu {
(clang-format reflow of the declarations below; no functional change)
 namespace paddle_serving {
 namespace configure {
 int read_proto_conf(const std::string &conf_full_path,
                     google::protobuf::Message *conf);
 int read_proto_conf(const std::string &conf_path,
                     const std::string &conf_file,
                     google::protobuf::Message *conf);
 int write_proto_conf(google::protobuf::Message *message,
                      const std::string &output_path,
                      const std::string &output_file);
core/cube/cube-server/src/server.cpp
@@ -12,9 +12,9 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
-#include "core/cube/cube-server/include/cube/server.h"
 #include <brpc/server.h>
 #include "core/cube/cube-server/include/cube/framework.h"
+#include "core/cube/cube-server/include/cube/server.h"
 namespace rec {
 namespace mcube {
core/general-client/src/general_model_main.cpp
@@ -17,18 +17,18 @@
 #include "core/general-client/include/general_model.h"
-using namespace std;
+using namespace std;  // NOLINT
 using baidu::paddle_serving::general_model::PredictorClient;
 using baidu::paddle_serving::general_model::FetchedMap;
 int main(int argc, char* argv[]) {
   PredictorClient* client = new PredictorClient();
   client->init("inference.conf");
   client->set_predictor_conf("./", "predictor.conf");
   client->create_predictor();
-  std::vector<std::vector<float> > float_feed;
-  std::vector<std::vector<int64_t> > int_feed;
+  std::vector<std::vector<float>> float_feed;
+  std::vector<std::vector<int64_t>> int_feed;
   std::vector<std::string> float_feed_name;
   std::vector<std::string> int_feed_name = {"words", "label"};
   std::vector<std::string> fetch_name = {"cost", "acc", "prediction"};
@@ -53,12 +53,13 @@ int main(int argc, char * argv[]) {
(clang-format reflow of the client->predict(...) call; no functional change)
     cin >> label;
     int_feed.push_back({label});
     FetchedMap result;
     client->predict(
         float_feed, float_feed_name, int_feed, int_feed_name, fetch_name,
         &result);
     cout << label << "\t" << result["prediction"][1] << endl;
core/general-server/op/general_copy_op.cpp
@@ -69,8 +69,7 @@ int GeneralCopyOp::inference() {
(clang-format reflow inside the copy loop; no functional change)
   for (int i = 0; i < out->size(); ++i) {
     int64_t* src_ptr = static_cast<int64_t*>(in->at(i).data.data());
     out->at(i).data.Resize(out->at(i).lod[0].back() * sizeof(int64_t));
     out->at(i).shape = {out->at(i).lod[0].back(), 1};
     int64_t* tgt_ptr = static_cast<int64_t*>(out->at(i).data.data());
     for (int j = 0; j < out->at(i).lod[0].back(); ++j) {
core/general-server/op/general_copy_op.h
@@ -24,23 +24,22 @@
 #include <string>
-#include "core/predictor/framework/resource.h"
-#include "core/general-server/op/general_infer_helper.h"
 #include "core/general-server/general_model_service.pb.h"
+#include "core/general-server/op/general_infer_helper.h"
+#include "core/predictor/framework/resource.h"
(plus a clang-format reflow of the GeneralCopyOp class declaration:)
 class GeneralCopyOp
     : public baidu::paddle_serving::predictor::OpWithChannel<GeneralBlob> {
  public:
  typedef std::vector<paddle::PaddleTensor> TensorVector;
  DECLARE_OP(GeneralCopyOp);
  int inference();
 };
 }  // namespace serving
core/general-server/op/general_infer_op.h
@@ -39,7 +39,6 @@ class GeneralInferOp
(removes one blank line after the class declaration; no functional change)
   DECLARE_OP(GeneralInferOp);
   int inference();
 };
 }  // namespace serving
core/general-server/op/general_reader_op.cpp
@@ -188,8 +188,7 @@ int GeneralReaderOp::inference() {
@@ -203,8 +202,7 @@ int GeneralReaderOp::inference() {
(both hunks are clang-format reflows that join the wrapped assignments
 "dst_ptr[offset + k] = req->insts(j).tensor_array(i).int64_data(k);" and
 "dst_ptr[offset + k] = req->insts(j).tensor_array(i).float_data(k);"
 onto single lines inside the batch loops; no functional change)
core/general-server/op/general_reader_op.h
@@ -24,24 +24,23 @@
 #include <string>
-#include "core/predictor/framework/resource.h"
-#include "core/general-server/op/general_infer_helper.h"
 #include "core/general-server/general_model_service.pb.h"
 #include "core/general-server/load_general_model_service.pb.h"
+#include "core/general-server/op/general_infer_helper.h"
+#include "core/predictor/framework/resource.h"
(plus a clang-format reflow of the GeneralReaderOp class declaration)
core/general-server/op/general_response_op.cpp
@@ -122,8 +122,7 @@ int GeneralResponseOp::inference() {
@@ -143,16 +142,15 @@ int GeneralResponseOp::inference() {
(clang-format reflows in the fetch loops: the wrapped statements
 "FetchInst* fetch_p = res->mutable_insts(j);" and
 "fetch_p->mutable_tensor_array(var_idx)->add_int64_data(data_ptr[0]);" /
 "fetch_p->mutable_tensor_array(var_idx)->add_float_data(data_ptr[k]);"
 are joined onto single lines; no functional change)
core/general-server/op/general_response_op.h
@@ -39,7 +39,6 @@ class GeneralResponseOp
(removes one blank line after the class declaration; no functional change)
core/general-server/op/general_text_reader_op.cpp
@@ -12,11 +12,11 @@
(reorders the #include block: the op's own header
 "core/general-server/op/general_text_reader_op.h" is moved relative to
 <algorithm>, <iostream>, <memory> and <sstream>; no functional change)
@@ -32,7 +32,6 @@ using baidu::paddle_serving::predictor::general_model::Request;
(removes one blank line before GeneralTextReaderOp::inference(); the
 "// reade request from client" comment is unchanged)
@@ -132,11 +131,9 @@ int GeneralTextReaderOp::inference() {
@@ -148,11 +145,9 @@ int GeneralTextReaderOp::inference() {
(clang-format reflows that join the wrapped for-loop headers and the
 "dst_ptr[offset + k] = req->insts(j).tensor_array(i).int_data(k);"
 assignments onto single lines; no functional change)
core/general-server/op/general_text_reader_op.h
@@ -24,17 +24,17 @@
 #include <string>
-#include "core/predictor/framework/resource.h"
-#include "core/general-server/op/general_infer_helper.h"
 #include "core/general-server/general_model_service.pb.h"
 #include "core/general-server/load_general_model_service.pb.h"
+#include "core/general-server/op/general_infer_helper.h"
+#include "core/predictor/framework/resource.h"
(plus a clang-format reflow of the GeneralTextReaderOp class declaration)
core/general-server/op/general_text_response_op.h
@@ -40,7 +40,6 @@ class GeneralTextResponseOp
(removes one blank line after the class declaration; no functional change)
core/general-server/proto/general_model_service.proto
@@ -28,13 +28,9 @@ message Tensor {
   repeated int32 shape = 6;
 };
-message FeedInst {
-  repeated Tensor tensor_array = 1;
-};
+message FeedInst { repeated Tensor tensor_array = 1; };
-message FetchInst {
-  repeated Tensor tensor_array = 1;
-};
+message FetchInst { repeated Tensor tensor_array = 1; };
 message Request {
   repeated FeedInst insts = 1;
core/kvdb/include/kvdb/kvdb_impl.h
@@ -27,11 +27,11 @@
 // limitations under the License.
 #pragma once
-#include <chrono>
+#include <chrono>  // NOLINT
+#include <functional>
 #include <memory>
 #include <unordered_map>
 #include <vector>
-#include <functional>
 class AbstractKVDB;
 class FileReader;
 class ParamDict;
@@ -65,7 +65,7 @@ class FileReader {
     std::string data;
     FILE* stream = nullptr;
     const int max_buffer = 256;
-    char buffer[max_buffer];
+    char buffer[max_buffer];  // NOLINT
     cmd.append(" 2>&1");
     stream = popen(cmd.c_str(), "r");
     if (stream) {
@@ -76,7 +76,8 @@ class FileReader {
       return data;
     };
     std::string cmd = "md5sum " + this->filename_;
-    // TODO: throw exception if error occurs during execution of shell command
+    // NOLINT TODO: throw exception if error occurs during execution of shell
+    // command
     std::string md5val = getCmdOut(cmd);
     this->time_stamp_ = md5val == this->last_md5_val_ ? this->time_stamp_
@@ -93,7 +94,7 @@ class FileReader {
     return this->time_stamp_;
   }
-  inline virtual ~FileReader() {};
+  inline virtual ~FileReader() {}
  private:
   std::string filename_;
@@ -128,7 +129,7 @@ class ParamDict {
   virtual ~ParamDict();
  private:
-  std::function<std::pair<Key, Value>(std::string)> read_func_;
+  std::function<std::pair<Key, Value>(std::string)> read_func_;  // NOLINT
   std::vector<FileReaderPtr> file_reader_lst_;
   AbsKVDBPtr front_db, back_db;
 };
@@ -139,5 +140,5 @@ class ParamDictMgr {
   void InsertParamDict(std::string, ParamDictPtr);
  private:
-  std::unordered_map<std::string, ParamDictPtr> ParamDictMap;
+  std::unordered_map<std::string, ParamDictPtr> ParamDictMap;  // NOLINT
 };
core/kvdb/include/kvdb/paddle_rocksdb.h
@@ -25,7 +25,7 @@
 class RocksDBWrapper {
  public:
-  RocksDBWrapper(std::string db_name);
+  RocksDBWrapper(std::string db_name);  // NOLINT
   std::string Get(std::string key);
   bool Put(std::string key, std::string value);
@@ -33,6 +33,7 @@ class RocksDBWrapper {
(one line added near the end of the public section)
   static std::shared_ptr<RocksDBWrapper> RocksDBWrapperFactory(
       std::string db_name = "SparseMatrix");
   void Close();
  private:
   rocksdb::DB* db_;
   std::string db_name_;
core/kvdb/src/mock_param_dict_impl.cpp
@@ -16,7 +16,7 @@
 #include <fstream>
 #include <iterator>
 #include <sstream>
-#include <thread>
+#include <thread>  // NOLINT
 #include "core/kvdb/include/kvdb/rocksdb_impl.h"
 std::vector<FileReaderPtr> ParamDict::GetDictReaderLst() {
@@ -33,8 +33,10 @@ void ParamDict::SetFileReaderLst(std::vector<std::string> lst) {
 std::vector<float> ParamDict::GetSparseValue(std::string feasign,
                                              std::string slot) {
-  auto BytesToFloat = [](uint8_t* byte_array) { return *((float*)byte_array); };
-  // TODO: the concatation of feasign and slot is TBD.
+  auto BytesToFloat = [](uint8_t* byte_array) {
+    return *((float*)byte_array);  // NOLINT
+  };
+  // NOLINT TODO: the concatation of feasign and slot is TBD.
   std::string result = front_db->Get(feasign + slot);
   std::vector<float> value;
   if (result == "NOT_FOUND") return value;
@@ -87,7 +89,7 @@ bool ParamDict::InsertSparseValue(std::string feasign,
     value.push_back(raw_values_ptr[i]);
   }
-  back_db->Set(key, value);  // TODO: change stateless to stateful
+  back_db->Set(key, value);  // NOLINT TODO: change stateless to stateful
   return true;
 }
@@ -140,5 +142,4 @@ void ParamDict::CreateKVDB() {
   this->back_db->CreateDB();
 }
-ParamDict::~ParamDict() {
-}
+ParamDict::~ParamDict() {}
core/kvdb/src/paddle_rocksdb.cpp
@@ -51,7 +51,7 @@ void RocksDBWrapper::SetDBName(std::string db_name) {
(whitespace-only change to the "delete (db_);" statement; no functional change)
 void RocksDBWrapper::Close() {
   if (db_ != nullptr) {
     db_->Close();
     delete (db_);
     db_ = nullptr;
   }
 }
core/kvdb/src/rockskvdb_impl.cpp
@@ -32,12 +32,8 @@ void RocksKVDB::Set(std::string key, std::string value) {
   return;
 }
-void RocksKVDB::Close() {
-  this->db_->Close();
-}
+void RocksKVDB::Close() { this->db_->Close(); }
 std::string RocksKVDB::Get(std::string key) { return this->db_->Get(key); }
-RocksKVDB::~RocksKVDB() {
-  this->db_->Close();
-}
+RocksKVDB::~RocksKVDB() { this->db_->Close(); }
core/pdcodegen/src/pdcodegen.cpp
@@ -15,14 +15,14 @@
 #include <list>
 #include "boost/algorithm/string.hpp"
 #include "boost/scoped_ptr.hpp"
+#include "core/pdcodegen/pds_option.pb.h"
+#include "core/pdcodegen/plugin/strutil.h"
+#include "core/pdcodegen/plugin/substitute.h"
 #include "google/protobuf/compiler/code_generator.h"
 #include "google/protobuf/compiler/plugin.h"
 #include "google/protobuf/descriptor.h"
 #include "google/protobuf/io/printer.h"
 #include "google/protobuf/io/zero_copy_stream.h"
-#include "core/pdcodegen/pds_option.pb.h"
-#include "core/pdcodegen/plugin/strutil.h"
-#include "core/pdcodegen/plugin/substitute.h"
 using std::string;
 using google::protobuf::Descriptor;
 using google::protobuf::FileDescriptor;
@@ -115,7 +115,8 @@ class PdsCodeGenerator : public CodeGenerator {
@@ -845,7 +846,8 @@ class PdsCodeGenerator : public CodeGenerator {
(in both hunks, clang-format splits the long
 printer.Print("#include \"core/predictor/framework/service_manager.h\"\n")
 statement across two lines, next to the inner_common.h / service.h /
 manager.h Print calls and before the generate_stub branches that print
 "#include <baidu/rpc/parallel_channel.h>\n" and
 "#include <brpc/parallel_channel.h>\n"; no functional change)
core/predictor/common/inner_common.h
@@ -52,9 +52,9 @@
 #include "glog/raw_logging.h"
+#include "core/configure/general_model_config.pb.h"
 #include "core/configure/include/configure_parser.h"
 #include "core/configure/server_configure.pb.h"
-#include "core/configure/general_model_config.pb.h"
 #include "core/predictor/common/constant.h"
 #include "core/predictor/common/types.h"
core/predictor/framework/dag_view.cpp
@@ -85,9 +85,9 @@ int DagView::init(Dag* dag, const std::string& service_name) {
(clang-format reflow of a VLOG(2) statement and of the
 set_pre_node_name(...) call; no functional change)
       VLOG(2) << "set op pre name: \n"
               << "current op name: " << vstage->nodes.back()->op->op_name()
               << " previous op name: "
               << _view[si - 1]->nodes.back()->op->op_name();
       vstage->nodes.back()->op->set_pre_node_name(
           _view[si - 1]->nodes.back()->op->op_name());
     }
     _view.push_back(vstage);
core/predictor/framework/factory.h
@@ -16,8 +16,8 @@
 #include <map>
 #include <string>
 #include <utility>
-#include "glog/raw_logging.h"
 #include "core/predictor/common/inner_common.h"
+#include "glog/raw_logging.h"
 namespace baidu {
 namespace paddle_serving {
 namespace predictor {
core/predictor/framework/resource.cpp
@@ -197,13 +197,10 @@ int Resource::general_model_initialize(const std::string& path,
(clang-format joins the wrapped VLOG(2) feed-var logging statements onto
 single lines; no functional change)
   for (int i = 0; i < feed_var_num; ++i) {
     _config->_feed_name[i] = model_config.feed_var(i).name();
     _config->_feed_alias_name[i] = model_config.feed_var(i).alias_name();
     VLOG(2) << "feed var[" << i << "]: " << _config->_feed_name[i];
     VLOG(2) << "feed var[" << i << "]: " << _config->_feed_alias_name[i];
     _config->_feed_type[i] = model_config.feed_var(i).feed_type();
     VLOG(2) << "feed type[" << i << "]: " << _config->_feed_type[i];
     if (model_config.feed_var(i).is_lod_tensor()) {
       VLOG(2) << "var[" << i << "] is lod tensor";
core/predictor/framework/resource.h
@@ -13,10 +13,10 @@
 // limitations under the License.
 #pragma once
+#include <map>
 #include <memory>
 #include <string>
 #include <vector>
-#include <map>
 #include "core/cube/cube-api/include/cube_api.h"
 #include "core/kvdb/include/kvdb/paddle_rocksdb.h"
 #include "core/predictor/common/inner_common.h"
core/predictor/src/pdserving.cpp
@@ -99,8 +99,8 @@ static void g_change_server_port() {
@@ -157,8 +157,7 @@ int main(int argc, char** argv) {
(clang-format reflows of two wrapped VLOG(2) error messages:
 VLOG(2) << "failed to load configure[" << FLAGS_inferservice_path << ","
         << FLAGS_inferservice_file << "]."; after the read_proto_conf check,
 and VLOG(2) << "Log path " << FLAGS_log_dir << " not exist, and create fail";
 after the mkdir/stat check; no functional change)
core/predictor/unittest/test_message_op.h
@@ -15,8 +15,8 @@
 #pragma once
 #include <gtest/gtest.h>
 #include "core/predictor/framework/channel.h"
-#include "core/predictor/op/op.h"
 #include "core/predictor/msg_data.pb.h"
+#include "core/predictor/op/op.h"
 namespace baidu {
 namespace paddle_serving {
core/predictor/unittest/test_server_manager.cpp
(diff collapsed on this page)
core/sdk-cpp/include/common.h
@@ -53,9 +53,9 @@
 #include "json2pb/json_to_pb.h"
 #endif
+#include "core/configure/general_model_config.pb.h"
 #include "core/configure/include/configure_parser.h"
 #include "core/configure/sdk_configure.pb.h"
-#include "core/configure/general_model_config.pb.h"
 #include "core/sdk-cpp/include/utils.h"
core/sdk-cpp/include/config_manager.h
@@ -32,9 +32,9 @@ class EndpointConfigManager {
(whitespace-only reflow of the create()/load() declarations; no functional
 change)
   EndpointConfigManager()
       : _last_update_timestamp(0), _current_endpointmap_id(1) {}
   int create(const std::string& sdk_desc_str);
   int load(const std::string& sdk_desc_str);
   int create(const char* path, const char* file);
core/sdk-cpp/include/factory.h
@@ -16,9 +16,9 @@
 #include <map>
 #include <string>
 #include <utility>
-#include "glog/raw_logging.h"
 #include "core/sdk-cpp/include/common.h"
 #include "core/sdk-cpp/include/stub_impl.h"
+#include "glog/raw_logging.h"
 namespace baidu {
 namespace paddle_serving {
core/sdk-cpp/include/predictor_sdk.h
@@ -31,7 +31,7 @@ class PredictorApi {
(whitespace-only reflow of the create() declaration; no functional change)
   int register_all();
   int create(const std::string& sdk_desc_str);
   int create(const char* path, const char* file);
core/sdk-cpp/proto/general_model_service.proto
@@ -28,13 +28,9 @@ message Tensor {
   repeated int32 shape = 6;
 };
-message FeedInst {
-  repeated Tensor tensor_array = 1;
-};
+message FeedInst { repeated Tensor tensor_array = 1; };
-message FetchInst {
-  repeated Tensor tensor_array = 1;
-};
+message FetchInst { repeated Tensor tensor_array = 1; };
 message Request {
   repeated FeedInst insts = 1;
core/sdk-cpp/src/endpoint.cpp
@@ -35,8 +35,7 @@ int Endpoint::initialize(const EndpointInfo& ep_info) {
(clang-format joins the wrapped VLOG(2) statement onto one line; no
 functional change)
       return -1;
     }
     _variant_list.push_back(var);
     VLOG(2) << "Succ create variant: " << vi << ", endpoint:" << _endpoint_name;
   }
   return 0;
core/sdk-cpp/src/predictor_sdk.cpp
@@ -30,7 +30,7 @@ int PredictorApi::register_all() {
(whitespace-only reflow of the function signature; no functional change)
   return 0;
 }
 int PredictorApi::create(const std::string& api_desc_str) {
   VLOG(2) << api_desc_str;
   if (register_all() != 0) {
     LOG(ERROR) << "Failed do register all!";
core/sdk-cpp/src/variant.cpp
(diff collapsed on this page)
core/util/CMakeLists.txt
 include(src/CMakeLists.txt)
 add_library(utils ${util_srcs})
(one trailing blank line removed)
core/util/include/timer.h
@@ -15,7 +15,6 @@ limitations under the License. */
 #pragma once
 #include <stdlib.h>
(one blank line removed before the namespace block)
 namespace baidu {
 namespace paddle_serving {
core/util/src/CMakeLists.txt
 FILE(GLOB srcs ${CMAKE_CURRENT_LIST_DIR}/*.cc)
 LIST(APPEND util_srcs ${srcs})
(one trailing blank line removed)
doc/COMPILE.md
@@ -27,4 +27,3 @@ make -j10
 cmake -DPYTHON_INCLUDE_DIR=$PYTHONROOT/include/python2.7/ -DPYTHON_LIBRARIES=$PYTHONROOT/lib/libpython2.7.so -DPYTHON_EXECUTABLE=/home/users/dongdaxiang/software/baidu/third-party/python/bin/python -DCLIENT_ONLY=ON ..
 make -j10
 ```
(one trailing blank line removed)
doc/CONTRIBUTE.md
@@ -152,4 +152,3 @@ GLOG_minloglevel=1 bin/serving
 2 -ERROR
 3 - FATAL (Be careful as FATAL log will generate a coredump)
(one trailing blank line removed)
doc/IMDB_GO_CLIENT.md
@@ -193,6 +193,3 @@ total num: 25000
 acc num: 22014
 acc: 0.88056
 ```
(three trailing blank lines removed)
doc/NEW_OPERATOR.md
@@ -143,6 +143,3 @@ self.op_dict = {
     "general_dist_kv": "GeneralDistKVOp"
 }
 ```
(three trailing blank lines removed)
doc/SERVER_DAG.md
@@ -54,10 +54,3 @@ op_seq_maker.add_op(dist_kv_op)
 op_seq_maker.add_op(general_infer_op)
 op_seq_maker.add_op(general_response_op)
 ```
(seven trailing blank lines removed)
paddle_inference/inferencer-fluid-cpu/include/fluid_cpu_engine.h
@@ -21,8 +21,8 @@
 #include <vector>
 #include "core/configure/include/configure_parser.h"
 #include "core/configure/inferencer_configure.pb.h"
-#include "paddle_inference_api.h"  // NOLINT
 #include "core/predictor/framework/infer.h"
+#include "paddle_inference_api.h"  // NOLINT
 namespace baidu {
 namespace paddle_serving {
@@ -373,7 +373,7 @@ class SigmoidFluidModel {
     clone_model.reset(new SigmoidFluidModel());
     clone_model->_sigmoid_core = _sigmoid_core;
     clone_model->_fluid_core = _fluid_core->Clone();
-    return std::move(clone_model);
+    return std::move(clone_model);  // NOLINT
   }
  public:
@@ -459,7 +459,7 @@ class FluidCpuWithSigmoidCore : public FluidFamilyCore {
   }
  protected:
-  std::unique_ptr<SigmoidFluidModel> _core;
+  std::unique_ptr<SigmoidFluidModel> _core;  // NOLINT
 };
 class FluidCpuNativeDirWithSigmoidCore : public FluidCpuWithSigmoidCore {
paddle_inference/inferencer-fluid-gpu/include/fluid_gpu_engine.h
@@ -23,8 +23,8 @@
 #include <vector>
 #include "core/configure/include/configure_parser.h"
 #include "core/configure/inferencer_configure.pb.h"
-#include "paddle_inference_api.h"  // NOLINT
 #include "core/predictor/framework/infer.h"
+#include "paddle_inference_api.h"  // NOLINT
 DECLARE_int32(gpuid);
python/examples/bert/benchmark.py
@@ -13,6 +13,7 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
+# pylint: disable=doc-string-missing
 from __future__ import unicode_literals, absolute_import
 import os
@@ -29,6 +30,7 @@ from bert_reader import BertReader
(blank-line adjustment before def single_func(idx, resource):)
 args = benchmark_args()
 def single_func(idx, resource):
     fin = open("data-c.txt")
     if args.request == "rpc":
@@ -42,25 +44,28 @@ def single_func(idx, resource):
(yapf reflow; the reformatted code reads:)
         start = time.time()
         for line in fin:
             feed_dict = reader.process(line)
             result = client.predict(feed=feed_dict, fetch=fetch)
         end = time.time()
     elif args.request == "http":
         start = time.time()
         header = {"Content-Type": "application/json"}
         for line in fin:
             #dict_data = {"words": "this is for output ", "fetch": ["pooled_output"]}
             dict_data = {"words": line, "fetch": ["pooled_output"]}
             r = requests.post(
                 'http://{}/bert/prediction'.format(resource["endpoint"][0]),
                 data=json.dumps(dict_data),
                 headers=header)
         end = time.time()
     return [[end - start]]

 if __name__ == '__main__':
     multi_thread_runner = MultiThreadRunner()
     endpoint_list = [
         "127.0.0.1:9494", "127.0.0.1:9495", "127.0.0.1:9496", "127.0.0.1:9497"
     ]
     #endpoint_list = endpoint_list + endpoint_list + endpoint_list
     #result = multi_thread_runner.run(single_func, args.thread, {"endpoint":endpoint_list})
     result = single_func(0, {"endpoint": endpoint_list})
     print(result)
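For reference, the HTTP path exercised above can be reproduced standalone; a minimal sketch, assuming a bert web service is already listening on one of the endpoints listed in benchmark.py (the input sentence here is a made-up placeholder):

```python
import json

import requests

endpoint = "127.0.0.1:9494"  # first endpoint from benchmark.py's list
header = {"Content-Type": "application/json"}
# Same request body shape as benchmark.py's http branch.
dict_data = {"words": "hello serving", "fetch": ["pooled_output"]}
r = requests.post(
    "http://{}/bert/prediction".format(endpoint),
    data=json.dumps(dict_data),
    headers=header)
print(r.json())
```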
python/examples/bert/bert_client.py
 # coding:utf-8
+# pylint: disable=doc-string-missing
 import os
 import sys
 import numpy as np
@@ -143,9 +144,12 @@ def single_func(idx, resource):
(yapf reflow of the __main__ block:)
     end = time.time()
     return [[end - start]]

 if __name__ == '__main__':
     multi_thread_runner = MultiThreadRunner()
     result = multi_thread_runner.run(single_func, args.thread, {
         "endpoint": [
             "127.0.0.1:9494", "127.0.0.1:9495", "127.0.0.1:9496",
             "127.0.0.1:9497"
         ]
     })
python/examples/bert/bert_reader.py
(adds the Apache-2.0 license header and a pylint marker at the top of the
 file:)
+# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# pylint: disable=doc-string-missing
 from batching import pad_batch_data
 import tokenization

 class BertReader():
     def __init__(self, vocab_file="", max_seq_len=128):
         self.vocab_file = vocab_file
@@ -48,8 +63,10 @@ class BertReader():
(yapf reflow of the feed_result dict:)
         position_ids = list(range(len(token_ids)))
         p_token_ids, p_pos_ids, p_text_type_ids, input_mask = \
             self.pad_batch(token_ids, text_type_ids, position_ids)
         feed_result = {
             "input_ids": p_token_ids.reshape(-1).tolist(),
             "position_ids": p_pos_ids.reshape(-1).tolist(),
             "segment_ids": p_text_type_ids.reshape(-1).tolist(),
             "input_mask": input_mask.reshape(-1).tolist()
         }
         return feed_result
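BertReader's process() output feeds straight into the RPC client, as benchmark.py's rpc branch does. A minimal sketch of that path, assuming the client config directory saved by prepare_model.py (the config path, endpoint, and input text are assumptions):

```python
from paddle_serving_client import Client

from bert_reader import BertReader

client = Client()
# Path assumed from prepare_model.py's "bert_seq{}_client" naming scheme.
client.load_client_config("bert_seq20_client/serving_client_conf.prototxt")
client.connect(["127.0.0.1:9494"])

reader = BertReader(vocab_file="vocab.txt", max_seq_len=20)
feed_dict = reader.process("hello serving")  # builds ids/positions/mask
fetch_map = client.predict(feed=feed_dict, fetch=["pooled_output"])
print(fetch_map)
```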
python/examples/bert/bert_web_service.py
@@ -12,12 +12,13 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
+# pylint: disable=doc-string-missing
 from paddle_serving_server_gpu.web_service import WebService
 from bert_reader import BertReader
 import sys
 import os

 class BertService(WebService):
     def load(self):
         self.reader = BertReader(vocab_file="vocab.txt", max_seq_len=20)
@@ -26,12 +27,12 @@ class BertService(WebService):
(yapf reflow of the prepare_server(...) call; no functional change)
         feed_res = self.reader.process(feed["words"].encode("utf-8"))
         return feed_res, fetch

 bert_service = BertService(name="bert")
 bert_service.load()
 bert_service.load_model_config(sys.argv[1])
 gpu_ids = os.environ["CUDA_VISIBLE_DEVICES"]
 gpus = [int(x) for x in gpu_ids.split(",")]
 bert_service.set_gpus(gpus)
 bert_service.prepare_server(workdir="workdir", port=9494, device="gpu")
 bert_service.run_server()
python/examples/bert/prepare_model.py
@@ -11,7 +11,7 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
+# pylint: disable=doc-string-missing
 import paddlehub as hub
 import paddle.fluid as fluid
 import sys
@@ -19,7 +19,8 @@ import paddle_serving_client.io as serving_io
 model_name = "bert_chinese_L-12_H-768_A-12"
 module = hub.Module(model_name)
 inputs, outputs, program = module.context(
     trainable=True, max_seq_len=int(sys.argv[1]))
 place = fluid.core_avx.CPUPlace()
 exe = fluid.Executor(place)
 input_ids = inputs["input_ids"]
@@ -35,10 +36,12 @@ feed_var_names = [
(yapf reflow of the save_model call:)
 target_vars = [pooled_output, sequence_output]
 serving_io.save_model(
     "bert_seq{}_model".format(sys.argv[1]),
     "bert_seq{}_client".format(sys.argv[1]), {
         "input_ids": input_ids,
         "position_ids": position_ids,
         "segment_ids": segment_ids,
         "input_mask": input_mask,
     }, {"pooled_output": pooled_output,
         "sequence_output": sequence_output}, program)
python/examples/bert/tokenization.py
(pylint suppressions appended to definition lines throughout the file; no
 functional change:)
-def convert_to_unicode(text):
+def convert_to_unicode(text):  # pylint: disable=doc-string-with-all-args
-def printable_text(text):
+def printable_text(text):  # pylint: disable=doc-string-with-all-args
-def load_vocab(vocab_file):
+def load_vocab(vocab_file):  # pylint: disable=doc-string-with-all-args, doc-string-with-returns
-class WSSPTokenizer(object):
+class WSSPTokenizer(object):  # pylint: disable=doc-string-missing
-    def cut(self, chars):
+    def cut(self, chars):  # pylint: disable=doc-string-missing
-    def tokenize(self, text, unk_token="[UNK]"):
+    def tokenize(self, text, unk_token="[UNK]"):  # pylint: disable=doc-string-missing
(the same treatment is applied to BasicTokenizer.tokenize and
 WordpieceTokenizer.tokenize, and the punctuation-range condition in
 _is_punctuation is re-wrapped:)
 if ((cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or
         (cp >= 91 and cp <= 96) or (cp >= 123 and cp <= 126)):
     return True
 cat = unicodedata.category(char)
 if cat.startswith("P"):
python/examples/criteo_ctr/README.md
 # CTR task on Criteo Dataset
(one trailing blank line removed)
python/examples/criteo_ctr/args.py
(adds the same Apache-2.0 license header and pylint marker as in
 bert_reader.py above, then yapf re-wraps the argparse setup; the file now
 begins:)
 # pylint: disable=doc-string-missing
 import argparse

 def parse_args():
     parser = argparse.ArgumentParser(description="PaddlePaddle CTR example")
     parser.add_argument(
     ...
python/examples/criteo_ctr/criteo_reader.py
(adds the same Apache-2.0 license header and pylint marker, then yapf
 re-wraps over-long lines:)
 import sys
 import paddle.fluid.incubate.data_generator as dg

 class CriteoDataset(dg.MultiSlotDataGenerator):
     def setup(self, sparse_feature_dim):
         self.cont_min_ = [0, -3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
         self.cont_max_ = [
             20, 600, 100, 50, 64000, 500, 100, 50, 500, 10, 10, 10, 50
         ]
         self.cont_diff_ = [
             20, 603, 100, 50, 64000, 500, 100, 50, 500, 10, 10, 10, 50
         ]
         self.hash_dim_ = sparse_feature_dim
         # here, training data are lines with line_index < train_idx_
         self.train_idx_ = 41256555
@@ -23,7 +43,8 @@ class CriteoDataset(dg.MultiSlotDataGenerator):
         for idx in self.categorical_range_:
             sparse_feature.append(
                 [hash(str(idx) + features[idx]) % self.hash_dim_])
         return dense_feature, sparse_feature, [int(features[0])]
@@ -32,9 +53,11 @@ class CriteoDataset(dg.MultiSlotDataGenerator):
         for fname in filelist:
             with open(fname.strip(), "r") as fin:
                 for line in fin:
                     dense_feature, sparse_feature, label = self._process_line(
                         line)
                     #yield dense_feature, sparse_feature, label
                     yield [dense_feature] + sparse_feature + [label]
         import paddle
         batch_iter = paddle.batch(
             paddle.reader.shuffle(
@@ -42,7 +65,6 @@ and @@ -54,6 +76,7 @@
(blank-line adjustments around generate_sample(self, line) and the
 __main__ block, which sets up:)
 if __name__ == "__main__":
     criteo_dataset = CriteoDataset()
     criteo_dataset.setup(int(sys.argv[1]))
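The reader doubles as a pipe-command data generator: local_train.py (next section) points the fluid dataset at it as a subprocess. A minimal sketch of that wiring, assuming an InMemoryDataset and a sparse feature dimension of 1000001 (the value test_client.py uses):

```python
import paddle.fluid as fluid

dataset = fluid.DatasetFactory().create_dataset("InMemoryDataset")
# Each input file is piped through the reader in a subprocess; the argument
# is the sparse feature dimension criteo_reader.py reads from argv[1].
dataset.set_pipe_command("python criteo_reader.py 1000001")
dataset.set_batch_size(128)
```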
python/examples/criteo_ctr/local_train.py
(adds the same Apache-2.0 license header and pylint marker, then yapf
 re-wraps long lines in train():)
 from __future__ import print_function
 from args import parse_args
@@ -17,15 +32,17 @@ def train():
     dense_input = fluid.layers.data(
         name="dense_input", shape=[dense_feature_dim], dtype='float32')
     sparse_input_ids = [
         fluid.layers.data(
             name="C" + str(i), shape=[1], lod_level=1, dtype="int64")
         for i in range(1, 27)
     ]
     label = fluid.layers.data(name='label', shape=[1], dtype='int64')
     #nn_input = None if sparse_only else dense_input
     nn_input = dense_input
     predict_y, loss, auc_var, batch_auc_var = dnn_model(
         nn_input, sparse_input_ids, label, args.embedding_size,
         args.sparse_feature_dim)
     optimizer = fluid.optimizer.SGD(learning_rate=1e-4)
     optimizer.minimize(loss)
@@ -36,16 +53,17 @@ def train():
     dataset.set_use_var([dense_input] + sparse_input_ids + [label])
     python_executable = "python"
     pipe_command = "{} criteo_reader.py {}".format(python_executable,
                                                    args.sparse_feature_dim)
     dataset.set_pipe_command(pipe_command)
     dataset.set_batch_size(128)
     thread_num = 10
     dataset.set_thread(thread_num)
     whole_filelist = [
         "raw_data/part-%d" % x for x in range(len(os.listdir("raw_data")))
     ]
     dataset.set_filelist(whole_filelist[:thread_num])
     dataset.load_into_memory()
@@ -53,8 +71,7 @@ def train():
     epochs = 1
     for i in range(epochs):
         exe.train_from_dataset(
             program=fluid.default_main_program(), dataset=dataset, debug=True)
         print("epoch {} finished".format(i))
     import paddle_serving_client.io as server_io
@@ -63,9 +80,9 @@ def train():
         feed_var_dict["sparse_{}".format(i)] = sparse
     fetch_var_dict = {"prob": predict_y}
     server_io.save_model("ctr_serving_model", "ctr_client_conf",
                          feed_var_dict, fetch_var_dict,
                          fluid.default_main_program())

 if __name__ == '__main__':
     train()
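The ctr_serving_model directory saved above is what the serving side loads. A hedged sketch of a matching server, following the op-sequence pattern shown in the doc/SERVER_DAG.md hunk earlier; the op names, workdir, and port are assumptions rather than the repository's actual test_server.py:

```python
from paddle_serving_server import OpMaker, OpSeqMaker, Server

# Build the reader -> infer -> response pipeline.
op_maker = OpMaker()
read_op = op_maker.create('general_reader')
general_infer_op = op_maker.create('general_infer')
general_response_op = op_maker.create('general_response')

op_seq_maker = OpSeqMaker()
op_seq_maker.add_op(read_op)
op_seq_maker.add_op(general_infer_op)
op_seq_maker.add_op(general_response_op)

server = Server()
server.set_op_sequence(op_seq_maker.get_op_sequence())
server.load_model_config("ctr_serving_model")  # directory saved by train()
server.prepare_server(workdir="workdir", port=9292, device="cpu")
server.run_server()
```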
python/examples/criteo_ctr/network_conf.py
(adds the same Apache-2.0 license header and pylint marker, then yapf
 re-wraps dnn_model and its fc layers:)
 import paddle.fluid as fluid
 import math

 def dnn_model(dense_input, sparse_inputs, label, embedding_size,
               sparse_feature_dim):
     def embedding_layer(input):
         emb = fluid.layers.embedding(
             input=input,
             is_sparse=True,
             is_distributed=False,
             size=[sparse_feature_dim, embedding_size],
             param_attr=fluid.ParamAttr(
                 name="SparseFeatFactors",
                 initializer=fluid.initializer.Uniform()))
         return fluid.layers.sequence_pool(input=emb, pool_type='sum')
@@ -21,17 +37,29 @@ def dnn_model(dense_input, sparse_inputs, label,
         return fluid.layers.concat(emb_sums + [dense_tensor], axis=1)

     def mlp(mlp_input):
         fc1 = fluid.layers.fc(input=mlp_input,
                               size=400,
                               act='relu',
                               param_attr=fluid.ParamAttr(
                                   initializer=fluid.initializer.Normal(
                                       scale=1 / math.sqrt(mlp_input.shape[1]))))
         fc2 = fluid.layers.fc(input=fc1,
                               size=400,
                               act='relu',
                               param_attr=fluid.ParamAttr(
                                   initializer=fluid.initializer.Normal(
                                       scale=1 / math.sqrt(fc1.shape[1]))))
         fc3 = fluid.layers.fc(input=fc2,
                               size=400,
                               act='relu',
                               param_attr=fluid.ParamAttr(
                                   initializer=fluid.initializer.Normal(
                                       scale=1 / math.sqrt(fc2.shape[1]))))
         pre = fluid.layers.fc(input=fc3,
                               size=2,
                               act='softmax',
                               param_attr=fluid.ParamAttr(
                                   initializer=fluid.initializer.Normal(
                                       scale=1 / math.sqrt(fc3.shape[1]))))
         return pre
python/examples/criteo_ctr/test_client.py
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=doc-string-missing
from paddle_serving_client import Client
import paddle
import sys
...
@@ -13,9 +28,12 @@ batch = 1
buf_size = 100
dataset = criteo.CriteoDataset()
dataset.setup(1000001)
test_filelists = [
    "{}/part-%d".format(sys.argv[2]) % x
    for x in range(len(os.listdir(sys.argv[2])))
]
reader = dataset.infer_reader(test_filelists[len(test_filelists) - 40:], batch,
                              buf_size)
label_list = []
prob_list = []
...
@@ -25,4 +43,3 @@ for data in reader():
        feed_dict["sparse_{}".format(i - 1)] = data[0][i]
    fetch_map = client.predict(feed=feed_dict, fetch=["prob"])
    print(fetch_map)
python/examples/criteo_ctr/test_server.py
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=doc-string-missing
import os
import sys
from paddle_serving_server import OpMaker
...
python/examples/fit_a_line/README.md
python/examples/fit_a_line/benchmark.py
...
@@ -11,6 +11,8 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=doc-string-missing
from paddle_serving_client import Client
from paddle_serving_client.utils import MultiThreadRunner
from paddle_serving_client.utils import benchmark_args
...
@@ -21,28 +23,35 @@ import requests

args = benchmark_args()


def single_func(idx, resource):
    if args.request == "rpc":
        client = Client()
        client.load_client_config(args.model)
        client.connect([args.endpoint])
        train_reader = paddle.batch(
            paddle.reader.shuffle(
                paddle.dataset.uci_housing.train(), buf_size=500),
            batch_size=1)
        start = time.time()
        for data in train_reader():
            fetch_map = client.predict(feed={"x": data[0][0]}, fetch=["price"])
        end = time.time()
        return [[end - start]]
    elif args.request == "http":
        train_reader = paddle.batch(
            paddle.reader.shuffle(
                paddle.dataset.uci_housing.train(), buf_size=500),
            batch_size=1)
        start = time.time()
        for data in train_reader():
            r = requests.post(
                'http://{}/uci/prediction'.format(args.endpoint),
                data={"x": data[0]})
        end = time.time()
        return [[end - start]]


multi_thread_runner = MultiThreadRunner()
result = multi_thread_runner.run(single_func, args.thread, {})
print(result)
python/examples/fit_a_line/local_train.py
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=doc-string-missing
import sys
import paddle
import paddle.fluid as fluid

train_reader = paddle.batch(
    paddle.reader.shuffle(
        paddle.dataset.uci_housing.train(), buf_size=500),
    batch_size=16)

test_reader = paddle.batch(
    paddle.reader.shuffle(
        paddle.dataset.uci_housing.test(), buf_size=500),
    batch_size=16)

x = fluid.data(name='x', shape=[None, 13], dtype='float32')
y = fluid.data(name='y', shape=[None, 1], dtype='float32')
...
@@ -26,11 +45,9 @@ import paddle_serving_client.io as serving_io
for pass_id in range(30):
    for data_train in train_reader():
        avg_loss_value, = exe.run(fluid.default_main_program(),
                                  feed=feeder.feed(data_train),
                                  fetch_list=[avg_loss])

serving_io.save_model("uci_housing_model", "uci_housing_client", {"x": x},
                      {"price": y_predict}, fluid.default_main_program())
python/examples/fit_a_line/test_client.py
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=doc-string-missing
from paddle_serving_client import Client
import sys
...
@@ -6,10 +21,11 @@ client.load_client_config(sys.argv[1])
client.connect(["127.0.0.1:9393"])

import paddle
test_reader = paddle.batch(
    paddle.reader.shuffle(
        paddle.dataset.uci_housing.test(), buf_size=500),
    batch_size=1)

for data in test_reader():
    fetch_map = client.predict(feed={"x": data[0][0]}, fetch=["price"])
    print("{} {}".format(fetch_map["price"][0], data[0][1][0]))
python/examples/fit_a_line/test_server.py
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=doc-string-missing
import os
import sys
from paddle_serving_server import OpMaker
...
python/examples/imdb/benchmark.py
...
@@ -11,6 +11,7 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=doc-string-missing
import sys
import time
...
@@ -22,6 +23,7 @@ from paddle_serving_client.utils import benchmark_args
args = benchmark_args()


def single_func(idx, resource):
    imdb_dataset = IMDBDataset()
    imdb_dataset.load_resource(args.vocab)
...
@@ -40,18 +42,21 @@ def single_func(idx, resource):
            fin = open(fn)
            for line in fin:
                word_ids, label = imdb_dataset.get_words_and_label(line)
                fetch_map = client.predict(
                    feed={"words": word_ids}, fetch=["prediction"])
    elif args.request == "http":
        for fn in filelist:
            fin = open(fn)
            for line in fin:
                word_ids, label = imdb_dataset.get_words_and_label(line)
                r = requests.post(
                    "http://{}/imdb/prediction".format(args.endpoint),
                    data={"words": word_ids,
                          "fetch": ["prediction"]})
    end = time.time()
    return [[end - start]]


multi_thread_runner = MultiThreadRunner()
result = multi_thread_runner.run(single_func, args.thread, {})
print(result)
python/examples/imdb/imdb_reader.py
...
@@ -11,6 +11,7 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=doc-string-missing
import sys
import os
...
@@ -18,6 +19,7 @@ import paddle
import re
import paddle.fluid.incubate.data_generator as dg


class IMDBDataset(dg.MultiSlotDataGenerator):
    def load_resource(self, dictfile):
        self._vocab = {}
...
@@ -56,9 +58,11 @@ class IMDBDataset(dg.MultiSlotDataGenerator):
            for line in fin:
                feas, label = self.get_words_and_label(line)
                yield feas, label

        import paddle
        batch_iter = paddle.batch(
            paddle.reader.shuffle(
                local_iter, buf_size=buf_size),
            batch_size=batch)
        return batch_iter
...
@@ -66,13 +70,15 @@ class IMDBDataset(dg.MultiSlotDataGenerator):
        def memory_iter():
            for i in range(1000):
                yield self.return_value

        def data_iter():
            feas, label = self.get_words_and_label(line)
            yield ("words", feas), ("label", label)

        return data_iter


if __name__ == "__main__":
    imdb = IMDBDataset()
    imdb.load_resource("imdb.vocab")
    imdb.run_from_stdin()
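The reader classes above all follow Paddle's data_generator protocol: each dataset thread pipes raw text lines through the script named in set_pipe_command, and the generator turns each line into named slots. A minimal sketch of that protocol under stated assumptions (the class name, slot names, and line format below are illustrative, not part of this diff):

import paddle.fluid.incubate.data_generator as dg


class ToyDataset(dg.MultiSlotDataGenerator):
    def generate_sample(self, line):
        def reader():
            # hypothetical format: space-separated ids, last field is the label
            tokens = [int(x) for x in line.split()]
            yield ("words", tokens[:-1]), ("label", [tokens[-1]])

        return reader


if __name__ == "__main__":
    ToyDataset().run_from_stdin()  # consume lines piped in by the dataset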
python/examples/imdb/imdb_web_service_demo.sh
...
@@ -3,4 +3,3 @@ tar -xzf imdb_service.tar.gz
wget --no-check-certificate https://fleet.bj.bcebos.com/text_classification_data.tar.gz
tar -zxvf text_classification_data.tar.gz
python text_classify_service.py serving_server_model/ workdir imdb.vocab
python/examples/imdb/local_train.py
...
@@ -11,6 +11,7 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=doc-string-missing
import os
import sys
import paddle
...
python/examples/imdb/nets.py
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=doc-string-missing, doc-string-with-all-args, doc-string-with-returns
import sys
import time
import numpy as np
...
@@ -13,10 +28,9 @@ def bow_net(data,
            hid_dim=128,
            hid_dim2=96,
            class_dim=2):
    """ bow net. """
    emb = fluid.layers.embedding(
        input=data, size=[dict_dim, emb_dim], is_sparse=True)
    bow = fluid.layers.sequence_pool(input=emb, pool_type='sum')
    bow_tanh = fluid.layers.tanh(bow)
    fc_1 = fluid.layers.fc(input=bow_tanh, size=hid_dim, act="tanh")
...
@@ -37,10 +51,9 @@ def cnn_net(data,
            hid_dim2=96,
            class_dim=2,
            win_size=3):
    """ conv net. """
    emb = fluid.layers.embedding(
        input=data, size=[dict_dim, emb_dim], is_sparse=True)
    conv_3 = fluid.nets.sequence_conv_pool(
        input=emb,
...
@@ -67,9 +80,7 @@ def lstm_net(data,
             hid_dim2=96,
             class_dim=2,
             emb_lr=30.0):
    """ lstm net. """
    emb = fluid.layers.embedding(
        input=data,
        size=[dict_dim, emb_dim],
...
@@ -103,9 +114,7 @@ def gru_net(data,
            hid_dim2=96,
            class_dim=2,
            emb_lr=400.0):
    """ gru net. """
    emb = fluid.layers.embedding(
        input=data,
        size=[dict_dim, emb_dim],
...
python/examples/imdb/test_client.py
...
@@ -11,6 +11,7 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=doc-string-missing
from paddle_serving_client import Client
from imdb_reader import IMDBDataset
import sys
...
@@ -31,4 +32,3 @@ for line in sys.stdin:
    fetch = ["acc", "cost", "prediction"]
    fetch_map = client.predict(feed=feed, fetch=fetch)
    print("{} {}".format(fetch_map["prediction"][1], label[0]))
python/examples/imdb/test_client_batch.py
...
@@ -11,6 +11,7 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=doc-string-missing
from paddle_serving_client import Client
import sys
...
python/examples/imdb/text_classify_service.py
...
@@ -11,10 +11,13 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=doc-string-missing
from paddle_serving_server.web_service import WebService
from imdb_reader import IMDBDataset
import sys


class IMDBService(WebService):
    def prepare_dict(self, args={}):
        if len(args) == 0:
...
@@ -29,8 +32,9 @@ class IMDBService(WebService):
        res_feed["words"] = self.dataset.get_words_only(feed["words"])[0]
        return res_feed, fetch


imdb_service = IMDBService(name="imdb")
imdb_service.load_model_config(sys.argv[1])
imdb_service.prepare_server(workdir=sys.argv[2], port=9393, device="cpu")
imdb_service.prepare_dict({"dict_file_path": sys.argv[3]})
imdb_service.run_server()
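Once text_classify_service.py is running, the service answers POSTs at /imdb/prediction on port 9393. A hedged sketch of a client call (the host and review text are placeholders, and it assumes the Flask handler above reads a JSON body):

import requests

# hypothetical review text; the service's preprocess() maps it to word ids
payload = {"words": "this movie is great", "fetch": ["prediction"]}
r = requests.post("http://127.0.0.1:9393/imdb/prediction", json=payload)
print(r.json())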
python/examples/util/get_acc.py
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=doc-string-missing
import sys
import os
...
python/paddle_serving_client/io/__init__.py
...
@@ -11,6 +11,7 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=doc-string-missing
from paddle.fluid import Executor
from paddle.fluid.compiler import CompiledProgram
...
@@ -22,6 +23,7 @@ from paddle.fluid.io import save_inference_model
from ..proto import general_model_config_pb2 as model_conf
import os


def save_model(server_model_folder,
               client_config_folder,
               feed_var_dict,
...
@@ -32,8 +34,12 @@ def save_model(server_model_folder,
    feed_var_names = [feed_var_dict[x].name for x in feed_var_dict]
    target_vars = fetch_var_dict.values()

    save_inference_model(
        server_model_folder,
        feed_var_names,
        target_vars,
        executor,
        main_program=main_program)

    config = model_conf.GeneralModelConfig()
...
@@ -82,15 +88,15 @@ def save_model(server_model_folder,
    cmd = "mkdir -p {}".format(client_config_folder)
    os.system(cmd)
    with open("{}/serving_client_conf.prototxt".format(client_config_folder),
              "w") as fout:
        fout.write(str(config))
    with open("{}/serving_server_conf.prototxt".format(server_model_folder),
              "w") as fout:
        fout.write(str(config))
    with open("{}/serving_client_conf.stream.prototxt".format(
            client_config_folder), "wb") as fout:
        fout.write(config.SerializeToString())
    with open("{}/serving_server_conf.stream.prototxt".format(
            server_model_folder), "wb") as fout:
        fout.write(config.SerializeToString())
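For orientation, save_model is invoked exactly as in the training scripts above; this sketch mirrors the fit_a_line call (x and y_predict come from that training program, and the folder names follow the examples rather than a fixed convention):

import paddle_serving_client.io as serving_io

# x / y_predict are the feed and fetch variables of the trained program
serving_io.save_model("uci_housing_model", "uci_housing_client", {"x": x},
                      {"price": y_predict}, fluid.default_main_program())
# As the hunk above shows, this writes serving_client_conf.prototxt and
# serving_client_conf.stream.prototxt under the client folder, and the
# corresponding serving_server_conf files under the server model folder.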
python/paddle_serving_client/metric/acc.py
...
@@ -11,6 +11,8 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=doc-string-missing
def acc(prob, label, threshold):
    # we support prob is the probability for label to be one
...
@@ -21,5 +23,3 @@ def acc(prob, label, threshold):
        if (prob - threshold) * (label - prob) > 0:
            right += 1
    return float(right) / total
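A toy check of the metric, assuming acc walks prob and label elementwise (the loop header sits outside this hunk): a sample counts as right when the probability and the label fall on the same side of the threshold.

probs = [0.9, 0.2, 0.7, 0.4]    # hypothetical predicted probabilities
labels = [1, 0, 1, 0]           # matching 0/1 ground truth
print(acc(probs, labels, 0.5))  # all four agree with the threshold -> 1.0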
python/paddle_serving_client/metric/auc.py
...
@@ -11,6 +11,8 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=doc-string-missing, doc-string-with-all-args, doc-string-with-returns
def tied_rank(x):
    """
...
@@ -24,7 +26,7 @@ def tied_rank(x):
score : list of numbers
The tied rank of each element in x
"""
    sorted_x = sorted(zip(x, range(len(x))))
    r = [0 for k in x]
    cur_val = sorted_x[0][0]
    last_rank = 0
...
@@ -32,13 +34,14 @@ def tied_rank(x):
        if cur_val != sorted_x[i][0]:
            cur_val = sorted_x[i][0]
            for j in range(last_rank, i):
                r[sorted_x[j][1]] = float(last_rank + 1 + i) / 2.0
            last_rank = i
        if i == len(sorted_x) - 1:
            for j in range(last_rank, i + 1):
                r[sorted_x[j][1]] = float(last_rank + i + 2) / 2.0
    return r


def auc(actual, posterior):
    """
    Computes the area under the receiver-operating characteristic (AUC)
...
@@ -56,10 +59,9 @@ def auc(actual, posterior):
The mean squared error between actual and posterior
"""
    r = tied_rank(posterior)
    num_positive = len([0 for x in actual if x == 1])
    num_negative = len(actual) - num_positive
    sum_positive = sum([r[i] for i in range(len(r)) if actual[i] == 1])
    auc = ((sum_positive - num_positive * (num_positive + 1) / 2.0) /
           (num_negative * num_positive))
    return auc
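A toy check of auc with hypothetical data; when every positive is scored strictly above every negative, the ranking is perfect and the result is 1.0:

actual = [1, 0, 1, 1, 0]               # binary ground truth
posterior = [0.8, 0.2, 0.6, 0.9, 0.4]  # predicted scores
print(auc(actual, posterior))          # perfect ranking -> 1.0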
python/paddle_serving_client/utils/__init__.py
...
@@ -11,18 +11,26 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=doc-string-missing
import os
import sys
import subprocess
import argparse
from multiprocessing import Pool


def benchmark_args():
    parser = argparse.ArgumentParser("benchmark")
    parser.add_argument("--thread", type=int, default=10, help="concurrecy")
    parser.add_argument(
        "--model", type=str, default="", help="model for evaluation")
    parser.add_argument(
        "--endpoint",
        type=str,
        default="127.0.0.1:9292",
        help="endpoint of server")
    parser.add_argument(
        "--request", type=str, default="rpc", help="mode of service")
    return parser.parse_args()
...
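These helpers are what the benchmark scripts above build on: MultiThreadRunner.run fans single_func out over N threads and collects the per-thread return lists. A minimal sketch with a no-op worker (the sleep stands in for real prediction calls, so the numbers are placeholders):

import time
from paddle_serving_client.utils import MultiThreadRunner


def single_func(idx, resource):  # idx: thread index, resource: shared dict
    start = time.time()
    time.sleep(0.01)  # stand-in for issuing requests to a serving endpoint
    return [[time.time() - start]]


runner = MultiThreadRunner()
print(runner.run(single_func, 4, {}))  # four concurrent workers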
python/paddle_serving_server/serve.py
...
@@ -19,16 +19,26 @@ Usage:
"""
import argparse


def parse_args():  # pylint: disable=doc-string-missing
    parser = argparse.ArgumentParser("serve")
    parser.add_argument(
        "--thread", type=int, default=10, help="Concurrency of server")
    parser.add_argument(
        "--model", type=str, default="", help="Model for serving")
    parser.add_argument(
        "--port", type=int, default=9292, help="Port the server")
    parser.add_argument(
        "--workdir",
        type=str,
        default="workdir",
        help="Working dir of current service")
    parser.add_argument(
        "--device", type=str, default="cpu", help="Type of device")
    return parser.parse_args()


def start_standard_model():  # pylint: disable=doc-string-missing
    args = parse_args()
    thread_num = args.thread
    model = args.model
...
@@ -59,5 +69,6 @@ def start_standard_model():
    server.prepare_server(workdir=workdir, port=port, device=device)
    server.run_server()


if __name__ == "__main__":
    start_standard_model()
python/paddle_serving_server/web_serve.py
...
@@ -21,19 +21,31 @@ import argparse
from multiprocessing import Pool, Process
from .web_service import WebService


def parse_args():  # pylint: disable=doc-string-missing
    parser = argparse.ArgumentParser("web_serve")
    parser.add_argument(
        "--thread", type=int, default=10, help="Concurrency of server")
    parser.add_argument(
        "--model", type=str, default="", help="Model for serving")
    parser.add_argument(
        "--port", type=int, default=9292, help="Port the server")
    parser.add_argument(
        "--workdir",
        type=str,
        default="workdir",
        help="Working dir of current service")
    parser.add_argument(
        "--device", type=str, default="cpu", help="Type of device")
    parser.add_argument(
        "--name", type=str, default="default", help="Default service name")
    return parser.parse_args()


if __name__ == "__main__":
    args = parse_args()
    service = WebService(name=args.name)
    service.load_model_config(args.model)
    service.prepare_server(
        workdir=args.workdir, port=args.port, device=args.device)
    service.run_server()
python/paddle_serving_server/web_service.py
...
@@ -12,11 +12,14 @@
# See the License for the specific language governing permissions and
# limitations under the License.
#!flask/bin/python
# pylint: disable=doc-string-missing
from flask import Flask, request, abort
from multiprocessing import Pool, Process
from paddle_serving_server import OpMaker, OpSeqMaker, Server
from paddle_serving_client import Client


class WebService(object):
    def __init__(self, name="default_service"):
        self.name = name
...
@@ -38,7 +41,7 @@ class WebService(object):
        server.set_num_threads(16)
        server.load_model_config(self.model_config)
        server.prepare_server(
            workdir=self.workdir, port=self.port + 1, device=self.device)
        server.run_server()

    def prepare_server(self, workdir="", port=9393, device="cpu"):
...
@@ -51,8 +54,9 @@ class WebService(object):
        client_service = Client()
        client_service.load_client_config(
            "{}/serving_server_conf.prototxt".format(self.model_config))
        client_service.connect(["127.0.0.1:{}".format(self.port + 1)])
        service_name = "/" + self.name + "/prediction"

        @app_instance.route(service_name, methods=['POST'])
        def get_prediction():
            if not request.json:
...
@@ -63,15 +67,21 @@ class WebService(object):
            if "fetch" in feed:
                del feed["fetch"]
            fetch_map = client_service.predict(feed=feed, fetch=fetch)
            fetch_map = self.postprocess(
                feed=request.json, fetch=fetch, fetch_map=fetch_map)
            return fetch_map

        app_instance.run(
            host="127.0.0.1", port=self.port, threaded=False, processes=1)

    def run_server(self):
        import socket
        localIP = socket.gethostbyname(socket.gethostname())
        print("web service address:")
        print("http://{}:{}/{}/prediction".format(localIP, self.port,
                                                  self.name))
        p_rpc = Process(target=self._launch_rpc_service)
        p_web = Process(target=self._launch_web_service)
        p_rpc.start()
...
@@ -84,4 +94,3 @@ class WebService(object):
    def postprocess(self, feed={}, fetch=[], fetch_map={}):
        return fetch_map
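With run_server() active, get_prediction above expects a JSON body holding the feed variables plus a "fetch" list. A hedged sketch against a fit_a_line server named uci on port 9292 (the feature vector below is a placeholder, and the sketch assumes the handler parses a JSON body):

import requests

payload = {"x": [0.0] * 13, "fetch": ["price"]}  # hypothetical 13-dim input
r = requests.post("http://127.0.0.1:9292/uci/prediction", json=payload)
print(r.json())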
python/paddle_serving_server_gpu/__init__.py
...
@@ -11,6 +11,7 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=doc-string-missing
import os
from .proto import server_configure_pb2 as server_sdk
...
@@ -22,6 +23,7 @@ import paddle_serving_server_gpu as paddle_serving_server
from version import serving_server_version
from contextlib import closing


def serve_args():
    parser = argparse.ArgumentParser("serve")
    parser.add_argument(
...
@@ -37,12 +39,12 @@ def serve_args():
        help="Working dir of current service")
    parser.add_argument(
        "--device", type=str, default="gpu", help="Type of device")
    parser.add_argument("--gpu_ids", type=str, default="", help="gpu ids")
    parser.add_argument(
        "--name", type=str, default="default", help="Default service name")
    return parser.parse_args()


class OpMaker(object):
    def __init__(self):
        self.op_dict = {
...
python/paddle_serving_server_gpu/serve.py
...
@@ -22,7 +22,7 @@ from multiprocessing import Pool, Process
from paddle_serving_server_gpu import serve_args


def start_gpu_card_model(gpuid, args):  # pylint: disable=doc-string-missing
    gpuid = int(gpuid)
    device = "gpu"
    port = args.port
...
@@ -59,7 +59,8 @@ def start_gpu_card_model(gpuid, args):
    server.set_gpuid(gpuid)
    server.run_server()


def start_multi_card(args):  # pylint: disable=doc-string-missing
    gpus = ""
    if args.gpu_ids == "":
        gpus = os.environ["CUDA_VISIBLE_DEVICES"]
...
@@ -70,13 +71,17 @@ def start_multi_card(args):
    else:
        gpu_processes = []
        for i, gpu_id in enumerate(gpus):
            p = Process(target=start_gpu_card_model, args=(
                i,
                args, ))
            gpu_processes.append(p)
        for p in gpu_processes:
            p.start()
        for p in gpu_processes:
            p.join()


if __name__ == "__main__":
    args = serve_args()
    start_multi_card(args)
python/paddle_serving_server_gpu/web_service.py
...
@@ -12,6 +12,8 @@
# See the License for the specific language governing permissions and
# limitations under the License.
#!flask/bin/python
# pylint: disable=doc-string-missing
from flask import Flask, request, abort
from multiprocessing import Pool, Process
from paddle_serving_server_gpu import OpMaker, OpSeqMaker, Server
...
@@ -34,8 +36,11 @@ class WebService(object):
    def set_gpus(self, gpus):
        self.gpus = gpus

    def default_rpc_service(self,
                            workdir="conf",
                            port=9292,
                            gpuid=0,
                            thread_num=10):
        device = "gpu"
        if gpuid == -1:
            device = "cpu"
...
@@ -70,14 +75,16 @@ class WebService(object):
        if len(self.gpus) == 0:
            # init cpu service
            self.rpc_service_list.append(
                self.default_rpc_service(
                    self.workdir, self.port + 1, -1, thread_num=10))
        else:
            for i, gpuid in enumerate(self.gpus):
                self.rpc_service_list.append(
                    self.default_rpc_service(
                        "{}_{}".format(self.workdir, i),
                        self.port + 1 + i,
                        gpuid,
                        thread_num=10))

    def _launch_web_service(self, gpu_num):
        app_instance = Flask(__name__)
...
@@ -100,8 +107,7 @@ class WebService(object):
            if "fetch" not in request.json:
                abort(400)
            feed, fetch = self.preprocess(request.json, request.json["fetch"])
            fetch_map = client_list[0].predict(feed=feed, fetch=fetch)
            fetch_map = self.postprocess(
                feed=request.json, fetch=fetch, fetch_map=fetch_map)
            return fetch_map
...
@@ -120,13 +126,14 @@ class WebService(object):
        rpc_processes = []
        for idx in range(len(self.rpc_service_list)):
            p_rpc = Process(target=self._launch_rpc_service, args=(idx, ))
            rpc_processes.append(p_rpc)
        for p in rpc_processes:
            p.start()
        p_web = Process(
            target=self._launch_web_service, args=(len(self.gpus), ))
        p_web.start()
        for p in rpc_processes:
            p.join()
...
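Putting the GPU service together mirrors the CPU WebService, with set_gpus adding one RPC worker per device. A hedged sketch only: the model path and gpu ids are placeholders, set_gpus is assumed to take an iterable of id strings (matching the enumerate above), and prepare_server is assumed to keep the CPU service's signature:

from paddle_serving_server_gpu.web_service import WebService

service = WebService(name="uci")
service.load_model_config("uci_housing_model")  # hypothetical model dir
service.set_gpus(["0", "1"])  # one default_rpc_service per gpu id
service.prepare_server(workdir="workdir", port=9292, device="gpu")
service.run_server()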
python/setup.py.in
...
@@ -77,4 +77,3 @@ setup(
],
license='Apache 2.0',
keywords=('paddle-serving serving-client deployment industrial easy-to-use'))
python/setup.py.server.in
...
@@ -73,4 +73,3 @@ setup(
],
license='Apache 2.0',
keywords=('paddle-serving serving-server deployment industrial easy-to-use'))
tools/Dockerfile.ci
FROM centos:7.3.1611

RUN yum -y install wget >/dev/null \
    && yum -y install gcc gcc-c++ make glibc-static which >/dev/null \
    && yum -y install git openssl-devel curl-devel bzip2-devel python-devel >/dev/null \
    && wget https://cmake.org/files/v3.2/cmake-3.2.0-Linux-x86_64.tar.gz >/dev/null \
    && tar xzf cmake-3.2.0-Linux-x86_64.tar.gz \
    && mv cmake-3.2.0-Linux-x86_64 /usr/local/cmake3.2.0 \
    && echo 'export PATH=/usr/local/cmake3.2.0/bin:$PATH' >> /root/.bashrc \
    && rm cmake-3.2.0-Linux-x86_64.tar.gz \
    && wget https://dl.google.com/go/go1.14.linux-amd64.tar.gz >/dev/null \
    && tar xzf go1.14.linux-amd64.tar.gz \
    && mv go /usr/local/go \
    && echo 'export GOROOT=/usr/local/go' >> /root/.bashrc \
    && echo 'export PATH=/usr/local/go/bin:$PATH' >> /root/.bashrc \
    && rm go1.14.linux-amd64.tar.gz \
    && yum -y install python-devel sqlite-devel >/dev/null \
    && curl https://bootstrap.pypa.io/get-pip.py -o get-pip.py >/dev/null \
    && python get-pip.py >/dev/null \
    && pip install google protobuf setuptools wheel flask >/dev/null \
    && rm get-pip.py \
    && wget http://nixos.org/releases/patchelf/patchelf-0.10/patchelf-0.10.tar.bz2 \
    && yum -y install bzip2 >/dev/null \
    && tar -jxf patchelf-0.10.tar.bz2 \
    && cd patchelf-0.10 \
    && ./configure --prefix=/usr \
    && make >/dev/null && make install >/dev/null \
    && cd .. \
    && rm -rf patchelf-0.10* \
    && yum -y update >/dev/null \
    && yum -y install dnf >/dev/null \
    && yum -y install dnf-plugins-core >/dev/null \
    && dnf copr enable alonid/llvm-3.8.0 -y \
    && dnf install llvm-3.8.0 clang-3.8.0 compiler-rt-3.8.0 -y \
    && echo 'export PATH=/opt/llvm-3.8.0/bin:$PATH' >> /root/.bashrc
tools/cpp_examples/demo-client/paddle_serving_client.egg-info/SOURCES.txt
tools/cpp_examples/demo-client/paddle_serving_client.egg-info/dependency_links.txt
tools/cpp_examples/demo-client/src/general_model.cpp
...
@@ -12,8 +12,8 @@
// See the License for the specific language governing permissions and
// limitations under the License.
#include "general_model.h" // NOLINT
#include <fstream>
#include "general_model.h"
#include "sdk-cpp/builtin_format.pb.h"
#include "sdk-cpp/include/common.h"
#include "sdk-cpp/include/predictor_sdk.h"
...
...
@@ -28,7 +28,7 @@ namespace baidu {
namespace paddle_serving {
namespace general_model {

void PredictorClient::init(const std::string& conf_file) {
  _conf_file = conf_file;
  std::ifstream fin(conf_file);
  if (!fin) {
...
@@ -65,9 +65,8 @@ void PredictorClient::init(const std::string & conf_file) {
  }
}

void PredictorClient::set_predictor_conf(const std::string& conf_path,
                                         const std::string& conf_file) {
  _predictor_path = conf_path;
  _predictor_conf = conf_file;
}
...
@@ -80,53 +79,51 @@ int PredictorClient::create_predictor() {
  _api.thrd_initialize();
}

void PredictorClient::predict(
    const std::vector<std::vector<float>>& float_feed,
    const std::vector<std::string>& float_feed_name,
    const std::vector<std::vector<int64_t>>& int_feed,
    const std::vector<std::string>& int_feed_name,
    const std::vector<std::string>& fetch_name,
    FetchedMap* fetch_result) {
  _api.thrd_clear();
  _predictor = _api.fetch_predictor("general_model");

  Request req;
  std::vector<Tensor*> tensor_vec;
  FeedInst* inst = req.add_insts();
  for (auto& name : float_feed_name) {
    tensor_vec.push_back(inst->add_tensor_array());
  }

  for (auto& name : int_feed_name) {
    tensor_vec.push_back(inst->add_tensor_array());
  }

  int vec_idx = 0;
  for (auto& name : float_feed_name) {
    int idx = _feed_name_to_idx[name];
    Tensor* tensor = tensor_vec[idx];
    for (int j = 0; j < _shape[idx].size(); ++j) {
      tensor->add_shape(_shape[idx][j]);
    }
    tensor->set_elem_type(1);
    for (int j = 0; j < float_feed[vec_idx].size(); ++j) {
      tensor->add_data((char*)(&(float_feed[vec_idx][j])),  // NOLINT
                       sizeof(float));
    }
    vec_idx++;
  }

  vec_idx = 0;
  for (auto& name : int_feed_name) {
    int idx = _feed_name_to_idx[name];
    Tensor* tensor = tensor_vec[idx];
    for (int j = 0; j < _shape[idx].size(); ++j) {
      tensor->add_shape(_shape[idx][j]);
    }
    tensor->set_elem_type(0);
    for (int j = 0; j < int_feed[vec_idx].size(); ++j) {
      tensor->add_data((char*)(&(int_feed[vec_idx][j])),  // NOLINT
                       sizeof(int64_t));
    }
    vec_idx++;
  }
...
@@ -139,13 +136,13 @@ void PredictorClient::predict(
    LOG(ERROR) << "failed call predictor with req: " << req.ShortDebugString();
    exit(-1);
  } else {
    for (auto& name : fetch_name) {
      int idx = _fetch_name_to_idx[name];
      int len = res.insts(0).tensor_array(idx).data_size();
      (*fetch_result)[name].resize(len);
      for (int i = 0; i < len; ++i) {
        (*fetch_result)[name][i] =
            *(const float*)res.insts(0).tensor_array(idx).data(i).c_str();
      }
    }
  }
...
@@ -154,12 +151,12 @@ void PredictorClient::predict(
}

void PredictorClient::predict_with_profile(
    const std::vector<std::vector<float>>& float_feed,
    const std::vector<std::string>& float_feed_name,
    const std::vector<std::vector<int64_t>>& int_feed,
    const std::vector<std::string>& int_feed_name,
    const std::vector<std::string>& fetch_name,
    FetchedMap* fetch_result) {
  return;
}
...
tools/cpp_examples/demo-client/src/general_model.h
...
@@ -18,9 +18,9 @@
#include <unistd.h>
#include <fstream>
#include <map>
#include <string>
#include <vector>

#include "sdk-cpp/builtin_format.pb.h"
#include "sdk-cpp/general_model_service.pb.h"
...
...
@@ -37,44 +37,40 @@ namespace general_model {
typedef std::map<std::string, std::vector<float>> FetchedMap;

class PredictorClient {
 public:
  PredictorClient() {}
  ~PredictorClient() {}

  void init(const std::string& client_conf);
  void set_predictor_conf(const std::string& conf_path,
                          const std::string& conf_file);
  int create_predictor();

  void predict(const std::vector<std::vector<float>>& float_feed,
               const std::vector<std::string>& float_feed_name,
               const std::vector<std::vector<int64_t>>& int_feed,
               const std::vector<std::string>& int_feed_name,
               const std::vector<std::string>& fetch_name,
               FetchedMap* result_map);

  void predict_with_profile(const std::vector<std::vector<float>>& float_feed,
                            const std::vector<std::string>& float_feed_name,
                            const std::vector<std::vector<int64_t>>& int_feed,
                            const std::vector<std::string>& int_feed_name,
                            const std::vector<std::string>& fetch_name,
                            FetchedMap* result_map);

 private:
  PredictorApi _api;
  Predictor* _predictor;
  std::string _predictor_conf;
  std::string _predictor_path;
  std::string _conf_file;
  std::map<std::string, int> _feed_name_to_idx;
  std::map<std::string, int> _fetch_name_to_idx;
  std::map<std::string, std::string> _fetch_name_to_var_name;
  std::vector<std::vector<int>> _shape;
};

}  // namespace general_model
...
tools/cpp_examples/demo-client/src/general_model_main.cpp
...
@@ -15,20 +15,20 @@
#include <fstream>
#include <vector>
#include "general_model.h"  // NOLINT

using namespace std;  // NOLINT

using baidu::paddle_serving::general_model::PredictorClient;
using baidu::paddle_serving::general_model::FetchedMap;

int main(int argc, char* argv[]) {
  PredictorClient* client = new PredictorClient();
  client->init("inference.conf");
  client->set_predictor_conf("./", "predictor.conf");
  client->create_predictor();
  std::vector<std::vector<float>> float_feed;
  std::vector<std::vector<int64_t>> int_feed;
  std::vector<std::string> float_feed_name;
  std::vector<std::string> int_feed_name = {"words", "label"};
  std::vector<std::string> fetch_name = {"cost", "acc", "prediction"};
...
@@ -53,12 +53,13 @@ int main(int argc, char * argv[]) {
    cin >> label;
    int_feed.push_back({label});

    FetchedMap result;
    client->predict(
        float_feed, float_feed_name, int_feed, int_feed_name, fetch_name,
        &result);

    cout << label << "\t" << result["prediction"][1] << endl;
...
tools/cpp_examples/demo-client/src/load_general_model.cpp
...
@@ -18,14 +18,14 @@
#include <fstream>
#include "core/sdk-cpp/builtin_format.pb.h"
#include "core/sdk-cpp/include/common.h"
#include "core/sdk-cpp/include/predictor_sdk.h"
#include "core/sdk-cpp/load_general_model_service.pb.h"

using baidu::paddle_serving::sdk_cpp::Predictor;
using baidu::paddle_serving::sdk_cpp::PredictorApi;
using baidu::paddle_serving::predictor::load_general_model_service::
    RequestAndResponse;

int create_req(RequestAndResponse& req) {  // NOLINT
  req.set_a(1);
...
tools/cpp_examples/demo-client/src/pybind_general_model.cpp
// Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <pybind11/pybind11.h>
#include <pybind11/stl.h>

#include "general_model.h"  // NOLINT

namespace py = pybind11;
...
@@ -17,28 +31,30 @@ PYBIND11_MODULE(paddle_serving_client, m) {
  py::class_<PredictorClient>(m, "PredictorClient", py::buffer_protocol())
      .def(py::init())
      .def("init",
           [](PredictorClient& self, const std::string& conf) {
             self.init(conf);
           })
      .def("set_predictor_conf",
           [](PredictorClient& self,
              const std::string& conf_path,
              const std::string& conf_file) {
             self.set_predictor_conf(conf_path, conf_file);
           })
      .def("create_predictor",
           [](PredictorClient& self) { self.create_predictor(); })
      .def("predict",
           [](PredictorClient& self,
              const std::vector<std::vector<float>>& float_feed,
              const std::vector<std::string>& float_feed_name,
              const std::vector<std::vector<int64_t>>& int_feed,
              const std::vector<std::string>& int_feed_name,
              const std::vector<std::string>& fetch_name,
              FetchedMap* fetch_result) {
             return self.predict(float_feed,
                                 float_feed_name,
                                 int_feed,
                                 int_feed_name,
                                 fetch_name,
                                 fetch_result);
           });
}
...
tools/cpp_examples/demo-serving/op/bert_service_op.h
...
@@ -14,10 +14,10 @@
#pragma once
#include <vector>
#include "examples/demo-serving/bert_service.pb.h"
#include "paddle_inference_api.h"  // NOLINT

#include <sys/time.h>  // NOLINT

namespace baidu {
namespace paddle_serving {
...
tools/cpp_examples/demo-serving/op/classify_op.cpp
...
@@ -13,9 +13,9 @@
// limitations under the License.
#include "examples/demo-serving/op/classify_op.h"
#include "examples/demo-serving/op/reader_op.h"
#include "core/predictor/framework/infer.h"
#include "core/predictor/framework/memory.h"
#include "examples/demo-serving/op/reader_op.h"
namespace
baidu
{
namespace
paddle_serving
{
...
...
tools/cpp_examples/demo-serving/op/classify_op.h
...
@@ -14,8 +14,8 @@
#pragma once
#include <vector>
#include "examples/demo-serving/image_class.pb.h"
#include "paddle_inference_api.h"  // NOLINT

namespace baidu {
namespace paddle_serving {
...
tools/cpp_examples/demo-serving/op/ctr_prediction_op.h
...
@@ -14,8 +14,8 @@
#pragma once
#include <vector>
#include "examples/demo-serving/ctr_prediction.pb.h"
#include "paddle_inference_api.h"  // NOLINT

namespace baidu {
namespace paddle_serving {
...
tools/cpp_examples/demo-serving/op/general_model_op.h
...
@@ -25,7 +25,6 @@
#endif

#include "examples/demo-serving/general_model_service.pb.h"

namespace baidu {
namespace paddle_serving {
namespace serving {
...
tools/cpp_examples/demo-serving/op/kvdb_echo_op.h (diff collapsed)
tools/cpp_examples/demo-serving/op/load_general_model_conf_op.h (diff collapsed)
tools/cpp_examples/demo-serving/op/reader_op.h (diff collapsed)
tools/cpp_examples/demo-serving/op/text_classification_op.h (diff collapsed)
tools/cpp_examples/demo-serving/op/write_json_op.cpp (diff collapsed)
tools/cpp_examples/demo-serving/op/write_json_op.h (diff collapsed)
tools/cpp_examples/demo-serving/op/write_op.cpp (diff collapsed)
tools/cpp_examples/demo-serving/op/write_op.h (diff collapsed)
tools/cpp_examples/demo-serving/proto/general_model_service.proto (diff collapsed)
tools/cpp_examples/elastic-ctr/client/demo/elastic_ctr.py (diff collapsed)
tools/cpp_examples/elastic-ctr/serving/op/elastic_ctr_prediction_op.cpp (diff collapsed)
tools/cpp_examples/elastic-ctr/serving/op/elastic_ctr_prediction_op.h (diff collapsed)
tools/serving-build.sh → tools/serving_build.sh (file moved)
tools/serving_check_style.sh (new file, mode 100644; diff collapsed)