PaddlePaddle / Serving

Commit a6f3a1a7
Authored on Jan 19, 2020 by MRXLT
add general server c++ code && fix batch predict bug
Parent: 43c55732
Showing 13 changed files with 197 additions and 84 deletions.
core/configure/proto/general_model_config.proto    +13 -6
core/configure/proto/sdk_configure.proto           +17 -17
core/configure/proto/server_configure.proto        +2 -0
core/predictor/framework/resource.cpp              +59 -19
core/predictor/framework/resource.h                +14 -11
core/predictor/src/pdserving.cpp                   +3 -6
examples/demo-serving/conf/general_model.prototxt  +31 -17
examples/demo-serving/conf/gflags.conf             +0 -2
examples/demo-serving/conf/resource.prototxt       +3 -0
examples/demo-serving/conf/service.prototxt        +3 -3
examples/demo-serving/conf/workflow.prototxt       +19 -2
examples/demo-serving/op/general_model_op.cpp      +27 -0
python/examples/imdb/test_client_batch.py          +6 -1
core/configure/proto/general_model_config.proto

@@ -15,12 +15,19 @@
 syntax = "proto2";
 package baidu.paddle_serving.configure;
 
 message Shape {
   repeated int32 shape = 1;
 };
 
+message FeedVar {
+  required string name = 1;
+  required bool is_lod_tensor = 2;
+  required int32 feed_type = 3;
+  required Shape feed_shape = 4;
+}
+
+message FetchVar {
+  required string name = 1;
+  required Shape fetch_shape = 2;
+}
+
 message GeneralModelConfig {
-  repeated bool is_lod_feed = 1;
-  repeated int32 feed_type = 2;
-  repeated Shape feed_shape = 4;
+  repeated FeedVar feed_var = 1;
+  repeated FetchVar fetch_var = 2;
 };
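The restructured config groups per-variable metadata into FeedVar/FetchVar messages instead of parallel repeated fields. For orientation, a minimal sketch (not part of this commit) of loading the new message from its text representation with protobuf's TextFormat; the config path here is hypothetical:

#include <fstream>
#include <iostream>
#include <sstream>

#include <google/protobuf/text_format.h>

#include "general_model_config.pb.h"  // generated from the proto above

int main() {
  std::ifstream fin("./conf/general_model.prototxt");  // hypothetical path
  std::stringstream buffer;
  buffer << fin.rdbuf();

  baidu::paddle_serving::configure::GeneralModelConfig conf;
  if (!google::protobuf::TextFormat::ParseFromString(buffer.str(), &conf)) {
    std::cerr << "failed to parse general model config" << std::endl;
    return -1;
  }
  // Each feed/fetch variable now carries its own name, type, and shape.
  for (const auto& var : conf.feed_var()) {
    std::cout << "feed: " << var.name() << " lod: " << var.is_lod_tensor()
              << std::endl;
  }
  for (const auto& var : conf.fetch_var()) {
    std::cout << "fetch: " << var.name() << std::endl;
  }
  return 0;
}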
core/configure/proto/sdk_configure.proto

@@ -16,27 +16,27 @@ syntax = "proto2";
 package baidu.paddle_serving.configure;
 
 message ConnectionConf {
-  required int32 connect_timeout_ms = 1 [default = 2000];
-  required int32 rpc_timeout_ms = 2 [default = 20000];
-  required int32 connect_retry_count = 3 [default = 2];
-  required int32 max_connection_per_host = 4 [default = 100];
-  required int32 hedge_request_timeout_ms = 5 [default = -1];
-  required int32 hedge_fetch_retry_count = 6 [default = 2];
-  required string connection_type = 7 [default = "pooled"];
+  required int32 connect_timeout_ms = 1;
+  required int32 rpc_timeout_ms = 2;
+  required int32 connect_retry_count = 3;
+  required int32 max_connection_per_host = 4;
+  required int32 hedge_request_timeout_ms = 5;
+  required int32 hedge_fetch_retry_count = 6;
+  required string connection_type = 7;
 };
 
 message NamingConf {
-  optional string cluster_filter_strategy = 1 [default = "Default"];
-  optional string load_balance_strategy = 2 [default = "la"];
+  optional string cluster_filter_strategy = 1;
+  optional string load_balance_strategy = 2;
   optional string cluster = 3;
 };
 
 message RpcParameter {
   // 0-NONE, 1-SNAPPY, 2-GZIP, 3-ZLIB, 4-LZ4
-  required int32 compress_type = 1 [default = 0];
-  required int32 package_size = 2 [default = 20];
-  required string protocol = 3 [default = "baidu_std"];
-  required int32 max_channel_per_request = 4 [default = 3];
+  required int32 compress_type = 1;
+  required int32 package_size = 2;
+  required string protocol = 3;
+  required int32 max_channel_per_request = 4;
 };
 
 message SplitConf {

@@ -53,12 +53,12 @@ message VariantConf {
   optional string variant_router = 6;
 };
 
 message WeightedRandomRenderConf {
-  required string variant_weight_list = 1 [default = "50"];
+  required string variant_weight_list = 1;
 };
 
 message Predictor {
-  required string name = 1 [default = "general_model"];
-  required string service_name = 2 [default = "baidu.paddle_serving.predictor.general_model.GeneralModelService"];
-  required string endpoint_router = 3 [default = "WeightedRandomRender"];
+  required string name = 1;
+  required string service_name = 2;
+  required string endpoint_router = 3;
   required WeightedRandomRenderConf weighted_random_render_conf = 4;
   repeated VariantConf variants = 5;
 };
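With the [default = ...] options removed, anything that constructs these messages must now fill every required field explicitly before serialization. A minimal sketch of what that looks like, reusing the former default values; the helper function is illustrative, not from this commit:

#include "sdk_configure.pb.h"  // generated from the proto above

// Illustrative helper: populate ConnectionConf with the values that used to
// be field defaults; required fields without defaults must be set by hand.
baidu::paddle_serving::configure::ConnectionConf make_connection_conf() {
  baidu::paddle_serving::configure::ConnectionConf conn;
  conn.set_connect_timeout_ms(2000);
  conn.set_rpc_timeout_ms(20000);
  conn.set_connect_retry_count(2);
  conn.set_max_connection_per_host(100);
  conn.set_hedge_request_timeout_ms(-1);
  conn.set_hedge_fetch_retry_count(2);
  conn.set_connection_type("pooled");
  return conn;
}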
core/configure/proto/server_configure.proto

@@ -53,6 +53,8 @@ message ResourceConf {
   required string model_toolkit_path = 1;
   required string model_toolkit_file = 2;
   optional string cube_config_file = 3;
+  optional string general_model_path = 4;
+  optional string general_model_file = 5;
 };
 
 // DAG node depency info
core/predictor/framework/resource.cpp

@@ -47,15 +47,22 @@ std::shared_ptr<PaddleGeneralModelConfig> Resource::get_general_model_config() {
 }
 
 void Resource::print_general_model_config(
-    const std::shared_ptr<PaddleGeneralModelConfig> & config) {
+    const std::shared_ptr<PaddleGeneralModelConfig>& config) {
   if (config == nullptr) {
     LOG(INFO) << "paddle general model config is not set";
     return;
   }
-  LOG(INFO) << "Number of Feed Tensor: " << config->_feed_type.size();
+  LOG(INFO) << "Number of Feed Tensor: " << config->_feed_name.size();
   std::ostringstream oss;
+  LOG(INFO) << "Feed Name Info";
+  for (auto& feed_name : config->_feed_name) {
+    oss << feed_name << " ";
+  }
+  LOG(INFO) << oss.str();
+  oss.clear();
+  oss.str("");
   LOG(INFO) << "Feed Type Info";
-  for (auto & feed_type : config->_feed_type) {
+  for (auto& feed_type : config->_feed_type) {
     oss << feed_type << " ";
   }
   LOG(INFO) << oss.str();

@@ -71,7 +78,7 @@ void Resource::print_general_model_config(
   oss.clear();
   oss.str("");
   LOG(INFO) << "Capacity Info";
-  for (auto & cap : config->_capacity) {
+  for (auto& cap : config->_capacity) {
     oss << cap << " ";
   }
   LOG(INFO) << oss.str();

@@ -79,8 +86,8 @@ void Resource::print_general_model_config(
   oss.str("");
   LOG(INFO) << "Feed Shape Info";
   int tensor_idx = 0;
-  for (auto & shape : config->_feed_shape) {
-    for (auto & dim : shape) {
+  for (auto& shape : config->_feed_shape) {
+    for (auto& dim : shape) {
       oss << dim << " ";
     }
     LOG(INFO) << "Tensor[" << tensor_idx++ << "].shape: " << oss.str();

@@ -146,38 +153,71 @@ int Resource::initialize(const std::string& path, const std::string& file) {
   return 0;
 }
 
 int Resource::general_model_initialize(const std::string& path,
                                        const std::string& file) {
-  // model config
+  if (!FLAGS_enable_general_model) {
+    return 0;
+  }
+
+  ResourceConf resource_conf;
+  if (configure::read_proto_conf(path, file, &resource_conf) != 0) {
+    LOG(ERROR) << "Failed initialize resource from: " << path << "/" << file;
+    return -1;
+  }
+
+  int err = 0;
+  std::string general_model_path = resource_conf.general_model_path();
+  std::string general_model_file = resource_conf.general_model_file();
+  if (err != 0) {
+    LOG(ERROR) << "read general_model_path failed, path[" << path
+               << "], file[" << file << "]";
+    return -1;
+  }
+
   GeneralModelConfig model_config;
-  if (configure::read_proto_conf(path, file, &model_config) != 0) {
-    LOG(ERROR) << "Failed initialize resource from: " << path << "/" << file;
+  if (configure::read_proto_conf(general_model_path.c_str(),
+                                 general_model_file.c_str(),
+                                 &model_config) != 0) {
+    LOG(ERROR) << "Failed initialize model config from: " << general_model_path
+               << "/" << general_model_file;
     return -1;
   }
 
   _config.reset(new PaddleGeneralModelConfig());
-  _config->_feed_type.resize(model_config.feed_type_size());
-  _config->_is_lod_feed.resize(model_config.is_lod_feed_size());
-  _config->_capacity.resize(model_config.feed_shape_size());
-  _config->_feed_shape.resize(model_config.feed_shape_size());
-  for (int i = 0; i < model_config.is_lod_feed_size(); ++i) {
-    _config->_feed_type[i] = model_config.feed_type(i);
-    if (model_config.is_lod_feed(i)) {
+  int feed_var_num = model_config.feed_var_size();
+  _config->_feed_name.resize(feed_var_num);
+  _config->_feed_type.resize(feed_var_num);
+  _config->_is_lod_feed.resize(feed_var_num);
+  _config->_capacity.resize(feed_var_num);
+  _config->_feed_shape.resize(feed_var_num);
+  for (int i = 0; i < feed_var_num; ++i) {
+    _config->_feed_name[i] = model_config.feed_var(i).name();
+    _config->_feed_type[i] = model_config.feed_var(i).feed_type();
+    if (model_config.feed_var(i).is_lod_tensor()) {
       _config->_feed_shape[i] = {-1};
       _config->_is_lod_feed[i] = true;
     } else {
+      _config->_capacity[i] = 1;
       _config->_is_lod_feed[i] = false;
-      for (int j = 0; j < model_config.feed_shape(i).shape_size(); ++j) {
-        int dim = model_config.feed_shape(i).shape(j);
+      for (int j = 0; j < model_config.feed_var(i).feed_shape().shape_size();
+           ++j) {
+        int32_t dim = model_config.feed_var(i).feed_shape().shape(j);
         _config->_feed_shape[i].push_back(dim);
+        _config->_capacity[i] *= dim;
       }
     }
   }
+
+  int fetch_var_num = model_config.fetch_var_size();
+  _config->_fetch_name.resize(fetch_var_num);
+  _config->_fetch_shape.resize(fetch_var_num);
+  for (int i = 0; i < fetch_var_num; ++i) {
+    _config->_fetch_name[i] = model_config.fetch_var(i).name();
+    for (int j = 0; j < model_config.fetch_var(i).fetch_shape().shape_size();
+         ++j) {
+      int dim = model_config.fetch_var(i).fetch_shape().shape(j);
+      _config->_fetch_shape[i].push_back(dim);
+    }
+  }
   return 0;
 }
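The feed loop above encodes a simple rule: a LoD feed gets shape {-1} and no fixed capacity, while a dense feed's _capacity[i] is the product of its declared dims. A stand-alone sketch of that rule, for illustration only, not framework code:

#include <vector>

// Capacity of a dense (non-LoD) tensor: the product of its dims, starting
// at 1, mirroring the `_capacity[i] = 1; ... _capacity[i] *= dim;` logic
// in the diff above.
int capacity_of(const std::vector<int>& shape) {
  int capacity = 1;
  for (int dim : shape) {
    capacity *= dim;
  }
  return capacity;
}

// e.g. capacity_of({1, 2, 3}) == 6, the element count to reserve per instance.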
core/predictor/framework/resource.h

@@ -33,16 +33,19 @@ class PaddleGeneralModelConfig {
   ~PaddleGeneralModelConfig() {}
 
  public:
+  std::vector<std::string> _feed_name;
   std::vector<int> _feed_type;     // 0 int64, 1 float
   std::vector<bool> _is_lod_feed;  // true lod tensor
   std::vector<int> _capacity;      // capacity for each tensor
   /*
     feed_shape_ for feeded variable
     feed_shape_[i][j] represents the jth dim for ith input Tensor
     if is_lod_feed_[i] == False, feed_shape_[i][0] = -1
   */
   std::vector<std::vector<int>> _feed_shape;
+
+  std::vector<std::string> _fetch_name;
+  std::vector<std::vector<int>> _fetch_shape;
 };
 
 class BaseRdDict;

@@ -76,8 +79,8 @@ class Resource {
   int initialize(const std::string& path, const std::string& file);
 
   int cube_initialize(const std::string& path, const std::string& file);
 
-  int general_model_initialize(const std::string& path, const std::string& file);
+  int general_model_initialize(const std::string& path,
+                               const std::string& file);
 
   int thread_initialize();

@@ -90,7 +93,7 @@ class Resource {
   std::shared_ptr<PaddleGeneralModelConfig> get_general_model_config();
 
   void print_general_model_config(
-      const std::shared_ptr<PaddleGeneralModelConfig> & config);
+      const std::shared_ptr<PaddleGeneralModelConfig>& config);
 
   std::shared_ptr<RocksDBWrapper> getDB();
core/predictor/src/pdserving.cpp

@@ -45,8 +45,6 @@ using baidu::paddle_serving::predictor::FLAGS_logger_path;
 using baidu::paddle_serving::predictor::FLAGS_logger_file;
 using baidu::paddle_serving::predictor::FLAGS_resource_path;
 using baidu::paddle_serving::predictor::FLAGS_resource_file;
-using baidu::paddle_serving::predictor::FLAGS_general_model_path;
-using baidu::paddle_serving::predictor::FLAGS_general_model_file;
 using baidu::paddle_serving::predictor::FLAGS_reload_interval_s;
 using baidu::paddle_serving::predictor::FLAGS_port;

@@ -219,11 +217,10 @@ int main(int argc, char** argv) {
 #ifndef BCLOUD
-  if (Resource::instance().general_model_initialize(
-          FLAGS_general_model_path, FLAGS_general_model_file) != 0) {
+  if (Resource::instance().general_model_initialize(FLAGS_resource_path,
+                                                    FLAGS_resource_file) != 0) {
     LOG(ERROR) << "Failed to initialize general model conf: "
-               << FLAGS_general_model_path << "/" << FLAGS_general_model_file;
+               << FLAGS_resource_path << "/" << FLAGS_resource_file;
     return -1;
   }
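After this change the server locates the general model config through the resource config rather than through its own pair of flags. The FLAGS_* names above are ordinary gflags; a sketch of how such flags are declared and passed, with defaults that are assumptions rather than values from this commit:

#include <gflags/gflags.h>

// Hypothetical declarations matching the names used above; the real ones
// live in the predictor framework.
DEFINE_string(resource_path, "./conf", "directory containing resource.prototxt");
DEFINE_string(resource_file, "resource.prototxt", "resource config file name");

// Launch example:
//   ./pdserving --resource_path=./conf --resource_file=resource.prototxt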
examples/demo-serving/conf/general_model.prototxt

-is_lod_feed: true
-is_lod_feed: false
-is_lod_feed: true
-feed_type: 1
-feed_type: 0
-feed_type: 1
-feed_shape {
-  shape: -1
-}
-feed_shape {
-  shape: 1
-  shape: 2
-  shape: 3
-}
-feed_shape {
-  shape: -1
-}
+feed_var {
+  name: "words"
+  is_lod_tensor: false
+  feed_type: 0
+  feed_shape {
+    shape: -1
+  }
+}
+feed_var {
+  name: "label"
+  is_lod_tensor: false
+  feed_type: 0
+  feed_shape {
+    shape: 1
+  }
+}
+fetch_var {
+  name: "mean_0.tmp_0"
+  fetch_shape {
+    shape: 1
+  }
+}
+fetch_var {
+  name: "accuracy_0.tmp_0"
+  fetch_shape {
+    shape: 1
+  }
+}
+fetch_var {
+  name: "fc_1.tmp_2"
+  fetch_shape {
+    shape: 1
+  }
+}
examples/demo-serving/conf/gflags.conf

 --enable_model_toolkit
 --enable_cube=false
 --enable_general_model=true
---general_model_path=./conf
---general_model_file=general_model.prototxt
examples/demo-serving/conf/resource.prototxt

 model_toolkit_path: "./conf/"
 model_toolkit_file: "model_toolkit.prototxt"
 cube_config_file: "./conf/cube.conf"
+general_model_path: "./conf/"
+general_model_file: "general_model.prototxt"
examples/demo-serving/conf/service.prototxt

@@ -40,6 +40,6 @@ services {
   workflows: "workflow9"
 }
 services {
-  name: "LoadGeneralModelService"
-  workflows: "workflow10"
-}
\ No newline at end of file
+  name: "GeneralModelService"
+  workflows: "workflow11"
+}
examples/demo-serving/conf/workflow.prototxt

@@ -95,7 +95,24 @@ workflows {
   name: "workflow10"
   workflow_type: "Sequence"
   nodes {
-    name: "load_general_model_conf_op"
-    type: "LoadGeneralModelConfOp"
+    name: "general_model_op"
+    type: "GeneralModelOp"
   }
 }
+workflows {
+  name: "workflow11"
+  workflow_type: "Sequence"
+  nodes {
+    name: "general_reader_op"
+    type: "GeneralReaderOp"
+  }
+  nodes {
+    name: "general_infer_op"
+    type: "GeneralInferOp"
+    dependencies {
+      name: "general_reader_op"
+      mode: "RO"
+    }
+  }
+}
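workflow11 chains a reader op and an infer op; the RO dependency means general_infer_op reads general_reader_op's output. A conceptual sketch of how a Sequence workflow with such a dependency executes; this is plain C++ for illustration, not the framework's actual scheduler:

#include <iostream>
#include <string>
#include <vector>

struct Node {
  std::string name;
  std::vector<std::string> read_only_deps;  // upstream nodes ("RO" mode)
};

int main() {
  // Mirrors workflow11 above: infer depends (read-only) on the reader.
  std::vector<Node> sequence = {
      {"general_reader_op", {}},
      {"general_infer_op", {"general_reader_op"}},
  };
  for (const auto& node : sequence) {  // Sequence workflows run in order
    std::cout << "run " << node.name << std::endl;
  }
  return 0;
}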
examples/demo-serving/op/general_model_op.cpp

@@ -15,9 +15,11 @@
 #include "examples/demo-serving/op/general_model_op.h"
 #include <algorithm>
 #include <iostream>
+#include <memory>
 #include <sstream>
 #include "core/predictor/framework/infer.h"
 #include "core/predictor/framework/memory.h"
+#include "core/predictor/framework/resource.h"
 
 namespace baidu {
 namespace paddle_serving {

@@ -29,10 +31,12 @@ using baidu::paddle_serving::predictor::general_model::Request;
 using baidu::paddle_serving::predictor::general_model::FeedInst;
 using baidu::paddle_serving::predictor::general_model::Response;
 using baidu::paddle_serving::predictor::general_model::FetchInst;
+using baidu::paddle_serving::predictor::PaddleGeneralModelConfig;
 
 static std::once_flag g_proto_init_flag;
 
 int GeneralModelOp::inference() {
+  // request
   const Request* req = dynamic_cast<const Request*>(get_request_message());
 
   TensorVector* in = butil::get_object<TensorVector>();

@@ -44,6 +48,17 @@ int GeneralModelOp::inference() {
   std::vector<int> elem_size;
   std::vector<int> capacity;
 
+  // config
+  LOG(INFO) << "start to call load general model_conf op";
+  baidu::paddle_serving::predictor::Resource& resource =
+      baidu::paddle_serving::predictor::Resource::instance();
+
+  LOG(INFO) << "get resource pointer done.";
+  std::shared_ptr<PaddleGeneralModelConfig> model_config =
+      resource.get_general_model_config();
+  LOG(INFO) << "get general model config pointer done.";
+  resource.print_general_model_config(model_config);
+
+  // infer
   if (batch_size > 0) {
     int var_num = req->insts(0).tensor_array_size();
     VLOG(3) << "var num: " << var_num;

@@ -146,11 +161,23 @@ int GeneralModelOp::inference() {
     return -1;
   }
 
+  // print request
+  std::ostringstream oss;
+  int64_t* example = reinterpret_cast<int64_t*>((*in)[0].data.data());
+  for (uint32_t i = 0; i < 10; i++) {
+    oss << *(example + i) << " ";
+  }
+  VLOG(3) << "msg: " << oss.str();
+
   // infer
   if (predictor::InferManager::instance().infer(
           GENERAL_MODEL_NAME, in, out, batch_size)) {
     LOG(ERROR) << "Failed do infer in fluid model: " << GENERAL_MODEL_NAME;
     return -1;
   }
+
+  // print response
+  float* example_1 = reinterpret_cast<float*>((*out)[0].data.data());
+  VLOG(3) << "result: " << *example_1;
 
   Response* res = mutable_data<Response>();
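The new debug print reads a fixed 10 int64 elements from the first input tensor, which implicitly assumes the tensor holds at least 10 values. A bounds-checked variant, as a sketch rather than part of the commit:

#include <cstdint>
#include <sstream>
#include <string>

// Preview up to `limit` elements, clamped to the actual element count, so a
// short tensor cannot be over-read the way a fixed loop of 10 could.
std::string preview_int64(const int64_t* data, size_t count, size_t limit = 10) {
  std::ostringstream oss;
  for (size_t i = 0; i < count && i < limit; ++i) {
    oss << data[i] << " ";
  }
  return oss.str();
}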
python/examples/imdb/test_client_batch.py

@@ -24,13 +24,13 @@ def batch_predict(batch_size=4):
     client.load_client_config(conf_file)
     client.connect(["127.0.0.1:8010"])
     start = time.time()
+    fetch = ["acc", "cost", "prediction"]
     feed_batch = []
     for line in sys.stdin:
         group = line.strip().split()
         words = [int(x) for x in group[1:int(group[0])]]
         label = [int(group[-1])]
         feed = {"words": words, "label": label}
-        fetch = ["acc", "cost", "prediction"]
         feed_batch.append(feed)
         if len(feed_batch) == batch_size:
             fetch_batch = client.batch_predict(

@@ -39,6 +39,11 @@ def batch_predict(batch_size=4):
             print("{} {}".format(fetch_batch[i]["prediction"][1],
                                  feed_batch[i]["label"][0]))
             feed_batch = []
+    if len(feed_batch) > 0:
+        fetch_batch = client.batch_predict(feed_batch=feed_batch, fetch=fetch)
+        for i in range(len(feed_batch)):
+            print("{} {}".format(fetch_batch[i]["prediction"][1],
+                                 feed_batch[i]["label"][0]))
 
     cost = time.time() - start
     print("total cost : {}".format(cost))
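The batch-predict bug was that input lines left over after the last full batch were silently dropped; the trailing `if len(feed_batch) > 0` block flushes that remainder, and `fetch` is hoisted out of the loop so it is still in scope there. The same chunk-and-flush pattern expressed in C++, as a generic sketch in which process_batch stands in for client.batch_predict:

#include <functional>
#include <string>
#include <vector>

// Collect items into fixed-size batches and process each one, then flush the
// final partial batch -- the step the old client was missing.
void for_each_batch(
    const std::vector<std::string>& items,
    size_t batch_size,
    const std::function<void(const std::vector<std::string>&)>& process_batch) {
  std::vector<std::string> batch;
  for (const auto& item : items) {
    batch.push_back(item);
    if (batch.size() == batch_size) {
      process_batch(batch);
      batch.clear();
    }
  }
  if (!batch.empty()) {
    process_batch(batch);  // remainder smaller than batch_size
  }
}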