PaddlePaddle / Paddle
Commit 1e7ccf9f (unverified)
Authored on Sep 03, 2018 by tensor-tang; committed by GitHub on Sep 03, 2018
Merge pull request #13126 from tensor-tang/fea/infer/ut/lac-new
add lac infer test
Parents: 3fe0575b 9f02497b
Showing 5 changed files with 225 additions and 16 deletions (+225 -16)
paddle/fluid/inference/analysis/CMakeLists.txt          +20 -10
paddle/fluid/inference/analysis/analyzer_lac_tester.cc  +199 -0
paddle/fluid/inference/analysis/analyzer_ner_tester.cc  +1 -1
paddle/fluid/inference/analysis/analyzer_tester.cc      +3 -3
paddle/fluid/inference/api/api_impl.cc                  +2 -2
paddle/fluid/inference/analysis/CMakeLists.txt
@@ -25,9 +25,8 @@ function (inference_analysis_test TARGET)
   if(WITH_TESTING)
     set(options "")
     set(oneValueArgs "")
-    set(multiValueArgs SRCS EXTRA_DEPS)
+    set(multiValueArgs SRCS ARGS EXTRA_DEPS)
     cmake_parse_arguments(analysis_test "${options}" "${oneValueArgs}" "${multiValueArgs}" ${ARGN})
     set(mem_opt "")
     if(WITH_GPU)
       set(mem_opt "--fraction_of_gpu_memory_to_use=0.5")
@@ -35,7 +34,7 @@ function (inference_analysis_test TARGET)
     cc_test(${TARGET}
             SRCS "${analysis_test_SRCS}"
             DEPS analysis graph fc_fuse_pass graph_viz_pass infer_clean_graph_pass graph_pattern_detector pass ${analysis_test_EXTRA_DEPS}
-            ARGS --inference_model_dir=${PYTHON_TESTS_DIR}/book/word2vec.inference.model ${mem_opt})
+            ARGS --inference_model_dir=${PYTHON_TESTS_DIR}/book/word2vec.inference.model ${mem_opt} ${analysis_test_ARGS})
     set_tests_properties(${TARGET} PROPERTIES DEPENDS test_word2vec)
   endif(WITH_TESTING)
 endfunction(inference_analysis_test)
@@ -51,7 +50,7 @@ endfunction(inference_download_and_uncompress)
 set(DITU_RNN_MODEL_URL "http://paddle-inference-dist.bj.bcebos.com/ditu_rnn_fluid%2Fmodel.tar.gz")
 set(DITU_RNN_DATA_URL "http://paddle-inference-dist.bj.bcebos.com/ditu_rnn_fluid%2Fdata.txt.tar.gz")
 set(DITU_INSTALL_DIR "${THIRD_PARTY_PATH}/inference_demo/ditu_rnn" CACHE PATH "Ditu RNN model and data root." FORCE)
-if (NOT EXISTS ${DITU_INSTALL_DIR})
+if (NOT EXISTS ${DITU_INSTALL_DIR} AND WITH_TESTING)
   inference_download_and_uncompress(${DITU_INSTALL_DIR} ${DITU_RNN_MODEL_URL} "ditu_rnn_fluid%2Fmodel.tar.gz")
   inference_download_and_uncompress(${DITU_INSTALL_DIR} ${DITU_RNN_DATA_URL} "ditu_rnn_fluid%2Fdata.txt.tar.gz")
 endif()
@@ -70,8 +69,7 @@ inference_analysis_test(test_analyzer SRCS analyzer_tester.cc
   attention_lstm_fuse_pass
   paddle_inference_api
   pass
-  ARGS --inference_model_dir=${PYTHON_TESTS_DIR}/book/word2vec.inference.model
-       --infer_ditu_rnn_model=${DITU_INSTALL_DIR}/model
+  ARGS --infer_ditu_rnn_model=${DITU_INSTALL_DIR}/model
        --infer_ditu_rnn_data=${DITU_INSTALL_DIR}/data.txt)
 inference_analysis_test(test_data_flow_graph SRCS data_flow_graph_tester.cc)
@@ -88,13 +86,25 @@ inference_analysis_test(test_model_store_pass SRCS model_store_pass_tester.cc)
 set(CHINESE_NER_MODEL_URL "http://paddle-inference-dist.bj.bcebos.com/chinese_ner_model.tar.gz")
 set(CHINESE_NER_DATA_URL "http://paddle-inference-dist.bj.bcebos.com/chinese_ner-data.txt.tar.gz")
 set(CHINESE_NER_INSTALL_DIR "${THIRD_PARTY_PATH}/inference_demo/chinese_ner" CACHE PATH "Chinese ner model and data root." FORCE)
-if (NOT EXISTS ${CHINESE_NER_INSTALL_DIR})
+if (NOT EXISTS ${CHINESE_NER_INSTALL_DIR} AND WITH_TESTING)
   inference_download_and_uncompress(${CHINESE_NER_INSTALL_DIR} ${CHINESE_NER_MODEL_URL} "chinese_ner_model.tar.gz")
   inference_download_and_uncompress(${CHINESE_NER_INSTALL_DIR} ${CHINESE_NER_DATA_URL} "chinese_ner-data.txt.tar.gz")
 endif()
 
-inference_analysis_test(test_chinese_ner SRCS chinese_ner_tester.cc
+inference_analysis_test(test_analyzer_ner SRCS analyzer_ner_tester.cc
   EXTRA_DEPS paddle_inference_api paddle_fluid_api
-  ARGS --inference_model_dir=${PYTHON_TESTS_DIR}/book/word2vec.inference.model
-       --infer_model=${CHINESE_NER_INSTALL_DIR}/model
+  ARGS --infer_model=${CHINESE_NER_INSTALL_DIR}/model
        --infer_data=${CHINESE_NER_INSTALL_DIR}/data.txt)
+
+set(LAC_MODEL_URL "http://paddle-inference-dist.bj.bcebos.com/lac_model.tar.gz")
+set(LAC_DATA_URL "http://paddle-inference-dist.bj.bcebos.com/lac_data.txt.tar.gz")
+set(LAC_INSTALL_DIR "${THIRD_PARTY_PATH}/inference_demo/lac" CACHE PATH "LAC model and data root." FORCE)
+if (NOT EXISTS ${LAC_INSTALL_DIR} AND WITH_TESTING)
+  inference_download_and_uncompress(${LAC_INSTALL_DIR} ${LAC_MODEL_URL} "lac_model.tar.gz")
+  inference_download_and_uncompress(${LAC_INSTALL_DIR} ${LAC_DATA_URL} "lac_data.txt.tar.gz")
+endif()
+inference_analysis_test(test_analyzer_lac SRCS analyzer_lac_tester.cc
+  EXTRA_DEPS paddle_inference_api paddle_fluid_api
+  ARGS --infer_model=${LAC_INSTALL_DIR}/model
+       --infer_data=${LAC_INSTALL_DIR}/data.txt)
paddle/fluid/inference/analysis/analyzer_lac_tester.cc (new file, mode 100644)
// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "paddle/fluid/inference/analysis/analyzer.h"
#include <google/protobuf/text_format.h>
#include <gtest/gtest.h>
#include "paddle/fluid/framework/ir/pass.h"
#include "paddle/fluid/inference/analysis/ut_helper.h"
#include "paddle/fluid/inference/api/helper.h"
#include "paddle/fluid/inference/api/paddle_inference_api.h"
#include "paddle/fluid/platform/profiler.h"

DEFINE_string(infer_model, "", "model path for LAC");
DEFINE_string(infer_data, "", "data file for LAC");
DEFINE_int32(batch_size, 1, "batch size.");
DEFINE_int32(burning, 0, "Burning before repeat.");
DEFINE_int32(repeat, 1, "Running the inference program repeat times.");
DEFINE_bool(test_all_data, false, "Test the all dataset in data file.");

namespace paddle {
namespace inference {
namespace analysis {

struct DataRecord {
  std::vector<int64_t> data;
  std::vector<size_t> lod;
  // for dataset and nextbatch
  size_t batch_iter{0};
  std::vector<std::vector<size_t>> batched_lods;
  std::vector<std::vector<int64_t>> batched_datas;
  std::vector<std::vector<int64_t>> datasets;
  DataRecord() = default;
  explicit DataRecord(const std::string &path, int batch_size = 1) {
    Load(path);
    Prepare(batch_size);
    batch_iter = 0;
  }
  void Load(const std::string &path) {
    std::ifstream file(path);
    std::string line;
    int num_lines = 0;
    datasets.resize(0);
    while (std::getline(file, line)) {
      num_lines++;
      std::vector<std::string> data;
      split(line, ';', &data);
      std::vector<int64_t> words_ids;
      split_to_int64(data[1], ' ', &words_ids);
      datasets.emplace_back(words_ids);
    }
  }
  void Prepare(int bs) {
    if (bs == 1) {
      batched_datas = datasets;
      for (auto one_sentence : datasets) {
        batched_lods.push_back({0, one_sentence.size()});
      }
    } else {
      std::vector<int64_t> one_batch;
      std::vector<size_t> lod{0};
      int bs_id = 0;
      for (auto one_sentence : datasets) {
        bs_id++;
        one_batch.insert(one_batch.end(), one_sentence.begin(),
                         one_sentence.end());
        lod.push_back(lod.back() + one_sentence.size());
        if (bs_id == bs) {
          bs_id = 0;
          batched_datas.push_back(one_batch);
          batched_lods.push_back(lod);
          one_batch.clear();
          one_batch.resize(0);
          lod.clear();
          lod.resize(0);
          lod.push_back(0);
        }
      }
      if (one_batch.size() != 0) {
        batched_datas.push_back(one_batch);
        batched_lods.push_back(lod);
      }
    }
  }
  DataRecord NextBatch() {
    DataRecord data;
    data.data = batched_datas[batch_iter];
    data.lod = batched_lods[batch_iter];
    batch_iter++;
    if (batch_iter >= batched_datas.size()) {
      batch_iter = 0;
    }
    return data;
  }
};

void GetOneBatch(std::vector<PaddleTensor> *input_slots, DataRecord *data,
                 int batch_size) {
  auto one_batch = data->NextBatch();
  PaddleTensor input_tensor;
  input_tensor.name = "word";
  input_tensor.shape.assign({static_cast<int>(one_batch.data.size()), 1});
  input_tensor.lod.assign({one_batch.lod});
  input_tensor.dtype = PaddleDType::INT64;
  TensorAssignData<int64_t>(&input_tensor, {one_batch.data});
  PADDLE_ENFORCE_EQ(batch_size, static_cast<int>(one_batch.lod.size() - 1));
  input_slots->assign({input_tensor});
}

static void PrintTime(const double latency, const int bs, const int repeat) {
  LOG(INFO) << "===========profile result===========";
  LOG(INFO) << "batch_size: " << bs << ", repeat: " << repeat
            << ", avg latency: " << latency / repeat << "ms";
  LOG(INFO) << "=====================================";
}

void BenchAllData(const std::string &model_path, const std::string &data_file,
                  const int batch_size, const int repeat) {
  NativeConfig config;
  config.model_dir = model_path;
  config.use_gpu = false;
  config.device = 0;
  config.specify_input_name = true;
  std::vector<PaddleTensor> input_slots, outputs_slots;
  DataRecord data(data_file, batch_size);
  auto predictor =
      CreatePaddlePredictor<NativeConfig, PaddleEngineKind::kNative>(config);
  GetOneBatch(&input_slots, &data, batch_size);
  for (int i = 0; i < FLAGS_burning; i++) {
    predictor->Run(input_slots, &outputs_slots);
  }
  Timer timer;
  double sum = 0;
  for (int i = 0; i < repeat; i++) {
    for (size_t bid = 0; bid < data.batched_datas.size(); ++bid) {
      GetOneBatch(&input_slots, &data, batch_size);
      timer.tic();
      predictor->Run(input_slots, &outputs_slots);
      sum += timer.toc();
    }
  }
  PrintTime(sum, batch_size, repeat);
}

const int64_t lac_ref_data[] = {24, 25, 25, 25, 38, 30, 31, 14, 15, 44, 24, 25,
                                25, 25, 25, 25, 44, 24, 25, 25, 25, 36, 42, 43,
                                44, 14, 15, 44, 14, 15, 44, 14, 15, 44, 38, 39,
                                14, 15, 44, 22, 23, 23, 23, 23, 23, 23, 23};

void TestLACPrediction(const std::string &model_path,
                       const std::string &data_file, const int batch_size,
                       const int repeat, bool test_all_data) {
  if (test_all_data) {
    BenchAllData(model_path, data_file, batch_size, repeat);
    return;
  }
  NativeConfig config;
  config.model_dir = model_path;
  config.use_gpu = false;
  config.device = 0;
  config.specify_input_name = true;
  std::vector<PaddleTensor> input_slots, outputs_slots;
  DataRecord data(data_file, batch_size);
  GetOneBatch(&input_slots, &data, batch_size);
  auto predictor =
      CreatePaddlePredictor<NativeConfig, PaddleEngineKind::kNative>(config);
  for (int i = 0; i < FLAGS_burning; i++) {
    predictor->Run(input_slots, &outputs_slots);
  }
  Timer timer;
  timer.tic();
  for (int i = 0; i < repeat; i++) {
    predictor->Run(input_slots, &outputs_slots);
  }
  PrintTime(timer.toc(), batch_size, repeat);
  EXPECT_EQ(outputs_slots.size(), 1UL);
  auto &out = outputs_slots[0];
  size_t size = std::accumulate(out.shape.begin(), out.shape.end(), 1,
                                [](int a, int b) { return a * b; });
  size_t batch1_size = sizeof(lac_ref_data) / sizeof(int64_t);
  PADDLE_ENFORCE_GT(size, 0);
  EXPECT_GE(size, batch1_size);
  int64_t *pdata = static_cast<int64_t *>(out.data.data());
  for (size_t i = 0; i < batch1_size; ++i) {
    EXPECT_EQ(pdata[i], lac_ref_data[i]);
  }
}

TEST(Analyzer_LAC, native) {
  LOG(INFO) << "LAC with native";
  TestLACPrediction(FLAGS_infer_model, FLAGS_infer_data, FLAGS_batch_size,
                    FLAGS_repeat, FLAGS_test_all_data);
}

}  // namespace analysis
}  // namespace inference
}  // namespace paddle
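The subtlest part of the new tester is the LoD bookkeeping in DataRecord::Prepare: each batch is the flat concatenation of its sentences' word ids, and lod records the cumulative sentence boundaries, which is why GetOneBatch can assert that batch_size == lod.size() - 1. A minimal standalone sketch of that layout (plain C++ with no Paddle dependencies; the sentence values are made up for illustration):

// Standalone sketch of the lod/batch layout built by DataRecord::Prepare.
// The word ids below are illustrative only.
#include <cstdint>
#include <iostream>
#include <vector>

int main() {
  std::vector<std::vector<int64_t>> sentences = {{7, 3, 9}, {4, 2}};
  std::vector<int64_t> one_batch;  // flat word ids for the whole batch
  std::vector<size_t> lod{0};      // cumulative sentence boundaries
  for (const auto &s : sentences) {
    one_batch.insert(one_batch.end(), s.begin(), s.end());
    lod.push_back(lod.back() + s.size());
  }
  // one_batch = {7, 3, 9, 4, 2} and lod = {0, 3, 5}: sentence i occupies
  // the half-open range [lod[i], lod[i+1]) of the flat tensor, so the
  // number of sentences in the batch is lod.size() - 1.
  for (size_t i = 0; i + 1 < lod.size(); ++i) {
    std::cout << "sentence " << i << ": [" << lod[i] << ", " << lod[i + 1]
              << ")\n";
  }
  return 0;
}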
paddle/fluid/inference/analysis/chinese_ner_tester.cc → paddle/fluid/inference/analysis/analyzer_ner_tester.cc (renamed)
@@ -12,10 +12,10 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
+#include "paddle/fluid/inference/analysis/analyzer.h"
 #include <google/protobuf/text_format.h>
 #include <gtest/gtest.h>
 #include "paddle/fluid/framework/ir/pass.h"
-#include "paddle/fluid/inference/analysis/analyzer.h"
 #include "paddle/fluid/inference/analysis/ut_helper.h"
 #include "paddle/fluid/inference/api/helper.h"
 #include "paddle/fluid/inference/api/paddle_inference_api.h"
paddle/fluid/inference/analysis/analyzer_tester.cc
@@ -34,7 +34,7 @@ namespace paddle {
 namespace inference {
 namespace analysis {
 
-using namespace framework;
+using namespace framework;  // NOLINT
 
 TEST(Analyzer, analysis_without_tensorrt) {
   FLAGS_IA_enable_tensorrt_subgraph_engine = false;
@@ -312,8 +312,8 @@ void TestDituRNNPrediction(const std::string &model_path,
   PADDLE_ENFORCE_GT(size, 0);
   float *data = static_cast<float *>(out.data.data());
   float *base_data = static_cast<float *>(base_out.data.data());
-  for (size_t i = 0; i < size; i++) {
-    EXPECT_NEAR(data[i], base_data[i], 1e-3);
+  for (size_t j = 0; j < size; j++) {
+    EXPECT_NEAR(data[j], base_data[j], 1e-3);
   }
 }
paddle/fluid/inference/api/api_impl.cc
@@ -62,14 +62,14 @@ void NativePaddlePredictor::PrepareFeedFetch() {
   for (auto *op : inference_program_->Block(0).AllOps()) {
     if (op->Type() == "feed") {
       int idx = boost::get<int>(op->GetAttr("col"));
-      if (feeds_.size() <= (size_t)idx) {
+      if (feeds_.size() <= static_cast<size_t>(idx)) {
         feeds_.resize(idx + 1);
       }
       feeds_[idx] = op;
       feed_names_[op->Output("Out")[0]] = idx;
     } else if (op->Type() == "fetch") {
       int idx = boost::get<int>(op->GetAttr("col"));
-      if (fetchs_.size() <= (size_t)idx) {
+      if (fetchs_.size() <= static_cast<size_t>(idx)) {
         fetchs_.resize(idx + 1);
       }
       fetchs_[idx] = op;
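The api_impl.cc change is stylistic: the C-style (size_t)idx casts become static_cast<size_t>(idx), likely to satisfy the project's linter. A cast is needed at all because idx is a signed int while std::vector::size() returns an unsigned size_t. A minimal sketch of the same grow-then-record pattern (names here are illustrative, not the Paddle API):

// Sketch of the grow-then-record pattern used in PrepareFeedFetch.
// Record() and the container are illustrative stand-ins.
#include <cstddef>
#include <string>
#include <utility>
#include <vector>

void Record(std::vector<std::string> *slots, int idx, std::string value) {
  // Cast the signed index before comparing with the unsigned size();
  // static_cast makes the intentional conversion explicit and greppable.
  if (slots->size() <= static_cast<size_t>(idx)) {
    slots->resize(idx + 1);  // grow so that slot idx exists
  }
  (*slots)[idx] = std::move(value);
}

int main() {
  std::vector<std::string> feeds;
  Record(&feeds, 2, "word");  // feeds grows to size 3
  return feeds.size() == 3 ? 0 : 1;
}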