BaiXuePrincess / Paddle (forked from PaddlePaddle / Paddle)
Commit 597d9217
clean demo_ci

Authored on Oct 24, 2018 by dzhwinter
Parent: dbd0075b

Showing 6 changed files with 32 additions and 20 deletions:
paddle/fluid/framework/executor.cc                            +7   -0
paddle/fluid/framework/operator.cc                            +5   -4
paddle/fluid/inference/api/demo_ci/CMakeLists.txt             +9   -12
paddle/fluid/inference/api/demo_ci/real_data_icnet_tester.cc  +5   -4
paddle/fluid/operators/fetch_op.cc                            +2   -0
paddle/fluid/operators/load_combine_op.cc                     +4   -0
paddle/fluid/framework/executor.cc
@@ -12,6 +12,8 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include <algorithm>
#include "paddle/fluid/framework/executor.h"
#include "paddle/fluid/framework/channel.h"
@@ -384,6 +386,7 @@ void Executor::RunPreparedContext(ExecutorPrepareContext* ctx, Scope* scope,
    CreateVariables(ctx->prog_, local_scope, ctx->block_id_);
  }

  VLOG(3) << "Scope ptr " << local_scope;
  for (auto& op : ctx->ops_) {
    op->Run(*local_scope, place_);
    // CheckResult(op->Type(), ctx, local_scope);
@@ -445,7 +448,11 @@ void Executor::RunPreparedContext(ExecutorPrepareContext* ctx, Scope* scope,
        VLOG(3) << "after tensor copy";
        float sum = .0;
        for (size_t i = 0; i < check.numel(); ++i) {
          if (std::type_index(check.type()) == std::type_index(typeid(int64_t))) {
            sum += static_cast<float>(check.data<int64_t>()[i]);
          } else {
            sum += check.data<float>()[i];
          }
        }
        VLOG(3) << "op " << op->Type() << " output var " << var_name
                << " sum " << sum;
      }
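The block above folds an op's output tensor into a single float by summing every element (casting int64_t entries to float), so one VLOG line is enough to compare two runs. Below is a minimal standalone sketch of that checksum idea; Checksum(), the std::vector buffers, and main() are illustrative stand-ins, not Paddle code.

// Standalone sketch: fold every element of a buffer into one float so that
// two runs can be compared with a single log line. Checksum() and the sample
// vectors are illustrative only; the diff above walks a framework tensor.
#include <cstddef>
#include <cstdint>
#include <iostream>
#include <vector>

template <typename T>
float Checksum(const std::vector<T>& data) {
  float sum = 0.0f;
  for (std::size_t i = 0; i < data.size(); ++i) {
    sum += static_cast<float>(data[i]);  // int64_t entries are cast, as above
  }
  return sum;
}

int main() {
  std::vector<int64_t> ids = {1, 2, 3};
  std::vector<float> probs = {0.1f, 0.2f, 0.7f};
  std::cout << "ids sum " << Checksum(ids)
            << ", probs sum " << Checksum(probs) << std::endl;
  return 0;
}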
paddle/fluid/framework/operator.cc
@@ -62,7 +62,7 @@ static DDim GetDims(const Scope& scope, const std::string& name,
  if (var->IsType<LoDTensor>()) {
    const LoDTensor& tensor = var->Get<LoDTensor>();
-   if (UNLIKELY(!tensor.IsInitialized())) {
+   if (!tensor.IsInitialized()) {
      return DDim({-1});
    }
    return tensor.dims();
@@ -91,13 +91,13 @@ static std::string GetDtype(const Scope& scope, const std::string& name) {
  if (var->IsType<LoDTensor>()) {
    const LoDTensor& tensor = var->Get<LoDTensor>();
-   if (UNLIKELY(!tensor.IsInitialized())) {
+   if (!tensor.IsInitialized()) {
      return "";
    }
    return DataTypeToString(ToDataType(tensor.type()));
  } else if (var->IsType<SelectedRows>()) {
    auto tensor = var->Get<SelectedRows>().value();
-   if (UNLIKELY(!tensor.IsInitialized())) {
+   if (!tensor.IsInitialized()) {
      return "uninited";
    } else {
      return DataTypeToString(ToDataType(tensor.type()));
@@ -130,7 +130,7 @@ static LoD GetLoD(const Scope& scope, const std::string& name) {
  if (var->IsType<LoDTensor>()) {
    const LoDTensor& tensor = var->Get<LoDTensor>();
-   if (UNLIKELY(!tensor.IsInitialized())) {
+   if (!tensor.IsInitialized()) {
      return default_lod;
    }
    return tensor.lod();
@@ -206,6 +206,7 @@ const std::vector<std::string>& OperatorBase::Outputs(
}

std::string OperatorBase::DebugStringEx(const Scope* scope) const {
  VLOG(3) << this->Type() << " scope ptr " << scope;
  std::stringstream ss;
  ss << "Op(" << type_ << "), inputs:{";
  for (auto it = inputs_.begin(); it != inputs_.end();) {
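Each of the debug helpers above keeps the same guard: if the tensor is not initialized, return a sentinel (DDim({-1}), an empty string, "uninited", or the default LoD) instead of reading it. Below is a minimal standalone sketch of that guard pattern in C++17; FakeTensor and DescribeDims are hypothetical stand-ins for LoDTensor and GetDims.

// Standalone sketch of the guard used in GetDims/GetDtype/GetLoD above:
// check initialization first and return a sentinel instead of touching the
// tensor. FakeTensor and DescribeDims are hypothetical stand-ins (C++17).
#include <cstddef>
#include <iostream>
#include <optional>
#include <string>
#include <vector>

struct FakeTensor {
  std::optional<std::vector<int>> dims;  // empty == not initialized yet
};

std::string DescribeDims(const FakeTensor& t) {
  if (!t.dims) return "[-1]";  // sentinel, mirrors the DDim({-1}) case above
  std::string out = "[";
  for (std::size_t i = 0; i < t.dims->size(); ++i) {
    out += std::to_string((*t.dims)[i]);
    if (i + 1 < t.dims->size()) out += ", ";
  }
  return out + "]";
}

int main() {
  FakeTensor uninit;
  FakeTensor ready;
  ready.dims = std::vector<int>{2, 3};
  std::cout << DescribeDims(uninit) << std::endl;  // prints [-1]
  std::cout << DescribeDims(ready) << std::endl;   // prints [2, 3]
  return 0;
}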
paddle/fluid/inference/api/demo_ci/CMakeLists.txt
@@ -73,10 +73,11 @@ link_directories("${PADDLE_LIB}/paddle/fluid/inference")
# add_executable(${DEMO_NAME} ${DEMO_NAME}.cc)
# add_library(${DEMO_NAME} ${DEMO_NAME}.cc)
add_library(${DEMO_NAME} SHARED ${DEMO_NAME}.cc)
add_executable(real_data_icnet_tester real_data_icnet_tester.cc)
add_executable(test test.cc)
add_executable(thread_icnet_test thread_icnet_test.cc)
# add_library(${DEMO_NAME} SHARED ${DEMO_NAME}.cc)
# add_executable(test test.cc)
# add_executable(thread_icnet_test thread_icnet_test.cc)

if(WITH_MKL)
  include_directories("${PADDLE_LIB}/third_party/install/mklml/include")
@@ -94,11 +95,7 @@ endif()
# Note: libpaddle_inference_api.so/a must put before libpaddle_fluid.so/a
if(WITH_STATIC_LIB)
  set(DEPS
-     # ${PADDLE_LIB}/paddle/fluid/inference/libpaddle_fluid${CMAKE_STATIC_LIBRARY_SUFFIX}
-     D:/Paddle/bazel-dll/fluid_install_dir/paddle/fluid/inference/libpaddle_fluid${CMAKE_STATIC_LIBRARY_SUFFIX}
-     # E:/Paddle/build/paddle/fluid/inference/api/Release/libpaddle_inference_api${CMAKE_STATIC_LIBRARY_SUFFIX}
-     D:/Paddle/bazel-dll/paddle/fluid/inference/api/Release/libpaddle_inference_api${CMAKE_STATIC_LIBRARY_SUFFIX})
+     ${PADDLE_LIB}/paddle/fluid/inference/libpaddle_fluid${CMAKE_STATIC_LIBRARY_SUFFIX})
else()
  set(DEPS
      ${PADDLE_LIB}/paddle/fluid/inference/libpaddle_fluid${CMAKE_SHARED_LIBRARY_SUFFIX})
@@ -129,10 +126,10 @@ if(WITH_GPU)
  endif()
endif()

target_link_libraries(${DEMO_NAME} ${DEPS})
target_link_libraries(test ${DEMO_NAME})
target_link_libraries(thread_icnet_test ${DEPS})
target_link_libraries(real_data_icnet_tester ${DEPS})
target_compile_definitions(${DEMO_NAME} PRIVATE "API_DEFINITION")
# target_link_libraries(${DEMO_NAME} ${DEPS})
# target_link_libraries(test ${DEMO_NAME} )
# target_link_libraries(thread_icnet_test ${DEPS})
# target_compile_definitions(${DEMO_NAME} PRIVATE "API_DEFINITION")
paddle/fluid/inference/api/demo_ci/real_data_icnet_tester.cc
@@ -19,6 +19,7 @@
#include <iostream>
#include "paddle/fluid/inference/api/paddle_inference_api.h"

namespace paddle {
// DEFINE_string(dirname, "./lb",
@@ -27,8 +28,8 @@ namespace paddle {
NativeConfig GetConfig() {
  NativeConfig config;
  // config.model_dir = FLAGS_dirname;
-  config.prog_file = "lb/__model__";
-  config.param_file = "lb/__params__";
+  config.prog_file = "hs_lb_without_bn/__model__";
+  config.param_file = "hs_lb_without_bn/__params__";
  config.fraction_of_gpu_memory = 0.8;
  config.use_gpu = true;
  config.device = 0;
@@ -44,6 +45,7 @@ double time_diff(Time t1, Time t2) {
  return counter.count() / 1000.0;
}

void test_naive(int batch_size) {
  NativeConfig config = GetConfig();
  auto predictor = CreatePaddlePredictor<NativeConfig>(config);
@@ -88,10 +90,9 @@ void test_naive(int batch_size){
  PaddleTensor tensor_out;
  std::vector<PaddleTensor> outputs(1, tensor_out);
-  predictor->Run(paddle_tensor_feeds, &outputs, batch_size);
+  //  predictor->Run(paddle_tensor_feeds, &outputs, batch_size);
  std::cout << "start predict123:" << std::endl;
  auto time1 = time();
  for (size_t i = 0; i < 1; i++) {
    predictor->Run(paddle_tensor_feeds, &outputs, batch_size);
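The benchmark in test_naive takes a timestamp, calls predictor->Run a fixed number of times, and converts the elapsed time with the time()/time_diff() helpers. Below is a standalone sketch of that timing pattern using std::chrono; run_once(), the repeat count, and the millisecond conversion are illustrative placeholders, not the demo's actual helpers.

// Standalone sketch of the timing pattern in test_naive: take a timestamp,
// repeat the work a fixed number of times, and report the average latency in
// milliseconds. run_once() is a placeholder, not a Paddle call.
#include <chrono>
#include <iostream>

using Time = std::chrono::high_resolution_clock::time_point;

Time now() { return std::chrono::high_resolution_clock::now(); }

double time_diff_ms(Time t1, Time t2) {
  auto us = std::chrono::duration_cast<std::chrono::microseconds>(t2 - t1);
  return us.count() / 1000.0;
}

void run_once() {
  // stands in for predictor->Run(paddle_tensor_feeds, &outputs, batch_size)
}

int main() {
  const int repeat = 1;  // the loop above also runs exactly once
  auto t1 = now();
  for (int i = 0; i < repeat; ++i) run_once();
  auto t2 = now();
  std::cout << "batch latency " << time_diff_ms(t1, t2) / repeat << " ms"
            << std::endl;
  return 0;
}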
paddle/fluid/operators/fetch_op.cc
@@ -42,6 +42,8 @@ class FetchOp : public framework::OperatorBase {
                   "Cannot find out_var in scope, out_var_name is %s",
                   out_name);
    VLOG(3) << "fetch_var ptr " << fetch_var << " is " << (fetch_var == nullptr);
    VLOG(3) << "out_var ptr " << out_var << " is " << (out_var == nullptr);
    auto col = static_cast<size_t>(Attr<int>("col"));
    auto *fetch_list = out_var->GetMutable<framework::FeedFetchList>();
paddle/fluid/operators/load_combine_op.cc
@@ -67,7 +67,11 @@ class LoadCombineOp : public framework::OperatorBase {
      framework::TensorCopy(*tensor, platform::CPUPlace(), dev_ctx, &check);
      float sum = .0;
      for (size_t i = 0; i < check.numel(); ++i) {
        if (std::type_index(check.type()) == std::type_index(typeid(int64_t))) {
          sum += static_cast<float>(check.data<int64_t>()[i]);
        } else {
          sum += check.data<float>()[i];
        }
      }
      VLOG(3) << "sum result" << sum;
      auto in_dtype = framework::ToDataType(tensor->type());