BaiXuePrincess / Paddle (forked from PaddlePaddle / Paddle)
Commit ba538012, authored on Jun 29, 2018 by fengjiayi

Merge branch 'fix_Mac_compile_errors' into dev_data_balance

Parents: 077434c2, 15be5138
Showing 8 changed files with 44 additions and 28 deletions (+44 −28)
paddle/contrib/inference/CMakeLists.txt                               +5  −3
paddle/fluid/framework/parallel_executor.cc                           +3  −0
paddle/fluid/inference/analysis/tensorrt_subgraph_node_mark_pass.h   +10  −3
paddle/fluid/inference/analysis/tensorrt_subgraph_pass.h              +5  −4
paddle/scripts/paddle_build.sh                                        +1  −0
python/paddle/fluid/layers/nn.py                                      +5  −5
python/paddle/fluid/optimizer.py                                      +5 −12
python/paddle/fluid/tests/unittests/test_layers.py                   +10  −1
paddle/contrib/inference/CMakeLists.txt

@@ -61,7 +61,7 @@ cc_library(paddle_inference_tensorrt_subgraph_engine
   inference_api_test(test_paddle_inference_api_tensorrt_subgraph_engine ARGS test_word2vec)
 endif()
 
-if (WITH_ANAKIN AND WITH_TESTING) # only needed in CI
+if (WITH_ANAKIN) # only needed in CI
   # Due to Anakin do not have official library releases and the versions of protobuf and cuda do not match Paddle's,
   # so anakin library will not be merged to our official inference library. To use anakin prediction API, one need to
   # compile the libinference_anakin_api.a and compile with anakin.so.
@@ -71,10 +71,12 @@ if (WITH_ANAKIN AND WITH_TESTING) # only needed in CI
   target_compile_options(inference_anakin_api_shared BEFORE PUBLIC ${ANAKIN_COMPILE_EXTRA_FLAGS})
   target_link_libraries(inference_anakin_api anakin anakin_saber_common)
   target_link_libraries(inference_anakin_api_shared anakin anakin_saber_common)
-  cc_test(inference_anakin_test SRCS paddle_inference_api_anakin_engine_tester.cc
+  if (WITH_TESTING)
+    cc_test(inference_anakin_test SRCS paddle_inference_api_anakin_engine_tester.cc
           ARGS --model=${ANAKIN_INSTALL_DIR}/mobilenet_v2.anakin.bin
           DEPS inference_anakin_api)
-  target_compile_options(inference_anakin_test BEFORE PUBLIC ${ANAKIN_COMPILE_EXTRA_FLAGS})
+    target_compile_options(inference_anakin_test BEFORE PUBLIC ${ANAKIN_COMPILE_EXTRA_FLAGS})
+  endif(WITH_TESTING)
 endif()
paddle/fluid/framework/parallel_executor.cc

@@ -253,6 +253,9 @@ void ParallelExecutor::FeedAndSplitTensorIntoLocalScopes(
       t->set_lod(lod_tensors[j].lod());
     }
   }
+  for (auto &p : member_->places_) {
+    platform::DeviceContextPool::Instance().Get(p)->Wait();
+  }
 }
 
 ParallelExecutor::~ParallelExecutor() {
paddle/fluid/inference/analysis/tensorrt_subgraph_node_mark_pass.h

@@ -16,6 +16,10 @@
  * This file defines TensorRTSubgraphNodeMarkPass which helps to mark the ops
  * that supported by TensorRT engine.
  */
+
 #pragma once
+
+#include <string>
+
 #include "paddle/fluid/inference/analysis/pass.h"
 #include "paddle/fluid/inference/analysis/subgraph_splitter.h"
@@ -30,7 +34,8 @@ class TensorRTSubgraphNodeMarkPass : public DataFlowGraphPass {
  public:
   using teller_t = SubGraphSplitter::NodeInsideSubgraphTeller;
 
-  TensorRTSubgraphNodeMarkPass(const teller_t& teller) : teller_(teller) {}
+  explicit TensorRTSubgraphNodeMarkPass(const teller_t& teller)
+      : teller_(teller) {}
 
   bool Initialize(Argument* argument) override { return true; }
@@ -38,8 +43,10 @@ class TensorRTSubgraphNodeMarkPass : public DataFlowGraphPass {
   // sub-graph into TensorRT.
   void Run(DataFlowGraph* graph) override;
 
-  std::string repr() const { return "tensorrt-sub-subgraph-mark"; }
-  std::string description() const { return "tensorrt sub-graph mark pass"; }
+  std::string repr() const override { return "tensorrt-sub-subgraph-mark"; }
+  std::string description() const override {
+    return "tensorrt sub-graph mark pass";
+  }
 
   Pass* CreateGraphvizDebugerPass() const override;
   bool Finalize() override;
paddle/fluid/inference/analysis/tensorrt_subgraph_pass.h

@@ -14,6 +14,7 @@ limitations under the License. */
 #pragma once
 
+#include <string>
 #include "paddle/fluid/inference/analysis/node.h"
 #include "paddle/fluid/inference/analysis/pass.h"
 #include "paddle/fluid/inference/analysis/subgraph_splitter.h"
@@ -30,7 +31,7 @@ class TensorRTSubGraphPass : public DataFlowGraphPass {
   // Tell whether to transform a sub-graph into TensorRT.
   using NodeInsideSubgraphTeller = SubGraphFuse::NodeInsideSubgraphTeller;
 
-  TensorRTSubGraphPass(const NodeInsideSubgraphTeller& teller);
+  explicit TensorRTSubGraphPass(const NodeInsideSubgraphTeller& teller);
 
   bool Initialize(Argument* argument) override { return true; }
@@ -40,8 +41,8 @@ class TensorRTSubGraphPass : public DataFlowGraphPass {
   bool Finalize() override { return true; }
 
-  std::string repr() const { return "tensorrt-sub-graph"; }
-  std::string description() const { return "tensorrt sub graph pass"; }
+  std::string repr() const override { return "tensorrt-sub-graph"; }
+  std::string description() const override { return "tensorrt sub graph pass"; }
 
  private:
   NodeInsideSubgraphTeller node_inside_subgraph_teller_;
@@ -49,4 +50,4 @@ class TensorRTSubGraphPass : public DataFlowGraphPass {
 
 }  // namespace analysis
 }  // namespace inference
-}  // paddle
+}  // namespace paddle
paddle/scripts/paddle_build.sh

@@ -106,6 +106,7 @@ function cmake_gen() {
         -DWITH_FLUID_ONLY=${WITH_FLUID_ONLY:-OFF}
         -DCMAKE_EXPORT_COMPILE_COMMANDS=ON
         -DWITH_CONTRIB=${WITH_CONTRIB:-ON}
+        -DWITH_ANAKIN=${WITH_ANAKIN:-ON}
         -DWITH_INFERENCE_DEMO=${WITH_INFERENCE_DEMO:-ON}
 ========================================
 EOF
python/paddle/fluid/layers/nn.py

@@ -5078,12 +5078,12 @@ def mean_iou(input, label, num_classes):
     out_correct = helper.create_tmp_variable(dtype='int32')
     helper.append_op(
         type="mean_iou",
-        inputs={"predictions": input,
-                "labels": label},
+        inputs={"Predictions": input,
+                "Labels": label},
         outputs={
-            "out_mean_iou": out_mean_iou,
-            "out_wrong": out_wrong,
-            "out_correct": out_correct
+            "OutMeanIou": out_mean_iou,
+            "OutWrong": out_wrong,
+            "OutCorrect": out_correct
         },
         attrs={"num_classes": num_classes})
     return out_mean_iou, out_wrong, out_correct
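The rename above switches the Python wrapper to the CamelCase input/output keys ("Predictions", "Labels", "OutMeanIou", "OutWrong", "OutCorrect"), which appear to be the names the underlying C++ mean_iou operator registers. A minimal sketch of calling the fixed wrapper, modeled on the unit test this commit adds to test_layers.py (variable names are illustrative):

    import paddle.fluid.layers as layers
    from paddle.fluid.framework import Program, program_guard

    # Build a small program that wires the mean_iou op end to end.
    program = Program()
    with program_guard(program):
        predictions = layers.data(name='x', shape=[16], dtype='float32')
        label = layers.data(name='label', shape=[1], dtype='int64')
        # mean_iou returns three variables: the mean IoU itself plus
        # the per-class wrong and correct counts.
        iou, out_wrong, out_correct = layers.mean_iou(predictions, label, 2)
    print(str(program))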
python/paddle/fluid/optimizer.py

@@ -1113,7 +1113,6 @@ class ModelAverage(Optimizer):
     Args:
         average_window_rate: The rate of average window.
-        params_grads: A list of parameter-grad variable pairs.
         min_average_window: The minimum size of average window.
         max_average_window: The maximum size of average window.
@@ -1122,8 +1121,8 @@ class ModelAverage(Optimizer):
         .. code-block:: python
 
           optimizer = fluid.optimizer.Momentum()
-          _, params_grads = optimizer.minimize(cost)
-          model_average = fluid.optimizer.ModelAverage(params_grads, 0.15,
+          optimizer.minimize(cost)
+          model_average = fluid.optimizer.ModelAverage(0.15,
                                         min_average_window=10000,
                                         max_average_window=20000)
           for pass_id in range(args.pass_num):
@@ -1137,7 +1136,6 @@ class ModelAverage(Optimizer):
     def __init__(self,
                  average_window_rate,
-                 params_grads=None,
                  min_average_window=10000,
                  max_average_window=10000,
                  **kwargs):
@@ -1146,21 +1144,16 @@ class ModelAverage(Optimizer):
         self.min_average_window = min_average_window
         self.max_average_window = max_average_window
 
-        self.params_grads = [] if params_grads is None else params_grads
-        params = {}
-        for param, grad in self.params_grads:
-            if param.do_model_average != False:
-                params[param.name] = (param, grad)
+        self.params_grads = []
         for param in framework.default_main_program().global_block(
         ).all_parameters():
-            if param.name not in params and param.do_model_average != False:
+            if param.do_model_average != False:
                 grad = param.block.create_var(
                     name=unique_name.generate(".".join([param.name, 'tmp'])),
                     dtype=param.dtype,
                     persistable=False,
                     stop_gradient=True)
-                params[param.name] = (param, grad)
-        self.params_grads = params.values()
+                self.params_grads.append((param, grad))
 
         for param, grad in self.params_grads:
             self._append_average_accumulate_op(param)
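With params_grads removed from __init__, ModelAverage now gathers the parameter/gradient pairs itself by walking the default main program, as the rewritten constructor shows. A usage sketch following the revised docstring; `cost` stands for a loss variable built earlier, and the Momentum hyperparameters are illustrative since the docstring omits them:

    import paddle.fluid as fluid

    # ... build a network that produces a loss variable `cost` first ...
    optimizer = fluid.optimizer.Momentum(learning_rate=0.01, momentum=0.9)
    optimizer.minimize(cost)

    # No params_grads argument any more; only the window settings remain.
    model_average = fluid.optimizer.ModelAverage(
        0.15, min_average_window=10000, max_average_window=20000)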
python/paddle/fluid/tests/unittests/test_layers.py

@@ -401,7 +401,7 @@ class TestBook(unittest.TestCase):
         self.assertIsNotNone(output)
         print(str(program))
 
-    def test_maxout(self):
+    def test_crop(self):
         program = Program()
         with program_guard(program):
             x = layers.data(name='x', shape=[3, 5], dtype="float32")
@@ -410,6 +410,15 @@ class TestBook(unittest.TestCase):
         self.assertIsNotNone(output)
         print(str(program))
 
+    def test_mean_iou(self):
+        program = Program()
+        with program_guard(program):
+            x = layers.data(name='x', shape=[16], dtype='float32')
+            y = layers.data(name='label', shape=[1], dtype='int64')
+            iou = layers.mean_iou(x, y, 2)
+            self.assertIsNotNone(iou)
+        print(str(program))
+
 
 if __name__ == '__main__':
     unittest.main()