BaiXuePrincess / Paddle (forked from PaddlePaddle / Paddle)
Commit 9643f906
Authored Apr 12, 2019 by zhoukunsheng

Merge branch 'develop' of https://github.com/PaddlePaddle/Paddle into rsqrt

Parents: 2b2b4ca2, 06809ebb

Showing 11 changed files with 86 additions and 67 deletions (+86 -67)
paddle/fluid/framework/ir/graph_helper.cc                          +17  -19
paddle/fluid/framework/ir/graph_helper.h                           +10   -2
paddle/fluid/framework/op_desc.cc                                   +1   -0
paddle/fluid/inference/api/analysis_predictor.cc                    +3   -0
paddle/fluid/inference/api/api.cc                                   +1   -0
paddle/fluid/inference/api/api_impl.cc                              +5   -0
paddle/fluid/inference/tests/api/analyzer_seq_conv1_tester.cc       +1   -0
paddle/fluid/inference/tests/api/analyzer_transformer_tester.cc    +17  -22
paddle/fluid/op_use_default_grad_op_maker.spec                      +2   -0
paddle/fluid/operators/detection/gpc.cc                             +5   -0
paddle/fluid/operators/squared_l2_distance_op.h                    +24  -24
paddle/fluid/framework/ir/graph_helper.cc

@@ -31,8 +31,8 @@ namespace paddle {
 namespace framework {
 namespace ir {
 namespace {
-void SortHelper(
-    const std::map<ir::Node *, std::unordered_set<ir::Node *>> &adj_list,
+void SortHelper(const std::map<ir::Node *, std::set<ir::Node *, ir::NodeComp>,
+                               ir::NodeComp> &adj_list,
                 ir::Node *node, std::unordered_set<ir::Node *> *visited,
                 std::vector<ir::Node *> *ret) {
   visited->insert(node);

@@ -50,7 +50,8 @@ void SortHelper(
 bool HasCircleHelper(
     ir::Node *node,
-    const std::map<ir::Node *, std::unordered_set<ir::Node *>> &adj_list,
+    const std::map<ir::Node *, std::set<ir::Node *, ir::NodeComp>, ir::NodeComp>
+        &adj_list,
     std::unordered_set<ir::Node *> *visited,
     std::unordered_set<ir::Node *> *in_trace,
     std::vector<std::vector<ir::Node *>> *circles) {

@@ -84,7 +85,8 @@ bool HasCircleHelper(
 }

 bool HasCircleInternal(
-    const std::map<ir::Node *, std::unordered_set<ir::Node *>> &adj_list,
+    const std::map<ir::Node *, std::set<ir::Node *, ir::NodeComp>, ir::NodeComp>
+        &adj_list,
     std::vector<std::vector<ir::Node *>> *circles) {
   std::unordered_set<ir::Node *> visited;
   std::unordered_set<ir::Node *> in_trace;

@@ -107,8 +109,8 @@ bool FindCircleSubGraph(const Graph &graph,
 }

 std::vector<ir::Node *> TopologySortOperations(const Graph &graph) {
-  std::map<ir::Node *, std::unordered_set<ir::Node *>> adj_list =
-      BuildOperationAdjList(graph);
+  std::map<ir::Node *, std::set<ir::Node *, ir::NodeComp>, ir::NodeComp>
+      adj_list = BuildOperationAdjList(graph);
   PADDLE_ENFORCE(!HasCircleInternal(adj_list, nullptr));
   std::unordered_set<ir::Node *> visited;
   std::vector<ir::Node *> ret;

@@ -117,34 +119,30 @@ std::vector<ir::Node *> TopologySortOperations(const Graph &graph) {
       SortHelper(adj_list, adj.first, &visited, &ret);
     }
   }

   return ret;
 }

 // Build operator inlink edge table.
-std::map<ir::Node *, std::unordered_set<ir::Node *>> BuildOperationAdjList(
-    const Graph &graph) {
-  std::map<ir::Node *, std::unordered_set<ir::Node *>> adj_list;
+std::map<ir::Node *, std::set<ir::Node *, ir::NodeComp>, ir::NodeComp>
+BuildOperationAdjList(const Graph &graph) {
+  std::map<ir::Node *, std::set<ir::Node *, ir::NodeComp>, ir::NodeComp>
+      adj_list;

   for (auto &n : graph.Nodes()) {
     if (!n->IsOp()) continue;
     if (adj_list.find(n) == adj_list.end()) {
-      adj_list[n] = std::unordered_set<ir::Node *>();
+      adj_list[n] = std::set<ir::Node *, ir::NodeComp>();
     }
-    std::vector<ir::Node *> nodes;
     for (auto &var : n->inputs) {
       for (auto &adj_n : var->inputs) {
         PADDLE_ENFORCE(adj_n->NodeType() == ir::Node::Type::kOperation);
         VLOG(4) << "adj " << adj_n->Name() << reinterpret_cast<void *>(adj_n)
                 << " -> " << n->Name() << reinterpret_cast<void *>(n)
                 << " via " << var->Name() << reinterpret_cast<void *>(var);
-        nodes.push_back(adj_n);
+        adj_list[n].insert(adj_n);
       }
     }
-    std::sort(nodes.begin(), nodes.end(),
-              [](ir::Node *node1, ir::Node *node2) {
-                return node1->id() > node2->id();
-              });
-    adj_list[n].insert(std::make_move_iterator(nodes.begin()),
-                       std::make_move_iterator(nodes.end()));
   }
   return adj_list;
 }
paddle/fluid/framework/ir/graph_helper.h

@@ -16,6 +16,7 @@ limitations under the License. */
 #include <map>
 #include <memory>
+#include <set>
 #include <vector>

 #include "paddle/fluid/framework/ir/graph.h"

@@ -25,6 +26,13 @@ namespace paddle {
 namespace framework {
 namespace ir {

+// Compare nodes via node id.
+struct NodeComp {
+  bool operator()(ir::Node *const &node1, ir::Node *const &node2) const {
+    return node1->id() < node2->id();
+  }
+};
+
 // Test if the graph contains circle.
 bool HasCircle(const Graph &graph);

@@ -57,8 +65,8 @@ std::vector<Node *> TopologyVarientSort(const Graph &graph, SortKind sort_kind);
 void CleanIndividualNodes(Graph *graph);

 // Build an adjacency list of operations for the `graph`.
-std::map<ir::Node *, std::unordered_set<ir::Node *>> BuildOperationAdjList(
-    const Graph &graph);
+std::map<ir::Node *, std::set<ir::Node *, ir::NodeComp>, ir::NodeComp>
+BuildOperationAdjList(const Graph &graph);

 template <typename T>
 std::vector<T *> FilterByNodeWrapper(const Graph &graph) {
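The new NodeComp comparator orders node pointers by their integer id, so the std::set/std::map based adjacency list above iterates in a stable order, whereas the old std::unordered_set keyed on Node* iterated in an order that depends on pointer hashing and can differ between runs; since TopologySortOperations walks this adjacency list, the change makes the resulting operator order deterministic. A minimal standalone sketch of the idea follows; the Node struct, the ids, and main() are illustrative stand-ins, not Paddle's ir::Node.

#include <iostream>
#include <memory>
#include <set>
#include <unordered_set>
#include <vector>

// Illustrative stand-in for ir::Node: only the id used by the comparator.
struct Node {
  explicit Node(int id) : id_(id) {}
  int id() const { return id_; }
  int id_;
};

// Same shape as ir::NodeComp in graph_helper.h: order node pointers by id.
struct NodeComp {
  bool operator()(Node *const &a, Node *const &b) const {
    return a->id() < b->id();
  }
};

int main() {
  std::vector<std::unique_ptr<Node>> owner;
  for (int i = 0; i < 5; ++i) owner.emplace_back(new Node(i));

  // Iteration order of an unordered_set of pointers follows the pointer
  // hash, so it can change from run to run (addresses are not stable).
  std::unordered_set<Node *> unordered;
  // Iteration order of a std::set with NodeComp always follows the node id.
  std::set<Node *, NodeComp> ordered;

  for (auto &n : owner) {
    unordered.insert(n.get());
    ordered.insert(n.get());
  }

  std::cout << "unordered (run-dependent):";
  for (Node *n : unordered) std::cout << ' ' << n->id();
  std::cout << "\nordered   (always 0 1 2 3 4):";
  for (Node *n : ordered) std::cout << ' ' << n->id();
  std::cout << '\n';
  return 0;
}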
paddle/fluid/framework/op_desc.cc

@@ -241,6 +241,7 @@ OpDesc::OpDesc(const std::string &type, const VariableNameMap &inputs,
   outputs_ = outputs;
   attrs_ = attrs;
   need_update_ = true;
+  block_ = nullptr;
 }

 OpDesc::OpDesc(const OpDesc &other, BlockDesc *block) {
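The added line simply initializes the raw block_ pointer in this OpDesc constructor. A minimal sketch of the pattern, assuming a hypothetical Desc class rather than Paddle's OpDesc: a raw pointer member must be set in every constructor, otherwise a later null check reads an indeterminate value.

#include <cassert>

// Hypothetical Desc class illustrating the fix: explicitly null the raw
// pointer member so later checks against nullptr are well defined.
class Desc {
 public:
  explicit Desc(int v) : value_(v) { block_ = nullptr; }
  bool HasBlock() const { return block_ != nullptr; }

 private:
  int value_;
  void *block_;  // mirrors OpDesc::block_, a non-owning raw pointer
};

int main() {
  Desc d(42);
  assert(!d.HasBlock());  // well defined only because block_ was nulled
  return 0;
}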
paddle/fluid/inference/api/analysis_predictor.cc

@@ -259,6 +259,9 @@ bool AnalysisPredictor::SetFeed(const std::vector<PaddleTensor> &inputs,
       return false;
     }

+    PADDLE_ENFORCE_NOT_NULL(input_ptr);
+    PADDLE_ENFORCE_NOT_NULL(inputs[i].data.data());
+
     if (platform::is_cpu_place(place_)) {
       // TODO(panyx0718): Init LoDTensor from existing memcpy to save a copy.
       std::memcpy(static_cast<void *>(input_ptr), inputs[i].data.data(),
paddle/fluid/inference/api/api.cc

@@ -54,6 +54,7 @@ PaddleBuf &PaddleBuf::operator=(const PaddleBuf &other) {
     memory_owned_ = other.memory_owned_;
   } else {
     Resize(other.length());
+    PADDLE_ENFORCE(!(other.length() > 0 && other.data() == nullptr));
     memcpy(data_, other.data(), other.length());
     length_ = other.length();
     memory_owned_ = true;
paddle/fluid/inference/api/api_impl.cc

@@ -169,6 +169,7 @@ std::unique_ptr<PaddlePredictor> NativePaddlePredictor::Clone() {
   std::unique_ptr<PaddlePredictor> cls(new NativePaddlePredictor(config_));
   // Hot fix the bug that result diff in multi-thread.
   // TODO(Superjomn) re-implement a real clone here.
+  PADDLE_ENFORCE_NOT_NULL(dynamic_cast<NativePaddlePredictor *>(cls.get()));
   if (!dynamic_cast<NativePaddlePredictor *>(cls.get())->Init(nullptr)) {
     LOG(ERROR) << "fail to call Init";
     return nullptr;

@@ -210,6 +211,8 @@ bool NativePaddlePredictor::SetFeed(const std::vector<PaddleTensor> &inputs,
       return false;
     }

+    PADDLE_ENFORCE_NOT_NULL(input_ptr);
+    PADDLE_ENFORCE_NOT_NULL(inputs[i].data.data());
     if (platform::is_cpu_place(place_)) {
       // TODO(panyx0718): Init LoDTensor from existing memcpy to save a copy.
       std::memcpy(static_cast<void *>(input_ptr), inputs[i].data.data(),

@@ -316,6 +319,8 @@ std::unique_ptr<PaddlePredictor> CreatePaddlePredictor<
   }

   std::unique_ptr<PaddlePredictor> predictor(new NativePaddlePredictor(config));
+  PADDLE_ENFORCE_NOT_NULL(
+      dynamic_cast<NativePaddlePredictor *>(predictor.get()));
   if (!dynamic_cast<NativePaddlePredictor *>(predictor.get())->Init(nullptr)) {
     return nullptr;
   }
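Both SetFeed implementations (and the PaddleBuf copy-assignment above) follow the same defensive pattern: verify the destination pointer and the source buffer before the memcpy, so a misconfigured feed fails loudly instead of dereferencing null. A minimal standalone sketch of that pattern, assuming a hypothetical FeedTensor struct and CopyFeed helper (the real code raises via PADDLE_ENFORCE_NOT_NULL instead of returning false):

#include <cassert>
#include <cstring>
#include <vector>

// Illustrative stand-in for a feed buffer: raw bytes plus a length.
struct FeedTensor {
  const void *data = nullptr;
  size_t length = 0;
};

// Guarded copy: check both pointers before memcpy, mirroring the checks
// added to SetFeed.
bool CopyFeed(void *dst, const FeedTensor &src) {
  if (dst == nullptr || src.data == nullptr) {
    return false;  // the real code raises via PADDLE_ENFORCE_NOT_NULL
  }
  std::memcpy(dst, src.data, src.length);
  return true;
}

int main() {
  std::vector<float> input = {1.f, 2.f, 3.f};
  std::vector<float> slot(input.size());
  FeedTensor feed{input.data(), input.size() * sizeof(float)};
  assert(CopyFeed(slot.data(), feed));

  FeedTensor empty;  // data == nullptr, the case the new checks catch
  assert(!CopyFeed(slot.data(), empty));
  return 0;
}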
paddle/fluid/inference/tests/api/analyzer_seq_conv1_tester.cc

@@ -47,6 +47,7 @@ struct DataRecord {
       num_lines++;
       std::vector<std::string> data;
       split(line, '\t', &data);
+      PADDLE_ENFORCE(data.size() >= 4);
       // load title1 data
       std::vector<int64_t> title1_data;
       split_to_int64(data[0], ' ', &title1_data);
paddle/fluid/inference/tests/api/analyzer_transformer_tester.cc

@@ -214,28 +214,23 @@ TEST(Analyzer_Transformer, fuse_statis) {
 }

 // Compare result of NativeConfig and AnalysisConfig
-// void compare(bool use_mkldnn = false) {
-//   AnalysisConfig cfg;
-//   SetConfig(&cfg);
-//   if (use_mkldnn) {
-//     cfg.EnableMKLDNN();
-//   }
-//
-//   std::vector<std::vector<PaddleTensor>> input_slots_all;
-//   SetInput(&input_slots_all);
-//   CompareNativeAndAnalysis(
-//       reinterpret_cast<const PaddlePredictor::Config *>(&cfg),
-//       input_slots_all);
-// }
-
-// TODO(yihuaxu):
-// Disable compare and compare_mkldnn temporary, see
-// https://github.com/paddlePaddle/Paddle/issues/16316 for details.
-// TEST(Analyzer_Transformer, compare) { compare(); }
-// #ifdef PADDLE_WITH_MKLDNN
-// TEST(Analyzer_Transformer, compare_mkldnn) { compare(true /* use_mkldnn */);
-// }
-// #endif
+void compare(bool use_mkldnn = false) {
+  AnalysisConfig cfg;
+  SetConfig(&cfg);
+  if (use_mkldnn) {
+    cfg.EnableMKLDNN();
+  }
+
+  std::vector<std::vector<PaddleTensor>> input_slots_all;
+  SetInput(&input_slots_all);
+  CompareNativeAndAnalysis(
+      reinterpret_cast<const PaddlePredictor::Config *>(&cfg), input_slots_all);
+}
+
+TEST(Analyzer_Transformer, compare) { compare(); }
+#ifdef PADDLE_WITH_MKLDNN
+TEST(Analyzer_Transformer, compare_mkldnn) { compare(true /* use_mkldnn */); }
+#endif

 }  // namespace inference
 }  // namespace paddle
paddle/fluid/op_use_default_grad_op_maker.spec

@@ -29,6 +29,8 @@ pool3d
 prelu
 quantize
 rank_loss
+reduce_all
+reduce_any
 reduce_max
 reduce_mean
 reduce_min
paddle/fluid/operators/detection/gpc.cc

@@ -24,6 +24,7 @@
 **/

 #include "paddle/fluid/operators/detection/gpc.h"
+#include "paddle/fluid/platform/enforce.h"

 namespace gpc {

@@ -689,6 +690,7 @@ static bbox *create_contour_bboxes(gpc_polygon *p) {
   gpc_malloc<bbox>(box, p->num_contours * sizeof(bbox),
                    const_cast<char *>("Bounding box creation"));
+  PADDLE_ENFORCE_NOT_NULL(box);

   /* Construct contour bounding boxes */
   for (c = 0; c < p->num_contours; c++) {

@@ -852,6 +854,7 @@ void gpc_add_contour(gpc_polygon *p, gpc_vertex_list *new_contour, int hole) {
   /* Create an extended hole array */
   gpc_malloc<int>(extended_hole, (p->num_contours + 1) * sizeof(int),
                   const_cast<char *>("contour hole addition"));
+  PADDLE_ENFORCE_NOT_NULL(extended_hole);

   /* Create an extended contour array */
   gpc_malloc<gpc_vertex_list>(extended_contour,

@@ -969,6 +972,7 @@ void gpc_polygon_clip(gpc_op op, gpc_polygon *subj, gpc_polygon *clip,
   /* Build scanbeam table from scanbeam tree */
   gpc_malloc<double>(sbt, sbt_entries * sizeof(double),
                      const_cast<char *>("sbt creation"));
+  PADDLE_ENFORCE_NOT_NULL(sbt);
   build_sbt(&scanbeam, sbt, sbtree);
   scanbeam = 0;
   free_sbtree(&sbtree);

@@ -1604,6 +1608,7 @@ void gpc_tristrip_clip(gpc_op op, gpc_polygon *subj, gpc_polygon *clip,
   /* Build scanbeam table from scanbeam tree */
   gpc_malloc<double>(sbt, sbt_entries * sizeof(double),
                      const_cast<char *>("sbt creation"));
+  PADDLE_ENFORCE_NOT_NULL(sbt);
   build_sbt(&scanbeam, sbt, sbtree);
   scanbeam = 0;
   free_sbtree(&sbtree);
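Each of these hunks checks the pointer produced by gpc_malloc before it is used, turning an allocation failure into an immediate, named error instead of a later null dereference. A minimal sketch of the same idea with a hypothetical CheckedAlloc helper (the real code keeps gpc_malloc and adds PADDLE_ENFORCE_NOT_NULL on the returned pointer, which raises a Paddle exception rather than aborting):

#include <cstdio>
#include <cstdlib>

// Illustrative helper: allocate `count` objects and fail loudly, with a
// message naming the allocation, if malloc returns null.
template <typename T>
T *CheckedAlloc(size_t count, const char *what) {
  T *p = static_cast<T *>(std::malloc(count * sizeof(T)));
  if (p == nullptr) {
    std::fprintf(stderr, "allocation failed: %s\n", what);
    std::abort();
  }
  return p;
}

int main() {
  // A scanbeam-table-sized buffer, checked right after allocation, so the
  // loops that fill it never index through a null pointer.
  double *sbt = CheckedAlloc<double>(128, "sbt creation");
  sbt[0] = 0.0;
  std::free(sbt);
  return 0;
}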
paddle/fluid/operators/squared_l2_distance_op.h

@@ -77,6 +77,9 @@ class SquaredL2DistanceGradKernel : public framework::OpKernel<T> {
     auto *x_g = context.Output<Tensor>(framework::GradVarName("X"));
     auto *y_g = context.Output<Tensor>(framework::GradVarName("Y"));

+    PADDLE_ENFORCE_NOT_NULL(x_g);
+    PADDLE_ENFORCE_NOT_NULL(y_g);
+
     auto sub_result = EigenMatrix<T>::From(*in0);
     auto out_grad = EigenMatrix<T>::From(*in1);

@@ -92,16 +95,14 @@ class SquaredL2DistanceGradKernel : public framework::OpKernel<T> {
     // propagate back to input
     auto &eigen_place =
         *context.template device_context<DeviceContext>().eigen_device();
     if (x_g) {
       x_g->mutable_data<T>(context.GetPlace());
       // eigen matrix
       auto x_grad =
           EigenMatrix<T>::From(*x_g, framework::make_ddim({x_dims[0], cols}));
       // dimensions are same with subResult
       x_grad.device(eigen_place) = grad_mat;
     }
     if (y_g) {
       y_g->mutable_data<T>(context.GetPlace());
       PADDLE_ENFORCE_GE(sub_result.dimensions()[0], y_dims[0],

@@ -118,7 +119,6 @@ class SquaredL2DistanceGradKernel : public framework::OpKernel<T> {
       y_grad.device(eigen_place) = col_sum_res;
     }
   }
 };

 }  // namespace operators