PaddlePaddle / Paddle
Commit ebf6cf9f
Authored Apr 12, 2019 by zhoukunsheng

Merge branch 'develop' of https://github.com/PaddlePaddle/Paddle into zeros_like

Parents: 380df828, 93cedfdb
Showing 10 changed files with 84 additions and 67 deletions (+84, -67):

paddle/fluid/framework/ir/graph_helper.cc (+17, -19)
paddle/fluid/framework/ir/graph_helper.h (+10, -2)
paddle/fluid/framework/op_desc.cc (+1, -0)
paddle/fluid/inference/api/analysis_predictor.cc (+3, -0)
paddle/fluid/inference/api/api.cc (+1, -0)
paddle/fluid/inference/api/api_impl.cc (+5, -0)
paddle/fluid/inference/tests/api/analyzer_seq_conv1_tester.cc (+1, -0)
paddle/fluid/inference/tests/api/analyzer_transformer_tester.cc (+17, -22)
paddle/fluid/operators/detection/gpc.cc (+5, -0)
paddle/fluid/operators/squared_l2_distance_op.h (+24, -24)
paddle/fluid/framework/ir/graph_helper.cc

```diff
@@ -31,10 +31,10 @@ namespace paddle {
 namespace framework {
 namespace ir {
 namespace {
-void SortHelper(
-    const std::map<ir::Node *, std::unordered_set<ir::Node *>> &adj_list,
-    ir::Node *node, std::unordered_set<ir::Node *> *visited,
-    std::vector<ir::Node *> *ret) {
+void SortHelper(const std::map<ir::Node *, std::set<ir::Node *, ir::NodeComp>,
+                               ir::NodeComp> &adj_list,
+                ir::Node *node, std::unordered_set<ir::Node *> *visited,
+                std::vector<ir::Node *> *ret) {
   visited->insert(node);

   for (auto adj : adj_list.at(node)) {
@@ -50,7 +50,8 @@ void SortHelper(

 bool HasCircleHelper(
     ir::Node *node,
-    const std::map<ir::Node *, std::unordered_set<ir::Node *>> &adj_list,
+    const std::map<ir::Node *, std::set<ir::Node *, ir::NodeComp>,
+                   ir::NodeComp> &adj_list,
     std::unordered_set<ir::Node *> *visited,
     std::unordered_set<ir::Node *> *in_trace,
     std::vector<std::vector<ir::Node *>> *circles) {
@@ -84,7 +85,8 @@ bool HasCircleHelper(
 }

 bool HasCircleInternal(
-    const std::map<ir::Node *, std::unordered_set<ir::Node *>> &adj_list,
+    const std::map<ir::Node *, std::set<ir::Node *, ir::NodeComp>,
+                   ir::NodeComp> &adj_list,
     std::vector<std::vector<ir::Node *>> *circles) {
   std::unordered_set<ir::Node *> visited;
   std::unordered_set<ir::Node *> in_trace;
@@ -107,8 +109,8 @@ bool FindCircleSubGraph(const Graph &graph,
 }

 std::vector<ir::Node *> TopologySortOperations(const Graph &graph) {
-  std::map<ir::Node *, std::unordered_set<ir::Node *>> adj_list =
-      BuildOperationAdjList(graph);
+  std::map<ir::Node *, std::set<ir::Node *, ir::NodeComp>, ir::NodeComp>
+      adj_list = BuildOperationAdjList(graph);
   PADDLE_ENFORCE(!HasCircleInternal(adj_list, nullptr));
   std::unordered_set<ir::Node *> visited;
   std::vector<ir::Node *> ret;
@@ -117,34 +119,30 @@ std::vector<ir::Node *> TopologySortOperations(const Graph &graph) {
       SortHelper(adj_list, adj.first, &visited, &ret);
     }
   }

   return ret;
 }

 // Build operator inlink edge table.
-std::map<ir::Node *, std::unordered_set<ir::Node *>> BuildOperationAdjList(
-    const Graph &graph) {
-  std::map<ir::Node *, std::unordered_set<ir::Node *>> adj_list;
+std::map<ir::Node *, std::set<ir::Node *, ir::NodeComp>, ir::NodeComp>
+BuildOperationAdjList(const Graph &graph) {
+  std::map<ir::Node *, std::set<ir::Node *, ir::NodeComp>, ir::NodeComp>
+      adj_list;

   for (auto &n : graph.Nodes()) {
     if (!n->IsOp()) continue;
     if (adj_list.find(n) == adj_list.end()) {
-      adj_list[n] = std::unordered_set<ir::Node *>();
+      adj_list[n] = std::set<ir::Node *, ir::NodeComp>();
     }
-    std::vector<ir::Node *> nodes;
     for (auto &var : n->inputs) {
       for (auto &adj_n : var->inputs) {
         PADDLE_ENFORCE(adj_n->NodeType() == ir::Node::Type::kOperation);
         VLOG(4) << "adj " << adj_n->Name() << reinterpret_cast<void *>(adj_n)
                 << " -> " << n->Name() << reinterpret_cast<void *>(n)
                 << " via " << var->Name() << reinterpret_cast<void *>(var);
-        nodes.push_back(adj_n);
+        adj_list[n].insert(adj_n);
       }
     }
-    std::sort(nodes.begin(), nodes.end(),
-              [](ir::Node *node1, ir::Node *node2) {
-                return node1->id() > node2->id();
-              });
-    adj_list[n].insert(std::make_move_iterator(nodes.begin()),
-                       std::make_move_iterator(nodes.end()));
   }

   return adj_list;
 }
```
paddle/fluid/framework/ir/graph_helper.h

```diff
@@ -16,6 +16,7 @@ limitations under the License. */

 #include <map>
 #include <memory>
+#include <set>
 #include <vector>

 #include "paddle/fluid/framework/ir/graph.h"
@@ -25,6 +26,13 @@ namespace paddle {
 namespace framework {
 namespace ir {

+// Compare nodes via node id.
+struct NodeComp {
+  bool operator()(ir::Node *const &node1, ir::Node *const &node2) const {
+    return node1->id() < node2->id();
+  }
+};
+
 // Test if the graph contains circle.
 bool HasCircle(const Graph &graph);
@@ -57,8 +65,8 @@ std::vector<Node *> TopologyVarientSort(const Graph &graph, SortKind sort_kind);
 void CleanIndividualNodes(Graph *graph);

 // Build an adjacency list of operations for the `graph`.
-std::map<ir::Node *, std::unordered_set<ir::Node *>> BuildOperationAdjList(
-    const Graph &graph);
+std::map<ir::Node *, std::set<ir::Node *, ir::NodeComp>, ir::NodeComp>
+BuildOperationAdjList(const Graph &graph);

 template <typename T>
 std::vector<T *> FilterByNodeWrapper(const Graph &graph) {
```
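Taken together, these two files swap the hash-ordered `std::unordered_set` adjacency containers for `std::set` keyed by the new `NodeComp`, so `BuildOperationAdjList` and `TopologySortOperations` visit neighbors in ascending node-id order regardless of pointer values, making the topological sort deterministic across runs. A minimal standalone sketch of that effect; `MockNode` and its ids are hypothetical stand-ins for `ir::Node`, not Paddle API:

```cpp
#include <iostream>
#include <set>

// Hypothetical stand-in for ir::Node; only the id() used by the comparator.
struct MockNode {
  explicit MockNode(int id) : id_(id) {}
  int id() const { return id_; }
  int id_;
};

// Same shape as the NodeComp introduced in graph_helper.h: order by node id.
struct MockNodeComp {
  bool operator()(MockNode *const &a, MockNode *const &b) const {
    return a->id() < b->id();
  }
};

int main() {
  MockNode n3(3), n1(1), n2(2);
  // Pointer values and insertion order vary run to run; iteration order does
  // not, because the set is ordered by the id-based comparator.
  std::set<MockNode *, MockNodeComp> adj = {&n3, &n1, &n2};
  for (MockNode *n : adj) std::cout << n->id() << " ";  // always prints: 1 2 3
  std::cout << "\n";
  return 0;
}
```

With an `unordered_set` of pointers, the same three inserts could iterate in an order that changes from run to run, which is exactly the nondeterminism this commit removes.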
paddle/fluid/framework/op_desc.cc

```diff
@@ -241,6 +241,7 @@ OpDesc::OpDesc(const std::string &type, const VariableNameMap &inputs,
   outputs_ = outputs;
   attrs_ = attrs;
   need_update_ = true;
+  block_ = nullptr;
 }

 OpDesc::OpDesc(const OpDesc &other, BlockDesc *block) {
```
paddle/fluid/inference/api/analysis_predictor.cc

```diff
@@ -259,6 +259,9 @@ bool AnalysisPredictor::SetFeed(const std::vector<PaddleTensor> &inputs,
       return false;
     }

+    PADDLE_ENFORCE_NOT_NULL(input_ptr);
+    PADDLE_ENFORCE_NOT_NULL(inputs[i].data.data());
+
     if (platform::is_cpu_place(place_)) {
       // TODO(panyx0718): Init LoDTensor from existing memcpy to save a copy.
       std::memcpy(static_cast<void *>(input_ptr), inputs[i].data.data(),
```
paddle/fluid/inference/api/api.cc

```diff
@@ -54,6 +54,7 @@ PaddleBuf &PaddleBuf::operator=(const PaddleBuf &other) {
     memory_owned_ = other.memory_owned_;
   } else {
     Resize(other.length());
+    PADDLE_ENFORCE(!(other.length() > 0 && other.data() == nullptr));
     memcpy(data_, other.data(), other.length());
     length_ = other.length();
     memory_owned_ = true;
```
paddle/fluid/inference/api/api_impl.cc

```diff
@@ -169,6 +169,7 @@ std::unique_ptr<PaddlePredictor> NativePaddlePredictor::Clone() {
   std::unique_ptr<PaddlePredictor> cls(new NativePaddlePredictor(config_));
   // Hot fix the bug that result diff in multi-thread.
   // TODO(Superjomn) re-implement a real clone here.
+  PADDLE_ENFORCE_NOT_NULL(dynamic_cast<NativePaddlePredictor *>(cls.get()));
   if (!dynamic_cast<NativePaddlePredictor *>(cls.get())->Init(nullptr)) {
     LOG(ERROR) << "fail to call Init";
     return nullptr;
@@ -210,6 +211,8 @@ bool NativePaddlePredictor::SetFeed(const std::vector<PaddleTensor> &inputs,
       return false;
     }

+    PADDLE_ENFORCE_NOT_NULL(input_ptr);
+    PADDLE_ENFORCE_NOT_NULL(inputs[i].data.data());
     if (platform::is_cpu_place(place_)) {
       // TODO(panyx0718): Init LoDTensor from existing memcpy to save a copy.
       std::memcpy(static_cast<void *>(input_ptr), inputs[i].data.data(),
@@ -316,6 +319,8 @@ std::unique_ptr<PaddlePredictor> CreatePaddlePredictor<
   }

   std::unique_ptr<PaddlePredictor> predictor(new NativePaddlePredictor(config));
+  PADDLE_ENFORCE_NOT_NULL(
+      dynamic_cast<NativePaddlePredictor *>(predictor.get()));
   if (!dynamic_cast<NativePaddlePredictor *>(predictor.get())->Init(nullptr)) {
     return nullptr;
   }
```
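The `Clone()` and `CreatePaddlePredictor` hunks add the same guard: `dynamic_cast` on a pointer yields `nullptr` when the object is not of the expected concrete type, and the old code dereferenced that result unchecked. A minimal sketch of the guarded pattern, using hypothetical types and a plain `assert` in place of `PADDLE_ENFORCE_NOT_NULL`:

```cpp
#include <cassert>

struct PaddlePredictorBase { virtual ~PaddlePredictorBase() = default; };
struct NativeImpl : PaddlePredictorBase {
  bool Init() { return true; }
};

// Mirrors the guard added before ->Init(nullptr): enforce that the cast
// succeeded before calling through the result.
bool InitAsNative(PaddlePredictorBase *p) {
  NativeImpl *native = dynamic_cast<NativeImpl *>(p);
  assert(native != nullptr);  // stands in for PADDLE_ENFORCE_NOT_NULL
  return native->Init();
}

int main() {
  NativeImpl impl;
  return InitAsNative(&impl) ? 0 : 1;
}
```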
paddle/fluid/inference/tests/api/analyzer_seq_conv1_tester.cc

```diff
@@ -47,6 +47,7 @@ struct DataRecord {
       num_lines++;
       std::vector<std::string> data;
       split(line, '\t', &data);
+      PADDLE_ENFORCE(data.size() >= 4);
       // load title1 data
       std::vector<int64_t> title1_data;
       split_to_int64(data[0], ' ', &title1_data);
```
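The new `PADDLE_ENFORCE(data.size() >= 4)` makes a malformed input line fail fast instead of indexing out of range; presumably the threshold is four because the parser goes on to read the first four tab-separated fields, though only `data[0]` is visible in this hunk. A hedged sketch of the pattern with a plain `assert`:

```cpp
#include <cassert>
#include <string>
#include <vector>

// Validate the field count before indexing into a parsed record. The record
// layout here is assumed, not taken from the test: the hunk only shows that
// at least four fields are required and that data[0] holds the title1 ids.
void LoadRecord(const std::vector<std::string> &data) {
  assert(data.size() >= 4);  // stands in for PADDLE_ENFORCE(data.size() >= 4)
  const std::string &title1 = data[0];  // later fields are parsed similarly
  (void)title1;
}

int main() {
  LoadRecord({"1 2 3", "4 5", "6", "7 8"});
  return 0;
}
```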
paddle/fluid/inference/tests/api/analyzer_transformer_tester.cc

```diff
@@ -214,28 +214,23 @@ TEST(Analyzer_Transformer, fuse_statis) {
 }

 // Compare result of NativeConfig and AnalysisConfig
-// void compare(bool use_mkldnn = false) {
-//   AnalysisConfig cfg;
-//   SetConfig(&cfg);
-//   if (use_mkldnn) {
-//     cfg.EnableMKLDNN();
-//   }
-//
-//   std::vector<std::vector<PaddleTensor>> input_slots_all;
-//   SetInput(&input_slots_all);
-//   CompareNativeAndAnalysis(
-//       reinterpret_cast<const PaddlePredictor::Config *>(&cfg),
-//       input_slots_all);
-// }
+void compare(bool use_mkldnn = false) {
+  AnalysisConfig cfg;
+  SetConfig(&cfg);
+  if (use_mkldnn) {
+    cfg.EnableMKLDNN();
+  }
+
+  std::vector<std::vector<PaddleTensor>> input_slots_all;
+  SetInput(&input_slots_all);
+  CompareNativeAndAnalysis(
+      reinterpret_cast<const PaddlePredictor::Config *>(&cfg),
+      input_slots_all);
+}

-// TODO(yihuaxu):
-// Disable compare and compare_mkldnn temporary, see
-// https://github.com/paddlePaddle/Paddle/issues/16316 for details.
-// TEST(Analyzer_Transformer, compare) { compare(); }
-// #ifdef PADDLE_WITH_MKLDNN
-// TEST(Analyzer_Transformer, compare_mkldnn) { compare(true /* use_mkldnn */);
-// }
-// #endif
+TEST(Analyzer_Transformer, compare) { compare(); }
+#ifdef PADDLE_WITH_MKLDNN
+TEST(Analyzer_Transformer, compare_mkldnn) { compare(true /* use_mkldnn */); }
+#endif

 }  // namespace inference
 }  // namespace paddle
```
paddle/fluid/operators/detection/gpc.cc

```diff
@@ -24,6 +24,7 @@
 **/

 #include "paddle/fluid/operators/detection/gpc.h"
+#include "paddle/fluid/platform/enforce.h"

 namespace gpc {
@@ -689,6 +690,7 @@ static bbox *create_contour_bboxes(gpc_polygon *p) {
   gpc_malloc<bbox>(box, p->num_contours * sizeof(bbox),
                    const_cast<char *>("Bounding box creation"));
+  PADDLE_ENFORCE_NOT_NULL(box);

   /* Construct contour bounding boxes */
   for (c = 0; c < p->num_contours; c++) {
@@ -852,6 +854,7 @@ void gpc_add_contour(gpc_polygon *p, gpc_vertex_list *new_contour, int hole) {
   /* Create an extended hole array */
   gpc_malloc<int>(extended_hole, (p->num_contours + 1) * sizeof(int),
                   const_cast<char *>("contour hole addition"));
+  PADDLE_ENFORCE_NOT_NULL(extended_hole);

   /* Create an extended contour array */
   gpc_malloc<gpc_vertex_list>(extended_contour,
@@ -969,6 +972,7 @@ void gpc_polygon_clip(gpc_op op, gpc_polygon *subj, gpc_polygon *clip,
   /* Build scanbeam table from scanbeam tree */
   gpc_malloc<double>(sbt, sbt_entries * sizeof(double),
                      const_cast<char *>("sbt creation"));
+  PADDLE_ENFORCE_NOT_NULL(sbt);
   build_sbt(&scanbeam, sbt, sbtree);
   scanbeam = 0;
   free_sbtree(&sbtree);
@@ -1604,6 +1608,7 @@ void gpc_tristrip_clip(gpc_op op, gpc_polygon *subj, gpc_polygon *clip,
   /* Build scanbeam table from scanbeam tree */
   gpc_malloc<double>(sbt, sbt_entries * sizeof(double),
                      const_cast<char *>("sbt creation"));
+  PADDLE_ENFORCE_NOT_NULL(sbt);
   build_sbt(&scanbeam, sbt, sbtree);
   scanbeam = 0;
   free_sbtree(&sbtree);
```
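Each gpc.cc hunk follows the same recipe: include `enforce.h`, then assert that `gpc_malloc` actually produced a buffer before it is used. A sketch of the idea, assuming (as the diff suggests) that allocation failure is signaled through a null pointer; `checked_malloc` below is illustrative, not Paddle API:

```cpp
#include <cassert>
#include <cstdlib>

// Illustrative wrapper: allocate, then fail fast on a null result instead of
// deferring the crash to the first dereference.
template <typename T>
void checked_malloc(T **ptr, std::size_t bytes) {
  *ptr = static_cast<T *>(std::malloc(bytes));
  assert(*ptr != nullptr);  // stands in for PADDLE_ENFORCE_NOT_NULL(*ptr)
}

int main() {
  double *sbt = nullptr;
  checked_malloc(&sbt, 16 * sizeof(double));
  sbt[0] = 0.0;  // safe: the enforcement above already ran
  std::free(sbt);
  return 0;
}
```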
paddle/fluid/operators/squared_l2_distance_op.h

```diff
@@ -77,6 +77,9 @@ class SquaredL2DistanceGradKernel : public framework::OpKernel<T> {
     auto *x_g = context.Output<Tensor>(framework::GradVarName("X"));
     auto *y_g = context.Output<Tensor>(framework::GradVarName("Y"));

+    PADDLE_ENFORCE_NOT_NULL(x_g);
+    PADDLE_ENFORCE_NOT_NULL(y_g);
+
     auto sub_result = EigenMatrix<T>::From(*in0);
     auto out_grad = EigenMatrix<T>::From(*in1);
@@ -92,31 +95,28 @@ class SquaredL2DistanceGradKernel : public framework::OpKernel<T> {
     // propagate back to input
     auto &eigen_place =
         *context.template device_context<DeviceContext>().eigen_device();
-    if (x_g) {
-      x_g->mutable_data<T>(context.GetPlace());
-      // eigen matrix
-      auto x_grad =
-          EigenMatrix<T>::From(*x_g, framework::make_ddim({x_dims[0], cols}));
-      // dimensions are same with subResult
-      x_grad.device(eigen_place) = grad_mat;
-    }
-    if (y_g) {
-      y_g->mutable_data<T>(context.GetPlace());
-      PADDLE_ENFORCE_GE(sub_result.dimensions()[0], y_dims[0],
-                        "First dimension of gradient must be greater or "
-                        "equal than first dimension of target.");
-      if (sub_result.dimensions()[0] == y_dims[0]) {
-        auto y_grad =
-            EigenMatrix<T>::From(*y_g, framework::make_ddim({y_dims[0], cols}));
-        y_grad.device(eigen_place) = -1 * grad_mat;
-      } else {
-        auto col_sum_res = -1 * (grad_mat.sum(Eigen::array<int, 1>({{0}})));
-        auto y_grad = EigenVector<T>::Flatten(*y_g);
-        y_grad.device(eigen_place) = col_sum_res;
-      }
+
+    x_g->mutable_data<T>(context.GetPlace());
+    // eigen matrix
+    auto x_grad =
+        EigenMatrix<T>::From(*x_g, framework::make_ddim({x_dims[0], cols}));
+    // dimensions are same with subResult
+    x_grad.device(eigen_place) = grad_mat;
+
+    y_g->mutable_data<T>(context.GetPlace());
+    PADDLE_ENFORCE_GE(sub_result.dimensions()[0], y_dims[0],
+                      "First dimension of gradient must be greater or "
+                      "equal than first dimension of target.");
+
+    if (sub_result.dimensions()[0] == y_dims[0]) {
+      auto y_grad =
+          EigenMatrix<T>::From(*y_g, framework::make_ddim({y_dims[0], cols}));
+      y_grad.device(eigen_place) = -1 * grad_mat;
+    } else {
+      auto col_sum_res = -1 * (grad_mat.sum(Eigen::array<int, 1>({{0}})));
+      auto y_grad = EigenVector<T>::Flatten(*y_g);
+      y_grad.device(eigen_place) = col_sum_res;
     }
   }
 };
```
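The restructured `else` branch handles the broadcast case: when Y supplied fewer rows than `sub_result` (its single row was broadcast against the rows of X), its gradient is the negative column-wise sum of the upstream gradient rather than a row-for-row copy. A small plain-C++ check of that arithmetic on illustrative values:

```cpp
#include <iostream>
#include <vector>

// When Y's one row was broadcast against N rows of X, dY reduces over the
// row dimension: dY = -1 * sum over rows of the upstream gradient.
int main() {
  const int rows = 3, cols = 2;
  std::vector<double> grad_mat = {1, 2,   // upstream gradient, row 0
                                  3, 4,   // row 1
                                  5, 6};  // row 2
  std::vector<double> y_grad(cols, 0.0);
  for (int r = 0; r < rows; ++r)
    for (int c = 0; c < cols; ++c) y_grad[c] += -1 * grad_mat[r * cols + c];
  // Matches -1 * grad_mat.sum(dim 0): prints -9 -12
  for (double v : y_grad) std::cout << v << " ";
  std::cout << "\n";
  return 0;
}
```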