机器未来 / Paddle (forked from PaddlePaddle / Paddle)

Commit 6bef0796
Authored on Aug 31, 2017 by yangyaming

Follow coding style and move reshaping operation to paddle tensor.

Parent: f8b885f2

Showing 3 changed files with 92 additions and 135 deletions:
paddle/operators/squared_l2_distance_op.cc (+26, -21)
paddle/operators/squared_l2_distance_op.h (+56, -114)
python/paddle/v2/framework/tests/test_squared_l2_distance_op.py (+10, -0)
paddle/operators/squared_l2_distance_op.cc

```diff
@@ -30,22 +30,27 @@ class SquaredL2DistanceOp : public framework::OperatorWithKernel {
                    "Target of SquaredL2DistanceOp "
                    "must be initialized.");
-    auto* X = ctx.Input<Tensor>("X");
-    auto xDims = X->dims();
-    auto* Y = ctx.Input<Tensor>("Y");
-    auto yDims = Y->dims();
-    PADDLE_ENFORCE_EQ(framework::arity(xDims), framework::arity(yDims),
+    auto* x = ctx.Input<Tensor>("X");
+    auto x_dims = x->dims();
+    auto* y = ctx.Input<Tensor>("Y");
+    auto y_dims = y->dims();
+    PADDLE_ENFORCE_EQ(framework::arity(x_dims), framework::arity(y_dims),
                       "Tensor rank of both SquaredL2DistanceOp's "
                       "inputs must be same.");
-    int rank = framework::arity(xDims);
-    PADDLE_ENFORCE(rank >= 2 || rank <= 6, "Tensor rank should be in [2, 6].");
-    PADDLE_ENFORCE(yDims[0] == 1 || yDims[0] == xDims[0],
+    int rank = framework::arity(x_dims);
+    PADDLE_ENFORCE(rank >= 2, "Tensor rank should be at least equal to 2.");
+    PADDLE_ENFORCE_EQ(framework::product(x_dims) / x_dims[0],
+                      framework::product(y_dims) / y_dims[0],
+                      "Product of dimensions expcet the first dimension of "
+                      "input and target must be equal.");
+    PADDLE_ENFORCE(y_dims[0] == 1 || y_dims[0] == x_dims[0],
                    "First dimension of target must be equal to input "
                    "or to 1.");
-    ctx.Output<Tensor>("sub_result")->Resize(xDims);
-    ctx.Output<Tensor>("Out")->Resize({xDims[0], 1});
+    ctx.Output<Tensor>("sub_result")->Resize(x_dims);
+    ctx.Output<Tensor>("Out")->Resize({x_dims[0], 1});
   }
 };
@@ -66,8 +71,8 @@ class SquaredL2DistanceOpMaker : public framework::OpProtoAndCheckerMaker {
     input and target. Number of distance value equals to the
     first dimension of input. First dimension of target could be equal to
     input or to 1. If the first dimension of target is 1, SquaredL2DistanceOp
-    will broadcast the first dimension to the first dimension of input.
-    You can decide whether calculate the gradient of target.
+    will broadcast target's first dimension to input's first dimension.
+    You can decide whether calculate the gradient of input and target.
 )DOC");
   }
 };
@@ -81,19 +86,19 @@ class SquaredL2DistanceGradOp : public framework::OperatorWithKernel {
     PADDLE_ENFORCE_NOT_NULL(ctx.InputVar(framework::GradVarName("Out")),
                             "Gradient of Out should not be null");
     // check out grad dimensions
-    auto outDims = ctx.Input<Tensor>(framework::GradVarName("Out"))->dims();
-    auto xDims = ctx.Input<Tensor>("X")->dims();
-    auto yDims = ctx.Input<Tensor>("Y")->dims();
-    PADDLE_ENFORCE_EQ(outDims[0], xDims[0],
+    auto out_dims = ctx.Input<Tensor>(framework::GradVarName("Out"))->dims();
+    auto x_dims = ctx.Input<Tensor>("X")->dims();
+    auto y_dims = ctx.Input<Tensor>("Y")->dims();
+    PADDLE_ENFORCE_EQ(out_dims[0], x_dims[0],
                       "First dimension of output gradient and "
                       "input value must be equal.");
-    PADDLE_ENFORCE_EQ(outDims[1], 1,
+    PADDLE_ENFORCE_EQ(out_dims[1], 1,
                       "Second dimension of output gradient "
                       "must be 1.");
-    auto* xGrad = ctx.Output<Tensor>(framework::GradVarName("X"));
-    auto* yGrad = ctx.Output<Tensor>(framework::GradVarName("Y"));
-    if (xGrad != nullptr) xGrad->Resize(xDims);
-    if (yGrad != nullptr) yGrad->Resize(yDims);
+    auto* x_grad = ctx.Output<Tensor>(framework::GradVarName("X"));
+    auto* y_grad = ctx.Output<Tensor>(framework::GradVarName("Y"));
+    if (x_grad != nullptr) x_grad->Resize(x_dims);
+    if (y_grad != nullptr) y_grad->Resize(y_dims);
   }
 };
```
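To make the checked contract concrete: X is (N, ...), Y must match X in the product of all dimensions after the first and have first dimension N or 1, and Out comes back as (N, 1). Here is a hypothetical NumPy reference of those semantics (our sketch, not part of the commit; the helper name is ours):

```python
import numpy as np

def squared_l2_distance(x, y):
    # Hypothetical reference for the semantics the shape checks above imply.
    n = x.shape[0]
    x2 = x.reshape(n, -1)              # collapse all dims except the first
    y2 = y.reshape(y.shape[0], -1)
    # "Product of dimensions except the first ... must be equal."
    assert x2.shape[1] == y2.shape[1]
    # "First dimension of target must be equal to input or to 1."
    assert y2.shape[0] in (1, n)
    sub = x2 - y2                      # NumPy broadcasts the single row of y2
    return (sub ** 2).sum(axis=1, keepdims=True)  # shape (N, 1)

x = np.random.uniform(0.1, 1.0, (2, 3, 4)).astype('float32')
y = np.random.uniform(0.1, 1.0, (1, 3, 4)).astype('float32')
print(squared_l2_distance(x, y).shape)  # (2, 1)
```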
paddle/operators/squared_l2_distance_op.h

```diff
@@ -20,9 +20,6 @@ namespace paddle {
 namespace operators {

 using Tensor = framework::Tensor;
-template <typename T, size_t D, int MajorType = Eigen::RowMajor,
-          typename IndexType = Eigen::DenseIndex>
-using EigenTensor = framework::EigenTensor<T, D, MajorType, IndexType>;
 template <typename T, int MajorType = Eigen::RowMajor,
           typename IndexType = Eigen::DenseIndex>
 using EigenMatrix = framework::EigenMatrix<T, MajorType, IndexType>;
@@ -31,64 +28,39 @@ template <typename Place, typename T>
 class SquaredL2DistanceKernel : public framework::OpKernel {
  public:
   void Compute(const framework::ExecutionContext& context) const override {
-    auto* input0 = context.Input<Tensor>("X");
-    const int rank = framework::arity(input0->dims());
-    switch (rank) {
-      case 2:
-        Operate<2>(context);
-        break;
-      case 3:
-        Operate<3>(context);
-        break;
-      case 4:
-        Operate<4>(context);
-        break;
-      case 5:
-        Operate<5>(context);
-        break;
-      case 6:
-        Operate<6>(context);
-        break;
-      default:
-        // already asserted in SquaredL2DistanceOpMaker
-        break;
-    }
-  }
-
- private:
-  template <int Dims>
-  void Operate(const framework::ExecutionContext& context) const {
-    auto* input0 = context.Input<Tensor>("X");
-    auto* input1 = context.Input<Tensor>("Y");
-    auto* output0 = context.Output<Tensor>("sub_result");
-    auto* output1 = context.Output<Tensor>("Out");
-
-    output0->mutable_data<T>(context.GetPlace());
-    output1->mutable_data<T>(context.GetPlace());
-
-    auto X = EigenTensor<T, Dims>::From(*input0);
-    auto Y = EigenTensor<T, Dims>::From(*input1);
-    auto subResult = EigenTensor<T, Dims>::From(*output0);
-    auto Z = EigenMatrix<T>::From(*output1);
-
-    auto xDims = X.dimensions();
-    auto yDims = Y.dimensions();
+    auto* in0 = context.Input<Tensor>("X");
+    auto* in1 = context.Input<Tensor>("Y");
+    auto* out0 = context.Output<Tensor>("sub_result");
+    auto* out1 = context.Output<Tensor>("Out");
+
+    auto in0_dims = in0->dims();
+    auto in1_dims = in1->dims();
+
+    int cols = framework::product(in0_dims) / in0_dims[0];
+    // reduce dimensions except the first
+    auto x =
+        EigenMatrix<T>::From(*in0, framework::make_ddim({in0_dims[0], cols}));
+    auto y =
+        EigenMatrix<T>::From(*in1, framework::make_ddim({in1_dims[0], cols}));
+
+    out0->mutable_data<T>(context.GetPlace());
+    out1->mutable_data<T>(context.GetPlace());
+    auto sub_result = EigenMatrix<T>::From(*out0);
+    auto z = EigenMatrix<T>::From(*out1);

     auto place = context.GetEigenDevice<Place>();
+    auto x_dims = x.dimensions();
+    auto y_dims = y.dimensions();
     // buffer the substraction result
-    if (yDims[0] == 1 && xDims[0] != yDims[0]) {
-      auto yBroadcastDims = yDims;
-      yBroadcastDims[0] = xDims[0];
-      subResult.device(place) = X - Y.broadcast(yBroadcastDims);
+    if (y_dims[0] == 1 && x_dims[0] > y_dims[0]) {
+      auto y_broadcast_dims = y_dims;
+      y_broadcast_dims[0] = x_dims[0];
+      sub_result.device(place) = x - y.broadcast(y_broadcast_dims);
     } else {
-      subResult.device(place) = X - Y;
+      sub_result.device(place) = x - y;
     }
-
-    // create matrix view for substraction result
-    const auto& subResMat = subResult.reshape(Eigen::array<int, 2>(
-        {static_cast<int>(xDims[0]), static_cast<int>(X.size() / xDims[0])}));
-    Z.device(place) = subResMat.pow(2).sum(Eigen::array<int, 1>({1}));
+    z.device(place) = sub_result.pow(2).sum(Eigen::array<int, 1>({1}));
   }
 };
```
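The key change in the forward kernel: instead of dispatching on tensor rank through the old Operate<2..6> switch, it now views any input as a 2-D matrix whose second dimension is the product of all dimensions after the first. A small NumPy sketch (ours, for illustration only) of why that view is rank-agnostic:

```python
import numpy as np

# A rank-4 tensor stands in for any rank >= 2 input.
t = np.arange(24, dtype=np.float32).reshape(2, 3, 2, 2)

# cols mirrors framework::product(in0_dims) / in0_dims[0]:
# the product of all dimensions except the first.
cols = t.size // t.shape[0]
m = t.reshape(t.shape[0], cols)  # the 2-D "EigenMatrix" view

# Row-wise reductions on the matrix view equal reductions over all
# non-batch axes of the original tensor, whatever its rank was.
assert np.allclose((m ** 2).sum(axis=1), (t ** 2).sum(axis=(1, 2, 3)))
```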
```diff
@@ -96,77 +68,47 @@ template <typename Place, typename T>
 class SquaredL2DistanceGradKernel : public framework::OpKernel {
  public:
   void Compute(const framework::ExecutionContext& context) const override {
-    auto* input0 = context.Input<Tensor>("sub_result");
-    const int rank = framework::arity(input0->dims());
-    switch (rank) {
-      case 2:
-        Operate<2>(context);
-        break;
-      case 3:
-        Operate<3>(context);
-        break;
-      case 4:
-        Operate<4>(context);
-        break;
-      case 5:
-        Operate<5>(context);
-        break;
-      case 6:
-        Operate<6>(context);
-        break;
-      default:
-        // already asserted in SquaredL2DistanceOpMaker
-        break;
-    }
-  }
-
- private:
-  template <int Dims>
-  void Operate(const framework::ExecutionContext& context) const {
-    auto* input0 = context.Input<Tensor>("sub_result");
-    auto* OG = context.Input<Tensor>(framework::GradVarName("Out"));
-    auto* XG = context.Output<Tensor>(framework::GradVarName("X"));
-    auto* YG = context.Output<Tensor>(framework::GradVarName("Y"));
-
-    auto subResult = EigenTensor<T, Dims>::From(*input0);
-    auto outGrad = EigenMatrix<T>::From(*OG);
-
-    auto subResDims = subResult.dimensions();
-    int firstDim = static_cast<int>(subResDims[0]);
-    int cols = subResult.size() / firstDim;
-    const auto subResMat =
-        subResult.reshape(Eigen::array<int, 2>({firstDim, cols}));
+    auto* in0 = context.Input<Tensor>("sub_result");
+    auto* in1 = context.Input<Tensor>(framework::GradVarName("Out"));
+    auto* x_g = context.Output<Tensor>(framework::GradVarName("X"));
+    auto* y_g = context.Output<Tensor>(framework::GradVarName("Y"));
+
+    auto sub_result = EigenMatrix<T>::From(*in0);
+    auto out_grad = EigenMatrix<T>::From(*in1);
+
+    auto x_dims = x_g->dims();
+    auto y_dims = y_g->dims();
+
+    int cols = framework::product(x_dims) / x_dims[0];
     // calculate gradient
-    auto gradMat = 2 * (outGrad.broadcast(Eigen::array<int, 2>({1, cols}))) *
-                   subResMat;
+    auto grad_mat = 2 * (out_grad.broadcast(Eigen::array<int, 2>({1, cols}))) *
+                    sub_result;
     // propagate back to input
-    auto eigenPlace = context.GetEigenDevice<Place>();
-    if (XG != nullptr) {
-      XG->mutable_data<T>(context.GetPlace());
-      auto xGrad = EigenTensor<T, Dims>::From(*XG);
-      // dimensions are same with subResult
-      auto xGradMat = xGrad.reshape(Eigen::array<int, 2>({firstDim, cols}));
-      xGradMat.device(eigenPlace) = gradMat;
+    auto eigen_place = context.GetEigenDevice<Place>();
+    if (x_g != nullptr) {
+      x_g->mutable_data<T>(context.GetPlace());
+      // eigen matrix
+      auto x_grad =
+          EigenMatrix<T>::From(*x_g, framework::make_ddim({x_dims[0], cols}));
+      // dimensions are same with subResult
+      x_grad.device(eigen_place) = grad_mat;
     }
-    if (YG != nullptr) {
-      YG->mutable_data<T>(context.GetPlace());
-      auto yGrad = EigenTensor<T, Dims>::From(*YG);
-      auto dimsYGrad = yGrad.dimensions();
-      auto yGradMat = yGrad.reshape(Eigen::array<int, 2>(
-          {static_cast<int>(dimsYGrad[0]),
-           static_cast<int>(yGrad.size() / dimsYGrad[0])}));
-
-      PADDLE_ENFORCE(dimsYGrad[0] <= firstDim,
+    if (y_g != nullptr) {
+      y_g->mutable_data<T>(context.GetPlace());
+      auto y_grad =
+          EigenMatrix<T>::From(*y_g, framework::make_ddim({y_dims[0], cols}));
+
+      PADDLE_ENFORCE(sub_result.dimensions()[0] >= y_dims[0],
                      "First dimension of gradient must be greater or "
                      "equal than first dimension of target");
-      if (dimsYGrad[0] == firstDim) {
-        yGradMat.device(eigenPlace) = -1 * gradMat;
+      if (sub_result.dimensions()[0] == y_dims[0]) {
+        y_grad.device(eigen_place) = -1 * grad_mat;
       } else {
-        yGradMat.device(eigenPlace) =
-            -1 * (gradMat.sum(Eigen::array<int, 2>({0})));
+        y_grad.device(eigen_place) =
+            -1 * (grad_mat.sum(Eigen::array<int, 2>({0})));
       }
     }
   }
 };
```
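Reading the new gradient kernel as plain array math (our interpretation, not code from the commit): out_grad has shape (N, 1) and is broadcast across the cols columns of the buffered sub_result; the target gradient is the negation of the input gradient, column-summed over the batch when Y's first dimension was broadcast from 1. A NumPy sketch:

```python
import numpy as np

def squared_l2_distance_grad(sub_result, out_grad, y_rows):
    """sub_result: (N, cols) buffered x - y; out_grad: (N, 1)."""
    # grad_mat = 2 * broadcast(out_grad) * sub_result
    grad_mat = 2 * out_grad * sub_result
    x_grad = grad_mat  # same shape as the flattened input
    if y_rows == sub_result.shape[0]:
        y_grad = -grad_mat
    else:
        # Y contributed its single row to every row of X, so its
        # gradient accumulates over the batch dimension.
        y_grad = -grad_mat.sum(axis=0, keepdims=True)
    return x_grad, y_grad
```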
python/paddle/v2/framework/tests/test_squared_l2_distance_op.py

```diff
@@ -21,5 +21,15 @@ class TestSquaredL2DistanceOp(unittest.TestCase):
         }


+class TestSquaredL2DistanceGradOp(GradientChecker):
+    def test_squared_l2_distance(self):
+        op = create_op("squared_l2_distance")
+        inputs = {
+            'X': np.random.uniform(0.1, 1., (2, 3)).astype('float32'),
+            'Y': np.random.uniform(0.1, 1., (2, 3)).astype('float32')
+        }
+        self.check_grad(op, inputs, set(["X", "Y"]), "Out")
+
+
 if __name__ == '__main__':
     unittest.main()
```
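check_grad compares the operator's analytic gradients against numeric ones. As a rough stand-alone analogue (our sketch; the framework's GradientChecker API differs), a central finite difference on the summed distance recovers the analytic 2 * (x - y):

```python
import numpy as np

x = np.random.uniform(0.1, 1.0, (2, 3))
y = np.random.uniform(0.1, 1.0, (2, 3))

def f(x):
    # sum of all per-row squared L2 distances -> a scalar loss
    return ((x - y) ** 2).sum()

eps = 1e-5
numeric = np.zeros_like(x)
for i in range(x.shape[0]):
    for j in range(x.shape[1]):
        d = np.zeros_like(x)
        d[i, j] = eps
        numeric[i, j] = (f(x + d) - f(x - d)) / (2 * eps)

analytic = 2 * (x - y)  # d/dx of the squared distance
assert np.allclose(numeric, analytic, atol=1e-4)
```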