BaiXuePrincess / Paddle (forked from PaddlePaddle / Paddle)

Commit ca1502c7
Authored by dengkaipeng on Feb 21, 2019; committed by ceci3 on Mar 06, 2019
add grad kernel for spectral_norm. test=develop
Parent: 8956a596
Showing 2 changed files with 104 additions and 33 deletions.
paddle/fluid/operators/spectral_norm_op.h  (+63 −29)
python/paddle/fluid/tests/unittests/test_spectral_norm_op.py  (+41 −4)
paddle/fluid/operators/spectral_norm_op.h

@@ -27,18 +27,18 @@ using Array1 = Eigen::DSizes<int64_t, 1>;
 using Array2 = Eigen::DSizes<int64_t, 2>;
 using IndexPair = Eigen::IndexPair<int>;
 
-static inline void ResizeWeight(Tensor* weight_mat, const int dim) {
-  auto weight_dims = weight_mat->dims();
-  int h = 1;
-  int w = 1;
+static inline void CalcMatrixShape(const Tensor& weight, const int dim, int* h,
+                                   int* w) {
+  auto weight_dims = weight.dims();
+  *h = 1;
+  *w = 1;
   for (int i = 0; i < weight_dims.size(); i++) {
     if (i <= dim) {
-      h *= weight_dims[i];
+      *h *= weight_dims[i];
     } else {
-      w *= weight_dims[i];
+      *w *= weight_dims[i];
     }
   }
-  *weight_mat = weight_mat->Resize({h, w});
 }
 
 template <typename DeviceContext, typename T>
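The hunk above replaces ResizeWeight, which reshaped the tensor in place, with CalcMatrixShape, which only reports the flattened 2-D shape: dimensions up to and including dim fold into the height h, the remaining ones into the width w, and the caller now performs the Resize itself. A minimal NumPy sketch of the same flattening (flatten_to_matrix is a hypothetical name, for illustration only):

    import numpy as np

    def flatten_to_matrix(weight, dim):
        # Fold dims[0..dim] into h and dims[dim+1..] into w,
        # mirroring what CalcMatrixShape computes above.
        h = int(np.prod(weight.shape[:dim + 1]))
        w = int(np.prod(weight.shape[dim + 1:]))
        return weight.reshape(h, w)

    # A (2, 3, 3, 3) weight with dim=1 flattens to 6 x 9, matching the
    # u_shape=(6,) / v_shape=(9,) cases in the updated tests below.
    print(flatten_to_matrix(np.zeros((2, 3, 3, 3)), 1).shape)  # (6, 9)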
@@ -55,42 +55,27 @@ static inline void CalcMatrixSigmaAndNormWeight(
   const int h = weight->dims()[0];
   const int w = weight->dims()[1];
 
-  // LOG(ERROR) << "weight: " << weight_t;
-  // LOG(ERROR) << "weight_trans: " << weight_trans_t;
   for (int i = 0; i < power_iters; i++) {
-    // v_t.device(place) = weight_trans_t.contract(u_t, product_dims);
     blas.MatMul(*weight, true, *u, false, T(1), v, T(0));
-    // LOG(ERROR) << "iter v: " << v_t;
     auto v_t_norm =
         v_t.square().sum().sqrt().eval().reshape(Array1(1)).broadcast(
             Array1(w));
-    // LOG(ERROR) << "iter v_norm: " << v_t_norm;
     v_t.device(place) = v_t / (v_t_norm + v_t_norm.constant(eps));
-    // LOG(ERROR) << "iter norm v: " << v_t;
-    // u_t.device(place) = weight_t.contract(v_t, product_dims);
     blas.MatMul(*weight, false, *v, false, T(1), u, T(0));
-    // LOG(ERROR) << "iter u: " << u_t;
     auto u_t_norm =
         u_t.square().sum().sqrt().eval().reshape(Array1(1)).broadcast(
             Array1(h));
     u_t.device(place) = u_t / (u_t_norm + u_t_norm.constant(eps));
-    // LOG(ERROR) << "iter norm u: " << u_t;
   }
-  // LOG(ERROR) << "h" << h << "w" << w;
-  // LOG(ERROR) << "u: " << u_t;
-  // LOG(ERROR) << "v: " << v_t;
   Tensor weight_v;
   weight_v.mutable_data<T>({h, 1}, ctx.GetPlace());
   blas.MatMul(*weight, false, *v, false, T(1), &weight_v, T(0));
   auto weight_v_t = EigenTensor<T, 2>::From(weight_v);
-  // LOG(ERROR) << "weight_v: " << weight_v_t;
   sigma_t.device(place) = (u_t * weight_v_t)
                               .sum()
                               .eval()
                               .reshape(Array2(1, 1))
                               .broadcast(Array2(h, w));
-  // LOG(ERROR) << "weight: " << weight_t;
-  // LOG(ERROR) << "sigma: " << sigma_t;
   weight_t.device(place) = weight_t / sigma_t;
 }
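The hunk above mostly strips commented-out LOG(ERROR) debug lines from CalcMatrixSigmaAndNormWeight, which estimates sigma = u^T W v by power iteration and then divides the weight by it. A NumPy restatement of that loop (a simplified sketch assuming a 2-D weight and 1-D u, v, not the exact Eigen/BLAS calls):

    import numpy as np

    def sigma_by_power_iteration(W, u, v, power_iters, eps=1e-12):
        # Alternately refine v ~ W^T u and u ~ W v, normalizing each;
        # u^T W v then approximates the largest singular value of W.
        for _ in range(power_iters):
            v = W.T.dot(u)
            v = v / (np.linalg.norm(v) + eps)
            u = W.dot(v)
            u = u / (np.linalg.norm(u) + eps)
        sigma = u.dot(W.dot(v))
        return sigma, u, v

    W = np.random.normal(0., 1., (6, 9)).astype('float32')
    u = np.random.normal(0., 1., (6, )).astype('float32')
    v = np.random.normal(0., 1., (9, )).astype('float32')
    sigma, u, v = sigma_by_power_iteration(W, u, v, power_iters=10)
    print(sigma, np.linalg.svd(W, compute_uv=False)[0])  # should be close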
@@ -107,29 +92,78 @@ class SpectralNormKernel : public framework::OpKernel<T> {
     int power_iters = ctx.Attr<int>("power_iters");
     float eps = ctx.Attr<float>("eps");
 
-    const int h = weight->dims()[0];
-    const int w = weight->dims()[1];
-
     Tensor weight_mat;
+    int h, w;
+    CalcMatrixShape(*weight, dim, &h, &w);
     TensorCopySync(*weight, ctx.GetPlace(), &weight_mat);
-    ResizeWeight(&weight_mat, dim);
+    weight_mat = weight_mat.Resize({h, w});
 
     Tensor sigma;
-    sigma.mutable_data<T>(weight->dims(), ctx.GetPlace());
+    sigma.mutable_data<T>(weight_mat.dims(), ctx.GetPlace());
     Tensor uu, vv;
     TensorCopySync(*u, ctx.GetPlace(), &uu);
     TensorCopySync(*v, ctx.GetPlace(), &vv);
     CalcMatrixSigmaAndNormWeight<DeviceContext, T>(
         &sigma, &(uu.Resize({h, 1})), &(vv.Resize({w, 1})), &weight_mat,
         power_iters, eps, ctx);
-    TensorCopySync(weight_mat, ctx.GetPlace(), out);
+    TensorCopySync(weight_mat.Resize(out->dims()), ctx.GetPlace(), out);
   }
 };
 
 template <typename DeviceContext, typename T>
 class SpectralNormGradKernel : public framework::OpKernel<T> {
  public:
-  void Compute(const framework::ExecutionContext& ctx) const override {}
+  void Compute(const framework::ExecutionContext& ctx) const override {
+    auto& place = *ctx.template device_context<DeviceContext>().eigen_device();
+    auto blas = math::GetBlas<DeviceContext, T>(ctx);
+    auto weight = ctx.Input<Tensor>("Weight");
+    auto u = ctx.Input<Tensor>("U");
+    auto v = ctx.Input<Tensor>("V");
+    auto out_grad = ctx.Input<Tensor>(framework::GradVarName("Out"));
+    auto weight_grad = ctx.Output<Tensor>(framework::GradVarName("Weight"));
+
+    int dim = ctx.Attr<int>("dim");
+    int power_iters = ctx.Attr<int>("power_iters");
+    float eps = ctx.Attr<float>("eps");
+
+    Tensor weight_mat, out_grad_mat;
+    int h, w;
+    CalcMatrixShape(*weight, dim, &h, &w);
+    TensorCopySync(*weight, ctx.GetPlace(), &weight_mat);
+    TensorCopySync(*out_grad, ctx.GetPlace(), &out_grad_mat);
+    weight_mat = weight_mat.Resize({h, w});
+    out_grad_mat = out_grad_mat.Resize({h, w});
+
+    Tensor sigma;
+    sigma.mutable_data<T>(weight_mat.dims(), ctx.GetPlace());
+    Tensor uu, vv;
+    TensorCopySync(*u, ctx.GetPlace(), &uu);
+    TensorCopySync(*v, ctx.GetPlace(), &vv);
+    CalcMatrixSigmaAndNormWeight<DeviceContext, T>(
+        &sigma, &(uu.Resize({h, 1})), &(vv.Resize({w, 1})), &weight_mat,
+        power_iters, eps, ctx);
+
+    Tensor uv;
+    uv.mutable_data<T>({h, w}, ctx.GetPlace());
+    blas.MatMul(uu.Resize({h, 1}), false, vv.Resize({w, 1}), false, T(1), &uv,
+                T(0));
+
+    Tensor weight_grad_mat, ones;
+    weight_grad_mat.mutable_data<T>({h, w}, ctx.GetPlace());
+    ones.mutable_data<T>({h, w}, ctx.GetPlace());
+    auto weight_grad_mat_t = EigenTensor<T, 2>::From(weight_grad_mat);
+    auto weight_mat_t = EigenTensor<T, 2>::From(weight_mat);
+    auto out_grad_mat_t = EigenTensor<T, 2>::From(out_grad_mat);
+    auto sigma_t = EigenTensor<T, 2>::From(sigma);
+    auto uv_t = EigenTensor<T, 2>::From(uv);
+    auto ones_t = EigenTensor<T, 2>::From(ones).setConstant((T)1);
+    weight_mat_t.device(place) =
+        weight_mat_t.sum().eval().reshape(Array2(1, 1)).broadcast(Array2(h, w));
+    weight_grad_mat_t.device(place) =
+        out_grad_mat_t * (ones_t - uv_t * weight_mat_t) / sigma_t;
+    TensorCopySync(weight_grad_mat.Resize(weight_grad->dims()), ctx.GetPlace(),
+                   weight_grad);
+  }
 };
 
 }  // namespace operators
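Restated in NumPy, the new SpectralNormGradKernel computes, elementwise on the flattened h x w matrices, weight_grad = out_grad * (1 - u v^T * sum(W_n)) / sigma, where W_n is the weight already normalized by sigma inside CalcMatrixSigmaAndNormWeight. A sketch of just that final expression (illustrative names, not Paddle API; whether it matches the analytic gradient is what the new finite-difference test below exercises):

    import numpy as np

    def spectral_norm_weight_grad(W_n, out_grad, u, v, sigma):
        # W_n: sigma-normalized (h, w) weight; out_grad: gradient w.r.t. Out.
        uv = np.outer(u, v)    # rank-1 u v^T, shape (h, w)
        s = W_n.sum()          # scalar, broadcast like the Eigen code above
        return out_grad * (1.0 - uv * s) / sigma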
python/paddle/fluid/tests/unittests/test_spectral_norm_op.py

@@ -44,13 +44,13 @@ def spectral_norm(weight, u, v, dim, power_iters, eps):
     return (weight_mat / sigma).reshape(weight.shape)
 
 
-class TestSpectralNormOp(OpTest):
+class TestSpectralNormOpNoGrad(OpTest):
     def setUp(self):
         self.initTestCase()
         self.op_type = 'spectral_norm'
         weight = np.random.random(self.weight_shape).astype('float32')
-        u = np.random.random(self.u_shape).astype('float32')
-        v = np.random.random(self.v_shape).astype('float32')
+        u = np.random.normal(0., 1., self.u_shape).astype('float32')
+        v = np.random.normal(0., 1., self.v_shape).astype('float32')
 
         self.attrs = {
             "dim": self.dim,
@@ -76,7 +76,44 @@ class TestSpectralNormOp(OpTest):
         self.u_shape = (2, )
         self.v_shape = (3, )
         self.dim = 0
-        self.power_iters = 2
+        self.power_iters = 5
         self.eps = 1e-12
 
 
+class TestSpectralNormOpNoGrad2(TestSpectralNormOpNoGrad):
+    def initTestCase(self):
+        self.weight_shape = (2, 3, 3, 3)
+        self.u_shape = (6, )
+        self.v_shape = (9, )
+        self.dim = 1
+        self.power_iters = 10
+        self.eps = 1e-12
+
+
+class TestSpectralNormOp(TestSpectralNormOpNoGrad):
+    def test_check_grad_ignore_uv(self):
+        self.check_grad(
+            ['Weight'],
+            'Out',
+            no_grad_set=set(["U", "V"]),
+            max_relative_error=0.1)
+
+    def initTestCase(self):
+        self.weight_shape = (2, 3)
+        self.u_shape = (2, )
+        self.v_shape = (3, )
+        self.dim = 0
+        self.power_iters = 0
+        self.eps = 1e-12
+
+
+class TestSpectralNormOp2(TestSpectralNormOp):
+    def initTestCase(self):
+        self.weight_shape = (2, 3, 3, 3)
+        self.u_shape = (6, )
+        self.v_shape = (9, )
+        self.dim = 1
+        self.power_iters = 0
+        self.eps = 1e-12
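The gradient-checked cases set power_iters = 0, so u and v enter sigma = u^T W v as fixed inputs (they are also excluded via no_grad_set); with power iterations enabled, u and v would themselves depend on W in a way the kernel does not differentiate, which is presumably why those cases remain forward-only (the NoGrad classes). A self-contained finite-difference sketch of the power_iters = 0 forward, simplified from the test file's reference helper:

    import numpy as np

    def spectral_norm_ref(W, u, v):
        # power_iters=0 reference: u and v are used as-is.
        sigma = u.dot(W.dot(v))
        return W / sigma

    def numeric_grad(f, x, dx=1e-4):
        # Central differences of sum(f(x)) w.r.t. each element of x.
        g = np.zeros_like(x)
        it = np.nditer(x, flags=['multi_index'])
        while not it.finished:
            i = it.multi_index
            orig = x[i]
            x[i] = orig + dx
            fp = f(x).sum()
            x[i] = orig - dx
            fm = f(x).sum()
            x[i] = orig
            g[i] = (fp - fm) / (2 * dx)
            it.iternext()
        return g

    W = np.random.normal(0., 1., (2, 3))
    u = np.random.normal(0., 1., (2, ))
    v = np.random.normal(0., 1., (3, ))
    print(numeric_grad(lambda W_: spectral_norm_ref(W_, u, v), W))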