PaddlePaddle / Paddle
Unverified commit 076dcdfd
Authored by zhongpu on Apr 10, 2020; committed via GitHub on Apr 10, 2020.
add instance_norm op for dygraph (#23362)

* add instance_norm op for dygraph, test=develop
* add error message, test=develop
Parent: cb36478a
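The commit exposes the new layer as paddle.nn.InstanceNorm. As a quick orientation before the diffs, here is a minimal dygraph usage sketch, adapted from the docstring example added in this commit (Paddle API as of April 2020):

    import numpy as np
    import paddle
    import paddle.fluid as fluid
    from paddle.fluid.dygraph.base import to_variable

    # x has shape [N=1, C=3, H=1, W=2]; InstanceNorm normalizes each
    # feature map over its own H*W elements.
    x = np.array([[[[1.0, 8.0]], [[10.0, 5.0]], [[4.0, 6.0]]]]).astype('float32')
    with fluid.dygraph.guard():
        instance_norm = paddle.nn.InstanceNorm(num_channels=3)
        out = instance_norm(to_variable(x))
        print(out.numpy())  # shape [1, 3, 1, 2], each channel close to [-1, 1]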
Showing 5 changed files with 195 additions and 3 deletions (+195 −3):
- python/paddle/fluid/dygraph/nn.py (+129 −3)
- python/paddle/fluid/tests/unittests/test_layers.py (+55 −0)
- python/paddle/nn/__init__.py (+6 −0)
- python/paddle/nn/layer/__init__.py (+2 −0)
- python/paddle/nn/layer/norm.py (+3 −0)
python/paddle/fluid/dygraph/nn.py (view file @ 076dcdfd)
@@ -33,9 +33,9 @@ import logging

 __all__ = [
     'Conv2D', 'Conv3D', 'Pool2D', 'Linear', 'BatchNorm', 'Dropout', 'Embedding',
-    'GRUUnit', 'LayerNorm', 'NCE', 'PRelu', 'BilinearTensorProduct',
-    'Conv2DTranspose', 'Conv3DTranspose', 'GroupNorm', 'SpectralNorm',
-    'TreeConv'
+    'GRUUnit', 'InstanceNorm', 'LayerNorm', 'NCE', 'PRelu',
+    'BilinearTensorProduct', 'Conv2DTranspose', 'Conv3DTranspose', 'GroupNorm',
+    'SpectralNorm', 'TreeConv'
 ]
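Because 'InstanceNorm' is added to __all__ above, the class also becomes part of the module's public surface. A small sanity sketch (assuming a build containing this commit):

    import paddle.fluid as fluid

    # 'InstanceNorm' should now appear in the dygraph nn exports.
    print('InstanceNorm' in fluid.dygraph.nn.__all__)  # expected: True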
@@ -971,6 +971,132 @@ class Linear(layers.Layer):
         return self._helper.append_activation(pre_activation, act=self._act)
 
 
+class InstanceNorm(layers.Layer):
+    """
+    This interface is used to construct a callable object of the ``InstanceNorm`` class.
+    For more details, refer to code examples.
+
+    Can be used as a normalizer function for convolution or fully_connected operations.
+    The required data format for this layer is one of the following:
+
+    DataLayout: NCHW `[batch, in_channels, in_height, in_width]`
+
+    Refer to `Instance Normalization: The Missing Ingredient for Fast Stylization <https://arxiv.org/pdf/1607.08022.pdf>`_
+    for more details.
+
+    :math:`input` is the input features over a mini-batch.
+
+    ..  math::
+
+        \\mu_{\\beta} &\\gets \\frac{1}{HW} \\sum_{i=1}^{HW} x_i \\qquad &//\\ mean\\ of\\ one\\ feature\\ map\\ in\\ mini-batch \\\\
+        \\sigma_{\\beta}^{2} &\\gets \\frac{1}{HW} \\sum_{i=1}^{HW}(x_i - \\mu_{\\beta})^2 \\qquad &//\\ variance\\ of\\ one\\ feature\\ map\\ in\\ mini-batch \\\\
+        \\hat{x_i} &\\gets \\frac{x_i - \\mu_{\\beta}}{\\sqrt{\\sigma_{\\beta}^{2} + \\epsilon}} \\qquad &//\\ normalize \\\\
+        y_i &\\gets \\gamma \\hat{x_i} + \\beta \\qquad &//\\ scale\\ and\\ shift
+
+    Note:
+        `H` means height of feature map, `W` means width of feature map.
+
+    Parameters:
+        num_channels(int): Indicate the number of channels of the input ``Tensor``.
+        epsilon(float, optional): A value added to the denominator for
+            numerical stability. Default is 1e-5.
+        param_attr(ParamAttr, optional): The parameter attribute for Parameter `scale`
+            of instance_norm. If it is set to None or one attribute of ParamAttr, instance_norm
+            will create ParamAttr as param_attr; the name of scale can be set in ParamAttr.
+            If the Initializer of the param_attr is not set, the parameter is initialized
+            to one. Default: None.
+        bias_attr(ParamAttr, optional): The parameter attribute for the bias of instance_norm.
+            If it is set to None or one attribute of ParamAttr, instance_norm
+            will create ParamAttr as bias_attr; the name of bias can be set in ParamAttr.
+            If the Initializer of the bias_attr is not set, the bias is initialized to zero.
+            Default: None.
+        dtype(str, optional): Indicate the data type of the input ``Tensor``,
+            which can be float32 or float64. Default: float32.
+
+    Returns:
+        None.
+
+    Examples:
+        .. code-block:: python
+
+          import paddle.fluid as fluid
+          from paddle.fluid.dygraph.base import to_variable
+          import numpy as np
+          import paddle
+
+          # x's shape is [1, 3, 1, 2]
+          x = np.array([[[[1.0, 8.0]], [[10.0, 5.0]], [[4.0, 6.0]]]]).astype('float32')
+          with fluid.dygraph.guard():
+              x = to_variable(x)
+              instanceNorm = paddle.nn.InstanceNorm(3)
+              ret = instanceNorm(x)
+              # ret's shape is [1, 3, 1, 2]; value is [-1 1 0.999999 -0.999999 -0.999995 0.999995]
+              print(ret)
+
+    """
+
+    def __init__(self,
+                 num_channels,
+                 epsilon=1e-5,
+                 param_attr=None,
+                 bias_attr=None,
+                 dtype='float32'):
+        super(InstanceNorm, self).__init__()
+        assert bias_attr is not False, "bias_attr should not be False in InstanceNorm."
+
+        self._epsilon = epsilon
+        self._param_attr = param_attr
+        self._bias_attr = bias_attr
+        self._dtype = dtype
+
+        self.scale = self.create_parameter(
+            attr=self._param_attr,
+            shape=[num_channels],
+            dtype=self._dtype,
+            default_initializer=Constant(1.0),
+            is_bias=False)
+        self.bias = self.create_parameter(
+            attr=self._bias_attr,
+            shape=[num_channels],
+            dtype=self._dtype,
+            default_initializer=Constant(0.0),
+            is_bias=True)
+
+    def forward(self, input):
+        if in_dygraph_mode():
+            out, _, _ = core.ops.instance_norm(input, self.scale, self.bias,
+                                               'epsilon', self._epsilon)
+            return out
+
+        check_variable_and_dtype(input, 'input', ['float32', 'float64'],
+                                 "InstanceNorm")
+
+        attrs = {"epsilon": self._epsilon}
+
+        inputs = {"X": [input], "Scale": [self.scale], "Bias": [self.bias]}
+
+        saved_mean = self._helper.create_variable_for_type_inference(
+            dtype=self._dtype, stop_gradient=True)
+        saved_variance = self._helper.create_variable_for_type_inference(
+            dtype=self._dtype, stop_gradient=True)
+        instance_norm_out = self._helper.create_variable_for_type_inference(
+            self._dtype)
+
+        outputs = {
+            "Y": [instance_norm_out],
+            "SavedMean": [saved_mean],
+            "SavedVariance": [saved_variance]
+        }
+
+        self._helper.append_op(
+            type="instance_norm", inputs=inputs, outputs=outputs, attrs=attrs)
+        return instance_norm_out
+
+
 class BatchNorm(layers.Layer):
     """
     This interface is used to construct a callable object of the ``BatchNorm`` class.
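For reference, the arithmetic the new layer performs can be checked against the docstring formula with plain NumPy. This is an illustrative sketch, not part of the commit; gamma and beta here stand in for the default-initialized scale (1.0) and bias (0.0):

    import numpy as np

    x = np.array([[[[1.0, 8.0]], [[10.0, 5.0]], [[4.0, 6.0]]]], dtype=np.float32)
    epsilon = 1e-5

    mu = x.mean(axis=(2, 3), keepdims=True)    # mean of each feature map
    var = x.var(axis=(2, 3), keepdims=True)    # variance of each feature map
    x_hat = (x - mu) / np.sqrt(var + epsilon)  # normalize
    y = 1.0 * x_hat + 0.0                      # scale and shift

    print(y.ravel())
    # approx. [-1.  1.  1. -1. -1.  1.], matching the docstring example output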
python/paddle/fluid/tests/unittests/test_layers.py (view file @ 076dcdfd)
@@ -1258,6 +1258,61 @@ class TestLayer(LayerTest):
         self.assertTrue(np.allclose(static_ret, dy_rlt_value))
         self.assertTrue(np.allclose(static_ret, static_ret2))
 
+    def test_instance_norm(self):
+        if core.is_compiled_with_cuda():
+            place = core.CUDAPlace(0)
+        else:
+            place = core.CPUPlace()
+
+        shape = (2, 4, 3, 3)
+
+        input = np.random.random(shape).astype('float32')
+
+        with self.static_graph():
+            X = fluid.layers.data(
+                name='X', shape=shape, dtype='float32', append_batch_size=False)
+            ret = layers.instance_norm(input=X)
+            static_ret = self.get_static_graph_result(
+                feed={'X': input}, fetch_list=[ret])[0]
+
+        with self.static_graph():
+            X = fluid.layers.data(
+                name='X', shape=shape, dtype='float32', append_batch_size=False)
+            instanceNorm = nn.InstanceNorm(num_channels=shape[1])
+            ret = instanceNorm(X)
+            static_ret2 = self.get_static_graph_result(
+                feed={'X': input}, fetch_list=[ret])[0]
+
+        with self.dynamic_graph():
+            instanceNorm = nn.InstanceNorm(num_channels=shape[1])
+            dy_ret = instanceNorm(base.to_variable(input))
+            dy_rlt_value = dy_ret.numpy()
+
+        with self.dynamic_graph():
+            instanceNorm = paddle.nn.InstanceNorm(num_channels=shape[1])
+            dy_ret = instanceNorm(base.to_variable(input))
+            dy_rlt_value2 = dy_ret.numpy()
+
+        self.assertTrue(np.allclose(static_ret, dy_rlt_value))
+        self.assertTrue(np.allclose(static_ret, dy_rlt_value2))
+        self.assertTrue(np.allclose(static_ret, static_ret2))
+
+        with self.static_graph():
+            # the input of InstanceNorm must be Variable.
+            def test_Variable():
+                instanceNorm = paddle.nn.InstanceNorm(num_channels=shape[1])
+                ret1 = instanceNorm(input)
+
+            self.assertRaises(TypeError, test_Variable)
+
+            # the input dtype of InstanceNorm must be float32 or float64
+            def test_type():
+                input = np.random.random(shape).astype('int32')
+                instanceNorm = paddle.nn.InstanceNorm(num_channels=shape[1])
+                ret2 = instanceNorm(input)
+
+            self.assertRaises(TypeError, test_type)
+
     def test_spectral_norm(self):
         if core.is_compiled_with_cuda():
             place = core.CUDAPlace(0)
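To run just the new test case, something along these lines should work (a sketch; it assumes the working directory is python/paddle/fluid/tests/unittests in a Paddle checkout with this commit built):

    import unittest

    import test_layers  # the test module changed in this commit

    suite = unittest.defaultTestLoader.loadTestsFromName(
        'TestLayer.test_instance_norm', test_layers)
    unittest.TextTestRunner(verbosity=2).run(suite)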
python/paddle/nn/__init__.py (view file @ 076dcdfd)
@@ -16,6 +16,11 @@
 # including layers, linear, conv, rnn etc.
 
 # __all__ = []
+from .layer import norm
+
+__all__ = []
+__all__ += norm.__all__
+
 # TODO: define alias in nn directory
 # from .clip import ErrorClipByValue    #DEFINE_ALIAS
 # from .clip import GradientClipByGlobalNorm    #DEFINE_ALIAS

@@ -73,6 +78,7 @@ from .layer.conv import Conv2D, Conv2DTranspose, Conv3D, Conv3DTranspose #DEFIN
 # from .layer.norm import BatchNorm    #DEFINE_ALIAS
 # from .layer.norm import GroupNorm    #DEFINE_ALIAS
 # from .layer.norm import LayerNorm    #DEFINE_ALIAS
+from .layer.norm import InstanceNorm    #DEFINE_ALIAS
 # from .layer.norm import SpectralNorm    #DEFINE_ALIAS
 # from .layer.activation import PReLU    #DEFINE_ALIAS
 # from .layer.activation import ReLU    #DEFINE_ALIAS
python/paddle/nn/layer/__init__.py (view file @ 076dcdfd)
@@ -16,6 +16,8 @@
 from . import loss
 from . import conv
+from . import norm
 
 from .loss import *
 from .conv import *
+from .norm import *
python/paddle/nn/layer/norm.py (view file @ 076dcdfd)
@@ -17,3 +17,6 @@
 #            'GroupNorm',
 #            'LayerNorm',
 #            'SpectralNorm']
+__all__ = ['InstanceNorm']
+
+from ...fluid.dygraph.nn import InstanceNorm
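With these re-exports in place, paddle.nn.InstanceNorm and the fluid implementation should be the same object; a quick hedged check (assuming a build containing this commit):

    import paddle
    from paddle.fluid.dygraph.nn import InstanceNorm as FluidInstanceNorm

    # norm.py re-exports the class from fluid.dygraph.nn, so both names
    # refer to one and the same class object.
    assert paddle.nn.InstanceNorm is FluidInstanceNorm
    print(paddle.nn.__all__)  # expected to contain 'InstanceNorm'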