PaddlePaddle / Paddle — Commit 0b3b4918 (unverified)
Authored by JYChen on Nov 07, 2022; committed via GitHub on Nov 07, 2022.
[Fluid Clean] remove paddle.fluid.dygraph.nn.conv2D (#47441)
* remove paddle.fluid.dygraph.nn.conv2D
* fix ut
* fix conv fp16 UT
Parent: 908a381d
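The same migration pattern repeats across all 29 files: the legacy paddle.fluid.dygraph.nn.Conv2D constructor arguments map onto paddle.nn.Conv2D equivalents, and the dropped act= argument moves into the forward pass. A minimal before/after sketch (the tensor shape is illustrative, not taken from the diff):

    import paddle

    # before (removed by this commit):
    #   conv = paddle.fluid.dygraph.Conv2D(
    #       num_channels=3, num_filters=2, filter_size=3, act='relu')
    # after: the shape arguments are renamed and the activation is explicit
    conv = paddle.nn.Conv2D(in_channels=3, out_channels=2, kernel_size=3)

    x = paddle.rand([10, 3, 32, 32])        # NCHW input
    y = paddle.nn.functional.relu(conv(x))  # act= no longer exists on the layer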
Changes: showing 29 changed files with 204 additions and 737 deletions (+204 −737).
python/paddle/fluid/contrib/slim/tests/test_imperative_qat_user_defined.py  +8 −9
python/paddle/fluid/dygraph/nn.py  +0 −304
python/paddle/fluid/tests/unittests/collective/fleet/parallel_dygraph_se_resnext.py  +5 −5
python/paddle/fluid/tests/unittests/collective/fleet/test_imperative_auto_mixed_precision.py  +15 −19
python/paddle/fluid/tests/unittests/collective/fleet/test_imperative_auto_mixed_precision_for_eager.py  +15 −19
python/paddle/fluid/tests/unittests/dygraph_to_static/darknet.py  +7 −7
python/paddle/fluid/tests/unittests/dygraph_to_static/test_basic_api_transformation.py  +6 −6
python/paddle/fluid/tests/unittests/dygraph_to_static/test_bmn.py  +36 −41
python/paddle/fluid/tests/unittests/dygraph_to_static/test_convert_call.py  +6 −6
python/paddle/fluid/tests/unittests/dygraph_to_static/test_cycle_gan.py  +6 −7
python/paddle/fluid/tests/unittests/dygraph_to_static/test_mnist.py  +6 −8
python/paddle/fluid/tests/unittests/dygraph_to_static/test_mobile_net.py  +6 −8
python/paddle/fluid/tests/unittests/dygraph_to_static/test_resnet.py  +5 −6
python/paddle/fluid/tests/unittests/dygraph_to_static/test_se_resnet.py  +5 −6
python/paddle/fluid/tests/unittests/dygraph_to_static/test_sentiment.py  +6 −8
python/paddle/fluid/tests/unittests/dygraph_to_static/test_tsm.py  +7 −8
python/paddle/fluid/tests/unittests/dygraph_to_static/yolov3.py  +6 −7
python/paddle/fluid/tests/unittests/parallel_dygraph_mnist.py  +6 −7
python/paddle/fluid/tests/unittests/test_conv2d_api.py  +0 −11
python/paddle/fluid/tests/unittests/test_dygraph_mnist_fp16.py  +14 −14
python/paddle/fluid/tests/unittests/test_dygraph_multi_forward.py  +6 −7
python/paddle/fluid/tests/unittests/test_imperative_load_static_param.py  +5 −11
python/paddle/fluid/tests/unittests/test_imperative_mnist.py  +6 −7
python/paddle/fluid/tests/unittests/test_imperative_named_members.py  +2 −2
python/paddle/fluid/tests/unittests/test_imperative_ocr_attention_model.py  +6 −11
python/paddle/fluid/tests/unittests/test_imperative_resnet.py  +5 −7
python/paddle/fluid/tests/unittests/test_imperative_se_resnext.py  +5 −6
python/paddle/fluid/tests/unittests/test_imperative_star_gan_with_gradient_penalty.py  +4 −4
python/paddle/fluid/tests/unittests/test_layers.py  +0 −176
python/paddle/fluid/contrib/slim/tests/test_imperative_qat_user_defined.py

@@ -23,7 +23,6 @@ from paddle.optimizer import Adam
 from paddle.fluid.contrib.slim.quantization import ImperativeQuantAware
 from paddle.fluid.contrib.slim.quantization import QuantizationTransformPass
 from paddle.nn import Sequential
-from paddle.fluid.dygraph import Conv2D
 from paddle.fluid.dygraph import Pool2D
 from paddle.fluid.dygraph import Linear
 from paddle.nn.quant.quant_layers import QuantizedConv2DTranspose

@@ -126,18 +125,18 @@ class ImperativeLenet(paddle.nn.Layer):
     def __init__(self, num_classes=10, classifier_activation='softmax'):
         super().__init__()
         self.features = Sequential(
-            Conv2D(
-                num_channels=1,
-                num_filters=6,
-                filter_size=3,
+            paddle.nn.Conv2D(
+                in_channels=1,
+                out_channels=6,
+                kernel_size=3,
                 stride=1,
                 padding=1,
             ),
             Pool2D(pool_size=2, pool_type='max', pool_stride=2),
-            Conv2D(
-                num_channels=6,
-                num_filters=16,
-                filter_size=5,
+            paddle.nn.Conv2D(
+                in_channels=6,
+                out_channels=16,
+                kernel_size=5,
                 stride=1,
                 padding=0,
             ),
python/paddle/fluid/dygraph/nn.py

@@ -49,7 +49,6 @@ import paddle.utils.deprecated as deprecated
 from paddle import _C_ops, _legacy_C_ops

 __all__ = [
-    'Conv2D',
     'Conv3D',
     'Pool2D',
     'Linear',

@@ -71,309 +70,6 @@ __all__ = [
 ]

-class Conv2D(layers.Layer):
-    r"""
-    This interface is used to construct a callable object of the ``Conv2D`` class.
-    For more details, refer to code examples.
-    The convolution2D layer calculates the output based on the input, filter
-    and strides, paddings, dilations, groups parameters. Input and
-    Output are in NCHW format, where N is batch size, C is the number of
-    the feature map, H is the height of the feature map, and W is the width of the feature map.
-    Filter's shape is [MCHW] , where M is the number of output feature map,
-    C is the number of input feature map, H is the height of the filter,
-    and W is the width of the filter. If the groups is greater than 1,
-    C will equal the number of input feature map divided by the groups.
-    Please refer to UFLDL's `convolution
-    <http://ufldl.stanford.edu/tutorial/supervised/FeatureExtractionUsingConvolution/>`_
-    for more details.
-    If bias attribution and activation type are provided, bias is added to the
-    output of the convolution, and the corresponding activation function is
-    applied to the final result.
-    For each input :math:`X`, the equation is:
-    .. math::
-        Out = \\sigma (W \\ast X + b)
-    Where:
-    * :math:`X`: Input value, a ``Tensor`` with NCHW format.
-    * :math:`W`: Filter value, a ``Tensor`` with shape [MCHW] .
-    * :math:`\\ast`: Convolution operation.
-    * :math:`b`: Bias value, a 2-D ``Tensor`` with shape [M, 1].
-    * :math:`\\sigma`: Activation function.
-    * :math:`Out`: Output value, the shape of :math:`Out` and :math:`X` may be different.
-    Example:
-        - Input:
-          Input shape: :math:`(N, C_{in}, H_{in}, W_{in})`
-          Filter shape: :math:`(C_{out}, C_{in}, H_f, W_f)`
-        - Output:
-          Output shape: :math:`(N, C_{out}, H_{out}, W_{out})`
-    Where
-    .. math::
-        H_{out}&= \\frac{(H_{in} + 2 * paddings[0] - (dilations[0] * (H_f - 1) + 1))}{strides[0]} + 1 \\\\
-        W_{out}&= \\frac{(W_{in} + 2 * paddings[1] - (dilations[1] * (W_f - 1) + 1))}{strides[1]} + 1
-    Parameters:
-        num_channels(int): The number of channels in the input image.
-        num_filters(int): The number of filter. It is as same as the output
-            feature map.
-        filter_size (int or tuple): The filter size. If filter_size is a tuple,
-            it must contain two integers, (filter_size_H, filter_size_W).
-            Otherwise, the filter will be a square.
-        stride (int or tuple, optional): The stride size. If stride is a tuple, it must
-            contain two integers, (stride_H, stride_W). Otherwise, the
-            stride_H = stride_W = stride. Default: 1.
-        padding (int or tuple, optional): The padding size. If padding is a tuple, it must
-            contain two integers, (padding_H, padding_W). Otherwise, the
-            padding_H = padding_W = padding. Default: 0.
-        dilation (int or tuple, optional): The dilation size. If dilation is a tuple, it must
-            contain two integers, (dilation_H, dilation_W). Otherwise, the
-            dilation_H = dilation_W = dilation. Default: 1.
-        groups (int, optional): The groups number of the Conv2D Layer. According to grouped
-            convolution in Alex Krizhevsky's Deep CNN paper: when group=2,
-            the first half of the filters is only connected to the first half
-            of the input channels, while the second half of the filters is only
-            connected to the second half of the input channels. Default: 1.
-        param_attr (ParamAttr, optional): The parameter attribute for learnable weights(Parameter)
-            of conv2d. If it is set to None or one attribute of ParamAttr, conv2d
-            will create ParamAttr as param_attr. If the Initializer of the param_attr
-            is not set, the parameter is initialized with :math:`Normal(0.0, std)`,
-            and the :math:`std` is :math:`(\\frac{2.0 }{filter\_elem\_num})^{0.5}`. Default: None.
-        bias_attr (ParamAttr or bool, optional): The attribute for the bias of conv2d.
-            If it is set to False, no bias will be added to the output units.
-            If it is set to None or one attribute of ParamAttr, conv2d
-            will create ParamAttr as bias_attr. If the Initializer of the bias_attr
-            is not set, the bias is initialized zero. Default: None.
-        use_cudnn (bool, optional): Use cudnn kernel or not, it is valid only when the cudnn
-            library is installed. Default: True.
-        act (str, optional): Activation type, if it is set to None, activation is not appended.
-            Default: None.
-        dtype (str, optional): Data type, it can be "float32" or "float64". Default: "float32".
-    Attribute:
-        **weight** (Parameter): the learnable weights of filter of this layer.
-        **bias** (Parameter or None): the learnable bias of this layer.
-    Returns:
-        None
-    Raises:
-        ValueError: if ``use_cudnn`` is not a bool value.
-    Examples:
-        .. code-block:: python
-          from paddle.fluid.dygraph.base import to_variable
-          import paddle.fluid as fluid
-          from paddle.fluid.dygraph import Conv2D
-          import numpy as np
-          data = np.random.uniform(-1, 1, [10, 3, 32, 32]).astype('float32')
-          with fluid.dygraph.guard():
-              conv2d = Conv2D(3, 2, 3)
-              data = to_variable(data)
-              conv = conv2d(data)
-    """
-
-    def __init__(
-        self,
-        num_channels,
-        num_filters,
-        filter_size,
-        stride=1,
-        padding=0,
-        dilation=1,
-        groups=None,
-        param_attr=None,
-        bias_attr=None,
-        use_cudnn=True,
-        act=None,
-        dtype='float32',
-    ):
-        assert param_attr is not False, "param_attr should not be False here."
-        super().__init__()
-
-        if (
-            core.is_compiled_with_cuda()
-            and paddle.fluid.get_flags("FLAGS_conv2d_disable_cudnn")[
-                "FLAGS_conv2d_disable_cudnn"
-            ]
-        ):
-            use_cudnn = False
-
-        self._num_channels = num_channels
-        self._groups = groups
-        self._stride = utils.convert_to_list(stride, 2, 'stride')
-        self._padding = utils.convert_to_list(padding, 2, 'padding')
-        self._dilation = utils.convert_to_list(dilation, 2, 'dilation')
-        self._act = act
-        if not isinstance(use_cudnn, bool):
-            raise ValueError("use_cudnn should be True or False")
-        self._use_cudnn = use_cudnn
-        self._use_mkldnn = _global_flags()["FLAGS_use_mkldnn"]
-        self._filter_size = filter_size
-        self._num_filters = num_filters
-        self._param_attr = param_attr
-        self._bias_attr = bias_attr
-        self._dtype = dtype
-
-        if (
-            self._num_channels == self._groups
-            and num_filters % self._num_channels == 0
-            and not self._use_cudnn
-            and not self._use_mkldnn
-        ):
-            self._l_type = 'depthwise_conv2d'
-        else:
-            self._l_type = 'conv2d'
-        # NPU only supports depthwise_conv2d when "input_channel = output_channel = groups"
-        if core.is_compiled_with_npu():
-            if (
-                self._num_channels == self._groups
-                and self._num_channels == self._num_filters
-            ):
-                self._l_type = 'depthwise_conv2d'
-            else:
-                self._l_type = 'conv2d'
-
-        self._num_channels = num_channels
-        if self._groups is None:
-            num_filter_channels = self._num_channels
-        else:
-            if self._num_channels % self._groups != 0:
-                raise ValueError("num_channels must be divisible by groups.")
-            num_filter_channels = self._num_channels // self._groups
-        filter_size = utils.convert_to_list(self._filter_size, 2, 'filter_size')
-        filter_shape = [self._num_filters, num_filter_channels] + filter_size
-
-        def _get_default_param_initializer():
-            filter_elem_num = (
-                filter_size[0] * filter_size[1] * self._num_channels
-            )
-            std = (2.0 / filter_elem_num) ** 0.5
-            return Normal(0.0, std, 0)
-
-        self.weight = self.create_parameter(
-            attr=self._param_attr,
-            shape=filter_shape,
-            dtype=self._dtype,
-            default_initializer=_get_default_param_initializer(),
-        )
-
-        self.bias = self.create_parameter(
-            attr=self._bias_attr,
-            shape=[self._num_filters],
-            dtype=self._dtype,
-            is_bias=True,
-        )
-
-    def forward(self, input):
-        if in_dygraph_mode() and self._l_type == "conv2d":
-            pre_bias = _C_ops.conv2d(
-                input,
-                self.weight,
-                self._stride,
-                self._padding,
-                "EXPLICIT",
-                self._dilation,
-                self._groups if self._groups else 1,
-                "NCHW",
-            )
-            if self.bias is not None:
-                pre_act = F.elementwise_add(pre_bias, self.bias, axis=1)
-            else:
-                pre_act = pre_bias
-            return dygraph_utils._append_activation_in_dygraph(
-                pre_act, self._act, use_mkldnn=self._use_mkldnn
-            )
-
-        if _non_static_mode() and (
-            self._l_type == 'conv2d' or self._l_type == 'depthwise_conv2d'
-        ):
-            attrs = (
-                'strides',
-                self._stride,
-                'paddings',
-                self._padding,
-                'dilations',
-                self._dilation,
-                'groups',
-                self._groups if self._groups else 1,
-                'use_cudnn',
-                self._use_cudnn,
-                'use_mkldnn',
-                self._use_mkldnn,
-            )
-            out = _legacy_C_ops.conv2d(input, self.weight, *attrs)
-            pre_bias = out
-
-            pre_act = dygraph_utils._append_bias_in_dygraph(
-                pre_bias, self.bias, 1, use_mkldnn=self._use_mkldnn
-            )
-            return dygraph_utils._append_activation_in_dygraph(
-                pre_act, self._act, use_mkldnn=self._use_mkldnn
-            )
-        inputs = {
-            'Input': [input],
-            'Filter': [self.weight],
-        }
-        attrs = {
-            'strides': self._stride,
-            'paddings': self._padding,
-            'dilations': self._dilation,
-            'groups': self._groups if self._groups else 1,
-            'use_cudnn': self._use_cudnn,
-            'use_mkldnn': self._use_mkldnn,
-        }
-
-        check_variable_and_dtype(
-            input, 'input', ['float16', 'float32', 'float64'], 'Conv2D'
-        )
-        pre_bias = self._helper.create_variable_for_type_inference(
-            dtype=self._dtype
-        )
-
-        self._helper.append_op(
-            type=self._l_type,
-            inputs={
-                'Input': input,
-                'Filter': self.weight,
-            },
-            outputs={"Output": pre_bias},
-            attrs=attrs,
-        )
-
-        if self.bias is not None:
-            pre_act = self._helper.create_variable_for_type_inference(
-                dtype=self._dtype
-            )
-            self._helper.append_op(
-                type='elementwise_add',
-                inputs={'X': [pre_bias], 'Y': [self.bias]},
-                outputs={'Out': [pre_act]},
-                attrs={'axis': 1, 'use_mkldnn': self._use_mkldnn},
-            )
-        else:
-            pre_act = pre_bias
-
-        # Currently, we don't support inplace in dygraph mode
-        return self._helper.append_activation(pre_act, act=self._act)
-
 class Conv3D(layers.Layer):
     r"""
     **Convlution3D Layer**
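For reference, the conv → bias-add → activation pipeline that the removed forward assembled from _C_ops.conv2d and elementwise_add corresponds to the public functional API; a rough sketch, assuming paddle.nn.functional and illustrative shapes:

    import paddle
    import paddle.nn.functional as F

    x = paddle.rand([1, 3, 8, 8])
    weight = paddle.rand([2, 3, 3, 3])  # [num_filters, num_channels, H_f, W_f]
    bias = paddle.zeros([2])

    # convolution, bias broadcast over the channel axis, then activation
    out = F.conv2d(x, weight, bias=bias, stride=1, padding=0, dilation=1, groups=1)
    out = F.relu(out)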
python/paddle/fluid/tests/unittests/collective/fleet/parallel_dygraph_se_resnext.py

@@ -16,7 +16,7 @@ import numpy as np
 import paddle
 import paddle.fluid as fluid
-from paddle.fluid.dygraph.nn import Conv2D, Linear, Pool2D
+from paddle.fluid.dygraph.nn import Linear, Pool2D
 from paddle.fluid.dygraph.base import to_variable
 import math
 from test_dist_base import runtime_main, TestParallelDyGraphRunnerBase

@@ -87,10 +87,10 @@ class ConvBNLayer(fluid.dygraph.Layer):
     ):
         super().__init__()

-        self._conv = Conv2D(
-            num_channels=num_channels,
-            num_filters=num_filters,
-            filter_size=filter_size,
+        self._conv = paddle.nn.Conv2D(
+            in_channels=num_channels,
+            out_channels=num_filters,
+            kernel_size=filter_size,
             stride=stride,
             padding=(filter_size - 1) // 2,
             groups=groups,
python/paddle/fluid/tests/unittests/collective/fleet/test_imperative_auto_mixed_precision.py

@@ -42,16 +42,14 @@ class SimpleConv(fluid.dygraph.Layer):
         act=None,
     ):
         super().__init__()
-        self._conv = fluid.dygraph.Conv2D(
-            num_channels=num_channels,
-            num_filters=num_filters,
-            filter_size=filter_size,
+        self._conv = paddle.nn.Conv2D(
+            in_channels=num_channels,
+            out_channels=num_filters,
+            kernel_size=filter_size,
             stride=stride,
             padding=(filter_size - 1) // 2,
             groups=groups,
-            act=None,
             bias_attr=None,
-            use_cudnn=True,
         )

     def forward(self, inputs):

@@ -62,7 +60,7 @@ class TestAutoCast(unittest.TestCase):
     def amp_guard_white_op(self):
         data = np.random.uniform(-1, 1, [10, 3, 32, 32]).astype('float32')
         with fluid.dygraph.guard():
-            conv2d = fluid.dygraph.Conv2D(3, 2, 3, bias_attr=False, act=None)
+            conv2d = paddle.nn.Conv2D(3, 2, 3, bias_attr=False)
             data = fluid.dygraph.to_variable(data)
             with fluid.dygraph.amp_guard(True):
                 out_fp16 = conv2d(data)

@@ -156,7 +154,7 @@ class TestAutoCast(unittest.TestCase):
     def amp_guard_upsupported_fp16_op(self):
         data = np.random.uniform(-1, 1, [10, 3, 32, 32]).astype('float32')
         with fluid.dygraph.guard():
-            conv2d = fluid.dygraph.Conv2D(3, 2, 3, bias_attr=False, act=None)
+            conv2d = paddle.nn.Conv2D(3, 2, 3, bias_attr=False)
             data = fluid.dygraph.to_variable(data)
             with fluid.dygraph.amp_guard(True):
                 out_amp_fp16 = conv2d(data)

@@ -186,9 +184,7 @@ class TestAutoCast(unittest.TestCase):
         def func():
             data = np.random.uniform(-1, 1, [10, 3, 32, 32]).astype('float32')
             with fluid.dygraph.guard():
-                conv2d = fluid.dygraph.Conv2D(
-                    3, 2, 3, bias_attr=False, act=None
-                )
+                conv2d = paddle.nn.Conv2D(3, 2, 3, bias_attr=False)
                 data = fluid.dygraph.to_variable(data)
                 with fluid.dygraph.amp_guard(level='O'):
                     out = conv2d(data)

@@ -606,7 +602,7 @@ class TestAmpDecorator(unittest.TestCase):
     def test_mode_exception(self):
         def func():
             with fluid.dygraph.guard():
-                model = fluid.dygraph.Conv2D(3, 2, 3, bias_attr=False, act=None)
+                model = paddle.nn.Conv2D(3, 2, 3, bias_attr=False)
                 opt = paddle.optimizer.SGD(parameters=model.parameters())
                 model, opt = paddle.amp.decorate(
                     models=model, optimizers=opt, level='O'

@@ -627,7 +623,7 @@ class TestAmpDecorator(unittest.TestCase):
         self.assertRaises(TypeError, test_error_model)

         def test_error_distributed_model():
-            model = fluid.dygraph.Conv2D(3, 2, 3, bias_attr=False, act=None)
+            model = paddle.nn.Conv2D(3, 2, 3, bias_attr=False)
             model = paddle.DataParallel(model)
             with fluid.dygraph.guard():
                 model = paddle.amp.decorate(models=model, level='O2')

@@ -639,7 +635,7 @@ class TestAmpDecorator(unittest.TestCase):
             def __init__(self):
                 print("A fake Optimizer")

-        model = fluid.dygraph.Conv2D(3, 2, 3, bias_attr=False, act=None)
+        model = paddle.nn.Conv2D(3, 2, 3, bias_attr=False)
         opt = MyOptimizer()
         with fluid.dygraph.guard():
             paddle.amp.decorate(models=model, optimizers=opt, level='O2')

@@ -647,14 +643,14 @@ class TestAmpDecorator(unittest.TestCase):
         self.assertRaises(TypeError, test_error_optimizer)

     def test_set_master_weight(self):
-        model1 = fluid.dygraph.Conv2D(3, 2, 3, bias_attr=False, act=None)
+        model1 = paddle.nn.Conv2D(3, 2, 3, bias_attr=False)
         opt1 = paddle.optimizer.Adam(
             learning_rate=0.0001,
             parameters=model1.parameters(),
             multi_precision=True,
         )

-        model2 = fluid.dygraph.Conv2D(3, 2, 3, bias_attr=False, act=None)
+        model2 = paddle.nn.Conv2D(3, 2, 3, bias_attr=False)
         opt2 = paddle.optimizer.Adam(
             learning_rate=0.0001,
             parameters=model2.parameters(),

@@ -674,12 +670,12 @@ class TestAmpDecorator(unittest.TestCase):
         )
         self.assertEqual(opt2._multi_precision, True)

-        model3 = fluid.dygraph.Conv2D(3, 2, 3, bias_attr=False, act=None)
+        model3 = paddle.nn.Conv2D(3, 2, 3, bias_attr=False)
         opt3 = paddle.optimizer.Adam(
             learning_rate=0.0001, parameters=model3.parameters()
         )

-        model4 = fluid.dygraph.Conv2D(3, 2, 3, bias_attr=False, act=None)
+        model4 = paddle.nn.Conv2D(3, 2, 3, bias_attr=False)
         opt4 = paddle.optimizer.Adam(
             learning_rate=0.0001, parameters=model4.parameters()
         )

@@ -777,7 +773,7 @@ class TestPureFp16SaveLoad(unittest.TestCase):
     def test_save_dtype_exception(self):
         def func():
             paddle.disable_static()
-            model = fluid.dygraph.Conv2D(3, 2, 3, bias_attr=False, act=None)
+            model = paddle.nn.Conv2D(3, 2, 3, bias_attr=False)
             opt = paddle.optimizer.SGD(parameters=model.parameters())
             paddle.amp.decorate(
                 models=model, optimizers=opt, level='O2', save_dtype='int'
python/paddle/fluid/tests/unittests/collective/fleet/test_imperative_auto_mixed_precision_for_eager.py

@@ -41,16 +41,14 @@ class SimpleConv(fluid.dygraph.Layer):
         act=None,
     ):
         super().__init__()
-        self._conv = fluid.dygraph.Conv2D(
-            num_channels=num_channels,
-            num_filters=num_filters,
-            filter_size=filter_size,
+        self._conv = paddle.nn.Conv2D(
+            in_channels=num_channels,
+            out_channels=num_filters,
+            kernel_size=filter_size,
             stride=stride,
             padding=(filter_size - 1) // 2,
             groups=groups,
-            act=None,
             bias_attr=None,
-            use_cudnn=True,
         )

     def forward(self, inputs):

@@ -61,7 +59,7 @@ class TestAutoCast(unittest.TestCase):
     def amp_guard_white_op(self):
         data = np.random.uniform(-1, 1, [10, 3, 32, 32]).astype('float32')
         with fluid.dygraph.guard():
-            conv2d = fluid.dygraph.Conv2D(3, 2, 3, bias_attr=False, act=None)
+            conv2d = paddle.nn.Conv2D(3, 2, 3, bias_attr=False)
             data = fluid.dygraph.to_variable(data)
             with fluid.dygraph.amp_guard(True):
                 out_fp16 = conv2d(data)

@@ -155,7 +153,7 @@ class TestAutoCast(unittest.TestCase):
     def amp_guard_upsupported_fp16_op(self):
         data = np.random.uniform(-1, 1, [10, 3, 32, 32]).astype('float32')
         with fluid.dygraph.guard():
-            conv2d = fluid.dygraph.Conv2D(3, 2, 3, bias_attr=False, act=None)
+            conv2d = paddle.nn.Conv2D(3, 2, 3, bias_attr=False)
             data = fluid.dygraph.to_variable(data)
             with fluid.dygraph.amp_guard(True):
                 out_amp_fp16 = conv2d(data)

@@ -185,9 +183,7 @@ class TestAutoCast(unittest.TestCase):
         def func():
             data = np.random.uniform(-1, 1, [10, 3, 32, 32]).astype('float32')
             with fluid.dygraph.guard():
-                conv2d = fluid.dygraph.Conv2D(
-                    3, 2, 3, bias_attr=False, act=None
-                )
+                conv2d = paddle.nn.Conv2D(3, 2, 3, bias_attr=False)
                 data = fluid.dygraph.to_variable(data)
                 with fluid.dygraph.amp_guard(level='O'):
                     out = conv2d(data)

@@ -605,7 +601,7 @@ class TestAmpDecorator(unittest.TestCase):
     def test_mode_exception(self):
         def func():
             with fluid.dygraph.guard():
-                model = fluid.dygraph.Conv2D(3, 2, 3, bias_attr=False, act=None)
+                model = paddle.nn.Conv2D(3, 2, 3, bias_attr=False)
                 opt = paddle.optimizer.SGD(parameters=model.parameters())
                 model, opt = paddle.amp.decorate(
                     models=model, optimizers=opt, level='O'

@@ -626,7 +622,7 @@ class TestAmpDecorator(unittest.TestCase):
         self.assertRaises(TypeError, test_error_model)

         def test_error_distributed_model():
-            model = fluid.dygraph.Conv2D(3, 2, 3, bias_attr=False, act=None)
+            model = paddle.nn.Conv2D(3, 2, 3, bias_attr=False)
             model = paddle.DataParallel(model)
             with fluid.dygraph.guard():
                 model = paddle.amp.decorate(models=model, level='O2')

@@ -638,7 +634,7 @@ class TestAmpDecorator(unittest.TestCase):
             def __init__(self):
                 print("A fake Optimizer")

-        model = fluid.dygraph.Conv2D(3, 2, 3, bias_attr=False, act=None)
+        model = paddle.nn.Conv2D(3, 2, 3, bias_attr=False)
         opt = MyOptimizer()
         with fluid.dygraph.guard():
             paddle.amp.decorate(models=model, optimizers=opt, level='O2')

@@ -646,14 +642,14 @@ class TestAmpDecorator(unittest.TestCase):
         self.assertRaises(TypeError, test_error_optimizer)

     def test_set_master_weight(self):
-        model1 = fluid.dygraph.Conv2D(3, 2, 3, bias_attr=False, act=None)
+        model1 = paddle.nn.Conv2D(3, 2, 3, bias_attr=False)
         opt1 = paddle.optimizer.Adam(
             learning_rate=0.0001,
             parameters=model1.parameters(),
             multi_precision=True,
         )

-        model2 = fluid.dygraph.Conv2D(3, 2, 3, bias_attr=False, act=None)
+        model2 = paddle.nn.Conv2D(3, 2, 3, bias_attr=False)
         opt2 = paddle.optimizer.Adam(
             learning_rate=0.0001,
             parameters=model2.parameters(),

@@ -673,12 +669,12 @@ class TestAmpDecorator(unittest.TestCase):
         )
         self.assertEqual(opt2._multi_precision, True)

-        model3 = fluid.dygraph.Conv2D(3, 2, 3, bias_attr=False, act=None)
+        model3 = paddle.nn.Conv2D(3, 2, 3, bias_attr=False)
         opt3 = paddle.optimizer.Adam(
             learning_rate=0.0001, parameters=model3.parameters()
         )

-        model4 = fluid.dygraph.Conv2D(3, 2, 3, bias_attr=False, act=None)
+        model4 = paddle.nn.Conv2D(3, 2, 3, bias_attr=False)
         opt4 = paddle.optimizer.Adam(
             learning_rate=0.0001, parameters=model4.parameters()
         )

@@ -767,7 +763,7 @@ class TestPureFp16SaveLoad(unittest.TestCase):
     def test_save_dtype_exception(self):
         def func():
             paddle.disable_static()
-            model = fluid.dygraph.Conv2D(3, 2, 3, bias_attr=False, act=None)
+            model = paddle.nn.Conv2D(3, 2, 3, bias_attr=False)
             opt = paddle.optimizer.SGD(parameters=model.parameters())
             paddle.amp.decorate(
                 models=model, optimizers=opt, level='O2', save_dtype='int'
python/paddle/fluid/tests/unittests/dygraph_to_static/darknet.py

@@ -12,11 +12,12 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.

+import paddle
 import paddle.fluid as fluid
 from paddle.fluid.param_attr import ParamAttr
 from paddle.fluid.regularizer import L2Decay
-from paddle.fluid.dygraph.nn import Conv2D, BatchNorm
+from paddle.fluid.dygraph.nn import BatchNorm

 class ConvBNLayer(fluid.dygraph.Layer):

@@ -33,18 +34,17 @@ class ConvBNLayer(fluid.dygraph.Layer):
     ):
         super().__init__()

-        self.conv = Conv2D(
-            num_channels=ch_in,
-            num_filters=ch_out,
-            filter_size=filter_size,
+        self.conv = paddle.nn.Conv2D(
+            in_channels=ch_in,
+            out_channels=ch_out,
+            kernel_size=filter_size,
             stride=stride,
             padding=padding,
             groups=groups,
-            param_attr=ParamAttr(
+            weight_attr=ParamAttr(
                 initializer=fluid.initializer.Normal(0.0, 0.02)
             ),
             bias_attr=False,
-            act=None,
         )

         self.batch_norm = BatchNorm(
             num_channels=ch_out,
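Alongside the class swap, the weight attribute keyword changes from param_attr to weight_attr; the ParamAttr object itself passes through unchanged. A sketch of the pattern used above (channel counts illustrative):

    import paddle
    import paddle.fluid as fluid
    from paddle.fluid.param_attr import ParamAttr

    conv = paddle.nn.Conv2D(
        in_channels=3,
        out_channels=32,
        kernel_size=3,
        # same initializer as before; only the keyword is renamed
        weight_attr=ParamAttr(initializer=fluid.initializer.Normal(0.0, 0.02)),
        bias_attr=False,
    )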
python/paddle/fluid/tests/unittests/dygraph_to_static/test_basic_api_transformation.py

@@ -133,14 +133,14 @@ def dyfunc_BilinearTensorProduct(layer1, layer2):
 def dyfunc_Conv2D(input):
-    conv2d = fluid.dygraph.Conv2D(
-        num_channels=3,
-        num_filters=2,
-        filter_size=3,
-        param_attr=fluid.ParamAttr(
+    conv2d = paddle.nn.Conv2D(
+        in_channels=3,
+        out_channels=2,
+        kernel_size=3,
+        weight_attr=paddle.ParamAttr(
             initializer=fluid.initializer.Constant(value=0.99)
         ),
-        bias_attr=fluid.ParamAttr(
+        bias_attr=paddle.ParamAttr(
             initializer=fluid.initializer.Constant(value=0.5)
         ),
     )
python/paddle/fluid/tests/unittests/dygraph_to_static/test_bmn.py

@@ -123,15 +123,14 @@ class Conv1D(fluid.dygraph.Layer):
             initializer=fluid.initializer.Uniform(low=-k, high=k),
         )

-        self._conv2d = fluid.dygraph.Conv2D(
-            num_channels=num_channels,
-            num_filters=num_filters,
-            filter_size=(1, size_k),
+        self._conv2d = paddle.nn.Conv2D(
+            in_channels=num_channels,
+            out_channels=num_filters,
+            kernel_size=(1, size_k),
             stride=1,
             padding=(0, padding),
             groups=groups,
-            act=act,
-            param_attr=param_attr,
+            weight_attr=param_attr,
             bias_attr=bias_attr,
         )

@@ -230,63 +229,59 @@ class BMN(fluid.dygraph.Layer):
             bias_attr=ParamAttr(name="PEM_3d1_b"),
         )

-        self.p_conv2d1 = fluid.dygraph.Conv2D(
-            num_channels=512,
-            num_filters=self.hidden_dim_2d,
-            filter_size=1,
+        self.p_conv2d1 = paddle.nn.Conv2D(
+            in_channels=512,
+            out_channels=self.hidden_dim_2d,
+            kernel_size=1,
             stride=1,
             padding=0,
-            act="relu",
-            param_attr=ParamAttr(name="PEM_2d1_w"),
+            weight_attr=ParamAttr(name="PEM_2d1_w"),
             bias_attr=ParamAttr(name="PEM_2d1_b"),
         )
-        self.p_conv2d2 = fluid.dygraph.Conv2D(
-            num_channels=128,
-            num_filters=self.hidden_dim_2d,
-            filter_size=3,
+        self.p_conv2d2 = paddle.nn.Conv2D(
+            in_channels=128,
+            out_channels=self.hidden_dim_2d,
+            kernel_size=3,
             stride=1,
             padding=1,
-            act="relu",
-            param_attr=ParamAttr(name="PEM_2d2_w"),
+            weight_attr=ParamAttr(name="PEM_2d2_w"),
             bias_attr=ParamAttr(name="PEM_2d2_b"),
         )
-        self.p_conv2d3 = fluid.dygraph.Conv2D(
-            num_channels=128,
-            num_filters=self.hidden_dim_2d,
-            filter_size=3,
+        self.p_conv2d3 = paddle.nn.Conv2D(
+            in_channels=128,
+            out_channels=self.hidden_dim_2d,
+            kernel_size=3,
             stride=1,
             padding=1,
-            act="relu",
-            param_attr=ParamAttr(name="PEM_2d3_w"),
+            weight_attr=ParamAttr(name="PEM_2d3_w"),
             bias_attr=ParamAttr(name="PEM_2d3_b"),
         )
-        self.p_conv2d4 = fluid.dygraph.Conv2D(
-            num_channels=128,
-            num_filters=2,
-            filter_size=1,
+        self.p_conv2d4 = paddle.nn.Conv2D(
+            in_channels=128,
+            out_channels=2,
+            kernel_size=1,
             stride=1,
             padding=0,
-            act="sigmoid",
-            param_attr=ParamAttr(name="PEM_2d4_w"),
+            weight_attr=ParamAttr(name="PEM_2d4_w"),
             bias_attr=ParamAttr(name="PEM_2d4_b"),
         )

     @to_static
     def forward(self, x):
         # Base Module
-        x = self.b_conv1(x)
-        x = self.b_conv2(x)
+        x = paddle.nn.functional.relu(self.b_conv1(x))
+        x = paddle.nn.functional.relu(self.b_conv2(x))

         # TEM
-        xs = self.ts_conv1(x)
-        xs = self.ts_conv2(xs)
+        xs = paddle.nn.functional.relu(self.ts_conv1(x))
+        xs = paddle.nn.functional.relu(self.ts_conv2(xs))
         xs = fluid.layers.squeeze(xs, axes=[1])
-        xe = self.te_conv1(x)
-        xe = self.te_conv2(xe)
+        xe = paddle.nn.functional.relu(self.te_conv1(x))
+        xe = paddle.nn.functional.relu(self.te_conv2(xe))
         xe = fluid.layers.squeeze(xe, axes=[1])

         # PEM
-        xp = self.p_conv1(x)
+        xp = paddle.nn.functional.relu(self.p_conv1(x))
         # BM layer
         xp = fluid.layers.matmul(xp, self.sample_mask)
         xp = fluid.layers.reshape(

@@ -295,10 +290,10 @@ class BMN(fluid.dygraph.Layer):
         xp = self.p_conv3d1(xp)
         xp = fluid.layers.squeeze(xp, axes=[2])
-        xp = self.p_conv2d1(xp)
-        xp = self.p_conv2d2(xp)
-        xp = self.p_conv2d3(xp)
-        xp = self.p_conv2d4(xp)
+        xp = paddle.nn.functional.relu(self.p_conv2d1(xp))
+        xp = paddle.nn.functional.relu(self.p_conv2d2(xp))
+        xp = paddle.nn.functional.relu(self.p_conv2d3(xp))
+        xp = paddle.nn.functional.sigmoid(self.p_conv2d4(xp))
         return xp, xs, xe
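As the BMN diff shows, every activation that used to be baked into a layer via act= is now applied explicitly in forward through paddle.nn.functional. The general pattern, as a sketch:

    import paddle

    conv = paddle.nn.Conv2D(in_channels=128, out_channels=2, kernel_size=1)

    def forward(x):
        # previously Conv2D(..., act="sigmoid"); the layer now returns the
        # pre-activation output and the nonlinearity is called by hand
        return paddle.nn.functional.sigmoid(conv(x))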
python/paddle/fluid/tests/unittests/dygraph_to_static/test_convert_call.py

@@ -125,14 +125,14 @@ lambda_fun = lambda x: x
 class MyConvLayer(fluid.dygraph.Layer):
     def __init__(self):
         super().__init__()
-        self._conv = fluid.dygraph.Conv2D(
-            num_channels=3,
-            num_filters=2,
-            filter_size=3,
-            param_attr=fluid.ParamAttr(
+        self._conv = paddle.nn.Conv2D(
+            in_channels=3,
+            out_channels=2,
+            kernel_size=3,
+            weight_attr=paddle.ParamAttr(
                 initializer=fluid.initializer.Constant(value=0.99)
             ),
-            bias_attr=fluid.ParamAttr(
+            bias_attr=paddle.ParamAttr(
                 initializer=fluid.initializer.Constant(value=0.5)
             ),
         )
python/paddle/fluid/tests/unittests/dygraph_to_static/test_cycle_gan.py

@@ -37,7 +37,7 @@ os.environ["CUDA_VISIBLE_DEVICES"] = "1"
 import paddle
 import paddle.fluid as fluid
 from paddle.fluid.dygraph import to_variable, declarative, ProgramTranslator
-from paddle.fluid.dygraph.nn import Conv2D, Conv2DTranspose, BatchNorm
+from paddle.fluid.dygraph.nn import Conv2DTranspose, BatchNorm

 # Note: Set True to eliminate randomness.
 # 1. For one operation, cuDNN has several algorithms,

@@ -363,14 +363,13 @@ class conv2d(fluid.dygraph.Layer):
             initializer=fluid.initializer.Constant(0.0)
         )

-        self.conv = Conv2D(
-            num_channels=num_channels,
-            num_filters=num_filters,
-            filter_size=filter_size,
+        self.conv = paddle.nn.Conv2D(
+            in_channels=num_channels,
+            out_channels=num_filters,
+            kernel_size=filter_size,
             stride=stride,
             padding=padding,
-            use_cudnn=use_cudnn,
-            param_attr=fluid.ParamAttr(
+            weight_attr=paddle.ParamAttr(
                 initializer=fluid.initializer.NormalInitializer(
                     loc=0.0, scale=stddev
                 )
python/paddle/fluid/tests/unittests/dygraph_to_static/test_mnist.py

@@ -23,7 +23,7 @@ import paddle
 import paddle.fluid as fluid
 from paddle.fluid.dygraph.base import switch_to_static_graph
 from paddle.fluid.dygraph import to_variable
-from paddle.fluid.dygraph.nn import Conv2D, Linear, Pool2D
+from paddle.fluid.dygraph.nn import Linear, Pool2D
 from paddle.fluid.optimizer import AdamOptimizer
 from paddle.fluid.dygraph.io import INFER_MODEL_SUFFIX, INFER_PARAMS_SUFFIX
 from paddle.fluid.framework import _test_eager_guard

@@ -58,18 +58,16 @@ class SimpleImgConvPool(fluid.dygraph.Layer):
     ):
         super().__init__()

-        self._conv2d = Conv2D(
-            num_channels=num_channels,
-            num_filters=num_filters,
-            filter_size=filter_size,
+        self._conv2d = paddle.nn.Conv2D(
+            in_channels=num_channels,
+            out_channels=num_filters,
+            kernel_size=filter_size,
             stride=conv_stride,
             padding=conv_padding,
             dilation=conv_dilation,
             groups=conv_groups,
-            param_attr=None,
+            weight_attr=None,
             bias_attr=None,
-            act=act,
-            use_cudnn=use_cudnn,
         )

         self._pool2d = Pool2D(
python/paddle/fluid/tests/unittests/dygraph_to_static/test_mobile_net.py

@@ -20,7 +20,7 @@ import paddle
 import paddle.fluid as fluid
 from paddle.fluid.initializer import MSRA
 from paddle.fluid.param_attr import ParamAttr
-from paddle.fluid.dygraph.nn import Conv2D, Pool2D, BatchNorm, Linear
+from paddle.fluid.dygraph.nn import Pool2D, BatchNorm, Linear
 from paddle.fluid.dygraph import declarative, ProgramTranslator
 from paddle.fluid.dygraph.io import INFER_MODEL_SUFFIX, INFER_PARAMS_SUFFIX

@@ -54,16 +54,14 @@ class ConvBNLayer(fluid.dygraph.Layer):
     ):
         super().__init__()

-        self._conv = Conv2D(
-            num_channels=num_channels,
-            num_filters=num_filters,
-            filter_size=filter_size,
+        self._conv = paddle.nn.Conv2D(
+            in_channels=num_channels,
+            out_channels=num_filters,
+            kernel_size=filter_size,
             stride=stride,
             padding=padding,
             groups=num_groups,
-            act=None,
-            use_cudnn=use_cudnn,
-            param_attr=ParamAttr(
+            weight_attr=ParamAttr(
                 initializer=MSRA(), name=self.full_name() + "_weights"
             ),
             bias_attr=False,
python/paddle/fluid/tests/unittests/dygraph_to_static/test_resnet.py

@@ -23,7 +23,7 @@ import numpy as np
 import paddle
 import paddle.fluid as fluid
 from paddle.fluid.dygraph import ProgramTranslator
-from paddle.fluid.dygraph.nn import BatchNorm, Conv2D, Linear, Pool2D
+from paddle.fluid.dygraph.nn import BatchNorm, Linear, Pool2D
 from paddle.fluid.dygraph.io import INFER_MODEL_SUFFIX, INFER_PARAMS_SUFFIX
 from predictor_utils import PredictorTools

@@ -69,14 +69,13 @@ class ConvBNLayer(fluid.dygraph.Layer):
     ):
         super().__init__()

-        self._conv = Conv2D(
-            num_channels=num_channels,
-            num_filters=num_filters,
-            filter_size=filter_size,
+        self._conv = paddle.nn.Conv2D(
+            in_channels=num_channels,
+            out_channels=num_filters,
+            kernel_size=filter_size,
             stride=stride,
             padding=(filter_size - 1) // 2,
             groups=groups,
-            act=None,
             bias_attr=False,
         )
python/paddle/fluid/tests/unittests/dygraph_to_static/test_se_resnet.py

@@ -23,7 +23,7 @@ import numpy as np
 import paddle
 import paddle.fluid as fluid
 from paddle.fluid.dygraph.base import to_variable
-from paddle.fluid.dygraph.nn import BatchNorm, Conv2D, Linear, Pool2D
+from paddle.fluid.dygraph.nn import BatchNorm, Linear, Pool2D
 from paddle.fluid.dygraph import declarative
 from paddle.fluid.dygraph import ProgramTranslator
 from paddle.fluid.dygraph.io import INFER_MODEL_SUFFIX, INFER_PARAMS_SUFFIX

@@ -102,14 +102,13 @@ class ConvBNLayer(fluid.dygraph.Layer):
     ):
         super().__init__()

-        self._conv = Conv2D(
-            num_channels=num_channels,
-            num_filters=num_filters,
-            filter_size=filter_size,
+        self._conv = paddle.nn.Conv2D(
+            in_channels=num_channels,
+            out_channels=num_filters,
+            kernel_size=filter_size,
             stride=stride,
             padding=(filter_size - 1) // 2,
             groups=groups,
-            act=None,
             bias_attr=False,
         )
python/paddle/fluid/tests/unittests/dygraph_to_static/test_sentiment.py

@@ -17,7 +17,7 @@ import numpy as np
 import paddle
 import paddle.fluid as fluid
-from paddle.fluid.dygraph.nn import Conv2D, Linear, Embedding
+from paddle.fluid.dygraph.nn import Linear, Embedding
 from paddle.fluid.dygraph import to_variable, ProgramTranslator, declarative
 from test_lac import DynamicGRU

@@ -43,17 +43,15 @@ class SimpleConvPool(fluid.dygraph.Layer):
     ):
         super().__init__()
         self.batch_size = batch_size
-        self._conv2d = Conv2D(
-            num_channels=num_channels,
-            num_filters=num_filters,
-            filter_size=filter_size,
+        self._conv2d = paddle.nn.Conv2D(
+            in_channels=num_channels,
+            out_channels=num_filters,
+            kernel_size=filter_size,
             padding=[1, 1],
-            use_cudnn=use_cudnn,
-            act='tanh',
         )

     def forward(self, inputs):
-        x = self._conv2d(inputs)
+        x = paddle.tanh(self._conv2d(inputs))
         x = fluid.layers.reduce_max(x, dim=-1)
         x = fluid.layers.reshape(x, shape=[self.batch_size, -1])
         return x
python/paddle/fluid/tests/unittests/dygraph_to_static/test_tsm.py

@@ -21,7 +21,7 @@ import unittest
 import paddle
 import paddle.fluid as fluid
 from paddle.fluid.dygraph import declarative, ProgramTranslator, to_variable
-from paddle.fluid.dygraph.nn import Conv2D, BatchNorm, Linear, Pool2D
+from paddle.fluid.dygraph.nn import BatchNorm, Linear, Pool2D
 from tsm_config_utils import merge_configs, parse_config, print_configs

 random.seed(0)

@@ -58,15 +58,14 @@ class ConvBNLayer(fluid.dygraph.Layer):
     ):
         super().__init__()

-        self._conv = Conv2D(
-            num_channels=num_channels,
-            num_filters=num_filters,
-            filter_size=filter_size,
+        self._conv = paddle.nn.Conv2D(
+            in_channels=num_channels,
+            out_channels=num_filters,
+            kernel_size=filter_size,
             stride=stride,
             padding=(filter_size - 1) // 2,
-            groups=None,
-            act=None,
-            param_attr=fluid.param_attr.ParamAttr(),
+            groups=1,
+            weight_attr=fluid.param_attr.ParamAttr(),
             bias_attr=False,
         )
python/paddle/fluid/tests/unittests/dygraph_to_static/yolov3.py

@@ -15,9 +15,9 @@
 import os
 import sys

+import paddle
 import paddle.fluid as fluid
 from paddle.fluid.dygraph import declarative
-from paddle.fluid.dygraph.nn import Conv2D
 from paddle.fluid.param_attr import ParamAttr
 from paddle.fluid.regularizer import L2Decay

@@ -247,14 +247,13 @@ class YOLOv3(fluid.dygraph.Layer):
             block_out = self.add_sublayer(
                 "block_out_%d" % (i),
-                Conv2D(
-                    num_channels=1024 // (2**i),
-                    num_filters=num_filters,
-                    filter_size=1,
+                paddle.nn.Conv2D(
+                    in_channels=1024 // (2**i),
+                    out_channels=num_filters,
+                    kernel_size=1,
                     stride=1,
                     padding=0,
-                    act=None,
-                    param_attr=ParamAttr(
+                    weight_attr=ParamAttr(
                         initializer=fluid.initializer.Normal(0.0, 0.02)
                     ),
                     bias_attr=ParamAttr(
python/paddle/fluid/tests/unittests/parallel_dygraph_mnist.py

@@ -16,7 +16,7 @@ import numpy as np
 import paddle
 import paddle.fluid as fluid
-from paddle.fluid.dygraph.nn import Conv2D, Pool2D, Linear
+from paddle.fluid.dygraph.nn import Pool2D, Linear
 from paddle.fluid.dygraph.base import to_variable
 from test_dist_base import runtime_main, TestParallelDyGraphRunnerBase

@@ -44,17 +44,16 @@ class SimpleImgConvPool(fluid.dygraph.Layer):
     ):
         super().__init__()

-        self._conv2d = Conv2D(
-            num_channels=num_channels,
-            num_filters=num_filters,
-            filter_size=filter_size,
+        self._conv2d = paddle.nn.Conv2D(
+            in_channels=num_channels,
+            out_channels=num_filters,
+            kernel_size=filter_size,
             stride=conv_stride,
             padding=conv_padding,
             dilation=conv_dilation,
             groups=conv_groups,
-            param_attr=None,
+            weight_attr=None,
             bias_attr=None,
-            use_cudnn=use_cudnn,
         )

         self._pool2d = Pool2D(
python/paddle/fluid/tests/unittests/test_conv2d_api.py

@@ -359,20 +359,9 @@ class TestConv2DEnviron(unittest.TestCase):
             )
             result = conv(inputs)

-    def run3(self, place):
-        with fluid.dygraph.guard(place):
-            inputs = fluid.dygraph.to_variable(self.input_np)
-            conv = paddle.fluid.dygraph.nn.Conv2D(
-                num_channels=3,
-                num_filters=4,
-                filter_size=(3, 3),
-            )
-            result = conv(inputs)
-
     def run_all(self, place):
         self.run1(place)
         self.run2(place)
-        self.run3(place)

     def test_environ(self):
         self.input_np = np.random.random([2, 3, 5, 5]).astype("float32")
python/paddle/fluid/tests/unittests/test_dygraph_mnist_fp16.py

@@ -17,7 +17,7 @@ import numpy as np
 import paddle
 import paddle.fluid as fluid
-from paddle.fluid.dygraph.nn import Conv2D, Pool2D, Linear
+from paddle.fluid.dygraph.nn import Pool2D, Linear
 from paddle.fluid.framework import _test_eager_guard

@@ -44,19 +44,16 @@ class SimpleImgConvPool(fluid.dygraph.Layer):
     ):
         super().__init__()

-        self._conv2d = Conv2D(
-            num_channels=num_channels,
-            num_filters=num_filters,
-            filter_size=filter_size,
+        self._conv2d = paddle.nn.Conv2D(
+            in_channels=num_channels,
+            out_channels=num_filters,
+            kernel_size=filter_size,
             stride=conv_stride,
             padding=conv_padding,
             dilation=conv_dilation,
             groups=conv_groups,
-            param_attr=param_attr,
+            weight_attr=param_attr,
             bias_attr=bias_attr,
-            use_cudnn=use_cudnn,
-            dtype=dtype,
-            act=act,
         )

         self._pool2d = Pool2D(

@@ -116,8 +113,8 @@ class MNIST(fluid.dygraph.Layer):
         )

     def forward(self, inputs, label):
-        x = self._simple_img_conv_pool_1(inputs)
-        x = self._simple_img_conv_pool_2(x)
+        x = paddle.nn.functional.relu(self._simple_img_conv_pool_1(inputs))
+        x = paddle.nn.functional.relu(self._simple_img_conv_pool_2(x))
         x = fluid.layers.reshape(x, shape=[-1, self.pool_2_shape])
         cost = self._linear(x)
         loss = fluid.layers.cross_entropy(cost, label)

@@ -129,13 +126,16 @@ class TestMnist(unittest.TestCase):
     def func_mnist_fp16(self):
         if not fluid.is_compiled_with_cuda():
             return
-        x = np.random.randn(1, 3, 224, 224).astype("float16")
+        x = np.random.randn(1, 3, 224, 224).astype("float32")
         y = np.random.randint(10, size=[1, 1], dtype="int64")
         with fluid.dygraph.guard(fluid.CUDAPlace(0)):
-            model = MNIST(dtype="float16")
+            model = MNIST(dtype="float32")
             x = fluid.dygraph.to_variable(x)
             y = fluid.dygraph.to_variable(y)
-            loss = model(x, y)
+
+            # using amp.auto_cast because paddle.nn.Conv2D doesn't suppport setting dtype
+            with paddle.amp.auto_cast(dtype='float16'):
+                loss = model(x, y)
             print(loss.numpy())

     def test_mnist_fp16(self):
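paddle.nn.Conv2D also has no dtype= argument, so the fp16 test above keeps float32 parameters and gets the half-precision path from AMP instead. A minimal sketch of that replacement (shapes illustrative; float16 compute requires a CUDA device):

    import numpy as np
    import paddle

    model = paddle.nn.Conv2D(in_channels=3, out_channels=8, kernel_size=3)
    x = paddle.to_tensor(np.random.randn(1, 3, 32, 32).astype("float32"))

    # inside the guard, eligible ops run in float16 while parameters stay float32
    with paddle.amp.auto_cast(dtype='float16'):
        y = model(x)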
python/paddle/fluid/tests/unittests/test_dygraph_multi_forward.py

@@ -19,7 +19,7 @@ import paddle
 import paddle.fluid as fluid
 from paddle.fluid import core
 from paddle.fluid.optimizer import SGDOptimizer
-from paddle.fluid.dygraph.nn import Conv2D, Pool2D, Linear
+from paddle.fluid.dygraph.nn import Pool2D, Linear
 from paddle.fluid.dygraph.base import to_variable
 from test_imperative_base import new_program_scope

@@ -48,17 +48,16 @@ class SimpleImgConvPool(fluid.dygraph.Layer):
     ):
         super().__init__()

-        self._conv2d = Conv2D(
-            num_channels=num_channels,
-            num_filters=num_filters,
-            filter_size=filter_size,
+        self._conv2d = paddle.nn.Conv2D(
+            in_channels=num_channels,
+            out_channels=num_filters,
+            kernel_size=filter_size,
             stride=conv_stride,
             padding=conv_padding,
             dilation=conv_dilation,
             groups=conv_groups,
-            param_attr=None,
+            weight_attr=None,
             bias_attr=None,
-            use_cudnn=use_cudnn,
         )

         self._pool2d = Pool2D(
python/paddle/fluid/tests/unittests/test_imperative_load_static_param.py

@@ -13,11 +13,11 @@
 # limitations under the License.

 import unittest
+import paddle
 import paddle.fluid as fluid
 import paddle.fluid.framework as framework
 from paddle.fluid.dygraph.nn import (
     BatchNorm,
-    Conv2D,
     Conv3D,
     Embedding,
     GroupNorm,

@@ -187,17 +187,11 @@ class TestDygraphLoadStatic(unittest.TestCase):
         self.linear1 = Linear(10, 10)
         self.lienar2 = Linear(10, 20)

-        self.conv2d_1 = Conv2D(
-            num_channels=10,
-            num_filters=10,
-            filter_size=5,
-            act="relu",
-        )
+        self.conv2d_1 = paddle.nn.Conv2D(
+            in_channels=10, out_channels=10, kernel_size=5
+        )

-        self.conv2d_2 = Conv2D(
-            num_channels=10,
-            num_filters=10,
-            filter_size=5,
-            act="relu",
-        )
+        self.conv2d_2 = paddle.nn.Conv2D(
+            in_channels=10, out_channels=10, kernel_size=5
+        )

         self.conv3d_1 = Conv3D(
python/paddle/fluid/tests/unittests/test_imperative_mnist.py

@@ -19,7 +19,7 @@ import paddle
 import paddle.fluid as fluid
 from paddle.fluid import core
 from paddle.fluid.optimizer import SGDOptimizer
-from paddle.fluid.dygraph.nn import Conv2D, Pool2D, Linear
+from paddle.fluid.dygraph.nn import Pool2D, Linear
 from test_imperative_base import new_program_scope
 from utils import DyGraphProgramDescTracerTestHelper
 from paddle.fluid.framework import _test_eager_guard, _in_legacy_dygraph

@@ -47,17 +47,16 @@ class SimpleImgConvPool(fluid.dygraph.Layer):
     ):
         super().__init__()

-        self._conv2d = Conv2D(
-            num_channels=num_channels,
-            num_filters=num_filters,
-            filter_size=filter_size,
+        self._conv2d = paddle.nn.Conv2D(
+            in_channels=num_channels,
+            out_channels=num_filters,
+            kernel_size=filter_size,
             stride=conv_stride,
             padding=conv_padding,
             dilation=conv_dilation,
             groups=conv_groups,
-            param_attr=None,
+            weight_attr=None,
             bias_attr=None,
-            use_cudnn=use_cudnn,
         )

         self._pool2d = Pool2D(
python/paddle/fluid/tests/unittests/test_imperative_named_members.py

@@ -23,7 +23,7 @@ class MyLayer(fluid.Layer):
     def __init__(self, num_channel, dim, num_filter=5):
         super().__init__()
         self.fc = fluid.dygraph.Linear(dim, dim)
-        self.conv = fluid.dygraph.Conv2D(num_channel, num_channel, num_filter)
+        self.conv = paddle.nn.Conv2D(num_channel, num_channel, num_filter)

     def forward(self, x):
         x = self.fc(x)

@@ -98,7 +98,7 @@ class TestImperativeNamedParameters(unittest.TestCase):
                 super().__init__()
                 self.linear1 = fluid.dygraph.Linear(10, 10)
                 self.linear2 = fluid.dygraph.Linear(5, 5)
-                self.conv2d = fluid.dygraph.Conv2D(3, 2, 3)
+                self.conv2d = paddle.nn.Conv2D(3, 2, 3)
                 self.embedding = fluid.dygraph.Embedding(size=[128, 16])
                 self.h_0 = fluid.dygraph.to_variable(
                     np.zeros([10, 10]).astype('float32')
python/paddle/fluid/tests/unittests/test_imperative_ocr_attention_model.py

@@ -18,7 +18,6 @@ import paddle
 import paddle.fluid as fluid
 from paddle.fluid import core
 from paddle.fluid.dygraph.nn import (
-    Conv2D,
     Pool2D,
     Linear,
     BatchNorm,

@@ -91,26 +90,22 @@ class ConvBNPool(fluid.dygraph.Layer):
             initializer=fluid.initializer.Normal(0.0, conv_std_1)
         )

-        self.conv_0_layer = Conv2D(
+        self.conv_0_layer = paddle.nn.Conv2D(
             channels[0],
             out_ch[0],
             3,
             padding=1,
-            param_attr=conv_param_0,
+            weight_attr=conv_param_0,
             bias_attr=False,
-            act=None,
-            use_cudnn=use_cudnn,
         )
         self.bn_0_layer = BatchNorm(out_ch[0], act=act, is_test=is_test)
-        self.conv_1_layer = Conv2D(
+        self.conv_1_layer = paddle.nn.Conv2D(
             out_ch[0],
-            num_filters=out_ch[1],
-            filter_size=3,
+            out_ch[1],
+            3,
             padding=1,
-            param_attr=conv_param_1,
+            weight_attr=conv_param_1,
             bias_attr=False,
-            act=None,
-            use_cudnn=use_cudnn,
         )
         self.bn_1_layer = BatchNorm(out_ch[1], act=act, is_test=is_test)
python/paddle/fluid/tests/unittests/test_imperative_resnet.py

@@ -19,7 +19,7 @@ import paddle
 import paddle.fluid as fluid
 from paddle.fluid import core
 from paddle.fluid.layer_helper import LayerHelper
-from paddle.fluid import Conv2D, Pool2D, BatchNorm, Linear
+from paddle.fluid import Pool2D, BatchNorm, Linear
 from paddle.fluid.dygraph.base import to_variable
 from test_imperative_base import new_program_scope
 from utils import DyGraphProgramDescTracerTestHelper, is_equal_program

@@ -89,16 +89,14 @@ class ConvBNLayer(fluid.Layer):
     ):
         super().__init__()

-        self._conv = Conv2D(
-            num_channels=num_channels,
-            num_filters=num_filters,
-            filter_size=filter_size,
+        self._conv = paddle.nn.Conv2D(
+            in_channels=num_channels,
+            out_channels=num_filters,
+            kernel_size=filter_size,
             stride=stride,
             padding=(filter_size - 1) // 2,
             groups=groups,
-            act=None,
             bias_attr=False,
-            use_cudnn=use_cudnn,
         )

         self._batch_norm = BatchNorm(num_filters, act=act)
python/paddle/fluid/tests/unittests/test_imperative_se_resnext.py

@@ -19,7 +19,7 @@ import paddle
 import paddle.fluid as fluid
 from paddle.fluid import core
 from paddle.fluid.layer_helper import LayerHelper
-from paddle.fluid.dygraph.nn import Conv2D, Pool2D, BatchNorm, Linear
+from paddle.fluid.dygraph.nn import Pool2D, BatchNorm, Linear
 from test_imperative_base import new_program_scope
 from paddle.fluid.framework import _test_eager_guard

@@ -79,14 +79,13 @@ class ConvBNLayer(fluid.dygraph.Layer):
     ):
         super().__init__()

-        self._conv = Conv2D(
-            num_channels=num_channels,
-            num_filters=num_filters,
-            filter_size=filter_size,
+        self._conv = paddle.nn.Conv2D(
+            in_channels=num_channels,
+            out_channels=num_filters,
+            kernel_size=filter_size,
             stride=stride,
             padding=(filter_size - 1) // 2,
             groups=groups,
-            act=None,
             bias_attr=None,
         )
python/paddle/fluid/tests/unittests/test_imperative_star_gan_with_gradient_penalty.py

@@ -140,10 +140,10 @@ class Conv2DLayer(fluid.dygraph.Layer):
         relufactor=None,
     ):
         super().__init__()
-        self._conv = fluid.dygraph.Conv2D(
-            num_channels=num_channels,
-            num_filters=num_filters,
-            filter_size=filter_size,
+        self._conv = paddle.nn.Conv2D(
+            in_channels=num_channels,
+            out_channels=num_filters,
+            kernel_size=filter_size,
             stride=stride,
             padding=padding,
             bias_attr=None if use_bias else False,
python/paddle/fluid/tests/unittests/test_layers.py

@@ -456,182 +456,6 @@ class TestLayer(LayerTest):
         np.testing.assert_allclose(static_ret, dy_ret_value, rtol=1e-05)
         np.testing.assert_allclose(static_ret, dy_eager_ret_value, rtol=1e-05)

-    def test_conv2d(self):
-        with self.static_graph():
-            images = layers.data(name='pixel', shape=[3, 5, 5], dtype='float32')
-            ret = layers.conv2d(input=images, num_filters=3, filter_size=[2, 2])
-            static_ret = self.get_static_graph_result(
-                feed={'pixel': np.ones([2, 3, 5, 5], dtype='float32')},
-                fetch_list=[ret],
-            )[0]
-
-        with self.static_graph():
-            images = layers.data(name='pixel', shape=[3, 5, 5], dtype='float32')
-            conv2d = nn.Conv2D(num_channels=3, num_filters=3, filter_size=[2, 2])
-            ret = conv2d(images)
-            static_ret2 = self.get_static_graph_result(
-                feed={'pixel': np.ones([2, 3, 5, 5], dtype='float32')},
-                fetch_list=[ret],
-            )[0]
-
-        with self.dynamic_graph():
-            with _test_eager_guard():
-                images = np.ones([2, 3, 5, 5], dtype='float32')
-                conv2d = nn.Conv2D(
-                    num_channels=3, num_filters=3, filter_size=[2, 2]
-                )
-                dy_eager_ret = conv2d(base.to_variable(images))
-                dy_eager_ret_value = dy_eager_ret.numpy()
-
-            images = np.ones([2, 3, 5, 5], dtype='float32')
-            conv2d = nn.Conv2D(num_channels=3, num_filters=3, filter_size=[2, 2])
-            dy_ret = conv2d(base.to_variable(images))
-            dy_ret_value = dy_ret.numpy()
-
-        with self.dynamic_graph():
-            with _test_eager_guard():
-                images = np.ones([2, 3, 5, 5], dtype='float32')
-                conv2d = nn.Conv2D(
-                    num_channels=3,
-                    num_filters=3,
-                    filter_size=[2, 2],
-                    bias_attr=False,
-                )
-                dy_ret = conv2d(base.to_variable(images))
-                self.assertIsNone(conv2d.bias)
-
-            images = np.ones([2, 3, 5, 5], dtype='float32')
-            conv2d = nn.Conv2D(
-                num_channels=3,
-                num_filters=3,
-                filter_size=[2, 2],
-                bias_attr=False,
-            )
-            dy_ret = conv2d(base.to_variable(images))
-            self.assertIsNone(conv2d.bias)
-
-        with self.static_graph():
-            # the input of Conv2D must be Variable.
-            def test_Variable():
-                images = np.ones([2, 3, 5, 5], dtype='float32')
-                conv2d = nn.Conv2D(
-                    num_channels=3, num_filters=3, filter_size=[2, 2]
-                )
-                conv2d_ret1 = conv2d(images)
-
-            self.assertRaises(TypeError, test_Variable)
-
-            # the input dtype of Conv2D must be float16 or float32 or float64
-            # float16 only can be set on GPU place
-            def test_type():
-                images = layers.data(name='pixel', shape=[3, 5, 5], dtype='int32')
-                conv2d = nn.Conv2D(
-                    num_channels=3, num_filters=3, filter_size=[2, 2]
-                )
-                conv2d_ret2 = conv2d(images)
-
-            self.assertRaises(TypeError, test_type)
-
-        np.testing.assert_allclose(static_ret, dy_ret_value, rtol=1e-05)
-        np.testing.assert_allclose(static_ret, dy_eager_ret_value, rtol=1e-05)
-        np.testing.assert_allclose(static_ret, static_ret2, rtol=1e-05)
-
-        with self.dynamic_graph():
-            with _test_eager_guard():
-                images = np.ones([2, 3, 5, 5], dtype='float32')
-                custom_weight = np.random.randn(3, 3, 2, 2).astype("float32")
-                weight_attr = fluid.ParamAttr(
-                    initializer=fluid.initializer.NumpyArrayInitializer(
-                        custom_weight
-                    )
-                )
-                conv2d1 = nn.Conv2D(
-                    num_channels=3, num_filters=3, filter_size=[2, 2]
-                )
-                conv2d2 = nn.Conv2D(
-                    num_channels=3,
-                    num_filters=3,
-                    filter_size=[2, 2],
-                    param_attr=weight_attr,
-                )
-                dy_ret1 = conv2d1(base.to_variable(images))
-                dy_ret2 = conv2d2(base.to_variable(images))
-                self.assertFalse(
-                    np.array_equal(dy_ret1.numpy(), dy_ret2.numpy())
-                )
-
-                conv2d1_weight_np = conv2d1.weight.numpy()
-                conv2d1_bias = conv2d1.bias
-                self.assertFalse(
-                    np.array_equal(conv2d1_weight_np, conv2d2.weight.numpy())
-                )
-                conv2d2.weight.set_value(conv2d1_weight_np)
-                np.testing.assert_array_equal(
-                    conv2d1_weight_np, conv2d2.weight.numpy()
-                )
-                conv2d2.bias.set_value(conv2d1_bias)
-                dy_ret1 = conv2d1(base.to_variable(images))
-                dy_ret2 = conv2d2(base.to_variable(images))
-                np.testing.assert_array_equal(dy_ret1.numpy(), dy_ret2.numpy())
-
-                conv2d2.weight = conv2d1.weight
-                conv2d2.bias = conv2d1.bias
-                np.testing.assert_array_equal(
-                    conv2d1.weight.numpy(), conv2d2.weight.numpy()
-                )
-                np.testing.assert_array_equal(
-                    conv2d1.bias.numpy(), conv2d2.bias.numpy()
-                )
-
-            images = np.ones([2, 3, 5, 5], dtype='float32')
-            custom_weight = np.random.randn(3, 3, 2, 2).astype("float32")
-            weight_attr = fluid.ParamAttr(
-                initializer=fluid.initializer.NumpyArrayInitializer(
-                    custom_weight
-                )
-            )
-            conv2d1 = nn.Conv2D(num_channels=3, num_filters=3, filter_size=[2, 2])
-            conv2d2 = nn.Conv2D(
-                num_channels=3,
-                num_filters=3,
-                filter_size=[2, 2],
-                param_attr=weight_attr,
-            )
-            dy_ret1 = conv2d1(base.to_variable(images))
-            dy_ret2 = conv2d2(base.to_variable(images))
-            self.assertFalse(np.array_equal(dy_ret1.numpy(), dy_ret2.numpy()))
-
-            conv2d1_weight_np = conv2d1.weight.numpy()
-            conv2d1_bias = conv2d1.bias
-            self.assertFalse(
-                np.array_equal(conv2d1_weight_np, conv2d2.weight.numpy())
-            )
-            conv2d2.weight.set_value(conv2d1_weight_np)
-            np.testing.assert_array_equal(
-                conv2d1_weight_np, conv2d2.weight.numpy()
-            )
-            conv2d2.bias.set_value(conv2d1_bias)
-            dy_ret1 = conv2d1(base.to_variable(images))
-            dy_ret2 = conv2d2(base.to_variable(images))
-            np.testing.assert_array_equal(dy_ret1.numpy(), dy_ret2.numpy())
-
-            conv2d2.weight = conv2d1.weight
-            conv2d2.bias = conv2d1.bias
-            np.testing.assert_array_equal(
-                conv2d1.weight.numpy(), conv2d2.weight.numpy()
-            )
-            np.testing.assert_array_equal(
-                conv2d1.bias.numpy(), conv2d2.bias.numpy()
-            )
-
     def test_gru_unit(self):
         lod = [[2, 4, 3]]
         D = 5
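The parameter-sharing assertions in the deleted test_conv2d carry over to the replacement layer, since paddle.nn.Conv2D exposes the same weight and bias parameters. An equivalent check, as a sketch:

    import numpy as np
    import paddle

    images = paddle.to_tensor(np.ones([2, 3, 5, 5], dtype='float32'))
    conv2d1 = paddle.nn.Conv2D(in_channels=3, out_channels=3, kernel_size=[2, 2])
    conv2d2 = paddle.nn.Conv2D(in_channels=3, out_channels=3, kernel_size=[2, 2])

    # after copying parameter values, the two layers compute identical outputs
    conv2d2.weight.set_value(conv2d1.weight.numpy())
    conv2d2.bias.set_value(conv2d1.bias)
    np.testing.assert_array_equal(conv2d1(images).numpy(), conv2d2(images).numpy())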