Unverified commit b04c55ef, authored by cnn, committed by GitHub

2.0rc api rename (#28088) (#28179)

* rename manual_seed to seed

* rename xxx1d-->xxx1D, xxx2d-->xxx2D, xxx3d-->xxx3D

* rename manual_seed --> seed

* do not rename .cc, .cu and .h file

* rename manual_seed --> seed

* rename manual_seed --> seed

* rename manual_seed --> seed

* rename manual_seed --> seed

* disable_static on doc example code

* do not change manual_seed on generator

* add enable_static on sample code

* convert python/paddle/fluid/layers/nn.py to bak

* fix typo

* fix code style

* fix seed back to manual_seed when calling functions of Generator()

* fix bug
Parent commit: 7232f1ed
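In short, this squashed PR makes two mechanical renames across the codebase: the global RNG API `paddle.manual_seed` becomes `paddle.seed`, and layer, op, and test names move from the lowercase `xxx1d`/`xxx2d`/`xxx3d` spelling to `xxx1D`/`xxx2D`/`xxx3D` (e.g. `paddle.nn.Conv2d` becomes `paddle.nn.Conv2D`). A minimal before/after sketch of user-facing code (argument values are illustrative):

    import paddle

    # before this PR:
    # paddle.manual_seed(100)
    # conv = paddle.nn.Conv2d(3, 2, 3)

    # after this PR:
    paddle.seed(100)                  # seed the global random generator
    conv = paddle.nn.Conv2D(3, 2, 3)  # in_channels=3, out_channels=2, kernel_size=3
    y = conv(paddle.rand([10, 3, 32, 32]))  # NCHW input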
@@ -221,7 +221,7 @@ from .tensor.search import sort #DEFINE_ALIAS
 from .tensor.to_string import set_printoptions
-from .framework.random import manual_seed #DEFINE_ALIAS
+from .framework.random import seed #DEFINE_ALIAS
 from .framework.random import get_cuda_rng_state #DEFINE_ALIAS
 from .framework.random import set_cuda_rng_state #DEFINE_ALIAS
 from .framework import ParamAttr #DEFINE_ALIAS
......
@@ -37,7 +37,7 @@ def auto_cast(enable=True, custom_white_list=None, custom_black_list=None):
 import paddle
-conv2d = paddle.nn.Conv2d(3, 2, 3, bias_attr=False)
+conv2d = paddle.nn.Conv2D(3, 2, 3, bias_attr=False)
 data = paddle.rand([10, 3, 32, 32])
 with paddle.amp.auto_cast():
......
@@ -50,7 +50,7 @@ class GradScaler(AmpScaler):
 import paddle
-model = paddle.nn.Conv2d(3, 2, 3, bias_attr=True)
+model = paddle.nn.Conv2D(3, 2, 3, bias_attr=True)
 optimizer = paddle.optimizer.SGD(learning_rate=0.01, parameters=model.parameters())
 scaler = paddle.amp.GradScaler(init_loss_scaling=1024)
 data = paddle.rand([10, 3, 32, 32])
@@ -90,7 +90,7 @@ class GradScaler(AmpScaler):
 import paddle
-model = paddle.nn.Conv2d(3, 2, 3, bias_attr=True)
+model = paddle.nn.Conv2D(3, 2, 3, bias_attr=True)
 optimizer = paddle.optimizer.SGD(learning_rate=0.01, parameters=model.parameters())
 scaler = paddle.amp.GradScaler(init_loss_scaling=1024)
 data = paddle.rand([10, 3, 32, 32])
@@ -122,7 +122,7 @@ class GradScaler(AmpScaler):
 import paddle
-model = paddle.nn.Conv2d(3, 2, 3, bias_attr=True)
+model = paddle.nn.Conv2D(3, 2, 3, bias_attr=True)
 optimizer = paddle.optimizer.SGD(learning_rate=0.01, parameters=model.parameters())
 scaler = paddle.amp.GradScaler(init_loss_scaling=1024)
 data = paddle.rand([10, 3, 32, 32])
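The three GradScaler hunks above are the same docstring example repeated at different methods; reassembled with the renamed `Conv2D`, it is a standard dynamic-graph AMP loop. A sketch (the loss and `scale`/`minimize` lines follow the usual `paddle.amp.GradScaler` workflow and are not part of the hunks shown):

    import paddle

    model = paddle.nn.Conv2D(3, 2, 3, bias_attr=True)
    optimizer = paddle.optimizer.SGD(learning_rate=0.01, parameters=model.parameters())
    scaler = paddle.amp.GradScaler(init_loss_scaling=1024)
    data = paddle.rand([10, 3, 32, 32])

    with paddle.amp.auto_cast():
        loss = paddle.mean(model(data))
    scaled = scaler.scale(loss)         # scale the loss before backward
    scaled.backward()
    scaler.minimize(optimizer, scaled)  # unscale gradients and apply the step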
......
@@ -670,13 +670,13 @@ class Categorical(Distribution):
 import paddle
 from paddle.distribution import Categorical
-paddle.manual_seed(100) # on CPU device
+paddle.seed(100) # on CPU device
 x = paddle.rand([6])
 print(x.numpy())
 # [0.5535528 0.20714243 0.01162981
 # 0.51577556 0.36369765 0.2609165 ]
-paddle.manual_seed(200) # on CPU device
+paddle.seed(200) # on CPU device
 y = paddle.rand([6])
 print(y.numpy())
 # [0.77663314 0.90824795 0.15685187
@@ -685,7 +685,7 @@ class Categorical(Distribution):
 cat = Categorical(x)
 cat2 = Categorical(y)
-paddle.manual_seed(1000) # on CPU device
+paddle.seed(1000) # on CPU device
 cat.sample([2,3])
 # [[0, 0, 5],
 # [3, 4, 5]]
@@ -744,7 +744,7 @@ class Categorical(Distribution):
 import paddle
 from paddle.distribution import Categorical
-paddle.manual_seed(100) # on CPU device
+paddle.seed(100) # on CPU device
 x = paddle.rand([6])
 print(x.numpy())
 # [0.5535528 0.20714243 0.01162981
@@ -752,7 +752,7 @@ class Categorical(Distribution):
 cat = Categorical(x)
-paddle.manual_seed(1000) # on CPU device
+paddle.seed(1000) # on CPU device
 cat.sample([2,3])
 # [[0, 0, 5],
 # [3, 4, 5]]
@@ -791,13 +791,13 @@ class Categorical(Distribution):
 import paddle
 from paddle.distribution import Categorical
-paddle.manual_seed(100) # on CPU device
+paddle.seed(100) # on CPU device
 x = paddle.rand([6])
 print(x.numpy())
 # [0.5535528 0.20714243 0.01162981
 # 0.51577556 0.36369765 0.2609165 ]
-paddle.manual_seed(200) # on CPU device
+paddle.seed(200) # on CPU device
 y = paddle.rand([6])
 print(y.numpy())
 # [0.77663314 0.90824795 0.15685187
@@ -842,7 +842,7 @@ class Categorical(Distribution):
 import paddle
 from paddle.distribution import Categorical
-paddle.manual_seed(100) # on CPU device
+paddle.seed(100) # on CPU device
 x = paddle.rand([6])
 print(x.numpy())
 # [0.5535528 0.20714243 0.01162981
@@ -887,7 +887,7 @@ class Categorical(Distribution):
 import paddle
 from paddle.distribution import Categorical
-paddle.manual_seed(100) # on CPU device
+paddle.seed(100) # on CPU device
 x = paddle.rand([6])
 print(x.numpy())
 # [0.5535528 0.20714243 0.01162981
@@ -953,7 +953,7 @@ class Categorical(Distribution):
 import paddle
 from paddle.distribution import Categorical
-paddle.manual_seed(100) # on CPU device
+paddle.seed(100) # on CPU device
 x = paddle.rand([6])
 print(x.numpy())
 # [0.5535528 0.20714243 0.01162981
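Putting the renamed calls together, the Categorical docstrings above all follow one pattern: seed the generator with `paddle.seed`, build the distribution from random logits, then reseed before sampling so the draws are reproducible. A condensed sketch of exactly the calls shown in the hunks:

    import paddle
    from paddle.distribution import Categorical

    paddle.seed(100)      # was paddle.manual_seed(100)
    x = paddle.rand([6])  # unnormalized category weights
    cat = Categorical(x)

    paddle.seed(1000)     # reseed so the sample below is deterministic
    cat.sample([2, 3])    # 2x3 tensor of category indices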
......
@@ -114,7 +114,7 @@ class TestWeightDecay(unittest.TestCase):
 return param_sum
 def check_weight_decay(self, place, model):
-paddle.manual_seed(1)
+paddle.seed(1)
 paddle.framework.random._manual_program_seed(1)
 main_prog = fluid.framework.Program()
 startup_prog = fluid.framework.Program()
@@ -137,7 +137,7 @@ class TestWeightDecay(unittest.TestCase):
 return param_sum
 def check_weight_decay2(self, place, model):
-paddle.manual_seed(1)
+paddle.seed(1)
 paddle.framework.random._manual_program_seed(1)
 main_prog = fluid.framework.Program()
 startup_prog = fluid.framework.Program()
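Note the pairing in these tests: after the rename, `paddle.seed` seeds the global generator, while the private helper `paddle.framework.random._manual_program_seed` is still used to pin the random seed of newly created static `Program`s. A sketch of the idiom (the division of labor between the two calls is inferred from their names and usage here):

    import paddle

    SEED = 1
    paddle.seed(SEED)  # global generator seed (was paddle.manual_seed)
    paddle.framework.random._manual_program_seed(SEED)  # static-graph programs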
......
@@ -1058,7 +1058,7 @@ class Layer(core.Layer):
 super(Mylayer, self).__init__()
 self.linear1 = paddle.nn.Linear(10, 10)
 self.linear2 = paddle.nn.Linear(5, 5)
-self.conv2d = paddle.nn.Conv2d(3, 2, 3)
+self.conv2d = paddle.nn.Conv2D(3, 2, 3)
 self.embedding = paddle.nn.Embedding(128, 16)
 self.h_0 = paddle.to_tensor(np.zeros([10, 10]).astype('float32'))
......
@@ -110,7 +110,7 @@ class Conv2D(layers.Layer):
 dilation (int or tuple, optional): The dilation size. If dilation is a tuple, it must
 contain two integers, (dilation_H, dilation_W). Otherwise, the
 dilation_H = dilation_W = dilation. Default: 1.
-groups (int, optional): The groups number of the Conv2d Layer. According to grouped
+groups (int, optional): The groups number of the Conv2D Layer. According to grouped
 convolution in Alex Krizhevsky's Deep CNN paper: when group=2,
 the first half of the filters is only connected to the first half
 of the input channels, while the second half of the filters is only
@@ -345,7 +345,7 @@ class Conv3D(layers.Layer):
 dilation (int|tuple, optional): The dilation size. If dilation is a tuple, it must
 contain three integers, (dilation_D, dilation_H, dilation_W). Otherwise, the
 dilation_D = dilation_H = dilation_W = dilation. The default value is 1.
-groups (int, optional): The groups number of the Conv3d Layer. According to grouped
+groups (int, optional): The groups number of the Conv3D Layer. According to grouped
 convolution in Alex Krizhevsky's Deep CNN paper: when group=2,
 the first half of the filters is only connected to the first half
 of the input channels, while the second half of the filters is only
@@ -574,7 +574,7 @@ class Conv3DTranspose(layers.Layer):
 dilation(int|tuple, optional): The dilation size. If dilation is a tuple, it must
 contain three integers, (dilation_D, dilation_H, dilation_W). Otherwise, the
 dilation_D = dilation_H = dilation_W = dilation. The default value is 1.
-groups(int, optional): The groups number of the Conv3d transpose layer. Inspired by
+groups(int, optional): The groups number of the Conv3D transpose layer. Inspired by
 grouped convolution in Alex Krizhevsky's Deep CNN paper, in which
 when group=2, the first half of the filters is only connected to the
 first half of the input channels, while the second half of the
@@ -2541,7 +2541,7 @@ class Conv2DTranspose(layers.Layer):
 dilation(int or tuple, optional): The dilation size. If dilation is a tuple, it must
 contain two integers, (dilation_H, dilation_W). Otherwise, the
 dilation_H = dilation_W = dilation. Default: 1.
-groups(int, optional): The groups number of the Conv2d transpose layer. Inspired by
+groups(int, optional): The groups number of the Conv2D transpose layer. Inspired by
 grouped convolution in Alex Krizhevsky's Deep CNN paper, in which
 when group=2, the first half of the filters is only connected to the
 first half of the input channels, while the second half of the
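The `groups` parameter described in the corrected docstrings splits the channels into independent convolution groups, as in AlexNet. A small sketch with the renamed class (shapes are illustrative):

    import paddle

    # groups=2: the first 2 of the 4 filters see only the first 3 of the
    # 6 input channels; the other 2 filters see the remaining 3 channels.
    conv = paddle.nn.Conv2D(in_channels=6, out_channels=4, kernel_size=3, groups=2)
    y = conv(paddle.rand([1, 6, 8, 8]))  # -> shape [1, 4, 6, 6]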
......
@@ -749,7 +749,7 @@ class BilinearInitializer(Initializer):
 regularizer=L2Decay(0.),
 initializer=nn.initializer.Bilinear())
 data = paddle.rand([B, 3, H, W], dtype='float32')
-conv_up = nn.ConvTranspose2d(3,
+conv_up = nn.Conv2DTranspose(3,
 out_channels=C,
 kernel_size=2 * factor - factor % 2,
 padding=int(
......
@@ -43,7 +43,7 @@ def simple_img_conv_pool(input,
 act=None,
 use_cudnn=True):
 """
 :api_attr: Static Graph
 The simple_img_conv_pool api is composed of :ref:`api_fluid_layers_conv2d` and :ref:`api_fluid_layers_pool2d` .
@@ -106,6 +106,8 @@ def simple_img_conv_pool(input,
 .. code-block:: python
 import paddle.fluid as fluid
+import paddle
+paddle.enable_static()
 img = fluid.data(name='img', shape=[100, 1, 28, 28], dtype='float32')
 conv_pool = fluid.nets.simple_img_conv_pool(input=img,
 filter_size=5,
@@ -151,37 +153,37 @@ def img_conv_group(input,
 pool_type="max",
 use_cudnn=True):
 """
 :api_attr: Static Graph
 The Image Convolution Group is composed of Convolution2d, BatchNorm, DropOut,
-and Pool2d. According to the input arguments, img_conv_group will do serials of
+and Pool2D. According to the input arguments, img_conv_group will do serials of
 computation for Input using Convolution2d, BatchNorm, DropOut, and pass the last
-result to Pool2d.
+result to Pool2D.
 Args:
 input (Variable): The input is 4-D Tensor with shape [N, C, H, W], the data type of input is float32 or float64.
 conv_num_filter(list|tuple): Indicates the numbers of filter of this group.
-pool_size (int|list|tuple): The pooling size of Pool2d Layer. If pool_size
+pool_size (int|list|tuple): The pooling size of Pool2D Layer. If pool_size
 is a list or tuple, it must contain two integers, (pool_size_height, pool_size_width).
 Otherwise, the pool_size_height = pool_size_width = pool_size.
-conv_padding (int|list|tuple): The padding size of the Conv2d Layer. If padding is
+conv_padding (int|list|tuple): The padding size of the Conv2D Layer. If padding is
 a list or tuple, its length must be equal to the length of conv_num_filter.
-Otherwise the conv_padding of all Conv2d Layers are the same. Default 1.
+Otherwise the conv_padding of all Conv2D Layers are the same. Default 1.
 conv_filter_size (int|list|tuple): The filter size. If filter_size is a list or
 tuple, its length must be equal to the length of conv_num_filter.
-Otherwise the conv_filter_size of all Conv2d Layers are the same. Default 3.
+Otherwise the conv_filter_size of all Conv2D Layers are the same. Default 3.
-conv_act (str): Activation type for Conv2d Layer that is not followed by BatchNorm.
+conv_act (str): Activation type for Conv2D Layer that is not followed by BatchNorm.
 Default: None.
-param_attr (ParamAttr): The parameters to the Conv2d Layer. Default: None
+param_attr (ParamAttr): The parameters to the Conv2D Layer. Default: None
-conv_with_batchnorm (bool|list): Indicates whether to use BatchNorm after Conv2d Layer.
+conv_with_batchnorm (bool|list): Indicates whether to use BatchNorm after Conv2D Layer.
 If conv_with_batchnorm is a list, its length must be equal to the length of
 conv_num_filter. Otherwise, conv_with_batchnorm indicates whether all the
-Conv2d Layer follows a BatchNorm. Default False.
+Conv2D Layer follows a BatchNorm. Default False.
 conv_batchnorm_drop_rate (float|list): Indicates the drop_rate of Dropout Layer
 after BatchNorm. If conv_batchnorm_drop_rate is a list, its length must be
 equal to the length of conv_num_filter. Otherwise, drop_rate of all Dropout
 Layers is conv_batchnorm_drop_rate. Default 0.0.
-pool_stride (int|list|tuple): The pooling stride of Pool2d layer. If pool_stride
+pool_stride (int|list|tuple): The pooling stride of Pool2D layer. If pool_stride
 is a list or tuple, it must contain two integers, (pooling_stride_H,
 pooling_stride_W). Otherwise, the pooling_stride_H = pooling_stride_W = pool_stride.
 Default 1.
@@ -192,12 +194,15 @@ def img_conv_group(input,
 Return:
 A Variable holding Tensor representing the final result after serial computation using Convolution2d,
-BatchNorm, DropOut, and Pool2d, whose data type is the same with input.
+BatchNorm, DropOut, and Pool2D, whose data type is the same with input.
 Examples:
 .. code-block:: python
 import paddle.fluid as fluid
+import paddle
+paddle.enable_static()
 img = fluid.data(name='img', shape=[None, 1, 28, 28], dtype='float32')
 conv_pool = fluid.nets.img_conv_group(input=img,
 conv_padding=1,
@@ -261,7 +266,7 @@ def sequence_conv_pool(input,
 pool_type="max",
 bias_attr=None):
 """
 :api_attr: Static Graph
 **This api takes input as an LoDTensor. If input is a Tensor, please use**
 :ref:`api_fluid_nets_simple_img_conv_pool` **instead**
@@ -300,6 +305,8 @@ def sequence_conv_pool(input,
 .. code-block:: python
 import paddle.fluid as fluid
+import paddle
+paddle.enable_static()
 input_dim = 100 #len(word_dict)
 emb_dim = 128
 hid_dim = 512
@@ -327,7 +334,7 @@ def sequence_conv_pool(input,
 def glu(input, dim=-1):
 """
 :api_attr: Static Graph
 The Gated Linear Units(GLU) composed by :ref:`api_fluid_layers_split` ,
 :ref:`api_fluid_layers_sigmoid` and :ref:`api_fluid_layers_elementwise_mul` .
@@ -356,6 +363,9 @@ def glu(input, dim=-1):
 .. code-block:: python
 import paddle.fluid as fluid
+import paddle
+paddle.enable_static()
 data = fluid.data(
 name="words", shape=[-1, 6, 3, 9], dtype="float32")
 # shape of output: [-1, 3, 3, 9]
@@ -375,7 +385,7 @@ def scaled_dot_product_attention(queries,
 num_heads=1,
 dropout_rate=0.):
 """
 :api_attr: Static Graph
 This interface Multi-Head Attention using scaled dot product.
 Attention mechanism can be seen as mapping a query and a set of key-value
@@ -435,7 +445,9 @@ def scaled_dot_product_attention(queries,
 .. code-block:: python
 import paddle.fluid as fluid
+import paddle
+paddle.enable_static()
 queries = fluid.data(name="queries", shape=[3, 5, 9], dtype="float32")
 keys = fluid.data(name="keys", shape=[3, 6, 9], dtype="float32")
 values = fluid.data(name="values", shape=[3, 6, 10], dtype="float32")
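The recurring addition in this file is a two-line preamble: 2.0rc enables dynamic-graph mode by default, so every `fluid.nets` docstring example must switch back to static mode before calling `fluid.data`. A sketch of the repaired `simple_img_conv_pool` example (arguments beyond `filter_size=5` are illustrative, not taken from the truncated hunk above):

    import paddle
    import paddle.fluid as fluid

    paddle.enable_static()  # required: 2.0 defaults to dygraph mode
    img = fluid.data(name='img', shape=[100, 1, 28, 28], dtype='float32')
    conv_pool = fluid.nets.simple_img_conv_pool(
        input=img, filter_size=5, num_filters=20,
        pool_size=2, pool_stride=2, act="relu")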
......
@@ -564,7 +564,7 @@ def train_bmn(args, place, to_static):
 loss_data = []
 with fluid.dygraph.guard(place):
-paddle.manual_seed(SEED)
+paddle.seed(SEED)
 paddle.framework.random._manual_program_seed(SEED)
 global local_random
 local_random = np.random.RandomState(SEED)
......
@@ -450,7 +450,7 @@ def do_train(args, to_static):
 place = fluid.CUDAPlace(0) if fluid.is_compiled_with_cuda(
 ) else fluid.CPUPlace()
 with fluid.dygraph.guard(place):
-paddle.manual_seed(SEED)
+paddle.seed(SEED)
 paddle.framework.random._manual_program_seed(SEED)
 reader = get_random_input_data(args.batch_size, args.vocab_size,
......
@@ -451,7 +451,7 @@ def train_mobilenet(args, to_static):
 with fluid.dygraph.guard(args.place):
 np.random.seed(SEED)
-paddle.manual_seed(SEED)
+paddle.seed(SEED)
 paddle.framework.random._manual_program_seed(SEED)
 if args.model == "MobileNetV1":
......
@@ -218,7 +218,7 @@ def train(place):
 batch_num = 200
 with fluid.dygraph.guard(place):
-paddle.manual_seed(SEED)
+paddle.seed(SEED)
 paddle.framework.random._manual_program_seed(SEED)
 ptb_model = PtbModel(
 hidden_size=hidden_size,
......
@@ -210,7 +210,7 @@ def train(place):
 batch_num = 200
 paddle.disable_static(place)
-paddle.manual_seed(SEED)
+paddle.seed(SEED)
 paddle.framework.random._manual_program_seed(SEED)
 ptb_model = PtbModel(
 hidden_size=hidden_size,
......
@@ -65,7 +65,7 @@ def train(args, place, to_static):
 env.seed(SEED)
 with fluid.dygraph.guard(place):
-paddle.manual_seed(SEED)
+paddle.seed(SEED)
 paddle.framework.random._manual_program_seed(SEED)
 local_random = np.random.RandomState(SEED)
......
@@ -219,7 +219,7 @@ def train(to_static):
 """
 with fluid.dygraph.guard(place):
 np.random.seed(SEED)
-paddle.manual_seed(SEED)
+paddle.seed(SEED)
 paddle.framework.random._manual_program_seed(SEED)
 train_reader = paddle.batch(
......
@@ -66,7 +66,7 @@ class ConvBNLayer(paddle.nn.Layer):
 act=None):
 super(ConvBNLayer, self).__init__()
-self._conv = paddle.nn.Conv2d(
+self._conv = paddle.nn.Conv2D(
 in_channels=num_channels,
 out_channels=num_filters,
 kernel_size=filter_size,
@@ -214,7 +214,7 @@ def train(to_static):
 """
 paddle.disable_static(place)
 np.random.seed(SEED)
-paddle.manual_seed(SEED)
+paddle.seed(SEED)
 paddle.framework.random._manual_program_seed(SEED)
 train_reader = paddle.batch(
......
@@ -334,7 +334,7 @@ def train(train_reader, to_static):
 np.random.seed(SEED)
 with fluid.dygraph.guard(place):
-paddle.manual_seed(SEED)
+paddle.seed(SEED)
 paddle.framework.random._manual_program_seed(SEED)
 se_resnext = SeResNeXt()
 optimizer = optimizer_setting(train_parameters, se_resnext.parameters())
......
@@ -286,7 +286,7 @@ def train(args, to_static):
 with fluid.dygraph.guard(place):
 np.random.seed(SEED)
-paddle.manual_seed(SEED)
+paddle.seed(SEED)
 paddle.framework.random._manual_program_seed(SEED)
 train_reader = fake_data_reader(args.class_num, args.vocab_size,
......
@@ -108,7 +108,7 @@ def train(conf_dict, to_static):
 place = fluid.CPUPlace()
 with fluid.dygraph.guard(place):
-paddle.manual_seed(SEED)
+paddle.seed(SEED)
 paddle.framework.random._manual_program_seed(SEED)
 conf_dict['dict_size'] = len(vocab)
......
@@ -106,7 +106,7 @@ def train(conf_dict, to_static):
 place = paddle.CPUPlace()
 paddle.disable_static(place)
-paddle.manual_seed(SEED)
+paddle.seed(SEED)
 paddle.framework.random._manual_program_seed(SEED)
 conf_dict['dict_size'] = len(vocab)
......
@@ -33,7 +33,7 @@ STEP_NUM = 10
 def train_static(args, batch_generator):
 paddle.enable_static()
-paddle.manual_seed(SEED)
+paddle.seed(SEED)
 paddle.framework.random._manual_program_seed(SEED)
 train_prog = fluid.Program()
 startup_prog = fluid.Program()
@@ -131,7 +131,7 @@ def train_static(args, batch_generator):
 def train_dygraph(args, batch_generator):
 with fluid.dygraph.guard(place):
 if SEED is not None:
-paddle.manual_seed(SEED)
+paddle.seed(SEED)
 paddle.framework.random._manual_program_seed(SEED)
 # define data loader
 train_loader = fluid.io.DataLoader.from_generator(capacity=10)
@@ -223,7 +223,7 @@ def train_dygraph(args, batch_generator):
 def predict_dygraph(args, batch_generator):
 with fluid.dygraph.guard(place):
-paddle.manual_seed(SEED)
+paddle.seed(SEED)
 paddle.framework.random._manual_program_seed(SEED)
 # define data loader
@@ -295,7 +295,7 @@ def predict_dygraph(args, batch_generator):
 def predict_static(args, batch_generator):
 test_prog = fluid.Program()
 with fluid.program_guard(test_prog):
-paddle.manual_seed(SEED)
+paddle.seed(SEED)
 paddle.framework.random._manual_program_seed(SEED)
 # define input and reader
......
@@ -272,7 +272,7 @@ def train(args, fake_data_reader, to_static):
 random.seed(0)
 np.random.seed(0)
 with fluid.dygraph.guard(place):
-paddle.manual_seed(1000)
+paddle.seed(1000)
 paddle.framework.random._manual_program_seed(1000)
 video_model = TSM_ResNet("TSM", train_config, 'Train')
......
@@ -20,7 +20,7 @@ import struct
 import paddle.fluid.core as core
 from paddle.fluid.tests.unittests.op_test import OpTest, convert_float_to_uint16
-from paddle.fluid.tests.unittests.test_conv2d_op import conv2d_forward_naive, TestConv2dOp
+from paddle.fluid.tests.unittests.test_conv2d_op import conv2d_forward_naive, TestConv2DOp
 def conv2d_residual_naive(out, residual):
@@ -31,7 +31,7 @@ def conv2d_residual_naive(out, residual):
 @unittest.skipIf(not core.supports_bfloat16(),
 "place does not support BF16 evaluation")
-class TestConv2dBf16Op(TestConv2dOp):
+class TestConv2DBf16Op(TestConv2DOp):
 def setUp(self):
 self.op_type = "conv2d"
 self.use_cudnn = False
@@ -110,7 +110,7 @@ class TestConv2dBf16Op(TestConv2dOp):
 pass
 def init_test_case(self):
-TestConv2dOp.init_test_case(self)
+TestConv2DOp.init_test_case(self)
 self.input_size = [1, 1, 5, 5] # NCHW
 f_c = self.input_size[1] // self.groups
 self.input_residual_size = [1, 2, 3, 3]
@@ -130,7 +130,7 @@ class TestConv2dBf16Op(TestConv2dOp):
 self.fuse_residual = True
-class TestConv2d(TestConv2dBf16Op):
+class TestConv2D(TestConv2DBf16Op):
 def init_test_case(self):
 self.pad = [0, 0]
 self.stride = [1, 1]
@@ -144,19 +144,19 @@ class TestConv2d(TestConv2dBf16Op):
 self.input_type = np.uint16
-class TestWithPad(TestConv2d):
+class TestWithPad(TestConv2D):
 def init_test_case(self):
-TestConv2d.init_test_case(self)
+TestConv2D.init_test_case(self)
 self.pad = [1, 1]
 self.input_residual_size = [2, 6, 5, 5]
-class TestWithGroup(TestConv2d):
+class TestWithGroup(TestConv2D):
 def init_group(self):
 self.groups = 3
-class TestWithStride(TestConv2dBf16Op):
+class TestWithStride(TestConv2DBf16Op):
 def init_test_case(self):
 self.pad = [1, 1]
 self.stride = [2, 2]
@@ -170,7 +170,7 @@ class TestWithStride(TestConv2dBf16Op):
 self.input_type = np.uint16
-class TestWithDilations(TestConv2dBf16Op):
+class TestWithDilations(TestConv2DBf16Op):
 def init_test_case(self):
 self.pad = [1, 1]
 self.stride = [1, 1]
@@ -185,7 +185,7 @@ class TestWithDilations(TestConv2dBf16Op):
 self.input_type = np.uint16
-class TestWith1x1ForceFP32Output(TestConv2dBf16Op):
+class TestWith1x1ForceFP32Output(TestConv2DBf16Op):
 def init_test_case(self):
 self.pad = [0, 0]
 self.stride = [1, 1]
@@ -201,7 +201,7 @@ class TestWith1x1ForceFP32Output(TestConv2dBf16Op):
 self.fuse_residual = False
-class TestWithInput1x1Filter1x1(TestConv2dBf16Op):
+class TestWithInput1x1Filter1x1(TestConv2DBf16Op):
 def init_test_case(self):
 self.pad = [0, 0]
 self.stride = [1, 1]
......
@@ -19,7 +19,7 @@ import numpy as np
 import paddle.fluid.core as core
 from paddle.fluid.tests.unittests.op_test import OpTest
-from paddle.fluid.tests.unittests.test_conv2d_op import conv2d_forward_naive, TestConv2dOp
+from paddle.fluid.tests.unittests.test_conv2d_op import conv2d_forward_naive, TestConv2DOp
 def conv2d_forward_refer(input, filter, group, conv_param):
@@ -28,7 +28,7 @@ def conv2d_forward_refer(input, filter, group, conv_param):
 return out
-class TestConv2dInt8Op(TestConv2dOp):
+class TestConv2DInt8Op(TestConv2DOp):
 def setUp(self):
 self.op_type = "conv2d"
 self.use_cudnn = False
@@ -162,7 +162,7 @@ class TestConv2dInt8Op(TestConv2dOp):
 pass
 def init_test_case(self):
-TestConv2dOp.init_test_case(self)
+TestConv2DOp.init_test_case(self)
 self.input_size = [1, 1, 5, 5] # NCHW
 f_c = self.input_size[1] // self.groups
 self.input_residual_size = [1, 2, 3, 3]
@@ -186,7 +186,7 @@ class TestConv2dInt8Op(TestConv2dOp):
 #--------------------test conv2d u8 in and u8 out with residual fuse--------------------
-class TestConv2d(TestConv2dInt8Op):
+class TestConv2D(TestConv2DInt8Op):
 def init_test_case(self):
 self.pad = [0, 0]
 self.stride = [1, 1]
@@ -201,19 +201,19 @@ class TestConv2d(TestConv2dInt8Op):
 self.scale_in_eltwise = 0.6
-class TestWithPad(TestConv2d):
+class TestWithPad(TestConv2D):
 def init_test_case(self):
-TestConv2d.init_test_case(self)
+TestConv2D.init_test_case(self)
 self.pad = [1, 1]
 self.input_residual_size = [2, 6, 5, 5]
-class TestWithGroup(TestConv2d):
+class TestWithGroup(TestConv2D):
 def init_group(self):
 self.groups = 3
-class TestWithStride(TestConv2dInt8Op):
+class TestWithStride(TestConv2DInt8Op):
 def init_test_case(self):
 self.pad = [1, 1]
 self.stride = [2, 2]
@@ -228,7 +228,7 @@ class TestWithStride(TestConv2dInt8Op):
 self.scale_in_eltwise = 0.5
-class TestWithDilations(TestConv2dInt8Op):
+class TestWithDilations(TestConv2DInt8Op):
 def init_test_case(self):
 self.pad = [1, 1]
 self.stride = [1, 1]
@@ -244,7 +244,7 @@ class TestWithDilations(TestConv2dInt8Op):
 self.scale_in_eltwise = 0.5
-class TestWith1x1(TestConv2dInt8Op):
+class TestWith1x1(TestConv2DInt8Op):
 def init_test_case(self):
 self.pad = [0, 0]
 self.stride = [1, 1]
@@ -259,7 +259,7 @@ class TestWith1x1(TestConv2dInt8Op):
 self.scale_in_eltwise = 0.5
-class TestWithInput1x1Filter1x1(TestConv2dInt8Op):
+class TestWithInput1x1Filter1x1(TestConv2DInt8Op):
 def init_test_case(self):
 self.pad = [0, 0]
 self.stride = [1, 1]
@@ -356,7 +356,7 @@ def create_test_int8_class(parent):
 globals()[cls_name_u8s8_re_1] = TestU8S8ResCase
-create_test_int8_class(TestConv2dInt8Op)
+create_test_int8_class(TestConv2DInt8Op)
 create_test_int8_class(TestWithPad)
 create_test_int8_class(TestWithStride)
 create_test_int8_class(TestWithDilations)
@@ -365,7 +365,7 @@ create_test_int8_class(TestWith1x1)
 create_test_int8_class(TestWithInput1x1Filter1x1)
-class TestConv2dOp_AsyPadding_INT_MKLDNN(TestConv2dInt8Op):
+class TestConv2DOp_AsyPadding_INT_MKLDNN(TestConv2DInt8Op):
 def init_kernel_type(self):
 self.use_mkldnn = True
@@ -374,13 +374,13 @@ class TestConv2dOp_AsyPadding_INT_MKLDNN(TestConv2dInt8Op):
 self.padding_algorithm = "EXPLICIT"
-class TestConv2dOp_Same_INT_MKLDNN(TestConv2dOp_AsyPadding_INT_MKLDNN):
+class TestConv2DOp_Same_INT_MKLDNN(TestConv2DOp_AsyPadding_INT_MKLDNN):
 def init_paddings(self):
 self.pad = [0, 0]
 self.padding_algorithm = "SAME"
-class TestConv2dOp_Valid_INT_MKLDNN(TestConv2dOp_AsyPadding_INT_MKLDNN):
+class TestConv2DOp_Valid_INT_MKLDNN(TestConv2DOp_AsyPadding_INT_MKLDNN):
 def init_paddings(self):
 self.pad = [1, 1]
 self.padding_algorithm = "VALID"
......
@@ -19,7 +19,7 @@ import numpy as np
 import paddle.fluid.core as core
 from paddle.fluid.tests.unittests.op_test import OpTest, skip_check_grad_ci
-from paddle.fluid.tests.unittests.test_conv2d_op import TestConv2dOp, TestConv2dOp_v2
+from paddle.fluid.tests.unittests.test_conv2d_op import TestConv2DOp, TestConv2DOp_v2
 def conv2d_bias_naive(out, bias):
@@ -36,7 +36,7 @@ def conv2d_residual_naive(out, residual):
 return out
-class TestConv2dMKLDNNOp(TestConv2dOp):
+class TestConv2DMKLDNNOp(TestConv2DOp):
 def init_group(self):
 self.groups = 1
@@ -64,7 +64,7 @@ class TestConv2dMKLDNNOp(TestConv2dOp):
 self.fuse_residual_connection = False
 self.input_residual_size = None
-TestConv2dOp.setUp(self)
+TestConv2DOp.setUp(self)
 output = self.outputs['Output']
@@ -106,9 +106,9 @@ class TestConv2dMKLDNNOp(TestConv2dOp):
 @skip_check_grad_ci(
 reason="Fusion is for inference only, check_grad is not required.")
-class TestWithbreluFusion(TestConv2dMKLDNNOp):
+class TestWithbreluFusion(TestConv2DMKLDNNOp):
 def init_test_case(self):
-TestConv2dMKLDNNOp.init_test_case(self)
+TestConv2DMKLDNNOp.init_test_case(self)
 self.fuse_activation = "relu6"
 self.fuse_alpha = 6.0
 self.dsttype = np.float32
@@ -116,9 +116,9 @@ class TestWithbreluFusion(TestConv2dMKLDNNOp):
 @skip_check_grad_ci(
 reason="Fusion is for inference only, check_grad is not required.")
-class TestWithFuse(TestConv2dMKLDNNOp):
+class TestWithFuse(TestConv2DMKLDNNOp):
 def init_test_case(self):
-TestConv2dMKLDNNOp.init_test_case(self)
+TestConv2DMKLDNNOp.init_test_case(self)
 self.pad = [1, 1]
 self.fuse_bias = True
 self.bias_size = [6]
@@ -126,22 +126,22 @@ class TestWithFuse(TestConv2dMKLDNNOp):
 self.input_residual_size = [2, 6, 5, 5]
-class TestWithPadWithBias(TestConv2dMKLDNNOp):
+class TestWithPadWithBias(TestConv2DMKLDNNOp):
 def init_test_case(self):
-TestConv2dMKLDNNOp.init_test_case(self)
+TestConv2DMKLDNNOp.init_test_case(self)
 self.pad = [1, 1]
 self.input_size = [2, 3, 6, 6]
-class TestWithStride(TestConv2dMKLDNNOp):
+class TestWithStride(TestConv2DMKLDNNOp):
 def init_test_case(self):
-TestConv2dMKLDNNOp.init_test_case(self)
+TestConv2DMKLDNNOp.init_test_case(self)
 self.pad = [1, 1]
 self.stride = [2, 2]
 self.input_size = [2, 3, 6, 6]
-class TestWithGroup(TestConv2dMKLDNNOp):
+class TestWithGroup(TestConv2DMKLDNNOp):
 def init_test_case(self):
 self.pad = [0, 0]
 self.stride = [1, 1]
@@ -154,15 +154,15 @@ class TestWithGroup(TestConv2dMKLDNNOp):
 self.groups = 3
-class TestWith1x1(TestConv2dMKLDNNOp):
+class TestWith1x1(TestConv2DMKLDNNOp):
 def init_test_case(self):
-TestConv2dMKLDNNOp.init_test_case(self)
+TestConv2DMKLDNNOp.init_test_case(self)
 self.filter_size = [40, 3, 1, 1]
-class TestWithInput1x1Filter1x1(TestConv2dMKLDNNOp):
+class TestWithInput1x1Filter1x1(TestConv2DMKLDNNOp):
 def init_test_case(self):
-TestConv2dMKLDNNOp.init_test_case(self)
+TestConv2DMKLDNNOp.init_test_case(self)
 self.input_size = [2, 60, 1, 1] # NCHW
 assert np.mod(self.input_size[1], self.groups) == 0
 f_c = self.input_size[1] // self.groups
@@ -172,7 +172,7 @@ class TestWithInput1x1Filter1x1(TestConv2dMKLDNNOp):
 self.groups = 3
-class TestConv2dOp_AsyPadding_MKLDNN(TestConv2dOp_v2):
+class TestConv2DOp_AsyPadding_MKLDNN(TestConv2DOp_v2):
 def init_kernel_type(self):
 self.use_mkldnn = True
 self.dtype = np.float32
@@ -182,19 +182,19 @@ class TestConv2dOp_AsyPadding_MKLDNN(TestConv2dOp_v2):
 self.padding_algorithm = "EXPLICIT"
-class TestConv2dOp_Same_MKLDNN(TestConv2dOp_AsyPadding_MKLDNN):
+class TestConv2DOp_Same_MKLDNN(TestConv2DOp_AsyPadding_MKLDNN):
 def init_paddings(self):
 self.pad = [0, 0]
 self.padding_algorithm = "SAME"
-class TestConv2dOp_Valid_MKLDNN(TestConv2dOp_AsyPadding_MKLDNN):
+class TestConv2DOp_Valid_MKLDNN(TestConv2DOp_AsyPadding_MKLDNN):
 def init_paddings(self):
 self.pad = [1, 1]
 self.padding_algorithm = "VALID"
-class TestConv2dOp_Valid_NHWC_MKLDNN(TestConv2dOp_Valid_MKLDNN):
+class TestConv2DOp_Valid_NHWC_MKLDNN(TestConv2DOp_Valid_MKLDNN):
 def init_data_format(self):
 self.data_format = "NHWC"
@@ -203,21 +203,21 @@ class TestConv2dOp_Valid_NHWC_MKLDNN(TestConv2dOp_Valid_MKLDNN):
 self.input_size = [N, H, W, C]
-class TestConv2dOp_Same_NHWC_MKLDNN(TestConv2dOp_Valid_NHWC_MKLDNN):
+class TestConv2DOp_Same_NHWC_MKLDNN(TestConv2DOp_Valid_NHWC_MKLDNN):
 def init_paddings(self):
 self.pad = [0, 0]
 self.padding_algorithm = "SAME"
-class TestConv2dOp_AsyPadding_NHWC_MKLDNN(TestConv2dOp_Valid_NHWC_MKLDNN):
+class TestConv2DOp_AsyPadding_NHWC_MKLDNN(TestConv2DOp_Valid_NHWC_MKLDNN):
 def init_paddings(self):
 self.pad = [0, 0, 1, 2]
 self.padding_algorithm = "EXPLICIT"
-class TestMKLDNNDilations(TestConv2dMKLDNNOp):
+class TestMKLDNNDilations(TestConv2DMKLDNNOp):
 def init_test_case(self):
-TestConv2dMKLDNNOp.init_test_case(self)
+TestConv2DMKLDNNOp.init_test_case(self)
 self.pad = [0, 0]
 self.stride = [1, 1]
 self.input_size = [2, 3, 10, 10] # NCHW
......
@@ -19,7 +19,7 @@ import numpy as np
 import paddle.fluid.core as core
 from paddle.fluid.tests.unittests.op_test import OpTest
-from paddle.fluid.tests.unittests.test_conv2d_transpose_op import conv2dtranspose_forward_naive, TestConv2dTransposeOp
+from paddle.fluid.tests.unittests.test_conv2d_transpose_op import conv2dtranspose_forward_naive, TestConv2DTransposeOp
 def conv2d_bias_naive(out, bias):
@@ -30,7 +30,7 @@ def conv2d_bias_naive(out, bias):
 return out
-class TestConv2dTransposeMKLDNNOp(TestConv2dTransposeOp):
+class TestConv2DTransposeMKLDNNOp(TestConv2DTransposeOp):
 def test_check_grad(self):
 return
@@ -64,7 +64,7 @@ class TestConv2dTransposeMKLDNNOp(TestConv2dTransposeOp):
 def setUp(self):
-TestConv2dTransposeOp.setUp(self)
+TestConv2DTransposeOp.setUp(self)
 output = self.outputs['Output']
@@ -86,46 +86,46 @@ class TestConv2dTransposeMKLDNNOp(TestConv2dTransposeOp):
 self.outputs['Output'] = output
-class TestMKLDNNFuseBias(TestConv2dTransposeMKLDNNOp):
+class TestMKLDNNFuseBias(TestConv2DTransposeMKLDNNOp):
 def init_test_case(self):
-TestConv2dTransposeMKLDNNOp.init_test_case(self)
+TestConv2DTransposeMKLDNNOp.init_test_case(self)
 self.pad = [1, 1]
 self.fuse_bias = True
 self.bias_size = [6]
-class TestMKLDNNWithPad(TestConv2dTransposeMKLDNNOp):
+class TestMKLDNNWithPad(TestConv2DTransposeMKLDNNOp):
 def init_test_case(self):
-TestConv2dTransposeMKLDNNOp.init_test_case(self)
+TestConv2DTransposeMKLDNNOp.init_test_case(self)
 self.pad = [1, 1]
 self.input_size = [2, 3, 10, 10]
-class TestMKLDNNWithStride(TestConv2dTransposeMKLDNNOp):
+class TestMKLDNNWithStride(TestConv2DTransposeMKLDNNOp):
 def init_test_case(self):
-TestConv2dTransposeMKLDNNOp.init_test_case(self)
+TestConv2DTransposeMKLDNNOp.init_test_case(self)
 self.pad = [1, 1]
 self.stride = [2, 2]
 self.input_size = [2, 3, 6, 6] # NCHW
-class TestMKLDNNWithAsymPad(TestConv2dTransposeMKLDNNOp):
+class TestMKLDNNWithAsymPad(TestConv2DTransposeMKLDNNOp):
 def init_test_case(self):
-TestConv2dTransposeMKLDNNOp.init_test_case(self)
+TestConv2DTransposeMKLDNNOp.init_test_case(self)
 self.pad = [0, 0, 1, 2]
 self.padding_algorithm = "EXPLICIT"
-class TestMKLDNNWithSamePad(TestConv2dTransposeMKLDNNOp):
+class TestMKLDNNWithSamePad(TestConv2DTransposeMKLDNNOp):
 def init_test_case(self):
-TestConv2dTransposeMKLDNNOp.init_test_case(self)
+TestConv2DTransposeMKLDNNOp.init_test_case(self)
 self.pad = [0, 0]
 self.padding_algorithm = "SAME"
-class TestMKLDNNWithValidPad(TestConv2dTransposeMKLDNNOp):
+class TestMKLDNNWithValidPad(TestConv2DTransposeMKLDNNOp):
 def init_test_case(self):
-TestConv2dTransposeMKLDNNOp.init_test_case(self)
+TestConv2DTransposeMKLDNNOp.init_test_case(self)
 self.pad = [1, 1]
 self.padding_algorithm = "VALID"
@@ -138,10 +138,10 @@ class TestMKLDNNWithValidPad_NHWC(TestMKLDNNWithValidPad):
 self.input_size = [N, H, W, C]
-class TestConv2dTransposeMKLDNNWithDilationsExplicitPad(
-TestConv2dTransposeMKLDNNOp):
+class TestConv2DTransposeMKLDNNWithDilationsExplicitPad(
+TestConv2DTransposeMKLDNNOp):
 def init_test_case(self):
-TestConv2dTransposeMKLDNNOp.init_test_case(self)
+TestConv2DTransposeMKLDNNOp.init_test_case(self)
 self.stride = [2, 1]
 self.dilations = [1, 2]
 self.groups = 1
......
@@ -16,10 +16,10 @@ from __future__ import print_function
 import unittest
 import numpy as np
-from paddle.fluid.tests.unittests.test_conv3d_op import TestConv3dOp, TestCase1, TestWithGroup1, TestWithGroup2, TestWith1x1, TestWithInput1x1Filter1x1, TestConv3dOp_2
+from paddle.fluid.tests.unittests.test_conv3d_op import TestConv3DOp, TestCase1, TestWithGroup1, TestWithGroup2, TestWith1x1, TestWithInput1x1Filter1x1, TestConv3DOp_2
-class TestMKLDNN(TestConv3dOp):
+class TestMKLDNN(TestConv3DOp):
 def init_kernel_type(self):
 self.use_mkldnn = True
 self.data_format = "NCHW"
@@ -61,7 +61,7 @@ class TestMKLDNNWithInput1x1Filter1x1(TestWithInput1x1Filter1x1):
 self.dtype = np.float32
-class TestConv3dOp_AsyPadding_MKLDNN(TestConv3dOp):
+class TestConv3DOp_AsyPadding_MKLDNN(TestConv3DOp):
 def init_kernel_type(self):
 self.use_mkldnn = True
 self.data_format = "NCHW"
@@ -72,7 +72,7 @@ class TestConv3dOp_AsyPadding_MKLDNN(TestConv3dOp):
 self.padding_algorithm = "EXPLICIT"
-class TestConv3dOp_Same_MKLDNN(TestConv3dOp_AsyPadding_MKLDNN):
+class TestConv3DOp_Same_MKLDNN(TestConv3DOp_AsyPadding_MKLDNN):
 def init_paddings(self):
 self.pad = [0, 0, 0]
 self.padding_algorithm = "SAME"
@@ -83,7 +83,7 @@ class TestConv3dOp_Same_MKLDNN(TestConv3dOp_AsyPadding_MKLDNN):
 self.dtype = np.float32
-class TestConv3dOp_Valid_MKLDNN(TestConv3dOp_AsyPadding_MKLDNN):
+class TestConv3DOp_Valid_MKLDNN(TestConv3DOp_AsyPadding_MKLDNN):
 def init_paddings(self):
 self.pad = [1, 1, 1]
 self.padding_algorithm = "VALID"
......
@@ -23,7 +23,7 @@ from paddle.fluid.tests.unittests.op_test import OpTest
 from paddle.fluid.tests.unittests.test_pool2d_op import TestPool2D_Op, avg_pool2D_forward_naive, max_pool2D_forward_naive
-class TestPool2dMKLDNNInt8_Op(TestPool2D_Op):
+class TestPool2DMKLDNNInt8_Op(TestPool2D_Op):
 def init_kernel_type(self):
 self.use_mkldnn = True
@@ -51,7 +51,7 @@ class TestPool2dMKLDNNInt8_Op(TestPool2D_Op):
 pass
-class TestCase1Avg(TestPool2dMKLDNNInt8_Op):
+class TestCase1Avg(TestPool2DMKLDNNInt8_Op):
 def init_test_case(self):
 self.shape = [2, 3, 7, 7]
 self.ksize = [3, 3]
@@ -65,7 +65,7 @@ class TestCase1Avg(TestPool2dMKLDNNInt8_Op):
 self.exclusive = True
-class TestCase2Avg(TestPool2dMKLDNNInt8_Op):
+class TestCase2Avg(TestPool2DMKLDNNInt8_Op):
 def init_test_case(self):
 self.shape = [2, 3, 7, 7]
 self.ksize = [3, 3]
@@ -79,7 +79,7 @@ class TestCase2Avg(TestPool2dMKLDNNInt8_Op):
 self.exclusive = False
-class TestCase0Max(TestPool2dMKLDNNInt8_Op):
+class TestCase0Max(TestPool2DMKLDNNInt8_Op):
 def init_pool_type(self):
 self.pool_type = "max"
 self.pool2D_forward_naive = max_pool2D_forward_naive
@@ -114,7 +114,7 @@ def create_test_s8_u8_class(parent):
 globals()[cls_name_u8] = TestU8Case
-create_test_s8_u8_class(TestPool2dMKLDNNInt8_Op)
+create_test_s8_u8_class(TestPool2DMKLDNNInt8_Op)
 create_test_s8_u8_class(TestCase1Avg)
 create_test_s8_u8_class(TestCase2Avg)
 create_test_s8_u8_class(TestCase0Max)
......
...@@ -26,7 +26,7 @@ import paddle.fluid as fluid ...@@ -26,7 +26,7 @@ import paddle.fluid as fluid
import paddle.fluid.dygraph as dygraph import paddle.fluid.dygraph as dygraph
from paddle.fluid import core from paddle.fluid import core
from paddle.fluid.optimizer import SGDOptimizer from paddle.fluid.optimizer import SGDOptimizer
from paddle.nn import Conv2d, Linear, SyncBatchNorm from paddle.nn import Conv2D, Linear, SyncBatchNorm
from paddle.fluid.dygraph.base import to_variable from paddle.fluid.dygraph.base import to_variable
from test_dist_base import runtime_main, TestParallelDyGraphRunnerBase from test_dist_base import runtime_main, TestParallelDyGraphRunnerBase
...@@ -42,7 +42,7 @@ class TestLayer(fluid.dygraph.Layer): ...@@ -42,7 +42,7 @@ class TestLayer(fluid.dygraph.Layer):
act=None): act=None):
super(TestLayer, self).__init__() super(TestLayer, self).__init__()
self._conv = Conv2d( self._conv = Conv2D(
in_channels=num_channels, in_channels=num_channels,
out_channels=num_filters, out_channels=num_filters,
kernel_size=filter_size, kernel_size=filter_size,
...@@ -53,7 +53,7 @@ class TestLayer(fluid.dygraph.Layer): ...@@ -53,7 +53,7 @@ class TestLayer(fluid.dygraph.Layer):
self._sync_batch_norm = SyncBatchNorm(num_filters) self._sync_batch_norm = SyncBatchNorm(num_filters)
self._conv2 = Conv2d( self._conv2 = Conv2D(
in_channels=num_filters, in_channels=num_filters,
out_channels=num_filters, out_channels=num_filters,
kernel_size=filter_size, kernel_size=filter_size,
......
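The layer above chains the renamed Conv2D into SyncBatchNorm. A single-process sketch of the same pattern (illustrative only; statistics are actually synchronized across devices only when the test is launched in parallel, and the constructor arguments follow the calls above):

import paddle

conv = paddle.nn.Conv2D(in_channels=3, out_channels=8, kernel_size=3, padding=1)
sync_bn = paddle.nn.SyncBatchNorm(8)   # num_features matches the conv output channels
x = paddle.rand([4, 3, 16, 16])
y = sync_bn(conv(x))                   # on a single device this behaves like plain batch norm
print(y.shape)                         # [4, 8, 16, 16]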
...@@ -65,7 +65,7 @@ class TestParallelExecutorBase(unittest.TestCase): ...@@ -65,7 +65,7 @@ class TestParallelExecutorBase(unittest.TestCase):
feed_data_reader, FeedDataReader feed_data_reader, FeedDataReader
), "feed_data_reader must be type of FeedDataReader" ), "feed_data_reader must be type of FeedDataReader"
paddle.manual_seed(1) paddle.seed(1)
paddle.framework.random._manual_program_seed(1) paddle.framework.random._manual_program_seed(1)
main = fluid.Program() main = fluid.Program()
startup = fluid.Program() startup = fluid.Program()
......
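paddle.manual_seed(...) becomes paddle.seed(...) in every test fixture. A minimal reproducibility sketch (_manual_program_seed is an internal test helper and is not shown):

import paddle
import numpy as np

paddle.seed(1)            # replaces the removed paddle.manual_seed(1)
np.random.seed(1)         # numpy draws used by the tests are seeded separately
print(paddle.rand([2]))   # reproducible across runs with the same seed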
...@@ -259,7 +259,7 @@ class TestLSTM(unittest.TestCase): ...@@ -259,7 +259,7 @@ class TestLSTM(unittest.TestCase):
def test_predict(self): def test_predict(self):
place = paddle.set_device(self.place) place = paddle.set_device(self.place)
paddle.manual_seed(123) paddle.seed(123)
np.random.seed(123) np.random.seed(123)
class Net(paddle.nn.Layer): class Net(paddle.nn.Layer):
......
...@@ -72,7 +72,7 @@ def avg_pool1D_forward_naive(x, ...@@ -72,7 +72,7 @@ def avg_pool1D_forward_naive(x,
return out return out
class TestPool1d_API(unittest.TestCase): class TestPool1D_API(unittest.TestCase):
def setUp(self): def setUp(self):
np.random.seed(123) np.random.seed(123)
self.places = [fluid.CPUPlace()] self.places = [fluid.CPUPlace()]
...@@ -89,7 +89,7 @@ class TestPool1d_API(unittest.TestCase): ...@@ -89,7 +89,7 @@ class TestPool1d_API(unittest.TestCase):
self.assertTrue(np.allclose(result.numpy(), result_np)) self.assertTrue(np.allclose(result.numpy(), result_np))
ada_max_pool1d_dg = paddle.nn.layer.AdaptiveAvgPool1d( ada_max_pool1d_dg = paddle.nn.layer.AdaptiveAvgPool1D(
output_size=16) output_size=16)
result = ada_max_pool1d_dg(input) result = ada_max_pool1d_dg(input)
self.assertTrue(np.allclose(result.numpy(), result_np)) self.assertTrue(np.allclose(result.numpy(), result_np))
......
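A minimal sketch of the renamed 1D adaptive average pooling, mirroring the call in the test above:

import paddle

pool = paddle.nn.AdaptiveAvgPool1D(output_size=16)
x = paddle.rand([1, 3, 32])   # NCL input
print(pool(x).shape)          # [1, 3, 16]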
...@@ -84,7 +84,7 @@ def adaptive_pool2d_forward(x, output_size, data_format='NCHW', ...@@ -84,7 +84,7 @@ def adaptive_pool2d_forward(x, output_size, data_format='NCHW',
return out return out
class TestAdaptiveAvgPool2dAPI(unittest.TestCase): class TestAdaptiveAvgPool2DAPI(unittest.TestCase):
def setUp(self): def setUp(self):
self.x_np = np.random.random([2, 3, 7, 7]).astype("float32") self.x_np = np.random.random([2, 3, 7, 7]).astype("float32")
self.res_1_np = adaptive_pool2d_forward( self.res_1_np = adaptive_pool2d_forward(
...@@ -179,7 +179,7 @@ class TestAdaptiveAvgPool2dAPI(unittest.TestCase): ...@@ -179,7 +179,7 @@ class TestAdaptiveAvgPool2dAPI(unittest.TestCase):
assert np.allclose(out_6.numpy(), self.res_3_np) assert np.allclose(out_6.numpy(), self.res_3_np)
class TestAdaptiveAvgPool2dClassAPI(unittest.TestCase): class TestAdaptiveAvgPool2DClassAPI(unittest.TestCase):
def setUp(self): def setUp(self):
self.x_np = np.random.random([2, 3, 7, 7]).astype("float32") self.x_np = np.random.random([2, 3, 7, 7]).astype("float32")
self.res_1_np = adaptive_pool2d_forward( self.res_1_np = adaptive_pool2d_forward(
...@@ -207,20 +207,20 @@ class TestAdaptiveAvgPool2dClassAPI(unittest.TestCase): ...@@ -207,20 +207,20 @@ class TestAdaptiveAvgPool2dClassAPI(unittest.TestCase):
paddle.enable_static() paddle.enable_static()
x = paddle.fluid.data(name="x", shape=[2, 3, 7, 7], dtype="float32") x = paddle.fluid.data(name="x", shape=[2, 3, 7, 7], dtype="float32")
adaptive_avg_pool = paddle.nn.AdaptiveAvgPool2d(output_size=[3, 3]) adaptive_avg_pool = paddle.nn.AdaptiveAvgPool2D(output_size=[3, 3])
out_1 = adaptive_avg_pool(x=x) out_1 = adaptive_avg_pool(x=x)
adaptive_avg_pool = paddle.nn.AdaptiveAvgPool2d(output_size=5) adaptive_avg_pool = paddle.nn.AdaptiveAvgPool2D(output_size=5)
out_2 = adaptive_avg_pool(x=x) out_2 = adaptive_avg_pool(x=x)
adaptive_avg_pool = paddle.nn.AdaptiveAvgPool2d(output_size=[2, 5]) adaptive_avg_pool = paddle.nn.AdaptiveAvgPool2D(output_size=[2, 5])
out_3 = adaptive_avg_pool(x=x) out_3 = adaptive_avg_pool(x=x)
adaptive_avg_pool = paddle.nn.AdaptiveAvgPool2d( adaptive_avg_pool = paddle.nn.AdaptiveAvgPool2D(
output_size=[3, 3], data_format="NHWC") output_size=[3, 3], data_format="NHWC")
out_4 = adaptive_avg_pool(x=x) out_4 = adaptive_avg_pool(x=x)
adaptive_avg_pool = paddle.nn.AdaptiveAvgPool2d( adaptive_avg_pool = paddle.nn.AdaptiveAvgPool2D(
output_size=[None, 3]) output_size=[None, 3])
out_5 = adaptive_avg_pool(x=x) out_5 = adaptive_avg_pool(x=x)
...@@ -247,20 +247,20 @@ class TestAdaptiveAvgPool2dClassAPI(unittest.TestCase): ...@@ -247,20 +247,20 @@ class TestAdaptiveAvgPool2dClassAPI(unittest.TestCase):
paddle.disable_static(place=place) paddle.disable_static(place=place)
x = paddle.to_tensor(self.x_np) x = paddle.to_tensor(self.x_np)
adaptive_avg_pool = paddle.nn.AdaptiveAvgPool2d(output_size=[3, 3]) adaptive_avg_pool = paddle.nn.AdaptiveAvgPool2D(output_size=[3, 3])
out_1 = adaptive_avg_pool(x=x) out_1 = adaptive_avg_pool(x=x)
adaptive_avg_pool = paddle.nn.AdaptiveAvgPool2d(output_size=5) adaptive_avg_pool = paddle.nn.AdaptiveAvgPool2D(output_size=5)
out_2 = adaptive_avg_pool(x=x) out_2 = adaptive_avg_pool(x=x)
adaptive_avg_pool = paddle.nn.AdaptiveAvgPool2d(output_size=[2, 5]) adaptive_avg_pool = paddle.nn.AdaptiveAvgPool2D(output_size=[2, 5])
out_3 = adaptive_avg_pool(x=x) out_3 = adaptive_avg_pool(x=x)
adaptive_avg_pool = paddle.nn.AdaptiveAvgPool2d( adaptive_avg_pool = paddle.nn.AdaptiveAvgPool2D(
output_size=[3, 3], data_format="NHWC") output_size=[3, 3], data_format="NHWC")
out_4 = adaptive_avg_pool(x=x) out_4 = adaptive_avg_pool(x=x)
adaptive_avg_pool = paddle.nn.AdaptiveAvgPool2d( adaptive_avg_pool = paddle.nn.AdaptiveAvgPool2D(
output_size=[None, 3]) output_size=[None, 3])
out_5 = adaptive_avg_pool(x=x) out_5 = adaptive_avg_pool(x=x)
......
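A minimal sketch of the renamed AdaptiveAvgPool2D, exercising the same output_size forms as the test: an int, a pair, and None for a dimension left at its input size:

import paddle

x = paddle.rand([2, 3, 7, 7])
print(paddle.nn.AdaptiveAvgPool2D(output_size=5)(x).shape)          # [2, 3, 5, 5]
print(paddle.nn.AdaptiveAvgPool2D(output_size=[2, 5])(x).shape)     # [2, 3, 2, 5]
print(paddle.nn.AdaptiveAvgPool2D(output_size=[None, 3])(x).shape)  # [2, 3, 7, 3] -- None keeps 7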
...@@ -99,7 +99,7 @@ def adaptive_pool3d_forward(x, ...@@ -99,7 +99,7 @@ def adaptive_pool3d_forward(x,
return out return out
class TestAdaptiveAvgPool3dAPI(unittest.TestCase): class TestAdaptiveAvgPool3DAPI(unittest.TestCase):
def setUp(self): def setUp(self):
self.x_np = np.random.random([2, 3, 5, 7, 7]).astype("float32") self.x_np = np.random.random([2, 3, 5, 7, 7]).astype("float32")
self.res_1_np = adaptive_pool3d_forward( self.res_1_np = adaptive_pool3d_forward(
...@@ -125,7 +125,8 @@ class TestAdaptiveAvgPool3dAPI(unittest.TestCase): ...@@ -125,7 +125,8 @@ class TestAdaptiveAvgPool3dAPI(unittest.TestCase):
if core.is_compiled_with_cuda() else [False]): if core.is_compiled_with_cuda() else [False]):
place = paddle.CUDAPlace(0) if use_cuda else paddle.CPUPlace() place = paddle.CUDAPlace(0) if use_cuda else paddle.CPUPlace()
paddle.enable_static() paddle.enable_static()
x = paddle.fluid.data(name="x", shape=[2, 3, 5, 7, 7], dtype="float32") x = paddle.fluid.data(
name="x", shape=[2, 3, 5, 7, 7], dtype="float32")
out_1 = paddle.nn.functional.adaptive_avg_pool3d( out_1 = paddle.nn.functional.adaptive_avg_pool3d(
x=x, output_size=[3, 3, 3]) x=x, output_size=[3, 3, 3])
...@@ -194,7 +195,7 @@ class TestAdaptiveAvgPool3dAPI(unittest.TestCase): ...@@ -194,7 +195,7 @@ class TestAdaptiveAvgPool3dAPI(unittest.TestCase):
assert np.allclose(out_6.numpy(), self.res_3_np) assert np.allclose(out_6.numpy(), self.res_3_np)
class TestAdaptiveAvgPool3dClassAPI(unittest.TestCase): class TestAdaptiveAvgPool3DClassAPI(unittest.TestCase):
def setUp(self): def setUp(self):
self.x_np = np.random.random([2, 3, 5, 7, 7]).astype("float32") self.x_np = np.random.random([2, 3, 5, 7, 7]).astype("float32")
self.res_1_np = adaptive_pool3d_forward( self.res_1_np = adaptive_pool3d_forward(
...@@ -220,24 +221,25 @@ class TestAdaptiveAvgPool3dClassAPI(unittest.TestCase): ...@@ -220,24 +221,25 @@ class TestAdaptiveAvgPool3dClassAPI(unittest.TestCase):
if core.is_compiled_with_cuda() else [False]): if core.is_compiled_with_cuda() else [False]):
place = paddle.CUDAPlace(0) if use_cuda else paddle.CPUPlace() place = paddle.CUDAPlace(0) if use_cuda else paddle.CPUPlace()
paddle.enable_static() paddle.enable_static()
x = paddle.fluid.data(name="x", shape=[2, 3, 5, 7, 7], dtype="float32") x = paddle.fluid.data(
name="x", shape=[2, 3, 5, 7, 7], dtype="float32")
adaptive_avg_pool = paddle.nn.AdaptiveAvgPool3d( adaptive_avg_pool = paddle.nn.AdaptiveAvgPool3D(
output_size=[3, 3, 3]) output_size=[3, 3, 3])
out_1 = adaptive_avg_pool(x=x) out_1 = adaptive_avg_pool(x=x)
adaptive_avg_pool = paddle.nn.AdaptiveAvgPool3d(output_size=5) adaptive_avg_pool = paddle.nn.AdaptiveAvgPool3D(output_size=5)
out_2 = adaptive_avg_pool(x=x) out_2 = adaptive_avg_pool(x=x)
adaptive_avg_pool = paddle.nn.AdaptiveAvgPool3d( adaptive_avg_pool = paddle.nn.AdaptiveAvgPool3D(
output_size=[2, 3, 5]) output_size=[2, 3, 5])
out_3 = adaptive_avg_pool(x=x) out_3 = adaptive_avg_pool(x=x)
adaptive_avg_pool = paddle.nn.AdaptiveAvgPool3d( adaptive_avg_pool = paddle.nn.AdaptiveAvgPool3D(
output_size=[3, 3, 3], data_format="NDHWC") output_size=[3, 3, 3], data_format="NDHWC")
out_4 = adaptive_avg_pool(x=x) out_4 = adaptive_avg_pool(x=x)
adaptive_avg_pool = paddle.nn.AdaptiveAvgPool3d( adaptive_avg_pool = paddle.nn.AdaptiveAvgPool3D(
output_size=[None, 3, None]) output_size=[None, 3, None])
out_5 = adaptive_avg_pool(x=x) out_5 = adaptive_avg_pool(x=x)
...@@ -264,22 +266,22 @@ class TestAdaptiveAvgPool3dClassAPI(unittest.TestCase): ...@@ -264,22 +266,22 @@ class TestAdaptiveAvgPool3dClassAPI(unittest.TestCase):
paddle.disable_static(place=place) paddle.disable_static(place=place)
x = paddle.to_tensor(self.x_np) x = paddle.to_tensor(self.x_np)
adaptive_avg_pool = paddle.nn.AdaptiveAvgPool3d( adaptive_avg_pool = paddle.nn.AdaptiveAvgPool3D(
output_size=[3, 3, 3]) output_size=[3, 3, 3])
out_1 = adaptive_avg_pool(x=x) out_1 = adaptive_avg_pool(x=x)
adaptive_avg_pool = paddle.nn.AdaptiveAvgPool3d(output_size=5) adaptive_avg_pool = paddle.nn.AdaptiveAvgPool3D(output_size=5)
out_2 = adaptive_avg_pool(x=x) out_2 = adaptive_avg_pool(x=x)
adaptive_avg_pool = paddle.nn.AdaptiveAvgPool3d( adaptive_avg_pool = paddle.nn.AdaptiveAvgPool3D(
output_size=[2, 3, 5]) output_size=[2, 3, 5])
out_3 = adaptive_avg_pool(x=x) out_3 = adaptive_avg_pool(x=x)
adaptive_avg_pool = paddle.nn.AdaptiveAvgPool3d( adaptive_avg_pool = paddle.nn.AdaptiveAvgPool3D(
output_size=[3, 3, 3], data_format="NDHWC") output_size=[3, 3, 3], data_format="NDHWC")
out_4 = adaptive_avg_pool(x=x) out_4 = adaptive_avg_pool(x=x)
adaptive_avg_pool = paddle.nn.AdaptiveAvgPool3d( adaptive_avg_pool = paddle.nn.AdaptiveAvgPool3D(
output_size=[None, 3, None]) output_size=[None, 3, None])
out_5 = adaptive_avg_pool(x=x) out_5 = adaptive_avg_pool(x=x)
......
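The 3D variant behaves the same way; a short sketch including the NDHWC data_format covered above:

import paddle

x = paddle.rand([2, 3, 5, 7, 7])                       # NCDHW input
pool = paddle.nn.AdaptiveAvgPool3D(output_size=[3, 3, 3])
print(pool(x).shape)                                   # [2, 3, 3, 3, 3]
x_last = paddle.rand([2, 5, 7, 7, 3])                  # NDHWC input, channels last
pool_last = paddle.nn.AdaptiveAvgPool3D(output_size=[3, 3, 3], data_format="NDHWC")
print(pool_last(x_last).shape)                         # [2, 3, 3, 3, 3]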
...@@ -63,7 +63,7 @@ def max_pool1D_forward_naive(x, ...@@ -63,7 +63,7 @@ def max_pool1D_forward_naive(x,
return out return out
class TestPool1d_API(unittest.TestCase): class TestPool1D_API(unittest.TestCase):
def setUp(self): def setUp(self):
np.random.seed(123) np.random.seed(123)
self.places = [fluid.CPUPlace()] self.places = [fluid.CPUPlace()]
...@@ -80,7 +80,7 @@ class TestPool1d_API(unittest.TestCase): ...@@ -80,7 +80,7 @@ class TestPool1d_API(unittest.TestCase):
input_np, ksize=[16], strides=[0], paddings=[0], adaptive=True) input_np, ksize=[16], strides=[0], paddings=[0], adaptive=True)
self.assertTrue(np.allclose(result.numpy(), result_np)) self.assertTrue(np.allclose(result.numpy(), result_np))
ada_max_pool1d_dg = paddle.nn.layer.AdaptiveMaxPool1d( ada_max_pool1d_dg = paddle.nn.layer.AdaptiveMaxPool1D(
output_size=16) output_size=16)
result = ada_max_pool1d_dg(input) result = ada_max_pool1d_dg(input)
self.assertTrue(np.allclose(result.numpy(), result_np)) self.assertTrue(np.allclose(result.numpy(), result_np))
......
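The max-pooling counterpart is renamed identically; a one-line sketch mirroring the test call:

import paddle

pool = paddle.nn.AdaptiveMaxPool1D(output_size=16)
print(pool(paddle.rand([1, 3, 32])).shape)   # [1, 3, 16]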
...@@ -84,7 +84,7 @@ def adaptive_pool2d_forward(x, output_size, data_format='NCHW', ...@@ -84,7 +84,7 @@ def adaptive_pool2d_forward(x, output_size, data_format='NCHW',
return out return out
class TestAdaptiveMaxPool2dAPI(unittest.TestCase): class TestAdaptiveMaxPool2DAPI(unittest.TestCase):
def setUp(self): def setUp(self):
self.x_np = np.random.random([2, 3, 7, 7]).astype("float32") self.x_np = np.random.random([2, 3, 7, 7]).astype("float32")
self.res_1_np = adaptive_pool2d_forward( self.res_1_np = adaptive_pool2d_forward(
...@@ -174,7 +174,7 @@ class TestAdaptiveMaxPool2dAPI(unittest.TestCase): ...@@ -174,7 +174,7 @@ class TestAdaptiveMaxPool2dAPI(unittest.TestCase):
assert np.allclose(out_5.numpy(), self.res_5_np) assert np.allclose(out_5.numpy(), self.res_5_np)
class TestAdaptiveMaxPool2dClassAPI(unittest.TestCase): class TestAdaptiveMaxPool2DClassAPI(unittest.TestCase):
def setUp(self): def setUp(self):
self.x_np = np.random.random([2, 3, 7, 7]).astype("float32") self.x_np = np.random.random([2, 3, 7, 7]).astype("float32")
self.res_1_np = adaptive_pool2d_forward( self.res_1_np = adaptive_pool2d_forward(
...@@ -202,20 +202,20 @@ class TestAdaptiveMaxPool2dClassAPI(unittest.TestCase): ...@@ -202,20 +202,20 @@ class TestAdaptiveMaxPool2dClassAPI(unittest.TestCase):
paddle.enable_static() paddle.enable_static()
x = paddle.fluid.data(name="x", shape=[2, 3, 7, 7], dtype="float32") x = paddle.fluid.data(name="x", shape=[2, 3, 7, 7], dtype="float32")
adaptive_max_pool = paddle.nn.AdaptiveMaxPool2d(output_size=[3, 3]) adaptive_max_pool = paddle.nn.AdaptiveMaxPool2D(output_size=[3, 3])
out_1 = adaptive_max_pool(x=x) out_1 = adaptive_max_pool(x=x)
adaptive_max_pool = paddle.nn.AdaptiveMaxPool2d(output_size=5) adaptive_max_pool = paddle.nn.AdaptiveMaxPool2D(output_size=5)
out_2 = adaptive_max_pool(x=x) out_2 = adaptive_max_pool(x=x)
adaptive_max_pool = paddle.nn.AdaptiveMaxPool2d(output_size=[2, 5]) adaptive_max_pool = paddle.nn.AdaptiveMaxPool2D(output_size=[2, 5])
out_3 = adaptive_max_pool(x=x) out_3 = adaptive_max_pool(x=x)
# adaptive_max_pool = paddle.nn.AdaptiveMaxPool2d( # adaptive_max_pool = paddle.nn.AdaptiveMaxPool2D(
# output_size=[3, 3], data_format="NHWC") # output_size=[3, 3], data_format="NHWC")
# out_4 = adaptive_max_pool(x=x) # out_4 = adaptive_max_pool(x=x)
adaptive_max_pool = paddle.nn.AdaptiveMaxPool2d( adaptive_max_pool = paddle.nn.AdaptiveMaxPool2D(
output_size=[None, 3]) output_size=[None, 3])
out_5 = adaptive_max_pool(x=x) out_5 = adaptive_max_pool(x=x)
...@@ -242,20 +242,20 @@ class TestAdaptiveMaxPool2dClassAPI(unittest.TestCase): ...@@ -242,20 +242,20 @@ class TestAdaptiveMaxPool2dClassAPI(unittest.TestCase):
paddle.disable_static(place=place) paddle.disable_static(place=place)
x = paddle.to_tensor(self.x_np) x = paddle.to_tensor(self.x_np)
adaptive_max_pool = paddle.nn.AdaptiveMaxPool2d(output_size=[3, 3]) adaptive_max_pool = paddle.nn.AdaptiveMaxPool2D(output_size=[3, 3])
out_1 = adaptive_max_pool(x=x) out_1 = adaptive_max_pool(x=x)
adaptive_max_pool = paddle.nn.AdaptiveMaxPool2d(output_size=5) adaptive_max_pool = paddle.nn.AdaptiveMaxPool2D(output_size=5)
out_2 = adaptive_max_pool(x=x) out_2 = adaptive_max_pool(x=x)
adaptive_max_pool = paddle.nn.AdaptiveMaxPool2d(output_size=[2, 5]) adaptive_max_pool = paddle.nn.AdaptiveMaxPool2D(output_size=[2, 5])
out_3 = adaptive_max_pool(x=x) out_3 = adaptive_max_pool(x=x)
#adaptive_max_pool = paddle.nn.AdaptiveMaxPool2d( #adaptive_max_pool = paddle.nn.AdaptiveMaxPool2D(
# output_size=[3, 3], data_format="NHWC") # output_size=[3, 3], data_format="NHWC")
#out_4 = adaptive_max_pool(x=x) #out_4 = adaptive_max_pool(x=x)
adaptive_max_pool = paddle.nn.AdaptiveMaxPool2d( adaptive_max_pool = paddle.nn.AdaptiveMaxPool2D(
output_size=[None, 3]) output_size=[None, 3])
out_5 = adaptive_max_pool(x=x) out_5 = adaptive_max_pool(x=x)
......
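A minimal sketch of the renamed AdaptiveMaxPool2D; the NHWC branch stays commented out in the test above, so the sketch keeps the default NCHW layout:

import paddle

x = paddle.rand([2, 3, 7, 7])
print(paddle.nn.AdaptiveMaxPool2D(output_size=[3, 3])(x).shape)     # [2, 3, 3, 3]
print(paddle.nn.AdaptiveMaxPool2D(output_size=[None, 3])(x).shape)  # [2, 3, 7, 3]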
...@@ -99,7 +99,7 @@ def adaptive_pool3d_forward(x, ...@@ -99,7 +99,7 @@ def adaptive_pool3d_forward(x,
return out return out
class TestAdaptiveMaxPool3dAPI(unittest.TestCase): class TestAdaptiveMaxPool3DAPI(unittest.TestCase):
def setUp(self): def setUp(self):
self.x_np = np.random.random([2, 3, 5, 7, 7]).astype("float32") self.x_np = np.random.random([2, 3, 5, 7, 7]).astype("float32")
self.res_1_np = adaptive_pool3d_forward( self.res_1_np = adaptive_pool3d_forward(
...@@ -125,7 +125,8 @@ class TestAdaptiveMaxPool3dAPI(unittest.TestCase): ...@@ -125,7 +125,8 @@ class TestAdaptiveMaxPool3dAPI(unittest.TestCase):
if core.is_compiled_with_cuda() else [False]): if core.is_compiled_with_cuda() else [False]):
place = paddle.CUDAPlace(0) if use_cuda else paddle.CPUPlace() place = paddle.CUDAPlace(0) if use_cuda else paddle.CPUPlace()
paddle.enable_static() paddle.enable_static()
x = paddle.fluid.data(name="x", shape=[2, 3, 5, 7, 7], dtype="float32") x = paddle.fluid.data(
name="x", shape=[2, 3, 5, 7, 7], dtype="float32")
out_1 = paddle.nn.functional.adaptive_max_pool3d( out_1 = paddle.nn.functional.adaptive_max_pool3d(
x=x, output_size=[3, 3, 3]) x=x, output_size=[3, 3, 3])
...@@ -189,7 +190,7 @@ class TestAdaptiveMaxPool3dAPI(unittest.TestCase): ...@@ -189,7 +190,7 @@ class TestAdaptiveMaxPool3dAPI(unittest.TestCase):
assert np.allclose(out_5.numpy(), self.res_5_np) assert np.allclose(out_5.numpy(), self.res_5_np)
class TestAdaptiveMaxPool3dClassAPI(unittest.TestCase): class TestAdaptiveMaxPool3DClassAPI(unittest.TestCase):
def setUp(self): def setUp(self):
self.x_np = np.random.random([2, 3, 5, 7, 7]).astype("float32") self.x_np = np.random.random([2, 3, 5, 7, 7]).astype("float32")
self.res_1_np = adaptive_pool3d_forward( self.res_1_np = adaptive_pool3d_forward(
...@@ -215,24 +216,25 @@ class TestAdaptiveMaxPool3dClassAPI(unittest.TestCase): ...@@ -215,24 +216,25 @@ class TestAdaptiveMaxPool3dClassAPI(unittest.TestCase):
if core.is_compiled_with_cuda() else [False]): if core.is_compiled_with_cuda() else [False]):
place = paddle.CUDAPlace(0) if use_cuda else paddle.CPUPlace() place = paddle.CUDAPlace(0) if use_cuda else paddle.CPUPlace()
paddle.enable_static() paddle.enable_static()
x = paddle.fluid.data(name="x", shape=[2, 3, 5, 7, 7], dtype="float32") x = paddle.fluid.data(
name="x", shape=[2, 3, 5, 7, 7], dtype="float32")
adaptive_max_pool = paddle.nn.AdaptiveMaxPool3d( adaptive_max_pool = paddle.nn.AdaptiveMaxPool3D(
output_size=[3, 3, 3]) output_size=[3, 3, 3])
out_1 = adaptive_max_pool(x=x) out_1 = adaptive_max_pool(x=x)
adaptive_max_pool = paddle.nn.AdaptiveMaxPool3d(output_size=5) adaptive_max_pool = paddle.nn.AdaptiveMaxPool3D(output_size=5)
out_2 = adaptive_max_pool(x=x) out_2 = adaptive_max_pool(x=x)
adaptive_max_pool = paddle.nn.AdaptiveMaxPool3d( adaptive_max_pool = paddle.nn.AdaptiveMaxPool3D(
output_size=[2, 3, 5]) output_size=[2, 3, 5])
out_3 = adaptive_max_pool(x=x) out_3 = adaptive_max_pool(x=x)
# adaptive_max_pool = paddle.nn.AdaptiveMaxPool3d( # adaptive_max_pool = paddle.nn.AdaptiveMaxPool3D(
# output_size=[3, 3, 3], data_format="NDHWC") # output_size=[3, 3, 3], data_format="NDHWC")
# out_4 = adaptive_max_pool(x=x) # out_4 = adaptive_max_pool(x=x)
adaptive_max_pool = paddle.nn.AdaptiveMaxPool3d( adaptive_max_pool = paddle.nn.AdaptiveMaxPool3D(
output_size=[None, 3, None]) output_size=[None, 3, None])
out_5 = adaptive_max_pool(x=x) out_5 = adaptive_max_pool(x=x)
...@@ -259,22 +261,22 @@ class TestAdaptiveMaxPool3dClassAPI(unittest.TestCase): ...@@ -259,22 +261,22 @@ class TestAdaptiveMaxPool3dClassAPI(unittest.TestCase):
paddle.disable_static(place=place) paddle.disable_static(place=place)
x = paddle.to_tensor(self.x_np) x = paddle.to_tensor(self.x_np)
adaptive_max_pool = paddle.nn.AdaptiveMaxPool3d( adaptive_max_pool = paddle.nn.AdaptiveMaxPool3D(
output_size=[3, 3, 3]) output_size=[3, 3, 3])
out_1 = adaptive_max_pool(x=x) out_1 = adaptive_max_pool(x=x)
adaptive_max_pool = paddle.nn.AdaptiveMaxPool3d(output_size=5) adaptive_max_pool = paddle.nn.AdaptiveMaxPool3D(output_size=5)
out_2 = adaptive_max_pool(x=x) out_2 = adaptive_max_pool(x=x)
adaptive_max_pool = paddle.nn.AdaptiveMaxPool3d( adaptive_max_pool = paddle.nn.AdaptiveMaxPool3D(
output_size=[2, 3, 5]) output_size=[2, 3, 5])
out_3 = adaptive_max_pool(x=x) out_3 = adaptive_max_pool(x=x)
# adaptive_max_pool = paddle.nn.AdaptiveMaxPool3d( # adaptive_max_pool = paddle.nn.AdaptiveMaxPool3D(
# output_size=[3, 3, 3], data_format="NDHWC") # output_size=[3, 3, 3], data_format="NDHWC")
# out_4 = adaptive_max_pool(x=x) # out_4 = adaptive_max_pool(x=x)
adaptive_max_pool = paddle.nn.AdaptiveMaxPool3d( adaptive_max_pool = paddle.nn.AdaptiveMaxPool3D(
output_size=[None, 3, None]) output_size=[None, 3, None])
out_5 = adaptive_max_pool(x=x) out_5 = adaptive_max_pool(x=x)
......
...@@ -32,7 +32,7 @@ class TestBatchNorm(unittest.TestCase): ...@@ -32,7 +32,7 @@ class TestBatchNorm(unittest.TestCase):
places.append(fluid.CUDAPlace(0)) places.append(fluid.CUDAPlace(0))
for p in places: for p in places:
with fluid.dygraph.guard(p): with fluid.dygraph.guard(p):
batch_norm1d = paddle.nn.BatchNorm1d(1, name="test") batch_norm1d = paddle.nn.BatchNorm1D(1, name="test")
def test_error(self): def test_error(self):
places = [fluid.CPUPlace()] places = [fluid.CPUPlace()]
...@@ -45,32 +45,32 @@ class TestBatchNorm(unittest.TestCase): ...@@ -45,32 +45,32 @@ class TestBatchNorm(unittest.TestCase):
def error1d_dataformat(): def error1d_dataformat():
x_data_4 = np.random.random(size=(2, 1, 3, 3)).astype('float32') x_data_4 = np.random.random(size=(2, 1, 3, 3)).astype('float32')
batch_norm1d = paddle.nn.BatchNorm1d(1, data_format='NCDHW') batch_norm1d = paddle.nn.BatchNorm1D(1, data_format='NCDHW')
batch_norm1d(fluid.dygraph.to_variable(x_data_4)) batch_norm1d(fluid.dygraph.to_variable(x_data_4))
def error2d_dataformat(): def error2d_dataformat():
x_data_3 = np.random.random(size=(2, 1, 3)).astype('float32') x_data_3 = np.random.random(size=(2, 1, 3)).astype('float32')
batch_norm2d = paddle.nn.BatchNorm2d(1, data_format='NCDHW') batch_norm2d = paddle.nn.BatchNorm2D(1, data_format='NCDHW')
batch_norm2d(fluid.dygraph.to_variable(x_data_3)) batch_norm2d(fluid.dygraph.to_variable(x_data_3))
def error3d_dataformat(): def error3d_dataformat():
x_data_4 = np.random.random(size=(2, 1, 3, 3)).astype('float32') x_data_4 = np.random.random(size=(2, 1, 3, 3)).astype('float32')
batch_norm3d = paddle.nn.BatchNorm3d(1, data_format='NCL') batch_norm3d = paddle.nn.BatchNorm3D(1, data_format='NCL')
batch_norm3d(fluid.dygraph.to_variable(x_data_4)) batch_norm3d(fluid.dygraph.to_variable(x_data_4))
def error1d(): def error1d():
x_data_4 = np.random.random(size=(2, 1, 3, 3)).astype('float32') x_data_4 = np.random.random(size=(2, 1, 3, 3)).astype('float32')
batch_norm1d = paddle.nn.BatchNorm1d(1) batch_norm1d = paddle.nn.BatchNorm1D(1)
batch_norm1d(fluid.dygraph.to_variable(x_data_4)) batch_norm1d(fluid.dygraph.to_variable(x_data_4))
def error2d(): def error2d():
x_data_3 = np.random.random(size=(2, 1, 3)).astype('float32') x_data_3 = np.random.random(size=(2, 1, 3)).astype('float32')
batch_norm2d = paddle.nn.BatchNorm2d(1) batch_norm2d = paddle.nn.BatchNorm2D(1)
batch_norm2d(fluid.dygraph.to_variable(x_data_3)) batch_norm2d(fluid.dygraph.to_variable(x_data_3))
def error3d(): def error3d():
x_data_4 = np.random.random(size=(2, 1, 3, 3)).astype('float32') x_data_4 = np.random.random(size=(2, 1, 3, 3)).astype('float32')
batch_norm3d = paddle.nn.BatchNorm3d(1) batch_norm3d = paddle.nn.BatchNorm3D(1)
batch_norm3d(fluid.dygraph.to_variable(x_data_4)) batch_norm3d(fluid.dygraph.to_variable(x_data_4))
with fluid.dygraph.guard(p): with fluid.dygraph.guard(p):
...@@ -99,7 +99,7 @@ class TestBatchNorm(unittest.TestCase): ...@@ -99,7 +99,7 @@ class TestBatchNorm(unittest.TestCase):
def compute_v2(x): def compute_v2(x):
with fluid.dygraph.guard(p): with fluid.dygraph.guard(p):
bn = paddle.nn.BatchNorm2d(shape[1]) bn = paddle.nn.BatchNorm2D(shape[1])
y = bn(fluid.dygraph.to_variable(x)) y = bn(fluid.dygraph.to_variable(x))
return y.numpy() return y.numpy()
...@@ -120,7 +120,7 @@ class TestBatchNorm(unittest.TestCase): ...@@ -120,7 +120,7 @@ class TestBatchNorm(unittest.TestCase):
def compute_v4(x): def compute_v4(x):
with fluid.dygraph.guard(p): with fluid.dygraph.guard(p):
bn = paddle.nn.BatchNorm2d( bn = paddle.nn.BatchNorm2D(
shape[1], weight_attr=False, bias_attr=False) shape[1], weight_attr=False, bias_attr=False)
y = bn(fluid.dygraph.to_variable(x)) y = bn(fluid.dygraph.to_variable(x))
return y.numpy() return y.numpy()
...@@ -155,7 +155,7 @@ class TestBatchNorm(unittest.TestCase): ...@@ -155,7 +155,7 @@ class TestBatchNorm(unittest.TestCase):
def compute_v2(x_np): def compute_v2(x_np):
with program_guard(Program(), Program()): with program_guard(Program(), Program()):
bn = paddle.nn.BatchNorm2d(shape[1]) bn = paddle.nn.BatchNorm2D(shape[1])
x = fluid.data(name='x', shape=x_np.shape, dtype=x_np.dtype) x = fluid.data(name='x', shape=x_np.shape, dtype=x_np.dtype)
y = bn(x) y = bn(x)
exe.run(fluid.default_startup_program()) exe.run(fluid.default_startup_program())
...@@ -183,8 +183,8 @@ class TestBatchNormChannelLast(unittest.TestCase): ...@@ -183,8 +183,8 @@ class TestBatchNormChannelLast(unittest.TestCase):
for p in self.places: for p in self.places:
with fluid.dygraph.guard(p): with fluid.dygraph.guard(p):
x = paddle.randn([2, 6, 4]) x = paddle.randn([2, 6, 4])
net1 = paddle.nn.BatchNorm1d(4, data_format="NLC") net1 = paddle.nn.BatchNorm1D(4, data_format="NLC")
net2 = paddle.nn.BatchNorm1d(4) net2 = paddle.nn.BatchNorm1D(4)
net2.weight = net1.weight net2.weight = net1.weight
net2.bias = net1.bias net2.bias = net1.bias
y1 = net1(x) y1 = net1(x)
...@@ -197,8 +197,8 @@ class TestBatchNormChannelLast(unittest.TestCase): ...@@ -197,8 +197,8 @@ class TestBatchNormChannelLast(unittest.TestCase):
for p in self.places: for p in self.places:
with fluid.dygraph.guard(p): with fluid.dygraph.guard(p):
x = paddle.randn([2, 6, 6, 4]) x = paddle.randn([2, 6, 6, 4])
net1 = paddle.nn.BatchNorm2d(4, data_format="NHWC") net1 = paddle.nn.BatchNorm2D(4, data_format="NHWC")
net2 = paddle.nn.BatchNorm2d(4) net2 = paddle.nn.BatchNorm2D(4)
net2.weight = net1.weight net2.weight = net1.weight
net2.bias = net1.bias net2.bias = net1.bias
y1 = net1(x) y1 = net1(x)
...@@ -211,8 +211,8 @@ class TestBatchNormChannelLast(unittest.TestCase): ...@@ -211,8 +211,8 @@ class TestBatchNormChannelLast(unittest.TestCase):
for p in self.places: for p in self.places:
with fluid.dygraph.guard(p): with fluid.dygraph.guard(p):
x = paddle.randn([2, 6, 6, 6, 4]) x = paddle.randn([2, 6, 6, 6, 4])
net1 = paddle.nn.BatchNorm3d(4, data_format="NDHWC") net1 = paddle.nn.BatchNorm3D(4, data_format="NDHWC")
net2 = paddle.nn.BatchNorm3d(4) net2 = paddle.nn.BatchNorm3D(4)
net2.weight = net1.weight net2.weight = net1.weight
net2.bias = net1.bias net2.bias = net1.bias
y1 = net1(x) y1 = net1(x)
......
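The normalization tests cover the renamed BatchNorm1D/2D/3D and their channel-last data formats. A minimal sketch of the NLC/NCL pairing checked above (shapes only; the test additionally shares weights between the two layers to compare values):

import paddle

x = paddle.randn([2, 6, 4])                          # NLC layout, 4 channels last
net1 = paddle.nn.BatchNorm1D(4, data_format="NLC")   # channel-last variant
net2 = paddle.nn.BatchNorm1D(4)                      # default NCL variant
y1 = net1(x)
y2 = net2(paddle.transpose(x, [0, 2, 1]))            # same data moved to channel-first
print(y1.shape, y2.shape)                            # [2, 6, 4] [2, 4, 6]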
...@@ -47,7 +47,7 @@ class InplaceTestBase(unittest.TestCase): ...@@ -47,7 +47,7 @@ class InplaceTestBase(unittest.TestCase):
def build_program_and_scope(self): def build_program_and_scope(self):
self.place = fluid.CUDAPlace(0) if self.use_cuda else fluid.CPUPlace() self.place = fluid.CUDAPlace(0) if self.use_cuda else fluid.CPUPlace()
paddle.manual_seed(1) paddle.seed(1)
paddle.framework.random._manual_program_seed(1) paddle.framework.random._manual_program_seed(1)
startup_program = fluid.Program() startup_program = fluid.Program()
main_program = fluid.Program() main_program = fluid.Program()
......
...@@ -30,7 +30,7 @@ class TestCompiledProgram(unittest.TestCase): ...@@ -30,7 +30,7 @@ class TestCompiledProgram(unittest.TestCase):
self.label = np.random.randint( self.label = np.random.randint(
low=0, high=10, size=[16, 1], dtype=np.int64) low=0, high=10, size=[16, 1], dtype=np.int64)
with new_program_scope(): with new_program_scope():
paddle.manual_seed(self.seed) paddle.seed(self.seed)
paddle.framework.random._manual_program_seed(self.seed) paddle.framework.random._manual_program_seed(self.seed)
place = fluid.CUDAPlace(0) if core.is_compiled_with_cuda( place = fluid.CUDAPlace(0) if core.is_compiled_with_cuda(
) else fluid.CPUPlace() ) else fluid.CPUPlace()
...@@ -47,7 +47,7 @@ class TestCompiledProgram(unittest.TestCase): ...@@ -47,7 +47,7 @@ class TestCompiledProgram(unittest.TestCase):
def test_compiled_program_base(self): def test_compiled_program_base(self):
with new_program_scope(): with new_program_scope():
paddle.manual_seed(self.seed) paddle.seed(self.seed)
paddle.framework.random._manual_program_seed(self.seed) paddle.framework.random._manual_program_seed(self.seed)
place = fluid.CUDAPlace(0) if core.is_compiled_with_cuda( place = fluid.CUDAPlace(0) if core.is_compiled_with_cuda(
) else fluid.CPUPlace() ) else fluid.CPUPlace()
...@@ -65,7 +65,7 @@ class TestCompiledProgram(unittest.TestCase): ...@@ -65,7 +65,7 @@ class TestCompiledProgram(unittest.TestCase):
def test_compiled_program_with_data_parallel(self): def test_compiled_program_with_data_parallel(self):
with new_program_scope(): with new_program_scope():
paddle.manual_seed(self.seed) paddle.seed(self.seed)
paddle.framework.random._manual_program_seed(self.seed) paddle.framework.random._manual_program_seed(self.seed)
place = fluid.CUDAPlace(0) if core.is_compiled_with_cuda( place = fluid.CUDAPlace(0) if core.is_compiled_with_cuda(
) else fluid.CPUPlace() ) else fluid.CPUPlace()
......
...@@ -21,7 +21,7 @@ import paddle.fluid.initializer as I ...@@ -21,7 +21,7 @@ import paddle.fluid.initializer as I
import unittest import unittest
class Conv1dTestCase(unittest.TestCase): class Conv1DTestCase(unittest.TestCase):
def __init__(self, def __init__(self,
methodName='runTest', methodName='runTest',
batch_size=4, batch_size=4,
...@@ -37,7 +37,7 @@ class Conv1dTestCase(unittest.TestCase): ...@@ -37,7 +37,7 @@ class Conv1dTestCase(unittest.TestCase):
no_bias=False, no_bias=False,
dtype="float32", dtype="float32",
data_format="NCL"): data_format="NCL"):
super(Conv1dTestCase, self).__init__(methodName) super(Conv1DTestCase, self).__init__(methodName)
self.batch_size = batch_size self.batch_size = batch_size
self.num_channels = num_channels self.num_channels = num_channels
self.num_filters = num_filters self.num_filters = num_filters
...@@ -107,7 +107,7 @@ class Conv1dTestCase(unittest.TestCase): ...@@ -107,7 +107,7 @@ class Conv1dTestCase(unittest.TestCase):
def paddle_nn_layer(self): def paddle_nn_layer(self):
x_var = paddle.to_tensor(self.input) x_var = paddle.to_tensor(self.input)
conv = nn.Conv1d( conv = nn.Conv1D(
self.num_channels, self.num_channels,
self.num_filters, self.num_filters,
self.filter_size, self.filter_size,
...@@ -139,7 +139,7 @@ class Conv1dTestCase(unittest.TestCase): ...@@ -139,7 +139,7 @@ class Conv1dTestCase(unittest.TestCase):
self._test_equivalence(place) self._test_equivalence(place)
class Conv1dErrorTestCase(Conv1dTestCase): class Conv1DErrorTestCase(Conv1DTestCase):
def runTest(self): def runTest(self):
place = fluid.CPUPlace() place = fluid.CPUPlace()
with dg.guard(place): with dg.guard(place):
...@@ -147,7 +147,7 @@ class Conv1dErrorTestCase(Conv1dTestCase): ...@@ -147,7 +147,7 @@ class Conv1dErrorTestCase(Conv1dTestCase):
self.paddle_nn_layer() self.paddle_nn_layer()
class Conv1dTypeErrorTestCase(Conv1dTestCase): class Conv1DTypeErrorTestCase(Conv1DTestCase):
def runTest(self): def runTest(self):
place = fluid.CPUPlace() place = fluid.CPUPlace()
with dg.guard(place): with dg.guard(place):
...@@ -156,27 +156,27 @@ class Conv1dTypeErrorTestCase(Conv1dTestCase): ...@@ -156,27 +156,27 @@ class Conv1dTypeErrorTestCase(Conv1dTestCase):
def add_cases(suite): def add_cases(suite):
suite.addTest(Conv1dTestCase(methodName='runTest')) suite.addTest(Conv1DTestCase(methodName='runTest'))
suite.addTest(Conv1dTestCase(methodName='runTest', stride=[1], dilation=2)) suite.addTest(Conv1DTestCase(methodName='runTest', stride=[1], dilation=2))
suite.addTest(Conv1dTestCase(methodName='runTest', stride=2, dilation=(1))) suite.addTest(Conv1DTestCase(methodName='runTest', stride=2, dilation=(1)))
suite.addTest( suite.addTest(
Conv1dTestCase( Conv1DTestCase(
methodName='runTest', padding="same", no_bias=True)) methodName='runTest', padding="same", no_bias=True))
suite.addTest( suite.addTest(
Conv1dTestCase( Conv1DTestCase(
methodName='runTest', filter_size=3, padding='valid')) methodName='runTest', filter_size=3, padding='valid'))
suite.addTest( suite.addTest(
Conv1dTestCase( Conv1DTestCase(
methodName='runTest', padding=2, data_format='NLC')) methodName='runTest', padding=2, data_format='NLC'))
suite.addTest(Conv1dTestCase(methodName='runTest', padding=[1])) suite.addTest(Conv1DTestCase(methodName='runTest', padding=[1]))
suite.addTest(Conv1dTestCase(methodName='runTest', padding=[1, 2])) suite.addTest(Conv1DTestCase(methodName='runTest', padding=[1, 2]))
suite.addTest(Conv1dTestCase(methodName='runTest', padding=2)) suite.addTest(Conv1DTestCase(methodName='runTest', padding=2))
suite.addTest(Conv1dTestCase(methodName='runTest')) suite.addTest(Conv1DTestCase(methodName='runTest'))
suite.addTest( suite.addTest(
Conv1dTestCase( Conv1DTestCase(
methodName='runTest', groups=2, padding="valid")) methodName='runTest', groups=2, padding="valid"))
suite.addTest( suite.addTest(
Conv1dTestCase( Conv1DTestCase(
methodName='runTest', methodName='runTest',
num_filters=6, num_filters=6,
num_channels=3, num_channels=3,
...@@ -187,22 +187,22 @@ def add_cases(suite): ...@@ -187,22 +187,22 @@ def add_cases(suite):
def add_error_cases(suite): def add_error_cases(suite):
suite.addTest( suite.addTest(
Conv1dTypeErrorTestCase( Conv1DTypeErrorTestCase(
methodName='runTest', padding_mode="reflect", padding="valid")) methodName='runTest', padding_mode="reflect", padding="valid"))
suite.addTest( suite.addTest(
Conv1dErrorTestCase( Conv1DErrorTestCase(
methodName='runTest', data_format="VALID")) methodName='runTest', data_format="VALID"))
suite.addTest( suite.addTest(
Conv1dErrorTestCase( Conv1DErrorTestCase(
methodName='runTest', padding_mode="VALID")) methodName='runTest', padding_mode="VALID"))
suite.addTest( suite.addTest(
Conv1dErrorTestCase( Conv1DErrorTestCase(
methodName='runTest', num_channels=5, groups=2)) methodName='runTest', num_channels=5, groups=2))
suite.addTest( suite.addTest(
Conv1dErrorTestCase( Conv1DErrorTestCase(
methodName='runTest', num_filters=8, num_channels=15, groups=3)) methodName='runTest', num_filters=8, num_channels=15, groups=3))
suite.addTest( suite.addTest(
Conv1dErrorTestCase( Conv1DErrorTestCase(
methodName='runTest', padding=[1, 2, 3, 4, 5])) methodName='runTest', padding=[1, 2, 3, 4, 5]))
......
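A minimal sketch of the renamed Conv1D, using argument forms from the cases above (string padding, NCL input):

import paddle

conv = paddle.nn.Conv1D(in_channels=3, out_channels=6, kernel_size=3, padding="same")
x = paddle.rand([4, 3, 16])   # NCL input
print(conv(x).shape)          # [4, 6, 16] -- "same" padding keeps the length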
...@@ -21,7 +21,7 @@ import paddle.fluid.initializer as I ...@@ -21,7 +21,7 @@ import paddle.fluid.initializer as I
import unittest import unittest
class ConvTranspose1dTestCase(unittest.TestCase): class Conv1DTransposeTestCase(unittest.TestCase):
def __init__(self, def __init__(self,
methodName='runTest', methodName='runTest',
batch_size=4, batch_size=4,
...@@ -38,7 +38,7 @@ class ConvTranspose1dTestCase(unittest.TestCase): ...@@ -38,7 +38,7 @@ class ConvTranspose1dTestCase(unittest.TestCase):
no_bias=False, no_bias=False,
data_format="NCL", data_format="NCL",
dtype="float32"): dtype="float32"):
super(ConvTranspose1dTestCase, self).__init__(methodName) super(Conv1DTransposeTestCase, self).__init__(methodName)
self.batch_size = batch_size self.batch_size = batch_size
self.in_channels = in_channels self.in_channels = in_channels
self.out_channels = out_channels self.out_channels = out_channels
...@@ -113,7 +113,7 @@ class ConvTranspose1dTestCase(unittest.TestCase): ...@@ -113,7 +113,7 @@ class ConvTranspose1dTestCase(unittest.TestCase):
def paddle_nn_layer(self): def paddle_nn_layer(self):
x_var = paddle.to_tensor(self.input) x_var = paddle.to_tensor(self.input)
conv = nn.ConvTranspose1d( conv = nn.Conv1DTranspose(
self.in_channels, self.in_channels,
self.out_channels, self.out_channels,
self.filter_size, self.filter_size,
...@@ -145,7 +145,7 @@ class ConvTranspose1dTestCase(unittest.TestCase): ...@@ -145,7 +145,7 @@ class ConvTranspose1dTestCase(unittest.TestCase):
self._test_equivalence(place) self._test_equivalence(place)
class ConvTranspose1dErrorTestCase(ConvTranspose1dTestCase): class Conv1DTransposeErrorTestCase(Conv1DTransposeTestCase):
def runTest(self): def runTest(self):
place = fluid.CPUPlace() place = fluid.CPUPlace()
with dg.guard(place): with dg.guard(place):
...@@ -154,68 +154,68 @@ class ConvTranspose1dErrorTestCase(ConvTranspose1dTestCase): ...@@ -154,68 +154,68 @@ class ConvTranspose1dErrorTestCase(ConvTranspose1dTestCase):
def add_cases(suite): def add_cases(suite):
suite.addTest(ConvTranspose1dTestCase(methodName='runTest')) suite.addTest(Conv1DTransposeTestCase(methodName='runTest'))
suite.addTest( suite.addTest(
ConvTranspose1dTestCase( Conv1DTransposeTestCase(
methodName='runTest', stride=[2], no_bias=True, dilation=2)) methodName='runTest', stride=[2], no_bias=True, dilation=2))
suite.addTest( suite.addTest(
ConvTranspose1dTestCase( Conv1DTransposeTestCase(
methodName='runTest', methodName='runTest',
filter_size=(3), filter_size=(3),
output_size=[36], output_size=[36],
stride=[2], stride=[2],
dilation=2)) dilation=2))
suite.addTest( suite.addTest(
ConvTranspose1dTestCase( Conv1DTransposeTestCase(
methodName='runTest', stride=2, dilation=(2))) methodName='runTest', stride=2, dilation=(2)))
suite.addTest( suite.addTest(
ConvTranspose1dTestCase( Conv1DTransposeTestCase(
methodName='runTest', padding="valid")) methodName='runTest', padding="valid"))
suite.addTest( suite.addTest(
ConvTranspose1dTestCase( Conv1DTransposeTestCase(
methodName='runTest', padding='valid')) methodName='runTest', padding='valid'))
suite.addTest( suite.addTest(
ConvTranspose1dTestCase( Conv1DTransposeTestCase(
methodName='runTest', filter_size=1, padding=3)) methodName='runTest', filter_size=1, padding=3))
suite.addTest(ConvTranspose1dTestCase(methodName='runTest', padding=[2])) suite.addTest(Conv1DTransposeTestCase(methodName='runTest', padding=[2]))
suite.addTest( suite.addTest(
ConvTranspose1dTestCase( Conv1DTransposeTestCase(
methodName='runTest', data_format="NLC")) methodName='runTest', data_format="NLC"))
suite.addTest( suite.addTest(
ConvTranspose1dTestCase( Conv1DTransposeTestCase(
methodName='runTest', groups=2, padding="valid")) methodName='runTest', groups=2, padding="valid"))
suite.addTest( suite.addTest(
ConvTranspose1dTestCase( Conv1DTransposeTestCase(
methodName='runTest', methodName='runTest',
out_channels=6, out_channels=6,
in_channels=3, in_channels=3,
groups=3, groups=3,
padding="valid")) padding="valid"))
suite.addTest( suite.addTest(
ConvTranspose1dTestCase( Conv1DTransposeTestCase(
methodName='runTest', methodName='runTest',
data_format="NLC", data_format="NLC",
spartial_shape=16, spartial_shape=16,
output_size=18)) output_size=18))
suite.addTest( suite.addTest(
ConvTranspose1dTestCase( Conv1DTransposeTestCase(
methodName='runTest', data_format="NLC", stride=3, methodName='runTest', data_format="NLC", stride=3,
output_padding=2)) output_padding=2))
suite.addTest(ConvTranspose1dTestCase(methodName='runTest', padding=[1, 2])) suite.addTest(Conv1DTransposeTestCase(methodName='runTest', padding=[1, 2]))
def add_error_cases(suite): def add_error_cases(suite):
suite.addTest( suite.addTest(
ConvTranspose1dErrorTestCase( Conv1DTransposeErrorTestCase(
methodName='runTest', data_format="not_valid")) methodName='runTest', data_format="not_valid"))
suite.addTest( suite.addTest(
ConvTranspose1dErrorTestCase( Conv1DTransposeErrorTestCase(
methodName='runTest', in_channels=5, groups=2)) methodName='runTest', in_channels=5, groups=2))
suite.addTest( suite.addTest(
ConvTranspose1dErrorTestCase( Conv1DTransposeErrorTestCase(
methodName='runTest', stride=2, output_padding=3)) methodName='runTest', stride=2, output_padding=3))
suite.addTest( suite.addTest(
ConvTranspose1dErrorTestCase( Conv1DTransposeErrorTestCase(
methodName='runTest', output_size="not_valid")) methodName='runTest', output_size="not_valid"))
......
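Note the word order: ConvTranspose1d becomes Conv1DTranspose, with the dimension suffix before "Transpose". A minimal sketch, assuming the constructor mirrors the test's call:

import paddle

conv_t = paddle.nn.Conv1DTranspose(in_channels=3, out_channels=6, kernel_size=3, stride=2)
x = paddle.rand([4, 3, 16])   # NCL input
print(conv_t(x).shape)        # [4, 6, 33] -- (16 - 1) * 2 + 3 with no padding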
...@@ -45,7 +45,7 @@ def create_test_padding_VALID_class(parent): ...@@ -45,7 +45,7 @@ def create_test_padding_VALID_class(parent):
globals()[cls_name] = TestPaddingVALIDCase globals()[cls_name] = TestPaddingVALIDCase
class TestConv2dFusionOp(OpTest): class TestConv2DFusionOp(OpTest):
def setUp(self): def setUp(self):
self.op_type = "conv2d_fusion" self.op_type = "conv2d_fusion"
self.exhaustive_search = False self.exhaustive_search = False
...@@ -157,28 +157,28 @@ class TestConv2dFusionOp(OpTest): ...@@ -157,28 +157,28 @@ class TestConv2dFusionOp(OpTest):
self.padding_algorithm = "EXPLICIT" self.padding_algorithm = "EXPLICIT"
class TestWithoutResidual(TestConv2dFusionOp): class TestWithoutResidual(TestConv2DFusionOp):
def init_residual(self): def init_residual(self):
self.add_residual_data = False self.add_residual_data = False
class TestIdentityActivation(TestConv2dFusionOp): class TestIdentityActivation(TestConv2DFusionOp):
def init_activation(self): def init_activation(self):
self.activation = 'identity' self.activation = 'identity'
class TestIdentityActivation(TestConv2dFusionOp): class TestIdentityActivation(TestConv2DFusionOp):
def init_activation(self): def init_activation(self):
self.activation = 'identity' self.activation = 'identity'
self.add_residual_data = False self.add_residual_data = False
class TestWithGroup(TestConv2dFusionOp): class TestWithGroup(TestConv2DFusionOp):
def init_group(self): def init_group(self):
self.groups = 3 self.groups = 3
class TestWithDilation(TestConv2dFusionOp): class TestWithDilation(TestConv2DFusionOp):
def init_test_case(self): def init_test_case(self):
self.pad = [0, 0] self.pad = [0, 0]
self.stride = [1, 1] self.stride = [1, 1]
...@@ -194,12 +194,12 @@ class TestWithDilation(TestConv2dFusionOp): ...@@ -194,12 +194,12 @@ class TestWithDilation(TestConv2dFusionOp):
self.groups = 3 self.groups = 3
class TestCUDNNExhaustiveSearch(TestConv2dFusionOp): class TestCUDNNExhaustiveSearch(TestConv2DFusionOp):
def set_search_method(self): def set_search_method(self):
self.exhaustive_search = True self.exhaustive_search = True
class TestMultipleOutputs(TestConv2dFusionOp): class TestMultipleOutputs(TestConv2DFusionOp):
def init_test_case(self): def init_test_case(self):
self.pad = [1, 1] self.pad = [1, 1]
self.stride = [1, 1] self.stride = [1, 1]
...@@ -215,13 +215,13 @@ class TestMultipleOutputs(TestConv2dFusionOp): ...@@ -215,13 +215,13 @@ class TestMultipleOutputs(TestConv2dFusionOp):
self.outputs['Outputs'] = [('out1', out1), ('out2', out2)] self.outputs['Outputs'] = [('out1', out1), ('out2', out2)]
class TestAsyPadding(TestConv2dFusionOp): class TestAsyPadding(TestConv2DFusionOp):
def init_paddings(self): def init_paddings(self):
self.pad = [0, 0, 1, 2] self.pad = [0, 0, 1, 2]
self.padding_algorithm = "EXPLICIT" self.padding_algorithm = "EXPLICIT"
class TestWithPad_AsyPadding(TestConv2dFusionOp): class TestWithPad_AsyPadding(TestConv2DFusionOp):
def init_test_case(self): def init_test_case(self):
self.stride = [1, 1] self.stride = [1, 1]
self.input_size = [2, 3, 10, 10] # NCHW self.input_size = [2, 3, 10, 10] # NCHW
...@@ -234,7 +234,7 @@ class TestWithPad_AsyPadding(TestConv2dFusionOp): ...@@ -234,7 +234,7 @@ class TestWithPad_AsyPadding(TestConv2dFusionOp):
self.padding_algorithm = "EXPLICIT" self.padding_algorithm = "EXPLICIT"
class TestWithStride_AsyPadding(TestConv2dFusionOp): class TestWithStride_AsyPadding(TestConv2DFusionOp):
def init_test_case(self): def init_test_case(self):
self.stride = [2, 2] self.stride = [2, 2]
self.input_size = [2, 3, 6, 6] # NCHW self.input_size = [2, 3, 6, 6] # NCHW
...@@ -247,7 +247,7 @@ class TestWithStride_AsyPadding(TestConv2dFusionOp): ...@@ -247,7 +247,7 @@ class TestWithStride_AsyPadding(TestConv2dFusionOp):
self.padding_algorithm = "EXPLICIT" self.padding_algorithm = "EXPLICIT"
class TestWith1x1_AsyPadding(TestConv2dFusionOp): class TestWith1x1_AsyPadding(TestConv2DFusionOp):
def init_test_case(self): def init_test_case(self):
self.stride = [1, 1] self.stride = [1, 1]
self.input_size = [2, 3, 5, 5] # NCHW self.input_size = [2, 3, 5, 5] # NCHW
...@@ -263,12 +263,12 @@ class TestWith1x1_AsyPadding(TestConv2dFusionOp): ...@@ -263,12 +263,12 @@ class TestWith1x1_AsyPadding(TestConv2dFusionOp):
self.padding_algorithm = "EXPLICIT" self.padding_algorithm = "EXPLICIT"
class TestWithGroup_AsyPadding(TestConv2dFusionOp): class TestWithGroup_AsyPadding(TestConv2DFusionOp):
def init_group(self): def init_group(self):
self.groups = 3 self.groups = 3
class TestWithDepthWise3x3_AsyPadding(TestConv2dFusionOp): class TestWithDepthWise3x3_AsyPadding(TestConv2DFusionOp):
def init_test_case(self): def init_test_case(self):
self.stride = [1, 1] self.stride = [1, 1]
self.input_size = [3, 4, 10, 10] # NCHW self.input_size = [3, 4, 10, 10] # NCHW
...@@ -287,7 +287,7 @@ class TestWithDepthWise3x3_AsyPadding(TestConv2dFusionOp): ...@@ -287,7 +287,7 @@ class TestWithDepthWise3x3_AsyPadding(TestConv2dFusionOp):
self.padding_algorithm = "EXPLICIT" self.padding_algorithm = "EXPLICIT"
class TestWithDepthWise5x5_AsyPadding(TestConv2dFusionOp): class TestWithDepthWise5x5_AsyPadding(TestConv2DFusionOp):
def init_test_case(self): def init_test_case(self):
self.stride = [1, 1] self.stride = [1, 1]
self.input_size = [2, 4, 10, 10] # NCHW self.input_size = [2, 4, 10, 10] # NCHW
...@@ -303,7 +303,7 @@ class TestWithDepthWise5x5_AsyPadding(TestConv2dFusionOp): ...@@ -303,7 +303,7 @@ class TestWithDepthWise5x5_AsyPadding(TestConv2dFusionOp):
self.padding_algorithm = "EXPLICIT" self.padding_algorithm = "EXPLICIT"
class TestWithDepthWise7x7_AsyPadding(TestConv2dFusionOp): class TestWithDepthWise7x7_AsyPadding(TestConv2DFusionOp):
def init_test_case(self): def init_test_case(self):
self.stride = [2, 2] self.stride = [2, 2]
self.input_size = [2, 8, 10, 10] # NCHW self.input_size = [2, 8, 10, 10] # NCHW
...@@ -319,7 +319,7 @@ class TestWithDepthWise7x7_AsyPadding(TestConv2dFusionOp): ...@@ -319,7 +319,7 @@ class TestWithDepthWise7x7_AsyPadding(TestConv2dFusionOp):
self.padding_algorithm = "EXPLICIT" self.padding_algorithm = "EXPLICIT"
class TestWithDilation_AsyPadding(TestConv2dFusionOp): class TestWithDilation_AsyPadding(TestConv2DFusionOp):
def init_test_case(self): def init_test_case(self):
self.stride = [1, 1] self.stride = [1, 1]
self.input_size = [2, 3, 10, 10] # NCHW self.input_size = [2, 3, 10, 10] # NCHW
...@@ -338,7 +338,7 @@ class TestWithDilation_AsyPadding(TestConv2dFusionOp): ...@@ -338,7 +338,7 @@ class TestWithDilation_AsyPadding(TestConv2dFusionOp):
self.padding_algorithm = "EXPLICIT" self.padding_algorithm = "EXPLICIT"
class TestWithInput1x1Filter1x1_AsyPadding(TestConv2dFusionOp): class TestWithInput1x1Filter1x1_AsyPadding(TestConv2DFusionOp):
def init_test_case(self): def init_test_case(self):
self.stride = [1, 1] self.stride = [1, 1]
self.input_size = [2, 3, 1, 1] # NCHW self.input_size = [2, 3, 1, 1] # NCHW
......
...@@ -166,7 +166,7 @@ class Conv2DTestCase(unittest.TestCase): ...@@ -166,7 +166,7 @@ class Conv2DTestCase(unittest.TestCase):
def paddle_nn_layer(self): def paddle_nn_layer(self):
x_var = dg.to_variable(self.input) x_var = dg.to_variable(self.input)
conv = nn.Conv2d( conv = nn.Conv2D(
self.num_channels, self.num_channels,
self.num_filters, self.num_filters,
self.filter_size, self.filter_size,
......
...@@ -289,7 +289,7 @@ def create_test_cudnn_padding_VALID_class(parent): ...@@ -289,7 +289,7 @@ def create_test_cudnn_padding_VALID_class(parent):
globals()[cls_name] = TestCUDNNPaddingVALIDCase globals()[cls_name] = TestCUDNNPaddingVALIDCase
class TestConv2dOp(OpTest): class TestConv2DOp(OpTest):
def setUp(self): def setUp(self):
self.op_type = "conv2d" self.op_type = "conv2d"
self.use_cudnn = False self.use_cudnn = False
...@@ -412,7 +412,7 @@ class TestConv2dOp(OpTest): ...@@ -412,7 +412,7 @@ class TestConv2dOp(OpTest):
pass pass
class TestWithPad(TestConv2dOp): class TestWithPad(TestConv2DOp):
def init_test_case(self): def init_test_case(self):
self.pad = [1, 1] self.pad = [1, 1]
self.stride = [1, 1] self.stride = [1, 1]
...@@ -422,7 +422,7 @@ class TestWithPad(TestConv2dOp): ...@@ -422,7 +422,7 @@ class TestWithPad(TestConv2dOp):
self.filter_size = [6, f_c, 3, 3] self.filter_size = [6, f_c, 3, 3]
class TestWithStride(TestConv2dOp): class TestWithStride(TestConv2DOp):
def init_test_case(self): def init_test_case(self):
self.pad = [1, 1] self.pad = [1, 1]
self.stride = [2, 2] self.stride = [2, 2]
...@@ -432,7 +432,7 @@ class TestWithStride(TestConv2dOp): ...@@ -432,7 +432,7 @@ class TestWithStride(TestConv2dOp):
self.filter_size = [6, f_c, 3, 3] self.filter_size = [6, f_c, 3, 3]
class TestWithGroup(TestConv2dOp): class TestWithGroup(TestConv2DOp):
def init_test_case(self): def init_test_case(self):
self.pad = [0, 0] self.pad = [0, 0]
self.stride = [1, 1] self.stride = [1, 1]
...@@ -443,7 +443,7 @@ class TestWithGroup(TestConv2dOp): ...@@ -443,7 +443,7 @@ class TestWithGroup(TestConv2dOp):
self.filter_size = [18, f_c, 3, 3] self.filter_size = [18, f_c, 3, 3]
class TestWith1x1(TestConv2dOp): class TestWith1x1(TestConv2DOp):
def init_test_case(self): def init_test_case(self):
self.pad = [0, 0] self.pad = [0, 0]
self.stride = [1, 1] self.stride = [1, 1]
...@@ -456,7 +456,7 @@ class TestWith1x1(TestConv2dOp): ...@@ -456,7 +456,7 @@ class TestWith1x1(TestConv2dOp):
self.groups = 3 self.groups = 3
class TestWithDepthWise3x3(TestConv2dOp): class TestWithDepthWise3x3(TestConv2DOp):
def init_test_case(self): def init_test_case(self):
self.pad = [1, 1] self.pad = [1, 1]
self.stride = [1, 1] self.stride = [1, 1]
...@@ -472,7 +472,7 @@ class TestWithDepthWise3x3(TestConv2dOp): ...@@ -472,7 +472,7 @@ class TestWithDepthWise3x3(TestConv2dOp):
self.groups = 4 self.groups = 4
class TestWithDepthWise5x5(TestConv2dOp): class TestWithDepthWise5x5(TestConv2DOp):
def init_test_case(self): def init_test_case(self):
self.pad = [0, 0] self.pad = [0, 0]
self.stride = [1, 1] self.stride = [1, 1]
...@@ -485,7 +485,7 @@ class TestWithDepthWise5x5(TestConv2dOp): ...@@ -485,7 +485,7 @@ class TestWithDepthWise5x5(TestConv2dOp):
self.groups = 4 self.groups = 4
class TestWithDepthWise7x7(TestConv2dOp): class TestWithDepthWise7x7(TestConv2DOp):
def init_test_case(self): def init_test_case(self):
self.pad = [1, 1] self.pad = [1, 1]
self.stride = [2, 2] self.stride = [2, 2]
...@@ -498,7 +498,7 @@ class TestWithDepthWise7x7(TestConv2dOp): ...@@ -498,7 +498,7 @@ class TestWithDepthWise7x7(TestConv2dOp):
self.groups = 8 self.groups = 8
class TestWithDilation(TestConv2dOp): class TestWithDilation(TestConv2DOp):
def init_test_case(self): def init_test_case(self):
self.pad = [0, 0] self.pad = [0, 0]
self.stride = [1, 1] self.stride = [1, 1]
...@@ -514,7 +514,7 @@ class TestWithDilation(TestConv2dOp): ...@@ -514,7 +514,7 @@ class TestWithDilation(TestConv2dOp):
self.groups = 3 self.groups = 3
class TestWithInput1x1Filter1x1(TestConv2dOp): class TestWithInput1x1Filter1x1(TestConv2DOp):
def init_test_case(self): def init_test_case(self):
self.pad = [0, 0] self.pad = [0, 0]
self.stride = [1, 1] self.stride = [1, 1]
...@@ -527,18 +527,18 @@ class TestWithInput1x1Filter1x1(TestConv2dOp): ...@@ -527,18 +527,18 @@ class TestWithInput1x1Filter1x1(TestConv2dOp):
self.groups = 3 self.groups = 3
#----------------Conv2dCUDNN---------------- #----------------Conv2DCUDNN----------------
create_test_cudnn_class(TestConv2dOp) create_test_cudnn_class(TestConv2DOp)
create_test_cudnn_class(TestWithPad) create_test_cudnn_class(TestWithPad)
create_test_cudnn_class(TestWithStride) create_test_cudnn_class(TestWithStride)
create_test_cudnn_class(TestWithGroup) create_test_cudnn_class(TestWithGroup)
create_test_cudnn_class(TestWith1x1) create_test_cudnn_class(TestWith1x1)
create_test_cudnn_class(TestWithInput1x1Filter1x1) create_test_cudnn_class(TestWithInput1x1Filter1x1)
#----------------Conv2dCUDNN fp16---------------- #----------------Conv2DCUDNN fp16----------------
create_test_cudnn_fp16_class(TestConv2dOp, grad_check=False) create_test_cudnn_fp16_class(TestConv2DOp, grad_check=False)
create_test_cudnn_fp16_class(TestWithPad, grad_check=False) create_test_cudnn_fp16_class(TestWithPad, grad_check=False)
create_test_cudnn_fp16_class(TestWithStride, grad_check=False) create_test_cudnn_fp16_class(TestWithStride, grad_check=False)
create_test_cudnn_fp16_class(TestWithGroup, grad_check=False) create_test_cudnn_fp16_class(TestWithGroup, grad_check=False)
...@@ -548,7 +548,7 @@ create_test_cudnn_fp16_class(TestWithInput1x1Filter1x1, grad_check=False) ...@@ -548,7 +548,7 @@ create_test_cudnn_fp16_class(TestWithInput1x1Filter1x1, grad_check=False)
#----------------TestDepthwiseConv ----- #----------------TestDepthwiseConv -----
class TestDepthwiseConv(TestConv2dOp): class TestDepthwiseConv(TestConv2DOp):
def init_test_case(self): def init_test_case(self):
self.use_cuda = True self.use_cuda = True
self.pad = [1, 1] self.pad = [1, 1]
...@@ -561,7 +561,7 @@ class TestDepthwiseConv(TestConv2dOp): ...@@ -561,7 +561,7 @@ class TestDepthwiseConv(TestConv2dOp):
self.op_type = "depthwise_conv2d" self.op_type = "depthwise_conv2d"
class TestDepthwiseConv2(TestConv2dOp): class TestDepthwiseConv2(TestConv2DOp):
def init_test_case(self): def init_test_case(self):
self.use_cuda = True self.use_cuda = True
self.pad = [1, 1] self.pad = [1, 1]
...@@ -574,7 +574,7 @@ class TestDepthwiseConv2(TestConv2dOp): ...@@ -574,7 +574,7 @@ class TestDepthwiseConv2(TestConv2dOp):
self.op_type = "depthwise_conv2d" self.op_type = "depthwise_conv2d"
class TestDepthwiseConv3(TestConv2dOp): class TestDepthwiseConv3(TestConv2DOp):
def init_test_case(self): def init_test_case(self):
self.use_cuda = True self.use_cuda = True
self.pad = [1, 1] self.pad = [1, 1]
...@@ -587,7 +587,7 @@ class TestDepthwiseConv3(TestConv2dOp): ...@@ -587,7 +587,7 @@ class TestDepthwiseConv3(TestConv2dOp):
self.op_type = "depthwise_conv2d" self.op_type = "depthwise_conv2d"
class TestDepthwiseConvWithDilation(TestConv2dOp): class TestDepthwiseConvWithDilation(TestConv2DOp):
def init_test_case(self): def init_test_case(self):
self.use_cuda = True self.use_cuda = True
self.pad = [1, 1] self.pad = [1, 1]
...@@ -601,7 +601,7 @@ class TestDepthwiseConvWithDilation(TestConv2dOp): ...@@ -601,7 +601,7 @@ class TestDepthwiseConvWithDilation(TestConv2dOp):
self.op_type = "depthwise_conv2d" self.op_type = "depthwise_conv2d"
class TestDepthwiseConvWithDilation2(TestConv2dOp): class TestDepthwiseConvWithDilation2(TestConv2DOp):
def init_test_case(self): def init_test_case(self):
self.use_cuda = True self.use_cuda = True
self.pad = [1, 1] self.pad = [1, 1]
...@@ -615,7 +615,7 @@ class TestDepthwiseConvWithDilation2(TestConv2dOp): ...@@ -615,7 +615,7 @@ class TestDepthwiseConvWithDilation2(TestConv2dOp):
self.op_type = "depthwise_conv2d" self.op_type = "depthwise_conv2d"
class TestDepthwiseConvandFuse(TestConv2dOp): class TestDepthwiseConvandFuse(TestConv2DOp):
def init_test_case(self): def init_test_case(self):
self.fuse_relu_before_depthwise_conv = True self.fuse_relu_before_depthwise_conv = True
self.use_cuda = True self.use_cuda = True
...@@ -629,7 +629,7 @@ class TestDepthwiseConvandFuse(TestConv2dOp): ...@@ -629,7 +629,7 @@ class TestDepthwiseConvandFuse(TestConv2dOp):
self.op_type = "depthwise_conv2d" self.op_type = "depthwise_conv2d"
class TestDepthwiseConv2andFuse(TestConv2dOp): class TestDepthwiseConv2andFuse(TestConv2DOp):
def init_test_case(self): def init_test_case(self):
self.fuse_relu_before_depthwise_conv = True self.fuse_relu_before_depthwise_conv = True
self.use_cuda = True self.use_cuda = True
...@@ -643,7 +643,7 @@ class TestDepthwiseConv2andFuse(TestConv2dOp): ...@@ -643,7 +643,7 @@ class TestDepthwiseConv2andFuse(TestConv2dOp):
self.op_type = "depthwise_conv2d" self.op_type = "depthwise_conv2d"
class TestDepthwiseConv3andFuse(TestConv2dOp): class TestDepthwiseConv3andFuse(TestConv2DOp):
def init_test_case(self): def init_test_case(self):
self.fuse_relu_before_depthwise_conv = True self.fuse_relu_before_depthwise_conv = True
self.use_cuda = True self.use_cuda = True
...@@ -657,7 +657,7 @@ class TestDepthwiseConv3andFuse(TestConv2dOp): ...@@ -657,7 +657,7 @@ class TestDepthwiseConv3andFuse(TestConv2dOp):
self.op_type = "depthwise_conv2d" self.op_type = "depthwise_conv2d"
class TestDepthwiseConvWithDilationandFuse(TestConv2dOp): class TestDepthwiseConvWithDilationandFuse(TestConv2DOp):
def init_test_case(self): def init_test_case(self):
self.fuse_relu_before_depthwise_conv = True self.fuse_relu_before_depthwise_conv = True
self.use_cuda = True self.use_cuda = True
...@@ -672,7 +672,7 @@ class TestDepthwiseConvWithDilationandFuse(TestConv2dOp): ...@@ -672,7 +672,7 @@ class TestDepthwiseConvWithDilationandFuse(TestConv2dOp):
self.op_type = "depthwise_conv2d" self.op_type = "depthwise_conv2d"
class TestDepthwiseConvWithDilation2andFuse(TestConv2dOp): class TestDepthwiseConvWithDilation2andFuse(TestConv2DOp):
def init_test_case(self): def init_test_case(self):
self.fuse_relu_before_depthwise_conv = True self.fuse_relu_before_depthwise_conv = True
self.use_cuda = True self.use_cuda = True
...@@ -687,13 +687,13 @@ class TestDepthwiseConvWithDilation2andFuse(TestConv2dOp): ...@@ -687,13 +687,13 @@ class TestDepthwiseConvWithDilation2andFuse(TestConv2dOp):
self.op_type = "depthwise_conv2d" self.op_type = "depthwise_conv2d"
class TestCUDNNExhaustiveSearch(TestConv2dOp): class TestCUDNNExhaustiveSearch(TestConv2DOp):
def init_kernel_type(self): def init_kernel_type(self):
self.use_cudnn = True self.use_cudnn = True
self.exhaustive_search = True self.exhaustive_search = True
class TestConv2dOpError(unittest.TestCase): class TestConv2DOpError(unittest.TestCase):
def test_errors(self): def test_errors(self):
with program_guard(Program(), Program()): with program_guard(Program(), Program()):
...@@ -724,7 +724,7 @@ class TestConv2dOpError(unittest.TestCase): ...@@ -724,7 +724,7 @@ class TestConv2dOpError(unittest.TestCase):
# ---- test asymmetric padding ---- # ---- test asymmetric padding ----
class TestConv2dOp_v2(OpTest): class TestConv2DOp_v2(OpTest):
def setUp(self): def setUp(self):
self.op_type = "conv2d" self.op_type = "conv2d"
self.use_cudnn = False self.use_cudnn = False
...@@ -854,13 +854,13 @@ class TestConv2dOp_v2(OpTest): ...@@ -854,13 +854,13 @@ class TestConv2dOp_v2(OpTest):
pass pass
class TestConv2dOp_AsyPadding(TestConv2dOp_v2): class TestConv2DOp_AsyPadding(TestConv2DOp_v2):
def init_paddings(self): def init_paddings(self):
self.pad = [0, 0, 1, 2] self.pad = [0, 0, 1, 2]
self.padding_algorithm = "EXPLICIT" self.padding_algorithm = "EXPLICIT"
class TestWithPad_AsyPadding(TestConv2dOp_v2): class TestWithPad_AsyPadding(TestConv2DOp_v2):
def init_test_case(self): def init_test_case(self):
self.stride = [1, 1] self.stride = [1, 1]
self.input_size = [2, 3, 5, 5] # NCHW self.input_size = [2, 3, 5, 5] # NCHW
...@@ -873,7 +873,7 @@ class TestWithPad_AsyPadding(TestConv2dOp_v2): ...@@ -873,7 +873,7 @@ class TestWithPad_AsyPadding(TestConv2dOp_v2):
self.padding_algorithm = "EXPLICIT" self.padding_algorithm = "EXPLICIT"
class TestWithStride_AsyPadding(TestConv2dOp_v2): class TestWithStride_AsyPadding(TestConv2DOp_v2):
def init_test_case(self): def init_test_case(self):
self.stride = [2, 2] self.stride = [2, 2]
self.input_size = [2, 3, 6, 6] # NCHW self.input_size = [2, 3, 6, 6] # NCHW
...@@ -886,7 +886,7 @@ class TestWithStride_AsyPadding(TestConv2dOp_v2): ...@@ -886,7 +886,7 @@ class TestWithStride_AsyPadding(TestConv2dOp_v2):
self.padding_algorithm = "EXPLICIT" self.padding_algorithm = "EXPLICIT"
class TestWithGroup_AsyPadding(TestConv2dOp_v2): class TestWithGroup_AsyPadding(TestConv2DOp_v2):
def init_test_case(self): def init_test_case(self):
self.pad = [0, 0] self.pad = [0, 0]
self.stride = [1, 2] self.stride = [1, 2]
...@@ -897,7 +897,7 @@ class TestWithGroup_AsyPadding(TestConv2dOp_v2): ...@@ -897,7 +897,7 @@ class TestWithGroup_AsyPadding(TestConv2dOp_v2):
self.filter_size = [24, f_c, 4, 3] self.filter_size = [24, f_c, 4, 3]
class TestWith1x1_AsyPadding(TestConv2dOp_v2): class TestWith1x1_AsyPadding(TestConv2DOp_v2):
def init_test_case(self): def init_test_case(self):
self.stride = [1, 1] self.stride = [1, 1]
self.input_size = [2, 3, 5, 5] # NCHW self.input_size = [2, 3, 5, 5] # NCHW
...@@ -913,7 +913,7 @@ class TestWith1x1_AsyPadding(TestConv2dOp_v2): ...@@ -913,7 +913,7 @@ class TestWith1x1_AsyPadding(TestConv2dOp_v2):
self.padding_algorithm = "EXPLICIT" self.padding_algorithm = "EXPLICIT"
class TestWithDepthWise3x3_AsyPadding(TestConv2dOp_v2): class TestWithDepthWise3x3_AsyPadding(TestConv2DOp_v2):
def init_test_case(self): def init_test_case(self):
self.stride = [1, 1] self.stride = [1, 1]
self.input_size = [3, 4, 10, 10] # NCHW self.input_size = [3, 4, 10, 10] # NCHW
...@@ -932,7 +932,7 @@ class TestWithDepthWise3x3_AsyPadding(TestConv2dOp_v2): ...@@ -932,7 +932,7 @@ class TestWithDepthWise3x3_AsyPadding(TestConv2dOp_v2):
self.padding_algorithm = "EXPLICIT" self.padding_algorithm = "EXPLICIT"
class TestWithDepthWise5x5_AsyPadding(TestConv2dOp_v2): class TestWithDepthWise5x5_AsyPadding(TestConv2DOp_v2):
def init_test_case(self): def init_test_case(self):
self.stride = [1, 1] self.stride = [1, 1]
self.input_size = [2, 4, 10, 10] # NCHW self.input_size = [2, 4, 10, 10] # NCHW
...@@ -948,7 +948,7 @@ class TestWithDepthWise5x5_AsyPadding(TestConv2dOp_v2): ...@@ -948,7 +948,7 @@ class TestWithDepthWise5x5_AsyPadding(TestConv2dOp_v2):
self.padding_algorithm = "EXPLICIT" self.padding_algorithm = "EXPLICIT"
class TestWithDepthWise7x7_AsyPadding(TestConv2dOp_v2): class TestWithDepthWise7x7_AsyPadding(TestConv2DOp_v2):
def init_test_case(self): def init_test_case(self):
self.stride = [2, 2] self.stride = [2, 2]
self.input_size = [2, 8, 10, 10] # NCHW self.input_size = [2, 8, 10, 10] # NCHW
...@@ -964,7 +964,7 @@ class TestWithDepthWise7x7_AsyPadding(TestConv2dOp_v2): ...@@ -964,7 +964,7 @@ class TestWithDepthWise7x7_AsyPadding(TestConv2dOp_v2):
self.padding_algorithm = "EXPLICIT" self.padding_algorithm = "EXPLICIT"
class TestWithDilation_AsyPadding(TestConv2dOp_v2): class TestWithDilation_AsyPadding(TestConv2DOp_v2):
def init_test_case(self): def init_test_case(self):
self.stride = [1, 1] self.stride = [1, 1]
self.input_size = [2, 3, 10, 10] # NCHW self.input_size = [2, 3, 10, 10] # NCHW
...@@ -983,7 +983,7 @@ class TestWithDilation_AsyPadding(TestConv2dOp_v2): ...@@ -983,7 +983,7 @@ class TestWithDilation_AsyPadding(TestConv2dOp_v2):
self.padding_algorithm = "EXPLICIT" self.padding_algorithm = "EXPLICIT"
class TestWithInput1x1Filter1x1_AsyPadding(TestConv2dOp_v2): class TestWithInput1x1Filter1x1_AsyPadding(TestConv2DOp_v2):
def init_test_case(self): def init_test_case(self):
self.stride = [1, 1] self.stride = [1, 1]
self.input_size = [40, 3, 1, 1] # NCHW self.input_size = [40, 3, 1, 1] # NCHW
...@@ -999,7 +999,7 @@ class TestWithInput1x1Filter1x1_AsyPadding(TestConv2dOp_v2): ...@@ -999,7 +999,7 @@ class TestWithInput1x1Filter1x1_AsyPadding(TestConv2dOp_v2):
self.padding_algorithm = "EXPLICIT" self.padding_algorithm = "EXPLICIT"
create_test_cudnn_class(TestConv2dOp_AsyPadding) create_test_cudnn_class(TestConv2DOp_AsyPadding)
create_test_cudnn_class(TestWithPad_AsyPadding) create_test_cudnn_class(TestWithPad_AsyPadding)
create_test_cudnn_class(TestWithStride_AsyPadding) create_test_cudnn_class(TestWithStride_AsyPadding)
create_test_cudnn_class(TestWithGroup_AsyPadding) create_test_cudnn_class(TestWithGroup_AsyPadding)
...@@ -1007,7 +1007,7 @@ create_test_cudnn_class(TestWith1x1_AsyPadding) ...@@ -1007,7 +1007,7 @@ create_test_cudnn_class(TestWith1x1_AsyPadding)
create_test_cudnn_class(TestWithInput1x1Filter1x1_AsyPadding) create_test_cudnn_class(TestWithInput1x1Filter1x1_AsyPadding)
class TestDepthwiseConv_AsyPadding(TestConv2dOp_v2): class TestDepthwiseConv_AsyPadding(TestConv2DOp_v2):
def init_test_case(self): def init_test_case(self):
self.use_cuda = True self.use_cuda = True
self.stride = [2, 2] self.stride = [2, 2]
...@@ -1023,7 +1023,7 @@ class TestDepthwiseConv_AsyPadding(TestConv2dOp_v2): ...@@ -1023,7 +1023,7 @@ class TestDepthwiseConv_AsyPadding(TestConv2dOp_v2):
self.padding_algorithm = "EXPLICIT" self.padding_algorithm = "EXPLICIT"
class TestDepthwiseConv2_AsyPadding(TestConv2dOp_v2): class TestDepthwiseConv2_AsyPadding(TestConv2DOp_v2):
def init_test_case(self): def init_test_case(self):
self.use_cuda = True self.use_cuda = True
self.stride = [1, 1] self.stride = [1, 1]
...@@ -1039,7 +1039,7 @@ class TestDepthwiseConv2_AsyPadding(TestConv2dOp_v2): ...@@ -1039,7 +1039,7 @@ class TestDepthwiseConv2_AsyPadding(TestConv2dOp_v2):
self.padding_algorithm = "EXPLICIT" self.padding_algorithm = "EXPLICIT"
class TestDepthwiseConv3_AsyPadding(TestConv2dOp_v2): class TestDepthwiseConv3_AsyPadding(TestConv2DOp_v2):
def init_test_case(self): def init_test_case(self):
self.use_cuda = True self.use_cuda = True
self.stride = [1, 1] self.stride = [1, 1]
...@@ -1055,7 +1055,7 @@ class TestDepthwiseConv3_AsyPadding(TestConv2dOp_v2): ...@@ -1055,7 +1055,7 @@ class TestDepthwiseConv3_AsyPadding(TestConv2dOp_v2):
self.padding_algorithm = "EXPLICIT" self.padding_algorithm = "EXPLICIT"
class TestDepthwiseConvWithDilation_AsyPadding(TestConv2dOp_v2): class TestDepthwiseConvWithDilation_AsyPadding(TestConv2DOp_v2):
def init_test_case(self): def init_test_case(self):
self.use_cuda = True self.use_cuda = True
self.pad = [1, 1] self.pad = [1, 1]
...@@ -1073,7 +1073,7 @@ class TestDepthwiseConvWithDilation_AsyPadding(TestConv2dOp_v2): ...@@ -1073,7 +1073,7 @@ class TestDepthwiseConvWithDilation_AsyPadding(TestConv2dOp_v2):
self.padding_algorithm = "EXPLICIT" self.padding_algorithm = "EXPLICIT"
class TestDepthwiseConvWithDilation2_AsyPadding(TestConv2dOp_v2): class TestDepthwiseConvWithDilation2_AsyPadding(TestConv2DOp_v2):
def init_test_case(self): def init_test_case(self):
self.use_cuda = True self.use_cuda = True
self.pad = [1, 1] self.pad = [1, 1]
...@@ -1091,7 +1091,7 @@ class TestDepthwiseConvWithDilation2_AsyPadding(TestConv2dOp_v2): ...@@ -1091,7 +1091,7 @@ class TestDepthwiseConvWithDilation2_AsyPadding(TestConv2dOp_v2):
self.padding_algorithm = "EXPLICIT" self.padding_algorithm = "EXPLICIT"
class TestDepthwiseConvandFuse_AsyPadding(TestConv2dOp_v2): class TestDepthwiseConvandFuse_AsyPadding(TestConv2DOp_v2):
def init_test_case(self): def init_test_case(self):
self.fuse_relu_before_depthwise_conv = True self.fuse_relu_before_depthwise_conv = True
self.use_cuda = True self.use_cuda = True
...@@ -1109,7 +1109,7 @@ class TestDepthwiseConvandFuse_AsyPadding(TestConv2dOp_v2): ...@@ -1109,7 +1109,7 @@ class TestDepthwiseConvandFuse_AsyPadding(TestConv2dOp_v2):
self.padding_algorithm = "EXPLICIT" self.padding_algorithm = "EXPLICIT"
class TestDepthwiseConv2andFuse_AsyPadding(TestConv2dOp_v2): class TestDepthwiseConv2andFuse_AsyPadding(TestConv2DOp_v2):
def init_test_case(self): def init_test_case(self):
self.fuse_relu_before_depthwise_conv = True self.fuse_relu_before_depthwise_conv = True
self.use_cuda = True self.use_cuda = True
...@@ -1127,7 +1127,7 @@ class TestDepthwiseConv2andFuse_AsyPadding(TestConv2dOp_v2): ...@@ -1127,7 +1127,7 @@ class TestDepthwiseConv2andFuse_AsyPadding(TestConv2dOp_v2):
self.padding_algorithm = "EXPLICIT" self.padding_algorithm = "EXPLICIT"
class TestDepthwiseConv3andFuse_AsyPadding(TestConv2dOp_v2): class TestDepthwiseConv3andFuse_AsyPadding(TestConv2DOp_v2):
def init_test_case(self): def init_test_case(self):
self.fuse_relu_before_depthwise_conv = True self.fuse_relu_before_depthwise_conv = True
self.use_cuda = True self.use_cuda = True
...@@ -1145,7 +1145,7 @@ class TestDepthwiseConv3andFuse_AsyPadding(TestConv2dOp_v2): ...@@ -1145,7 +1145,7 @@ class TestDepthwiseConv3andFuse_AsyPadding(TestConv2dOp_v2):
self.padding_algorithm = "EXPLICIT" self.padding_algorithm = "EXPLICIT"
class TestDepthwiseConvWithDilationandFuse_AsyPadding(TestConv2dOp_v2): class TestDepthwiseConvWithDilationandFuse_AsyPadding(TestConv2DOp_v2):
def init_test_case(self): def init_test_case(self):
self.fuse_relu_before_depthwise_conv = True self.fuse_relu_before_depthwise_conv = True
self.use_cuda = True self.use_cuda = True
...@@ -1164,7 +1164,7 @@ class TestDepthwiseConvWithDilationandFuse_AsyPadding(TestConv2dOp_v2): ...@@ -1164,7 +1164,7 @@ class TestDepthwiseConvWithDilationandFuse_AsyPadding(TestConv2dOp_v2):
self.padding_algorithm = "EXPLICIT" self.padding_algorithm = "EXPLICIT"
class TestDepthwiseConvWithDilation2andFuse_AsyPadding(TestConv2dOp_v2): class TestDepthwiseConvWithDilation2andFuse_AsyPadding(TestConv2DOp_v2):
def init_test_case(self): def init_test_case(self):
self.fuse_relu_before_depthwise_conv = True self.fuse_relu_before_depthwise_conv = True
self.use_cuda = True self.use_cuda = True
...@@ -1184,25 +1184,25 @@ class TestDepthwiseConvWithDilation2andFuse_AsyPadding(TestConv2dOp_v2): ...@@ -1184,25 +1184,25 @@ class TestDepthwiseConvWithDilation2andFuse_AsyPadding(TestConv2dOp_v2):
#---------- test SAME VALID ----------- #---------- test SAME VALID -----------
create_test_padding_SAME_class(TestConv2dOp_AsyPadding) create_test_padding_SAME_class(TestConv2DOp_AsyPadding)
create_test_padding_SAME_class(TestWithPad_AsyPadding) create_test_padding_SAME_class(TestWithPad_AsyPadding)
create_test_padding_SAME_class(TestWithStride_AsyPadding) create_test_padding_SAME_class(TestWithStride_AsyPadding)
create_test_padding_SAME_class(TestWithGroup_AsyPadding) create_test_padding_SAME_class(TestWithGroup_AsyPadding)
create_test_padding_SAME_class(TestWithInput1x1Filter1x1_AsyPadding) create_test_padding_SAME_class(TestWithInput1x1Filter1x1_AsyPadding)
create_test_padding_VALID_class(TestConv2dOp_AsyPadding) create_test_padding_VALID_class(TestConv2DOp_AsyPadding)
create_test_padding_VALID_class(TestWithPad_AsyPadding) create_test_padding_VALID_class(TestWithPad_AsyPadding)
create_test_padding_VALID_class(TestWithStride_AsyPadding) create_test_padding_VALID_class(TestWithStride_AsyPadding)
create_test_padding_VALID_class(TestWithGroup_AsyPadding) create_test_padding_VALID_class(TestWithGroup_AsyPadding)
create_test_padding_VALID_class(TestWithInput1x1Filter1x1_AsyPadding) create_test_padding_VALID_class(TestWithInput1x1Filter1x1_AsyPadding)
create_test_cudnn_padding_SAME_class(TestConv2dOp_AsyPadding) create_test_cudnn_padding_SAME_class(TestConv2DOp_AsyPadding)
create_test_cudnn_padding_SAME_class(TestWithPad_AsyPadding) create_test_cudnn_padding_SAME_class(TestWithPad_AsyPadding)
create_test_cudnn_padding_SAME_class(TestWithStride_AsyPadding) create_test_cudnn_padding_SAME_class(TestWithStride_AsyPadding)
create_test_cudnn_padding_SAME_class(TestWithGroup_AsyPadding) create_test_cudnn_padding_SAME_class(TestWithGroup_AsyPadding)
create_test_cudnn_padding_SAME_class(TestWithInput1x1Filter1x1_AsyPadding) create_test_cudnn_padding_SAME_class(TestWithInput1x1Filter1x1_AsyPadding)
create_test_cudnn_padding_VALID_class(TestConv2dOp_AsyPadding) create_test_cudnn_padding_VALID_class(TestConv2DOp_AsyPadding)
create_test_cudnn_padding_VALID_class(TestWithPad_AsyPadding) create_test_cudnn_padding_VALID_class(TestWithPad_AsyPadding)
create_test_cudnn_padding_VALID_class(TestWithStride_AsyPadding) create_test_cudnn_padding_VALID_class(TestWithStride_AsyPadding)
create_test_cudnn_padding_VALID_class(TestWithGroup_AsyPadding) create_test_cudnn_padding_VALID_class(TestWithGroup_AsyPadding)
...@@ -1221,7 +1221,7 @@ create_test_padding_VALID_class(TestDepthwiseConvandFuse_AsyPadding) ...@@ -1221,7 +1221,7 @@ create_test_padding_VALID_class(TestDepthwiseConvandFuse_AsyPadding)
create_test_padding_VALID_class(TestDepthwiseConvWithDilationandFuse_AsyPadding) create_test_padding_VALID_class(TestDepthwiseConvWithDilationandFuse_AsyPadding)
# ------------ test channel last --------- # ------------ test channel last ---------
create_test_channel_last_class(TestConv2dOp_AsyPadding) create_test_channel_last_class(TestConv2DOp_AsyPadding)
create_test_channel_last_class(TestWithPad_AsyPadding) create_test_channel_last_class(TestWithPad_AsyPadding)
create_test_channel_last_class(TestWithGroup_AsyPadding) create_test_channel_last_class(TestWithGroup_AsyPadding)
create_test_channel_last_class(TestWith1x1_AsyPadding) create_test_channel_last_class(TestWith1x1_AsyPadding)
...@@ -1232,14 +1232,14 @@ create_test_channel_last_class(TestDepthwiseConvWithDilation2_AsyPadding) ...@@ -1232,14 +1232,14 @@ create_test_channel_last_class(TestDepthwiseConvWithDilation2_AsyPadding)
create_test_channel_last_class(TestDepthwiseConvandFuse_AsyPadding) create_test_channel_last_class(TestDepthwiseConvandFuse_AsyPadding)
create_test_channel_last_class(TestDepthwiseConvWithDilationandFuse_AsyPadding) create_test_channel_last_class(TestDepthwiseConvWithDilationandFuse_AsyPadding)
create_test_cudnn_channel_last_class(TestConv2dOp_AsyPadding) create_test_cudnn_channel_last_class(TestConv2DOp_AsyPadding)
create_test_cudnn_channel_last_class(TestWithPad_AsyPadding) create_test_cudnn_channel_last_class(TestWithPad_AsyPadding)
create_test_cudnn_channel_last_class(TestWithStride_AsyPadding) create_test_cudnn_channel_last_class(TestWithStride_AsyPadding)
create_test_cudnn_channel_last_class(TestWithGroup_AsyPadding) create_test_cudnn_channel_last_class(TestWithGroup_AsyPadding)
create_test_cudnn_channel_last_class(TestWithDilation_AsyPadding) create_test_cudnn_channel_last_class(TestWithDilation_AsyPadding)
create_test_cudnn_channel_last_fp16_class( create_test_cudnn_channel_last_fp16_class(
TestConv2dOp_AsyPadding, grad_check=False) TestConv2DOp_AsyPadding, grad_check=False)
create_test_cudnn_channel_last_fp16_class( create_test_cudnn_channel_last_fp16_class(
TestWithPad_AsyPadding, grad_check=False) TestWithPad_AsyPadding, grad_check=False)
create_test_cudnn_channel_last_fp16_class( create_test_cudnn_channel_last_fp16_class(
...@@ -1251,7 +1251,7 @@ create_test_cudnn_channel_last_fp16_class( ...@@ -1251,7 +1251,7 @@ create_test_cudnn_channel_last_fp16_class(
# --------- test python API --------------- # --------- test python API ---------------
class TestConv2dAPI(unittest.TestCase): class TestConv2DAPI(unittest.TestCase):
def test_api(self): def test_api(self):
input_NHWC = fluid.layers.data( input_NHWC = fluid.layers.data(
...@@ -1327,7 +1327,7 @@ class TestConv2dAPI(unittest.TestCase): ...@@ -1327,7 +1327,7 @@ class TestConv2dAPI(unittest.TestCase):
data_format="NCHW") data_format="NCHW")
class TestConv2dAPI_Error(unittest.TestCase): class TestConv2DAPI_Error(unittest.TestCase):
def test_api(self): def test_api(self):
input = fluid.layers.data( input = fluid.layers.data(
name="input", name="input",
......
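Since the test classes above now reference only the capitalized names, a quick hedged check of the rename's surface effect (assumption: this PR removes the lowercase names outright rather than keeping `Conv2d`-style aliases):

import paddle

assert hasattr(paddle.nn, "Conv2D")       # new name exported
assert not hasattr(paddle.nn, "Conv2d")   # old name assumed gone, no alias kept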
...@@ -155,7 +155,7 @@ class Conv2DTransposeTestCase(unittest.TestCase): ...@@ -155,7 +155,7 @@ class Conv2DTransposeTestCase(unittest.TestCase):
else: else:
output_size = self.output_size output_size = self.output_size
conv = nn.ConvTranspose2d( conv = nn.Conv2DTranspose(
self.num_channels, self.num_channels,
self.num_filters, self.num_filters,
self.filter_size, self.filter_size,
......
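The hunk above swaps `nn.ConvTranspose2d` for the new `nn.Conv2DTranspose`. A hedged dygraph usage sketch, assuming the 2.0rc signature `Conv2DTranspose(in_channels, out_channels, kernel_size, stride=..., ...)`:

import paddle

paddle.disable_static()
x = paddle.rand([2, 4, 8, 8])                        # NCHW input
conv = paddle.nn.Conv2DTranspose(4, 6, 3, stride=2)
y = conv(x)
print(y.shape)  # expected [2, 6, 17, 17]: (8 - 1) * 2 + (3 - 1) + 1 = 17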
...@@ -111,7 +111,7 @@ def conv2dtranspose_forward_naive(input_, filter_, attrs): ...@@ -111,7 +111,7 @@ def conv2dtranspose_forward_naive(input_, filter_, attrs):
return out return out
class TestConv2dTransposeOp(OpTest): class TestConv2DTransposeOp(OpTest):
def setUp(self): def setUp(self):
# init as conv transpose # init as conv transpose
self.dtype = np.float64 self.dtype = np.float64
...@@ -211,7 +211,7 @@ class TestConv2dTransposeOp(OpTest): ...@@ -211,7 +211,7 @@ class TestConv2dTransposeOp(OpTest):
self.op_type = "conv2d_transpose" self.op_type = "conv2d_transpose"
class TestWithSymmetricPad(TestConv2dTransposeOp): class TestWithSymmetricPad(TestConv2DTransposeOp):
def init_test_case(self): def init_test_case(self):
self.pad = [1, 1] self.pad = [1, 1]
self.stride = [1, 1] self.stride = [1, 1]
...@@ -222,7 +222,7 @@ class TestWithSymmetricPad(TestConv2dTransposeOp): ...@@ -222,7 +222,7 @@ class TestWithSymmetricPad(TestConv2dTransposeOp):
self.filter_size = [f_c, 6, 3, 3] self.filter_size = [f_c, 6, 3, 3]
class TestWithAsymmetricPad(TestConv2dTransposeOp): class TestWithAsymmetricPad(TestConv2DTransposeOp):
def init_test_case(self): def init_test_case(self):
self.pad = [1, 0, 1, 2] self.pad = [1, 0, 1, 2]
self.stride = [1, 1] self.stride = [1, 1]
...@@ -233,7 +233,7 @@ class TestWithAsymmetricPad(TestConv2dTransposeOp): ...@@ -233,7 +233,7 @@ class TestWithAsymmetricPad(TestConv2dTransposeOp):
self.filter_size = [f_c, 6, 3, 3] self.filter_size = [f_c, 6, 3, 3]
class TestWithSAMEPad(TestConv2dTransposeOp): class TestWithSAMEPad(TestConv2DTransposeOp):
def init_test_case(self): def init_test_case(self):
self.stride = [2, 1] self.stride = [2, 1]
self.dilations = [1, 2] self.dilations = [1, 2]
...@@ -244,7 +244,7 @@ class TestWithSAMEPad(TestConv2dTransposeOp): ...@@ -244,7 +244,7 @@ class TestWithSAMEPad(TestConv2dTransposeOp):
self.padding_algorithm = 'SAME' self.padding_algorithm = 'SAME'
class TestWithVALIDPad(TestConv2dTransposeOp): class TestWithVALIDPad(TestConv2DTransposeOp):
def init_test_case(self): def init_test_case(self):
self.stride = [1, 1] self.stride = [1, 1]
self.dilations = [1, 1] self.dilations = [1, 1]
...@@ -255,7 +255,7 @@ class TestWithVALIDPad(TestConv2dTransposeOp): ...@@ -255,7 +255,7 @@ class TestWithVALIDPad(TestConv2dTransposeOp):
self.padding_algorithm = 'VALID' self.padding_algorithm = 'VALID'
class TestWithGroups(TestConv2dTransposeOp): class TestWithGroups(TestConv2DTransposeOp):
def init_test_case(self): def init_test_case(self):
self.pad = [1, 1] self.pad = [1, 1]
self.stride = [1, 1] self.stride = [1, 1]
...@@ -266,7 +266,7 @@ class TestWithGroups(TestConv2dTransposeOp): ...@@ -266,7 +266,7 @@ class TestWithGroups(TestConv2dTransposeOp):
self.filter_size = [f_c, 3, 3, 3] self.filter_size = [f_c, 3, 3, 3]
class TestWithStride(TestConv2dTransposeOp): class TestWithStride(TestConv2DTransposeOp):
def init_test_case(self): def init_test_case(self):
self.pad = [1, 1] self.pad = [1, 1]
self.stride = [2, 2] self.stride = [2, 2]
...@@ -277,7 +277,7 @@ class TestWithStride(TestConv2dTransposeOp): ...@@ -277,7 +277,7 @@ class TestWithStride(TestConv2dTransposeOp):
self.filter_size = [f_c, 6, 3, 3] self.filter_size = [f_c, 6, 3, 3]
class TestWithDilation(TestConv2dTransposeOp): class TestWithDilation(TestConv2DTransposeOp):
def init_test_case(self): def init_test_case(self):
self.pad = [1, 1] self.pad = [1, 1]
self.stride = [1, 1] self.stride = [1, 1]
...@@ -288,7 +288,7 @@ class TestWithDilation(TestConv2dTransposeOp): ...@@ -288,7 +288,7 @@ class TestWithDilation(TestConv2dTransposeOp):
self.filter_size = [f_c, 6, 3, 3] self.filter_size = [f_c, 6, 3, 3]
class TestWithEvenUpsample(TestConv2dTransposeOp): class TestWithEvenUpsample(TestConv2DTransposeOp):
def init_test_case(self): def init_test_case(self):
self.pad = [2, 2] self.pad = [2, 2]
self.stride = [2, 2] self.stride = [2, 2]
...@@ -300,7 +300,7 @@ class TestWithEvenUpsample(TestConv2dTransposeOp): ...@@ -300,7 +300,7 @@ class TestWithEvenUpsample(TestConv2dTransposeOp):
self.filter_size = [f_c, 6, 5, 5] self.filter_size = [f_c, 6, 5, 5]
class TestWithEvenUpsampleOutputPadding(TestConv2dTransposeOp): class TestWithEvenUpsampleOutputPadding(TestConv2DTransposeOp):
def init_test_case(self): def init_test_case(self):
self.pad = [2, 2] self.pad = [2, 2]
self.stride = [2, 2] self.stride = [2, 2]
...@@ -312,7 +312,7 @@ class TestWithEvenUpsampleOutputPadding(TestConv2dTransposeOp): ...@@ -312,7 +312,7 @@ class TestWithEvenUpsampleOutputPadding(TestConv2dTransposeOp):
self.filter_size = [f_c, 6, 5, 5] self.filter_size = [f_c, 6, 5, 5]
class Test_NHWC(TestConv2dTransposeOp): class Test_NHWC(TestConv2DTransposeOp):
def init_test_case(self): def init_test_case(self):
self.pad = [0, 0] self.pad = [0, 0]
self.stride = [1, 1] self.stride = [1, 1]
...@@ -324,7 +324,7 @@ class Test_NHWC(TestConv2dTransposeOp): ...@@ -324,7 +324,7 @@ class Test_NHWC(TestConv2dTransposeOp):
self.data_format = 'NHWC' self.data_format = 'NHWC'
class TestWithSymmetricPad_NHWC(TestConv2dTransposeOp): class TestWithSymmetricPad_NHWC(TestConv2DTransposeOp):
def init_test_case(self): def init_test_case(self):
self.pad = [1, 1] self.pad = [1, 1]
self.stride = [1, 1] self.stride = [1, 1]
...@@ -336,7 +336,7 @@ class TestWithSymmetricPad_NHWC(TestConv2dTransposeOp): ...@@ -336,7 +336,7 @@ class TestWithSymmetricPad_NHWC(TestConv2dTransposeOp):
self.data_format = 'NHWC' self.data_format = 'NHWC'
class TestWithAsymmetricPad_NHWC(TestConv2dTransposeOp): class TestWithAsymmetricPad_NHWC(TestConv2DTransposeOp):
def init_test_case(self): def init_test_case(self):
self.pad = [1, 0, 1, 2] self.pad = [1, 0, 1, 2]
self.stride = [1, 1] self.stride = [1, 1]
...@@ -348,7 +348,7 @@ class TestWithAsymmetricPad_NHWC(TestConv2dTransposeOp): ...@@ -348,7 +348,7 @@ class TestWithAsymmetricPad_NHWC(TestConv2dTransposeOp):
self.data_format = 'NHWC' self.data_format = 'NHWC'
class TestWithGroups_NHWC(TestConv2dTransposeOp): class TestWithGroups_NHWC(TestConv2DTransposeOp):
def init_test_case(self): def init_test_case(self):
self.pad = [1, 1] self.pad = [1, 1]
self.stride = [1, 1] self.stride = [1, 1]
...@@ -360,7 +360,7 @@ class TestWithGroups_NHWC(TestConv2dTransposeOp): ...@@ -360,7 +360,7 @@ class TestWithGroups_NHWC(TestConv2dTransposeOp):
self.data_format = 'NHWC' self.data_format = 'NHWC'
class TestWithStride_NHWC(TestConv2dTransposeOp): class TestWithStride_NHWC(TestConv2DTransposeOp):
def init_test_case(self): def init_test_case(self):
self.pad = [1, 1] self.pad = [1, 1]
self.stride = [2, 2] self.stride = [2, 2]
...@@ -372,7 +372,7 @@ class TestWithStride_NHWC(TestConv2dTransposeOp): ...@@ -372,7 +372,7 @@ class TestWithStride_NHWC(TestConv2dTransposeOp):
self.data_format = 'NHWC' self.data_format = 'NHWC'
class TestWithDilation_NHWC(TestConv2dTransposeOp): class TestWithDilation_NHWC(TestConv2DTransposeOp):
def init_test_case(self): def init_test_case(self):
self.pad = [1, 1] self.pad = [1, 1]
self.stride = [1, 1] self.stride = [1, 1]
...@@ -384,7 +384,7 @@ class TestWithDilation_NHWC(TestConv2dTransposeOp): ...@@ -384,7 +384,7 @@ class TestWithDilation_NHWC(TestConv2dTransposeOp):
self.data_format = 'NHWC' self.data_format = 'NHWC'
class TestWithEvenUpsample_NHWC(TestConv2dTransposeOp): class TestWithEvenUpsample_NHWC(TestConv2DTransposeOp):
def init_test_case(self): def init_test_case(self):
self.pad = [2, 2] self.pad = [2, 2]
self.stride = [2, 2] self.stride = [2, 2]
...@@ -397,7 +397,7 @@ class TestWithEvenUpsample_NHWC(TestConv2dTransposeOp): ...@@ -397,7 +397,7 @@ class TestWithEvenUpsample_NHWC(TestConv2dTransposeOp):
self.data_format = 'NHWC' self.data_format = 'NHWC'
class TestWithEvenUpsample_NHWC_output_padding(TestConv2dTransposeOp): class TestWithEvenUpsample_NHWC_output_padding(TestConv2DTransposeOp):
def init_test_case(self): def init_test_case(self):
self.pad = [2, 2] self.pad = [2, 2]
self.stride = [2, 2] self.stride = [2, 2]
...@@ -413,7 +413,7 @@ class TestWithEvenUpsample_NHWC_output_padding(TestConv2dTransposeOp): ...@@ -413,7 +413,7 @@ class TestWithEvenUpsample_NHWC_output_padding(TestConv2dTransposeOp):
# ------------ test_cudnn ------------ # ------------ test_cudnn ------------
@unittest.skipIf(not core.is_compiled_with_cuda(), @unittest.skipIf(not core.is_compiled_with_cuda(),
"core is not compiled with CUDA") "core is not compiled with CUDA")
class TestCUDNN(TestConv2dTransposeOp): class TestCUDNN(TestConv2DTransposeOp):
def init_op_type(self): def init_op_type(self):
self.use_cudnn = True self.use_cudnn = True
self.op_type = "conv2d_transpose" self.op_type = "conv2d_transpose"
...@@ -547,7 +547,7 @@ class TestCUDNNWithEvenUpsample(TestWithEvenUpsample): ...@@ -547,7 +547,7 @@ class TestCUDNNWithEvenUpsample(TestWithEvenUpsample):
@unittest.skipIf(not core.is_compiled_with_cuda(), @unittest.skipIf(not core.is_compiled_with_cuda(),
"core is not compiled with CUDA") "core is not compiled with CUDA")
class TestCUDNN_NHWC(TestConv2dTransposeOp): class TestCUDNN_NHWC(TestConv2DTransposeOp):
def init_test_case(self): def init_test_case(self):
self.pad = [0, 0] self.pad = [0, 0]
self.stride = [1, 1] self.stride = [1, 1]
...@@ -654,7 +654,7 @@ class TestCUDNNWithEvenUpsample_NHWC(TestWithEvenUpsample): ...@@ -654,7 +654,7 @@ class TestCUDNNWithEvenUpsample_NHWC(TestWithEvenUpsample):
self.op_type = "conv2d_transpose" self.op_type = "conv2d_transpose"
class TestDepthwiseConvTranspose(TestConv2dTransposeOp): class TestDepthwiseConvTranspose(TestConv2DTransposeOp):
def init_test_case(self): def init_test_case(self):
self.pad = [1, 1] self.pad = [1, 1]
self.stride = [2, 2] self.stride = [2, 2]
...@@ -667,7 +667,7 @@ class TestDepthwiseConvTranspose(TestConv2dTransposeOp): ...@@ -667,7 +667,7 @@ class TestDepthwiseConvTranspose(TestConv2dTransposeOp):
self.op_type = "depthwise_conv2d_transpose" self.op_type = "depthwise_conv2d_transpose"
class TestDepthwiseConvTransposeAsymmetricPad(TestConv2dTransposeOp): class TestDepthwiseConvTransposeAsymmetricPad(TestConv2DTransposeOp):
def init_test_case(self): def init_test_case(self):
self.pad = [1, 0, 1, 2] self.pad = [1, 0, 1, 2]
self.stride = [2, 2] self.stride = [2, 2]
...@@ -681,7 +681,7 @@ class TestDepthwiseConvTransposeAsymmetricPad(TestConv2dTransposeOp): ...@@ -681,7 +681,7 @@ class TestDepthwiseConvTransposeAsymmetricPad(TestConv2dTransposeOp):
self.data_format = 'NCHW' self.data_format = 'NCHW'
class TestDepthwiseConvTransposeSAMEPad(TestConv2dTransposeOp): class TestDepthwiseConvTransposeSAMEPad(TestConv2DTransposeOp):
def init_test_case(self): def init_test_case(self):
self.stride = [2, 2] self.stride = [2, 2]
self.dilations = [1, 1] self.dilations = [1, 1]
...@@ -694,7 +694,7 @@ class TestDepthwiseConvTransposeSAMEPad(TestConv2dTransposeOp): ...@@ -694,7 +694,7 @@ class TestDepthwiseConvTransposeSAMEPad(TestConv2dTransposeOp):
self.padding_algorithm = 'SAME' self.padding_algorithm = 'SAME'
class TestDepthwiseConvTransposeVALIDPad(TestConv2dTransposeOp): class TestDepthwiseConvTransposeVALIDPad(TestConv2DTransposeOp):
def init_test_case(self): def init_test_case(self):
self.stride = [2, 2] self.stride = [2, 2]
self.dilations = [1, 1] self.dilations = [1, 1]
...@@ -707,7 +707,7 @@ class TestDepthwiseConvTransposeVALIDPad(TestConv2dTransposeOp): ...@@ -707,7 +707,7 @@ class TestDepthwiseConvTransposeVALIDPad(TestConv2dTransposeOp):
self.padding_algorithm = 'VALID' self.padding_algorithm = 'VALID'
class TestDepthwiseConvTranspose_NHWC_4x4kernel(TestConv2dTransposeOp): class TestDepthwiseConvTranspose_NHWC_4x4kernel(TestConv2DTransposeOp):
def init_test_case(self): def init_test_case(self):
self.pad = [1, 1] self.pad = [1, 1]
self.stride = [2, 2] self.stride = [2, 2]
...@@ -721,7 +721,7 @@ class TestDepthwiseConvTranspose_NHWC_4x4kernel(TestConv2dTransposeOp): ...@@ -721,7 +721,7 @@ class TestDepthwiseConvTranspose_NHWC_4x4kernel(TestConv2dTransposeOp):
self.data_format = 'NHWC' self.data_format = 'NHWC'
class TestDepthwiseConvTranspose_NHWC_3x3kernel(TestConv2dTransposeOp): class TestDepthwiseConvTranspose_NHWC_3x3kernel(TestConv2DTransposeOp):
def init_test_case(self): def init_test_case(self):
self.pad = [1, 1] self.pad = [1, 1]
self.stride = [2, 2] self.stride = [2, 2]
...@@ -735,7 +735,7 @@ class TestDepthwiseConvTranspose_NHWC_3x3kernel(TestConv2dTransposeOp): ...@@ -735,7 +735,7 @@ class TestDepthwiseConvTranspose_NHWC_3x3kernel(TestConv2dTransposeOp):
self.data_format = 'NHWC' self.data_format = 'NHWC'
class TestDepthwiseConvTransposeAsymmetricPad_NHWC(TestConv2dTransposeOp): class TestDepthwiseConvTransposeAsymmetricPad_NHWC(TestConv2DTransposeOp):
def init_test_case(self): def init_test_case(self):
self.pad = [1, 0, 1, 2] self.pad = [1, 0, 1, 2]
self.stride = [2, 2] self.stride = [2, 2]
...@@ -751,7 +751,7 @@ class TestDepthwiseConvTransposeAsymmetricPad_NHWC(TestConv2dTransposeOp): ...@@ -751,7 +751,7 @@ class TestDepthwiseConvTransposeAsymmetricPad_NHWC(TestConv2dTransposeOp):
@unittest.skipIf(not core.is_compiled_with_cuda(), @unittest.skipIf(not core.is_compiled_with_cuda(),
"core is not compiled with CUDA") "core is not compiled with CUDA")
class TestCUDNN_FP16(TestConv2dTransposeOp): class TestCUDNN_FP16(TestConv2DTransposeOp):
def init_test_case(self): def init_test_case(self):
self.dtype = np.float16 self.dtype = np.float16
self.pad = [1, 1] self.pad = [1, 1]
...@@ -867,7 +867,7 @@ class TestCUDNNWithEvenUpsample_NHWC_FP16(TestCUDNN_FP16): ...@@ -867,7 +867,7 @@ class TestCUDNNWithEvenUpsample_NHWC_FP16(TestCUDNN_FP16):
self.data_format = 'NHWC' self.data_format = 'NHWC'
class TestConv2dTransposeAPI(unittest.TestCase): class TestConv2DTransposeAPI(unittest.TestCase):
def test_case1(self): def test_case1(self):
data1 = fluid.layers.data( data1 = fluid.layers.data(
name='data1', shape=[3, 5, 5], dtype='float32') name='data1', shape=[3, 5, 5], dtype='float32')
...@@ -945,7 +945,7 @@ class TestConv2dTransposeAPI(unittest.TestCase): ...@@ -945,7 +945,7 @@ class TestConv2dTransposeAPI(unittest.TestCase):
self.assertIsNotNone(results[6]) self.assertIsNotNone(results[6])
class TestConv2dTransposeOpException(unittest.TestCase): class TestConv2DTransposeOpException(unittest.TestCase):
def test_exception(self): def test_exception(self):
data = fluid.layers.data(name='data', shape=[3, 5, 5], dtype="float32") data = fluid.layers.data(name='data', shape=[3, 5, 5], dtype="float32")
......
...@@ -135,7 +135,7 @@ class Conv3DTestCase(unittest.TestCase): ...@@ -135,7 +135,7 @@ class Conv3DTestCase(unittest.TestCase):
def paddle_nn_layer(self): def paddle_nn_layer(self):
x_var = dg.to_variable(self.input) x_var = dg.to_variable(self.input)
conv = nn.Conv3d( conv = nn.Conv3D(
self.num_channels, self.num_channels,
self.num_filters, self.num_filters,
self.filter_size, self.filter_size,
......
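The same rename applies in 3D: the layer test now constructs `nn.Conv3D` instead of `nn.Conv3d`. A hedged sketch under the assumed 2.0rc signature `Conv3D(in_channels, out_channels, kernel_size, ...)`:

import paddle

paddle.disable_static()
x = paddle.rand([2, 3, 8, 8, 8])                # NCDHW input
conv = paddle.nn.Conv3D(3, 6, 3, padding=1)
print(conv(x).shape)                            # [2, 6, 8, 8, 8]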
...@@ -228,7 +228,7 @@ def create_test_cudnn_channel_last_class(parent): ...@@ -228,7 +228,7 @@ def create_test_cudnn_channel_last_class(parent):
globals()[cls_name] = TestCudnnChannelLastCase globals()[cls_name] = TestCudnnChannelLastCase
class TestConv3dOp(OpTest): class TestConv3DOp(OpTest):
def setUp(self): def setUp(self):
self.op_type = "conv3d" self.op_type = "conv3d"
self.use_cudnn = False self.use_cudnn = False
...@@ -334,7 +334,7 @@ class TestConv3dOp(OpTest): ...@@ -334,7 +334,7 @@ class TestConv3dOp(OpTest):
pass pass
class TestCase1(TestConv3dOp): class TestCase1(TestConv3DOp):
def init_test_case(self): def init_test_case(self):
self.pad = [1, 1, 1] self.pad = [1, 1, 1]
self.stride = [1, 1, 1] self.stride = [1, 1, 1]
...@@ -344,7 +344,7 @@ class TestCase1(TestConv3dOp): ...@@ -344,7 +344,7 @@ class TestCase1(TestConv3dOp):
self.filter_size = [6, f_c, 3, 3, 3] self.filter_size = [6, f_c, 3, 3, 3]
class TestWithGroup1(TestConv3dOp): class TestWithGroup1(TestConv3DOp):
def init_group(self): def init_group(self):
self.groups = 3 self.groups = 3
...@@ -354,7 +354,7 @@ class TestWithGroup2(TestCase1): ...@@ -354,7 +354,7 @@ class TestWithGroup2(TestCase1):
self.groups = 3 self.groups = 3
class TestWith1x1(TestConv3dOp): class TestWith1x1(TestConv3DOp):
def init_test_case(self): def init_test_case(self):
self.pad = [0, 0, 0] self.pad = [0, 0, 0]
self.stride = [1, 1, 1] self.stride = [1, 1, 1]
...@@ -370,7 +370,7 @@ class TestWith1x1(TestConv3dOp): ...@@ -370,7 +370,7 @@ class TestWith1x1(TestConv3dOp):
self.groups = 3 self.groups = 3
class TestWithInput1x1Filter1x1(TestConv3dOp): class TestWithInput1x1Filter1x1(TestConv3DOp):
def init_test_case(self): def init_test_case(self):
self.pad = [0, 0, 0] self.pad = [0, 0, 0]
self.stride = [1, 1, 1] self.stride = [1, 1, 1]
...@@ -386,7 +386,7 @@ class TestWithInput1x1Filter1x1(TestConv3dOp): ...@@ -386,7 +386,7 @@ class TestWithInput1x1Filter1x1(TestConv3dOp):
self.groups = 3 self.groups = 3
class TestWithDilation(TestConv3dOp): class TestWithDilation(TestConv3DOp):
def init_test_case(self): def init_test_case(self):
self.pad = [0, 0, 0] self.pad = [0, 0, 0]
self.stride = [1, 1, 1] self.stride = [1, 1, 1]
...@@ -402,19 +402,19 @@ class TestWithDilation(TestConv3dOp): ...@@ -402,19 +402,19 @@ class TestWithDilation(TestConv3dOp):
self.groups = 3 self.groups = 3
#---------------- Conv3dCUDNN ---------------- #---------------- Conv3DCUDNN ----------------
@unittest.skipIf(not core.is_compiled_with_cuda(), @unittest.skipIf(not core.is_compiled_with_cuda(),
"core is not compiled with CUDA") "core is not compiled with CUDA")
class TestCUDNN(TestConv3dOp): class TestCUDNN(TestConv3DOp):
def init_kernel_type(self): def init_kernel_type(self):
self.use_cudnn = True self.use_cudnn = True
@unittest.skipIf(not core.is_compiled_with_cuda(), @unittest.skipIf(not core.is_compiled_with_cuda(),
"core is not compiled with CUDA") "core is not compiled with CUDA")
class TestFP16CUDNN(TestConv3dOp): class TestFP16CUDNN(TestConv3DOp):
def init_kernel_type(self): def init_kernel_type(self):
self.use_cudnn = True self.use_cudnn = True
self.dtype = np.float16 self.dtype = np.float16
...@@ -519,7 +519,7 @@ class TestCUDNNExhaustiveSearch(TestCUDNN): ...@@ -519,7 +519,7 @@ class TestCUDNNExhaustiveSearch(TestCUDNN):
# ---- test asymmetric padding ---- # ---- test asymmetric padding ----
class TestConv3dOp_2(OpTest): class TestConv3DOp_2(OpTest):
def setUp(self): def setUp(self):
self.op_type = "conv3d" self.op_type = "conv3d"
self.use_cudnn = False self.use_cudnn = False
...@@ -624,7 +624,7 @@ class TestConv3dOp_2(OpTest): ...@@ -624,7 +624,7 @@ class TestConv3dOp_2(OpTest):
self.data_format = "NCDHW" self.data_format = "NCDHW"
class TestConv3dOp_AsyPadding(TestConv3dOp_2): class TestConv3DOp_AsyPadding(TestConv3DOp_2):
def init_test_case(self): def init_test_case(self):
self.stride = [1, 1, 2] self.stride = [1, 1, 2]
self.input_size = [2, 3, 4, 4, 4] # NCDHW self.input_size = [2, 3, 4, 4, 4] # NCDHW
...@@ -637,7 +637,7 @@ class TestConv3dOp_AsyPadding(TestConv3dOp_2): ...@@ -637,7 +637,7 @@ class TestConv3dOp_AsyPadding(TestConv3dOp_2):
self.padding_algorithm = "EXPLICIT" self.padding_algorithm = "EXPLICIT"
class TestConv3dOp_DiffDataInDiffDim(TestConv3dOp_2): class TestConv3DOp_DiffDataInDiffDim(TestConv3DOp_2):
def init_test_case(self): def init_test_case(self):
self.stride = [1, 1, 2] self.stride = [1, 1, 2]
self.input_size = [2, 3, 4, 5, 5] # NCDHW self.input_size = [2, 3, 4, 5, 5] # NCDHW
...@@ -650,12 +650,12 @@ class TestConv3dOp_DiffDataInDiffDim(TestConv3dOp_2): ...@@ -650,12 +650,12 @@ class TestConv3dOp_DiffDataInDiffDim(TestConv3dOp_2):
self.padding_algorithm = "EXPLICIT" self.padding_algorithm = "EXPLICIT"
create_test_padding_SAME_class(TestConv3dOp_DiffDataInDiffDim) create_test_padding_SAME_class(TestConv3DOp_DiffDataInDiffDim)
create_test_padding_VALID_class(TestConv3dOp_DiffDataInDiffDim) create_test_padding_VALID_class(TestConv3DOp_DiffDataInDiffDim)
create_test_channel_last_class(TestConv3dOp_DiffDataInDiffDim) create_test_channel_last_class(TestConv3DOp_DiffDataInDiffDim)
class TestCase1_AsyPadding(TestConv3dOp_2): class TestCase1_AsyPadding(TestConv3DOp_2):
def init_test_case(self): def init_test_case(self):
self.stride = [1, 1, 1] self.stride = [1, 1, 1]
self.input_size = [2, 3, 4, 4, 4] # NCDHW self.input_size = [2, 3, 4, 4, 4] # NCDHW
...@@ -668,7 +668,7 @@ class TestCase1_AsyPadding(TestConv3dOp_2): ...@@ -668,7 +668,7 @@ class TestCase1_AsyPadding(TestConv3dOp_2):
self.padding_algorithm = "EXPLICIT" self.padding_algorithm = "EXPLICIT"
class TestWithGroup1_AsyPadding(TestConv3dOp_2): class TestWithGroup1_AsyPadding(TestConv3DOp_2):
def init_group(self): def init_group(self):
self.groups = 3 self.groups = 3
...@@ -677,7 +677,7 @@ class TestWithGroup1_AsyPadding(TestConv3dOp_2): ...@@ -677,7 +677,7 @@ class TestWithGroup1_AsyPadding(TestConv3dOp_2):
self.padding_algorithm = "EXPLICIT" self.padding_algorithm = "EXPLICIT"
class TestWithGroup2_AsyPadding(TestConv3dOp_2): class TestWithGroup2_AsyPadding(TestConv3DOp_2):
def init_test_case(self): def init_test_case(self):
self.stride = [1, 1, 1] self.stride = [1, 1, 1]
self.input_size = [2, 3, 4, 4, 4] # NCDHW self.input_size = [2, 3, 4, 4, 4] # NCDHW
...@@ -693,7 +693,7 @@ class TestWithGroup2_AsyPadding(TestConv3dOp_2): ...@@ -693,7 +693,7 @@ class TestWithGroup2_AsyPadding(TestConv3dOp_2):
self.padding_algorithm = "EXPLICIT" self.padding_algorithm = "EXPLICIT"
class TestWith1x1_AsyPadding(TestConv3dOp_2): class TestWith1x1_AsyPadding(TestConv3DOp_2):
def init_test_case(self): def init_test_case(self):
self.stride = [1, 1, 1] self.stride = [1, 1, 1]
self.input_size = [2, 3, 4, 4, 4] self.input_size = [2, 3, 4, 4, 4]
...@@ -712,7 +712,7 @@ class TestWith1x1_AsyPadding(TestConv3dOp_2): ...@@ -712,7 +712,7 @@ class TestWith1x1_AsyPadding(TestConv3dOp_2):
self.padding_algorithm = "EXPLICIT" self.padding_algorithm = "EXPLICIT"
class TestWithDilation_AsyPadding(TestConv3dOp_2): class TestWithDilation_AsyPadding(TestConv3DOp_2):
def init_test_case(self): def init_test_case(self):
self.stride = [1, 1, 1] self.stride = [1, 1, 1]
self.input_size = [2, 3, 6, 6, 6] self.input_size = [2, 3, 6, 6, 6]
...@@ -731,41 +731,41 @@ class TestWithDilation_AsyPadding(TestConv3dOp_2): ...@@ -731,41 +731,41 @@ class TestWithDilation_AsyPadding(TestConv3dOp_2):
self.padding_algorithm = "EXPLICIT" self.padding_algorithm = "EXPLICIT"
create_test_cudnn_class(TestConv3dOp_AsyPadding) create_test_cudnn_class(TestConv3DOp_AsyPadding)
create_test_cudnn_class(TestWithGroup1_AsyPadding) create_test_cudnn_class(TestWithGroup1_AsyPadding)
create_test_cudnn_class(TestWithGroup2_AsyPadding) create_test_cudnn_class(TestWithGroup2_AsyPadding)
create_test_cudnn_class(TestWith1x1_AsyPadding) create_test_cudnn_class(TestWith1x1_AsyPadding)
create_test_cudnn_class(TestWithDilation_AsyPadding) create_test_cudnn_class(TestWithDilation_AsyPadding)
create_test_padding_SAME_class(TestConv3dOp_AsyPadding) create_test_padding_SAME_class(TestConv3DOp_AsyPadding)
create_test_padding_SAME_class(TestWithGroup1_AsyPadding) create_test_padding_SAME_class(TestWithGroup1_AsyPadding)
create_test_padding_SAME_class(TestWith1x1_AsyPadding) create_test_padding_SAME_class(TestWith1x1_AsyPadding)
create_test_padding_VALID_class(TestConv3dOp_AsyPadding) create_test_padding_VALID_class(TestConv3DOp_AsyPadding)
create_test_padding_VALID_class(TestWithGroup1_AsyPadding) create_test_padding_VALID_class(TestWithGroup1_AsyPadding)
create_test_padding_VALID_class(TestWith1x1_AsyPadding) create_test_padding_VALID_class(TestWith1x1_AsyPadding)
create_test_cudnn_padding_SAME_class(TestConv3dOp_AsyPadding) create_test_cudnn_padding_SAME_class(TestConv3DOp_AsyPadding)
create_test_cudnn_padding_SAME_class(TestWithGroup1_AsyPadding) create_test_cudnn_padding_SAME_class(TestWithGroup1_AsyPadding)
create_test_cudnn_padding_SAME_class(TestWith1x1_AsyPadding) create_test_cudnn_padding_SAME_class(TestWith1x1_AsyPadding)
create_test_cudnn_padding_VALID_class(TestConv3dOp_AsyPadding) create_test_cudnn_padding_VALID_class(TestConv3DOp_AsyPadding)
create_test_cudnn_padding_VALID_class(TestWithGroup1_AsyPadding) create_test_cudnn_padding_VALID_class(TestWithGroup1_AsyPadding)
create_test_cudnn_padding_VALID_class(TestWith1x1_AsyPadding) create_test_cudnn_padding_VALID_class(TestWith1x1_AsyPadding)
create_test_channel_last_class(TestConv3dOp_AsyPadding) create_test_channel_last_class(TestConv3DOp_AsyPadding)
create_test_channel_last_class(TestWithGroup1_AsyPadding) create_test_channel_last_class(TestWithGroup1_AsyPadding)
create_test_channel_last_class(TestWith1x1_AsyPadding) create_test_channel_last_class(TestWith1x1_AsyPadding)
create_test_channel_last_class(TestConv3dOp_AsyPadding) create_test_channel_last_class(TestConv3DOp_AsyPadding)
create_test_channel_last_class(TestWithGroup1_AsyPadding) create_test_channel_last_class(TestWithGroup1_AsyPadding)
create_test_channel_last_class(TestWith1x1_AsyPadding) create_test_channel_last_class(TestWith1x1_AsyPadding)
create_test_cudnn_channel_last_class(TestConv3dOp_AsyPadding) create_test_cudnn_channel_last_class(TestConv3DOp_AsyPadding)
create_test_cudnn_channel_last_class(TestWithGroup1_AsyPadding) create_test_cudnn_channel_last_class(TestWithGroup1_AsyPadding)
create_test_cudnn_channel_last_class(TestWith1x1_AsyPadding) create_test_cudnn_channel_last_class(TestWith1x1_AsyPadding)
create_test_cudnn_channel_last_class(TestConv3dOp_AsyPadding) create_test_cudnn_channel_last_class(TestConv3DOp_AsyPadding)
create_test_cudnn_channel_last_class(TestWithGroup1_AsyPadding) create_test_cudnn_channel_last_class(TestWithGroup1_AsyPadding)
create_test_cudnn_channel_last_class(TestWith1x1_AsyPadding) create_test_cudnn_channel_last_class(TestWith1x1_AsyPadding)
...@@ -777,7 +777,7 @@ create_test_cudnn_channel_last_class(TestWith1x1_AsyPadding) ...@@ -777,7 +777,7 @@ create_test_cudnn_channel_last_class(TestWith1x1_AsyPadding)
# --------- test python API --------------- # --------- test python API ---------------
class TestConv3dAPI(unittest.TestCase): class TestConv3DAPI(unittest.TestCase):
def test_api(self): def test_api(self):
input_NDHWC = fluid.layers.data( input_NDHWC = fluid.layers.data(
...@@ -853,7 +853,7 @@ class TestConv3dAPI(unittest.TestCase): ...@@ -853,7 +853,7 @@ class TestConv3dAPI(unittest.TestCase):
data_format="NCDHW") data_format="NCDHW")
class TestConv3dAPI_Error(unittest.TestCase): class TestConv3DAPI_Error(unittest.TestCase):
def test_api(self): def test_api(self):
input = fluid.layers.data( input = fluid.layers.data(
name="input", name="input",
......
...@@ -139,7 +139,7 @@ class Conv3DTransposeTestCase(unittest.TestCase): ...@@ -139,7 +139,7 @@ class Conv3DTransposeTestCase(unittest.TestCase):
def paddle_nn_layer(self): def paddle_nn_layer(self):
x_var = dg.to_variable(self.input) x_var = dg.to_variable(self.input)
conv = nn.ConvTranspose3d( conv = nn.Conv3DTranspose(
self.num_channels, self.num_channels,
self.num_filters, self.num_filters,
self.filter_size, self.filter_size,
......
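The transposed 3D layer follows suit: `nn.ConvTranspose3d` becomes `nn.Conv3DTranspose`. A hedged sketch under the assumed signature `Conv3DTranspose(in_channels, out_channels, kernel_size, ...)`:

import paddle

paddle.disable_static()
x = paddle.rand([2, 4, 4, 4, 4])                # NCDHW input
conv = paddle.nn.Conv3DTranspose(4, 6, 3)
print(conv(x).shape)                            # [2, 6, 6, 6, 6]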
...@@ -107,7 +107,7 @@ def conv3dtranspose_forward_naive(input_, filter_, attrs): ...@@ -107,7 +107,7 @@ def conv3dtranspose_forward_naive(input_, filter_, attrs):
return out return out
class TestConv3dTransposeOp(OpTest): class TestConv3DTransposeOp(OpTest):
def setUp(self): def setUp(self):
# init as conv transpose # init as conv transpose
self.use_cudnn = False self.use_cudnn = False
...@@ -200,7 +200,7 @@ class TestConv3dTransposeOp(OpTest): ...@@ -200,7 +200,7 @@ class TestConv3dTransposeOp(OpTest):
self.op_type = "conv3d_transpose" self.op_type = "conv3d_transpose"
class TestWithSymmetricPad(TestConv3dTransposeOp): class TestWithSymmetricPad(TestConv3DTransposeOp):
def init_test_case(self): def init_test_case(self):
self.check_no_input = True self.check_no_input = True
self.pad = [1, 1, 1] self.pad = [1, 1, 1]
...@@ -212,7 +212,7 @@ class TestWithSymmetricPad(TestConv3dTransposeOp): ...@@ -212,7 +212,7 @@ class TestWithSymmetricPad(TestConv3dTransposeOp):
self.filter_size = [f_c, 6, 3, 3, 3] self.filter_size = [f_c, 6, 3, 3, 3]
class TestWithAsymmetricPad(TestConv3dTransposeOp): class TestWithAsymmetricPad(TestConv3DTransposeOp):
def init_test_case(self): def init_test_case(self):
self.pad = [1, 0, 1, 0, 1, 2] self.pad = [1, 0, 1, 0, 1, 2]
self.stride = [1, 1, 1] self.stride = [1, 1, 1]
...@@ -223,7 +223,7 @@ class TestWithAsymmetricPad(TestConv3dTransposeOp): ...@@ -223,7 +223,7 @@ class TestWithAsymmetricPad(TestConv3dTransposeOp):
self.filter_size = [f_c, 6, 3, 3, 3] self.filter_size = [f_c, 6, 3, 3, 3]
class TestWithSAMEPad(TestConv3dTransposeOp): class TestWithSAMEPad(TestConv3DTransposeOp):
def init_test_case(self): def init_test_case(self):
self.stride = [1, 1, 2] self.stride = [1, 1, 2]
self.dilations = [1, 2, 1] self.dilations = [1, 2, 1]
...@@ -234,7 +234,7 @@ class TestWithSAMEPad(TestConv3dTransposeOp): ...@@ -234,7 +234,7 @@ class TestWithSAMEPad(TestConv3dTransposeOp):
self.padding_algorithm = 'SAME' self.padding_algorithm = 'SAME'
class TestWithVALIDPad(TestConv3dTransposeOp): class TestWithVALIDPad(TestConv3DTransposeOp):
def init_test_case(self): def init_test_case(self):
self.stride = [2, 1, 1] self.stride = [2, 1, 1]
self.dilations = [1, 1, 1] self.dilations = [1, 1, 1]
...@@ -245,7 +245,7 @@ class TestWithVALIDPad(TestConv3dTransposeOp): ...@@ -245,7 +245,7 @@ class TestWithVALIDPad(TestConv3dTransposeOp):
self.padding_algorithm = 'VALID' self.padding_algorithm = 'VALID'
class TestWithStride(TestConv3dTransposeOp): class TestWithStride(TestConv3DTransposeOp):
def init_test_case(self): def init_test_case(self):
self.check_no_filter = True self.check_no_filter = True
self.pad = [1, 1, 1] self.pad = [1, 1, 1]
...@@ -257,7 +257,7 @@ class TestWithStride(TestConv3dTransposeOp): ...@@ -257,7 +257,7 @@ class TestWithStride(TestConv3dTransposeOp):
self.filter_size = [f_c, 6, 3, 3, 3] self.filter_size = [f_c, 6, 3, 3, 3]
class TestWithGroups(TestConv3dTransposeOp): class TestWithGroups(TestConv3DTransposeOp):
def init_test_case(self): def init_test_case(self):
self.pad = [1, 1, 1] self.pad = [1, 1, 1]
self.stride = [1, 1, 1] self.stride = [1, 1, 1]
...@@ -268,7 +268,7 @@ class TestWithGroups(TestConv3dTransposeOp): ...@@ -268,7 +268,7 @@ class TestWithGroups(TestConv3dTransposeOp):
self.filter_size = [f_c, 3, 3, 3, 3] self.filter_size = [f_c, 3, 3, 3, 3]
class TestWithDilation(TestConv3dTransposeOp): class TestWithDilation(TestConv3DTransposeOp):
def init_test_case(self): def init_test_case(self):
self.pad = [1, 1, 1] self.pad = [1, 1, 1]
self.stride = [1, 1, 1] self.stride = [1, 1, 1]
...@@ -279,7 +279,7 @@ class TestWithDilation(TestConv3dTransposeOp): ...@@ -279,7 +279,7 @@ class TestWithDilation(TestConv3dTransposeOp):
self.filter_size = [f_c, 6, 3, 3, 3] self.filter_size = [f_c, 6, 3, 3, 3]
class Test_NHWC(TestConv3dTransposeOp): class Test_NHWC(TestConv3DTransposeOp):
def init_test_case(self): def init_test_case(self):
self.pad = [0, 0, 0] self.pad = [0, 0, 0]
self.stride = [1, 1, 1] self.stride = [1, 1, 1]
...@@ -294,7 +294,7 @@ class Test_NHWC(TestConv3dTransposeOp): ...@@ -294,7 +294,7 @@ class Test_NHWC(TestConv3dTransposeOp):
# ------------ test_cudnn ------------ # ------------ test_cudnn ------------
@unittest.skipIf(not core.is_compiled_with_cuda(), @unittest.skipIf(not core.is_compiled_with_cuda(),
"core is not compiled with CUDA") "core is not compiled with CUDA")
class TestCUDNN(TestConv3dTransposeOp): class TestCUDNN(TestConv3DTransposeOp):
def init_op_type(self): def init_op_type(self):
self.use_cudnn = True self.use_cudnn = True
self.op_type = "conv3d_transpose" self.op_type = "conv3d_transpose"
...@@ -419,7 +419,7 @@ class TestCUDNNWithGroups(TestWithGroups): ...@@ -419,7 +419,7 @@ class TestCUDNNWithGroups(TestWithGroups):
@unittest.skipIf(not core.is_compiled_with_cuda(), @unittest.skipIf(not core.is_compiled_with_cuda(),
"core is not compiled with CUDA") "core is not compiled with CUDA")
class TestCUDNN_NHWC(TestConv3dTransposeOp): class TestCUDNN_NHWC(TestConv3DTransposeOp):
def init_test_case(self): def init_test_case(self):
self.pad = [0, 0, 0] self.pad = [0, 0, 0]
self.stride = [1, 1, 1] self.stride = [1, 1, 1]
......
...@@ -20,10 +20,10 @@ import numpy as np ...@@ -20,10 +20,10 @@ import numpy as np
import paddle.fluid.core as core import paddle.fluid.core as core
import paddle.fluid as fluid import paddle.fluid as fluid
from op_test import OpTest from op_test import OpTest
from test_conv3d_transpose_op import conv3dtranspose_forward_naive, TestConv3dTransposeOp from test_conv3d_transpose_op import TestConv3DTransposeOp
class TestWithSymmetricPad_NHWC(TestConv3dTransposeOp): class TestWithSymmetricPad_NHWC(TestConv3DTransposeOp):
def init_test_case(self): def init_test_case(self):
self.pad = [1, 1, 1] self.pad = [1, 1, 1]
self.stride = [1, 1, 1] self.stride = [1, 1, 1]
...@@ -35,7 +35,7 @@ class TestWithSymmetricPad_NHWC(TestConv3dTransposeOp): ...@@ -35,7 +35,7 @@ class TestWithSymmetricPad_NHWC(TestConv3dTransposeOp):
self.data_format = 'NHWC' self.data_format = 'NHWC'
class TestWithAsymmetricPad_NHWC(TestConv3dTransposeOp): class TestWithAsymmetricPad_NHWC(TestConv3DTransposeOp):
def init_test_case(self): def init_test_case(self):
self.pad = [1, 0, 1, 0, 1, 2] self.pad = [1, 0, 1, 0, 1, 2]
self.stride = [1, 1, 1] self.stride = [1, 1, 1]
...@@ -47,7 +47,7 @@ class TestWithAsymmetricPad_NHWC(TestConv3dTransposeOp): ...@@ -47,7 +47,7 @@ class TestWithAsymmetricPad_NHWC(TestConv3dTransposeOp):
self.data_format = 'NHWC' self.data_format = 'NHWC'
class TestWithGroups_NHWC(TestConv3dTransposeOp): class TestWithGroups_NHWC(TestConv3DTransposeOp):
def init_test_case(self): def init_test_case(self):
self.check_no_filter = True self.check_no_filter = True
self.pad = [1, 1, 1] self.pad = [1, 1, 1]
...@@ -60,7 +60,7 @@ class TestWithGroups_NHWC(TestConv3dTransposeOp): ...@@ -60,7 +60,7 @@ class TestWithGroups_NHWC(TestConv3dTransposeOp):
self.data_format = 'NHWC' self.data_format = 'NHWC'
class TestWithStride_NHWC(TestConv3dTransposeOp): class TestWithStride_NHWC(TestConv3DTransposeOp):
def init_test_case(self): def init_test_case(self):
self.pad = [1, 1, 1] self.pad = [1, 1, 1]
self.stride = [2, 2, 2] self.stride = [2, 2, 2]
...@@ -72,7 +72,7 @@ class TestWithStride_NHWC(TestConv3dTransposeOp): ...@@ -72,7 +72,7 @@ class TestWithStride_NHWC(TestConv3dTransposeOp):
self.data_format = 'NHWC' self.data_format = 'NHWC'
class TestWithDilation_NHWC(TestConv3dTransposeOp): class TestWithDilation_NHWC(TestConv3DTransposeOp):
def init_test_case(self): def init_test_case(self):
self.check_no_input = True self.check_no_input = True
self.pad = [1, 1, 1] self.pad = [1, 1, 1]
...@@ -85,7 +85,7 @@ class TestWithDilation_NHWC(TestConv3dTransposeOp): ...@@ -85,7 +85,7 @@ class TestWithDilation_NHWC(TestConv3dTransposeOp):
self.data_format = 'NHWC' self.data_format = 'NHWC'
class TestConv3dTransposeAPI(unittest.TestCase): class TestConv3DTransposeAPI(unittest.TestCase):
def test_case1(self): def test_case1(self):
data1 = fluid.layers.data( data1 = fluid.layers.data(
name='data1', shape=[3, 5, 5, 5], dtype='float32') name='data1', shape=[3, 5, 5, 5], dtype='float32')
...@@ -164,7 +164,7 @@ class TestConv3dTransposeAPI(unittest.TestCase): ...@@ -164,7 +164,7 @@ class TestConv3dTransposeAPI(unittest.TestCase):
self.assertIsNotNone(results[6]) self.assertIsNotNone(results[6])
class TestConv3dTransposeOpException(unittest.TestCase): class TestConv3DTransposeOpException(unittest.TestCase):
def test_exception(self): def test_exception(self):
data = fluid.layers.data( data = fluid.layers.data(
name='data', shape=[3, 5, 5, 5], dtype="float32") name='data', shape=[3, 5, 5, 5], dtype="float32")
......
@@ -438,7 +438,7 @@ class TestConv3DDoubleGradCheck_ChannelLast(unittest.TestCase):
         self.func(p)
-class TestConv3dDoubleGradCheck_ChannelLast_AsyPadding(unittest.TestCase):
+class TestConv3DDoubleGradCheck_ChannelLast_AsyPadding(unittest.TestCase):
     @prog_scope()
     def func(self, place):
         shape = [2, 2, 2, 2, 3]
...
@@ -31,7 +31,7 @@ class TestGeneratorSeed(unittest.TestCase):
     """
     def test_gen_dropout_dygraph(self):
-        gen = paddle.manual_seed(12343)
+        gen = paddle.seed(12343)
         fluid.enable_dygraph()
@@ -70,13 +70,13 @@ class TestGeneratorSeed(unittest.TestCase):
         """Test Generator seed."""
         fluid.enable_dygraph()
-        paddle.manual_seed(12312321111)
+        paddle.seed(12312321111)
         x = fluid.layers.gaussian_random([120], dtype="float32")
         st1 = paddle.get_cuda_rng_state()
         x1 = fluid.layers.gaussian_random([120], dtype="float32")
         paddle.set_cuda_rng_state(st1)
         x2 = fluid.layers.gaussian_random([120], dtype="float32")
-        paddle.manual_seed(12312321111)
+        paddle.seed(12312321111)
         x3 = fluid.layers.gaussian_random([120], dtype="float32")
         x_np = x.numpy()
         x1_np = x1.numpy()
@@ -93,13 +93,13 @@ class TestGeneratorSeed(unittest.TestCase):
         fluid.enable_dygraph()
-        gen = paddle.manual_seed(12312321111)
+        gen = paddle.seed(12312321111)
         x = paddle.randint(low=10, shape=[10], dtype="int32")
         st1 = gen.get_state()
         x1 = paddle.randint(low=10, shape=[10], dtype="int32")
         gen.set_state(st1)
         x2 = paddle.randint(low=10, shape=[10], dtype="int32")
-        paddle.manual_seed(12312321111)
+        paddle.seed(12312321111)
         x3 = paddle.randint(low=10, shape=[10], dtype="int32")
         x_np = x.numpy()
         x1_np = x1.numpy()
@@ -114,7 +114,7 @@ class TestGeneratorSeed(unittest.TestCase):
     def test_gen_TruncatedNormal_initializer(self):
         fluid.disable_dygraph()
-        gen = paddle.manual_seed(123123143)
+        gen = paddle.seed(123123143)
         cur_state = paddle.get_cuda_rng_state()
         startup_program = fluid.Program()
@@ -140,7 +140,7 @@ class TestGeneratorSeed(unittest.TestCase):
                            feed={},
                            fetch_list=[result_1, result_2])
-        paddle.manual_seed(123123143)
+        paddle.seed(123123143)
         with fluid.program_guard(train_program, startup_program):
             exe.run(startup_program)
             out2 = exe.run(train_program,
...
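As a reference for the rename exercised by these tests, here is a minimal sketch of the 2.0rc seeding API (a hypothetical snippet, not part of this diff; it assumes dygraph mode and illustrative seed values):

import paddle

paddle.disable_static()
gen = paddle.seed(12343)       # replaces paddle.manual_seed(...); returns the Generator
x1 = paddle.randn([4])         # first draw after seeding
paddle.seed(12343)             # re-seeding replays the same random stream
x2 = paddle.randn([4])         # expected to match x1 elementwise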
@@ -34,7 +34,7 @@ def random_reader():
 def simple_fc_net(places, use_legacy_py_reader, use_double_buffer):
-    paddle.manual_seed(1)
+    paddle.seed(1)
     paddle.framework.random._manual_program_seed(1)
     startup_prog = fluid.Program()
     main_prog = fluid.Program()
...
@@ -286,7 +286,7 @@ class TestModulatedDeformableConvInvalidInput(unittest.TestCase):
         self.assertRaises(TypeError, test_invalid_offset)
-class TestDeformConv2dAPI(unittest.TestCase):
+class TestDeformConv2DAPI(unittest.TestCase):
     def test_api(self):
         def test_deform_conv2d_v1():
             paddle.enable_static()
...
@@ -487,7 +487,7 @@ class TestDropoutCAPI(unittest.TestCase):
                 self.assertTrue(np.allclose(result.numpy(), result_np))
-class TestDropout2dFAPI(unittest.TestCase):
+class TestDropout2DFAPI(unittest.TestCase):
     def setUp(self):
         np.random.seed(123)
         self.places = [fluid.CPUPlace()]
@@ -535,7 +535,7 @@ class TestDropout2dFAPI(unittest.TestCase):
                 self.assertTrue(np.allclose(res.numpy(), res_np))
-class TestDropout2dFAPIError(unittest.TestCase):
+class TestDropout2DFAPIError(unittest.TestCase):
     def test_errors(self):
         with program_guard(Program(), Program()):
@@ -554,7 +554,7 @@ class TestDropout2dFAPIError(unittest.TestCase):
             self.assertRaises(ValueError, test_dataformat)
-class TestDropout2dCAPI(unittest.TestCase):
+class TestDropout2DCAPI(unittest.TestCase):
     def setUp(self):
         np.random.seed(123)
         self.places = [fluid.CPUPlace()]
@@ -567,13 +567,13 @@ class TestDropout2dCAPI(unittest.TestCase):
                 input_np = np.random.random([2, 3, 4, 5]).astype("float32")
                 result_np = input_np
                 input = fluid.dygraph.to_variable(input_np)
-                m = paddle.nn.Dropout2d(p=0.)
+                m = paddle.nn.Dropout2D(p=0.)
                 m.eval()
                 result = m(input)
                 self.assertTrue(np.allclose(result.numpy(), result_np))
-class TestDropout3dFAPI(unittest.TestCase):
+class TestDropout3DFAPI(unittest.TestCase):
     def setUp(self):
         np.random.seed(123)
         self.places = [fluid.CPUPlace()]
@@ -621,7 +621,7 @@ class TestDropout3dFAPI(unittest.TestCase):
                 self.assertTrue(np.allclose(res.numpy(), res_np))
-class TestDropout3dFAPIError(unittest.TestCase):
+class TestDropout3DFAPIError(unittest.TestCase):
     def test_errors(self):
         with program_guard(Program(), Program()):
@@ -640,7 +640,7 @@ class TestDropout3dFAPIError(unittest.TestCase):
             self.assertRaises(ValueError, test_dataformat)
-class TestDropout3dCAPI(unittest.TestCase):
+class TestDropout3DCAPI(unittest.TestCase):
     def setUp(self):
         np.random.seed(123)
         self.places = [fluid.CPUPlace()]
@@ -653,7 +653,7 @@ class TestDropout3dCAPI(unittest.TestCase):
                 input_np = np.random.random([2, 3, 4, 5, 6]).astype("float32")
                 result_np = input_np
                 input = fluid.dygraph.to_variable(input_np)
-                m = paddle.nn.Dropout3d(p=0.)
+                m = paddle.nn.Dropout3D(p=0.)
                 m.eval()
                 result = m(input)
                 self.assertTrue(np.allclose(result.numpy(), result_np))
...
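A minimal sketch of the renamed dropout layers exercised above (hypothetical snippet, assuming 2.0rc dygraph mode): in eval() mode dropout is the identity, which is what the CAPI tests assert.

import numpy as np
import paddle

paddle.disable_static()
x = paddle.to_tensor(np.random.random([2, 3, 4, 5]).astype("float32"))
m = paddle.nn.Dropout2D(p=0.5)   # formerly paddle.nn.Dropout2d
m.eval()                         # dropout becomes a no-op in eval mode
y = m(x)                         # y equals x elementwise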
@@ -110,7 +110,7 @@ class TestDygraphMultiForward(unittest.TestCase):
         epoch_num = 1
         with fluid.dygraph.guard():
-            paddle.manual_seed(SEED)
+            paddle.seed(SEED)
             paddle.framework.random._manual_program_seed(SEED)
             mnist = MNIST()
             sgd = SGDOptimizer(
@@ -143,7 +143,7 @@ class TestDygraphMultiForward(unittest.TestCase):
                 dy_param_init_value[param.name] = param.numpy()
         with new_program_scope():
-            paddle.manual_seed(SEED)
+            paddle.seed(SEED)
             paddle.framework.random._manual_program_seed(SEED)
             exe = fluid.Executor(fluid.CPUPlace(
             ) if not core.is_compiled_with_cuda() else fluid.CUDAPlace(0))
...
@@ -117,7 +117,7 @@ class TestDygraphWeightNorm(unittest.TestCase):
     def test_check_output(self):
         fluid.enable_imperative()
-        linear = paddle.nn.Conv2d(2, 3, 3)
+        linear = paddle.nn.Conv2D(2, 3, 3)
         before_weight = linear.weight.numpy()
         if self.dim == None:
             self.dim = -1
@@ -179,7 +179,7 @@ class TestDygraphRemoveWeightNorm(unittest.TestCase):
     def test_check_output(self):
         fluid.enable_imperative()
-        linear = paddle.nn.Conv2d(2, 3, 3)
+        linear = paddle.nn.Conv2D(2, 3, 3)
         before_weight = linear.weight
         wn = weight_norm(linear, dim=self.dim)
         rwn = remove_weight_norm(linear)
...
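For reference, a sketch of the renamed Conv2D layer under weight normalization, mirroring the tests above (hypothetical snippet; it assumes the paddle.nn.utils helpers available in 2.0rc):

import paddle
from paddle.nn.utils import weight_norm, remove_weight_norm

paddle.disable_static()
conv = paddle.nn.Conv2D(2, 3, 3)   # formerly paddle.nn.Conv2d
conv = weight_norm(conv, dim=0)    # reparameterizes weight into magnitude and direction
conv = remove_weight_norm(conv)    # folds the factors back into a plain weight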
@@ -466,7 +466,7 @@ class PaddingRNNTestBase(unittest.TestCase):
         pass
     def _prepare_program(self, config, parallel=True):
-        paddle.manual_seed(config.random_seed)
+        paddle.seed(config.random_seed)
         self.main_program = fluid.Program()
         self.startup_program = fluid.Program()
         with fluid.program_guard(self.main_program, self.startup_program):
...
@@ -39,7 +39,7 @@ class TestEmbeddingIdStopGradientBase(unittest.TestCase):
     def run_program(self, place, stop_gradient=False):
         np.random.seed(1)
-        paddle.manual_seed(1)
+        paddle.seed(1)
         paddle.framework.random._manual_program_seed(1)
         startup_program = fluid.Program()
...
@@ -137,7 +137,7 @@ class TestFCOpWithPadding(TestFCOp):
 class TestFcOp_NumFlattenDims_NegOne(unittest.TestCase):
     def test_api(self):
         def run_program(num_flatten_dims):
-            paddle.manual_seed(SEED)
+            paddle.seed(SEED)
             startup_program = Program()
             main_program = Program()
...
@@ -57,7 +57,7 @@ class TestFuseBatchNormActPass(unittest.TestCase):
         return x, y, loss
     def check(self, place, use_cuda):
-        paddle.manual_seed(1)
+        paddle.seed(1)
         paddle.framework.random._manual_program_seed(1)
         main_program = fluid.Program()
         startup_program = fluid.Program()
...
@@ -158,7 +158,7 @@ class TestFusedBnAddActAPI(unittest.TestCase):
         return x, y, loss
     def check(self, place, use_cuda):
-        paddle.manual_seed(1)
+        paddle.seed(1)
         paddle.framework.random._manual_program_seed(1)
         iters = 5
         batch_size = 16
...
@@ -38,7 +38,7 @@ class TestGaussianRandomOp(OpTest):
             "seed": 10,
             "use_mkldnn": self.use_mkldnn
         }
-        paddle.manual_seed(10)
+        paddle.seed(10)
         self.outputs = {'Out': np.zeros((123, 92), dtype='float32')}
...
@@ -30,8 +30,6 @@ class TestGenerator(unittest.TestCase):
         """Test basic generator."""
         gen = generator.Generator()
         gen.manual_seed(123123143)
-        s = gen.initial_seed()
-        s = gen.seed()
         st = gen.get_state()
         gen.set_state(st)
         gen.random()
...
@@ -35,7 +35,7 @@ def random_reader():
 def simple_fc_net(places, use_legacy_py_reader, use_double_buffer):
-    paddle.manual_seed(1)
+    paddle.seed(1)
     paddle.framework.random._manual_program_seed(1)
     startup_prog = fluid.Program()
     main_prog = fluid.Program()
...
@@ -269,7 +269,7 @@ class TestHSigmoidOpWithSparseGrad(unittest.TestCase):
     def training_test(self, is_sparse):
         with fluid.program_guard(fluid.Program(), fluid.Program()):
-            paddle.manual_seed(1)
+            paddle.seed(1)
             start_up = fluid.default_startup_program()
             x = np.arange(6).reshape(6)
             path_table = np.array([(1, 2, -1), (1, 2, -1)]).astype('int64')
...
@@ -120,7 +120,7 @@ class TestAmpScaler(unittest.TestCase):
         inp_np = np.random.random(size=[1, 3, 128, 128]).astype(np.float32)
         def run_simple_conv(inp_np, use_scaler=True):
-            paddle.manual_seed(10)
+            paddle.seed(10)
             paddle.framework.random._manual_program_seed(10)
             with fluid.dygraph.guard():
                 model = SimpleConv(
@@ -205,7 +205,7 @@ class TestResnet2(unittest.TestCase):
             paddle.disable_static()
-            paddle.manual_seed(seed)
+            paddle.seed(seed)
             paddle.framework.random._manual_program_seed(seed)
             resnet = ResNet(use_cudnn=True)
@@ -282,7 +282,7 @@ class TestResnet(unittest.TestCase):
         batch_num = 1
         with fluid.dygraph.guard():
-            paddle.manual_seed(seed)
+            paddle.seed(seed)
             paddle.framework.random._manual_program_seed(seed)
             resnet = ResNet(use_cudnn=True)
...
@@ -206,7 +206,7 @@ class TestDygraphDeepCF(unittest.TestCase):
         else:
             (users_np, items_np, labels_np, num_users, num_items,
              matrix) = get_data()
-        paddle.manual_seed(seed)
+        paddle.seed(seed)
         paddle.framework.random._manual_program_seed(seed)
         startup = fluid.Program()
         main = fluid.Program()
@@ -243,7 +243,7 @@ class TestDygraphDeepCF(unittest.TestCase):
         sys.stderr.write('static loss %s\n' % static_loss)
         with fluid.dygraph.guard():
-            paddle.manual_seed(seed)
+            paddle.seed(seed)
             paddle.framework.random._manual_program_seed(seed)
             deepcf = DeepCF(num_users, num_items, matrix)
@@ -268,7 +268,7 @@ class TestDygraphDeepCF(unittest.TestCase):
             sys.stderr.write('dynamic loss: %s %s\n' % (slice, dy_loss))
         with fluid.dygraph.guard():
-            paddle.manual_seed(seed)
+            paddle.seed(seed)
             paddle.framework.random._manual_program_seed(seed)
             deepcf2 = DeepCF(num_users, num_items, matrix)
...
@@ -311,7 +311,7 @@ class TestDygraphDoubleGradVisitedUniq(TestCase):
         fluid.set_flags({'FLAGS_sort_sum_gradient': True})
         with fluid.dygraph.guard():
-            paddle.manual_seed(123)
+            paddle.seed(123)
             paddle.framework.random._manual_program_seed(123)
             a = fluid.dygraph.to_variable(value)
             a.stop_gradient = False
@@ -328,7 +328,7 @@ class TestDygraphDoubleGradVisitedUniq(TestCase):
         grad_1 = dx[0].numpy()
         with fluid.dygraph.guard():
-            paddle.manual_seed(123)
+            paddle.seed(123)
             paddle.framework.random._manual_program_seed(123)
             a = fluid.dygraph.to_variable(value)
             a.stop_gradient = False
...
@@ -56,7 +56,7 @@ class Generator(fluid.Layer):
 class TestDygraphGAN(unittest.TestCase):
     def test_gan_float32(self):
         seed = 90
-        paddle.manual_seed(1)
+        paddle.seed(1)
         paddle.framework.random._manual_program_seed(1)
         startup = fluid.Program()
         discriminate_p = fluid.Program()
@@ -131,7 +131,7 @@ class TestDygraphGAN(unittest.TestCase):
         dy_params = dict()
         with fluid.dygraph.guard():
-            paddle.manual_seed(1)
+            paddle.seed(1)
             paddle.framework.random._manual_program_seed(1)
             discriminator = Discriminator()
@@ -176,7 +176,7 @@ class TestDygraphGAN(unittest.TestCase):
         dy_params2 = dict()
         with fluid.dygraph.guard():
             fluid.set_flags({'FLAGS_sort_sum_gradient': True})
-            paddle.manual_seed(1)
+            paddle.seed(1)
             paddle.framework.random._manual_program_seed(1)
             discriminator2 = Discriminator()
             generator2 = Generator()
...
@@ -61,7 +61,7 @@ class GCN(fluid.Layer):
 class TestDygraphGNN(unittest.TestCase):
     def test_gnn_float32(self):
-        paddle.manual_seed(90)
+        paddle.seed(90)
         paddle.framework.random._manual_program_seed(90)
         startup = fluid.Program()
         main = fluid.Program()
@@ -112,7 +112,7 @@ class TestDygraphGNN(unittest.TestCase):
             scope.find_var(model.gc.weight.name).get_tensor())
         with fluid.dygraph.guard():
-            paddle.manual_seed(90)
+            paddle.seed(90)
             paddle.framework.random._manual_program_seed(90)
             features = np.ones([1, 100, 50], dtype=np.float32)
@@ -138,7 +138,7 @@ class TestDygraphGNN(unittest.TestCase):
         model_gc_weight_value = model.gc.weight.numpy()
         with fluid.dygraph.guard():
-            paddle.manual_seed(90)
+            paddle.seed(90)
             paddle.framework.random._manual_program_seed(90)
             features2 = np.ones([1, 100, 50], dtype=np.float32)
...
@@ -28,11 +28,11 @@ class LeNetDygraph(fluid.dygraph.Layer):
         super(LeNetDygraph, self).__init__()
         self.num_classes = num_classes
         self.features = nn.Sequential(
-            nn.Conv2d(
+            nn.Conv2D(
                 1, 6, 3, stride=1, padding=1),
             nn.ReLU(),
             paddle.fluid.dygraph.Pool2D(2, 'max', 2),
-            nn.Conv2d(
+            nn.Conv2D(
                 6, 16, 5, stride=1, padding=0),
             nn.ReLU(),
             paddle.fluid.dygraph.Pool2D(2, 'max', 2))
@@ -60,7 +60,7 @@ def init_weights(layer):
         new_bias = paddle.fluid.layers.fill_constant(
             layer.bias.shape, layer.bias.dtype, value=-0.1)
         layer.bias.set_value(new_bias)
-    elif type(layer) == nn.Conv2d:
+    elif type(layer) == nn.Conv2D:
         new_weight = paddle.fluid.layers.fill_constant(
             layer.weight.shape, layer.weight.dtype, value=0.7)
         layer.weight.set_value(new_weight)
@@ -80,7 +80,7 @@ class TestLayerApply(unittest.TestCase):
             if type(layer) == nn.Linear:
                 np.testing.assert_allclose(layer.weight.numpy(), 0.9)
                 np.testing.assert_allclose(layer.bias.numpy(), -0.1)
-            elif type(layer) == nn.Conv2d:
+            elif type(layer) == nn.Conv2D:
                 np.testing.assert_allclose(layer.weight.numpy(), 0.7)
                 np.testing.assert_allclose(layer.bias.numpy(), -0.2)
...
@@ -27,11 +27,11 @@ class LeNetDygraph(fluid.dygraph.Layer):
     def __init__(self):
         super(LeNetDygraph, self).__init__()
         self.features = nn.Sequential(
-            nn.Conv2d(
+            nn.Conv2D(
                 1, 6, 3, stride=1, padding=1),
             nn.ReLU(),
             paddle.fluid.dygraph.Pool2D(2, 'max', 2),
-            nn.Conv2d(
+            nn.Conv2D(
                 6, 16, 5, stride=1, padding=0),
             nn.ReLU(),
             paddle.fluid.dygraph.Pool2D(2, 'max', 2))
...
@@ -95,7 +95,7 @@ class TestDygraphSimpleNet(unittest.TestCase):
             for is_sort_sum_gradient in [True, False]:
                 with fluid.dygraph.guard(place):
-                    paddle.manual_seed(seed)
+                    paddle.seed(seed)
                     paddle.framework.random._manual_program_seed(seed)
                     simple_net = SimpleNet(
@@ -140,7 +140,7 @@ class TestDygraphSimpleNet(unittest.TestCase):
                 dy_loss_value = dy_loss.numpy()
                 with new_program_scope():
-                    paddle.manual_seed(seed)
+                    paddle.seed(seed)
                     paddle.framework.random._manual_program_seed(seed)
                     simple_net = SimpleNet(
...
@@ -403,7 +403,7 @@ class TestDygraphOCRAttention(unittest.TestCase):
         with fluid.dygraph.guard():
             fluid.set_flags({'FLAGS_sort_sum_gradient': True})
-            paddle.manual_seed(seed)
+            paddle.seed(seed)
             paddle.framework.random._manual_program_seed(seed)
             ocr_attention = OCRAttention()
@@ -454,7 +454,7 @@ class TestDygraphOCRAttention(unittest.TestCase):
             dy_param_value[param.name] = param.numpy()
         with new_program_scope():
-            paddle.manual_seed(seed)
+            paddle.seed(seed)
             paddle.framework.random._manual_program_seed(seed)
             exe = fluid.Executor(fluid.CPUPlace(
             ) if not core.is_compiled_with_cuda() else fluid.CUDAPlace(0))
...
@@ -74,7 +74,7 @@ class TestImperativeOptimizerBase(unittest.TestCase):
         with fluid.dygraph.guard(place):
             try:
-                paddle.manual_seed(seed)
+                paddle.seed(seed)
                 paddle.framework.random._manual_program_seed(seed)
                 mlp = MLP()
                 optimizer = self.get_optimizer_dygraph(
@@ -91,7 +91,7 @@ class TestImperativeOptimizerBase(unittest.TestCase):
         ) else fluid.CUDAPlace(0)
         with fluid.dygraph.guard(place):
-            paddle.manual_seed(seed)
+            paddle.seed(seed)
             paddle.framework.random._manual_program_seed(seed)
             mlp = MLP()
@@ -132,7 +132,7 @@ class TestImperativeOptimizerBase(unittest.TestCase):
             dy_param_value[param.name] = param.numpy()
         with new_program_scope():
-            paddle.manual_seed(seed)
+            paddle.seed(seed)
             paddle.framework.random._manual_program_seed(seed)
             if place == None:
...
@@ -74,7 +74,7 @@ class TestImperativeOptimizerBase(unittest.TestCase):
         try:
             paddle.disable_static()
-            paddle.manual_seed(seed)
+            paddle.seed(seed)
             paddle.framework.random._manual_program_seed(seed)
             mlp = MLP()
             optimizer = self.get_optimizer_dygraph(
@@ -93,7 +93,7 @@ class TestImperativeOptimizerBase(unittest.TestCase):
         ) else fluid.CUDAPlace(0)
         paddle.disable_static(place)
-        paddle.manual_seed(seed)
+        paddle.seed(seed)
         paddle.framework.random._manual_program_seed(seed)
         mlp = MLP()
@@ -142,7 +142,7 @@ class TestImperativeOptimizerBase(unittest.TestCase):
         paddle.enable_static()
         with new_program_scope():
-            paddle.manual_seed(seed)
+            paddle.seed(seed)
             paddle.framework.random._manual_program_seed(seed)
             if place == None:
...
@@ -226,7 +226,7 @@ class TestDygraphPtbRnn(unittest.TestCase):
         traced_layer = None
         with fluid.dygraph.guard():
-            paddle.manual_seed(seed)
+            paddle.seed(seed)
             paddle.framework.random._manual_program_seed(seed)
             # TODO: marsyang1993 Change seed to
             ptb_model = PtbModel(
@@ -294,7 +294,7 @@ class TestDygraphPtbRnn(unittest.TestCase):
             dy_last_hidden_value = last_hidden.numpy()
         with new_program_scope():
-            paddle.manual_seed(seed)
+            paddle.seed(seed)
             paddle.framework.random._manual_program_seed(seed)
             ptb_model = PtbModel(
                 hidden_size=hidden_size,
...
@@ -45,7 +45,7 @@ class TestDygraphPtbRnnSortGradient(unittest.TestCase):
         with fluid.dygraph.guard():
             fluid.set_flags({'FLAGS_sort_sum_gradient': True})
-            paddle.manual_seed(seed)
+            paddle.seed(seed)
             paddle.framework.random._manual_program_seed(seed)
             # TODO: marsyang1993 Change seed to
@@ -95,7 +95,7 @@ class TestDygraphPtbRnnSortGradient(unittest.TestCase):
             dy_last_hidden_value = last_hidden.numpy()
         with new_program_scope():
-            paddle.manual_seed(seed)
+            paddle.seed(seed)
             paddle.framework.random._manual_program_seed(seed)
             ptb_model = PtbModel(
...
@@ -64,7 +64,7 @@ class TestImperativeMnist(unittest.TestCase):
         mask = np.array(mask_list).astype("float32")
         with fluid.dygraph.guard():
-            paddle.manual_seed(seed)
+            paddle.seed(seed)
             paddle.framework.random._manual_program_seed(seed)
             policy = Policy(input_size=4)
@@ -105,7 +105,7 @@ class TestImperativeMnist(unittest.TestCase):
             dy_param_value[param.name] = param.numpy()
         with new_program_scope():
-            paddle.manual_seed(seed)
+            paddle.seed(seed)
             paddle.framework.random._manual_program_seed(seed)
             exe = fluid.Executor(fluid.CPUPlace(
...
@@ -251,7 +251,7 @@ class TestDygraphResnet(unittest.TestCase):
         traced_layer = None
         with fluid.dygraph.guard():
-            paddle.manual_seed(seed)
+            paddle.seed(seed)
             paddle.framework.random._manual_program_seed(seed)
             resnet = ResNet()
@@ -334,7 +334,7 @@ class TestDygraphResnet(unittest.TestCase):
             dy_param_value[param.name] = param.numpy()
         with new_program_scope():
-            paddle.manual_seed(seed)
+            paddle.seed(seed)
             paddle.framework.random._manual_program_seed(seed)
             exe = fluid.Executor(fluid.CPUPlace(
...
@@ -78,7 +78,7 @@ class TestDygraphResnetSortGradient(unittest.TestCase):
         batch_num = 10
         with fluid.dygraph.guard():
             fluid.set_flags({'FLAGS_sort_sum_gradient': True})
-            paddle.manual_seed(seed)
+            paddle.seed(seed)
             paddle.framework.random._manual_program_seed(seed)
             resnet = ResNet()
@@ -137,7 +137,7 @@ class TestDygraphResnetSortGradient(unittest.TestCase):
             dy_param_value[param.name] = param.numpy()
         with new_program_scope():
-            paddle.manual_seed(seed)
+            paddle.seed(seed)
             paddle.framework.random._manual_program_seed(seed)
             exe = fluid.Executor(fluid.CPUPlace(
...
@@ -219,7 +219,7 @@ class TestDygraphPtbRnn(unittest.TestCase):
         batch_num = 200
         with fluid.dygraph.guard():
-            paddle.manual_seed(seed)
+            paddle.seed(seed)
             paddle.framework.random._manual_program_seed(seed)
             # TODO: marsyang1993 Change seed to
             ptb_model = PtbModel(
@@ -305,7 +305,7 @@ class TestDygraphPtbRnn(unittest.TestCase):
         batch_num = 200
         with fluid.dygraph.guard():
-            paddle.manual_seed(seed)
+            paddle.seed(seed)
             paddle.framework.random._manual_program_seed(seed)
             # TODO: marsyang1993 Change seed to
             ptb_model = PtbModel(
@@ -414,7 +414,7 @@ class TestDygraphPtbRnn(unittest.TestCase):
         batch_num = 200
         with fluid.dygraph.guard():
-            paddle.manual_seed(seed)
+            paddle.seed(seed)
             paddle.framework.random._manual_program_seed(seed)
             # TODO: marsyang1993 Change seed to
             ptb_model = PtbModel(
@@ -521,7 +521,7 @@ class TestDygraphPtbRnn(unittest.TestCase):
         batch_num = 200
         with fluid.dygraph.guard():
-            paddle.manual_seed(seed)
+            paddle.seed(seed)
             paddle.framework.random._manual_program_seed(seed)
             # TODO: marsyang1993 Change seed to
             ptb_model = PtbModel(
@@ -711,7 +711,7 @@ class TestDygraphPtbRnn(unittest.TestCase):
         batch_num = 200
         with fluid.dygraph.guard():
-            paddle.manual_seed(seed)
+            paddle.seed(seed)
             paddle.framework.random._manual_program_seed(seed)
             # TODO: marsyang1993 Change seed to
             ptb_model = PtbModel(
@@ -802,7 +802,7 @@ class TestDygraphPtbRnn(unittest.TestCase):
         batch_num = 200
         with fluid.dygraph.guard():
-            paddle.manual_seed(seed)
+            paddle.seed(seed)
             paddle.framework.random._manual_program_seed(seed)
             # TODO: marsyang1993 Change seed to
...
@@ -219,7 +219,7 @@ class TestDygraphPtbRnn(unittest.TestCase):
         batch_num = 200
         with fluid.dygraph.guard():
-            paddle.manual_seed(seed)
+            paddle.seed(seed)
             paddle.framework.random._manual_program_seed(seed)
             # TODO: marsyang1993 Change seed to
             ptb_model = PtbModel(
@@ -308,7 +308,7 @@ class TestDygraphPtbRnn(unittest.TestCase):
         batch_num = 200
         with fluid.dygraph.guard():
-            paddle.manual_seed(seed)
+            paddle.seed(seed)
             paddle.framework.random._manual_program_seed(seed)
             # TODO: marsyang1993 Change seed to
             ptb_model = PtbModel(
@@ -416,7 +416,7 @@ class TestDygraphPtbRnn(unittest.TestCase):
         batch_num = 200
         with fluid.dygraph.guard():
-            paddle.manual_seed(seed)
+            paddle.seed(seed)
             paddle.framework.random._manual_program_seed(seed)
             # TODO: marsyang1993 Change seed to
             ptb_model = PtbModel(
@@ -524,7 +524,7 @@ class TestDygraphPtbRnn(unittest.TestCase):
         batch_num = 200
         with fluid.dygraph.guard():
-            paddle.manual_seed(seed)
+            paddle.seed(seed)
             paddle.framework.random._manual_program_seed(seed)
             # TODO: marsyang1993 Change seed to
             ptb_model = PtbModel(
@@ -638,7 +638,7 @@ class TestDygraphPtbRnn(unittest.TestCase):
         batch_num = 200
         with fluid.dygraph.guard():
-            paddle.manual_seed(seed)
+            paddle.seed(seed)
             paddle.framework.random._manual_program_seed(seed)
             # TODO: marsyang1993 Change seed to
             ptb_model = PtbModel(
@@ -717,7 +717,7 @@ class TestDygraphPtbRnn(unittest.TestCase):
         batch_num = 200
         with fluid.dygraph.guard():
-            paddle.manual_seed(seed)
+            paddle.seed(seed)
             paddle.framework.random._manual_program_seed(seed)
             # TODO: marsyang1993 Change seed to
             ptb_model = PtbModel(
@@ -808,7 +808,7 @@ class TestDygraphPtbRnn(unittest.TestCase):
         batch_num = 200
         with fluid.dygraph.guard():
-            paddle.manual_seed(seed)
+            paddle.seed(seed)
             paddle.framework.random._manual_program_seed(seed)
             # TODO: marsyang1993 Change seed to
             ptb_model = PtbModel(
...
@@ -311,7 +311,7 @@ class TestImperativeResneXt(unittest.TestCase):
         batch_num = 1
         epoch_num = 1
         with fluid.dygraph.guard():
-            paddle.manual_seed(seed)
+            paddle.seed(seed)
             paddle.framework.random._manual_program_seed(seed)
             se_resnext = SeResNeXt()
@@ -372,7 +372,7 @@ class TestImperativeResneXt(unittest.TestCase):
             dy_param_value[param.name] = param.numpy()
         with new_program_scope():
-            paddle.manual_seed(seed)
+            paddle.seed(seed)
             paddle.framework.random._manual_program_seed(seed)
             exe = fluid.Executor(fluid.CPUPlace(
...
@@ -102,7 +102,7 @@ class TestDygraphSimpleNet(unittest.TestCase):
             for is_sort_sum_gradient in [True, False]:
                 traced_layer = None
                 with fluid.dygraph.guard(place):
-                    paddle.manual_seed(seed)
+                    paddle.seed(seed)
                     paddle.framework.random._manual_program_seed(seed)
                     simple_net = SimpleNet(
@@ -146,7 +146,7 @@ class TestDygraphSimpleNet(unittest.TestCase):
                 dy_loss_value = dy_loss.numpy()
                 with new_program_scope():
-                    paddle.manual_seed(seed)
+                    paddle.seed(seed)
                     paddle.framework.random._manual_program_seed(seed)
                     simple_net = SimpleNet(
...
@@ -468,7 +468,7 @@ def build_optimizer(layer, cfg, loss=None):
 class DyGraphTrainModel(object):
     def __init__(self, cfg):
-        paddle.manual_seed(1)
+        paddle.seed(1)
         paddle.framework.random._manual_program_seed(1)
         self.generator = Generator(cfg)
@@ -529,7 +529,7 @@ class StaticGraphTrainModel(object):
                 shape=[None, cfg.c_dim], dtype='float32', name='label_trg')
             return image_real, label_org, label_trg
-        paddle.manual_seed(cfg.seed)
+        paddle.seed(cfg.seed)
         paddle.framework.random._manual_program_seed(cfg.seed)
         self.gen_program = fluid.Program()
         gen_startup_program = fluid.Program()
...
@@ -951,7 +951,7 @@ class TestDygraphTransformerSortGradient(unittest.TestCase):
         with guard():
             fluid.set_flags({'FLAGS_sort_sum_gradient': True})
-            paddle.manual_seed(seed)
+            paddle.seed(seed)
             paddle.framework.random._manual_program_seed(seed)
             transformer = TransFormer(
                 ModelHyperParams.src_vocab_size,
@@ -1035,7 +1035,7 @@ class TestDygraphTransformerSortGradient(unittest.TestCase):
             dy_token_num_value = dy_token_num.numpy()
         with new_program_scope():
-            paddle.manual_seed(seed)
+            paddle.seed(seed)
             paddle.framework.random._manual_program_seed(seed)
             transformer = TransFormer(
                 ModelHyperParams.src_vocab_size,
...
@@ -80,7 +80,7 @@ class TestInplaceAddto(unittest.TestCase):
     def test_result(self):
        def run_program(enable_addto):
            np.random.seed(10)
-           paddle.manual_seed(10)
+           paddle.seed(10)
            paddle.framework.random._manual_program_seed(10)
            if fluid.core.is_compiled_with_cuda():
                fluid.set_flags({"FLAGS_cudnn_deterministic": True})
...
@@ -35,22 +35,22 @@ class TestInstanceNorm(unittest.TestCase):
         def error1d():
             x_data_4 = np.random.random(size=(2, 1, 3, 3)).astype('float32')
-            instance_norm1d = paddle.nn.InstanceNorm1d(1)
+            instance_norm1d = paddle.nn.InstanceNorm1D(1)
             instance_norm1d(fluid.dygraph.to_variable(x_data_4))
         def error2d():
             x_data_3 = np.random.random(size=(2, 1, 3)).astype('float32')
-            instance_norm2d = paddle.nn.InstanceNorm2d(1)
+            instance_norm2d = paddle.nn.InstanceNorm2D(1)
             instance_norm2d(fluid.dygraph.to_variable(x_data_3))
         def error3d():
             x_data_4 = np.random.random(size=(2, 1, 3, 3)).astype('float32')
-            instance_norm3d = paddle.nn.BatchNorm3d(1)
+            instance_norm3d = paddle.nn.BatchNorm3D(1)
             instance_norm3d(fluid.dygraph.to_variable(x_data_4))
         def weight_bias_false():
             x_data_4 = np.random.random(size=(2, 1, 3, 3)).astype('float32')
-            instance_norm3d = paddle.nn.BatchNorm3d(
+            instance_norm3d = paddle.nn.BatchNorm3D(
                 1, weight_attr=False, bias_attr=False)
         with fluid.dygraph.guard(p):
@@ -75,7 +75,7 @@ class TestInstanceNorm(unittest.TestCase):
         def compute_v2(x):
             with fluid.dygraph.guard(p):
-                bn = paddle.nn.InstanceNorm2d(shape[1])
+                bn = paddle.nn.InstanceNorm2D(shape[1])
                 y = bn(fluid.dygraph.to_variable(x))
             return y.numpy()
@@ -104,7 +104,7 @@ class TestInstanceNorm(unittest.TestCase):
         def compute_v2(x_np):
             with program_guard(Program(), Program()):
-                ins = paddle.nn.InstanceNorm2d(shape[1])
+                ins = paddle.nn.InstanceNorm2D(shape[1])
                 x = fluid.data(name='x', shape=x_np.shape, dtype=x_np.dtype)
                 y = ins(x)
                 exe.run(fluid.default_startup_program())
...
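A usage sketch of the renamed normalization layers checked above (hypothetical snippet, assuming 2.0rc dygraph mode); the error* helpers assert that an input of the wrong rank is rejected:

import numpy as np
import paddle

paddle.disable_static()
x = paddle.to_tensor(np.random.random((2, 1, 3, 3)).astype("float32"))
inorm = paddle.nn.InstanceNorm2D(1)   # formerly InstanceNorm2d; expects 4-D NCHW input
y = inorm(x)
# Passing a 3-D tensor such as shape (2, 1, 3) here should raise an error,
# which is what error2d() above exercises.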
@@ -37,7 +37,7 @@ class TestIrMemoryOptimizeIfElseOp(unittest.TestCase):
                        use_cuda=True,
                        use_mem_opt=False,
                        iter_num=5):
-        paddle.manual_seed(100)
+        paddle.seed(100)
         paddle.framework.random._manual_program_seed(100)
         prog = Program()
         startup_prog = Program()
...
@@ -222,7 +222,7 @@ class TestJitSaveLoad(unittest.TestCase):
         # enable dygraph mode
         fluid.enable_dygraph()
         # config seed
-        paddle.manual_seed(SEED)
+        paddle.seed(SEED)
         paddle.framework.random._manual_program_seed(SEED)
     def train_and_save_model(self, model_path=None):
@@ -370,7 +370,7 @@ class TestJitSaveLoadConfig(unittest.TestCase):
         # enable dygraph mode
         fluid.enable_dygraph()
         # config seed
-        paddle.manual_seed(SEED)
+        paddle.seed(SEED)
         paddle.framework.random._manual_program_seed(SEED)
     def test_output_spec(self):
@@ -429,7 +429,7 @@ class TestJitMultipleLoading(unittest.TestCase):
         # enable dygraph mode
         fluid.enable_dygraph()
         # config seed
-        paddle.manual_seed(SEED)
+        paddle.seed(SEED)
         paddle.framework.random._manual_program_seed(SEED)
         # train and save base model
         self.train_and_save_orig_model()
@@ -457,7 +457,7 @@ class TestJitPruneModelAndLoad(unittest.TestCase):
         # enable dygraph mode
         fluid.enable_dygraph()
         # config seed
-        paddle.manual_seed(SEED)
+        paddle.seed(SEED)
         paddle.framework.random._manual_program_seed(SEED)
     def train_and_save(self):
@@ -512,7 +512,7 @@ class TestJitSaveMultiCases(unittest.TestCase):
         # enable dygraph mode
         fluid.enable_dygraph()
         # config seed
-        paddle.manual_seed(SEED)
+        paddle.seed(SEED)
         paddle.framework.random._manual_program_seed(SEED)
     def verify_inference_correctness(self, layer, model_path, with_label=False):
...
@@ -57,7 +57,7 @@ class LayerTest(unittest.TestCase):
     @contextlib.contextmanager
     def static_graph(self):
         with new_program_scope():
-            paddle.manual_seed(self.seed)
+            paddle.seed(self.seed)
             paddle.framework.random._manual_program_seed(self.seed)
             yield
@@ -77,7 +77,7 @@ class LayerTest(unittest.TestCase):
     def dynamic_graph(self, force_to_use_cpu=False):
         with fluid.dygraph.guard(
                 self._get_place(force_to_use_cpu=force_to_use_cpu)):
-            paddle.manual_seed(self.seed)
+            paddle.seed(self.seed)
             paddle.framework.random._manual_program_seed(self.seed)
             yield
...
@@ -17,16 +17,16 @@ import unittest
 import paddle
 import paddle.fluid as fluid
-from paddle.framework import manual_seed
+from paddle.framework import seed
 from paddle.fluid.framework import Program, default_main_program, default_startup_program
 import numpy as np
 class TestManualSeed(unittest.TestCase):
-    def test_manual_seed(self):
+    def test_seed(self):
         fluid.enable_dygraph()
-        gen = paddle.manual_seed(12312321111)
+        gen = paddle.seed(12312321111)
         x = fluid.layers.gaussian_random([10], dtype="float32")
         st1 = gen.get_state()
         x1 = fluid.layers.gaussian_random([10], dtype="float32")
...
...@@ -18,7 +18,7 @@ import paddle ...@@ -18,7 +18,7 @@ import paddle
import copy import copy
np.random.seed(10) np.random.seed(10)
paddle.manual_seed(10) paddle.seed(10)
class TestNormalAPI(unittest.TestCase): class TestNormalAPI(unittest.TestCase):
...@@ -61,7 +61,8 @@ class TestNormalAPI(unittest.TestCase): ...@@ -61,7 +61,8 @@ class TestNormalAPI(unittest.TestCase):
if isinstance(self.mean, np.ndarray) \ if isinstance(self.mean, np.ndarray) \
and isinstance(self.std, np.ndarray): and isinstance(self.std, np.ndarray):
with paddle.static.program_guard(paddle.static.Program()): with paddle.static.program_guard(paddle.static.Program()):
mean = paddle.fluid.data('Mean', self.mean.shape, self.mean.dtype) mean = paddle.fluid.data('Mean', self.mean.shape,
self.mean.dtype)
std = paddle.fluid.data('Std', self.std.shape, self.std.dtype) std = paddle.fluid.data('Std', self.std.shape, self.std.dtype)
out = paddle.normal(mean, std, self.shape) out = paddle.normal(mean, std, self.shape)
...@@ -76,7 +77,8 @@ class TestNormalAPI(unittest.TestCase): ...@@ -76,7 +77,8 @@ class TestNormalAPI(unittest.TestCase):
return ret_all return ret_all
elif isinstance(self.mean, np.ndarray): elif isinstance(self.mean, np.ndarray):
with paddle.static.program_guard(paddle.static.Program()): with paddle.static.program_guard(paddle.static.Program()):
mean = paddle.fluid.data('Mean', self.mean.shape, self.mean.dtype) mean = paddle.fluid.data('Mean', self.mean.shape,
self.mean.dtype)
out = paddle.normal(mean, self.std, self.shape) out = paddle.normal(mean, self.std, self.shape)
exe = paddle.static.Executor(self.place) exe = paddle.static.Executor(self.place)
......
...@@ -73,7 +73,7 @@ class TestSaveLoad(unittest.TestCase): ...@@ -73,7 +73,7 @@ class TestSaveLoad(unittest.TestCase):
paddle.disable_static() paddle.disable_static()
# config seed # config seed
paddle.manual_seed(SEED) paddle.seed(SEED)
paddle.framework.random._manual_program_seed(SEED) paddle.framework.random._manual_program_seed(SEED)
def build_and_train_model(self): def build_and_train_model(self):
......
@@ -105,7 +105,7 @@ def avg_pool1D_forward_naive(x,
     return out


-class TestPool1d_API(unittest.TestCase):
+class TestPool1D_API(unittest.TestCase):
     def setUp(self):
         np.random.seed(123)
         self.places = [fluid.CPUPlace()]
@@ -138,7 +138,7 @@ class TestPool1d_API(unittest.TestCase):
             self.assertTrue(np.allclose(result.numpy(), result_np))

-            avg_pool1d_dg = paddle.nn.layer.AvgPool1d(
+            avg_pool1d_dg = paddle.nn.layer.AvgPool1D(
                 kernel_size=2, stride=None, padding=0)
             result = avg_pool1d_dg(input)
             self.assertTrue(np.allclose(result.numpy(), result_np))
@@ -159,7 +159,7 @@ class TestPool1d_API(unittest.TestCase):
             self.assertTrue(np.allclose(result.numpy(), result_np))

-            avg_pool1d_dg = paddle.nn.AvgPool1d(
+            avg_pool1d_dg = paddle.nn.AvgPool1D(
                 kernel_size=2, stride=None, padding=1, count_include_pad=True)
             result = avg_pool1d_dg(input)
             self.assertTrue(np.allclose(result.numpy(), result_np))
@@ -190,7 +190,7 @@ class TestPool1d_API(unittest.TestCase):
             self.assertTrue(np.allclose(result.numpy(), result_np))

-            max_pool1d_dg = paddle.nn.layer.MaxPool1d(
+            max_pool1d_dg = paddle.nn.layer.MaxPool1D(
                 kernel_size=2, stride=None, padding=0)
             result = max_pool1d_dg(input)
             self.assertTrue(np.allclose(result.numpy(), result_np))
@@ -207,7 +207,7 @@ class TestPool1d_API(unittest.TestCase):
             self.assertTrue(np.allclose(result.numpy(), result_np))

-            max_pool1d_dg = paddle.nn.layer.MaxPool1d(
+            max_pool1d_dg = paddle.nn.layer.MaxPool1D(
                 kernel_size=2, stride=None, padding=0)
             result = max_pool1d_dg(input)
             self.assertTrue(np.allclose(result.numpy(), result_np))
@@ -248,7 +248,7 @@ class TestPool1d_API(unittest.TestCase):
         self.check_max_dygraph_return_index_results(place)


-class TestPool2dError_API(unittest.TestCase):
+class TestPool2DError_API(unittest.TestCase):
     def test_error_api(self):
         def run1():
             with fluid.dygraph.guard():
...
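The 1D pooling layers follow the capitalization rule of the rename (`xxx1d` to `xxx1D`), and the tests use `paddle.nn.AvgPool1D` and `paddle.nn.layer.AvgPool1D` interchangeably. A small usage sketch under the new names; the input layout and shapes are illustrative assumptions:

    import paddle

    data = paddle.rand([1, 3, 32])  # NCL layout
    avg_pool1d_dg = paddle.nn.AvgPool1D(kernel_size=2, stride=None, padding=0)
    max_pool1d_dg = paddle.nn.MaxPool1D(kernel_size=2, stride=None, padding=0)
    # stride=None falls back to kernel_size, so both outputs are [1, 3, 16]
    print(avg_pool1d_dg(data).shape, max_pool1d_dg(data).shape)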
@@ -22,7 +22,7 @@ import paddle.fluid as fluid
 import paddle


-class TestPool2d_API(unittest.TestCase):
+class TestPool2D_API(unittest.TestCase):
     def setUp(self):
         np.random.seed(123)
         self.places = [fluid.CPUPlace()]
@@ -63,7 +63,7 @@ class TestPool2d_API(unittest.TestCase):
                 pool_type='avg')
             self.assertTrue(np.allclose(result.numpy(), result_np))

-            avg_pool2d_dg = paddle.nn.layer.AvgPool2d(
+            avg_pool2d_dg = paddle.nn.layer.AvgPool2D(
                 kernel_size=2, stride=2, padding=0)
             result = avg_pool2d_dg(input)
             self.assertTrue(np.allclose(result.numpy(), result_np))
@@ -84,7 +84,7 @@ class TestPool2d_API(unittest.TestCase):
                 exclusive=False)
             self.assertTrue(np.allclose(result.numpy(), result_np))

-            avg_pool2d_dg = paddle.nn.layer.AvgPool2d(
+            avg_pool2d_dg = paddle.nn.layer.AvgPool2D(
                 kernel_size=2, stride=2, padding=1, ceil_mode=False)
             result = avg_pool2d_dg(input)
             self.assertTrue(np.allclose(result.numpy(), result_np))
@@ -104,7 +104,7 @@ class TestPool2d_API(unittest.TestCase):
                 ceil_mode=True)
             self.assertTrue(np.allclose(result.numpy(), result_np))

-            avg_pool2d_dg = paddle.nn.layer.AvgPool2d(
+            avg_pool2d_dg = paddle.nn.layer.AvgPool2D(
                 kernel_size=2, stride=2, padding=0, ceil_mode=True)
             result = avg_pool2d_dg(input)
             self.assertTrue(np.allclose(result.numpy(), result_np))
@@ -144,7 +144,7 @@ class TestPool2d_API(unittest.TestCase):
                 pool_type='max')
             self.assertTrue(np.allclose(result.numpy(), result_np))

-            max_pool2d_dg = paddle.nn.layer.MaxPool2d(
+            max_pool2d_dg = paddle.nn.layer.MaxPool2D(
                 kernel_size=2, stride=2, padding=0)
             result = max_pool2d_dg(input)
             self.assertTrue(np.allclose(result.numpy(), result_np))
@@ -188,7 +188,7 @@ class TestPool2d_API(unittest.TestCase):
                 exclusive=False)
             self.assertTrue(np.allclose(result.numpy(), result_np))

-            max_pool2d_dg = paddle.nn.layer.MaxPool2d(
+            max_pool2d_dg = paddle.nn.layer.MaxPool2D(
                 kernel_size=2, stride=2, padding=1, ceil_mode=False)
             result = max_pool2d_dg(input)
             self.assertTrue(np.allclose(result.numpy(), result_np))
@@ -208,7 +208,7 @@ class TestPool2d_API(unittest.TestCase):
                 ceil_mode=True)
             self.assertTrue(np.allclose(result.numpy(), result_np))

-            max_pool2d_dg = paddle.nn.layer.MaxPool2d(
+            max_pool2d_dg = paddle.nn.layer.MaxPool2D(
                 kernel_size=2, stride=2, padding=0, ceil_mode=True)
             result = max_pool2d_dg(input)
             self.assertTrue(np.allclose(result.numpy(), result_np))
@@ -233,7 +233,7 @@ class TestPool2d_API(unittest.TestCase):
                 padding_algorithm="SAME")
             self.assertTrue(np.allclose(result.numpy(), result_np))

-            max_pool2d_dg = paddle.nn.layer.MaxPool2d(
+            max_pool2d_dg = paddle.nn.layer.MaxPool2D(
                 kernel_size=2, stride=2, padding=0)
             result = max_pool2d_dg(input)
             self.assertTrue(np.allclose(result.numpy(), result_np))
@@ -254,7 +254,7 @@ class TestPool2d_API(unittest.TestCase):
                 padding_algorithm="SAME")
             self.assertTrue(np.allclose(result.numpy(), result_np))

-            avg_pool2d_dg = paddle.nn.layer.AvgPool2d(
+            avg_pool2d_dg = paddle.nn.layer.AvgPool2D(
                 kernel_size=2, stride=2, padding=0)
             result = avg_pool2d_dg(input)
             self.assertTrue(np.allclose(result.numpy(), result_np))
@@ -279,7 +279,7 @@ class TestPool2d_API(unittest.TestCase):
                 pool_type='max')
             self.assertTrue(np.allclose(result.numpy(), result_np))

-            max_pool2d_dg = paddle.nn.layer.MaxPool2d(
+            max_pool2d_dg = paddle.nn.layer.MaxPool2D(
                 kernel_size=2, stride=2, padding=0)
             result = max_pool2d_dg(input)
             self.assertTrue(np.allclose(result.numpy(), result_np))
@@ -304,7 +304,7 @@ class TestPool2d_API(unittest.TestCase):
                 pool_type='avg')
             self.assertTrue(np.allclose(result.numpy(), result_np))

-            avg_pool2d_dg = paddle.nn.layer.AvgPool2d(
+            avg_pool2d_dg = paddle.nn.layer.AvgPool2D(
                 kernel_size=2, stride=2, padding=0)
             result = avg_pool2d_dg(input)
             self.assertTrue(np.allclose(result.numpy(), result_np))
@@ -325,7 +325,7 @@ class TestPool2d_API(unittest.TestCase):
         self.check_max_dygraph_nhwc_results(place)


-class TestPool2dError_API(unittest.TestCase):
+class TestPool2DError_API(unittest.TestCase):
     def test_error_api(self):
         def run1():
             with fluid.dygraph.guard():
...
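The 2D variants are renamed the same way (`AvgPool2d`/`MaxPool2d` to `AvgPool2D`/`MaxPool2D`). A minimal sketch of the renamed layer; the NCHW shape here is an illustrative assumption:

    import paddle

    data = paddle.rand([1, 3, 32, 32])  # NCHW layout
    max_pool2d_dg = paddle.nn.MaxPool2D(kernel_size=2, stride=2, padding=0)
    out = max_pool2d_dg(data)           # halves H and W: [1, 3, 16, 16]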
@@ -1018,7 +1018,7 @@ create_test_cudnn_padding_SAME_class(TestCase1_strides)

 # ----- test API
-class TestPool2dAPI(unittest.TestCase):
+class TestPool2DAPI(unittest.TestCase):
     def test_api(self):
         x_NHWC = np.random.random([2, 5, 5, 3]).astype("float32")
         x_NCHW = np.random.random([2, 3, 5, 5]).astype("float32")
@@ -1237,7 +1237,7 @@ class TestPool2dAPI(unittest.TestCase):
             data_format="NHWC"))


-class TestPool2dAPI_Error(unittest.TestCase):
+class TestPool2DAPI_Error(unittest.TestCase):
     def test_api(self):
         input_NHWC = fluid.layers.data(
             name="input_NHWC",
...
@@ -25,7 +25,7 @@ from paddle.nn.functional import avg_pool3d, max_pool3d
 from test_pool3d_op import adaptive_start_index, adaptive_end_index, pool3D_forward_naive, avg_pool3D_forward_naive, max_pool3D_forward_naive


-class TestPool3d_API(unittest.TestCase):
+class TestPool3D_API(unittest.TestCase):
     def setUp(self):
         np.random.seed(123)
         self.places = [fluid.CPUPlace()]
@@ -68,7 +68,7 @@ class TestPool3d_API(unittest.TestCase):
             self.assertTrue(np.allclose(result.numpy(), result_np))

-            avg_pool3d_dg = paddle.nn.layer.AvgPool3d(
+            avg_pool3d_dg = paddle.nn.layer.AvgPool3D(
                 kernel_size=2, stride=None, padding="SAME")
             result = avg_pool3d_dg(input)
             self.assertTrue(np.allclose(result.numpy(), result_np))
@@ -95,7 +95,7 @@ class TestPool3d_API(unittest.TestCase):
             self.assertTrue(np.allclose(result.numpy(), result_np))

-            avg_pool3d_dg = paddle.nn.layer.AvgPool3d(
+            avg_pool3d_dg = paddle.nn.layer.AvgPool3D(
                 kernel_size=2,
                 stride=None,
                 padding=1,
@@ -120,7 +120,7 @@ class TestPool3d_API(unittest.TestCase):
             self.assertTrue(np.allclose(result.numpy(), result_np))

-            avg_pool3d_dg = paddle.nn.layer.AvgPool3d(
+            avg_pool3d_dg = paddle.nn.layer.AvgPool3D(
                 kernel_size=2, stride=None, padding=0, ceil_mode=True)
             result = avg_pool3d_dg(input)
             self.assertTrue(np.allclose(result.numpy(), result_np))
@@ -159,7 +159,7 @@ class TestPool3d_API(unittest.TestCase):
                 pool_type='max')
             self.assertTrue(np.allclose(result.numpy(), result_np))

-            max_pool3d_dg = paddle.nn.layer.MaxPool3d(
+            max_pool3d_dg = paddle.nn.layer.MaxPool3D(
                 kernel_size=2, stride=None, padding=0)
             result = max_pool3d_dg(input)
             self.assertTrue(np.allclose(result.numpy(), result_np))
@@ -204,7 +204,7 @@ class TestPool3d_API(unittest.TestCase):
             self.assertTrue(np.allclose(result.numpy(), result_np))

-            max_pool3d_dg = paddle.nn.layer.MaxPool3d(
+            max_pool3d_dg = paddle.nn.layer.MaxPool3D(
                 kernel_size=2, stride=None, padding=0, ceil_mode=True)
             result = max_pool3d_dg(input)
             self.assertTrue(np.allclose(result.numpy(), result_np))
@@ -225,7 +225,7 @@ class TestPool3d_API(unittest.TestCase):
             self.assertTrue(np.allclose(result.numpy(), result_np))

-            max_pool3d_dg = paddle.nn.layer.MaxPool3d(
+            max_pool3d_dg = paddle.nn.layer.MaxPool3D(
                 kernel_size=2, stride=None, padding=1, ceil_mode=False)
             result = max_pool3d_dg(input)
             self.assertTrue(np.allclose(result.numpy(), result_np))
@@ -250,7 +250,7 @@ class TestPool3d_API(unittest.TestCase):
                 padding_algorithm="SAME")
             self.assertTrue(np.allclose(result.numpy(), result_np))

-            max_pool3d_dg = paddle.nn.layer.MaxPool3d(
+            max_pool3d_dg = paddle.nn.layer.MaxPool3D(
                 kernel_size=2, stride=2, padding=0)
             result = max_pool3d_dg(input)
             self.assertTrue(np.allclose(result.numpy(), result_np))
@@ -270,7 +270,7 @@ class TestPool3d_API(unittest.TestCase):
                 pool_type='max')
             self.assertTrue(np.allclose(result.numpy(), result_np))

-            max_pool3d_dg = paddle.nn.layer.MaxPool3d(
+            max_pool3d_dg = paddle.nn.layer.MaxPool3D(
                 kernel_size=2, stride=2, padding=0)
             result = max_pool3d_dg(input)
             self.assertTrue(np.allclose(result.numpy(), result_np))
@@ -299,7 +299,7 @@ class TestPool3d_API(unittest.TestCase):
                 pool_type='avg')
             self.assertTrue(np.allclose(result.numpy(), result_np))

-            avg_pool3d_dg = paddle.nn.layer.AvgPool3d(
+            avg_pool3d_dg = paddle.nn.layer.AvgPool3D(
                 kernel_size=2, stride=2, padding=0)
             result = avg_pool3d_dg(input)
             self.assertTrue(np.allclose(result.numpy(), result_np))
@@ -327,7 +327,7 @@ class TestPool3d_API(unittest.TestCase):
         self.check_max_dygraph_ceilmode_results(place)


-class TestPool3dError_API(unittest.TestCase):
+class TestPool3DError_API(unittest.TestCase):
     def test_error_api(self):
         def run1():
             with fluid.dygraph.guard():
...
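Note that the functional counterparts imported at the top of this file (`avg_pool3d`, `max_pool3d`) keep their lower-case names; only the Layer classes change capitalization. A sketch of the functional form, with an assumed NCDHW input:

    import paddle
    from paddle.nn.functional import max_pool3d

    data = paddle.rand([1, 2, 8, 8, 8])                         # NCDHW layout
    out = max_pool3d(data, kernel_size=2, stride=2, padding=0)
    print(out.shape)                                            # [1, 2, 4, 4, 4]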
@@ -219,7 +219,7 @@ def avg_pool3D_forward_naive(x,
     return out


-class TestPool3d_Op(OpTest):
+class TestPool3D_Op(OpTest):
     def setUp(self):
         self.op_type = "pool3d"
         self.init_kernel_type()
@@ -312,7 +312,7 @@ class TestPool3d_Op(OpTest):
         self.adaptive = False


-class TestCase1(TestPool3d_Op):
+class TestCase1(TestPool3D_Op):
     def init_shape(self):
         self.shape = [2, 3, 7, 7, 7]
@@ -330,7 +330,7 @@ class TestCase1(TestPool3d_Op):
         self.global_pool = False


-class TestCase2(TestPool3d_Op):
+class TestCase2(TestPool3D_Op):
     def init_shape(self):
         self.shape = [2, 3, 6, 7, 7]
@@ -348,7 +348,7 @@ class TestCase2(TestPool3d_Op):
         self.global_pool = False


-class TestCase3(TestPool3d_Op):
+class TestCase3(TestPool3D_Op):
     def init_pool_type(self):
         self.pool_type = "max"
@@ -378,7 +378,7 @@ def create_test_cudnn_class(parent):
     globals()[cls_name] = TestCUDNNCase


-create_test_cudnn_class(TestPool3d_Op)
+create_test_cudnn_class(TestPool3D_Op)
 create_test_cudnn_class(TestCase1)
 create_test_cudnn_class(TestCase2)
 create_test_cudnn_class(TestCase3)
@@ -405,7 +405,7 @@ def create_test_cudnn_fp16_class(parent):
     globals()[cls_name] = TestCUDNNFp16Case


-create_test_cudnn_fp16_class(TestPool3d_Op)
+create_test_cudnn_fp16_class(TestPool3D_Op)
 create_test_cudnn_fp16_class(TestCase1)
 create_test_cudnn_fp16_class(TestCase2)
 create_test_cudnn_fp16_class(TestCase3)
@@ -429,7 +429,7 @@ def create_test_cudnn_use_ceil_class(parent):
     globals()[cls_name] = TestPool3DUseCeilCase


-create_test_cudnn_use_ceil_class(TestPool3d_Op)
+create_test_cudnn_use_ceil_class(TestPool3D_Op)
 create_test_cudnn_use_ceil_class(TestCase1)
@@ -480,7 +480,7 @@ class TestAvgPoolAdaptiveAsyOutSize(TestCase1):

 #-------test pool3d with asymmetric padding------
-class TestPool3d_Op_AsyPadding(TestPool3d_Op):
+class TestPool3D_Op_AsyPadding(TestPool3D_Op):
     def init_test_case(self):
         self.ksize = [3, 4, 3]
         self.strides = [1, 1, 2]
@@ -552,21 +552,21 @@ class TestCase5_AsyPadding(TestCase5):
         self.shape = [2, 3, 7, 7, 7]


-create_test_cudnn_class(TestPool3d_Op_AsyPadding)
+create_test_cudnn_class(TestPool3D_Op_AsyPadding)
 create_test_cudnn_class(TestCase1_AsyPadding)
 create_test_cudnn_class(TestCase2_AsyPadding)
 create_test_cudnn_class(TestCase3_AsyPadding)
 create_test_cudnn_class(TestCase4_AsyPadding)
 create_test_cudnn_class(TestCase5_AsyPadding)

-create_test_cudnn_fp16_class(TestPool3d_Op_AsyPadding)
+create_test_cudnn_fp16_class(TestPool3D_Op_AsyPadding)
 create_test_cudnn_fp16_class(TestCase1_AsyPadding)
 create_test_cudnn_fp16_class(TestCase2_AsyPadding)
 create_test_cudnn_fp16_class(TestCase3_AsyPadding)
 create_test_cudnn_fp16_class(TestCase4_AsyPadding)
 create_test_cudnn_fp16_class(TestCase5_AsyPadding)

-create_test_cudnn_use_ceil_class(TestPool3d_Op_AsyPadding)
+create_test_cudnn_use_ceil_class(TestPool3D_Op_AsyPadding)
 create_test_cudnn_use_ceil_class(TestCase1_AsyPadding)

 create_test_use_ceil_class(TestCase1_AsyPadding)
@@ -606,7 +606,7 @@ class TestAvgPoolAdaptive_AsyPadding(TestCase1):

 # ------------ test channel_last --------------
-class TestPool3d_channel_last(TestPool3d_Op):
+class TestPool3D_channel_last(TestPool3D_Op):
     def init_data_format(self):
         self.data_format = "NDHWC"
@@ -654,14 +654,14 @@ class TestCase5_channel_last(TestCase5):
         self.shape = [2, 7, 7, 7, 3]


-create_test_cudnn_class(TestPool3d_channel_last)
+create_test_cudnn_class(TestPool3D_channel_last)
 create_test_cudnn_class(TestCase1_channel_last)
 create_test_cudnn_class(TestCase2_channel_last)
 create_test_cudnn_class(TestCase3_channel_last)
 create_test_cudnn_class(TestCase4_channel_last)
 create_test_cudnn_class(TestCase5_channel_last)

-create_test_cudnn_use_ceil_class(TestPool3d_channel_last)
+create_test_cudnn_use_ceil_class(TestPool3D_channel_last)
 create_test_cudnn_use_ceil_class(TestCase1_channel_last)

 create_test_use_ceil_class(TestCase1_channel_last)
@@ -716,7 +716,7 @@ class TestAvgPoolAdaptive_channel_last(TestCase1_channel_last):

 # --- asy padding
-class TestPool3d_Op_AsyPadding_channel_last(TestPool3d_Op_AsyPadding):
+class TestPool3D_Op_AsyPadding_channel_last(TestPool3D_Op_AsyPadding):
     def init_data_format(self):
         self.data_format = "NDHWC"
@@ -764,14 +764,14 @@ class TestCase5_AsyPadding_channel_last(TestCase5_AsyPadding):
         self.shape = [2, 7, 8, 6, 3]


-create_test_cudnn_class(TestPool3d_Op_AsyPadding_channel_last)
+create_test_cudnn_class(TestPool3D_Op_AsyPadding_channel_last)
 create_test_cudnn_class(TestCase1_AsyPadding_channel_last)
 create_test_cudnn_class(TestCase2_AsyPadding_channel_last)
 create_test_cudnn_class(TestCase3_AsyPadding_channel_last)
 create_test_cudnn_class(TestCase4_AsyPadding_channel_last)
 create_test_cudnn_class(TestCase5_AsyPadding_channel_last)

-create_test_cudnn_use_ceil_class(TestPool3d_Op_AsyPadding_channel_last)
+create_test_cudnn_use_ceil_class(TestPool3D_Op_AsyPadding_channel_last)
 create_test_cudnn_use_ceil_class(TestCase1_AsyPadding_channel_last)

 create_test_use_ceil_class(TestCase1_AsyPadding_channel_last)
@@ -812,14 +812,14 @@ def create_test_padding_SAME_class(parent):
     globals()[cls_name] = TestPaddingSMAECase


-create_test_padding_SAME_class(TestPool3d_Op)
+create_test_padding_SAME_class(TestPool3D_Op)
 create_test_padding_SAME_class(TestCase1)
 create_test_padding_SAME_class(TestCase2)
 create_test_padding_SAME_class(TestCase3)
 create_test_padding_SAME_class(TestCase4)
 create_test_padding_SAME_class(TestCase5)

-create_test_padding_SAME_class(TestPool3d_channel_last)
+create_test_padding_SAME_class(TestPool3D_channel_last)
 create_test_padding_SAME_class(TestCase1_channel_last)
 create_test_padding_SAME_class(TestCase2_channel_last)
 create_test_padding_SAME_class(TestCase3_channel_last)
@@ -843,14 +843,14 @@ def create_test_cudnn_padding_SAME_class(parent):
     globals()[cls_name] = TestCUDNNPaddingSMAECase


-create_test_cudnn_padding_SAME_class(TestPool3d_Op)
+create_test_cudnn_padding_SAME_class(TestPool3D_Op)
 create_test_cudnn_padding_SAME_class(TestCase1)
 create_test_cudnn_padding_SAME_class(TestCase2)
 create_test_cudnn_padding_SAME_class(TestCase3)
 create_test_cudnn_padding_SAME_class(TestCase4)
 create_test_cudnn_padding_SAME_class(TestCase5)

-create_test_cudnn_padding_SAME_class(TestPool3d_channel_last)
+create_test_cudnn_padding_SAME_class(TestPool3D_channel_last)
 create_test_cudnn_padding_SAME_class(TestCase1_channel_last)
 create_test_cudnn_padding_SAME_class(TestCase2_channel_last)
 create_test_cudnn_padding_SAME_class(TestCase3_channel_last)
@@ -869,14 +869,14 @@ def create_test_padding_VALID_class(parent):
     globals()[cls_name] = TestPaddingVALIDCase


-create_test_padding_VALID_class(TestPool3d_Op)
+create_test_padding_VALID_class(TestPool3D_Op)
 create_test_padding_VALID_class(TestCase1)
 create_test_padding_VALID_class(TestCase2)
 create_test_padding_VALID_class(TestCase3)
 create_test_padding_VALID_class(TestCase4)
 create_test_padding_VALID_class(TestCase5)

-create_test_padding_VALID_class(TestPool3d_channel_last)
+create_test_padding_VALID_class(TestPool3D_channel_last)
 create_test_padding_VALID_class(TestCase1_channel_last)
 create_test_padding_VALID_class(TestCase2_channel_last)
 create_test_padding_VALID_class(TestCase3_channel_last)
@@ -900,14 +900,14 @@ def create_test_cudnn_padding_VALID_class(parent):
     globals()[cls_name] = TestCUDNNPaddingVALIDCase


-create_test_cudnn_padding_VALID_class(TestPool3d_Op)
+create_test_cudnn_padding_VALID_class(TestPool3D_Op)
 create_test_cudnn_padding_VALID_class(TestCase1)
 create_test_cudnn_padding_VALID_class(TestCase2)
 create_test_cudnn_padding_VALID_class(TestCase3)
 create_test_cudnn_padding_VALID_class(TestCase4)
 create_test_cudnn_padding_VALID_class(TestCase5)

-create_test_cudnn_padding_VALID_class(TestPool3d_channel_last)
+create_test_cudnn_padding_VALID_class(TestPool3D_channel_last)
 create_test_cudnn_padding_VALID_class(TestCase1_channel_last)
 create_test_cudnn_padding_VALID_class(TestCase2_channel_last)
 create_test_cudnn_padding_VALID_class(TestCase3_channel_last)
@@ -916,7 +916,7 @@ create_test_cudnn_padding_VALID_class(TestCase5_channel_last)

 #test API
-class TestPool3dAPI(unittest.TestCase):
+class TestPool3DAPI(unittest.TestCase):
     def test_api(self):
         x_NDHWC = np.random.random([2, 5, 5, 5, 3]).astype("float32")
         x_NCDHW = np.random.random([2, 3, 5, 5, 5]).astype("float32")
@@ -1101,7 +1101,7 @@ class TestPool3dAPI(unittest.TestCase):
             atol=1e-05)


-class TestPool3dAPI_Error(unittest.TestCase):
+class TestPool3DAPI_Error(unittest.TestCase):
     def test_api(self):
         input_NDHWC = fluid.layers.data(
             name="input_NDHWC",
...
@@ -147,7 +147,7 @@ def test_main(use_cuda, use_py_func_op, use_parallel_executor):
     with fluid.program_guard(fluid.Program(), fluid.Program()):
         with fluid.scope_guard(fluid.core.Scope()):
-            gen = paddle.manual_seed(1)
+            gen = paddle.seed(1)
             np.random.seed(1)
            img = fluid.layers.data(name='image', shape=[784], dtype='float32')
             label = fluid.layers.data(name='label', shape=[1], dtype='int64')
...
@@ -35,7 +35,7 @@ class TestGeneratorSeed(unittest.TestCase):
         fluid.enable_dygraph()

-        gen = paddle.manual_seed(12312321111)
+        gen = paddle.seed(12312321111)
         x = fluid.layers.uniform_random([10], dtype="float32", min=0.0, max=1.0)
         st1 = gen.get_state()
@@ -47,7 +47,7 @@ class TestGeneratorSeed(unittest.TestCase):
         x2 = fluid.layers.uniform_random(
             [10], dtype="float32", min=0.0, max=1.0)

-        paddle.manual_seed(12312321111)
+        paddle.seed(12312321111)
         x3 = fluid.layers.uniform_random(
             [10], dtype="float32", min=0.0, max=1.0)
@@ -63,7 +63,7 @@ class TestGeneratorSeed(unittest.TestCase):
     def test_generator_uniform_random_static(self):
         fluid.disable_dygraph()

-        gen = paddle.manual_seed(123123143)
+        gen = paddle.seed(123123143)
         startup_program = fluid.Program()
         train_program = fluid.Program()
@@ -97,7 +97,7 @@ class TestGeneratorSeed(unittest.TestCase):
     def test_gen_dropout_dygraph(self):
         fluid.enable_dygraph()

-        gen = paddle.manual_seed(111111111)
+        gen = paddle.seed(111111111)
         st = gen.get_state()
         # x = np.arange(1,101).reshape(2,50).astype("float32")
         x = fluid.layers.uniform_random(
@@ -118,7 +118,7 @@ class TestGeneratorSeed(unittest.TestCase):
     def test_gen_dropout_static(self):
         fluid.disable_dygraph()

-        gen = paddle.manual_seed(123123143)
+        gen = paddle.seed(123123143)
         startup_program = fluid.Program()
         train_program = fluid.Program()
@@ -144,7 +144,7 @@ class TestGeneratorSeed(unittest.TestCase):
         """Test Generator seed."""
         fluid.enable_dygraph()

-        gen = paddle.manual_seed(12312321111)
+        gen = paddle.seed(12312321111)
         x = fluid.layers.gaussian_random([10], dtype="float32")
         st1 = gen.get_state()
         x1 = fluid.layers.gaussian_random([10], dtype="float32")
@@ -165,7 +165,7 @@ class TestGeneratorSeed(unittest.TestCase):
     def test_generator_gaussian_random_static(self):
         fluid.disable_dygraph()

-        gen = paddle.manual_seed(123123143)
+        gen = paddle.seed(123123143)
         startup_program = fluid.Program()
         train_program = fluid.Program()
@@ -203,7 +203,7 @@ class TestGeneratorSeed(unittest.TestCase):
         fluid.enable_dygraph()

-        gen = paddle.manual_seed(12312321111)
+        gen = paddle.seed(12312321111)
         x = paddle.randint(low=10, shape=[10], dtype="int32")
         st1 = gen.get_state()
         x1 = paddle.randint(low=10, shape=[10], dtype="int32")
@@ -224,7 +224,7 @@ class TestGeneratorSeed(unittest.TestCase):
     def test_generator_uniform_random_static(self):
         fluid.disable_dygraph()

-        gen = paddle.manual_seed(123123143)
+        gen = paddle.seed(123123143)
         startup_program = fluid.Program()
         train_program = fluid.Program()
@@ -259,7 +259,7 @@ class TestGeneratorSeed(unittest.TestCase):
         """Test Generator seed."""
         fluid.enable_dygraph()

-        gen = paddle.manual_seed(12312321111)
+        gen = paddle.seed(12312321111)
         x = paddle.randint(low=1)
         st1 = gen.get_state()
         x1 = paddle.randint(low=1)
@@ -278,7 +278,7 @@ class TestGeneratorSeed(unittest.TestCase):
     def test_generator_ranint_static(self):
         fluid.disable_dygraph()

-        gen = paddle.manual_seed(123123143)
+        gen = paddle.seed(123123143)
         startup_program = fluid.Program()
         train_program = fluid.Program()
@@ -315,7 +315,7 @@ class TestGeneratorSeed(unittest.TestCase):
         fluid.enable_dygraph()

-        gen = paddle.manual_seed(12312321111)
+        gen = paddle.seed(12312321111)
         x = paddle.randperm(10)
         st1 = gen.get_state()
         x1 = paddle.randperm(10)
@@ -337,7 +337,7 @@ class TestGeneratorSeed(unittest.TestCase):
         fluid.disable_dygraph()

-        paddle.manual_seed(123123143)
+        paddle.seed(123123143)
         startup_program = fluid.Program()
         train_program = fluid.Program()
@@ -353,7 +353,7 @@ class TestGeneratorSeed(unittest.TestCase):
                        feed={},
                        fetch_list=[result_1, result_2])

-        paddle.manual_seed(123123143)
+        paddle.seed(123123143)
         out2 = exe.run(train_program,
                        feed={},
                        fetch_list=[result_1, result_2])
@@ -371,7 +371,7 @@ class TestGeneratorSeed(unittest.TestCase):
     def test_generator_sampling_id_dygraph(self):
         """Test Generator seed."""
-        gen = paddle.manual_seed(12312321111)
+        gen = paddle.seed(12312321111)

         fluid.enable_dygraph()
@@ -409,7 +409,7 @@ class TestGeneratorSeed(unittest.TestCase):
         fluid.disable_dygraph()

-        paddle.manual_seed(123123143)
+        paddle.seed(123123143)
         startup_program = fluid.Program()
         train_program = fluid.Program()
@@ -426,7 +426,7 @@ class TestGeneratorSeed(unittest.TestCase):
                        feed={},
                        fetch_list=[result_1, result_2])

-        paddle.manual_seed(123123143)
+        paddle.seed(123123143)
         out2 = exe.run(train_program,
                        feed={},
                        fetch_list=[result_1, result_2])
@@ -445,7 +445,7 @@ class TestGeneratorSeed(unittest.TestCase):
     def test_gen_TruncatedNormal_initializer(self):
         fluid.disable_dygraph()

-        gen = paddle.manual_seed(123123143)
+        gen = paddle.seed(123123143)
         cur_state = gen.get_state()
         startup_program = fluid.Program()
...
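These hunks also show the behavioural detail the tests rely on: `paddle.seed` returns the global `Generator`, whose state can be captured and later restored to replay a random sequence. A sketch, assuming `Generator.set_state` mirrors the `get_state` calls used above:

    import paddle

    gen = paddle.seed(12312321111)  # returns the global Generator
    x1 = paddle.rand([10])
    st = gen.get_state()
    x2 = paddle.rand([10])
    gen.set_state(st)
    x3 = paddle.rand([10])          # expected to reproduce x2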
@@ -169,7 +169,7 @@ class TestRegularizer(unittest.TestCase):
         return param_sum

     def check_l2decay_regularizer(self, place, model):
-        paddle.manual_seed(1)
+        paddle.seed(1)
         paddle.framework.random._manual_program_seed(1)
         main_prog = fluid.framework.Program()
         startup_prog = fluid.framework.Program()
@@ -189,7 +189,7 @@ class TestRegularizer(unittest.TestCase):
         return param_sum

     def check_l2decay(self, place, model):
-        paddle.manual_seed(1)
+        paddle.seed(1)
         paddle.framework.random._manual_program_seed(1)
         main_prog = fluid.framework.Program()
         startup_prog = fluid.framework.Program()
@@ -246,7 +246,7 @@ class TestRegularizer(unittest.TestCase):
         with fluid.dygraph.guard():
             input = fluid.dygraph.to_variable(
                 np.random.randn(3, 2).astype('float32'))
-            paddle.manual_seed(1)
+            paddle.seed(1)
             paddle.framework.random._manual_program_seed(1)
             linear1 = fluid.dygraph.Linear(
...
@@ -94,7 +94,7 @@ class TestRegularizer(unittest.TestCase):
         return param_sum

     def check_l2decay_regularizer(self, place, model):
-        paddle.manual_seed(1)
+        paddle.seed(1)
         paddle.framework.random._manual_program_seed(1)
         main_prog = fluid.framework.Program()
         startup_prog = fluid.framework.Program()
@@ -114,7 +114,7 @@ class TestRegularizer(unittest.TestCase):
         return param_sum

     def check_l2decay(self, place, model):
-        paddle.manual_seed(1)
+        paddle.seed(1)
         paddle.framework.random._manual_program_seed(1)
         main_prog = fluid.framework.Program()
         startup_prog = fluid.framework.Program()
@@ -171,7 +171,7 @@ class TestRegularizer(unittest.TestCase):
         with fluid.dygraph.guard():
             input = fluid.dygraph.to_variable(
                 np.random.randn(3, 2).astype('float32'))
-            paddle.manual_seed(1)
+            paddle.seed(1)
             paddle.framework.random._manual_program_seed(1)
             linear1 = fluid.dygraph.Linear(
...
@@ -20,13 +20,13 @@ import unittest
 paddle.disable_static()
 SEED = 2020
 np.random.seed(SEED)
-paddle.manual_seed(SEED)
+paddle.seed(SEED)


 class Generator(fluid.dygraph.Layer):
     def __init__(self):
         super(Generator, self).__init__()
-        self.conv1 = paddle.nn.Conv2d(3, 3, 3, padding=1)
+        self.conv1 = paddle.nn.Conv2D(3, 3, 3, padding=1)

     def forward(self, x):
         x = self.conv1(x)
@@ -37,7 +37,7 @@ class Generator(fluid.dygraph.Layer):
 class Discriminator(fluid.dygraph.Layer):
     def __init__(self):
         super(Discriminator, self).__init__()
-        self.convd = paddle.nn.Conv2d(6, 3, 1)
+        self.convd = paddle.nn.Conv2D(6, 3, 1)

     def forward(self, x):
         x = self.convd(x)
...
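The layer rename applies to convolutions as well (`Conv2d` to `Conv2D`), with constructor arguments unchanged. A minimal sketch of the renamed layer; the input shape is an illustrative assumption:

    import paddle

    conv = paddle.nn.Conv2D(3, 3, 3, padding=1)  # in_channels, out_channels, kernel_size
    x = paddle.rand([1, 3, 8, 8])
    y = conv(x)  # padding=1 with a 3x3 kernel keeps the spatial size: [1, 3, 8, 8]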
@@ -617,7 +617,7 @@ class ModuleApiTest(unittest.TestCase):
             fluid.enable_dygraph(place)
         else:
             fluid.disable_dygraph()
-        gen = paddle.manual_seed(self._random_seed)
+        gen = paddle.seed(self._random_seed)
         gen._is_init_py = False
         paddle.framework.random._manual_program_seed(self._random_seed)
         scope = fluid.core.Scope()
...
@@ -228,12 +228,12 @@ class TestConvertSyncBatchNorm(unittest.TestCase):
         with program_guard(Program(), Program()):
             compare_model = paddle.nn.Sequential(
-                paddle.nn.Conv2d(3, 5, 3), paddle.nn.BatchNorm2d(5))
+                paddle.nn.Conv2D(3, 5, 3), paddle.nn.BatchNorm2D(5))
             model = paddle.nn.Sequential(
-                paddle.nn.Conv2d(3, 5, 3), paddle.nn.BatchNorm2d(5))
+                paddle.nn.Conv2D(3, 5, 3), paddle.nn.BatchNorm2D(5))
             model = paddle.nn.SyncBatchNorm.convert_sync_batchnorm(model)
             for idx, sublayer in enumerate(compare_model.sublayers()):
-                if isinstance(sublayer, paddle.nn.BatchNorm2d):
+                if isinstance(sublayer, paddle.nn.BatchNorm2D):
                     self.assertEqual(
                         isinstance(model[idx], paddle.nn.SyncBatchNorm), True)
...
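For reference, the conversion exercised here walks a layer tree and swaps every batch-norm sublayer for a `SyncBatchNorm`. A minimal sketch using the renamed classes:

    import paddle

    model = paddle.nn.Sequential(
        paddle.nn.Conv2D(3, 5, 3), paddle.nn.BatchNorm2D(5))
    # Yields a converted model in which BatchNorm2D is replaced by SyncBatchNorm:
    sync_model = paddle.nn.SyncBatchNorm.convert_sync_batchnorm(model)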
@@ -211,7 +211,7 @@ def ffn(src, encoder_layer, ffn_fc1_act="relu"):
 class TestTransformer(unittest.TestCase):
     def test_multi_head_attention(self):
         def multihead_attention_test_helper(self_attention, cache):
-            paddle.manual_seed(2020)
+            paddle.seed(2020)
             paddle.framework.random._manual_program_seed(2020)
             # self_attention|cross_attention, cache|No cache
             with fluid.dygraph.guard(fluid.CPUPlace()):
@@ -275,7 +275,7 @@ class TestTransformer(unittest.TestCase):
     def test_transformer_encoder_layer(self):
         with fluid.dygraph.guard(fluid.CPUPlace()):
-            paddle.framework.manual_seed(2020)
+            paddle.framework.seed(2020)
             paddle.framework.random._manual_program_seed(2020)
             ffn_fc1_act = "relu"
@@ -320,7 +320,7 @@ class TestTransformer(unittest.TestCase):
     def test_transformer_decoder_layer(self):
         with fluid.dygraph.guard(fluid.CPUPlace()):
-            paddle.framework.manual_seed(2020)
+            paddle.framework.seed(2020)
             activation = "relu"
             normalize_before = False
             batch_size, d_model, n_head, dim_feedforward, dropout, attn_dropout, act_dropout, source_length, target_length = generate_basic_params(
...
@@ -77,7 +77,7 @@ class TestTranslatedLayer(unittest.TestCase):
         paddle.disable_static(place)

         # config seed
-        paddle.manual_seed(SEED)
+        paddle.seed(SEED)
         paddle.framework.random._manual_program_seed(SEED)

         # create network
...
@@ -235,7 +235,7 @@ class TestUniformRandomOpSelectedRows(unittest.TestCase):
     def check_with_place(self, place):
         scope = core.Scope()
         out = scope.var("X").get_selected_rows()
-        paddle.manual_seed(10)
+        paddle.seed(10)
         op = Operator(
             "uniform_random",
             Out="X",
@@ -256,7 +256,7 @@ class TestUniformRandomOpSelectedRowsWithDiagInit(
     def check_with_place(self, place):
         scope = core.Scope()
         out = scope.var("X").get_selected_rows()
-        paddle.manual_seed(10)
+        paddle.seed(10)
         op = Operator(
             "uniform_random",
             Out="X",
@@ -277,7 +277,7 @@ class TestUniformRandomOpSelectedRowsWithDiagInit(
 class TestUniformRandomOpApi(unittest.TestCase):
     def test_api(self):
-        paddle.manual_seed(10)
+        paddle.seed(10)
         x = fluid.layers.data('x', shape=[16], dtype='float32', lod_level=1)
         y = fluid.layers.fc(x,
                             size=16,
@@ -350,7 +350,7 @@ class TestUniformRandomOp_attr_tensor_API(unittest.TestCase):
 class TestUniformRandomOp_API_seed(unittest.TestCase):
     def test_attr_tensor_API(self):
         _seed = 10
-        gen = paddle.manual_seed(_seed)
+        gen = paddle.seed(_seed)
         gen._is_init_py = False
         startup_program = fluid.Program()
         train_program = fluid.Program()
@@ -392,7 +392,7 @@ class TestUniformRandomOpSelectedRowsShapeTensor(unittest.TestCase):
         out = scope.var("X").get_selected_rows()
         shape_tensor = scope.var("Shape").get_tensor()
         shape_tensor.set(np.array([1000, 784]).astype("int64"), place)
-        paddle.manual_seed(10)
+        paddle.seed(10)
         op = Operator(
             "uniform_random",
             ShapeTensor="Shape",
@@ -426,7 +426,7 @@ class TestUniformRandomOpSelectedRowsShapeTensorList(unittest.TestCase):
         shape_1.set(np.array([1000]).astype("int64"), place)
         shape_2 = scope.var("shape2").get_tensor()
         shape_2.set(np.array([784]).astype("int64"), place)
-        paddle.manual_seed(10)
+        paddle.seed(10)
         op = Operator(
             "uniform_random",
             ShapeTensorList=["shape1", "shape2"],
...
@@ -416,7 +416,7 @@ class TestVarBase(unittest.TestCase):
     def test_tensor_str(self):
         paddle.enable_static()
         paddle.disable_static(paddle.CPUPlace())
-        paddle.manual_seed(10)
+        paddle.seed(10)
         a = paddle.rand([10, 20])
         paddle.set_printoptions(4, 100, 3)
         a_str = str(a)
...
@@ -19,7 +19,7 @@ import numpy as np
 from op_test import OpTest, skip_check_grad_ci


-class TestVarConv2dOp(OpTest):
+class TestVarConv2DOp(OpTest):
     def setUp(self):
         self.init_op_type()
         self.set_data()
@@ -179,7 +179,7 @@ class TestVarConv2dOp(OpTest):
             ['X'], 'Out', max_relative_error=0.005, check_dygraph=False)


-class TestVarConv2dOpCase1(TestVarConv2dOp):
+class TestVarConv2DOpCase1(TestVarConv2DOp):
     def set_data(self):
         # set in_ch 1
         input_channel = 1
@@ -192,7 +192,7 @@ class TestVarConv2dOpCase1(TestVarConv2dOp):
                       col)


-class TestVarConv2dOpCase2(TestVarConv2dOp):
+class TestVarConv2DOpCase2(TestVarConv2DOp):
     def set_data(self):
         # set out_ch 1
         input_channel = 2
@@ -205,7 +205,7 @@ class TestVarConv2dOpCase2(TestVarConv2dOp):
                       col)


-class TestVarConv2dOpCase3(TestVarConv2dOp):
+class TestVarConv2DOpCase3(TestVarConv2DOp):
     def set_data(self):
         # set batch 1
         input_channel = 2
@@ -218,7 +218,7 @@ class TestVarConv2dOpCase3(TestVarConv2dOp):
                       col)


-class TestVarConv2dOpCase4(TestVarConv2dOp):
+class TestVarConv2DOpCase4(TestVarConv2DOp):
     def set_data(self):
         # set filter size very large
         input_channel = 3
@@ -231,7 +231,7 @@ class TestVarConv2dOpCase4(TestVarConv2dOp):
                       col)


-class TestVarConv2dOpCase5(TestVarConv2dOp):
+class TestVarConv2DOpCase5(TestVarConv2DOp):
     def set_data(self):
         # set input very small
         input_channel = 50
@@ -247,7 +247,7 @@ class TestVarConv2dOpCase5(TestVarConv2dOp):
 @skip_check_grad_ci(
     reason="[skip shape check] Use shape of input_channel, row and col all is 1 to test special LoDTensor."
 )
-class TestVarConv2dOpCase6(TestVarConv2dOp):
+class TestVarConv2DOpCase6(TestVarConv2DOp):
     def set_data(self):
         input_channel = 1
         output_channel = 3
@@ -259,7 +259,7 @@ class TestVarConv2dOpCase6(TestVarConv2dOp):
                       col)


-class TestVarConv2dOpCase7(TestVarConv2dOp):
+class TestVarConv2DOpCase7(TestVarConv2DOp):
     def set_data(self):
         input_channel = 2
         output_channel = 3
@@ -271,7 +271,7 @@ class TestVarConv2dOpCase7(TestVarConv2dOp):
                       col)


-class TestVarConv2dApi(unittest.TestCase):
+class TestVarConv2DApi(unittest.TestCase):
     def test_api(self):
         import paddle.fluid as fluid
...
@@ -159,7 +159,7 @@ def create_test_padding_VALID_class(parent):
     globals()[cls_name] = TestPaddingVALIDCase


-class TestConv2dOp(OpTest):
+class TestConv2DOp(OpTest):
     def setUp(self):
         self.op_type = "conv2d"
         self.use_cudnn = False
@@ -274,7 +274,7 @@ class TestConv2dOp(OpTest):
         pass


-class TestWithPad(TestConv2dOp):
+class TestWithPad(TestConv2DOp):
     def init_test_case(self):
         self.pad = [1, 1]
         self.stride = [1, 1]
@@ -284,7 +284,7 @@ class TestWithPad(TestConv2dOp):
         self.filter_size = [6, f_c, 3, 3]


-class TestWithStride(TestConv2dOp):
+class TestWithStride(TestConv2DOp):
     def init_test_case(self):
         self.pad = [1, 1]
         self.stride = [2, 2]
@@ -294,7 +294,7 @@ class TestWithStride(TestConv2dOp):
         self.filter_size = [6, f_c, 3, 3]


-class TestWithGroup(TestConv2dOp):
+class TestWithGroup(TestConv2DOp):
     def init_test_case(self):
         self.pad = [0, 0]
         self.stride = [1, 1]
@@ -305,7 +305,7 @@ class TestWithGroup(TestConv2dOp):
         self.filter_size = [18, f_c, 3, 3]


-class TestWith1x1(TestConv2dOp):
+class TestWith1x1(TestConv2DOp):
     def init_test_case(self):
         self.pad = [0, 0]
         self.stride = [1, 1]
@@ -318,7 +318,7 @@ class TestWith1x1(TestConv2dOp):
         self.groups = 3


-class TestWithDilation(TestConv2dOp):
+class TestWithDilation(TestConv2DOp):
     def init_test_case(self):
         self.pad = [0, 0]
         self.stride = [1, 1]
@@ -334,7 +334,7 @@ class TestWithDilation(TestConv2dOp):
         self.groups = 3


-class TestWithInput1x1Filter1x1(TestConv2dOp):
+class TestWithInput1x1Filter1x1(TestConv2DOp):
     def init_test_case(self):
         self.pad = [0, 0]
         self.stride = [1, 1]
@@ -356,7 +356,7 @@ class TestWithInput1x1Filter1x1(TestConv2dOp):

 # ---- test asymmetric padding ----
-class TestConv2dOp_v2(OpTest):
+class TestConv2DOp_v2(OpTest):
     def setUp(self):
         self.op_type = "conv2d"
         self.use_cudnn = False
@@ -482,13 +482,13 @@ class TestConv2dOp_v2(OpTest):
         pass


-class TestConv2dOp_AsyPadding(TestConv2dOp_v2):
+class TestConv2DOp_AsyPadding(TestConv2DOp_v2):
     def init_paddings(self):
         self.pad = [0, 0, 1, 2]
         self.padding_algorithm = "EXPLICIT"


-class TestWithPad_AsyPadding(TestConv2dOp_v2):
+class TestWithPad_AsyPadding(TestConv2DOp_v2):
     def init_test_case(self):
         self.stride = [1, 1]
         self.input_size = [2, 3, 5, 5]  # NCHW
@@ -501,7 +501,7 @@ class TestWithPad_AsyPadding(TestConv2dOp_v2):
         self.padding_algorithm = "EXPLICIT"


-class TestWithStride_AsyPadding(TestConv2dOp_v2):
+class TestWithStride_AsyPadding(TestConv2DOp_v2):
     def init_test_case(self):
         self.stride = [2, 2]
         self.input_size = [2, 3, 6, 6]  # NCHW
@@ -514,7 +514,7 @@ class TestWithStride_AsyPadding(TestConv2dOp_v2):
         self.padding_algorithm = "EXPLICIT"


-class TestWithGroup_AsyPadding(TestConv2dOp_v2):
+class TestWithGroup_AsyPadding(TestConv2DOp_v2):
     def init_test_case(self):
         self.pad = [0, 0]
         self.stride = [1, 2]
@@ -525,7 +525,7 @@ class TestWithGroup_AsyPadding(TestConv2dOp_v2):
         self.filter_size = [24, f_c, 4, 3]


-class TestWith1x1_AsyPadding(TestConv2dOp_v2):
+class TestWith1x1_AsyPadding(TestConv2DOp_v2):
     def init_test_case(self):
         self.stride = [1, 1]
         self.input_size = [2, 3, 5, 5]  # NCHW
@@ -541,7 +541,7 @@ class TestWith1x1_AsyPadding(TestConv2dOp_v2):
         self.padding_algorithm = "EXPLICIT"


-class TestWithDilation_AsyPadding(TestConv2dOp_v2):
+class TestWithDilation_AsyPadding(TestConv2DOp_v2):
     def init_test_case(self):
         self.stride = [1, 1]
         self.input_size = [2, 3, 10, 10]  # NCHW
@@ -560,7 +560,7 @@ class TestWithDilation_AsyPadding(TestConv2dOp_v2):
         self.padding_algorithm = "EXPLICIT"


-class TestWithInput1x1Filter1x1_AsyPadding(TestConv2dOp_v2):
+class TestWithInput1x1Filter1x1_AsyPadding(TestConv2DOp_v2):
     def init_test_case(self):
         self.stride = [1, 1]
         self.input_size = [40, 3, 1, 1]  # NCHW
@@ -577,20 +577,20 @@ class TestWithInput1x1Filter1x1_AsyPadding(TestConv2dOp_v2):

 #---------- test SAME VALID -----------
-create_test_padding_SAME_class(TestConv2dOp_AsyPadding)
+create_test_padding_SAME_class(TestConv2DOp_AsyPadding)
 create_test_padding_SAME_class(TestWithPad_AsyPadding)
 create_test_padding_SAME_class(TestWithStride_AsyPadding)
 create_test_padding_SAME_class(TestWithGroup_AsyPadding)
create_test_padding_SAME_class(TestWithInput1x1Filter1x1_AsyPadding) create_test_padding_SAME_class(TestWithInput1x1Filter1x1_AsyPadding)
create_test_padding_VALID_class(TestConv2dOp_AsyPadding) create_test_padding_VALID_class(TestConv2DOp_AsyPadding)
create_test_padding_VALID_class(TestWithPad_AsyPadding) create_test_padding_VALID_class(TestWithPad_AsyPadding)
create_test_padding_VALID_class(TestWithStride_AsyPadding) create_test_padding_VALID_class(TestWithStride_AsyPadding)
create_test_padding_VALID_class(TestWithGroup_AsyPadding) create_test_padding_VALID_class(TestWithGroup_AsyPadding)
create_test_padding_VALID_class(TestWithInput1x1Filter1x1_AsyPadding) create_test_padding_VALID_class(TestWithInput1x1Filter1x1_AsyPadding)
# ------------ test channel last --------- # ------------ test channel last ---------
create_test_channel_last_class(TestConv2dOp_AsyPadding) create_test_channel_last_class(TestConv2DOp_AsyPadding)
create_test_channel_last_class(TestWithPad_AsyPadding) create_test_channel_last_class(TestWithPad_AsyPadding)
create_test_channel_last_class(TestWithGroup_AsyPadding) create_test_channel_last_class(TestWithGroup_AsyPadding)
create_test_channel_last_class(TestWith1x1_AsyPadding) create_test_channel_last_class(TestWith1x1_AsyPadding)
......
...@@ -14,9 +14,8 @@ ...@@ -14,9 +14,8 @@
# TODO: import framework api under this directory # TODO: import framework api under this directory
__all__ = [ __all__ = [
'create_parameter', 'ParamAttr', 'create_parameter', 'ParamAttr', 'CPUPlace', 'CUDAPlace', 'CUDAPinnedPlace',
'CPUPlace', 'CUDAPlace', 'CUDAPinnedPlace', 'get_default_dtype', 'get_default_dtype', 'set_default_dtype'
'set_default_dtype'
] ]
__all__ += [ __all__ += [
...@@ -25,7 +24,7 @@ __all__ += [ ...@@ -25,7 +24,7 @@ __all__ += [
] ]
from . import random from . import random
from .random import manual_seed from .random import seed
from .framework import get_default_dtype from .framework import get_default_dtype
from .framework import set_default_dtype from .framework import set_default_dtype
......
...@@ -16,10 +16,10 @@ ...@@ -16,10 +16,10 @@
import paddle.fluid as fluid import paddle.fluid as fluid
from paddle.fluid import core from paddle.fluid import core
__all__ = ['manual_seed', 'get_cuda_rng_state', 'set_cuda_rng_state'] __all__ = ['seed', 'get_cuda_rng_state', 'set_cuda_rng_state']
def manual_seed(seed): def seed(seed):
""" """
Sets the seed for the global default generator, which manages random number generation. Sets the seed for the global default generator, which manages random number generation.
...@@ -34,7 +34,7 @@ def manual_seed(seed): ...@@ -34,7 +34,7 @@ def manual_seed(seed):
.. code-block:: python .. code-block:: python
import paddle import paddle
gen = paddle.manual_seed(102) gen = paddle.seed(102)
""" """
#TODO(zhiqiu): 1. remove program.random_seed when all random-related op upgrade #TODO(zhiqiu): 1. remove program.random_seed when all random-related op upgrade
...@@ -109,7 +109,7 @@ def _manual_program_seed(seed): ...@@ -109,7 +109,7 @@ def _manual_program_seed(seed):
""" """
Sets global seed for generating random numbers. Sets global seed for generating random numbers.
NOTE(zhiqiu): This is the original implementation of manual_seed. Keep it temporarily, NOTE(zhiqiu): This is the original implementation of seed. Keep it temporarily,
since the CUDA generator is not developed yet and we still need it in the unittests. since the CUDA generator is not developed yet and we still need it in the unittests.
Args: Args:
......
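As a quick check of the renamed seeding API above, a minimal sketch (assuming Paddle 2.0rc in the default dynamic mode) shows that re-seeding the global default generator reproduces the same random draws:

.. code-block:: python

    import paddle

    # paddle.seed replaces the pre-2.0rc paddle.manual_seed
    paddle.seed(102)
    a = paddle.rand([2, 3])

    paddle.seed(102)  # re-seed the global default generator
    b = paddle.rand([2, 3])

    # identical, since both draws start from the same seed
    print((a.numpy() == b.numpy()).all())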
...@@ -51,14 +51,14 @@ def summary(net, input_size, dtypes=None): ...@@ -51,14 +51,14 @@ def summary(net, input_size, dtypes=None):
super(LeNet, self).__init__() super(LeNet, self).__init__()
self.num_classes = num_classes self.num_classes = num_classes
self.features = nn.Sequential( self.features = nn.Sequential(
nn.Conv2d( nn.Conv2D(
1, 6, 3, stride=1, padding=1), 1, 6, 3, stride=1, padding=1),
nn.ReLU(), nn.ReLU(),
nn.MaxPool2d(2, 2), nn.MaxPool2D(2, 2),
nn.Conv2d( nn.Conv2D(
6, 16, 5, stride=1, padding=0), 6, 16, 5, stride=1, padding=0),
nn.ReLU(), nn.ReLU(),
nn.MaxPool2d(2, 2)) nn.MaxPool2D(2, 2))
if num_classes > 0: if num_classes > 0:
self.fc = nn.Sequential( self.fc = nn.Sequential(
......
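The docstring above only shows the patched LeNet definition, so here is a hedged end-to-end sketch of calling the summary API with the renamed layers (assuming paddle.summary is exported at the top level, as in 2.0rc; the layer sizes are illustrative assumptions):

.. code-block:: python

    import paddle
    import paddle.nn as nn

    # a small model built from the renamed 2.0rc layer classes
    net = nn.Sequential(
        nn.Conv2D(1, 6, 3, stride=1, padding=1),   # 28x28 -> 28x28
        nn.ReLU(),
        nn.MaxPool2D(2, 2),                        # 28x28 -> 14x14
        nn.Flatten(),
        nn.Linear(6 * 14 * 14, 10))

    params_info = paddle.summary(net, (1, 1, 28, 28))
    print(params_info)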
...@@ -81,29 +81,29 @@ from .layer.common import Flatten #DEFINE_ALIAS ...@@ -81,29 +81,29 @@ from .layer.common import Flatten #DEFINE_ALIAS
from .layer.common import Upsample #DEFINE_ALIAS from .layer.common import Upsample #DEFINE_ALIAS
from .layer.common import Bilinear #DEFINE_ALIAS from .layer.common import Bilinear #DEFINE_ALIAS
from .layer.common import Dropout #DEFINE_ALIAS from .layer.common import Dropout #DEFINE_ALIAS
from .layer.common import Dropout2d #DEFINE_ALIAS from .layer.common import Dropout2D #DEFINE_ALIAS
from .layer.common import Dropout3d #DEFINE_ALIAS from .layer.common import Dropout3D #DEFINE_ALIAS
from .layer.common import AlphaDropout #DEFINE_ALIAS from .layer.common import AlphaDropout #DEFINE_ALIAS
from .layer.pooling import AvgPool1d #DEFINE_ALIAS from .layer.pooling import AvgPool1D #DEFINE_ALIAS
from .layer.pooling import AvgPool2d #DEFINE_ALIAS from .layer.pooling import AvgPool2D #DEFINE_ALIAS
from .layer.pooling import AvgPool3d #DEFINE_ALIAS from .layer.pooling import AvgPool3D #DEFINE_ALIAS
from .layer.pooling import MaxPool1d #DEFINE_ALIAS from .layer.pooling import MaxPool1D #DEFINE_ALIAS
from .layer.pooling import MaxPool2d #DEFINE_ALIAS from .layer.pooling import MaxPool2D #DEFINE_ALIAS
from .layer.pooling import MaxPool3d #DEFINE_ALIAS from .layer.pooling import MaxPool3D #DEFINE_ALIAS
from .layer.pooling import AdaptiveAvgPool1d #DEFINE_ALIAS from .layer.pooling import AdaptiveAvgPool1D #DEFINE_ALIAS
from .layer.pooling import AdaptiveAvgPool2d #DEFINE_ALIAS from .layer.pooling import AdaptiveAvgPool2D #DEFINE_ALIAS
from .layer.pooling import AdaptiveAvgPool3d #DEFINE_ALIAS from .layer.pooling import AdaptiveAvgPool3D #DEFINE_ALIAS
from .layer.pooling import AdaptiveMaxPool1d #DEFINE_ALIAS from .layer.pooling import AdaptiveMaxPool1D #DEFINE_ALIAS
from .layer.pooling import AdaptiveMaxPool2d #DEFINE_ALIAS from .layer.pooling import AdaptiveMaxPool2D #DEFINE_ALIAS
from .layer.pooling import AdaptiveMaxPool3d #DEFINE_ALIAS from .layer.pooling import AdaptiveMaxPool3D #DEFINE_ALIAS
from .layer.conv import Conv1d #DEFINE_ALIAS from .layer.conv import Conv1D #DEFINE_ALIAS
from .layer.conv import Conv2d #DEFINE_ALIAS from .layer.conv import Conv2D #DEFINE_ALIAS
from .layer.conv import Conv3d #DEFINE_ALIAS from .layer.conv import Conv3D #DEFINE_ALIAS
from .layer.conv import ConvTranspose1d #DEFINE_ALIAS from .layer.conv import Conv1DTranspose #DEFINE_ALIAS
from .layer.conv import ConvTranspose2d #DEFINE_ALIAS from .layer.conv import Conv2DTranspose #DEFINE_ALIAS
from .layer.conv import ConvTranspose3d #DEFINE_ALIAS from .layer.conv import Conv3DTranspose #DEFINE_ALIAS
# from .layer.conv import TreeConv #DEFINE_ALIAS # from .layer.conv import TreeConv #DEFINE_ALIAS
# from .layer.conv import Conv1D #DEFINE_ALIAS # from .layer.conv import Conv1D #DEFINE_ALIAS
from .layer.extension import RowConv #DEFINE_ALIAS from .layer.extension import RowConv #DEFINE_ALIAS
...@@ -125,12 +125,12 @@ from .layer.norm import SyncBatchNorm #DEFINE_ALIAS ...@@ -125,12 +125,12 @@ from .layer.norm import SyncBatchNorm #DEFINE_ALIAS
from .layer.norm import GroupNorm #DEFINE_ALIAS from .layer.norm import GroupNorm #DEFINE_ALIAS
from .layer.norm import LayerNorm #DEFINE_ALIAS from .layer.norm import LayerNorm #DEFINE_ALIAS
from .layer.norm import SpectralNorm #DEFINE_ALIAS from .layer.norm import SpectralNorm #DEFINE_ALIAS
from .layer.norm import InstanceNorm1d #DEFINE_ALIAS from .layer.norm import InstanceNorm1D #DEFINE_ALIAS
from .layer.norm import InstanceNorm2d #DEFINE_ALIAS from .layer.norm import InstanceNorm2D #DEFINE_ALIAS
from .layer.norm import InstanceNorm3d #DEFINE_ALIAS from .layer.norm import InstanceNorm3D #DEFINE_ALIAS
from .layer.norm import BatchNorm1d #DEFINE_ALIAS from .layer.norm import BatchNorm1D #DEFINE_ALIAS
from .layer.norm import BatchNorm2d #DEFINE_ALIAS from .layer.norm import BatchNorm2D #DEFINE_ALIAS
from .layer.norm import BatchNorm3d #DEFINE_ALIAS from .layer.norm import BatchNorm3D #DEFINE_ALIAS
from .layer.norm import LocalResponseNorm #DEFINE_ALIAS from .layer.norm import LocalResponseNorm #DEFINE_ALIAS
from .layer.rnn import RNNCellBase #DEFINE_ALIAS from .layer.rnn import RNNCellBase #DEFINE_ALIAS
......
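The alias block above encodes the whole 2.0rc naming rule: the dimension suffix is capitalized (``xxx2d`` becomes ``xxx2D``), and for transposed convolutions the suffix additionally moves in front of ``Transpose``. A before/after sketch (the left-hand names are the pre-rename 2.0 beta API):

.. code-block:: python

    import paddle.nn as nn

    # 2.0 beta                        2.0rc
    # nn.Conv2d(4, 6, 3)          ->  nn.Conv2D(4, 6, 3)
    # nn.MaxPool2d(2, 2)          ->  nn.MaxPool2D(2, 2)
    # nn.Dropout2d(p=0.5)         ->  nn.Dropout2D(p=0.5)
    # nn.ConvTranspose2d(6, 4, 3) ->  nn.Conv2DTranspose(6, 4, 3)

    layer = nn.Conv2DTranspose(6, 4, (3, 3))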
...@@ -405,7 +405,7 @@ def conv2d(x, ...@@ -405,7 +405,7 @@ def conv2d(x,
points. If dilation is a tuple, it must contain two integers, (dilation_height, points. If dilation is a tuple, it must contain two integers, (dilation_height,
dilation_width). Otherwise, dilation_height = dilation_width = dilation. dilation_width). Otherwise, dilation_height = dilation_width = dilation.
Default: dilation = 1. Default: dilation = 1.
groups (int): The groups number of the Conv2d Layer. According to grouped groups (int): The groups number of the Conv2D Layer. According to grouped
convolution in Alex Krizhevsky's Deep CNN paper: when group=2, convolution in Alex Krizhevsky's Deep CNN paper: when group=2,
the first half of the filters is only connected to the first half the first half of the filters is only connected to the first half
of the input channels, while the second half of the filters is only of the input channels, while the second half of the filters is only
...@@ -896,7 +896,7 @@ def conv_transpose2d(x, ...@@ -896,7 +896,7 @@ def conv_transpose2d(x,
Default: padding = 0. Default: padding = 0.
output_padding(int|list|tuple, optional): Additional size added to one side output_padding(int|list|tuple, optional): Additional size added to one side
of each dimension in the output shape. Default: 0. of each dimension in the output shape. Default: 0.
groups(int, optional): The groups number of the Conv2d transpose layer. Inspired by groups(int, optional): The groups number of the Conv2D transpose layer. Inspired by
grouped convolution in Alex Krizhevsky's Deep CNN paper, in which grouped convolution in Alex Krizhevsky's Deep CNN paper, in which
when group=2, the first half of the filters is only connected to the when group=2, the first half of the filters is only connected to the
first half of the input channels, while the second half of the first half of the input channels, while the second half of the
...@@ -1122,7 +1122,7 @@ def conv3d(x, ...@@ -1122,7 +1122,7 @@ def conv3d(x,
If dilation is a tuple, it must contain three integers, (dilation_depth, dilation_height, If dilation is a tuple, it must contain three integers, (dilation_depth, dilation_height,
dilation_width). Otherwise, dilation_depth = dilation_height = dilation_width = dilation. dilation_width). Otherwise, dilation_depth = dilation_height = dilation_width = dilation.
Default: dilation = 1. Default: dilation = 1.
groups (int): The groups number of the Conv3d Layer. According to grouped groups (int): The groups number of the Conv3D Layer. According to grouped
convolution in Alex Krizhevsky's Deep CNN paper: when group=2, convolution in Alex Krizhevsky's Deep CNN paper: when group=2,
the first half of the filters is only connected to the first half the first half of the filters is only connected to the first half
of the input channels, while the second half of the filters is only of the input channels, while the second half of the filters is only
...@@ -1340,7 +1340,7 @@ def conv_transpose3d(x, ...@@ -1340,7 +1340,7 @@ def conv_transpose3d(x,
Default: padding = 0. Default: padding = 0.
output_padding(int|list|tuple, optional): Additional size added to one side output_padding(int|list|tuple, optional): Additional size added to one side
of each dimension in the output shape. Default: 0. of each dimension in the output shape. Default: 0.
groups(int, optional): The groups number of the Conv3d transpose layer. Inspired by groups(int, optional): The groups number of the Conv3D transpose layer. Inspired by
grouped convolution in Alex Krizhevsky's Deep CNN paper, in which grouped convolution in Alex Krizhevsky's Deep CNN paper, in which
when group=2, the first half of the filters is only connected to the when group=2, the first half of the filters is only connected to the
first half of the input channels, while the second half of the first half of the input channels, while the second half of the
......
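The grouped-convolution text above is easiest to see with shapes; a minimal sketch using the functional API (the channel counts are arbitrary assumptions):

.. code-block:: python

    import paddle
    import paddle.nn.functional as F

    x = paddle.rand([2, 4, 8, 8])    # NCHW, 4 input channels
    # with groups=2, each group of 4/2 = 2 input channels gets its
    # own set of 6/2 = 3 filters; weight is [out_c, in_c//groups, kH, kW]
    w = paddle.rand([6, 2, 3, 3])
    y = F.conv2d(x, w, stride=1, padding=1, groups=2)
    print(y.shape)                   # [2, 6, 8, 8]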
...@@ -127,7 +127,7 @@ def batch_norm(x, ...@@ -127,7 +127,7 @@ def batch_norm(x,
""" """
Applies Batch Normalization as described in the paper Batch Normalization: Accelerating Deep Network Training by Reducing Internal Covariate Shift . Applies Batch Normalization as described in the paper Batch Normalization: Accelerating Deep Network Training by Reducing Internal Covariate Shift .
nn.functional.batch_norm is used for nn.BatchNorm1d, nn.BatchNorm2d, nn.BatchNorm3d. Please use the above APIs for BatchNorm. nn.functional.batch_norm is used for nn.BatchNorm1D, nn.BatchNorm2D, nn.BatchNorm3D. Please use the above APIs for BatchNorm.
Parameters: Parameters:
x(Tensor): input value. Its data type should be float32 or float64. x(Tensor): input value. Its data type should be float32 or float64.
...@@ -338,7 +338,7 @@ def instance_norm(x, ...@@ -338,7 +338,7 @@ def instance_norm(x,
data_format="NCHW", data_format="NCHW",
name=None): name=None):
""" """
See more detail in nn.layer.InstanceNorm2d. See more detail in nn.layer.InstanceNorm2D.
Parameters: Parameters:
x(Tensor): Input Tensor. It's data type should be float32, float64. x(Tensor): Input Tensor. It's data type should be float32, float64.
......
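For the functional form mentioned above, a hedged sketch (assuming the 2.0rc signature, where the running statistics and affine parameters are passed in explicitly; the tensor shapes are assumptions):

.. code-block:: python

    import paddle
    import paddle.nn.functional as F

    x = paddle.rand([2, 3, 4, 4])          # NCHW, 3 channels
    running_mean = paddle.zeros([3])
    running_var = paddle.ones([3])
    weight = paddle.ones([3])
    bias = paddle.zeros([3])

    # inference-mode normalization with the given statistics
    y = F.batch_norm(x, running_mean, running_var, weight, bias,
                     training=False)
    print(y.shape)                         # [2, 3, 4, 4]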
...@@ -53,27 +53,27 @@ from .common import Linear #DEFINE_ALIAS ...@@ -53,27 +53,27 @@ from .common import Linear #DEFINE_ALIAS
from .common import Flatten #DEFINE_ALIAS from .common import Flatten #DEFINE_ALIAS
from .common import Upsample #DEFINE_ALIAS from .common import Upsample #DEFINE_ALIAS
from .common import Dropout #DEFINE_ALIAS from .common import Dropout #DEFINE_ALIAS
from .common import Dropout2d #DEFINE_ALIAS from .common import Dropout2D #DEFINE_ALIAS
from .common import Dropout3d #DEFINE_ALIAS from .common import Dropout3D #DEFINE_ALIAS
from .common import AlphaDropout #DEFINE_ALIAS from .common import AlphaDropout #DEFINE_ALIAS
from .pooling import AvgPool1d #DEFINE_ALIAS from .pooling import AvgPool1D #DEFINE_ALIAS
from .pooling import AvgPool2d #DEFINE_ALIAS from .pooling import AvgPool2D #DEFINE_ALIAS
from .pooling import AvgPool3d #DEFINE_ALIAS from .pooling import AvgPool3D #DEFINE_ALIAS
from .pooling import MaxPool1d #DEFINE_ALIAS from .pooling import MaxPool1D #DEFINE_ALIAS
from .pooling import MaxPool2d #DEFINE_ALIAS from .pooling import MaxPool2D #DEFINE_ALIAS
from .pooling import MaxPool3d #DEFINE_ALIAS from .pooling import MaxPool3D #DEFINE_ALIAS
from .pooling import AdaptiveAvgPool1d #DEFINE_ALIAS from .pooling import AdaptiveAvgPool1D #DEFINE_ALIAS
from .pooling import AdaptiveAvgPool2d #DEFINE_ALIAS from .pooling import AdaptiveAvgPool2D #DEFINE_ALIAS
from .pooling import AdaptiveAvgPool3d #DEFINE_ALIAS from .pooling import AdaptiveAvgPool3D #DEFINE_ALIAS
from .pooling import AdaptiveMaxPool1d #DEFINE_ALIAS from .pooling import AdaptiveMaxPool1D #DEFINE_ALIAS
from .pooling import AdaptiveMaxPool2d #DEFINE_ALIAS from .pooling import AdaptiveMaxPool2D #DEFINE_ALIAS
from .pooling import AdaptiveMaxPool3d #DEFINE_ALIAS from .pooling import AdaptiveMaxPool3D #DEFINE_ALIAS
from .conv import Conv1d #DEFINE_ALIAS from .conv import Conv1D #DEFINE_ALIAS
from .conv import Conv2d #DEFINE_ALIAS from .conv import Conv2D #DEFINE_ALIAS
from .conv import Conv3d #DEFINE_ALIAS from .conv import Conv3D #DEFINE_ALIAS
from .conv import ConvTranspose1d #DEFINE_ALIAS from .conv import Conv1DTranspose #DEFINE_ALIAS
from .conv import ConvTranspose2d #DEFINE_ALIAS from .conv import Conv2DTranspose #DEFINE_ALIAS
from .conv import ConvTranspose3d #DEFINE_ALIAS from .conv import Conv3DTranspose #DEFINE_ALIAS
# from .conv import TreeConv #DEFINE_ALIAS # from .conv import TreeConv #DEFINE_ALIAS
# from .conv import Conv1D #DEFINE_ALIAS # from .conv import Conv1D #DEFINE_ALIAS
from .extension import RowConv #DEFINE_ALIAS from .extension import RowConv #DEFINE_ALIAS
......
...@@ -32,8 +32,8 @@ __all__ = [ ...@@ -32,8 +32,8 @@ __all__ = [
'Pad3D', 'Pad3D',
'CosineSimilarity', 'CosineSimilarity',
'Dropout', 'Dropout',
'Dropout2d', 'Dropout2D',
'Dropout3d', 'Dropout3D',
'Bilinear', 'Bilinear',
'AlphaDropout', 'AlphaDropout',
] ]
...@@ -708,12 +708,12 @@ class Dropout(layers.Layer): ...@@ -708,12 +708,12 @@ class Dropout(layers.Layer):
return out return out
class Dropout2d(layers.Layer): class Dropout2D(layers.Layer):
""" """
Randomly zero out entire channels (in the batched input 4d tensor with the shape `NCHW` , Randomly zero out entire channels (in the batched input 4d tensor with the shape `NCHW` ,
a channel is a 2D feature map with the shape `HW`). Each channel will be zeroed out independently a channel is a 2D feature map with the shape `HW`). Each channel will be zeroed out independently
on every forward call with probability `p` using samples from a Bernoulli distribution. on every forward call with probability `p` using samples from a Bernoulli distribution.
Dropout2d will help promote independence between feature maps as described in the paper: Dropout2D will help promote independence between feature maps as described in the paper:
`Efficient Object Localization Using Convolutional Networks <https://arxiv.org/abs/1411.4280>`_ `Efficient Object Localization Using Convolutional Networks <https://arxiv.org/abs/1411.4280>`_
See ``paddle.nn.functional.dropout2d`` for more details. See ``paddle.nn.functional.dropout2d`` for more details.
...@@ -740,7 +740,7 @@ class Dropout2d(layers.Layer): ...@@ -740,7 +740,7 @@ class Dropout2d(layers.Layer):
paddle.disable_static() paddle.disable_static()
x = np.random.random(size=(2, 3, 4, 5)).astype('float32') x = np.random.random(size=(2, 3, 4, 5)).astype('float32')
x = paddle.to_tensor(x) x = paddle.to_tensor(x)
m = paddle.nn.Dropout2d(p=0.5) m = paddle.nn.Dropout2D(p=0.5)
y_train = m(x) y_train = m(x)
m.eval() # switch the model to test phase m.eval() # switch the model to test phase
y_test = m(x) y_test = m(x)
...@@ -750,7 +750,7 @@ class Dropout2d(layers.Layer): ...@@ -750,7 +750,7 @@ class Dropout2d(layers.Layer):
""" """
def __init__(self, p=0.5, data_format='NCHW', name=None): def __init__(self, p=0.5, data_format='NCHW', name=None):
super(Dropout2d, self).__init__() super(Dropout2D, self).__init__()
self.p = p self.p = p
self.data_format = data_format self.data_format = data_format
...@@ -766,12 +766,12 @@ class Dropout2d(layers.Layer): ...@@ -766,12 +766,12 @@ class Dropout2d(layers.Layer):
return out return out
class Dropout3d(layers.Layer): class Dropout3D(layers.Layer):
""" """
Randomly zero out entire channels (in the batched input 5d tensor with the shape `NCDHW` , Randomly zero out entire channels (in the batched input 5d tensor with the shape `NCDHW` ,
a channel is a 3D feature map with the shape `DHW` ). Each channel will be zeroed out independently a channel is a 3D feature map with the shape `DHW` ). Each channel will be zeroed out independently
on every forward call with probability `p` using samples from a Bernoulli distribution. on every forward call with probability `p` using samples from a Bernoulli distribution.
Dropout3d will help promote independence between feature maps as described in the paper: Dropout3D will help promote independence between feature maps as described in the paper:
`Efficient Object Localization Using Convolutional Networks <https://arxiv.org/abs/1411.4280>`_ `Efficient Object Localization Using Convolutional Networks <https://arxiv.org/abs/1411.4280>`_
See ``paddle.nn.functional.dropout3d`` for more details. See ``paddle.nn.functional.dropout3d`` for more details.
...@@ -798,7 +798,7 @@ class Dropout3d(layers.Layer): ...@@ -798,7 +798,7 @@ class Dropout3d(layers.Layer):
paddle.disable_static() paddle.disable_static()
x = np.random.random(size=(2, 3, 4, 5, 6)).astype('float32') x = np.random.random(size=(2, 3, 4, 5, 6)).astype('float32')
x = paddle.to_tensor(x) x = paddle.to_tensor(x)
m = paddle.nn.Dropout3d(p=0.5) m = paddle.nn.Dropout3D(p=0.5)
y_train = m(x) y_train = m(x)
m.eval() # switch the model to test phase m.eval() # switch the model to test phase
y_test = m(x) y_test = m(x)
...@@ -808,7 +808,7 @@ class Dropout3d(layers.Layer): ...@@ -808,7 +808,7 @@ class Dropout3d(layers.Layer):
""" """
def __init__(self, p=0.5, data_format='NCDHW', name=None): def __init__(self, p=0.5, data_format='NCDHW', name=None):
super(Dropout3d, self).__init__() super(Dropout3D, self).__init__()
self.p = p self.p = p
self.data_format = data_format self.data_format = data_format
......
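A small sketch of the channel-wise behaviour described above — whole HW feature maps are zeroed rather than individual elements, and eval mode is the identity (assumes dynamic mode and the default upscale-in-train scaling):

.. code-block:: python

    import paddle

    paddle.seed(42)
    x = paddle.ones([1, 4, 2, 2])    # NCHW, four all-ones channels
    m = paddle.nn.Dropout2D(p=0.5)

    y_train = m(x)   # some channels become all zeros; survivors are
                     # scaled by 1 / (1 - p) in the default train mode
    m.eval()
    y_test = m(x)    # identity in the test phase
    print(y_train.numpy()[0, :, 0, 0], y_test.numpy()[0, :, 0, 0])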
...@@ -15,12 +15,12 @@ ...@@ -15,12 +15,12 @@
# TODO: define classes of convolutional neural network # TODO: define classes of convolutional neural network
__all__ = [ __all__ = [
'Conv1d', 'Conv1D',
'Conv2d', 'Conv2D',
'Conv3d', 'Conv3D',
'ConvTranspose1d', 'Conv1DTranspose',
'ConvTranspose2d', 'Conv2DTranspose',
'ConvTranspose3d', 'Conv3DTranspose',
] ]
import numpy as np import numpy as np
...@@ -113,9 +113,9 @@ class _ConvNd(layers.Layer): ...@@ -113,9 +113,9 @@ class _ConvNd(layers.Layer):
attr=self._bias_attr, shape=[self._out_channels], is_bias=True) attr=self._bias_attr, shape=[self._out_channels], is_bias=True)
class Conv1d(_ConvNd): class Conv1D(_ConvNd):
""" """
This interface is used to construct a callable object of the ``Conv1d`` class. This interface is used to construct a callable object of the ``Conv1D`` class.
For more details, refer to code examples. For more details, refer to code examples.
The convolution1D layer calculates the output based on the input, filter The convolution1D layer calculates the output based on the input, filter
and stride, padding, dilation, groups parameters. Input and and stride, padding, dilation, groups parameters. Input and
...@@ -194,7 +194,7 @@ class Conv1d(_ConvNd): ...@@ -194,7 +194,7 @@ class Conv1d(_ConvNd):
Examples: Examples:
.. code-block:: python .. code-block:: python
import paddle import paddle
from paddle.nn import Conv1d from paddle.nn import Conv1D
import numpy as np import numpy as np
x = np.array([[[4, 8, 1, 9], x = np.array([[[4, 8, 1, 9],
[7, 2, 0, 9], [7, 2, 0, 9],
...@@ -208,7 +208,7 @@ class Conv1d(_ConvNd): ...@@ -208,7 +208,7 @@ class Conv1d(_ConvNd):
[5, 6, 8]]]).astype(np.float32) [5, 6, 8]]]).astype(np.float32)
paddle.disable_static() paddle.disable_static()
x_t = paddle.to_tensor(x) x_t = paddle.to_tensor(x)
conv = Conv1d(3, 2, 3) conv = Conv1D(3, 2, 3)
conv.weight.set_value(w) conv.weight.set_value(w)
y_t = conv(x_t) y_t = conv(x_t)
y_np = y_t.numpy() y_np = y_t.numpy()
...@@ -229,7 +229,7 @@ class Conv1d(_ConvNd): ...@@ -229,7 +229,7 @@ class Conv1d(_ConvNd):
weight_attr=None, weight_attr=None,
bias_attr=None, bias_attr=None,
data_format="NCL"): data_format="NCL"):
super(Conv1d, self).__init__( super(Conv1D, self).__init__(
in_channels, in_channels,
out_channels, out_channels,
kernel_size, kernel_size,
...@@ -266,9 +266,9 @@ class Conv1d(_ConvNd): ...@@ -266,9 +266,9 @@ class Conv1d(_ConvNd):
return out return out
class ConvTranspose1d(_ConvNd): class Conv1DTranspose(_ConvNd):
""" """
This interface is used to construct a callable object of the ``ConvTranspose1d`` class. This interface is used to construct a callable object of the ``Conv1DTranspose`` class.
For more details, refer to code examples. For more details, refer to code examples.
The 1-D convolution transpose layer calculates the output based on the input, The 1-D convolution transpose layer calculates the output based on the input,
filter, and dilation, stride, padding. Input(Input) and output(Output) filter, and dilation, stride, padding. Input(Input) and output(Output)
...@@ -340,7 +340,7 @@ class ConvTranspose1d(_ConvNd): ...@@ -340,7 +340,7 @@ class ConvTranspose1d(_ConvNd):
`[pad]` or `[pad_left, pad_right]`. Default: padding = 0. `[pad]` or `[pad_left, pad_right]`. Default: padding = 0.
output_padding(int|list|tuple, optional): The count of zeros to be added to tail of each dimension. output_padding(int|list|tuple, optional): The count of zeros to be added to tail of each dimension.
If it is a tuple, it must contain one integer. Default: 0. If it is a tuple, it must contain one integer. Default: 0.
groups(int, optional): The groups number of the Conv1d transpose layer. Inspired by groups(int, optional): The groups number of the Conv1D transpose layer. Inspired by
grouped convolution in Alex Krizhevsky's Deep CNN paper, in which grouped convolution in Alex Krizhevsky's Deep CNN paper, in which
when group=2, the first half of the filters is only connected to the when group=2, the first half of the filters is only connected to the
first half of the input channels, while the second half of the first half of the input channels, while the second half of the
...@@ -379,7 +379,7 @@ class ConvTranspose1d(_ConvNd): ...@@ -379,7 +379,7 @@ class ConvTranspose1d(_ConvNd):
.. code-block:: python .. code-block:: python
import paddle import paddle
from paddle.nn import ConvTranspose1d from paddle.nn import Conv1DTranspose
import numpy as np import numpy as np
paddle.disable_static() paddle.disable_static()
...@@ -390,7 +390,7 @@ class ConvTranspose1d(_ConvNd): ...@@ -390,7 +390,7 @@ class ConvTranspose1d(_ConvNd):
y=np.array([[[7, 0]], y=np.array([[[7, 0]],
[[4, 2]]]).astype(np.float32) [[4, 2]]]).astype(np.float32)
x_t = paddle.to_tensor(x) x_t = paddle.to_tensor(x)
conv = ConvTranspose1d(2, 1, 2) conv = Conv1DTranspose(2, 1, 2)
conv.weight.set_value(y) conv.weight.set_value(y)
y_t = conv(x_t) y_t = conv(x_t)
y_np = y_t.numpy() y_np = y_t.numpy()
...@@ -411,7 +411,7 @@ class ConvTranspose1d(_ConvNd): ...@@ -411,7 +411,7 @@ class ConvTranspose1d(_ConvNd):
weight_attr=None, weight_attr=None,
bias_attr=None, bias_attr=None,
data_format="NCL"): data_format="NCL"):
super(ConvTranspose1d, self).__init__( super(Conv1DTranspose, self).__init__(
in_channels, in_channels,
out_channels, out_channels,
kernel_size, kernel_size,
...@@ -441,9 +441,9 @@ class ConvTranspose1d(_ConvNd): ...@@ -441,9 +441,9 @@ class ConvTranspose1d(_ConvNd):
return out return out
class Conv2d(_ConvNd): class Conv2D(_ConvNd):
""" """
This interface is used to construct a callable object of the ``Conv2d`` class. This interface is used to construct a callable object of the ``Conv2D`` class.
For more details, refer to code examples. For more details, refer to code examples.
The convolution2D layer calculates the output based on the input, filter The convolution2D layer calculates the output based on the input, filter
and strides, paddings, dilations, groups parameters. Input and and strides, paddings, dilations, groups parameters. Input and
...@@ -491,7 +491,7 @@ class Conv2d(_ConvNd): ...@@ -491,7 +491,7 @@ class Conv2d(_ConvNd):
dilation(int|list|tuple, optional): The dilation size. If dilation is a tuple, it must dilation(int|list|tuple, optional): The dilation size. If dilation is a tuple, it must
contain two integers, (dilation_H, dilation_W). Otherwise, the contain two integers, (dilation_H, dilation_W). Otherwise, the
dilation_H = dilation_W = dilation. The default value is 1. dilation_H = dilation_W = dilation. The default value is 1.
groups(int, optional): The groups number of the Conv2d Layer. According to grouped groups(int, optional): The groups number of the Conv2D Layer. According to grouped
convolution in Alex Krizhevsky's Deep CNN paper: when group=2, convolution in Alex Krizhevsky's Deep CNN paper: when group=2,
the first half of the filters is only connected to the first half the first half of the filters is only connected to the first half
of the input channels, while the second half of the filters is only of the input channels, while the second half of the filters is only
...@@ -536,10 +536,12 @@ class Conv2d(_ConvNd): ...@@ -536,10 +536,12 @@ class Conv2d(_ConvNd):
import paddle import paddle
import paddle.nn as nn import paddle.nn as nn
paddle.disable_static()
x_var = paddle.uniform((2, 4, 8, 8), dtype='float32', min=-1., max=1.) x_var = paddle.uniform((2, 4, 8, 8), dtype='float32', min=-1., max=1.)
conv = nn.Conv2d(4, 6, (3, 3)) conv = nn.Conv2D(4, 6, (3, 3))
y_var = conv(x_var) y_var = conv(x_var)
y_np = y_var.numpy() y_np = y_var.numpy()
print(y_np.shape) print(y_np.shape)
...@@ -558,7 +560,7 @@ class Conv2d(_ConvNd): ...@@ -558,7 +560,7 @@ class Conv2d(_ConvNd):
weight_attr=None, weight_attr=None,
bias_attr=None, bias_attr=None,
data_format="NCHW"): data_format="NCHW"):
super(Conv2d, self).__init__( super(Conv2D, self).__init__(
in_channels, in_channels,
out_channels, out_channels,
kernel_size, kernel_size,
...@@ -600,9 +602,9 @@ class Conv2d(_ConvNd): ...@@ -600,9 +602,9 @@ class Conv2d(_ConvNd):
return out return out
class ConvTranspose2d(_ConvNd): class Conv2DTranspose(_ConvNd):
""" """
This interface is used to construct a callable object of the ``ConvTranspose2d`` class. This interface is used to construct a callable object of the ``Conv2DTranspose`` class.
For more details, refer to code examples. For more details, refer to code examples.
The convolution2D transpose layer calculates the output based on the input, The convolution2D transpose layer calculates the output based on the input,
filter, and dilations, strides, paddings. Input and output filter, and dilations, strides, paddings. Input and output
...@@ -653,7 +655,7 @@ class ConvTranspose2d(_ConvNd): ...@@ -653,7 +655,7 @@ class ConvTranspose2d(_ConvNd):
dilation(int|list|tuple, optional): The dilation size. If dilation is a tuple, it must dilation(int|list|tuple, optional): The dilation size. If dilation is a tuple, it must
contain two integers, (dilation_H, dilation_W). Otherwise, the contain two integers, (dilation_H, dilation_W). Otherwise, the
dilation_H = dilation_W = dilation. Default: 1. dilation_H = dilation_W = dilation. Default: 1.
groups(int, optional): The groups number of the Conv2d transpose layer. Inspired by groups(int, optional): The groups number of the Conv2D transpose layer. Inspired by
grouped convolution in Alex Krizhevsky's Deep CNN paper, in which grouped convolution in Alex Krizhevsky's Deep CNN paper, in which
when group=2, the first half of the filters is only connected to the when group=2, the first half of the filters is only connected to the
first half of the input channels, while the second half of the first half of the input channels, while the second half of the
...@@ -701,10 +703,12 @@ class ConvTranspose2d(_ConvNd): ...@@ -701,10 +703,12 @@ class ConvTranspose2d(_ConvNd):
import paddle import paddle
import paddle.nn as nn import paddle.nn as nn
paddle.disable_static()
x_var = paddle.uniform((2, 4, 8, 8), dtype='float32', min=-1., max=1.) x_var = paddle.uniform((2, 4, 8, 8), dtype='float32', min=-1., max=1.)
conv = nn.ConvTranspose2d(4, 6, (3, 3)) conv = nn.Conv2DTranspose(4, 6, (3, 3))
y_var = conv(x_var) y_var = conv(x_var)
y_np = y_var.numpy() y_np = y_var.numpy()
print(y_np.shape) print(y_np.shape)
...@@ -723,7 +727,7 @@ class ConvTranspose2d(_ConvNd): ...@@ -723,7 +727,7 @@ class ConvTranspose2d(_ConvNd):
weight_attr=None, weight_attr=None,
bias_attr=None, bias_attr=None,
data_format="NCHW"): data_format="NCHW"):
super(ConvTranspose2d, self).__init__( super(Conv2DTranspose, self).__init__(
in_channels, in_channels,
out_channels, out_channels,
kernel_size, kernel_size,
...@@ -758,7 +762,7 @@ class ConvTranspose2d(_ConvNd): ...@@ -758,7 +762,7 @@ class ConvTranspose2d(_ConvNd):
return out return out
class Conv3d(_ConvNd): class Conv3D(_ConvNd):
""" """
**Convolution3D Layer** **Convolution3D Layer**
The convolution3d layer calculates the output based on the input, filter The convolution3d layer calculates the output based on the input, filter
...@@ -802,7 +806,7 @@ class Conv3d(_ConvNd): ...@@ -802,7 +806,7 @@ class Conv3d(_ConvNd):
dilation(int|list|tuple, optional): The dilation size. If dilation is a tuple, it must dilation(int|list|tuple, optional): The dilation size. If dilation is a tuple, it must
contain three integers, (dilation_D, dilation_H, dilation_W). Otherwise, the contain three integers, (dilation_D, dilation_H, dilation_W). Otherwise, the
dilation_D = dilation_H = dilation_W = dilation. The default value is 1. dilation_D = dilation_H = dilation_W = dilation. The default value is 1.
groups(int, optional): The groups number of the Conv3d Layer. According to grouped groups(int, optional): The groups number of the Conv3D Layer. According to grouped
convolution in Alex Krizhevsky's Deep CNN paper: when group=2, convolution in Alex Krizhevsky's Deep CNN paper: when group=2,
the first half of the filters is only connected to the first half the first half of the filters is only connected to the first half
of the input channels, while the second half of the filters is only of the input channels, while the second half of the filters is only
...@@ -853,10 +857,12 @@ class Conv3d(_ConvNd): ...@@ -853,10 +857,12 @@ class Conv3d(_ConvNd):
import paddle import paddle
import paddle.nn as nn import paddle.nn as nn
paddle.disable_static()
x_var = paddle.uniform((2, 4, 8, 8, 8), dtype='float32', min=-1., max=1.) x_var = paddle.uniform((2, 4, 8, 8, 8), dtype='float32', min=-1., max=1.)
conv = nn.Conv3d(4, 6, (3, 3, 3)) conv = nn.Conv3D(4, 6, (3, 3, 3))
y_var = conv(x_var) y_var = conv(x_var)
y_np = y_var.numpy() y_np = y_var.numpy()
print(y_np.shape) print(y_np.shape)
...@@ -875,7 +881,7 @@ class Conv3d(_ConvNd): ...@@ -875,7 +881,7 @@ class Conv3d(_ConvNd):
weight_attr=None, weight_attr=None,
bias_attr=None, bias_attr=None,
data_format="NCDHW"): data_format="NCDHW"):
super(Conv3d, self).__init__( super(Conv3D, self).__init__(
in_channels, in_channels,
out_channels, out_channels,
kernel_size, kernel_size,
...@@ -917,7 +923,7 @@ class Conv3d(_ConvNd): ...@@ -917,7 +923,7 @@ class Conv3d(_ConvNd):
return out return out
class ConvTranspose3d(_ConvNd): class Conv3DTranspose(_ConvNd):
""" """
**Convolution3D transpose layer** **Convolution3D transpose layer**
The convolution3D transpose layer calculates the output based on the input, The convolution3D transpose layer calculates the output based on the input,
...@@ -981,7 +987,7 @@ class ConvTranspose3d(_ConvNd): ...@@ -981,7 +987,7 @@ class ConvTranspose3d(_ConvNd):
dilation(int|list|tuple, optional): The dilation size. If dilation is a tuple, it must dilation(int|list|tuple, optional): The dilation size. If dilation is a tuple, it must
contain three integers, (dilation_D, dilation_H, dilation_W). Otherwise, the contain three integers, (dilation_D, dilation_H, dilation_W). Otherwise, the
dilation_D = dilation_H = dilation_W = dilation. The default value is 1. dilation_D = dilation_H = dilation_W = dilation. The default value is 1.
groups(int, optional): The groups number of the Conv3d transpose layer. Inspired by groups(int, optional): The groups number of the Conv3D transpose layer. Inspired by
grouped convolution in Alex Krizhevsky's Deep CNN paper, in which grouped convolution in Alex Krizhevsky's Deep CNN paper, in which
when group=2, the first half of the filters is only connected to the when group=2, the first half of the filters is only connected to the
first half of the input channels, while the second half of the first half of the input channels, while the second half of the
...@@ -1035,10 +1041,12 @@ class ConvTranspose3d(_ConvNd): ...@@ -1035,10 +1041,12 @@ class ConvTranspose3d(_ConvNd):
import paddle import paddle
import paddle.nn as nn import paddle.nn as nn
paddle.disable_static()
x_var = paddle.uniform((2, 4, 8, 8, 8), dtype='float32', min=-1., max=1.) x_var = paddle.uniform((2, 4, 8, 8, 8), dtype='float32', min=-1., max=1.)
conv = nn.ConvTranspose3d(4, 6, (3, 3, 3)) conv = nn.Conv3DTranspose(4, 6, (3, 3, 3))
y_var = conv(x_var) y_var = conv(x_var)
y_np = y_var.numpy() y_np = y_var.numpy()
print(y_np.shape) print(y_np.shape)
...@@ -1057,7 +1065,7 @@ class ConvTranspose3d(_ConvNd): ...@@ -1057,7 +1065,7 @@ class ConvTranspose3d(_ConvNd):
weight_attr=None, weight_attr=None,
bias_attr=None, bias_attr=None,
data_format="NCDHW"): data_format="NCDHW"):
super(ConvTranspose3d, self).__init__( super(Conv3DTranspose, self).__init__(
in_channels, in_channels,
out_channels, out_channels,
kernel_size, kernel_size,
......
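As a sanity check on the renamed layer classes above, a transposed convolution with mirrored arguments restores the spatial size reduced by the forward convolution (the sizes here are illustrative assumptions):

.. code-block:: python

    import paddle
    import paddle.nn as nn

    x = paddle.uniform((2, 4, 8, 8), dtype='float32', min=-1., max=1.)

    down = nn.Conv2D(4, 6, (3, 3))          # was nn.Conv2d: 8x8 -> 6x6
    up = nn.Conv2DTranspose(6, 4, (3, 3))   # was nn.ConvTranspose2d: 6x6 -> 8x8
    y = up(down(x))
    print(y.shape)                          # [2, 4, 8, 8]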
...@@ -54,17 +54,17 @@ from ...fluid.dygraph.base import no_grad ...@@ -54,17 +54,17 @@ from ...fluid.dygraph.base import no_grad
from .. import functional as F from .. import functional as F
__all__ = [ __all__ = [
'BatchNorm', 'GroupNorm', 'LayerNorm', 'SpectralNorm', 'BatchNorm1d', 'BatchNorm', 'GroupNorm', 'LayerNorm', 'SpectralNorm', 'BatchNorm1D',
'BatchNorm2d', 'BatchNorm3d', 'InstanceNorm1d', 'InstanceNorm2d', 'BatchNorm2D', 'BatchNorm3D', 'InstanceNorm1D', 'InstanceNorm2D',
'InstanceNorm3d', 'SyncBatchNorm', 'LocalResponseNorm' 'InstanceNorm3D', 'SyncBatchNorm', 'LocalResponseNorm'
] ]
class _InstanceNormBase(layers.Layer): class _InstanceNormBase(layers.Layer):
""" """
This class is the base class for InstanceNorm1d, 2d and 3d. This class is the base class for InstanceNorm1D, 2D and 3D.
See InstanceNorm1d, InstanceNorm2d or InstanceNorm3d for more details. See InstanceNorm1D, InstanceNorm2D or InstanceNorm3D for more details.
""" """
def __init__(self, def __init__(self,
...@@ -109,7 +109,7 @@ class _InstanceNormBase(layers.Layer): ...@@ -109,7 +109,7 @@ class _InstanceNormBase(layers.Layer):
input, weight=self.scale, bias=self.bias, eps=self._epsilon) input, weight=self.scale, bias=self.bias, eps=self._epsilon)
class InstanceNorm1d(_InstanceNormBase): class InstanceNorm1D(_InstanceNormBase):
""" """
Applies Instance Normalization over a 3D input (a mini-batch of 1D inputs with additional channel dimension) as described in the paper Instance Normalization: The Missing Ingredient for Fast Stylization . Applies Instance Normalization over a 3D input (a mini-batch of 1D inputs with additional channel dimension) as described in the paper Instance Normalization: The Missing Ingredient for Fast Stylization .
...@@ -174,7 +174,7 @@ class InstanceNorm1d(_InstanceNormBase): ...@@ -174,7 +174,7 @@ class InstanceNorm1d(_InstanceNormBase):
np.random.seed(123) np.random.seed(123)
x_data = np.random.random(size=(2, 2, 3)).astype('float32') x_data = np.random.random(size=(2, 2, 3)).astype('float32')
x = paddle.to_tensor(x_data) x = paddle.to_tensor(x_data)
instance_norm = paddle.nn.InstanceNorm1d(2) instance_norm = paddle.nn.InstanceNorm1D(2)
instance_norm_out = instance_norm(x) instance_norm_out = instance_norm(x)
print(instance_norm_out.numpy()) print(instance_norm_out.numpy())
...@@ -187,7 +187,7 @@ class InstanceNorm1d(_InstanceNormBase): ...@@ -187,7 +187,7 @@ class InstanceNorm1d(_InstanceNormBase):
len(input.shape))) len(input.shape)))
class InstanceNorm2d(_InstanceNormBase): class InstanceNorm2D(_InstanceNormBase):
""" """
Applies Instance Normalization over a 4D input (a mini-batch of 2D inputs with additional channel dimension) as described in the paper Instance Normalization: The Missing Ingredient for Fast Stylization . Applies Instance Normalization over a 4D input (a mini-batch of 2D inputs with additional channel dimension) as described in the paper Instance Normalization: The Missing Ingredient for Fast Stylization .
...@@ -251,7 +251,7 @@ class InstanceNorm2d(_InstanceNormBase): ...@@ -251,7 +251,7 @@ class InstanceNorm2d(_InstanceNormBase):
np.random.seed(123) np.random.seed(123)
x_data = np.random.random(size=(2, 2, 2, 3)).astype('float32') x_data = np.random.random(size=(2, 2, 2, 3)).astype('float32')
x = paddle.to_tensor(x_data) x = paddle.to_tensor(x_data)
instance_norm = paddle.nn.InstanceNorm2d(2) instance_norm = paddle.nn.InstanceNorm2D(2)
instance_norm_out = instance_norm(x) instance_norm_out = instance_norm(x)
print(instance_norm_out.numpy()) print(instance_norm_out.numpy())
...@@ -263,7 +263,7 @@ class InstanceNorm2d(_InstanceNormBase): ...@@ -263,7 +263,7 @@ class InstanceNorm2d(_InstanceNormBase):
len(input.shape))) len(input.shape)))
class InstanceNorm3d(_InstanceNormBase): class InstanceNorm3D(_InstanceNormBase):
""" """
Applies Instance Normalization over a 5D input (a mini-batch of 3D inputs with additional channel dimension) as described in the paper Instance Normalization: The Missing Ingredient for Fast Stylization . Applies Instance Normalization over a 5D input (a mini-batch of 3D inputs with additional channel dimension) as described in the paper Instance Normalization: The Missing Ingredient for Fast Stylization .
...@@ -327,7 +327,7 @@ class InstanceNorm3d(_InstanceNormBase): ...@@ -327,7 +327,7 @@ class InstanceNorm3d(_InstanceNormBase):
np.random.seed(123) np.random.seed(123)
x_data = np.random.random(size=(2, 2, 2, 2, 3)).astype('float32') x_data = np.random.random(size=(2, 2, 2, 2, 3)).astype('float32')
x = paddle.to_tensor(x_data) x = paddle.to_tensor(x_data)
instance_norm = paddle.nn.InstanceNorm3d(2) instance_norm = paddle.nn.InstanceNorm3D(2)
instance_norm_out = instance_norm(x) instance_norm_out = instance_norm(x)
print(instance_norm_out.numpy()) print(instance_norm_out.numpy())
...@@ -671,7 +671,7 @@ class _BatchNormBase(layers.Layer): ...@@ -671,7 +671,7 @@ class _BatchNormBase(layers.Layer):
data_format=self._data_format) data_format=self._data_format)
class BatchNorm1d(_BatchNormBase): class BatchNorm1D(_BatchNormBase):
""" """
Applies Batch Normalization over a 2D or 3D input (a mini-batch of 1D inputs with additional channel dimension) as described in the paper Batch Normalization: Accelerating Deep Network Training by Reducing Internal Covariate Shift. Applies Batch Normalization over a 2D or 3D input (a mini-batch of 1D inputs with additional channel dimension) as described in the paper Batch Normalization: Accelerating Deep Network Training by Reducing Internal Covariate Shift.
...@@ -747,7 +747,7 @@ class BatchNorm1d(_BatchNormBase): ...@@ -747,7 +747,7 @@ class BatchNorm1d(_BatchNormBase):
np.random.seed(123) np.random.seed(123)
x_data = np.random.random(size=(2, 1, 3)).astype('float32') x_data = np.random.random(size=(2, 1, 3)).astype('float32')
x = paddle.to_tensor(x_data) x = paddle.to_tensor(x_data)
batch_norm = paddle.nn.BatchNorm1d(1) batch_norm = paddle.nn.BatchNorm1D(1)
batch_norm_out = batch_norm(x) batch_norm_out = batch_norm(x)
print(batch_norm_out.numpy()) print(batch_norm_out.numpy())
...@@ -768,7 +768,7 @@ class BatchNorm1d(_BatchNormBase): ...@@ -768,7 +768,7 @@ class BatchNorm1d(_BatchNormBase):
len(input.shape))) len(input.shape)))
class BatchNorm2d(_BatchNormBase): class BatchNorm2D(_BatchNormBase):
""" """
Applies Batch Normalization over a 4D input (a mini-batch of 2D inputs with additional channel dimension) as described in the paper Batch Normalization: Accelerating Deep Network Training by Reducing Internal Covariate Shift. Applies Batch Normalization over a 4D input (a mini-batch of 2D inputs with additional channel dimension) as described in the paper Batch Normalization: Accelerating Deep Network Training by Reducing Internal Covariate Shift.
...@@ -843,7 +843,7 @@ class BatchNorm2d(_BatchNormBase): ...@@ -843,7 +843,7 @@ class BatchNorm2d(_BatchNormBase):
np.random.seed(123) np.random.seed(123)
x_data = np.random.random(size=(2, 1, 2, 3)).astype('float32') x_data = np.random.random(size=(2, 1, 2, 3)).astype('float32')
x = paddle.to_tensor(x_data) x = paddle.to_tensor(x_data)
batch_norm = paddle.nn.BatchNorm2d(1) batch_norm = paddle.nn.BatchNorm2D(1)
batch_norm_out = batch_norm(x) batch_norm_out = batch_norm(x)
print(batch_norm_out.numpy()) print(batch_norm_out.numpy())
...@@ -863,7 +863,7 @@ class BatchNorm2d(_BatchNormBase): ...@@ -863,7 +863,7 @@ class BatchNorm2d(_BatchNormBase):
len(input.shape))) len(input.shape)))
class BatchNorm3d(_BatchNormBase): class BatchNorm3D(_BatchNormBase):
""" """
Applies Batch Normalization over a 5D input (a mini-batch of 3D inputs with additional channel dimension) as described in the paper Batch Normalization: Accelerating Deep Network Training by Reducing Internal Covariate Shift. Applies Batch Normalization over a 5D input (a mini-batch of 3D inputs with additional channel dimension) as described in the paper Batch Normalization: Accelerating Deep Network Training by Reducing Internal Covariate Shift.
...@@ -938,7 +938,7 @@ class BatchNorm3d(_BatchNormBase): ...@@ -938,7 +938,7 @@ class BatchNorm3d(_BatchNormBase):
np.random.seed(123) np.random.seed(123)
x_data = np.random.random(size=(2, 1, 2, 2, 3)).astype('float32') x_data = np.random.random(size=(2, 1, 2, 2, 3)).astype('float32')
x = paddle.to_tensor(x_data) x = paddle.to_tensor(x_data)
batch_norm = paddle.nn.BatchNorm3d(1) batch_norm = paddle.nn.BatchNorm3D(1)
batch_norm_out = batch_norm(x) batch_norm_out = batch_norm(x)
print(batch_norm_out.numpy()) print(batch_norm_out.numpy())
...@@ -1141,7 +1141,7 @@ class SyncBatchNorm(_BatchNormBase): ...@@ -1141,7 +1141,7 @@ class SyncBatchNorm(_BatchNormBase):
import paddle.nn as nn import paddle.nn as nn
paddle.disable_static() paddle.disable_static()
model = nn.Sequential(nn.Conv2d(3, 5, 3), nn.BatchNorm2d(5)) model = nn.Sequential(nn.Conv2D(3, 5, 3), nn.BatchNorm2D(5))
sync_model = nn.SyncBatchNorm.convert_sync_batchnorm(model) sync_model = nn.SyncBatchNorm.convert_sync_batchnorm(model)
""" """
......
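The 1D/2D/3D variants above differ only in the input rank they accept; a minimal sketch with a rank-4 NCHW tensor (dynamic mode assumed):

.. code-block:: python

    import numpy as np
    import paddle

    np.random.seed(123)
    x4d = paddle.to_tensor(
        np.random.random(size=(2, 2, 2, 3)).astype('float32'))

    # a rank-4 input pairs with the 2D variants
    print(paddle.nn.InstanceNorm2D(2)(x4d).shape)   # [2, 2, 2, 3]
    print(paddle.nn.BatchNorm2D(2)(x4d).shape)      # [2, 2, 2, 3]

    # the same tensor fed to InstanceNorm3D raises a ValueError,
    # since the 3D variant expects a rank-5 NCDHW input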
...@@ -17,22 +17,22 @@ from ...fluid.layer_helper import LayerHelper ...@@ -17,22 +17,22 @@ from ...fluid.layer_helper import LayerHelper
from .. import functional as F from .. import functional as F
__all__ = [ __all__ = [
'AvgPool1d', 'AvgPool1D',
'AvgPool2d', 'AvgPool2D',
'AvgPool3d', 'AvgPool3D',
'MaxPool1d', 'MaxPool1D',
'MaxPool2d', 'MaxPool2D',
'MaxPool3d', 'MaxPool3D',
'AdaptiveAvgPool1d', 'AdaptiveAvgPool1D',
'AdaptiveAvgPool2d', 'AdaptiveAvgPool2D',
'AdaptiveAvgPool3d', 'AdaptiveAvgPool3D',
'AdaptiveMaxPool1d', 'AdaptiveMaxPool1D',
'AdaptiveMaxPool2d', 'AdaptiveMaxPool2D',
'AdaptiveMaxPool3d', 'AdaptiveMaxPool3D',
] ]
class AvgPool1d(layers.Layer): class AvgPool1D(layers.Layer):
""" """
This operation applies a 1D average pooling over an input signal composed This operation applies a 1D average pooling over an input signal composed
of several input planes, based on the input and the kernel_size, stride and padding parameters. of several input planes, based on the input and the kernel_size, stride and padding parameters.
...@@ -93,8 +93,8 @@ class AvgPool1d(layers.Layer): ...@@ -93,8 +93,8 @@ class AvgPool1d(layers.Layer):
paddle.disable_static() paddle.disable_static()
data = paddle.to_tensor(np.random.uniform(-1, 1, [1, 3, 32]).astype(np.float32)) data = paddle.to_tensor(np.random.uniform(-1, 1, [1, 3, 32]).astype(np.float32))
AvgPool1d = nn.AvgPool1d(kernel_size=2, stride=2, padding=0) AvgPool1D = nn.AvgPool1D(kernel_size=2, stride=2, padding=0)
pool_out = AvgPool1d(data) pool_out = AvgPool1D(data)
# pool_out shape: [1, 3, 16] # pool_out shape: [1, 3, 16]
""" """
...@@ -106,7 +106,7 @@ class AvgPool1d(layers.Layer): ...@@ -106,7 +106,7 @@ class AvgPool1d(layers.Layer):
count_include_pad=True, count_include_pad=True,
ceil_mode=False, ceil_mode=False,
name=None): name=None):
super(AvgPool1d, self).__init__() super(AvgPool1D, self).__init__()
self.kernel_size = kernel_size self.kernel_size = kernel_size
self.stride = stride self.stride = stride
self.padding = padding self.padding = padding
...@@ -120,7 +120,7 @@ class AvgPool1d(layers.Layer): ...@@ -120,7 +120,7 @@ class AvgPool1d(layers.Layer):
return out return out
class AvgPool2d(layers.Layer): class AvgPool2D(layers.Layer):
""" """
This operation applies 2D average pooling over input features based on the input, This operation applies 2D average pooling over input features based on the input,
and kernel_size, stride, padding parameters. Input(X) and Output(Out) are and kernel_size, stride, padding parameters. Input(X) and Output(Out) are
...@@ -185,7 +185,7 @@ class AvgPool2d(layers.Layer): ...@@ -185,7 +185,7 @@ class AvgPool2d(layers.Layer):
# avg pool2d # avg pool2d
input = paddle.to_tensor(np.random.uniform(-1, 1, [1, 3, 32, 32]).astype(np.float32)) input = paddle.to_tensor(np.random.uniform(-1, 1, [1, 3, 32, 32]).astype(np.float32))
AvgPool2d = nn.AvgPool2d(kernel_size=2, AvgPool2D = nn.AvgPool2D(kernel_size=2,
stride=2, padding=0) stride=2, padding=0)
output = AvgPool2d(input) output = AvgPool2D(input)
# output.shape [1, 3, 16, 16] # output.shape [1, 3, 16, 16]
...@@ -201,7 +201,7 @@ class AvgPool2d(layers.Layer): ...@@ -201,7 +201,7 @@ class AvgPool2d(layers.Layer):
divisor_override=None, divisor_override=None,
data_format="NCHW", data_format="NCHW",
name=None): name=None):
super(AvgPool2d, self).__init__() super(AvgPool2D, self).__init__()
self.ksize = kernel_size self.ksize = kernel_size
self.stride = stride self.stride = stride
self.padding = padding self.padding = padding
...@@ -224,7 +224,7 @@ class AvgPool2d(layers.Layer): ...@@ -224,7 +224,7 @@ class AvgPool2d(layers.Layer):
name=self.name) name=self.name)
class AvgPool3d(layers.Layer): class AvgPool3D(layers.Layer):
""" """
This operation applies 3D average pooling over input features based on the input, This operation applies 3D average pooling over input features based on the input,
and kernel_size, stride, padding parameters. Input(X) and Output(Out) are and kernel_size, stride, padding parameters. Input(X) and Output(Out) are
...@@ -277,9 +277,9 @@ class AvgPool3d(layers.Layer): ...@@ -277,9 +277,9 @@ class AvgPool3d(layers.Layer):
# avg pool3d # avg pool3d
input = paddle.to_tensor(np.random.uniform(-1, 1, [1, 2, 3, 32, 32]).astype(np.float32)) input = paddle.to_tensor(np.random.uniform(-1, 1, [1, 2, 3, 32, 32]).astype(np.float32))
AvgPool3d = nn.AvgPool3d(kernel_size=2, AvgPool3D = nn.AvgPool3D(kernel_size=2,
stride=2, padding=0) stride=2, padding=0)
output = AvgPool3d(input) output = AvgPool3D(input)
# output.shape [1, 2, 3, 16, 16] # output.shape [1, 2, 3, 16, 16]
""" """
...@@ -293,7 +293,7 @@ class AvgPool3d(layers.Layer): ...@@ -293,7 +293,7 @@ class AvgPool3d(layers.Layer):
divisor_override=None, divisor_override=None,
data_format="NCDHW", data_format="NCDHW",
name=None): name=None):
super(AvgPool3d, self).__init__() super(AvgPool3D, self).__init__()
self.ksize = kernel_size self.ksize = kernel_size
self.stride = stride self.stride = stride
self.padding = padding self.padding = padding
...@@ -316,7 +316,7 @@ class AvgPool3d(layers.Layer): ...@@ -316,7 +316,7 @@ class AvgPool3d(layers.Layer):
name=self.name) name=self.name)
class MaxPool1d(layers.Layer): class MaxPool1D(layers.Layer):
""" """
Applies a 1D max pooling over an input signal composed of several input planes based Applies a 1D max pooling over an input signal composed of several input planes based
on the input and the kernel_size, stride, padding and return_indices parameters. on the input and the kernel_size, stride, padding and return_indices parameters.
...@@ -373,12 +373,12 @@ class MaxPool1d(layers.Layer): ...@@ -373,12 +373,12 @@ class MaxPool1d(layers.Layer):
paddle.disable_static() paddle.disable_static()
data = paddle.to_tensor(np.random.uniform(-1, 1, [1, 3, 32]).astype(np.float32)) data = paddle.to_tensor(np.random.uniform(-1, 1, [1, 3, 32]).astype(np.float32))
MaxPool1d = nn.MaxPool1d(kernel_size=2, stride=2, padding=0) MaxPool1D = nn.MaxPool1D(kernel_size=2, stride=2, padding=0)
pool_out = MaxPool1d(data) pool_out = MaxPool1D(data)
# pool_out shape: [1, 3, 16] # pool_out shape: [1, 3, 16]
MaxPool1d = nn.MaxPool1d(kernel_size=2, stride=2, padding=0, return_indices=True) MaxPool1D = nn.MaxPool1D(kernel_size=2, stride=2, padding=0, return_indices=True)
pool_out, indices = MaxPool1d(data) pool_out, indices = MaxPool1D(data)
# pool_out shape: [1, 3, 16], indices shape: [1, 3, 16] # pool_out shape: [1, 3, 16], indices shape: [1, 3, 16]
""" """
...@@ -390,7 +390,7 @@ class MaxPool1d(layers.Layer): ...@@ -390,7 +390,7 @@ class MaxPool1d(layers.Layer):
return_indices=False, return_indices=False,
ceil_mode=False, ceil_mode=False,
name=None): name=None):
super(MaxPool1d, self).__init__() super(MaxPool1D, self).__init__()
self.kernel_size = kernel_size self.kernel_size = kernel_size
self.stride = stride self.stride = stride
self.padding = padding self.padding = padding
...@@ -404,7 +404,7 @@ class MaxPool1d(layers.Layer): ...@@ -404,7 +404,7 @@ class MaxPool1d(layers.Layer):
return out return out
-class MaxPool2d(layers.Layer):
+class MaxPool2D(layers.Layer):
     """
     This operation applies 2D max pooling over input feature based on the input,
     and kernel_size, stride, padding parameters. Input(X) and Output(Out) are
@@ -468,14 +468,14 @@ class MaxPool2d(layers.Layer):
            # max pool2d
            input = paddle.to_tensor(np.random.uniform(-1, 1, [1, 3, 32, 32]).astype(np.float32))
-           MaxPool2d = nn.MaxPool2d(kernel_size=2,
+           MaxPool2D = nn.MaxPool2D(kernel_size=2,
                                     stride=2, padding=0)
-           output = MaxPool2d(input)
+           output = MaxPool2D(input)
            # output.shape [1, 3, 16, 16]
            # for return_indices=True
-           MaxPool2d = nn.MaxPool2d(kernel_size=2, stride=2, padding=0, return_indices=True)
-           output, max_indices = MaxPool2d(input)
+           MaxPool2D = nn.MaxPool2D(kernel_size=2, stride=2, padding=0, return_indices=True)
+           output, max_indices = MaxPool2D(input)
            # output.shape [1, 3, 16, 16], max_indices.shape [1, 3, 16, 16]
     """
@@ -487,7 +487,7 @@ class MaxPool2d(layers.Layer):
                  ceil_mode=False,
                  data_format="NCHW",
                  name=None):
-        super(MaxPool2d, self).__init__()
+        super(MaxPool2D, self).__init__()
        self.ksize = kernel_size
        self.stride = stride
        self.padding = padding
@@ -507,7 +507,7 @@ class MaxPool2d(layers.Layer):
                      name=self.name)
-class MaxPool3d(layers.Layer):
+class MaxPool3D(layers.Layer):
     """
     This operation applies 3D max pooling over input features based on the input,
     and kernel_size, stride, padding parameters. Input(X) and Output(Out) are
@@ -559,14 +559,14 @@ class MaxPool3d(layers.Layer):
            # max pool3d
            input = paddle.to_tensor(np.random.uniform(-1, 1, [1, 2, 3, 32, 32]).astype(np.float32))
-           MaxPool3d = nn.MaxPool3d(kernel_size=2,
+           MaxPool3D = nn.MaxPool3D(kernel_size=2,
                                     stride=2, padding=0)
-           output = MaxPool3d(input)
+           output = MaxPool3D(input)
            # output.shape [1, 2, 3, 16, 16]
            # for return_indices=True
-           MaxPool3d = nn.MaxPool3d(kernel_size=2, stride=2, padding=0, return_indices=True)
-           output, max_indices = MaxPool3d(input)
+           MaxPool3D = nn.MaxPool3D(kernel_size=2, stride=2, padding=0, return_indices=True)
+           output, max_indices = MaxPool3D(input)
            # output.shape [1, 2, 3, 16, 16], max_indices.shape [1, 2, 3, 16, 16]
     """
@@ -578,7 +578,7 @@ class MaxPool3d(layers.Layer):
                  ceil_mode=False,
                  data_format="NCDHW",
                  name=None):
-        super(MaxPool3d, self).__init__()
+        super(MaxPool3D, self).__init__()
        self.ksize = kernel_size
        self.stride = stride
        self.padding = padding
@@ -598,7 +598,7 @@ class MaxPool3d(layers.Layer):
                      name=self.name)
-class AdaptiveAvgPool1d(layers.Layer):
+class AdaptiveAvgPool1D(layers.Layer):
     """
     This operation applies a 1D adaptive average pooling over an input signal composed
@@ -653,13 +653,13 @@ class AdaptiveAvgPool1d(layers.Layer):
            paddle.disable_static()
            data = paddle.to_tensor(np.random.uniform(-1, 1, [1, 3, 32]).astype(np.float32))
-           AdaptiveAvgPool1d = nn.AdaptiveAvgPool1d(output_size=16)
-           pool_out = AdaptiveAvgPool1d(data)
+           AdaptiveAvgPool1D = nn.AdaptiveAvgPool1D(output_size=16)
+           pool_out = AdaptiveAvgPool1D(data)
            # pool_out shape: [1, 3, 16]
     """
     def __init__(self, output_size, name=None):
-        super(AdaptiveAvgPool1d, self).__init__()
+        super(AdaptiveAvgPool1D, self).__init__()
        self.output_size = output_size
        self.name = name
@@ -667,7 +667,7 @@ class AdaptiveAvgPool1d(layers.Layer):
        return F.adaptive_avg_pool1d(input, self.output_size, self.name)
-class AdaptiveAvgPool2d(layers.Layer):
+class AdaptiveAvgPool2D(layers.Layer):
     """
     This operation applies 2D adaptive avg pooling on input tensor. The h and w dimensions
@@ -704,7 +704,7 @@ class AdaptiveAvgPool2d(layers.Layer):
        output (Tensor): The output tensor of adaptive avg pool2d operator, which is a 4-D tensor. The data type is same as input x.
     Returns:
-        A callable object of AdaptiveAvgPool2d.
+        A callable object of AdaptiveAvgPool2D.
     Examples:
        .. code-block:: python
@@ -730,13 +730,13 @@ class AdaptiveAvgPool2d(layers.Layer):
            input_data = np.random.rand(2, 3, 32, 32)
            x = paddle.to_tensor(input_data)
            # x.shape is [2, 3, 32, 32]
-           adaptive_avg_pool = paddle.nn.AdaptiveAvgPool2d(output_size=3)
+           adaptive_avg_pool = paddle.nn.AdaptiveAvgPool2D(output_size=3)
            pool_out = adaptive_avg_pool(x=x)
            # pool_out.shape is [2, 3, 3, 3]
     """
     def __init__(self, output_size, data_format="NCHW", name=None):
-        super(AdaptiveAvgPool2d, self).__init__()
+        super(AdaptiveAvgPool2D, self).__init__()
        self._output_size = output_size
        self._data_format = data_format
        self._name = name
@@ -749,7 +749,7 @@ class AdaptiveAvgPool2d(layers.Layer):
                      name=self._name)
-class AdaptiveAvgPool3d(layers.Layer):
+class AdaptiveAvgPool3D(layers.Layer):
     """
     This operation applies 3D adaptive avg pooling on input tensor. The h and w dimensions
@@ -789,7 +789,7 @@ class AdaptiveAvgPool3d(layers.Layer):
        output (Tensor): The output tensor of adaptive avg pool3d operator, which is a 5-D tensor. The data type is same as input x.
     Returns:
-        A callable object of AdaptiveAvgPool3d.
+        A callable object of AdaptiveAvgPool3D.
     Examples:
        .. code-block:: python
@@ -818,13 +818,13 @@ class AdaptiveAvgPool3d(layers.Layer):
            input_data = np.random.rand(2, 3, 8, 32, 32)
            x = paddle.to_tensor(input_data)
            # x.shape is [2, 3, 8, 32, 32]
-           adaptive_avg_pool = paddle.nn.AdaptiveAvgPool3d(output_size=3)
+           adaptive_avg_pool = paddle.nn.AdaptiveAvgPool3D(output_size=3)
            pool_out = adaptive_avg_pool(x=x)
            # pool_out.shape is [2, 3, 3, 3, 3]
     """
     def __init__(self, output_size, data_format="NCDHW", name=None):
-        super(AdaptiveAvgPool3d, self).__init__()
+        super(AdaptiveAvgPool3D, self).__init__()
        self._output_size = output_size
        self._data_format = data_format
        self._name = name
@@ -837,7 +837,7 @@ class AdaptiveAvgPool3d(layers.Layer):
                      name=self._name)
-class AdaptiveMaxPool1d(layers.Layer):
+class AdaptiveMaxPool1D(layers.Layer):
     """
     This operation applies a 1D adaptive max pooling over an input signal composed
@@ -894,19 +894,19 @@ class AdaptiveMaxPool1d(layers.Layer):
            paddle.disable_static()
            data = paddle.to_tensor(np.random.uniform(-1, 1, [1, 3, 32]).astype(np.float32))
-           AdaptiveMaxPool1d = nn.AdaptiveMaxPool1d(output_size=16)
-           pool_out = AdaptiveMaxPool1d(data)
+           AdaptiveMaxPool1D = nn.AdaptiveMaxPool1D(output_size=16)
+           pool_out = AdaptiveMaxPool1D(data)
            # pool_out shape: [1, 3, 16]
            # for return_indices=True
-           AdaptiveMaxPool1d = nn.AdaptiveMaxPool1d(output_size=16, return_indices=True)
-           pool_out, indices = AdaptiveMaxPool1d(data)
+           AdaptiveMaxPool1D = nn.AdaptiveMaxPool1D(output_size=16, return_indices=True)
+           pool_out, indices = AdaptiveMaxPool1D(data)
            # pool_out shape: [1, 3, 16], indices shape: [1, 3, 16]
     """
     def __init__(self, output_size, return_indices=False, name=None):
-        super(AdaptiveMaxPool1d, self).__init__()
+        super(AdaptiveMaxPool1D, self).__init__()
        self.output_size = output_size
        self.return_indices = return_indices
        self.name = name
@@ -916,7 +916,7 @@ class AdaptiveMaxPool1d(layers.Layer):
                                      self.return_indices, self.name)
-class AdaptiveMaxPool2d(layers.Layer):
+class AdaptiveMaxPool2D(layers.Layer):
     """
     This operation applies 2D adaptive max pooling on input tensor. The h and w dimensions
     of the output tensor are determined by the parameter output_size. The difference between adaptive pooling and pooling is that the adaptive one focuses on the output size.
@@ -941,7 +941,7 @@ class AdaptiveMaxPool2d(layers.Layer):
        output (Tensor): The output tensor of adaptive max pool2d operator, which is a 4-D tensor. The data type is same as input x.
     Returns:
-        A callable object of AdaptiveMaxPool2d.
+        A callable object of AdaptiveMaxPool2D.
     Examples:
        .. code-block:: python
@@ -965,12 +965,12 @@ class AdaptiveMaxPool2d(layers.Layer):
            paddle.disable_static()
            input_data = np.random.rand(2, 3, 32, 32)
            x = paddle.to_tensor(input_data)
-           adaptive_max_pool = paddle.nn.AdaptiveMaxPool2d(output_size=3, return_indices=True)
+           adaptive_max_pool = paddle.nn.AdaptiveMaxPool2D(output_size=3, return_indices=True)
            pool_out, indices = adaptive_max_pool(x=x)
     """
     def __init__(self, output_size, return_indices=False, name=None):
-        super(AdaptiveMaxPool2d, self).__init__()
+        super(AdaptiveMaxPool2D, self).__init__()
        self._output_size = output_size
        self._return_indices = return_indices
        self._name = name
@@ -983,7 +983,7 @@ class AdaptiveMaxPool2d(layers.Layer):
                      name=self._name)
-class AdaptiveMaxPool3d(layers.Layer):
+class AdaptiveMaxPool3D(layers.Layer):
     """
     This operation applies 3D adaptive max pooling on input tensor. The h and w dimensions
     of the output tensor are determined by the parameter output_size. The difference between adaptive pooling and pooling is that the adaptive one focuses on the output size.
@@ -1010,7 +1010,7 @@ class AdaptiveMaxPool3d(layers.Layer):
        x (Tensor): The input tensor of adaptive max pool3d operator, which is a 5-D tensor. The data type can be float32, float64.
        output (Tensor): The output tensor of adaptive max pool3d operator, which is a 5-D tensor. The data type is same as input x.
     Returns:
-        A callable object of AdaptiveMaxPool3d.
+        A callable object of AdaptiveMaxPool3D.
     Examples:
        .. code-block:: python
@@ -1037,17 +1037,17 @@ class AdaptiveMaxPool3d(layers.Layer):
            paddle.disable_static()
            input_data = np.random.rand(2, 3, 8, 32, 32)
            x = paddle.to_tensor(input_data)
-           pool = paddle.nn.AdaptiveMaxPool3d(output_size=4)
+           pool = paddle.nn.AdaptiveMaxPool3D(output_size=4)
            out = pool(x)
            # out shape: [2, 3, 4, 4, 4]
-           pool = paddle.nn.AdaptiveMaxPool3d(output_size=3, return_indices=True)
+           pool = paddle.nn.AdaptiveMaxPool3D(output_size=3, return_indices=True)
            out, indices = pool(x)
            # out shape: [2, 3, 3, 3, 3], indices shape: [2, 3, 3, 3, 3]
     """
     def __init__(self, output_size, return_indices=False, name=None):
-        super(AdaptiveMaxPool3d, self).__init__()
+        super(AdaptiveMaxPool3D, self).__init__()
        self._output_size = output_size
        self._return_indices = return_indices
        self._name = name
......
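Taken together, the pooling renames above are mechanical: only the trailing "d" becomes "D", while constructor arguments are unchanged. A minimal sketch of the renamed 2D layers, assuming the 2.0rc package layout shown in this diff:

.. code-block:: python

    import numpy as np
    import paddle
    import paddle.nn as nn

    paddle.disable_static()
    x = paddle.to_tensor(np.random.uniform(-1, 1, [1, 3, 32, 32]).astype(np.float32))

    # Fixed-window pooling: output spatial size = input size / stride.
    pool = nn.MaxPool2D(kernel_size=2, stride=2, padding=0, return_indices=True)
    out, indices = pool(x)            # out.shape: [1, 3, 16, 16]

    # Adaptive pooling: the output size is specified directly instead.
    adaptive = nn.AdaptiveAvgPool2D(output_size=3)
    out2 = adaptive(x)                # out2.shape: [1, 3, 3, 3]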
@@ -61,11 +61,11 @@ class L1Decay(fluid.regularizer.L1Decay):
            # Example2: set Regularizer in parameters
            # Set L1 regularization in parameters.
            # Global regularizer does not take effect on my_conv2d for this case.
-           from paddle.nn import Conv2d
+           from paddle.nn import Conv2D
            from paddle import ParamAttr
            from paddle.regularizer import L2Decay
-           my_conv2d = Conv2d(
+           my_conv2d = Conv2D(
                in_channels=10,
                out_channels=10,
                kernel_size=1,
@@ -123,11 +123,11 @@ class L2Decay(fluid.regularizer.L2Decay):
            # Example2: set Regularizer in parameters
            # Set L2 regularization in parameters.
            # Global regularizer does not take effect on my_conv2d for this case.
-           from paddle.nn import Conv2d
+           from paddle.nn import Conv2D
            from paddle import ParamAttr
            from paddle.regularizer import L2Decay
-           my_conv2d = Conv2d(
+           my_conv2d = Conv2D(
                in_channels=10,
                out_channels=10,
                kernel_size=1,
......
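The `Conv2D` examples in these docstrings are truncated by the diff viewer; presumably they go on to attach the regularizer through `weight_attr`, which is what makes the global regularizer ineffective for this layer. A minimal completed sketch of that pattern, assuming `ParamAttr(regularizer=...)` as the attachment point (the `coeff` value is illustrative only):

.. code-block:: python

    import paddle
    from paddle.nn import Conv2D
    from paddle import ParamAttr
    from paddle.regularizer import L2Decay

    # Per-parameter regularization: a regularizer set globally on the
    # optimizer would not take effect on this layer's weight.
    my_conv2d = Conv2D(
        in_channels=10,
        out_channels=10,
        kernel_size=1,
        weight_attr=ParamAttr(regularizer=L2Decay(coeff=0.01)))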
@@ -59,13 +59,13 @@ def bernoulli(x, name=None):
            import paddle
-           paddle.manual_seed(100) # on CPU device
+           paddle.seed(100) # on CPU device
            x = paddle.rand([2, 3])
            print(x.numpy())
            # [[0.5535528  0.20714243 0.01162981]
            #  [0.51577556 0.36369765 0.2609165 ]]
-           paddle.manual_seed(200) # on CPU device
+           paddle.seed(200) # on CPU device
            out = paddle.bernoulli(x)
            print(out.numpy())
            # [[0. 0. 0.]
@@ -110,13 +110,13 @@ def multinomial(x, num_samples=1, replacement=False, name=None):
            import paddle
-           paddle.manual_seed(100) # on CPU device
+           paddle.seed(100) # on CPU device
            x = paddle.rand([2, 4])
            print(x.numpy())
            # [[0.5535528  0.20714243 0.01162981 0.51577556]
            #  [0.36369765 0.2609165  0.18905126 0.5621971 ]]
-           paddle.manual_seed(200) # on CPU device
+           paddle.seed(200) # on CPU device
            out1 = paddle.multinomial(x, num_samples=5, replacement=True)
            print(out1.numpy())
            # [[3 3 0 0 0]
@@ -126,7 +126,7 @@ def multinomial(x, num_samples=1, replacement=False, name=None):
            # InvalidArgumentError: When replacement is False, number of samples
            # should be less than non-zero categories
-           paddle.manual_seed(300) # on CPU device
+           paddle.seed(300) # on CPU device
            out3 = paddle.multinomial(x, num_samples=3)
            print(out3.numpy())
            # [[3 0 1]
......
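Since `manual_seed` is now `seed`, reproducibility code changes in exactly one place. A small sketch of the point these examples rely on, assuming single-process CPU execution as in the docstrings above: reseeding with the same value replays the same draws.

.. code-block:: python

    import paddle

    paddle.seed(100)                  # formerly paddle.manual_seed(100)
    a = paddle.bernoulli(paddle.rand([2, 3]))

    paddle.seed(100)                  # same seed -> identical random stream
    b = paddle.bernoulli(paddle.rand([2, 3]))

    assert (a.numpy() == b.numpy()).all()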
@@ -52,7 +52,7 @@ def set_printoptions(precision=None,
            import paddle
-           paddle.manual_seed(10)
+           paddle.seed(10)
            a = paddle.rand([10, 20])
            paddle.set_printoptions(4, 100, 3)
            print(a)
......
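The positional call `set_printoptions(4, 100, 3)` is easier to read with keywords. The hunk header shows `precision` as the first parameter; assuming `threshold` and `edgeitems` follow it in the signature, an equivalent sketch:

.. code-block:: python

    import paddle

    paddle.seed(10)
    a = paddle.rand([10, 20])
    # Same effect as paddle.set_printoptions(4, 100, 3):
    # 4 significant digits, summarize beyond 100 elements, 3 edge items.
    paddle.set_printoptions(precision=4, threshold=100, edgeitems=3)
    print(a)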
@@ -25,7 +25,7 @@ import tempfile
 import paddle
 from paddle import fluid
 from paddle import to_tensor
-from paddle.nn import Conv2d, Linear, ReLU, Sequential, Softmax
+from paddle.nn import Conv2D, Linear, ReLU, Sequential, Softmax
 from paddle import Model
 from paddle.static import InputSpec
@@ -44,11 +44,11 @@ class LeNetDygraph(paddle.nn.Layer):
        super(LeNetDygraph, self).__init__()
        self.num_classes = num_classes
        self.features = Sequential(
-           Conv2d(
+           Conv2D(
                1, 6, 3, stride=1, padding=1),
            ReLU(),
            paddle.fluid.dygraph.Pool2D(2, 'max', 2),
-           Conv2d(
+           Conv2D(
                6, 16, 5, stride=1, padding=0),
            ReLU(),
            paddle.fluid.dygraph.Pool2D(2, 'max', 2))
@@ -142,7 +142,7 @@ class TestModel(unittest.TestCase):
            cls.test_dataset, places=cls.device, batch_size=64)
        seed = 333
-       paddle.manual_seed(seed)
+       paddle.seed(seed)
        paddle.framework.random._manual_program_seed(seed)
        dy_lenet = LeNetDygraph()
@@ -194,7 +194,7 @@ class TestModel(unittest.TestCase):
    def fit(self, dynamic, num_replicas=None, rank=None):
        fluid.enable_dygraph(self.device) if dynamic else None
        seed = 333
-       paddle.manual_seed(seed)
+       paddle.seed(seed)
        paddle.framework.random._manual_program_seed(seed)
        net = LeNet()
@@ -306,7 +306,7 @@ class MyDataset(Dataset):
 class TestModelFunction(unittest.TestCase):
    def set_seed(self, seed=1024):
-       paddle.manual_seed(seed)
+       paddle.seed(seed)
        paddle.framework.random._manual_program_seed(seed)
    def test_train_batch(self, dynamic=True):
......
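Note how the tests pair the renamed public `paddle.seed` with the private `_manual_program_seed`, which keeps its old name: per the commit message, `Generator`-level internals are deliberately left untouched. The recurring helper, extracted as a sketch:

.. code-block:: python

    import paddle

    def set_seed(seed=1024):
        # Public API: seeds the global/dygraph generator
        # (renamed from paddle.manual_seed in this commit).
        paddle.seed(seed)
        # Private helper: seeds the default static-graph programs;
        # not renamed, since it is not part of the public 2.0 API.
        paddle.framework.random._manual_program_seed(seed)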
@@ -38,14 +38,14 @@ class LeNet(nn.Layer):
        super(LeNet, self).__init__()
        self.num_classes = num_classes
        self.features = nn.Sequential(
-           nn.Conv2d(
+           nn.Conv2D(
                1, 6, 3, stride=1, padding=1),
            nn.ReLU(),
-           nn.MaxPool2d(2, 2),
-           nn.Conv2d(
+           nn.MaxPool2D(2, 2),
+           nn.Conv2D(
                6, 16, 5, stride=1, padding=0),
            nn.ReLU(),
-           nn.MaxPool2d(2, 2))
+           nn.MaxPool2D(2, 2))
        if num_classes > 0:
            self.fc = nn.Sequential(
......
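For the vision models the rename is again confined to layer names; the architectures are unchanged. A usage sketch, assuming this `LeNet` is exported as `paddle.vision.models.LeNet` and keeps its MNIST-shaped input:

.. code-block:: python

    import paddle
    from paddle.vision.models import LeNet

    model = LeNet(num_classes=10)
    x = paddle.rand([1, 1, 28, 28])   # N, C, H, W
    logits = model(x)                 # logits.shape: [1, 10]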
@@ -36,7 +36,7 @@ class ConvBNLayer(nn.Layer):
                 num_groups=1):
        super(ConvBNLayer, self).__init__()
-       self._conv = nn.Conv2d(
+       self._conv = nn.Conv2D(
            in_channels,
            out_channels,
            kernel_size,
@@ -45,7 +45,7 @@ class ConvBNLayer(nn.Layer):
            groups=num_groups,
            bias_attr=False)
-       self._norm_layer = nn.BatchNorm2d(out_channels)
+       self._norm_layer = nn.BatchNorm2D(out_channels)
        self._act = nn.ReLU()
    def forward(self, x):
@@ -214,7 +214,7 @@ class MobileNetV1(nn.Layer):
        self.dwsl.append(dws6)
        if with_pool:
-           self.pool2d_avg = nn.AdaptiveAvgPool2d(1)
+           self.pool2d_avg = nn.AdaptiveAvgPool2D(1)
        if num_classes > 0:
            self.fc = nn.Linear(int(1024 * scale), num_classes)
......
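`ConvBNLayer` is the usual conv-bn-relu building block, now spelled with `Conv2D`/`BatchNorm2D`. A standalone sketch of the same pattern (channel counts are illustrative):

.. code-block:: python

    import paddle
    import paddle.nn as nn

    block = nn.Sequential(
        nn.Conv2D(3, 32, kernel_size=3, stride=2, padding=1, bias_attr=False),
        nn.BatchNorm2D(32),   # the affine bias lives here, so the conv omits it
        nn.ReLU())
    y = block(paddle.rand([1, 3, 224, 224]))   # y.shape: [1, 32, 112, 112]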
@@ -46,11 +46,11 @@ class ConvBNReLU(nn.Sequential):
                 kernel_size=3,
                 stride=1,
                 groups=1,
-                norm_layer=nn.BatchNorm2d):
+                norm_layer=nn.BatchNorm2D):
        padding = (kernel_size - 1) // 2
        super(ConvBNReLU, self).__init__(
-           nn.Conv2d(
+           nn.Conv2D(
                in_planes,
                out_planes,
                kernel_size,
@@ -68,7 +68,7 @@ class InvertedResidual(nn.Layer):
                 oup,
                 stride,
                 expand_ratio,
-                norm_layer=nn.BatchNorm2d):
+                norm_layer=nn.BatchNorm2D):
        super(InvertedResidual, self).__init__()
        self.stride = stride
        assert stride in [1, 2]
@@ -88,7 +88,7 @@ class InvertedResidual(nn.Layer):
                stride=stride,
                groups=hidden_dim,
                norm_layer=norm_layer),
-           nn.Conv2d(
+           nn.Conv2D(
                hidden_dim, oup, 1, 1, 0, bias_attr=False),
            norm_layer(oup),
        ])
@@ -127,7 +127,7 @@ class MobileNetV2(nn.Layer):
        block = InvertedResidual
        round_nearest = 8
-       norm_layer = nn.BatchNorm2d
+       norm_layer = nn.BatchNorm2D
        inverted_residual_setting = [
            [1, 16, 1, 1],
            [6, 24, 2, 2],
@@ -169,7 +169,7 @@ class MobileNetV2(nn.Layer):
        self.features = nn.Sequential(*features)
        if with_pool:
-           self.pool2d_avg = nn.AdaptiveAvgPool2d(1)
+           self.pool2d_avg = nn.AdaptiveAvgPool2D(1)
        if self.num_classes > 0:
            self.classifier = nn.Sequential(
......
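`ConvBNReLU` and `InvertedResidual` take the norm as a `norm_layer` argument, defaulting to the renamed `nn.BatchNorm2D`; anything callable as `norm_layer(num_features)` can be swapped in. A hedged sketch of that injection point (the `conv_bn_relu` helper and the GroupNorm swap are illustrative, not part of this diff):

.. code-block:: python

    import paddle.nn as nn

    def conv_bn_relu(in_planes, out_planes, kernel_size=3, stride=1,
                     norm_layer=nn.BatchNorm2D):
        padding = (kernel_size - 1) // 2
        return nn.Sequential(
            nn.Conv2D(in_planes, out_planes, kernel_size, stride,
                      padding, bias_attr=False),
            norm_layer(out_planes),   # constructed with the channel count
            nn.ReLU())

    block = conv_bn_relu(3, 16)       # default batch norm
    # any callable with the same norm_layer(num_features) shape works:
    block_gn = conv_bn_relu(3, 16, norm_layer=lambda c: nn.GroupNorm(4, c))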
@@ -52,17 +52,17 @@ class BasicBlock(nn.Layer):
                 norm_layer=None):
        super(BasicBlock, self).__init__()
        if norm_layer is None:
-           norm_layer = nn.BatchNorm2d
+           norm_layer = nn.BatchNorm2D
        if dilation > 1:
            raise NotImplementedError(
                "Dilation > 1 not supported in BasicBlock")
-       self.conv1 = nn.Conv2d(
+       self.conv1 = nn.Conv2D(
            inplanes, planes, 3, padding=1, stride=stride, bias_attr=False)
        self.bn1 = norm_layer(planes)
        self.relu = nn.ReLU()
-       self.conv2 = nn.Conv2d(planes, planes, 3, padding=1, bias_attr=False)
+       self.conv2 = nn.Conv2D(planes, planes, 3, padding=1, bias_attr=False)
        self.bn2 = norm_layer(planes)
        self.downsample = downsample
        self.stride = stride
@@ -101,13 +101,13 @@ class BottleneckBlock(nn.Layer):
                 norm_layer=None):
        super(BottleneckBlock, self).__init__()
        if norm_layer is None:
-           norm_layer = nn.BatchNorm2d
+           norm_layer = nn.BatchNorm2D
        width = int(planes * (base_width / 64.)) * groups
-       self.conv1 = nn.Conv2d(inplanes, width, 1, bias_attr=False)
+       self.conv1 = nn.Conv2D(inplanes, width, 1, bias_attr=False)
        self.bn1 = norm_layer(width)
-       self.conv2 = nn.Conv2d(
+       self.conv2 = nn.Conv2D(
            width,
            width,
            3,
@@ -118,7 +118,7 @@ class BottleneckBlock(nn.Layer):
            bias_attr=False)
        self.bn2 = norm_layer(width)
-       self.conv3 = nn.Conv2d(
+       self.conv3 = nn.Conv2D(
            width, planes * self.expansion, 1, bias_attr=False)
        self.bn3 = norm_layer(planes * self.expansion)
        self.relu = nn.ReLU()
@@ -183,12 +183,12 @@ class ResNet(nn.Layer):
        layers = layer_cfg[depth]
        self.num_classes = num_classes
        self.with_pool = with_pool
-       self._norm_layer = nn.BatchNorm2d
+       self._norm_layer = nn.BatchNorm2D
        self.inplanes = 64
        self.dilation = 1
-       self.conv1 = nn.Conv2d(
+       self.conv1 = nn.Conv2D(
            3,
            self.inplanes,
            kernel_size=7,
@@ -197,13 +197,13 @@ class ResNet(nn.Layer):
            bias_attr=False)
        self.bn1 = self._norm_layer(self.inplanes)
        self.relu = nn.ReLU()
-       self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
+       self.maxpool = nn.MaxPool2D(kernel_size=3, stride=2, padding=1)
        self.layer1 = self._make_layer(block, 64, layers[0])
        self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
        self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
        self.layer4 = self._make_layer(block, 512, layers[3], stride=2)
        if with_pool:
-           self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
+           self.avgpool = nn.AdaptiveAvgPool2D((1, 1))
        if num_classes > 0:
            self.fc = nn.Linear(512 * block.expansion, num_classes)
@@ -217,7 +217,7 @@ class ResNet(nn.Layer):
            stride = 1
        if stride != 1 or self.inplanes != planes * block.expansion:
            downsample = nn.Sequential(
-               nn.Conv2d(
+               nn.Conv2D(
                    self.inplanes,
                    planes * block.expansion,
                    1,
......
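Here `_make_layer` inserts the 1x1 `Conv2D` downsample branch whenever the stride or channel width changes, so the residual addition stays shape-compatible. A usage sketch, assuming the standard `paddle.vision.models` constructors are built on this class:

.. code-block:: python

    import paddle
    from paddle.vision.models import resnet18

    model = resnet18()                 # BasicBlock variant, ImageNet head
    x = paddle.rand([1, 3, 224, 224])
    out = model(x)                     # out.shape: [1, 1000]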
@@ -57,7 +57,7 @@ class VGG(nn.Layer):
    def __init__(self, features, num_classes=1000):
        super(VGG, self).__init__()
        self.features = features
-       self.avgpool = nn.AdaptiveAvgPool2d((7, 7))
+       self.avgpool = nn.AdaptiveAvgPool2D((7, 7))
        self.classifier = nn.Sequential(
            nn.Linear(512 * 7 * 7, 4096),
            nn.ReLU(),
@@ -80,11 +80,11 @@ def make_layers(cfg, batch_norm=False):
    in_channels = 3
    for v in cfg:
        if v == 'M':
-           layers += [nn.MaxPool2d(kernel_size=2, stride=2)]
+           layers += [nn.MaxPool2D(kernel_size=2, stride=2)]
        else:
-           conv2d = nn.Conv2d(in_channels, v, kernel_size=3, padding=1)
+           conv2d = nn.Conv2D(in_channels, v, kernel_size=3, padding=1)
            if batch_norm:
-               layers += [conv2d, nn.BatchNorm2d(v), nn.ReLU()]
+               layers += [conv2d, nn.BatchNorm2D(v), nn.ReLU()]
            else:
                layers += [conv2d, nn.ReLU()]
            in_channels = v
......
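`make_layers` turns a config list into the VGG feature extractor, mapping `'M'` to `MaxPool2D` and integers to `Conv2D` (plus optional `BatchNorm2D`). A self-contained sketch of how a small config expands, assuming the function ends by wrapping the list in `nn.Sequential` (the `cfg` values here are illustrative):

.. code-block:: python

    import paddle
    import paddle.nn as nn

    def make_layers(cfg, batch_norm=False):
        layers = []
        in_channels = 3
        for v in cfg:
            if v == 'M':
                # 'M' halves the spatial resolution
                layers += [nn.MaxPool2D(kernel_size=2, stride=2)]
            else:
                # an integer adds a 3x3 conv with that many output channels
                conv2d = nn.Conv2D(in_channels, v, kernel_size=3, padding=1)
                if batch_norm:
                    layers += [conv2d, nn.BatchNorm2D(v), nn.ReLU()]
                else:
                    layers += [conv2d, nn.ReLU()]
                in_channels = v
        return nn.Sequential(*layers)

    features = make_layers([64, 'M', 128, 'M'], batch_norm=True)
    y = features(paddle.rand([1, 3, 32, 32]))   # y.shape: [1, 128, 8, 8]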