Unverified commit 7c1aa0d6 authored by cnn, committed by GitHub

2.0rc api rename (#28088)

* rename manual_seed to seed

* rename xxx1d-->xxx1D, xxx2d-->xxx2D, xxx3d-->xxx3D

* rename manual_seed --> seed

* do not rename .cc, .cu and .h file

* rename manual_seed --> seed

* rename manual_seed --> seed

* rename manual_seed --> seed

* rename manual_seed --> seed

* disable_static on doc example code

* do not change manual_seed on generator

* add enable_static on sample code

* convert python/paddle/fluid/layers/nn.py to bak

* fix typo

* fix code style

* fix seed to manual_seed when calling functions of Generator()

* fix bug
Parent 68c473e3
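
For readers tracking the rename, a minimal migration sketch (the layer sizes here are illustrative assumptions, not taken from the diff):

.. code-block:: python

    import paddle

    # before 2.0rc: paddle.manual_seed(100)
    paddle.seed(100)  # renamed global RNG seed

    # before 2.0rc: paddle.nn.Conv2d(3, 2, 3)
    conv = paddle.nn.Conv2D(3, 2, 3)        # xxx2d layers become xxx2D
    out = conv(paddle.rand([1, 3, 8, 8]))   # NCHW input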
@@ -222,7 +222,7 @@ from .tensor.search import sort #DEFINE_ALIAS
 from .tensor.to_string import set_printoptions
-from .framework.random import manual_seed #DEFINE_ALIAS
+from .framework.random import seed #DEFINE_ALIAS
 from .framework.random import get_cuda_rng_state #DEFINE_ALIAS
 from .framework.random import set_cuda_rng_state #DEFINE_ALIAS
 from .framework import ParamAttr #DEFINE_ALIAS
...
@@ -37,7 +37,7 @@ def auto_cast(enable=True, custom_white_list=None, custom_black_list=None):
 import paddle
-conv2d = paddle.nn.Conv2d(3, 2, 3, bias_attr=False)
+conv2d = paddle.nn.Conv2D(3, 2, 3, bias_attr=False)
 data = paddle.rand([10, 3, 32, 32])
 with paddle.amp.auto_cast():
...
@@ -50,7 +50,7 @@ class GradScaler(AmpScaler):
 import paddle
-model = paddle.nn.Conv2d(3, 2, 3, bias_attr=True)
+model = paddle.nn.Conv2D(3, 2, 3, bias_attr=True)
 optimizer = paddle.optimizer.SGD(learning_rate=0.01, parameters=model.parameters())
 scaler = paddle.amp.GradScaler(init_loss_scaling=1024)
 data = paddle.rand([10, 3, 32, 32])
@@ -90,7 +90,7 @@ class GradScaler(AmpScaler):
 import paddle
-model = paddle.nn.Conv2d(3, 2, 3, bias_attr=True)
+model = paddle.nn.Conv2D(3, 2, 3, bias_attr=True)
 optimizer = paddle.optimizer.SGD(learning_rate=0.01, parameters=model.parameters())
 scaler = paddle.amp.GradScaler(init_loss_scaling=1024)
 data = paddle.rand([10, 3, 32, 32])
@@ -122,7 +122,7 @@ class GradScaler(AmpScaler):
 import paddle
-model = paddle.nn.Conv2d(3, 2, 3, bias_attr=True)
+model = paddle.nn.Conv2D(3, 2, 3, bias_attr=True)
 optimizer = paddle.optimizer.SGD(learning_rate=0.01, parameters=model.parameters())
 scaler = paddle.amp.GradScaler(init_loss_scaling=1024)
 data = paddle.rand([10, 3, 32, 32])
...
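
Pieced together, the GradScaler hunks above correspond to this runnable AMP training-step sketch (it mirrors the docstring example; `paddle.mean` is used here as a stand-in loss):

.. code-block:: python

    import paddle

    model = paddle.nn.Conv2D(3, 2, 3, bias_attr=True)
    optimizer = paddle.optimizer.SGD(learning_rate=0.01,
                                     parameters=model.parameters())
    scaler = paddle.amp.GradScaler(init_loss_scaling=1024)
    data = paddle.rand([10, 3, 32, 32])

    with paddle.amp.auto_cast():        # forward pass in mixed precision
        loss = paddle.mean(model(data))
    scaled = scaler.scale(loss)         # scale loss to avoid fp16 underflow
    scaled.backward()
    scaler.minimize(optimizer, scaled)  # unscale gradients, then step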
@@ -670,13 +670,13 @@ class Categorical(Distribution):
 import paddle
 from paddle.distribution import Categorical
-paddle.manual_seed(100) # on CPU device
+paddle.seed(100) # on CPU device
 x = paddle.rand([6])
 print(x.numpy())
 # [0.5535528 0.20714243 0.01162981
 # 0.51577556 0.36369765 0.2609165 ]
-paddle.manual_seed(200) # on CPU device
+paddle.seed(200) # on CPU device
 y = paddle.rand([6])
 print(y.numpy())
 # [0.77663314 0.90824795 0.15685187
@@ -685,7 +685,7 @@ class Categorical(Distribution):
 cat = Categorical(x)
 cat2 = Categorical(y)
-paddle.manual_seed(1000) # on CPU device
+paddle.seed(1000) # on CPU device
 cat.sample([2,3])
 # [[0, 0, 5],
 # [3, 4, 5]]
@@ -744,7 +744,7 @@ class Categorical(Distribution):
 import paddle
 from paddle.distribution import Categorical
-paddle.manual_seed(100) # on CPU device
+paddle.seed(100) # on CPU device
 x = paddle.rand([6])
 print(x.numpy())
 # [0.5535528 0.20714243 0.01162981
@@ -752,7 +752,7 @@ class Categorical(Distribution):
 cat = Categorical(x)
-paddle.manual_seed(1000) # on CPU device
+paddle.seed(1000) # on CPU device
 cat.sample([2,3])
 # [[0, 0, 5],
 # [3, 4, 5]]
@@ -791,13 +791,13 @@ class Categorical(Distribution):
 import paddle
 from paddle.distribution import Categorical
-paddle.manual_seed(100) # on CPU device
+paddle.seed(100) # on CPU device
 x = paddle.rand([6])
 print(x.numpy())
 # [0.5535528 0.20714243 0.01162981
 # 0.51577556 0.36369765 0.2609165 ]
-paddle.manual_seed(200) # on CPU device
+paddle.seed(200) # on CPU device
 y = paddle.rand([6])
 print(y.numpy())
 # [0.77663314 0.90824795 0.15685187
@@ -842,7 +842,7 @@ class Categorical(Distribution):
 import paddle
 from paddle.distribution import Categorical
-paddle.manual_seed(100) # on CPU device
+paddle.seed(100) # on CPU device
 x = paddle.rand([6])
 print(x.numpy())
 # [0.5535528 0.20714243 0.01162981
@@ -887,7 +887,7 @@ class Categorical(Distribution):
 import paddle
 from paddle.distribution import Categorical
-paddle.manual_seed(100) # on CPU device
+paddle.seed(100) # on CPU device
 x = paddle.rand([6])
 print(x.numpy())
 # [0.5535528 0.20714243 0.01162981
@@ -953,7 +953,7 @@ class Categorical(Distribution):
 import paddle
 from paddle.distribution import Categorical
-paddle.manual_seed(100) # on CPU device
+paddle.seed(100) # on CPU device
 x = paddle.rand([6])
 print(x.numpy())
 # [0.5535528 0.20714243 0.01162981
...
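
A condensed, runnable version of the Categorical examples above using the renamed seeding call (the sampled values in the inline comments are device- and version-dependent):

.. code-block:: python

    import paddle
    from paddle.distribution import Categorical

    paddle.seed(100)             # formerly paddle.manual_seed(100)
    x = paddle.rand([6])         # unnormalized category weights
    cat = Categorical(x)

    paddle.seed(1000)
    sample = cat.sample([2, 3])  # 2x3 tensor of category indices
    print(sample.numpy())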
@@ -114,7 +114,7 @@ class TestWeightDecay(unittest.TestCase):
 return param_sum
 def check_weight_decay(self, place, model):
-paddle.manual_seed(1)
+paddle.seed(1)
 paddle.framework.random._manual_program_seed(1)
 main_prog = fluid.framework.Program()
 startup_prog = fluid.framework.Program()
@@ -137,7 +137,7 @@ class TestWeightDecay(unittest.TestCase):
 return param_sum
 def check_weight_decay2(self, place, model):
-paddle.manual_seed(1)
+paddle.seed(1)
 paddle.framework.random._manual_program_seed(1)
 main_prog = fluid.framework.Program()
 startup_prog = fluid.framework.Program()
...
@@ -1058,7 +1058,7 @@ class Layer(core.Layer):
 super(Mylayer, self).__init__()
 self.linear1 = paddle.nn.Linear(10, 10)
 self.linear2 = paddle.nn.Linear(5, 5)
-self.conv2d = paddle.nn.Conv2d(3, 2, 3)
+self.conv2d = paddle.nn.Conv2D(3, 2, 3)
 self.embedding = paddle.nn.Embedding(128, 16)
 self.h_0 = paddle.to_tensor(np.zeros([10, 10]).astype('float32'))
...
@@ -110,7 +110,7 @@ class Conv2D(layers.Layer):
 dilation (int or tuple, optional): The dilation size. If dilation is a tuple, it must
 contain two integers, (dilation_H, dilation_W). Otherwise, the
 dilation_H = dilation_W = dilation. Default: 1.
-groups (int, optional): The groups number of the Conv2d Layer. According to grouped
+groups (int, optional): The groups number of the Conv2D Layer. According to grouped
 convolution in Alex Krizhevsky's Deep CNN paper: when group=2,
 the first half of the filters is only connected to the first half
 of the input channels, while the second half of the filters is only
@@ -345,7 +345,7 @@ class Conv3D(layers.Layer):
 dilation (int|tuple, optional): The dilation size. If dilation is a tuple, it must
 contain three integers, (dilation_D, dilation_H, dilation_W). Otherwise, the
 dilation_D = dilation_H = dilation_W = dilation. The default value is 1.
-groups (int, optional): The groups number of the Conv3d Layer. According to grouped
+groups (int, optional): The groups number of the Conv3D Layer. According to grouped
 convolution in Alex Krizhevsky's Deep CNN paper: when group=2,
 the first half of the filters is only connected to the first half
 of the input channels, while the second half of the filters is only
@@ -574,7 +574,7 @@ class Conv3DTranspose(layers.Layer):
 dilation(int|tuple, optional): The dilation size. If dilation is a tuple, it must
 contain three integers, (dilation_D, dilation_H, dilation_W). Otherwise, the
 dilation_D = dilation_H = dilation_W = dilation. The default value is 1.
-groups(int, optional): The groups number of the Conv3d transpose layer. Inspired by
+groups(int, optional): The groups number of the Conv3D transpose layer. Inspired by
 grouped convolution in Alex Krizhevsky's Deep CNN paper, in which
 when group=2, the first half of the filters is only connected to the
 first half of the input channels, while the second half of the
@@ -2541,7 +2541,7 @@ class Conv2DTranspose(layers.Layer):
 dilation(int or tuple, optional): The dilation size. If dilation is a tuple, it must
 contain two integers, (dilation_H, dilation_W). Otherwise, the
 dilation_H = dilation_W = dilation. Default: 1.
-groups(int, optional): The groups number of the Conv2d transpose layer. Inspired by
+groups(int, optional): The groups number of the Conv2D transpose layer. Inspired by
 grouped convolution in Alex Krizhevsky's Deep CNN paper, in which
 when group=2, the first half of the filters is only connected to the
 first half of the input channels, while the second half of the
...
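
As a quick illustration of the `groups` semantics described in these docstrings, a sketch using the renamed 2.0 API (the sizes are arbitrary assumptions):

.. code-block:: python

    import paddle

    # With groups=2, each filter sees only in_channels // groups input
    # channels, so the weight shape is
    # [out_channels, in_channels // groups, kH, kW].
    conv = paddle.nn.Conv2D(in_channels=4, out_channels=8,
                            kernel_size=3, groups=2)
    print(conv.weight.shape)  # expected: [8, 2, 3, 3]
    y = conv(paddle.rand([1, 4, 16, 16]))
    print(y.shape)            # expected: [1, 8, 14, 14]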
@@ -749,7 +749,7 @@ class BilinearInitializer(Initializer):
 regularizer=L2Decay(0.),
 initializer=nn.initializer.Bilinear())
 data = paddle.rand([B, 3, H, W], dtype='float32')
-conv_up = nn.ConvTranspose2d(3,
+conv_up = nn.Conv2DTranspose(3,
 out_channels=C,
 kernel_size=2 * factor - factor % 2,
 padding=int(
...
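
The hunk above truncates the example; a self-contained sketch of the same bilinear-upsampling pattern with the renamed `nn.Conv2DTranspose` (the constants are illustrative assumptions, not taken from the diff):

.. code-block:: python

    import math
    import paddle
    import paddle.nn as nn
    from paddle.regularizer import L2Decay

    factor, C, B, H, W = 2, 2, 8, 32, 32
    w_attr = paddle.ParamAttr(learning_rate=0.,
                              regularizer=L2Decay(0.),
                              initializer=nn.initializer.Bilinear())
    data = paddle.rand([B, 3, H, W], dtype='float32')
    conv_up = nn.Conv2DTranspose(3,
                                 out_channels=C,
                                 kernel_size=2 * factor - factor % 2,
                                 padding=int(math.ceil((factor - 1) / 2.)),
                                 stride=factor,
                                 weight_attr=w_attr,
                                 bias_attr=False)
    up = conv_up(data)  # output is spatially upsampled by `factor`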
@@ -43,7 +43,7 @@ def simple_img_conv_pool(input,
 act=None,
 use_cudnn=True):
 """
 :api_attr: Static Graph
 The simple_img_conv_pool api is composed of :ref:`api_fluid_layers_conv2d` and :ref:`api_fluid_layers_pool2d` .
@@ -106,6 +106,8 @@ def simple_img_conv_pool(input,
 .. code-block:: python
 import paddle.fluid as fluid
+import paddle
+paddle.enable_static()
 img = fluid.data(name='img', shape=[100, 1, 28, 28], dtype='float32')
 conv_pool = fluid.nets.simple_img_conv_pool(input=img,
 filter_size=5,
@@ -151,37 +153,37 @@ def img_conv_group(input,
 pool_type="max",
 use_cudnn=True):
 """
 :api_attr: Static Graph
 The Image Convolution Group is composed of Convolution2d, BatchNorm, DropOut,
-and Pool2d. According to the input arguments, img_conv_group will do serials of
+and Pool2D. According to the input arguments, img_conv_group will do serials of
 computation for Input using Convolution2d, BatchNorm, DropOut, and pass the last
-result to Pool2d.
+result to Pool2D.
 Args:
 input (Variable): The input is 4-D Tensor with shape [N, C, H, W], the data type of input is float32 or float64.
 conv_num_filter(list|tuple): Indicates the numbers of filter of this group.
-pool_size (int|list|tuple): The pooling size of Pool2d Layer. If pool_size
+pool_size (int|list|tuple): The pooling size of Pool2D Layer. If pool_size
 is a list or tuple, it must contain two integers, (pool_size_height, pool_size_width).
 Otherwise, the pool_size_height = pool_size_width = pool_size.
-conv_padding (int|list|tuple): The padding size of the Conv2d Layer. If padding is
+conv_padding (int|list|tuple): The padding size of the Conv2D Layer. If padding is
 a list or tuple, its length must be equal to the length of conv_num_filter.
-Otherwise the conv_padding of all Conv2d Layers are the same. Default 1.
+Otherwise the conv_padding of all Conv2D Layers are the same. Default 1.
 conv_filter_size (int|list|tuple): The filter size. If filter_size is a list or
 tuple, its length must be equal to the length of conv_num_filter.
-Otherwise the conv_filter_size of all Conv2d Layers are the same. Default 3.
+Otherwise the conv_filter_size of all Conv2D Layers are the same. Default 3.
-conv_act (str): Activation type for Conv2d Layer that is not followed by BatchNorm.
+conv_act (str): Activation type for Conv2D Layer that is not followed by BatchNorm.
 Default: None.
-param_attr (ParamAttr): The parameters to the Conv2d Layer. Default: None
+param_attr (ParamAttr): The parameters to the Conv2D Layer. Default: None
-conv_with_batchnorm (bool|list): Indicates whether to use BatchNorm after Conv2d Layer.
+conv_with_batchnorm (bool|list): Indicates whether to use BatchNorm after Conv2D Layer.
 If conv_with_batchnorm is a list, its length must be equal to the length of
 conv_num_filter. Otherwise, conv_with_batchnorm indicates whether all the
-Conv2d Layer follows a BatchNorm. Default False.
+Conv2D Layer follows a BatchNorm. Default False.
 conv_batchnorm_drop_rate (float|list): Indicates the drop_rate of Dropout Layer
 after BatchNorm. If conv_batchnorm_drop_rate is a list, its length must be
 equal to the length of conv_num_filter. Otherwise, drop_rate of all Dropout
 Layers is conv_batchnorm_drop_rate. Default 0.0.
-pool_stride (int|list|tuple): The pooling stride of Pool2d layer. If pool_stride
+pool_stride (int|list|tuple): The pooling stride of Pool2D layer. If pool_stride
 is a list or tuple, it must contain two integers, (pooling_stride_H,
 pooling_stride_W). Otherwise, the pooling_stride_H = pooling_stride_W = pool_stride.
 Default 1.
@@ -192,12 +194,15 @@ def img_conv_group(input,
 Return:
 A Variable holding Tensor representing the final result after serial computation using Convolution2d,
-BatchNorm, DropOut, and Pool2d, whose data type is the same with input.
+BatchNorm, DropOut, and Pool2D, whose data type is the same with input.
 Examples:
 .. code-block:: python
 import paddle.fluid as fluid
+import paddle
+paddle.enable_static()
 img = fluid.data(name='img', shape=[None, 1, 28, 28], dtype='float32')
 conv_pool = fluid.nets.img_conv_group(input=img,
 conv_padding=1,
@@ -261,7 +266,7 @@ def sequence_conv_pool(input,
 pool_type="max",
 bias_attr=None):
 """
 :api_attr: Static Graph
 **This api takes input as an LoDTensor. If input is a Tensor, please use**
 :ref:`api_fluid_nets_simple_img_conv_pool` **instead**
@@ -300,6 +305,8 @@ def sequence_conv_pool(input,
 .. code-block:: python
 import paddle.fluid as fluid
+import paddle
+paddle.enable_static()
 input_dim = 100 #len(word_dict)
 emb_dim = 128
 hid_dim = 512
@@ -327,7 +334,7 @@ def sequence_conv_pool(input,
 def glu(input, dim=-1):
 """
 :api_attr: Static Graph
 The Gated Linear Units(GLU) composed by :ref:`api_fluid_layers_split` ,
 :ref:`api_fluid_layers_sigmoid` and :ref:`api_fluid_layers_elementwise_mul` .
@@ -356,6 +363,9 @@ def glu(input, dim=-1):
 .. code-block:: python
 import paddle.fluid as fluid
+import paddle
+paddle.enable_static()
 data = fluid.data(
 name="words", shape=[-1, 6, 3, 9], dtype="float32")
 # shape of output: [-1, 3, 3, 9]
@@ -375,7 +385,7 @@ def scaled_dot_product_attention(queries,
 num_heads=1,
 dropout_rate=0.):
 """
 :api_attr: Static Graph
 This interface Multi-Head Attention using scaled dot product.
 Attention mechanism can be seen as mapping a query and a set of key-value
@@ -435,7 +445,9 @@ def scaled_dot_product_attention(queries,
 .. code-block:: python
 import paddle.fluid as fluid
+import paddle
+paddle.enable_static()
 queries = fluid.data(name="queries", shape=[3, 5, 9], dtype="float32")
 keys = fluid.data(name="keys", shape=[3, 6, 9], dtype="float32")
 values = fluid.data(name="values", shape=[3, 6, 10], dtype="float32")
...
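
The recurring `+import paddle` / `+paddle.enable_static()` additions exist because 2.0rc defaults to dynamic-graph mode, so the fluid static-graph examples must opt back in explicitly. A minimal sketch of the pattern (argument values follow the simple_img_conv_pool docstring):

.. code-block:: python

    import paddle
    import paddle.fluid as fluid

    paddle.enable_static()  # 2.0rc defaults to dygraph; fluid examples need this
    img = fluid.data(name='img', shape=[100, 1, 28, 28], dtype='float32')
    conv_pool = fluid.nets.simple_img_conv_pool(input=img,
                                                filter_size=5,
                                                num_filters=20,
                                                pool_size=2,
                                                pool_stride=2,
                                                act="relu")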
@@ -564,7 +564,7 @@ def train_bmn(args, place, to_static):
 loss_data = []
 with fluid.dygraph.guard(place):
-paddle.manual_seed(SEED)
+paddle.seed(SEED)
 paddle.framework.random._manual_program_seed(SEED)
 global local_random
 local_random = np.random.RandomState(SEED)
...
@@ -450,7 +450,7 @@ def do_train(args, to_static):
 place = fluid.CUDAPlace(0) if fluid.is_compiled_with_cuda(
 ) else fluid.CPUPlace()
 with fluid.dygraph.guard(place):
-paddle.manual_seed(SEED)
+paddle.seed(SEED)
 paddle.framework.random._manual_program_seed(SEED)
 reader = get_random_input_data(args.batch_size, args.vocab_size,
...
@@ -451,7 +451,7 @@ def train_mobilenet(args, to_static):
 with fluid.dygraph.guard(args.place):
 np.random.seed(SEED)
-paddle.manual_seed(SEED)
+paddle.seed(SEED)
 paddle.framework.random._manual_program_seed(SEED)
 if args.model == "MobileNetV1":
...
@@ -218,7 +218,7 @@ def train(place):
 batch_num = 200
 with fluid.dygraph.guard(place):
-paddle.manual_seed(SEED)
+paddle.seed(SEED)
 paddle.framework.random._manual_program_seed(SEED)
 ptb_model = PtbModel(
 hidden_size=hidden_size,
...
@@ -210,7 +210,7 @@ def train(place):
 batch_num = 200
 paddle.disable_static(place)
-paddle.manual_seed(SEED)
+paddle.seed(SEED)
 paddle.framework.random._manual_program_seed(SEED)
 ptb_model = PtbModel(
 hidden_size=hidden_size,
...
@@ -65,7 +65,7 @@ def train(args, place, to_static):
 env.seed(SEED)
 with fluid.dygraph.guard(place):
-paddle.manual_seed(SEED)
+paddle.seed(SEED)
 paddle.framework.random._manual_program_seed(SEED)
 local_random = np.random.RandomState(SEED)
...
@@ -219,7 +219,7 @@ def train(to_static):
 """
 with fluid.dygraph.guard(place):
 np.random.seed(SEED)
-paddle.manual_seed(SEED)
+paddle.seed(SEED)
 paddle.framework.random._manual_program_seed(SEED)
 train_reader = paddle.batch(
...
@@ -66,7 +66,7 @@ class ConvBNLayer(paddle.nn.Layer):
 act=None):
 super(ConvBNLayer, self).__init__()
-self._conv = paddle.nn.Conv2d(
+self._conv = paddle.nn.Conv2D(
 in_channels=num_channels,
 out_channels=num_filters,
 kernel_size=filter_size,
@@ -214,7 +214,7 @@ def train(to_static):
 """
 paddle.disable_static(place)
 np.random.seed(SEED)
-paddle.manual_seed(SEED)
+paddle.seed(SEED)
 paddle.framework.random._manual_program_seed(SEED)
 train_reader = paddle.batch(
...
@@ -334,7 +334,7 @@ def train(train_reader, to_static):
 np.random.seed(SEED)
 with fluid.dygraph.guard(place):
-paddle.manual_seed(SEED)
+paddle.seed(SEED)
 paddle.framework.random._manual_program_seed(SEED)
 se_resnext = SeResNeXt()
 optimizer = optimizer_setting(train_parameters, se_resnext.parameters())
...
@@ -286,7 +286,7 @@ def train(args, to_static):
 with fluid.dygraph.guard(place):
 np.random.seed(SEED)
-paddle.manual_seed(SEED)
+paddle.seed(SEED)
 paddle.framework.random._manual_program_seed(SEED)
 train_reader = fake_data_reader(args.class_num, args.vocab_size,
...
@@ -108,7 +108,7 @@ def train(conf_dict, to_static):
 place = fluid.CPUPlace()
 with fluid.dygraph.guard(place):
-paddle.manual_seed(SEED)
+paddle.seed(SEED)
 paddle.framework.random._manual_program_seed(SEED)
 conf_dict['dict_size'] = len(vocab)
...
@@ -106,7 +106,7 @@ def train(conf_dict, to_static):
 place = paddle.CPUPlace()
 paddle.disable_static(place)
-paddle.manual_seed(SEED)
+paddle.seed(SEED)
 paddle.framework.random._manual_program_seed(SEED)
 conf_dict['dict_size'] = len(vocab)
...
@@ -33,7 +33,7 @@ STEP_NUM = 10
 def train_static(args, batch_generator):
 paddle.enable_static()
-paddle.manual_seed(SEED)
+paddle.seed(SEED)
 paddle.framework.random._manual_program_seed(SEED)
 train_prog = fluid.Program()
 startup_prog = fluid.Program()
@@ -131,7 +131,7 @@ def train_static(args, batch_generator):
 def train_dygraph(args, batch_generator):
 with fluid.dygraph.guard(place):
 if SEED is not None:
-paddle.manual_seed(SEED)
+paddle.seed(SEED)
 paddle.framework.random._manual_program_seed(SEED)
 # define data loader
 train_loader = fluid.io.DataLoader.from_generator(capacity=10)
@@ -223,7 +223,7 @@ def train_dygraph(args, batch_generator):
 def predict_dygraph(args, batch_generator):
 with fluid.dygraph.guard(place):
-paddle.manual_seed(SEED)
+paddle.seed(SEED)
 paddle.framework.random._manual_program_seed(SEED)
 # define data loader
@@ -295,7 +295,7 @@ def predict_dygraph(args, batch_generator):
 def predict_static(args, batch_generator):
 test_prog = fluid.Program()
 with fluid.program_guard(test_prog):
-paddle.manual_seed(SEED)
+paddle.seed(SEED)
 paddle.framework.random._manual_program_seed(SEED)
 # define input and reader
...
@@ -272,7 +272,7 @@ def train(args, fake_data_reader, to_static):
 random.seed(0)
 np.random.seed(0)
 with fluid.dygraph.guard(place):
-paddle.manual_seed(1000)
+paddle.seed(1000)
 paddle.framework.random._manual_program_seed(1000)
 video_model = TSM_ResNet("TSM", train_config, 'Train')
...
@@ -20,7 +20,7 @@ import struct
 import paddle.fluid.core as core
 from paddle.fluid.tests.unittests.op_test import OpTest, convert_float_to_uint16
-from paddle.fluid.tests.unittests.test_conv2d_op import conv2d_forward_naive, TestConv2dOp
+from paddle.fluid.tests.unittests.test_conv2d_op import conv2d_forward_naive, TestConv2DOp
 def conv2d_residual_naive(out, residual):
@@ -31,7 +31,7 @@ def conv2d_residual_naive(out, residual):
 @unittest.skipIf(not core.supports_bfloat16(),
 "place does not support BF16 evaluation")
-class TestConv2dBf16Op(TestConv2dOp):
+class TestConv2DBf16Op(TestConv2DOp):
 def setUp(self):
 self.op_type = "conv2d"
 self.use_cudnn = False
@@ -110,7 +110,7 @@ class TestConv2dBf16Op(TestConv2dOp):
 pass
 def init_test_case(self):
-TestConv2dOp.init_test_case(self)
+TestConv2DOp.init_test_case(self)
 self.input_size = [1, 1, 5, 5] # NCHW
 f_c = self.input_size[1] // self.groups
 self.input_residual_size = [1, 2, 3, 3]
@@ -130,7 +130,7 @@ class TestConv2dBf16Op(TestConv2dOp):
 self.fuse_residual = True
-class TestConv2d(TestConv2dBf16Op):
+class TestConv2D(TestConv2DBf16Op):
 def init_test_case(self):
 self.pad = [0, 0]
 self.stride = [1, 1]
@@ -144,19 +144,19 @@ class TestConv2d(TestConv2dBf16Op):
 self.input_type = np.uint16
-class TestWithPad(TestConv2d):
+class TestWithPad(TestConv2D):
 def init_test_case(self):
-TestConv2d.init_test_case(self)
+TestConv2D.init_test_case(self)
 self.pad = [1, 1]
 self.input_residual_size = [2, 6, 5, 5]
-class TestWithGroup(TestConv2d):
+class TestWithGroup(TestConv2D):
 def init_group(self):
 self.groups = 3
-class TestWithStride(TestConv2dBf16Op):
+class TestWithStride(TestConv2DBf16Op):
 def init_test_case(self):
 self.pad = [1, 1]
 self.stride = [2, 2]
@@ -170,7 +170,7 @@ class TestWithStride(TestConv2dBf16Op):
 self.input_type = np.uint16
-class TestWithDilations(TestConv2dBf16Op):
+class TestWithDilations(TestConv2DBf16Op):
 def init_test_case(self):
 self.pad = [1, 1]
 self.stride = [1, 1]
@@ -185,7 +185,7 @@ class TestWithDilations(TestConv2dBf16Op):
 self.input_type = np.uint16
-class TestWith1x1ForceFP32Output(TestConv2dBf16Op):
+class TestWith1x1ForceFP32Output(TestConv2DBf16Op):
 def init_test_case(self):
 self.pad = [0, 0]
 self.stride = [1, 1]
@@ -201,7 +201,7 @@ class TestWith1x1ForceFP32Output(TestConv2dBf16Op):
 self.fuse_residual = False
-class TestWithInput1x1Filter1x1(TestConv2dBf16Op):
+class TestWithInput1x1Filter1x1(TestConv2DBf16Op):
 def init_test_case(self):
 self.pad = [0, 0]
 self.stride = [1, 1]
...
@@ -19,7 +19,7 @@ import numpy as np
 import paddle.fluid.core as core
 from paddle.fluid.tests.unittests.op_test import OpTest
-from paddle.fluid.tests.unittests.test_conv2d_op import conv2d_forward_naive, TestConv2dOp
+from paddle.fluid.tests.unittests.test_conv2d_op import conv2d_forward_naive, TestConv2DOp
 def conv2d_forward_refer(input, filter, group, conv_param):
@@ -28,7 +28,7 @@ def conv2d_forward_refer(input, filter, group, conv_param):
 return out
-class TestConv2dInt8Op(TestConv2dOp):
+class TestConv2DInt8Op(TestConv2DOp):
 def setUp(self):
 self.op_type = "conv2d"
 self.use_cudnn = False
@@ -162,7 +162,7 @@ class TestConv2dInt8Op(TestConv2dOp):
 pass
 def init_test_case(self):
-TestConv2dOp.init_test_case(self)
+TestConv2DOp.init_test_case(self)
 self.input_size = [1, 1, 5, 5] # NCHW
 f_c = self.input_size[1] // self.groups
 self.input_residual_size = [1, 2, 3, 3]
@@ -186,7 +186,7 @@ class TestConv2dInt8Op(TestConv2dOp):
 #--------------------test conv2d u8 in and u8 out with residual fuse--------------------
-class TestConv2d(TestConv2dInt8Op):
+class TestConv2D(TestConv2DInt8Op):
 def init_test_case(self):
 self.pad = [0, 0]
 self.stride = [1, 1]
@@ -201,19 +201,19 @@ class TestConv2d(TestConv2dInt8Op):
 self.scale_in_eltwise = 0.6
-class TestWithPad(TestConv2d):
+class TestWithPad(TestConv2D):
 def init_test_case(self):
-TestConv2d.init_test_case(self)
+TestConv2D.init_test_case(self)
 self.pad = [1, 1]
 self.input_residual_size = [2, 6, 5, 5]
-class TestWithGroup(TestConv2d):
+class TestWithGroup(TestConv2D):
 def init_group(self):
 self.groups = 3
-class TestWithStride(TestConv2dInt8Op):
+class TestWithStride(TestConv2DInt8Op):
 def init_test_case(self):
 self.pad = [1, 1]
 self.stride = [2, 2]
@@ -228,7 +228,7 @@ class TestWithStride(TestConv2dInt8Op):
 self.scale_in_eltwise = 0.5
-class TestWithDilations(TestConv2dInt8Op):
+class TestWithDilations(TestConv2DInt8Op):
 def init_test_case(self):
 self.pad = [1, 1]
 self.stride = [1, 1]
@@ -244,7 +244,7 @@ class TestWithDilations(TestConv2dInt8Op):
 self.scale_in_eltwise = 0.5
-class TestWith1x1(TestConv2dInt8Op):
+class TestWith1x1(TestConv2DInt8Op):
 def init_test_case(self):
 self.pad = [0, 0]
 self.stride = [1, 1]
@@ -259,7 +259,7 @@ class TestWith1x1(TestConv2dInt8Op):
 self.scale_in_eltwise = 0.5
-class TestWithInput1x1Filter1x1(TestConv2dInt8Op):
+class TestWithInput1x1Filter1x1(TestConv2DInt8Op):
 def init_test_case(self):
 self.pad = [0, 0]
 self.stride = [1, 1]
@@ -356,7 +356,7 @@ def create_test_int8_class(parent):
 globals()[cls_name_u8s8_re_1] = TestU8S8ResCase
-create_test_int8_class(TestConv2dInt8Op)
+create_test_int8_class(TestConv2DInt8Op)
 create_test_int8_class(TestWithPad)
 create_test_int8_class(TestWithStride)
 create_test_int8_class(TestWithDilations)
@@ -365,7 +365,7 @@ create_test_int8_class(TestWith1x1)
 create_test_int8_class(TestWithInput1x1Filter1x1)
-class TestConv2dOp_AsyPadding_INT_MKLDNN(TestConv2dInt8Op):
+class TestConv2DOp_AsyPadding_INT_MKLDNN(TestConv2DInt8Op):
 def init_kernel_type(self):
 self.use_mkldnn = True
@@ -374,13 +374,13 @@ class TestConv2dOp_AsyPadding_INT_MKLDNN(TestConv2dInt8Op):
 self.padding_algorithm = "EXPLICIT"
-class TestConv2dOp_Same_INT_MKLDNN(TestConv2dOp_AsyPadding_INT_MKLDNN):
+class TestConv2DOp_Same_INT_MKLDNN(TestConv2DOp_AsyPadding_INT_MKLDNN):
 def init_paddings(self):
 self.pad = [0, 0]
 self.padding_algorithm = "SAME"
-class TestConv2dOp_Valid_INT_MKLDNN(TestConv2dOp_AsyPadding_INT_MKLDNN):
+class TestConv2DOp_Valid_INT_MKLDNN(TestConv2DOp_AsyPadding_INT_MKLDNN):
 def init_paddings(self):
 self.pad = [1, 1]
 self.padding_algorithm = "VALID"
...
@@ -19,7 +19,7 @@ import numpy as np
 import paddle.fluid.core as core
 from paddle.fluid.tests.unittests.op_test import OpTest, skip_check_grad_ci
-from paddle.fluid.tests.unittests.test_conv2d_op import TestConv2dOp, TestConv2dOp_v2
+from paddle.fluid.tests.unittests.test_conv2d_op import TestConv2DOp, TestConv2DOp_v2
 def conv2d_bias_naive(out, bias):
@@ -36,7 +36,7 @@ def conv2d_residual_naive(out, residual):
 return out
-class TestConv2dMKLDNNOp(TestConv2dOp):
+class TestConv2DMKLDNNOp(TestConv2DOp):
 def init_group(self):
 self.groups = 1
@@ -64,7 +64,7 @@ class TestConv2dMKLDNNOp(TestConv2dOp):
 self.fuse_residual_connection = False
 self.input_residual_size = None
-TestConv2dOp.setUp(self)
+TestConv2DOp.setUp(self)
 output = self.outputs['Output']
@@ -106,9 +106,9 @@ class TestConv2dMKLDNNOp(TestConv2dOp):
 @skip_check_grad_ci(
 reason="Fusion is for inference only, check_grad is not required.")
-class TestWithbreluFusion(TestConv2dMKLDNNOp):
+class TestWithbreluFusion(TestConv2DMKLDNNOp):
 def init_test_case(self):
-TestConv2dMKLDNNOp.init_test_case(self)
+TestConv2DMKLDNNOp.init_test_case(self)
 self.fuse_activation = "relu6"
 self.fuse_alpha = 6.0
 self.dsttype = np.float32
@@ -116,9 +116,9 @@ class TestWithbreluFusion(TestConv2dMKLDNNOp):
 @skip_check_grad_ci(
 reason="Fusion is for inference only, check_grad is not required.")
-class TestWithFuse(TestConv2dMKLDNNOp):
+class TestWithFuse(TestConv2DMKLDNNOp):
 def init_test_case(self):
-TestConv2dMKLDNNOp.init_test_case(self)
+TestConv2DMKLDNNOp.init_test_case(self)
 self.pad = [1, 1]
 self.fuse_bias = True
 self.bias_size = [6]
@@ -126,22 +126,22 @@ class TestWithFuse(TestConv2dMKLDNNOp):
 self.input_residual_size = [2, 6, 5, 5]
-class TestWithPadWithBias(TestConv2dMKLDNNOp):
+class TestWithPadWithBias(TestConv2DMKLDNNOp):
 def init_test_case(self):
-TestConv2dMKLDNNOp.init_test_case(self)
+TestConv2DMKLDNNOp.init_test_case(self)
 self.pad = [1, 1]
 self.input_size = [2, 3, 6, 6]
-class TestWithStride(TestConv2dMKLDNNOp):
+class TestWithStride(TestConv2DMKLDNNOp):
 def init_test_case(self):
-TestConv2dMKLDNNOp.init_test_case(self)
+TestConv2DMKLDNNOp.init_test_case(self)
 self.pad = [1, 1]
 self.stride = [2, 2]
 self.input_size = [2, 3, 6, 6]
-class TestWithGroup(TestConv2dMKLDNNOp):
+class TestWithGroup(TestConv2DMKLDNNOp):
 def init_test_case(self):
 self.pad = [0, 0]
 self.stride = [1, 1]
@@ -154,15 +154,15 @@ class TestWithGroup(TestConv2dMKLDNNOp):
 self.groups = 3
-class TestWith1x1(TestConv2dMKLDNNOp):
+class TestWith1x1(TestConv2DMKLDNNOp):
 def init_test_case(self):
-TestConv2dMKLDNNOp.init_test_case(self)
+TestConv2DMKLDNNOp.init_test_case(self)
 self.filter_size = [40, 3, 1, 1]
-class TestWithInput1x1Filter1x1(TestConv2dMKLDNNOp):
+class TestWithInput1x1Filter1x1(TestConv2DMKLDNNOp):
 def init_test_case(self):
-TestConv2dMKLDNNOp.init_test_case(self)
+TestConv2DMKLDNNOp.init_test_case(self)
 self.input_size = [2, 60, 1, 1] # NCHW
 assert np.mod(self.input_size[1], self.groups) == 0
 f_c = self.input_size[1] // self.groups
@@ -172,7 +172,7 @@ class TestWithInput1x1Filter1x1(TestConv2dMKLDNNOp):
 self.groups = 3
-class TestConv2dOp_AsyPadding_MKLDNN(TestConv2dOp_v2):
+class TestConv2DOp_AsyPadding_MKLDNN(TestConv2DOp_v2):
 def init_kernel_type(self):
 self.use_mkldnn = True
 self.dtype = np.float32
@@ -182,19 +182,19 @@ class TestConv2dOp_AsyPadding_MKLDNN(TestConv2dOp_v2):
 self.padding_algorithm = "EXPLICIT"
-class TestConv2dOp_Same_MKLDNN(TestConv2dOp_AsyPadding_MKLDNN):
+class TestConv2DOp_Same_MKLDNN(TestConv2DOp_AsyPadding_MKLDNN):
 def init_paddings(self):
 self.pad = [0, 0]
 self.padding_algorithm = "SAME"
-class TestConv2dOp_Valid_MKLDNN(TestConv2dOp_AsyPadding_MKLDNN):
+class TestConv2DOp_Valid_MKLDNN(TestConv2DOp_AsyPadding_MKLDNN):
 def init_paddings(self):
 self.pad = [1, 1]
 self.padding_algorithm = "VALID"
-class TestConv2dOp_Valid_NHWC_MKLDNN(TestConv2dOp_Valid_MKLDNN):
+class TestConv2DOp_Valid_NHWC_MKLDNN(TestConv2DOp_Valid_MKLDNN):
 def init_data_format(self):
 self.data_format = "NHWC"
@@ -203,21 +203,21 @@ class TestConv2dOp_Valid_NHWC_MKLDNN(TestConv2dOp_Valid_MKLDNN):
 self.input_size = [N, H, W, C]
-class TestConv2dOp_Same_NHWC_MKLDNN(TestConv2dOp_Valid_NHWC_MKLDNN):
+class TestConv2DOp_Same_NHWC_MKLDNN(TestConv2DOp_Valid_NHWC_MKLDNN):
 def init_paddings(self):
 self.pad = [0, 0]
 self.padding_algorithm = "SAME"
-class TestConv2dOp_AsyPadding_NHWC_MKLDNN(TestConv2dOp_Valid_NHWC_MKLDNN):
+class TestConv2DOp_AsyPadding_NHWC_MKLDNN(TestConv2DOp_Valid_NHWC_MKLDNN):
 def init_paddings(self):
 self.pad = [0, 0, 1, 2]
 self.padding_algorithm = "EXPLICIT"
-class TestMKLDNNDilations(TestConv2dMKLDNNOp):
+class TestMKLDNNDilations(TestConv2DMKLDNNOp):
 def init_test_case(self):
-TestConv2dMKLDNNOp.init_test_case(self)
+TestConv2DMKLDNNOp.init_test_case(self)
 self.pad = [0, 0]
 self.stride = [1, 1]
 self.input_size = [2, 3, 10, 10] # NCHW
...
@@ -19,7 +19,7 @@ import numpy as np
 import paddle.fluid.core as core
 from paddle.fluid.tests.unittests.op_test import OpTest
-from paddle.fluid.tests.unittests.test_conv2d_transpose_op import conv2dtranspose_forward_naive, TestConv2dTransposeOp
+from paddle.fluid.tests.unittests.test_conv2d_transpose_op import conv2dtranspose_forward_naive, TestConv2DTransposeOp
 def conv2d_bias_naive(out, bias):
@@ -30,7 +30,7 @@ def conv2d_bias_naive(out, bias):
 return out
-class TestConv2dTransposeMKLDNNOp(TestConv2dTransposeOp):
+class TestConv2DTransposeMKLDNNOp(TestConv2DTransposeOp):
 def test_check_grad(self):
 return
@@ -64,7 +64,7 @@ class TestConv2dTransposeMKLDNNOp(TestConv2dTransposeOp):
 def setUp(self):
-TestConv2dTransposeOp.setUp(self)
+TestConv2DTransposeOp.setUp(self)
 output = self.outputs['Output']
@@ -86,46 +86,46 @@ class TestConv2dTransposeMKLDNNOp(TestConv2dTransposeOp):
 self.outputs['Output'] = output
-class TestMKLDNNFuseBias(TestConv2dTransposeMKLDNNOp):
+class TestMKLDNNFuseBias(TestConv2DTransposeMKLDNNOp):
 def init_test_case(self):
-TestConv2dTransposeMKLDNNOp.init_test_case(self)
+TestConv2DTransposeMKLDNNOp.init_test_case(self)
 self.pad = [1, 1]
 self.fuse_bias = True
 self.bias_size = [6]
-class TestMKLDNNWithPad(TestConv2dTransposeMKLDNNOp):
+class TestMKLDNNWithPad(TestConv2DTransposeMKLDNNOp):
 def init_test_case(self):
-TestConv2dTransposeMKLDNNOp.init_test_case(self)
+TestConv2DTransposeMKLDNNOp.init_test_case(self)
 self.pad = [1, 1]
 self.input_size = [2, 3, 10, 10]
-class TestMKLDNNWithStride(TestConv2dTransposeMKLDNNOp):
+class TestMKLDNNWithStride(TestConv2DTransposeMKLDNNOp):
 def init_test_case(self):
-TestConv2dTransposeMKLDNNOp.init_test_case(self)
+TestConv2DTransposeMKLDNNOp.init_test_case(self)
 self.pad = [1, 1]
 self.stride = [2, 2]
 self.input_size = [2, 3, 6, 6] # NCHW
-class TestMKLDNNWithAsymPad(TestConv2dTransposeMKLDNNOp):
+class TestMKLDNNWithAsymPad(TestConv2DTransposeMKLDNNOp):
 def init_test_case(self):
-TestConv2dTransposeMKLDNNOp.init_test_case(self)
+TestConv2DTransposeMKLDNNOp.init_test_case(self)
 self.pad = [0, 0, 1, 2]
 self.padding_algorithm = "EXPLICIT"
-class TestMKLDNNWithSamePad(TestConv2dTransposeMKLDNNOp):
+class TestMKLDNNWithSamePad(TestConv2DTransposeMKLDNNOp):
 def init_test_case(self):
-TestConv2dTransposeMKLDNNOp.init_test_case(self)
+TestConv2DTransposeMKLDNNOp.init_test_case(self)
 self.pad = [0, 0]
 self.padding_algorithm = "SAME"
-class TestMKLDNNWithValidPad(TestConv2dTransposeMKLDNNOp):
+class TestMKLDNNWithValidPad(TestConv2DTransposeMKLDNNOp):
 def init_test_case(self):
-TestConv2dTransposeMKLDNNOp.init_test_case(self)
+TestConv2DTransposeMKLDNNOp.init_test_case(self)
 self.pad = [1, 1]
 self.padding_algorithm = "VALID"
@@ -138,10 +138,10 @@ class TestMKLDNNWithValidPad_NHWC(TestMKLDNNWithValidPad):
 self.input_size = [N, H, W, C]
-class TestConv2dTransposeMKLDNNWithDilationsExplicitPad(
-        TestConv2dTransposeMKLDNNOp):
+class TestConv2DTransposeMKLDNNWithDilationsExplicitPad(
+        TestConv2DTransposeMKLDNNOp):
 def init_test_case(self):
-TestConv2dTransposeMKLDNNOp.init_test_case(self)
+TestConv2DTransposeMKLDNNOp.init_test_case(self)
 self.stride = [2, 1]
 self.dilations = [1, 2]
 self.groups = 1
...
@@ -16,10 +16,10 @@ from __future__ import print_function
 import unittest
 import numpy as np
-from paddle.fluid.tests.unittests.test_conv3d_op import TestConv3dOp, TestCase1, TestWithGroup1, TestWithGroup2, TestWith1x1, TestWithInput1x1Filter1x1, TestConv3dOp_2
+from paddle.fluid.tests.unittests.test_conv3d_op import TestConv3DOp, TestCase1, TestWithGroup1, TestWithGroup2, TestWith1x1, TestWithInput1x1Filter1x1, TestConv3DOp_2
-class TestMKLDNN(TestConv3dOp):
+class TestMKLDNN(TestConv3DOp):
 def init_kernel_type(self):
 self.use_mkldnn = True
 self.data_format = "NCHW"
@@ -61,7 +61,7 @@ class TestMKLDNNWithInput1x1Filter1x1(TestWithInput1x1Filter1x1):
 self.dtype = np.float32
-class TestConv3dOp_AsyPadding_MKLDNN(TestConv3dOp):
+class TestConv3DOp_AsyPadding_MKLDNN(TestConv3DOp):
 def init_kernel_type(self):
 self.use_mkldnn = True
 self.data_format = "NCHW"
@@ -72,7 +72,7 @@ class TestConv3dOp_AsyPadding_MKLDNN(TestConv3dOp):
 self.padding_algorithm = "EXPLICIT"
-class TestConv3dOp_Same_MKLDNN(TestConv3dOp_AsyPadding_MKLDNN):
+class TestConv3DOp_Same_MKLDNN(TestConv3DOp_AsyPadding_MKLDNN):
 def init_paddings(self):
 self.pad = [0, 0, 0]
 self.padding_algorithm = "SAME"
@@ -83,7 +83,7 @@ class TestConv3dOp_Same_MKLDNN(TestConv3dOp_AsyPadding_MKLDNN):
 self.dtype = np.float32
-class TestConv3dOp_Valid_MKLDNN(TestConv3dOp_AsyPadding_MKLDNN):
+class TestConv3DOp_Valid_MKLDNN(TestConv3DOp_AsyPadding_MKLDNN):
 def init_paddings(self):
 self.pad = [1, 1, 1]
 self.padding_algorithm = "VALID"
...
@@ -23,7 +23,7 @@ from paddle.fluid.tests.unittests.op_test import OpTest
 from paddle.fluid.tests.unittests.test_pool2d_op import TestPool2D_Op, avg_pool2D_forward_naive, max_pool2D_forward_naive
-class TestPool2dMKLDNNInt8_Op(TestPool2D_Op):
+class TestPool2DMKLDNNInt8_Op(TestPool2D_Op):
 def init_kernel_type(self):
 self.use_mkldnn = True
@@ -51,7 +51,7 @@ class TestPool2dMKLDNNInt8_Op(TestPool2D_Op):
 pass
-class TestCase1Avg(TestPool2dMKLDNNInt8_Op):
+class TestCase1Avg(TestPool2DMKLDNNInt8_Op):
 def init_test_case(self):
 self.shape = [2, 3, 7, 7]
 self.ksize = [3, 3]
@@ -65,7 +65,7 @@ class TestCase1Avg(TestPool2dMKLDNNInt8_Op):
 self.exclusive = True
-class TestCase2Avg(TestPool2dMKLDNNInt8_Op):
+class TestCase2Avg(TestPool2DMKLDNNInt8_Op):
 def init_test_case(self):
 self.shape = [2, 3, 7, 7]
 self.ksize = [3, 3]
@@ -79,7 +79,7 @@ class TestCase2Avg(TestPool2dMKLDNNInt8_Op):
 self.exclusive = False
-class TestCase0Max(TestPool2dMKLDNNInt8_Op):
+class TestCase0Max(TestPool2DMKLDNNInt8_Op):
 def init_pool_type(self):
 self.pool_type = "max"
 self.pool2D_forward_naive = max_pool2D_forward_naive
@@ -114,7 +114,7 @@ def create_test_s8_u8_class(parent):
 globals()[cls_name_u8] = TestU8Case
-create_test_s8_u8_class(TestPool2dMKLDNNInt8_Op)
+create_test_s8_u8_class(TestPool2DMKLDNNInt8_Op)
 create_test_s8_u8_class(TestCase1Avg)
 create_test_s8_u8_class(TestCase2Avg)
 create_test_s8_u8_class(TestCase0Max)
...
...@@ -26,7 +26,7 @@ import paddle.fluid as fluid ...@@ -26,7 +26,7 @@ import paddle.fluid as fluid
import paddle.fluid.dygraph as dygraph import paddle.fluid.dygraph as dygraph
from paddle.fluid import core from paddle.fluid import core
from paddle.fluid.optimizer import SGDOptimizer from paddle.fluid.optimizer import SGDOptimizer
from paddle.nn import Conv2d, Linear, SyncBatchNorm from paddle.nn import Conv2D, Linear, SyncBatchNorm
from paddle.fluid.dygraph.base import to_variable from paddle.fluid.dygraph.base import to_variable
from test_dist_base import runtime_main, TestParallelDyGraphRunnerBase from test_dist_base import runtime_main, TestParallelDyGraphRunnerBase
...@@ -42,7 +42,7 @@ class TestLayer(fluid.dygraph.Layer): ...@@ -42,7 +42,7 @@ class TestLayer(fluid.dygraph.Layer):
act=None): act=None):
super(TestLayer, self).__init__() super(TestLayer, self).__init__()
self._conv = Conv2d( self._conv = Conv2D(
in_channels=num_channels, in_channels=num_channels,
out_channels=num_filters, out_channels=num_filters,
kernel_size=filter_size, kernel_size=filter_size,
...@@ -53,7 +53,7 @@ class TestLayer(fluid.dygraph.Layer): ...@@ -53,7 +53,7 @@ class TestLayer(fluid.dygraph.Layer):
self._sync_batch_norm = SyncBatchNorm(num_filters) self._sync_batch_norm = SyncBatchNorm(num_filters)
self._conv2 = Conv2d( self._conv2 = Conv2D(
in_channels=num_filters, in_channels=num_filters,
out_channels=num_filters, out_channels=num_filters,
kernel_size=filter_size, kernel_size=filter_size,
......
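The import line above is the user-visible part of this hunk: Conv2d becomes Conv2D while Linear and SyncBatchNorm keep their names. A minimal sketch of the renamed import, assuming single-card dygraph mode:

    import paddle
    from paddle.nn import Conv2D, Linear, SyncBatchNorm  # Conv2d no longer exists in 2.0rc

    conv = Conv2D(in_channels=3, out_channels=4, kernel_size=3, padding=1)
    feat = conv(paddle.rand([2, 3, 8, 8]))  # spatial size preserved by padding=1
    print(feat.shape)  # [2, 4, 8, 8]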
...@@ -65,7 +65,7 @@ class TestParallelExecutorBase(unittest.TestCase): ...@@ -65,7 +65,7 @@ class TestParallelExecutorBase(unittest.TestCase):
feed_data_reader, FeedDataReader feed_data_reader, FeedDataReader
), "feed_data_reader must be type of FeedDataReader" ), "feed_data_reader must be type of FeedDataReader"
paddle.manual_seed(1) paddle.seed(1)
paddle.framework.random._manual_program_seed(1) paddle.framework.random._manual_program_seed(1)
main = fluid.Program() main = fluid.Program()
startup = fluid.Program() startup = fluid.Program()
......
...@@ -259,7 +259,7 @@ class TestLSTM(unittest.TestCase): ...@@ -259,7 +259,7 @@ class TestLSTM(unittest.TestCase):
def test_predict(self): def test_predict(self):
place = paddle.set_device(self.place) place = paddle.set_device(self.place)
paddle.manual_seed(123) paddle.seed(123)
np.random.seed(123) np.random.seed(123)
class Net(paddle.nn.Layer): class Net(paddle.nn.Layer):
......
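Both hunks above swap the module-level seeding call. A minimal sketch of the renamed entry point (per the commit notes, Generator objects keep their manual_seed method; only the paddle-level alias changes, and static programs still use paddle.framework.random._manual_program_seed):

    import numpy as np
    import paddle

    paddle.seed(123)             # 2.0rc name; formerly paddle.manual_seed(123)
    a = paddle.rand([2, 2])
    paddle.seed(123)
    b = paddle.rand([2, 2])
    print(np.allclose(a.numpy(), b.numpy()))  # True: same seed, same random stream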
...@@ -72,7 +72,7 @@ def avg_pool1D_forward_naive(x, ...@@ -72,7 +72,7 @@ def avg_pool1D_forward_naive(x,
return out return out
class TestPool1d_API(unittest.TestCase): class TestPool1D_API(unittest.TestCase):
def setUp(self): def setUp(self):
np.random.seed(123) np.random.seed(123)
self.places = [fluid.CPUPlace()] self.places = [fluid.CPUPlace()]
...@@ -89,7 +89,7 @@ class TestPool1d_API(unittest.TestCase): ...@@ -89,7 +89,7 @@ class TestPool1d_API(unittest.TestCase):
self.assertTrue(np.allclose(result.numpy(), result_np)) self.assertTrue(np.allclose(result.numpy(), result_np))
ada_max_pool1d_dg = paddle.nn.layer.AdaptiveAvgPool1d( ada_max_pool1d_dg = paddle.nn.layer.AdaptiveAvgPool1D(
output_size=16) output_size=16)
result = ada_max_pool1d_dg(input) result = ada_max_pool1d_dg(input)
self.assertTrue(np.allclose(result.numpy(), result_np)) self.assertTrue(np.allclose(result.numpy(), result_np))
......
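The class exercised above is the renamed AdaptiveAvgPool1D. A minimal sketch — the output length is fixed by output_size, independent of the input length:

    import paddle

    pool = paddle.nn.AdaptiveAvgPool1D(output_size=16)
    y = pool(paddle.rand([1, 3, 32]))   # layout: N, C, L
    print(y.shape)  # [1, 3, 16]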
...@@ -84,7 +84,7 @@ def adaptive_pool2d_forward(x, output_size, data_format='NCHW', ...@@ -84,7 +84,7 @@ def adaptive_pool2d_forward(x, output_size, data_format='NCHW',
return out return out
class TestAdaptiveAvgPool2dAPI(unittest.TestCase): class TestAdaptiveAvgPool2DAPI(unittest.TestCase):
def setUp(self): def setUp(self):
self.x_np = np.random.random([2, 3, 7, 7]).astype("float32") self.x_np = np.random.random([2, 3, 7, 7]).astype("float32")
self.res_1_np = adaptive_pool2d_forward( self.res_1_np = adaptive_pool2d_forward(
...@@ -179,7 +179,7 @@ class TestAdaptiveAvgPool2dAPI(unittest.TestCase): ...@@ -179,7 +179,7 @@ class TestAdaptiveAvgPool2dAPI(unittest.TestCase):
assert np.allclose(out_6.numpy(), self.res_3_np) assert np.allclose(out_6.numpy(), self.res_3_np)
class TestAdaptiveAvgPool2dClassAPI(unittest.TestCase): class TestAdaptiveAvgPool2DClassAPI(unittest.TestCase):
def setUp(self): def setUp(self):
self.x_np = np.random.random([2, 3, 7, 7]).astype("float32") self.x_np = np.random.random([2, 3, 7, 7]).astype("float32")
self.res_1_np = adaptive_pool2d_forward( self.res_1_np = adaptive_pool2d_forward(
...@@ -207,20 +207,20 @@ class TestAdaptiveAvgPool2dClassAPI(unittest.TestCase): ...@@ -207,20 +207,20 @@ class TestAdaptiveAvgPool2dClassAPI(unittest.TestCase):
paddle.enable_static() paddle.enable_static()
x = paddle.fluid.data(name="x", shape=[2, 3, 7, 7], dtype="float32") x = paddle.fluid.data(name="x", shape=[2, 3, 7, 7], dtype="float32")
adaptive_avg_pool = paddle.nn.AdaptiveAvgPool2d(output_size=[3, 3]) adaptive_avg_pool = paddle.nn.AdaptiveAvgPool2D(output_size=[3, 3])
out_1 = adaptive_avg_pool(x=x) out_1 = adaptive_avg_pool(x=x)
adaptive_avg_pool = paddle.nn.AdaptiveAvgPool2d(output_size=5) adaptive_avg_pool = paddle.nn.AdaptiveAvgPool2D(output_size=5)
out_2 = adaptive_avg_pool(x=x) out_2 = adaptive_avg_pool(x=x)
adaptive_avg_pool = paddle.nn.AdaptiveAvgPool2d(output_size=[2, 5]) adaptive_avg_pool = paddle.nn.AdaptiveAvgPool2D(output_size=[2, 5])
out_3 = adaptive_avg_pool(x=x) out_3 = adaptive_avg_pool(x=x)
adaptive_avg_pool = paddle.nn.AdaptiveAvgPool2d( adaptive_avg_pool = paddle.nn.AdaptiveAvgPool2D(
output_size=[3, 3], data_format="NHWC") output_size=[3, 3], data_format="NHWC")
out_4 = adaptive_avg_pool(x=x) out_4 = adaptive_avg_pool(x=x)
adaptive_avg_pool = paddle.nn.AdaptiveAvgPool2d( adaptive_avg_pool = paddle.nn.AdaptiveAvgPool2D(
output_size=[None, 3]) output_size=[None, 3])
out_5 = adaptive_avg_pool(x=x) out_5 = adaptive_avg_pool(x=x)
...@@ -247,20 +247,20 @@ class TestAdaptiveAvgPool2dClassAPI(unittest.TestCase): ...@@ -247,20 +247,20 @@ class TestAdaptiveAvgPool2dClassAPI(unittest.TestCase):
paddle.disable_static(place=place) paddle.disable_static(place=place)
x = paddle.to_tensor(self.x_np) x = paddle.to_tensor(self.x_np)
adaptive_avg_pool = paddle.nn.AdaptiveAvgPool2d(output_size=[3, 3]) adaptive_avg_pool = paddle.nn.AdaptiveAvgPool2D(output_size=[3, 3])
out_1 = adaptive_avg_pool(x=x) out_1 = adaptive_avg_pool(x=x)
adaptive_avg_pool = paddle.nn.AdaptiveAvgPool2d(output_size=5) adaptive_avg_pool = paddle.nn.AdaptiveAvgPool2D(output_size=5)
out_2 = adaptive_avg_pool(x=x) out_2 = adaptive_avg_pool(x=x)
adaptive_avg_pool = paddle.nn.AdaptiveAvgPool2d(output_size=[2, 5]) adaptive_avg_pool = paddle.nn.AdaptiveAvgPool2D(output_size=[2, 5])
out_3 = adaptive_avg_pool(x=x) out_3 = adaptive_avg_pool(x=x)
adaptive_avg_pool = paddle.nn.AdaptiveAvgPool2d( adaptive_avg_pool = paddle.nn.AdaptiveAvgPool2D(
output_size=[3, 3], data_format="NHWC") output_size=[3, 3], data_format="NHWC")
out_4 = adaptive_avg_pool(x=x) out_4 = adaptive_avg_pool(x=x)
adaptive_avg_pool = paddle.nn.AdaptiveAvgPool2d( adaptive_avg_pool = paddle.nn.AdaptiveAvgPool2D(
output_size=[None, 3]) output_size=[None, 3])
out_5 = adaptive_avg_pool(x=x) out_5 = adaptive_avg_pool(x=x)
......
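The hunks above exercise every output_size form of the renamed class. A minimal sketch of the same variants in dygraph mode:

    import paddle

    x = paddle.rand([2, 3, 7, 7])
    print(paddle.nn.AdaptiveAvgPool2D(output_size=3)(x).shape)          # [2, 3, 3, 3]
    print(paddle.nn.AdaptiveAvgPool2D(output_size=[2, 5])(x).shape)     # [2, 3, 2, 5]
    print(paddle.nn.AdaptiveAvgPool2D(output_size=[None, 3])(x).shape)  # [2, 3, 7, 3]; None keeps that input dim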
...@@ -99,7 +99,7 @@ def adaptive_pool3d_forward(x, ...@@ -99,7 +99,7 @@ def adaptive_pool3d_forward(x,
return out return out
class TestAdaptiveAvgPool3dAPI(unittest.TestCase): class TestAdaptiveAvgPool3DAPI(unittest.TestCase):
def setUp(self): def setUp(self):
self.x_np = np.random.random([2, 3, 5, 7, 7]).astype("float32") self.x_np = np.random.random([2, 3, 5, 7, 7]).astype("float32")
self.res_1_np = adaptive_pool3d_forward( self.res_1_np = adaptive_pool3d_forward(
...@@ -125,7 +125,8 @@ class TestAdaptiveAvgPool3dAPI(unittest.TestCase): ...@@ -125,7 +125,8 @@ class TestAdaptiveAvgPool3dAPI(unittest.TestCase):
if core.is_compiled_with_cuda() else [False]): if core.is_compiled_with_cuda() else [False]):
place = paddle.CUDAPlace(0) if use_cuda else paddle.CPUPlace() place = paddle.CUDAPlace(0) if use_cuda else paddle.CPUPlace()
paddle.enable_static() paddle.enable_static()
x = paddle.fluid.data(name="x", shape=[2, 3, 5, 7, 7], dtype="float32") x = paddle.fluid.data(
name="x", shape=[2, 3, 5, 7, 7], dtype="float32")
out_1 = paddle.nn.functional.adaptive_avg_pool3d( out_1 = paddle.nn.functional.adaptive_avg_pool3d(
x=x, output_size=[3, 3, 3]) x=x, output_size=[3, 3, 3])
...@@ -194,7 +195,7 @@ class TestAdaptiveAvgPool3dAPI(unittest.TestCase): ...@@ -194,7 +195,7 @@ class TestAdaptiveAvgPool3dAPI(unittest.TestCase):
assert np.allclose(out_6.numpy(), self.res_3_np) assert np.allclose(out_6.numpy(), self.res_3_np)
class TestAdaptiveAvgPool3dClassAPI(unittest.TestCase): class TestAdaptiveAvgPool3DClassAPI(unittest.TestCase):
def setUp(self): def setUp(self):
self.x_np = np.random.random([2, 3, 5, 7, 7]).astype("float32") self.x_np = np.random.random([2, 3, 5, 7, 7]).astype("float32")
self.res_1_np = adaptive_pool3d_forward( self.res_1_np = adaptive_pool3d_forward(
...@@ -220,24 +221,25 @@ class TestAdaptiveAvgPool3dClassAPI(unittest.TestCase): ...@@ -220,24 +221,25 @@ class TestAdaptiveAvgPool3dClassAPI(unittest.TestCase):
if core.is_compiled_with_cuda() else [False]): if core.is_compiled_with_cuda() else [False]):
place = paddle.CUDAPlace(0) if use_cuda else paddle.CPUPlace() place = paddle.CUDAPlace(0) if use_cuda else paddle.CPUPlace()
paddle.enable_static() paddle.enable_static()
x = paddle.fluid.data(name="x", shape=[2, 3, 5, 7, 7], dtype="float32") x = paddle.fluid.data(
name="x", shape=[2, 3, 5, 7, 7], dtype="float32")
adaptive_avg_pool = paddle.nn.AdaptiveAvgPool3d( adaptive_avg_pool = paddle.nn.AdaptiveAvgPool3D(
output_size=[3, 3, 3]) output_size=[3, 3, 3])
out_1 = adaptive_avg_pool(x=x) out_1 = adaptive_avg_pool(x=x)
adaptive_avg_pool = paddle.nn.AdaptiveAvgPool3d(output_size=5) adaptive_avg_pool = paddle.nn.AdaptiveAvgPool3D(output_size=5)
out_2 = adaptive_avg_pool(x=x) out_2 = adaptive_avg_pool(x=x)
adaptive_avg_pool = paddle.nn.AdaptiveAvgPool3d( adaptive_avg_pool = paddle.nn.AdaptiveAvgPool3D(
output_size=[2, 3, 5]) output_size=[2, 3, 5])
out_3 = adaptive_avg_pool(x=x) out_3 = adaptive_avg_pool(x=x)
adaptive_avg_pool = paddle.nn.AdaptiveAvgPool3d( adaptive_avg_pool = paddle.nn.AdaptiveAvgPool3D(
output_size=[3, 3, 3], data_format="NDHWC") output_size=[3, 3, 3], data_format="NDHWC")
out_4 = adaptive_avg_pool(x=x) out_4 = adaptive_avg_pool(x=x)
adaptive_avg_pool = paddle.nn.AdaptiveAvgPool3d( adaptive_avg_pool = paddle.nn.AdaptiveAvgPool3D(
output_size=[None, 3, None]) output_size=[None, 3, None])
out_5 = adaptive_avg_pool(x=x) out_5 = adaptive_avg_pool(x=x)
...@@ -264,22 +266,22 @@ class TestAdaptiveAvgPool3dClassAPI(unittest.TestCase): ...@@ -264,22 +266,22 @@ class TestAdaptiveAvgPool3dClassAPI(unittest.TestCase):
paddle.disable_static(place=place) paddle.disable_static(place=place)
x = paddle.to_tensor(self.x_np) x = paddle.to_tensor(self.x_np)
adaptive_avg_pool = paddle.nn.AdaptiveAvgPool3d( adaptive_avg_pool = paddle.nn.AdaptiveAvgPool3D(
output_size=[3, 3, 3]) output_size=[3, 3, 3])
out_1 = adaptive_avg_pool(x=x) out_1 = adaptive_avg_pool(x=x)
adaptive_avg_pool = paddle.nn.AdaptiveAvgPool3d(output_size=5) adaptive_avg_pool = paddle.nn.AdaptiveAvgPool3D(output_size=5)
out_2 = adaptive_avg_pool(x=x) out_2 = adaptive_avg_pool(x=x)
adaptive_avg_pool = paddle.nn.AdaptiveAvgPool3d( adaptive_avg_pool = paddle.nn.AdaptiveAvgPool3D(
output_size=[2, 3, 5]) output_size=[2, 3, 5])
out_3 = adaptive_avg_pool(x=x) out_3 = adaptive_avg_pool(x=x)
adaptive_avg_pool = paddle.nn.AdaptiveAvgPool3d( adaptive_avg_pool = paddle.nn.AdaptiveAvgPool3D(
output_size=[3, 3, 3], data_format="NDHWC") output_size=[3, 3, 3], data_format="NDHWC")
out_4 = adaptive_avg_pool(x=x) out_4 = adaptive_avg_pool(x=x)
adaptive_avg_pool = paddle.nn.AdaptiveAvgPool3d( adaptive_avg_pool = paddle.nn.AdaptiveAvgPool3D(
output_size=[None, 3, None]) output_size=[None, 3, None])
out_5 = adaptive_avg_pool(x=x) out_5 = adaptive_avg_pool(x=x)
......
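Same rename one dimension up; the NDHWC branch above moves channels to the last axis. A minimal sketch:

    import paddle

    x = paddle.rand([2, 5, 7, 7, 3])    # layout: N, D, H, W, C
    pool = paddle.nn.AdaptiveAvgPool3D(output_size=[3, 3, 3], data_format="NDHWC")
    print(pool(x).shape)  # [2, 3, 3, 3, 3]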
...@@ -63,7 +63,7 @@ def max_pool1D_forward_naive(x, ...@@ -63,7 +63,7 @@ def max_pool1D_forward_naive(x,
return out return out
class TestPool1d_API(unittest.TestCase): class TestPool1D_API(unittest.TestCase):
def setUp(self): def setUp(self):
np.random.seed(123) np.random.seed(123)
self.places = [fluid.CPUPlace()] self.places = [fluid.CPUPlace()]
...@@ -80,7 +80,7 @@ class TestPool1d_API(unittest.TestCase): ...@@ -80,7 +80,7 @@ class TestPool1d_API(unittest.TestCase):
input_np, ksize=[16], strides=[0], paddings=[0], adaptive=True) input_np, ksize=[16], strides=[0], paddings=[0], adaptive=True)
self.assertTrue(np.allclose(result.numpy(), result_np)) self.assertTrue(np.allclose(result.numpy(), result_np))
ada_max_pool1d_dg = paddle.nn.layer.AdaptiveMaxPool1d( ada_max_pool1d_dg = paddle.nn.layer.AdaptiveMaxPool1D(
output_size=16) output_size=16)
result = ada_max_pool1d_dg(input) result = ada_max_pool1d_dg(input)
self.assertTrue(np.allclose(result.numpy(), result_np)) self.assertTrue(np.allclose(result.numpy(), result_np))
......
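The max-pool counterpart gets the identical 1d→1D rename. A minimal sketch:

    import paddle

    pool = paddle.nn.AdaptiveMaxPool1D(output_size=16)
    print(pool(paddle.rand([1, 3, 32])).shape)  # [1, 3, 16]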
...@@ -84,7 +84,7 @@ def adaptive_pool2d_forward(x, output_size, data_format='NCHW', ...@@ -84,7 +84,7 @@ def adaptive_pool2d_forward(x, output_size, data_format='NCHW',
return out return out
class TestAdaptiveMaxPool2dAPI(unittest.TestCase): class TestAdaptiveMaxPool2DAPI(unittest.TestCase):
def setUp(self): def setUp(self):
self.x_np = np.random.random([2, 3, 7, 7]).astype("float32") self.x_np = np.random.random([2, 3, 7, 7]).astype("float32")
self.res_1_np = adaptive_pool2d_forward( self.res_1_np = adaptive_pool2d_forward(
...@@ -174,7 +174,7 @@ class TestAdaptiveMaxPool2dAPI(unittest.TestCase): ...@@ -174,7 +174,7 @@ class TestAdaptiveMaxPool2dAPI(unittest.TestCase):
assert np.allclose(out_5.numpy(), self.res_5_np) assert np.allclose(out_5.numpy(), self.res_5_np)
class TestAdaptiveMaxPool2dClassAPI(unittest.TestCase): class TestAdaptiveMaxPool2DClassAPI(unittest.TestCase):
def setUp(self): def setUp(self):
self.x_np = np.random.random([2, 3, 7, 7]).astype("float32") self.x_np = np.random.random([2, 3, 7, 7]).astype("float32")
self.res_1_np = adaptive_pool2d_forward( self.res_1_np = adaptive_pool2d_forward(
...@@ -202,20 +202,20 @@ class TestAdaptiveMaxPool2dClassAPI(unittest.TestCase): ...@@ -202,20 +202,20 @@ class TestAdaptiveMaxPool2dClassAPI(unittest.TestCase):
paddle.enable_static() paddle.enable_static()
x = paddle.fluid.data(name="x", shape=[2, 3, 7, 7], dtype="float32") x = paddle.fluid.data(name="x", shape=[2, 3, 7, 7], dtype="float32")
adaptive_max_pool = paddle.nn.AdaptiveMaxPool2d(output_size=[3, 3]) adaptive_max_pool = paddle.nn.AdaptiveMaxPool2D(output_size=[3, 3])
out_1 = adaptive_max_pool(x=x) out_1 = adaptive_max_pool(x=x)
adaptive_max_pool = paddle.nn.AdaptiveMaxPool2d(output_size=5) adaptive_max_pool = paddle.nn.AdaptiveMaxPool2D(output_size=5)
out_2 = adaptive_max_pool(x=x) out_2 = adaptive_max_pool(x=x)
adaptive_max_pool = paddle.nn.AdaptiveMaxPool2d(output_size=[2, 5]) adaptive_max_pool = paddle.nn.AdaptiveMaxPool2D(output_size=[2, 5])
out_3 = adaptive_max_pool(x=x) out_3 = adaptive_max_pool(x=x)
# adaptive_max_pool = paddle.nn.AdaptiveMaxPool2d( # adaptive_max_pool = paddle.nn.AdaptiveMaxPool2D(
# output_size=[3, 3], data_format="NHWC") # output_size=[3, 3], data_format="NHWC")
# out_4 = adaptive_max_pool(x=x) # out_4 = adaptive_max_pool(x=x)
adaptive_max_pool = paddle.nn.AdaptiveMaxPool2d( adaptive_max_pool = paddle.nn.AdaptiveMaxPool2D(
output_size=[None, 3]) output_size=[None, 3])
out_5 = adaptive_max_pool(x=x) out_5 = adaptive_max_pool(x=x)
...@@ -242,20 +242,20 @@ class TestAdaptiveMaxPool2dClassAPI(unittest.TestCase): ...@@ -242,20 +242,20 @@ class TestAdaptiveMaxPool2dClassAPI(unittest.TestCase):
paddle.disable_static(place=place) paddle.disable_static(place=place)
x = paddle.to_tensor(self.x_np) x = paddle.to_tensor(self.x_np)
adaptive_max_pool = paddle.nn.AdaptiveMaxPool2d(output_size=[3, 3]) adaptive_max_pool = paddle.nn.AdaptiveMaxPool2D(output_size=[3, 3])
out_1 = adaptive_max_pool(x=x) out_1 = adaptive_max_pool(x=x)
adaptive_max_pool = paddle.nn.AdaptiveMaxPool2d(output_size=5) adaptive_max_pool = paddle.nn.AdaptiveMaxPool2D(output_size=5)
out_2 = adaptive_max_pool(x=x) out_2 = adaptive_max_pool(x=x)
adaptive_max_pool = paddle.nn.AdaptiveMaxPool2d(output_size=[2, 5]) adaptive_max_pool = paddle.nn.AdaptiveMaxPool2D(output_size=[2, 5])
out_3 = adaptive_max_pool(x=x) out_3 = adaptive_max_pool(x=x)
#adaptive_max_pool = paddle.nn.AdaptiveMaxPool2d( #adaptive_max_pool = paddle.nn.AdaptiveMaxPool2D(
# output_size=[3, 3], data_format="NHWC") # output_size=[3, 3], data_format="NHWC")
#out_4 = adaptive_max_pool(x=x) #out_4 = adaptive_max_pool(x=x)
adaptive_max_pool = paddle.nn.AdaptiveMaxPool2d( adaptive_max_pool = paddle.nn.AdaptiveMaxPool2D(
output_size=[None, 3]) output_size=[None, 3])
out_5 = adaptive_max_pool(x=x) out_5 = adaptive_max_pool(x=x)
......
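Note that both hunks above leave the NHWC data_format calls commented out, so only NCHW input is exercised for adaptive max pooling. A minimal sketch of the renamed class:

    import paddle

    x = paddle.rand([2, 3, 7, 7])
    print(paddle.nn.AdaptiveMaxPool2D(output_size=[3, 3])(x).shape)  # [2, 3, 3, 3]
    print(paddle.nn.AdaptiveMaxPool2D(output_size=5)(x).shape)       # [2, 3, 5, 5]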
...@@ -99,7 +99,7 @@ def adaptive_pool3d_forward(x, ...@@ -99,7 +99,7 @@ def adaptive_pool3d_forward(x,
return out return out
class TestAdaptiveMaxPool3dAPI(unittest.TestCase): class TestAdaptiveMaxPool3DAPI(unittest.TestCase):
def setUp(self): def setUp(self):
self.x_np = np.random.random([2, 3, 5, 7, 7]).astype("float32") self.x_np = np.random.random([2, 3, 5, 7, 7]).astype("float32")
self.res_1_np = adaptive_pool3d_forward( self.res_1_np = adaptive_pool3d_forward(
...@@ -125,7 +125,8 @@ class TestAdaptiveMaxPool3dAPI(unittest.TestCase): ...@@ -125,7 +125,8 @@ class TestAdaptiveMaxPool3dAPI(unittest.TestCase):
if core.is_compiled_with_cuda() else [False]): if core.is_compiled_with_cuda() else [False]):
place = paddle.CUDAPlace(0) if use_cuda else paddle.CPUPlace() place = paddle.CUDAPlace(0) if use_cuda else paddle.CPUPlace()
paddle.enable_static() paddle.enable_static()
x = paddle.fluid.data(name="x", shape=[2, 3, 5, 7, 7], dtype="float32") x = paddle.fluid.data(
name="x", shape=[2, 3, 5, 7, 7], dtype="float32")
out_1 = paddle.nn.functional.adaptive_max_pool3d( out_1 = paddle.nn.functional.adaptive_max_pool3d(
x=x, output_size=[3, 3, 3]) x=x, output_size=[3, 3, 3])
...@@ -189,7 +190,7 @@ class TestAdaptiveMaxPool3dAPI(unittest.TestCase): ...@@ -189,7 +190,7 @@ class TestAdaptiveMaxPool3dAPI(unittest.TestCase):
assert np.allclose(out_5.numpy(), self.res_5_np) assert np.allclose(out_5.numpy(), self.res_5_np)
class TestAdaptiveMaxPool3dClassAPI(unittest.TestCase): class TestAdaptiveMaxPool3DClassAPI(unittest.TestCase):
def setUp(self): def setUp(self):
self.x_np = np.random.random([2, 3, 5, 7, 7]).astype("float32") self.x_np = np.random.random([2, 3, 5, 7, 7]).astype("float32")
self.res_1_np = adaptive_pool3d_forward( self.res_1_np = adaptive_pool3d_forward(
...@@ -215,24 +216,25 @@ class TestAdaptiveMaxPool3dClassAPI(unittest.TestCase): ...@@ -215,24 +216,25 @@ class TestAdaptiveMaxPool3dClassAPI(unittest.TestCase):
if core.is_compiled_with_cuda() else [False]): if core.is_compiled_with_cuda() else [False]):
place = paddle.CUDAPlace(0) if use_cuda else paddle.CPUPlace() place = paddle.CUDAPlace(0) if use_cuda else paddle.CPUPlace()
paddle.enable_static() paddle.enable_static()
x = paddle.fluid.data(name="x", shape=[2, 3, 5, 7, 7], dtype="float32") x = paddle.fluid.data(
name="x", shape=[2, 3, 5, 7, 7], dtype="float32")
adaptive_max_pool = paddle.nn.AdaptiveMaxPool3d( adaptive_max_pool = paddle.nn.AdaptiveMaxPool3D(
output_size=[3, 3, 3]) output_size=[3, 3, 3])
out_1 = adaptive_max_pool(x=x) out_1 = adaptive_max_pool(x=x)
adaptive_max_pool = paddle.nn.AdaptiveMaxPool3d(output_size=5) adaptive_max_pool = paddle.nn.AdaptiveMaxPool3D(output_size=5)
out_2 = adaptive_max_pool(x=x) out_2 = adaptive_max_pool(x=x)
adaptive_max_pool = paddle.nn.AdaptiveMaxPool3d( adaptive_max_pool = paddle.nn.AdaptiveMaxPool3D(
output_size=[2, 3, 5]) output_size=[2, 3, 5])
out_3 = adaptive_max_pool(x=x) out_3 = adaptive_max_pool(x=x)
# adaptive_max_pool = paddle.nn.AdaptiveMaxPool3d( # adaptive_max_pool = paddle.nn.AdaptiveMaxPool3D(
# output_size=[3, 3, 3], data_format="NDHWC") # output_size=[3, 3, 3], data_format="NDHWC")
# out_4 = adaptive_max_pool(x=x) # out_4 = adaptive_max_pool(x=x)
adaptive_max_pool = paddle.nn.AdaptiveMaxPool3d( adaptive_max_pool = paddle.nn.AdaptiveMaxPool3D(
output_size=[None, 3, None]) output_size=[None, 3, None])
out_5 = adaptive_max_pool(x=x) out_5 = adaptive_max_pool(x=x)
...@@ -259,22 +261,22 @@ class TestAdaptiveMaxPool3dClassAPI(unittest.TestCase): ...@@ -259,22 +261,22 @@ class TestAdaptiveMaxPool3dClassAPI(unittest.TestCase):
paddle.disable_static(place=place) paddle.disable_static(place=place)
x = paddle.to_tensor(self.x_np) x = paddle.to_tensor(self.x_np)
adaptive_max_pool = paddle.nn.AdaptiveMaxPool3d( adaptive_max_pool = paddle.nn.AdaptiveMaxPool3D(
output_size=[3, 3, 3]) output_size=[3, 3, 3])
out_1 = adaptive_max_pool(x=x) out_1 = adaptive_max_pool(x=x)
adaptive_max_pool = paddle.nn.AdaptiveMaxPool3d(output_size=5) adaptive_max_pool = paddle.nn.AdaptiveMaxPool3D(output_size=5)
out_2 = adaptive_max_pool(x=x) out_2 = adaptive_max_pool(x=x)
adaptive_max_pool = paddle.nn.AdaptiveMaxPool3d( adaptive_max_pool = paddle.nn.AdaptiveMaxPool3D(
output_size=[2, 3, 5]) output_size=[2, 3, 5])
out_3 = adaptive_max_pool(x=x) out_3 = adaptive_max_pool(x=x)
# adaptive_max_pool = paddle.nn.AdaptiveMaxPool3d( # adaptive_max_pool = paddle.nn.AdaptiveMaxPool3D(
# output_size=[3, 3, 3], data_format="NDHWC") # output_size=[3, 3, 3], data_format="NDHWC")
# out_4 = adaptive_max_pool(x=x) # out_4 = adaptive_max_pool(x=x)
adaptive_max_pool = paddle.nn.AdaptiveMaxPool3d( adaptive_max_pool = paddle.nn.AdaptiveMaxPool3D(
output_size=[None, 3, None]) output_size=[None, 3, None])
out_5 = adaptive_max_pool(x=x) out_5 = adaptive_max_pool(x=x)
......
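As in the out_5 case above, None entries in output_size keep the matching input dimension. A minimal sketch:

    import paddle

    x = paddle.rand([2, 3, 5, 7, 7])    # layout: N, C, D, H, W
    pool = paddle.nn.AdaptiveMaxPool3D(output_size=[None, 3, None])
    print(pool(x).shape)  # [2, 3, 5, 3, 7]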
...@@ -32,7 +32,7 @@ class TestBatchNorm(unittest.TestCase): ...@@ -32,7 +32,7 @@ class TestBatchNorm(unittest.TestCase):
places.append(fluid.CUDAPlace(0)) places.append(fluid.CUDAPlace(0))
for p in places: for p in places:
with fluid.dygraph.guard(p): with fluid.dygraph.guard(p):
batch_norm1d = paddle.nn.BatchNorm1d(1, name="test") batch_norm1d = paddle.nn.BatchNorm1D(1, name="test")
def test_error(self): def test_error(self):
places = [fluid.CPUPlace()] places = [fluid.CPUPlace()]
...@@ -45,32 +45,32 @@ class TestBatchNorm(unittest.TestCase): ...@@ -45,32 +45,32 @@ class TestBatchNorm(unittest.TestCase):
def error1d_dataformat(): def error1d_dataformat():
x_data_4 = np.random.random(size=(2, 1, 3, 3)).astype('float32') x_data_4 = np.random.random(size=(2, 1, 3, 3)).astype('float32')
batch_norm1d = paddle.nn.BatchNorm1d(1, data_format='NCDHW') batch_norm1d = paddle.nn.BatchNorm1D(1, data_format='NCDHW')
batch_norm1d(fluid.dygraph.to_variable(x_data_4)) batch_norm1d(fluid.dygraph.to_variable(x_data_4))
def error2d_dataformat(): def error2d_dataformat():
x_data_3 = np.random.random(size=(2, 1, 3)).astype('float32') x_data_3 = np.random.random(size=(2, 1, 3)).astype('float32')
batch_norm2d = paddle.nn.BatchNorm2d(1, data_format='NCDHW') batch_norm2d = paddle.nn.BatchNorm2D(1, data_format='NCDHW')
batch_norm2d(fluid.dygraph.to_variable(x_data_3)) batch_norm2d(fluid.dygraph.to_variable(x_data_3))
def error3d_dataformat(): def error3d_dataformat():
x_data_4 = np.random.random(size=(2, 1, 3, 3)).astype('float32') x_data_4 = np.random.random(size=(2, 1, 3, 3)).astype('float32')
batch_norm3d = paddle.nn.BatchNorm3d(1, data_format='NCL') batch_norm3d = paddle.nn.BatchNorm3D(1, data_format='NCL')
batch_norm3d(fluid.dygraph.to_variable(x_data_4)) batch_norm3d(fluid.dygraph.to_variable(x_data_4))
def error1d(): def error1d():
x_data_4 = np.random.random(size=(2, 1, 3, 3)).astype('float32') x_data_4 = np.random.random(size=(2, 1, 3, 3)).astype('float32')
batch_norm1d = paddle.nn.BatchNorm1d(1) batch_norm1d = paddle.nn.BatchNorm1D(1)
batch_norm1d(fluid.dygraph.to_variable(x_data_4)) batch_norm1d(fluid.dygraph.to_variable(x_data_4))
def error2d(): def error2d():
x_data_3 = np.random.random(size=(2, 1, 3)).astype('float32') x_data_3 = np.random.random(size=(2, 1, 3)).astype('float32')
batch_norm2d = paddle.nn.BatchNorm2d(1) batch_norm2d = paddle.nn.BatchNorm2D(1)
batch_norm2d(fluid.dygraph.to_variable(x_data_3)) batch_norm2d(fluid.dygraph.to_variable(x_data_3))
def error3d(): def error3d():
x_data_4 = np.random.random(size=(2, 1, 3, 3)).astype('float32') x_data_4 = np.random.random(size=(2, 1, 3, 3)).astype('float32')
batch_norm3d = paddle.nn.BatchNorm3d(1) batch_norm3d = paddle.nn.BatchNorm3D(1)
batch_norm3d(fluid.dygraph.to_variable(x_data_4)) batch_norm3d(fluid.dygraph.to_variable(x_data_4))
with fluid.dygraph.guard(p): with fluid.dygraph.guard(p):
...@@ -99,7 +99,7 @@ class TestBatchNorm(unittest.TestCase): ...@@ -99,7 +99,7 @@ class TestBatchNorm(unittest.TestCase):
def compute_v2(x): def compute_v2(x):
with fluid.dygraph.guard(p): with fluid.dygraph.guard(p):
bn = paddle.nn.BatchNorm2d(shape[1]) bn = paddle.nn.BatchNorm2D(shape[1])
y = bn(fluid.dygraph.to_variable(x)) y = bn(fluid.dygraph.to_variable(x))
return y.numpy() return y.numpy()
...@@ -120,7 +120,7 @@ class TestBatchNorm(unittest.TestCase): ...@@ -120,7 +120,7 @@ class TestBatchNorm(unittest.TestCase):
def compute_v4(x): def compute_v4(x):
with fluid.dygraph.guard(p): with fluid.dygraph.guard(p):
bn = paddle.nn.BatchNorm2d( bn = paddle.nn.BatchNorm2D(
shape[1], weight_attr=False, bias_attr=False) shape[1], weight_attr=False, bias_attr=False)
y = bn(fluid.dygraph.to_variable(x)) y = bn(fluid.dygraph.to_variable(x))
return y.numpy() return y.numpy()
...@@ -155,7 +155,7 @@ class TestBatchNorm(unittest.TestCase): ...@@ -155,7 +155,7 @@ class TestBatchNorm(unittest.TestCase):
def compute_v2(x_np): def compute_v2(x_np):
with program_guard(Program(), Program()): with program_guard(Program(), Program()):
bn = paddle.nn.BatchNorm2d(shape[1]) bn = paddle.nn.BatchNorm2D(shape[1])
x = fluid.data(name='x', shape=x_np.shape, dtype=x_np.dtype) x = fluid.data(name='x', shape=x_np.shape, dtype=x_np.dtype)
y = bn(x) y = bn(x)
exe.run(fluid.default_startup_program()) exe.run(fluid.default_startup_program())
...@@ -183,8 +183,8 @@ class TestBatchNormChannelLast(unittest.TestCase): ...@@ -183,8 +183,8 @@ class TestBatchNormChannelLast(unittest.TestCase):
for p in self.places: for p in self.places:
with fluid.dygraph.guard(p): with fluid.dygraph.guard(p):
x = paddle.randn([2, 6, 4]) x = paddle.randn([2, 6, 4])
net1 = paddle.nn.BatchNorm1d(4, data_format="NLC") net1 = paddle.nn.BatchNorm1D(4, data_format="NLC")
net2 = paddle.nn.BatchNorm1d(4) net2 = paddle.nn.BatchNorm1D(4)
net2.weight = net1.weight net2.weight = net1.weight
net2.bias = net1.bias net2.bias = net1.bias
y1 = net1(x) y1 = net1(x)
...@@ -197,8 +197,8 @@ class TestBatchNormChannelLast(unittest.TestCase): ...@@ -197,8 +197,8 @@ class TestBatchNormChannelLast(unittest.TestCase):
for p in self.places: for p in self.places:
with fluid.dygraph.guard(p): with fluid.dygraph.guard(p):
x = paddle.randn([2, 6, 6, 4]) x = paddle.randn([2, 6, 6, 4])
net1 = paddle.nn.BatchNorm2d(4, data_format="NHWC") net1 = paddle.nn.BatchNorm2D(4, data_format="NHWC")
net2 = paddle.nn.BatchNorm2d(4) net2 = paddle.nn.BatchNorm2D(4)
net2.weight = net1.weight net2.weight = net1.weight
net2.bias = net1.bias net2.bias = net1.bias
y1 = net1(x) y1 = net1(x)
...@@ -211,8 +211,8 @@ class TestBatchNormChannelLast(unittest.TestCase): ...@@ -211,8 +211,8 @@ class TestBatchNormChannelLast(unittest.TestCase):
for p in self.places: for p in self.places:
with fluid.dygraph.guard(p): with fluid.dygraph.guard(p):
x = paddle.randn([2, 6, 6, 6, 4]) x = paddle.randn([2, 6, 6, 6, 4])
net1 = paddle.nn.BatchNorm3d(4, data_format="NDHWC") net1 = paddle.nn.BatchNorm3D(4, data_format="NDHWC")
net2 = paddle.nn.BatchNorm3d(4) net2 = paddle.nn.BatchNorm3D(4)
net2.weight = net1.weight net2.weight = net1.weight
net2.bias = net1.bias net2.bias = net1.bias
y1 = net1(x) y1 = net1(x)
......
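The channel-last tests above pair an NLC/NHWC/NDHWC instance with a default one and check they agree on transposed input. A minimal sketch of the renamed batch-norm classes — num_features always counts the channel axis, wherever data_format places it:

    import paddle

    x = paddle.randn([2, 6, 4])   # layout: N, L, C, so num_features is 4
    print(paddle.nn.BatchNorm1D(4, data_format="NLC")(x).shape)           # [2, 6, 4]
    print(paddle.nn.BatchNorm2D(3)(paddle.randn([2, 3, 8, 8])).shape)     # [2, 3, 8, 8]
    print(paddle.nn.BatchNorm3D(3)(paddle.randn([2, 3, 4, 8, 8])).shape)  # [2, 3, 4, 8, 8]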
...@@ -47,7 +47,7 @@ class InplaceTestBase(unittest.TestCase): ...@@ -47,7 +47,7 @@ class InplaceTestBase(unittest.TestCase):
def build_program_and_scope(self): def build_program_and_scope(self):
self.place = fluid.CUDAPlace(0) if self.use_cuda else fluid.CPUPlace() self.place = fluid.CUDAPlace(0) if self.use_cuda else fluid.CPUPlace()
paddle.manual_seed(1) paddle.seed(1)
paddle.framework.random._manual_program_seed(1) paddle.framework.random._manual_program_seed(1)
startup_program = fluid.Program() startup_program = fluid.Program()
main_program = fluid.Program() main_program = fluid.Program()
......
...@@ -30,7 +30,7 @@ class TestCompiledProgram(unittest.TestCase): ...@@ -30,7 +30,7 @@ class TestCompiledProgram(unittest.TestCase):
self.label = np.random.randint( self.label = np.random.randint(
low=0, high=10, size=[16, 1], dtype=np.int64) low=0, high=10, size=[16, 1], dtype=np.int64)
with new_program_scope(): with new_program_scope():
paddle.manual_seed(self.seed) paddle.seed(self.seed)
paddle.framework.random._manual_program_seed(self.seed) paddle.framework.random._manual_program_seed(self.seed)
place = fluid.CUDAPlace(0) if core.is_compiled_with_cuda( place = fluid.CUDAPlace(0) if core.is_compiled_with_cuda(
) else fluid.CPUPlace() ) else fluid.CPUPlace()
...@@ -47,7 +47,7 @@ class TestCompiledProgram(unittest.TestCase): ...@@ -47,7 +47,7 @@ class TestCompiledProgram(unittest.TestCase):
def test_compiled_program_base(self): def test_compiled_program_base(self):
with new_program_scope(): with new_program_scope():
paddle.manual_seed(self.seed) paddle.seed(self.seed)
paddle.framework.random._manual_program_seed(self.seed) paddle.framework.random._manual_program_seed(self.seed)
place = fluid.CUDAPlace(0) if core.is_compiled_with_cuda( place = fluid.CUDAPlace(0) if core.is_compiled_with_cuda(
) else fluid.CPUPlace() ) else fluid.CPUPlace()
...@@ -65,7 +65,7 @@ class TestCompiledProgram(unittest.TestCase): ...@@ -65,7 +65,7 @@ class TestCompiledProgram(unittest.TestCase):
def test_compiled_program_with_data_parallel(self): def test_compiled_program_with_data_parallel(self):
with new_program_scope(): with new_program_scope():
paddle.manual_seed(self.seed) paddle.seed(self.seed)
paddle.framework.random._manual_program_seed(self.seed) paddle.framework.random._manual_program_seed(self.seed)
place = fluid.CUDAPlace(0) if core.is_compiled_with_cuda( place = fluid.CUDAPlace(0) if core.is_compiled_with_cuda(
) else fluid.CPUPlace() ) else fluid.CPUPlace()
......
...@@ -21,7 +21,7 @@ import paddle.fluid.initializer as I ...@@ -21,7 +21,7 @@ import paddle.fluid.initializer as I
import unittest import unittest
class Conv1dTestCase(unittest.TestCase): class Conv1DTestCase(unittest.TestCase):
def __init__(self, def __init__(self,
methodName='runTest', methodName='runTest',
batch_size=4, batch_size=4,
...@@ -37,7 +37,7 @@ class Conv1dTestCase(unittest.TestCase): ...@@ -37,7 +37,7 @@ class Conv1dTestCase(unittest.TestCase):
no_bias=False, no_bias=False,
dtype="float32", dtype="float32",
data_format="NCL"): data_format="NCL"):
super(Conv1dTestCase, self).__init__(methodName) super(Conv1DTestCase, self).__init__(methodName)
self.batch_size = batch_size self.batch_size = batch_size
self.num_channels = num_channels self.num_channels = num_channels
self.num_filters = num_filters self.num_filters = num_filters
...@@ -107,7 +107,7 @@ class Conv1dTestCase(unittest.TestCase): ...@@ -107,7 +107,7 @@ class Conv1dTestCase(unittest.TestCase):
def paddle_nn_layer(self): def paddle_nn_layer(self):
x_var = paddle.to_tensor(self.input) x_var = paddle.to_tensor(self.input)
conv = nn.Conv1d( conv = nn.Conv1D(
self.num_channels, self.num_channels,
self.num_filters, self.num_filters,
self.filter_size, self.filter_size,
...@@ -139,7 +139,7 @@ class Conv1dTestCase(unittest.TestCase): ...@@ -139,7 +139,7 @@ class Conv1dTestCase(unittest.TestCase):
self._test_equivalence(place) self._test_equivalence(place)
class Conv1dErrorTestCase(Conv1dTestCase): class Conv1DErrorTestCase(Conv1DTestCase):
def runTest(self): def runTest(self):
place = fluid.CPUPlace() place = fluid.CPUPlace()
with dg.guard(place): with dg.guard(place):
...@@ -147,7 +147,7 @@ class Conv1dErrorTestCase(Conv1dTestCase): ...@@ -147,7 +147,7 @@ class Conv1dErrorTestCase(Conv1dTestCase):
self.paddle_nn_layer() self.paddle_nn_layer()
class Conv1dTypeErrorTestCase(Conv1dTestCase): class Conv1DTypeErrorTestCase(Conv1DTestCase):
def runTest(self): def runTest(self):
place = fluid.CPUPlace() place = fluid.CPUPlace()
with dg.guard(place): with dg.guard(place):
...@@ -156,27 +156,27 @@ class Conv1dTypeErrorTestCase(Conv1dTestCase): ...@@ -156,27 +156,27 @@ class Conv1dTypeErrorTestCase(Conv1dTestCase):
def add_cases(suite): def add_cases(suite):
suite.addTest(Conv1dTestCase(methodName='runTest')) suite.addTest(Conv1DTestCase(methodName='runTest'))
suite.addTest(Conv1dTestCase(methodName='runTest', stride=[1], dilation=2)) suite.addTest(Conv1DTestCase(methodName='runTest', stride=[1], dilation=2))
suite.addTest(Conv1dTestCase(methodName='runTest', stride=2, dilation=(1))) suite.addTest(Conv1DTestCase(methodName='runTest', stride=2, dilation=(1)))
suite.addTest( suite.addTest(
Conv1dTestCase( Conv1DTestCase(
methodName='runTest', padding="same", no_bias=True)) methodName='runTest', padding="same", no_bias=True))
suite.addTest( suite.addTest(
Conv1dTestCase( Conv1DTestCase(
methodName='runTest', filter_size=3, padding='valid')) methodName='runTest', filter_size=3, padding='valid'))
suite.addTest( suite.addTest(
Conv1dTestCase( Conv1DTestCase(
methodName='runTest', padding=2, data_format='NLC')) methodName='runTest', padding=2, data_format='NLC'))
suite.addTest(Conv1dTestCase(methodName='runTest', padding=[1])) suite.addTest(Conv1DTestCase(methodName='runTest', padding=[1]))
suite.addTest(Conv1dTestCase(methodName='runTest', padding=[1, 2])) suite.addTest(Conv1DTestCase(methodName='runTest', padding=[1, 2]))
suite.addTest(Conv1dTestCase(methodName='runTest', padding=2)) suite.addTest(Conv1DTestCase(methodName='runTest', padding=2))
suite.addTest(Conv1dTestCase(methodName='runTest')) suite.addTest(Conv1DTestCase(methodName='runTest'))
suite.addTest( suite.addTest(
Conv1dTestCase( Conv1DTestCase(
methodName='runTest', groups=2, padding="valid")) methodName='runTest', groups=2, padding="valid"))
suite.addTest( suite.addTest(
Conv1dTestCase( Conv1DTestCase(
methodName='runTest', methodName='runTest',
num_filters=6, num_filters=6,
num_channels=3, num_channels=3,
...@@ -187,22 +187,22 @@ def add_cases(suite): ...@@ -187,22 +187,22 @@ def add_cases(suite):
def add_error_cases(suite): def add_error_cases(suite):
suite.addTest( suite.addTest(
Conv1dTypeErrorTestCase( Conv1DTypeErrorTestCase(
methodName='runTest', padding_mode="reflect", padding="valid")) methodName='runTest', padding_mode="reflect", padding="valid"))
suite.addTest( suite.addTest(
Conv1dErrorTestCase( Conv1DErrorTestCase(
methodName='runTest', data_format="VALID")) methodName='runTest', data_format="VALID"))
suite.addTest( suite.addTest(
Conv1dErrorTestCase( Conv1DErrorTestCase(
methodName='runTest', padding_mode="VALID")) methodName='runTest', padding_mode="VALID"))
suite.addTest( suite.addTest(
Conv1dErrorTestCase( Conv1DErrorTestCase(
methodName='runTest', num_channels=5, groups=2)) methodName='runTest', num_channels=5, groups=2))
suite.addTest( suite.addTest(
Conv1dErrorTestCase( Conv1DErrorTestCase(
methodName='runTest', num_filters=8, num_channels=15, groups=3)) methodName='runTest', num_filters=8, num_channels=15, groups=3))
suite.addTest( suite.addTest(
Conv1dErrorTestCase( Conv1DErrorTestCase(
methodName='runTest', padding=[1, 2, 3, 4, 5])) methodName='runTest', padding=[1, 2, 3, 4, 5]))
......
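The suite above runs the renamed Conv1D under several padding schemes; padding="same" preserves the temporal length. A minimal sketch:

    import paddle

    conv = paddle.nn.Conv1D(in_channels=3, out_channels=6, kernel_size=3, padding="same")
    y = conv(paddle.rand([4, 3, 16]))   # layout: N, C, L
    print(y.shape)  # [4, 6, 16]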
...@@ -21,7 +21,7 @@ import paddle.fluid.initializer as I ...@@ -21,7 +21,7 @@ import paddle.fluid.initializer as I
import unittest import unittest
class ConvTranspose1dTestCase(unittest.TestCase): class Conv1DTransposeTestCase(unittest.TestCase):
def __init__(self, def __init__(self,
methodName='runTest', methodName='runTest',
batch_size=4, batch_size=4,
...@@ -38,7 +38,7 @@ class ConvTranspose1dTestCase(unittest.TestCase): ...@@ -38,7 +38,7 @@ class ConvTranspose1dTestCase(unittest.TestCase):
no_bias=False, no_bias=False,
data_format="NCL", data_format="NCL",
dtype="float32"): dtype="float32"):
super(ConvTranspose1dTestCase, self).__init__(methodName) super(Conv1DTransposeTestCase, self).__init__(methodName)
self.batch_size = batch_size self.batch_size = batch_size
self.in_channels = in_channels self.in_channels = in_channels
self.out_channels = out_channels self.out_channels = out_channels
...@@ -113,7 +113,7 @@ class ConvTranspose1dTestCase(unittest.TestCase): ...@@ -113,7 +113,7 @@ class ConvTranspose1dTestCase(unittest.TestCase):
def paddle_nn_layer(self): def paddle_nn_layer(self):
x_var = paddle.to_tensor(self.input) x_var = paddle.to_tensor(self.input)
conv = nn.ConvTranspose1d( conv = nn.Conv1DTranspose(
self.in_channels, self.in_channels,
self.out_channels, self.out_channels,
self.filter_size, self.filter_size,
...@@ -145,7 +145,7 @@ class ConvTranspose1dTestCase(unittest.TestCase): ...@@ -145,7 +145,7 @@ class ConvTranspose1dTestCase(unittest.TestCase):
self._test_equivalence(place) self._test_equivalence(place)
class ConvTranspose1dErrorTestCase(ConvTranspose1dTestCase): class Conv1DTransposeErrorTestCase(Conv1DTransposeTestCase):
def runTest(self): def runTest(self):
place = fluid.CPUPlace() place = fluid.CPUPlace()
with dg.guard(place): with dg.guard(place):
...@@ -154,68 +154,68 @@ class ConvTranspose1dErrorTestCase(ConvTranspose1dTestCase): ...@@ -154,68 +154,68 @@ class ConvTranspose1dErrorTestCase(ConvTranspose1dTestCase):
def add_cases(suite): def add_cases(suite):
suite.addTest(ConvTranspose1dTestCase(methodName='runTest')) suite.addTest(Conv1DTransposeTestCase(methodName='runTest'))
suite.addTest( suite.addTest(
ConvTranspose1dTestCase( Conv1DTransposeTestCase(
methodName='runTest', stride=[2], no_bias=True, dilation=2)) methodName='runTest', stride=[2], no_bias=True, dilation=2))
suite.addTest( suite.addTest(
ConvTranspose1dTestCase( Conv1DTransposeTestCase(
methodName='runTest', methodName='runTest',
filter_size=(3), filter_size=(3),
output_size=[36], output_size=[36],
stride=[2], stride=[2],
dilation=2)) dilation=2))
suite.addTest( suite.addTest(
ConvTranspose1dTestCase( Conv1DTransposeTestCase(
methodName='runTest', stride=2, dilation=(2))) methodName='runTest', stride=2, dilation=(2)))
suite.addTest( suite.addTest(
ConvTranspose1dTestCase( Conv1DTransposeTestCase(
methodName='runTest', padding="valid")) methodName='runTest', padding="valid"))
suite.addTest( suite.addTest(
ConvTranspose1dTestCase( Conv1DTransposeTestCase(
methodName='runTest', padding='valid')) methodName='runTest', padding='valid'))
suite.addTest( suite.addTest(
ConvTranspose1dTestCase( Conv1DTransposeTestCase(
methodName='runTest', filter_size=1, padding=3)) methodName='runTest', filter_size=1, padding=3))
suite.addTest(ConvTranspose1dTestCase(methodName='runTest', padding=[2])) suite.addTest(Conv1DTransposeTestCase(methodName='runTest', padding=[2]))
suite.addTest( suite.addTest(
ConvTranspose1dTestCase( Conv1DTransposeTestCase(
methodName='runTest', data_format="NLC")) methodName='runTest', data_format="NLC"))
suite.addTest( suite.addTest(
ConvTranspose1dTestCase( Conv1DTransposeTestCase(
methodName='runTest', groups=2, padding="valid")) methodName='runTest', groups=2, padding="valid"))
suite.addTest( suite.addTest(
ConvTranspose1dTestCase( Conv1DTransposeTestCase(
methodName='runTest', methodName='runTest',
out_channels=6, out_channels=6,
in_channels=3, in_channels=3,
groups=3, groups=3,
padding="valid")) padding="valid"))
suite.addTest( suite.addTest(
ConvTranspose1dTestCase( Conv1DTransposeTestCase(
methodName='runTest', methodName='runTest',
data_format="NLC", data_format="NLC",
spartial_shape=16, spartial_shape=16,
output_size=18)) output_size=18))
suite.addTest( suite.addTest(
ConvTranspose1dTestCase( Conv1DTransposeTestCase(
methodName='runTest', data_format="NLC", stride=3, methodName='runTest', data_format="NLC", stride=3,
output_padding=2)) output_padding=2))
suite.addTest(ConvTranspose1dTestCase(methodName='runTest', padding=[1, 2])) suite.addTest(Conv1DTransposeTestCase(methodName='runTest', padding=[1, 2]))
def add_error_cases(suite): def add_error_cases(suite):
suite.addTest( suite.addTest(
ConvTranspose1dErrorTestCase( Conv1DTransposeErrorTestCase(
methodName='runTest', data_format="not_valid")) methodName='runTest', data_format="not_valid"))
suite.addTest( suite.addTest(
ConvTranspose1dErrorTestCase( Conv1DTransposeErrorTestCase(
methodName='runTest', in_channels=5, groups=2)) methodName='runTest', in_channels=5, groups=2))
suite.addTest( suite.addTest(
ConvTranspose1dErrorTestCase( Conv1DTransposeErrorTestCase(
methodName='runTest', stride=2, output_padding=3)) methodName='runTest', stride=2, output_padding=3))
suite.addTest( suite.addTest(
ConvTranspose1dErrorTestCase( Conv1DTransposeErrorTestCase(
methodName='runTest', output_size="not_valid")) methodName='runTest', output_size="not_valid"))
......
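Note the word order also changes here: ConvTranspose1d becomes Conv1DTranspose, not ConvTranspose1D. A minimal sketch, where the output length for zero padding is (L - 1) * stride + kernel_size:

    import paddle

    deconv = paddle.nn.Conv1DTranspose(in_channels=3, out_channels=6, kernel_size=3, stride=2)
    print(deconv(paddle.rand([4, 3, 16])).shape)  # [4, 6, 33] = (16 - 1) * 2 + 3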
...@@ -45,7 +45,7 @@ def create_test_padding_VALID_class(parent): ...@@ -45,7 +45,7 @@ def create_test_padding_VALID_class(parent):
globals()[cls_name] = TestPaddingVALIDCase globals()[cls_name] = TestPaddingVALIDCase
class TestConv2dFusionOp(OpTest): class TestConv2DFusionOp(OpTest):
def setUp(self): def setUp(self):
self.op_type = "conv2d_fusion" self.op_type = "conv2d_fusion"
self.exhaustive_search = False self.exhaustive_search = False
...@@ -157,28 +157,28 @@ class TestConv2dFusionOp(OpTest): ...@@ -157,28 +157,28 @@ class TestConv2dFusionOp(OpTest):
self.padding_algorithm = "EXPLICIT" self.padding_algorithm = "EXPLICIT"
class TestWithoutResidual(TestConv2dFusionOp): class TestWithoutResidual(TestConv2DFusionOp):
def init_residual(self): def init_residual(self):
self.add_residual_data = False self.add_residual_data = False
class TestIdentityActivation(TestConv2dFusionOp): class TestIdentityActivation(TestConv2DFusionOp):
def init_activation(self): def init_activation(self):
self.activation = 'identity' self.activation = 'identity'
class TestIdentityActivation(TestConv2dFusionOp): class TestIdentityActivation(TestConv2DFusionOp):
def init_activation(self): def init_activation(self):
self.activation = 'identity' self.activation = 'identity'
self.add_residual_data = False self.add_residual_data = False
class TestWithGroup(TestConv2dFusionOp): class TestWithGroup(TestConv2DFusionOp):
def init_group(self): def init_group(self):
self.groups = 3 self.groups = 3
class TestWithDilation(TestConv2dFusionOp): class TestWithDilation(TestConv2DFusionOp):
def init_test_case(self): def init_test_case(self):
self.pad = [0, 0] self.pad = [0, 0]
self.stride = [1, 1] self.stride = [1, 1]
...@@ -194,12 +194,12 @@ class TestWithDilation(TestConv2dFusionOp): ...@@ -194,12 +194,12 @@ class TestWithDilation(TestConv2dFusionOp):
self.groups = 3 self.groups = 3
class TestCUDNNExhaustiveSearch(TestConv2dFusionOp): class TestCUDNNExhaustiveSearch(TestConv2DFusionOp):
def set_search_method(self): def set_search_method(self):
self.exhaustive_search = True self.exhaustive_search = True
class TestMultipleOutputs(TestConv2dFusionOp): class TestMultipleOutputs(TestConv2DFusionOp):
def init_test_case(self): def init_test_case(self):
self.pad = [1, 1] self.pad = [1, 1]
self.stride = [1, 1] self.stride = [1, 1]
...@@ -215,13 +215,13 @@ class TestMultipleOutputs(TestConv2dFusionOp): ...@@ -215,13 +215,13 @@ class TestMultipleOutputs(TestConv2dFusionOp):
self.outputs['Outputs'] = [('out1', out1), ('out2', out2)] self.outputs['Outputs'] = [('out1', out1), ('out2', out2)]
class TestAsyPadding(TestConv2dFusionOp): class TestAsyPadding(TestConv2DFusionOp):
def init_paddings(self): def init_paddings(self):
self.pad = [0, 0, 1, 2] self.pad = [0, 0, 1, 2]
self.padding_algorithm = "EXPLICIT" self.padding_algorithm = "EXPLICIT"
class TestWithPad_AsyPadding(TestConv2dFusionOp): class TestWithPad_AsyPadding(TestConv2DFusionOp):
def init_test_case(self): def init_test_case(self):
self.stride = [1, 1] self.stride = [1, 1]
self.input_size = [2, 3, 10, 10] # NCHW self.input_size = [2, 3, 10, 10] # NCHW
...@@ -234,7 +234,7 @@ class TestWithPad_AsyPadding(TestConv2dFusionOp): ...@@ -234,7 +234,7 @@ class TestWithPad_AsyPadding(TestConv2dFusionOp):
self.padding_algorithm = "EXPLICIT" self.padding_algorithm = "EXPLICIT"
class TestWithStride_AsyPadding(TestConv2dFusionOp): class TestWithStride_AsyPadding(TestConv2DFusionOp):
def init_test_case(self): def init_test_case(self):
self.stride = [2, 2] self.stride = [2, 2]
self.input_size = [2, 3, 6, 6] # NCHW self.input_size = [2, 3, 6, 6] # NCHW
...@@ -247,7 +247,7 @@ class TestWithStride_AsyPadding(TestConv2dFusionOp): ...@@ -247,7 +247,7 @@ class TestWithStride_AsyPadding(TestConv2dFusionOp):
self.padding_algorithm = "EXPLICIT" self.padding_algorithm = "EXPLICIT"
class TestWith1x1_AsyPadding(TestConv2dFusionOp): class TestWith1x1_AsyPadding(TestConv2DFusionOp):
def init_test_case(self): def init_test_case(self):
self.stride = [1, 1] self.stride = [1, 1]
self.input_size = [2, 3, 5, 5] # NCHW self.input_size = [2, 3, 5, 5] # NCHW
...@@ -263,12 +263,12 @@ class TestWith1x1_AsyPadding(TestConv2dFusionOp): ...@@ -263,12 +263,12 @@ class TestWith1x1_AsyPadding(TestConv2dFusionOp):
self.padding_algorithm = "EXPLICIT" self.padding_algorithm = "EXPLICIT"
class TestWithGroup_AsyPadding(TestConv2dFusionOp): class TestWithGroup_AsyPadding(TestConv2DFusionOp):
def init_group(self): def init_group(self):
self.groups = 3 self.groups = 3
class TestWithDepthWise3x3_AsyPadding(TestConv2dFusionOp): class TestWithDepthWise3x3_AsyPadding(TestConv2DFusionOp):
def init_test_case(self): def init_test_case(self):
self.stride = [1, 1] self.stride = [1, 1]
self.input_size = [3, 4, 10, 10] # NCHW self.input_size = [3, 4, 10, 10] # NCHW
...@@ -287,7 +287,7 @@ class TestWithDepthWise3x3_AsyPadding(TestConv2dFusionOp): ...@@ -287,7 +287,7 @@ class TestWithDepthWise3x3_AsyPadding(TestConv2dFusionOp):
self.padding_algorithm = "EXPLICIT" self.padding_algorithm = "EXPLICIT"
class TestWithDepthWise5x5_AsyPadding(TestConv2dFusionOp): class TestWithDepthWise5x5_AsyPadding(TestConv2DFusionOp):
def init_test_case(self): def init_test_case(self):
self.stride = [1, 1] self.stride = [1, 1]
self.input_size = [2, 4, 10, 10] # NCHW self.input_size = [2, 4, 10, 10] # NCHW
...@@ -303,7 +303,7 @@ class TestWithDepthWise5x5_AsyPadding(TestConv2dFusionOp): ...@@ -303,7 +303,7 @@ class TestWithDepthWise5x5_AsyPadding(TestConv2dFusionOp):
self.padding_algorithm = "EXPLICIT" self.padding_algorithm = "EXPLICIT"
class TestWithDepthWise7x7_AsyPadding(TestConv2dFusionOp): class TestWithDepthWise7x7_AsyPadding(TestConv2DFusionOp):
def init_test_case(self): def init_test_case(self):
self.stride = [2, 2] self.stride = [2, 2]
self.input_size = [2, 8, 10, 10] # NCHW self.input_size = [2, 8, 10, 10] # NCHW
...@@ -319,7 +319,7 @@ class TestWithDepthWise7x7_AsyPadding(TestConv2dFusionOp): ...@@ -319,7 +319,7 @@ class TestWithDepthWise7x7_AsyPadding(TestConv2dFusionOp):
self.padding_algorithm = "EXPLICIT" self.padding_algorithm = "EXPLICIT"
class TestWithDilation_AsyPadding(TestConv2dFusionOp): class TestWithDilation_AsyPadding(TestConv2DFusionOp):
def init_test_case(self): def init_test_case(self):
self.stride = [1, 1] self.stride = [1, 1]
self.input_size = [2, 3, 10, 10] # NCHW self.input_size = [2, 3, 10, 10] # NCHW
...@@ -338,7 +338,7 @@ class TestWithDilation_AsyPadding(TestConv2dFusionOp): ...@@ -338,7 +338,7 @@ class TestWithDilation_AsyPadding(TestConv2dFusionOp):
self.padding_algorithm = "EXPLICIT" self.padding_algorithm = "EXPLICIT"
class TestWithInput1x1Filter1x1_AsyPadding(TestConv2dFusionOp): class TestWithInput1x1Filter1x1_AsyPadding(TestConv2DFusionOp):
def init_test_case(self): def init_test_case(self):
self.stride = [1, 1] self.stride = [1, 1]
self.input_size = [2, 3, 1, 1] # NCHW self.input_size = [2, 3, 1, 1] # NCHW
......
...@@ -166,7 +166,7 @@ class Conv2DTestCase(unittest.TestCase): ...@@ -166,7 +166,7 @@ class Conv2DTestCase(unittest.TestCase):
def paddle_nn_layer(self): def paddle_nn_layer(self):
x_var = dg.to_variable(self.input) x_var = dg.to_variable(self.input)
conv = nn.Conv2d( conv = nn.Conv2D(
self.num_channels, self.num_channels,
self.num_filters, self.num_filters,
self.filter_size, self.filter_size,
......
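Only the class-name casing changes in this hunk; the 2.0 keyword names (in_channels, out_channels, kernel_size) used above are untouched. A minimal sketch:

    import paddle

    conv = paddle.nn.Conv2D(in_channels=3, out_channels=6, kernel_size=3, stride=2, padding=1)
    print(conv(paddle.rand([2, 3, 8, 8])).shape)  # [2, 6, 4, 4]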
...@@ -289,7 +289,7 @@ def create_test_cudnn_padding_VALID_class(parent): ...@@ -289,7 +289,7 @@ def create_test_cudnn_padding_VALID_class(parent):
globals()[cls_name] = TestCUDNNPaddingVALIDCase globals()[cls_name] = TestCUDNNPaddingVALIDCase
class TestConv2dOp(OpTest): class TestConv2DOp(OpTest):
def setUp(self): def setUp(self):
self.op_type = "conv2d" self.op_type = "conv2d"
self.use_cudnn = False self.use_cudnn = False
...@@ -412,7 +412,7 @@ class TestConv2dOp(OpTest): ...@@ -412,7 +412,7 @@ class TestConv2dOp(OpTest):
pass pass
class TestWithPad(TestConv2dOp): class TestWithPad(TestConv2DOp):
def init_test_case(self): def init_test_case(self):
self.pad = [1, 1] self.pad = [1, 1]
self.stride = [1, 1] self.stride = [1, 1]
...@@ -422,7 +422,7 @@ class TestWithPad(TestConv2dOp): ...@@ -422,7 +422,7 @@ class TestWithPad(TestConv2dOp):
self.filter_size = [6, f_c, 3, 3] self.filter_size = [6, f_c, 3, 3]
class TestWithStride(TestConv2dOp): class TestWithStride(TestConv2DOp):
def init_test_case(self): def init_test_case(self):
self.pad = [1, 1] self.pad = [1, 1]
self.stride = [2, 2] self.stride = [2, 2]
...@@ -432,7 +432,7 @@ class TestWithStride(TestConv2dOp): ...@@ -432,7 +432,7 @@ class TestWithStride(TestConv2dOp):
self.filter_size = [6, f_c, 3, 3] self.filter_size = [6, f_c, 3, 3]
class TestWithGroup(TestConv2dOp): class TestWithGroup(TestConv2DOp):
def init_test_case(self): def init_test_case(self):
self.pad = [0, 0] self.pad = [0, 0]
self.stride = [1, 1] self.stride = [1, 1]
...@@ -443,7 +443,7 @@ class TestWithGroup(TestConv2dOp): ...@@ -443,7 +443,7 @@ class TestWithGroup(TestConv2dOp):
self.filter_size = [18, f_c, 3, 3] self.filter_size = [18, f_c, 3, 3]
class TestWith1x1(TestConv2dOp): class TestWith1x1(TestConv2DOp):
def init_test_case(self): def init_test_case(self):
self.pad = [0, 0] self.pad = [0, 0]
self.stride = [1, 1] self.stride = [1, 1]
...@@ -456,7 +456,7 @@ class TestWith1x1(TestConv2dOp): ...@@ -456,7 +456,7 @@ class TestWith1x1(TestConv2dOp):
self.groups = 3 self.groups = 3
class TestWithDepthWise3x3(TestConv2dOp): class TestWithDepthWise3x3(TestConv2DOp):
def init_test_case(self): def init_test_case(self):
self.pad = [1, 1] self.pad = [1, 1]
self.stride = [1, 1] self.stride = [1, 1]
...@@ -472,7 +472,7 @@ class TestWithDepthWise3x3(TestConv2dOp): ...@@ -472,7 +472,7 @@ class TestWithDepthWise3x3(TestConv2dOp):
self.groups = 4 self.groups = 4
class TestWithDepthWise5x5(TestConv2dOp): class TestWithDepthWise5x5(TestConv2DOp):
def init_test_case(self): def init_test_case(self):
self.pad = [0, 0] self.pad = [0, 0]
self.stride = [1, 1] self.stride = [1, 1]
...@@ -485,7 +485,7 @@ class TestWithDepthWise5x5(TestConv2dOp): ...@@ -485,7 +485,7 @@ class TestWithDepthWise5x5(TestConv2dOp):
self.groups = 4 self.groups = 4
class TestWithDepthWise7x7(TestConv2dOp): class TestWithDepthWise7x7(TestConv2DOp):
def init_test_case(self): def init_test_case(self):
self.pad = [1, 1] self.pad = [1, 1]
self.stride = [2, 2] self.stride = [2, 2]
...@@ -498,7 +498,7 @@ class TestWithDepthWise7x7(TestConv2dOp): ...@@ -498,7 +498,7 @@ class TestWithDepthWise7x7(TestConv2dOp):
self.groups = 8 self.groups = 8
class TestWithDilation(TestConv2dOp): class TestWithDilation(TestConv2DOp):
def init_test_case(self): def init_test_case(self):
self.pad = [0, 0] self.pad = [0, 0]
self.stride = [1, 1] self.stride = [1, 1]
...@@ -514,7 +514,7 @@ class TestWithDilation(TestConv2dOp): ...@@ -514,7 +514,7 @@ class TestWithDilation(TestConv2dOp):
self.groups = 3 self.groups = 3
class TestWithInput1x1Filter1x1(TestConv2dOp): class TestWithInput1x1Filter1x1(TestConv2DOp):
def init_test_case(self): def init_test_case(self):
self.pad = [0, 0] self.pad = [0, 0]
self.stride = [1, 1] self.stride = [1, 1]
...@@ -527,18 +527,18 @@ class TestWithInput1x1Filter1x1(TestConv2dOp): ...@@ -527,18 +527,18 @@ class TestWithInput1x1Filter1x1(TestConv2dOp):
self.groups = 3 self.groups = 3
#----------------Conv2dCUDNN---------------- #----------------Conv2DCUDNN----------------
create_test_cudnn_class(TestConv2dOp) create_test_cudnn_class(TestConv2DOp)
create_test_cudnn_class(TestWithPad) create_test_cudnn_class(TestWithPad)
create_test_cudnn_class(TestWithStride) create_test_cudnn_class(TestWithStride)
create_test_cudnn_class(TestWithGroup) create_test_cudnn_class(TestWithGroup)
create_test_cudnn_class(TestWith1x1) create_test_cudnn_class(TestWith1x1)
create_test_cudnn_class(TestWithInput1x1Filter1x1) create_test_cudnn_class(TestWithInput1x1Filter1x1)
#----------------Conv2dCUDNN fp16---------------- #----------------Conv2DCUDNN fp16----------------
create_test_cudnn_fp16_class(TestConv2dOp, grad_check=False) create_test_cudnn_fp16_class(TestConv2DOp, grad_check=False)
create_test_cudnn_fp16_class(TestWithPad, grad_check=False) create_test_cudnn_fp16_class(TestWithPad, grad_check=False)
create_test_cudnn_fp16_class(TestWithStride, grad_check=False) create_test_cudnn_fp16_class(TestWithStride, grad_check=False)
create_test_cudnn_fp16_class(TestWithGroup, grad_check=False) create_test_cudnn_fp16_class(TestWithGroup, grad_check=False)
...@@ -548,7 +548,7 @@ create_test_cudnn_fp16_class(TestWithInput1x1Filter1x1, grad_check=False) ...@@ -548,7 +548,7 @@ create_test_cudnn_fp16_class(TestWithInput1x1Filter1x1, grad_check=False)
#----------------TestDepthwiseConv ----- #----------------TestDepthwiseConv -----
class TestDepthwiseConv(TestConv2dOp): class TestDepthwiseConv(TestConv2DOp):
def init_test_case(self): def init_test_case(self):
self.use_cuda = True self.use_cuda = True
self.pad = [1, 1] self.pad = [1, 1]
...@@ -561,7 +561,7 @@ class TestDepthwiseConv(TestConv2dOp): ...@@ -561,7 +561,7 @@ class TestDepthwiseConv(TestConv2dOp):
self.op_type = "depthwise_conv2d" self.op_type = "depthwise_conv2d"
class TestDepthwiseConv2(TestConv2dOp): class TestDepthwiseConv2(TestConv2DOp):
def init_test_case(self): def init_test_case(self):
self.use_cuda = True self.use_cuda = True
self.pad = [1, 1] self.pad = [1, 1]
...@@ -574,7 +574,7 @@ class TestDepthwiseConv2(TestConv2dOp): ...@@ -574,7 +574,7 @@ class TestDepthwiseConv2(TestConv2dOp):
self.op_type = "depthwise_conv2d" self.op_type = "depthwise_conv2d"
class TestDepthwiseConv3(TestConv2dOp): class TestDepthwiseConv3(TestConv2DOp):
def init_test_case(self): def init_test_case(self):
self.use_cuda = True self.use_cuda = True
self.pad = [1, 1] self.pad = [1, 1]
...@@ -587,7 +587,7 @@ class TestDepthwiseConv3(TestConv2dOp): ...@@ -587,7 +587,7 @@ class TestDepthwiseConv3(TestConv2dOp):
self.op_type = "depthwise_conv2d" self.op_type = "depthwise_conv2d"
class TestDepthwiseConvWithDilation(TestConv2dOp): class TestDepthwiseConvWithDilation(TestConv2DOp):
def init_test_case(self): def init_test_case(self):
self.use_cuda = True self.use_cuda = True
self.pad = [1, 1] self.pad = [1, 1]
...@@ -601,7 +601,7 @@ class TestDepthwiseConvWithDilation(TestConv2dOp): ...@@ -601,7 +601,7 @@ class TestDepthwiseConvWithDilation(TestConv2dOp):
self.op_type = "depthwise_conv2d" self.op_type = "depthwise_conv2d"
class TestDepthwiseConvWithDilation2(TestConv2dOp): class TestDepthwiseConvWithDilation2(TestConv2DOp):
def init_test_case(self): def init_test_case(self):
self.use_cuda = True self.use_cuda = True
self.pad = [1, 1] self.pad = [1, 1]
...@@ -615,7 +615,7 @@ class TestDepthwiseConvWithDilation2(TestConv2dOp): ...@@ -615,7 +615,7 @@ class TestDepthwiseConvWithDilation2(TestConv2dOp):
self.op_type = "depthwise_conv2d" self.op_type = "depthwise_conv2d"
class TestDepthwiseConvandFuse(TestConv2dOp): class TestDepthwiseConvandFuse(TestConv2DOp):
def init_test_case(self): def init_test_case(self):
self.fuse_relu_before_depthwise_conv = True self.fuse_relu_before_depthwise_conv = True
self.use_cuda = True self.use_cuda = True
...@@ -629,7 +629,7 @@ class TestDepthwiseConvandFuse(TestConv2dOp): ...@@ -629,7 +629,7 @@ class TestDepthwiseConvandFuse(TestConv2dOp):
self.op_type = "depthwise_conv2d" self.op_type = "depthwise_conv2d"
class TestDepthwiseConv2andFuse(TestConv2dOp): class TestDepthwiseConv2andFuse(TestConv2DOp):
def init_test_case(self): def init_test_case(self):
self.fuse_relu_before_depthwise_conv = True self.fuse_relu_before_depthwise_conv = True
self.use_cuda = True self.use_cuda = True
...@@ -643,7 +643,7 @@ class TestDepthwiseConv2andFuse(TestConv2dOp): ...@@ -643,7 +643,7 @@ class TestDepthwiseConv2andFuse(TestConv2dOp):
self.op_type = "depthwise_conv2d" self.op_type = "depthwise_conv2d"
class TestDepthwiseConv3andFuse(TestConv2dOp): class TestDepthwiseConv3andFuse(TestConv2DOp):
def init_test_case(self): def init_test_case(self):
self.fuse_relu_before_depthwise_conv = True self.fuse_relu_before_depthwise_conv = True
self.use_cuda = True self.use_cuda = True
...@@ -657,7 +657,7 @@ class TestDepthwiseConv3andFuse(TestConv2dOp): ...@@ -657,7 +657,7 @@ class TestDepthwiseConv3andFuse(TestConv2dOp):
self.op_type = "depthwise_conv2d" self.op_type = "depthwise_conv2d"
class TestDepthwiseConvWithDilationandFuse(TestConv2dOp): class TestDepthwiseConvWithDilationandFuse(TestConv2DOp):
def init_test_case(self): def init_test_case(self):
self.fuse_relu_before_depthwise_conv = True self.fuse_relu_before_depthwise_conv = True
self.use_cuda = True self.use_cuda = True
...@@ -672,7 +672,7 @@ class TestDepthwiseConvWithDilationandFuse(TestConv2dOp): ...@@ -672,7 +672,7 @@ class TestDepthwiseConvWithDilationandFuse(TestConv2dOp):
self.op_type = "depthwise_conv2d" self.op_type = "depthwise_conv2d"
class TestDepthwiseConvWithDilation2andFuse(TestConv2dOp): class TestDepthwiseConvWithDilation2andFuse(TestConv2DOp):
def init_test_case(self): def init_test_case(self):
self.fuse_relu_before_depthwise_conv = True self.fuse_relu_before_depthwise_conv = True
self.use_cuda = True self.use_cuda = True
...@@ -687,13 +687,13 @@ class TestDepthwiseConvWithDilation2andFuse(TestConv2dOp): ...@@ -687,13 +687,13 @@ class TestDepthwiseConvWithDilation2andFuse(TestConv2dOp):
self.op_type = "depthwise_conv2d" self.op_type = "depthwise_conv2d"
class TestCUDNNExhaustiveSearch(TestConv2dOp): class TestCUDNNExhaustiveSearch(TestConv2DOp):
def init_kernel_type(self): def init_kernel_type(self):
self.use_cudnn = True self.use_cudnn = True
self.exhaustive_search = True self.exhaustive_search = True
class TestConv2dOpError(unittest.TestCase): class TestConv2DOpError(unittest.TestCase):
def test_errors(self): def test_errors(self):
with program_guard(Program(), Program()): with program_guard(Program(), Program()):
...@@ -724,7 +724,7 @@ class TestConv2dOpError(unittest.TestCase): ...@@ -724,7 +724,7 @@ class TestConv2dOpError(unittest.TestCase):
# ---- test asymmetric padding ---- # ---- test asymmetric padding ----
class TestConv2dOp_v2(OpTest): class TestConv2DOp_v2(OpTest):
def setUp(self): def setUp(self):
self.op_type = "conv2d" self.op_type = "conv2d"
self.use_cudnn = False self.use_cudnn = False
...@@ -854,13 +854,13 @@ class TestConv2dOp_v2(OpTest): ...@@ -854,13 +854,13 @@ class TestConv2dOp_v2(OpTest):
pass pass
class TestConv2dOp_AsyPadding(TestConv2dOp_v2): class TestConv2DOp_AsyPadding(TestConv2DOp_v2):
def init_paddings(self): def init_paddings(self):
self.pad = [0, 0, 1, 2] self.pad = [0, 0, 1, 2]
self.padding_algorithm = "EXPLICIT" self.padding_algorithm = "EXPLICIT"
class TestWithPad_AsyPadding(TestConv2dOp_v2): class TestWithPad_AsyPadding(TestConv2DOp_v2):
def init_test_case(self): def init_test_case(self):
self.stride = [1, 1] self.stride = [1, 1]
self.input_size = [2, 3, 5, 5] # NCHW self.input_size = [2, 3, 5, 5] # NCHW
...@@ -873,7 +873,7 @@ class TestWithPad_AsyPadding(TestConv2dOp_v2): ...@@ -873,7 +873,7 @@ class TestWithPad_AsyPadding(TestConv2dOp_v2):
self.padding_algorithm = "EXPLICIT" self.padding_algorithm = "EXPLICIT"
class TestWithStride_AsyPadding(TestConv2dOp_v2): class TestWithStride_AsyPadding(TestConv2DOp_v2):
def init_test_case(self): def init_test_case(self):
self.stride = [2, 2] self.stride = [2, 2]
self.input_size = [2, 3, 6, 6] # NCHW self.input_size = [2, 3, 6, 6] # NCHW
...@@ -886,7 +886,7 @@ class TestWithStride_AsyPadding(TestConv2dOp_v2): ...@@ -886,7 +886,7 @@ class TestWithStride_AsyPadding(TestConv2dOp_v2):
self.padding_algorithm = "EXPLICIT" self.padding_algorithm = "EXPLICIT"
class TestWithGroup_AsyPadding(TestConv2dOp_v2): class TestWithGroup_AsyPadding(TestConv2DOp_v2):
def init_test_case(self): def init_test_case(self):
self.pad = [0, 0] self.pad = [0, 0]
self.stride = [1, 2] self.stride = [1, 2]
...@@ -897,7 +897,7 @@ class TestWithGroup_AsyPadding(TestConv2dOp_v2): ...@@ -897,7 +897,7 @@ class TestWithGroup_AsyPadding(TestConv2dOp_v2):
self.filter_size = [24, f_c, 4, 3] self.filter_size = [24, f_c, 4, 3]
class TestWith1x1_AsyPadding(TestConv2dOp_v2): class TestWith1x1_AsyPadding(TestConv2DOp_v2):
def init_test_case(self): def init_test_case(self):
self.stride = [1, 1] self.stride = [1, 1]
self.input_size = [2, 3, 5, 5] # NCHW self.input_size = [2, 3, 5, 5] # NCHW
...@@ -913,7 +913,7 @@ class TestWith1x1_AsyPadding(TestConv2dOp_v2): ...@@ -913,7 +913,7 @@ class TestWith1x1_AsyPadding(TestConv2dOp_v2):
self.padding_algorithm = "EXPLICIT" self.padding_algorithm = "EXPLICIT"
class TestWithDepthWise3x3_AsyPadding(TestConv2dOp_v2): class TestWithDepthWise3x3_AsyPadding(TestConv2DOp_v2):
def init_test_case(self): def init_test_case(self):
self.stride = [1, 1] self.stride = [1, 1]
self.input_size = [3, 4, 10, 10] # NCHW self.input_size = [3, 4, 10, 10] # NCHW
...@@ -932,7 +932,7 @@ class TestWithDepthWise3x3_AsyPadding(TestConv2dOp_v2): ...@@ -932,7 +932,7 @@ class TestWithDepthWise3x3_AsyPadding(TestConv2dOp_v2):
self.padding_algorithm = "EXPLICIT" self.padding_algorithm = "EXPLICIT"
class TestWithDepthWise5x5_AsyPadding(TestConv2dOp_v2): class TestWithDepthWise5x5_AsyPadding(TestConv2DOp_v2):
def init_test_case(self): def init_test_case(self):
self.stride = [1, 1] self.stride = [1, 1]
self.input_size = [2, 4, 10, 10] # NCHW self.input_size = [2, 4, 10, 10] # NCHW
...@@ -948,7 +948,7 @@ class TestWithDepthWise5x5_AsyPadding(TestConv2dOp_v2): ...@@ -948,7 +948,7 @@ class TestWithDepthWise5x5_AsyPadding(TestConv2dOp_v2):
self.padding_algorithm = "EXPLICIT" self.padding_algorithm = "EXPLICIT"
class TestWithDepthWise7x7_AsyPadding(TestConv2dOp_v2): class TestWithDepthWise7x7_AsyPadding(TestConv2DOp_v2):
def init_test_case(self): def init_test_case(self):
self.stride = [2, 2] self.stride = [2, 2]
self.input_size = [2, 8, 10, 10] # NCHW self.input_size = [2, 8, 10, 10] # NCHW
...@@ -964,7 +964,7 @@ class TestWithDepthWise7x7_AsyPadding(TestConv2dOp_v2): ...@@ -964,7 +964,7 @@ class TestWithDepthWise7x7_AsyPadding(TestConv2dOp_v2):
self.padding_algorithm = "EXPLICIT" self.padding_algorithm = "EXPLICIT"
class TestWithDilation_AsyPadding(TestConv2dOp_v2): class TestWithDilation_AsyPadding(TestConv2DOp_v2):
def init_test_case(self): def init_test_case(self):
self.stride = [1, 1] self.stride = [1, 1]
self.input_size = [2, 3, 10, 10] # NCHW self.input_size = [2, 3, 10, 10] # NCHW
...@@ -983,7 +983,7 @@ class TestWithDilation_AsyPadding(TestConv2dOp_v2): ...@@ -983,7 +983,7 @@ class TestWithDilation_AsyPadding(TestConv2dOp_v2):
self.padding_algorithm = "EXPLICIT" self.padding_algorithm = "EXPLICIT"
class TestWithInput1x1Filter1x1_AsyPadding(TestConv2dOp_v2): class TestWithInput1x1Filter1x1_AsyPadding(TestConv2DOp_v2):
def init_test_case(self): def init_test_case(self):
self.stride = [1, 1] self.stride = [1, 1]
self.input_size = [40, 3, 1, 1] # NCHW self.input_size = [40, 3, 1, 1] # NCHW
...@@ -999,7 +999,7 @@ class TestWithInput1x1Filter1x1_AsyPadding(TestConv2dOp_v2): ...@@ -999,7 +999,7 @@ class TestWithInput1x1Filter1x1_AsyPadding(TestConv2dOp_v2):
self.padding_algorithm = "EXPLICIT" self.padding_algorithm = "EXPLICIT"
create_test_cudnn_class(TestConv2dOp_AsyPadding) create_test_cudnn_class(TestConv2DOp_AsyPadding)
create_test_cudnn_class(TestWithPad_AsyPadding) create_test_cudnn_class(TestWithPad_AsyPadding)
create_test_cudnn_class(TestWithStride_AsyPadding) create_test_cudnn_class(TestWithStride_AsyPadding)
create_test_cudnn_class(TestWithGroup_AsyPadding) create_test_cudnn_class(TestWithGroup_AsyPadding)
...@@ -1007,7 +1007,7 @@ create_test_cudnn_class(TestWith1x1_AsyPadding) ...@@ -1007,7 +1007,7 @@ create_test_cudnn_class(TestWith1x1_AsyPadding)
create_test_cudnn_class(TestWithInput1x1Filter1x1_AsyPadding) create_test_cudnn_class(TestWithInput1x1Filter1x1_AsyPadding)
class TestDepthwiseConv_AsyPadding(TestConv2dOp_v2): class TestDepthwiseConv_AsyPadding(TestConv2DOp_v2):
def init_test_case(self): def init_test_case(self):
self.use_cuda = True self.use_cuda = True
self.stride = [2, 2] self.stride = [2, 2]
...@@ -1023,7 +1023,7 @@ class TestDepthwiseConv_AsyPadding(TestConv2dOp_v2): ...@@ -1023,7 +1023,7 @@ class TestDepthwiseConv_AsyPadding(TestConv2dOp_v2):
self.padding_algorithm = "EXPLICIT" self.padding_algorithm = "EXPLICIT"
class TestDepthwiseConv2_AsyPadding(TestConv2dOp_v2): class TestDepthwiseConv2_AsyPadding(TestConv2DOp_v2):
def init_test_case(self): def init_test_case(self):
self.use_cuda = True self.use_cuda = True
self.stride = [1, 1] self.stride = [1, 1]
...@@ -1039,7 +1039,7 @@ class TestDepthwiseConv2_AsyPadding(TestConv2dOp_v2): ...@@ -1039,7 +1039,7 @@ class TestDepthwiseConv2_AsyPadding(TestConv2dOp_v2):
self.padding_algorithm = "EXPLICIT" self.padding_algorithm = "EXPLICIT"
class TestDepthwiseConv3_AsyPadding(TestConv2dOp_v2): class TestDepthwiseConv3_AsyPadding(TestConv2DOp_v2):
def init_test_case(self): def init_test_case(self):
self.use_cuda = True self.use_cuda = True
self.stride = [1, 1] self.stride = [1, 1]
...@@ -1055,7 +1055,7 @@ class TestDepthwiseConv3_AsyPadding(TestConv2dOp_v2): ...@@ -1055,7 +1055,7 @@ class TestDepthwiseConv3_AsyPadding(TestConv2dOp_v2):
self.padding_algorithm = "EXPLICIT" self.padding_algorithm = "EXPLICIT"
class TestDepthwiseConvWithDilation_AsyPadding(TestConv2dOp_v2): class TestDepthwiseConvWithDilation_AsyPadding(TestConv2DOp_v2):
def init_test_case(self): def init_test_case(self):
self.use_cuda = True self.use_cuda = True
self.pad = [1, 1] self.pad = [1, 1]
...@@ -1073,7 +1073,7 @@ class TestDepthwiseConvWithDilation_AsyPadding(TestConv2dOp_v2): ...@@ -1073,7 +1073,7 @@ class TestDepthwiseConvWithDilation_AsyPadding(TestConv2dOp_v2):
self.padding_algorithm = "EXPLICIT" self.padding_algorithm = "EXPLICIT"
class TestDepthwiseConvWithDilation2_AsyPadding(TestConv2dOp_v2): class TestDepthwiseConvWithDilation2_AsyPadding(TestConv2DOp_v2):
def init_test_case(self): def init_test_case(self):
self.use_cuda = True self.use_cuda = True
self.pad = [1, 1] self.pad = [1, 1]
...@@ -1091,7 +1091,7 @@ class TestDepthwiseConvWithDilation2_AsyPadding(TestConv2dOp_v2): ...@@ -1091,7 +1091,7 @@ class TestDepthwiseConvWithDilation2_AsyPadding(TestConv2dOp_v2):
self.padding_algorithm = "EXPLICIT" self.padding_algorithm = "EXPLICIT"
class TestDepthwiseConvandFuse_AsyPadding(TestConv2dOp_v2): class TestDepthwiseConvandFuse_AsyPadding(TestConv2DOp_v2):
def init_test_case(self): def init_test_case(self):
self.fuse_relu_before_depthwise_conv = True self.fuse_relu_before_depthwise_conv = True
self.use_cuda = True self.use_cuda = True
...@@ -1109,7 +1109,7 @@ class TestDepthwiseConvandFuse_AsyPadding(TestConv2dOp_v2): ...@@ -1109,7 +1109,7 @@ class TestDepthwiseConvandFuse_AsyPadding(TestConv2dOp_v2):
self.padding_algorithm = "EXPLICIT" self.padding_algorithm = "EXPLICIT"
class TestDepthwiseConv2andFuse_AsyPadding(TestConv2dOp_v2): class TestDepthwiseConv2andFuse_AsyPadding(TestConv2DOp_v2):
def init_test_case(self): def init_test_case(self):
self.fuse_relu_before_depthwise_conv = True self.fuse_relu_before_depthwise_conv = True
self.use_cuda = True self.use_cuda = True
...@@ -1127,7 +1127,7 @@ class TestDepthwiseConv2andFuse_AsyPadding(TestConv2dOp_v2): ...@@ -1127,7 +1127,7 @@ class TestDepthwiseConv2andFuse_AsyPadding(TestConv2dOp_v2):
self.padding_algorithm = "EXPLICIT" self.padding_algorithm = "EXPLICIT"
class TestDepthwiseConv3andFuse_AsyPadding(TestConv2dOp_v2): class TestDepthwiseConv3andFuse_AsyPadding(TestConv2DOp_v2):
def init_test_case(self): def init_test_case(self):
self.fuse_relu_before_depthwise_conv = True self.fuse_relu_before_depthwise_conv = True
self.use_cuda = True self.use_cuda = True
...@@ -1145,7 +1145,7 @@ class TestDepthwiseConv3andFuse_AsyPadding(TestConv2dOp_v2): ...@@ -1145,7 +1145,7 @@ class TestDepthwiseConv3andFuse_AsyPadding(TestConv2dOp_v2):
self.padding_algorithm = "EXPLICIT" self.padding_algorithm = "EXPLICIT"
class TestDepthwiseConvWithDilationandFuse_AsyPadding(TestConv2dOp_v2): class TestDepthwiseConvWithDilationandFuse_AsyPadding(TestConv2DOp_v2):
def init_test_case(self): def init_test_case(self):
self.fuse_relu_before_depthwise_conv = True self.fuse_relu_before_depthwise_conv = True
self.use_cuda = True self.use_cuda = True
...@@ -1164,7 +1164,7 @@ class TestDepthwiseConvWithDilationandFuse_AsyPadding(TestConv2dOp_v2): ...@@ -1164,7 +1164,7 @@ class TestDepthwiseConvWithDilationandFuse_AsyPadding(TestConv2dOp_v2):
self.padding_algorithm = "EXPLICIT" self.padding_algorithm = "EXPLICIT"
class TestDepthwiseConvWithDilation2andFuse_AsyPadding(TestConv2dOp_v2): class TestDepthwiseConvWithDilation2andFuse_AsyPadding(TestConv2DOp_v2):
def init_test_case(self): def init_test_case(self):
self.fuse_relu_before_depthwise_conv = True self.fuse_relu_before_depthwise_conv = True
self.use_cuda = True self.use_cuda = True
...@@ -1184,25 +1184,25 @@ class TestDepthwiseConvWithDilation2andFuse_AsyPadding(TestConv2dOp_v2): ...@@ -1184,25 +1184,25 @@ class TestDepthwiseConvWithDilation2andFuse_AsyPadding(TestConv2dOp_v2):
#---------- test SAME VALID ----------- #---------- test SAME VALID -----------
create_test_padding_SAME_class(TestConv2dOp_AsyPadding) create_test_padding_SAME_class(TestConv2DOp_AsyPadding)
create_test_padding_SAME_class(TestWithPad_AsyPadding) create_test_padding_SAME_class(TestWithPad_AsyPadding)
create_test_padding_SAME_class(TestWithStride_AsyPadding) create_test_padding_SAME_class(TestWithStride_AsyPadding)
create_test_padding_SAME_class(TestWithGroup_AsyPadding) create_test_padding_SAME_class(TestWithGroup_AsyPadding)
create_test_padding_SAME_class(TestWithInput1x1Filter1x1_AsyPadding) create_test_padding_SAME_class(TestWithInput1x1Filter1x1_AsyPadding)
create_test_padding_VALID_class(TestConv2dOp_AsyPadding) create_test_padding_VALID_class(TestConv2DOp_AsyPadding)
create_test_padding_VALID_class(TestWithPad_AsyPadding) create_test_padding_VALID_class(TestWithPad_AsyPadding)
create_test_padding_VALID_class(TestWithStride_AsyPadding) create_test_padding_VALID_class(TestWithStride_AsyPadding)
create_test_padding_VALID_class(TestWithGroup_AsyPadding) create_test_padding_VALID_class(TestWithGroup_AsyPadding)
create_test_padding_VALID_class(TestWithInput1x1Filter1x1_AsyPadding) create_test_padding_VALID_class(TestWithInput1x1Filter1x1_AsyPadding)
create_test_cudnn_padding_SAME_class(TestConv2dOp_AsyPadding) create_test_cudnn_padding_SAME_class(TestConv2DOp_AsyPadding)
create_test_cudnn_padding_SAME_class(TestWithPad_AsyPadding) create_test_cudnn_padding_SAME_class(TestWithPad_AsyPadding)
create_test_cudnn_padding_SAME_class(TestWithStride_AsyPadding) create_test_cudnn_padding_SAME_class(TestWithStride_AsyPadding)
create_test_cudnn_padding_SAME_class(TestWithGroup_AsyPadding) create_test_cudnn_padding_SAME_class(TestWithGroup_AsyPadding)
create_test_cudnn_padding_SAME_class(TestWithInput1x1Filter1x1_AsyPadding) create_test_cudnn_padding_SAME_class(TestWithInput1x1Filter1x1_AsyPadding)
create_test_cudnn_padding_VALID_class(TestConv2dOp_AsyPadding) create_test_cudnn_padding_VALID_class(TestConv2DOp_AsyPadding)
create_test_cudnn_padding_VALID_class(TestWithPad_AsyPadding) create_test_cudnn_padding_VALID_class(TestWithPad_AsyPadding)
create_test_cudnn_padding_VALID_class(TestWithStride_AsyPadding) create_test_cudnn_padding_VALID_class(TestWithStride_AsyPadding)
create_test_cudnn_padding_VALID_class(TestWithGroup_AsyPadding) create_test_cudnn_padding_VALID_class(TestWithGroup_AsyPadding)
...@@ -1221,7 +1221,7 @@ create_test_padding_VALID_class(TestDepthwiseConvandFuse_AsyPadding) ...@@ -1221,7 +1221,7 @@ create_test_padding_VALID_class(TestDepthwiseConvandFuse_AsyPadding)
create_test_padding_VALID_class(TestDepthwiseConvWithDilationandFuse_AsyPadding) create_test_padding_VALID_class(TestDepthwiseConvWithDilationandFuse_AsyPadding)
# ------------ test channel last --------- # ------------ test channel last ---------
create_test_channel_last_class(TestConv2dOp_AsyPadding) create_test_channel_last_class(TestConv2DOp_AsyPadding)
create_test_channel_last_class(TestWithPad_AsyPadding) create_test_channel_last_class(TestWithPad_AsyPadding)
create_test_channel_last_class(TestWithGroup_AsyPadding) create_test_channel_last_class(TestWithGroup_AsyPadding)
create_test_channel_last_class(TestWith1x1_AsyPadding) create_test_channel_last_class(TestWith1x1_AsyPadding)
...@@ -1232,14 +1232,14 @@ create_test_channel_last_class(TestDepthwiseConvWithDilation2_AsyPadding) ...@@ -1232,14 +1232,14 @@ create_test_channel_last_class(TestDepthwiseConvWithDilation2_AsyPadding)
create_test_channel_last_class(TestDepthwiseConvandFuse_AsyPadding) create_test_channel_last_class(TestDepthwiseConvandFuse_AsyPadding)
create_test_channel_last_class(TestDepthwiseConvWithDilationandFuse_AsyPadding) create_test_channel_last_class(TestDepthwiseConvWithDilationandFuse_AsyPadding)
create_test_cudnn_channel_last_class(TestConv2dOp_AsyPadding) create_test_cudnn_channel_last_class(TestConv2DOp_AsyPadding)
create_test_cudnn_channel_last_class(TestWithPad_AsyPadding) create_test_cudnn_channel_last_class(TestWithPad_AsyPadding)
create_test_cudnn_channel_last_class(TestWithStride_AsyPadding) create_test_cudnn_channel_last_class(TestWithStride_AsyPadding)
create_test_cudnn_channel_last_class(TestWithGroup_AsyPadding) create_test_cudnn_channel_last_class(TestWithGroup_AsyPadding)
create_test_cudnn_channel_last_class(TestWithDilation_AsyPadding) create_test_cudnn_channel_last_class(TestWithDilation_AsyPadding)
create_test_cudnn_channel_last_fp16_class( create_test_cudnn_channel_last_fp16_class(
TestConv2dOp_AsyPadding, grad_check=False) TestConv2DOp_AsyPadding, grad_check=False)
create_test_cudnn_channel_last_fp16_class( create_test_cudnn_channel_last_fp16_class(
TestWithPad_AsyPadding, grad_check=False) TestWithPad_AsyPadding, grad_check=False)
create_test_cudnn_channel_last_fp16_class( create_test_cudnn_channel_last_fp16_class(
...@@ -1251,7 +1251,7 @@ create_test_cudnn_channel_last_fp16_class( ...@@ -1251,7 +1251,7 @@ create_test_cudnn_channel_last_fp16_class(
# --------- test python API --------------- # --------- test python API ---------------
class TestConv2dAPI(unittest.TestCase): class TestConv2DAPI(unittest.TestCase):
def test_api(self): def test_api(self):
input_NHWC = fluid.layers.data( input_NHWC = fluid.layers.data(
...@@ -1327,7 +1327,7 @@ class TestConv2dAPI(unittest.TestCase): ...@@ -1327,7 +1327,7 @@ class TestConv2dAPI(unittest.TestCase):
data_format="NCHW") data_format="NCHW")
class TestConv2dAPI_Error(unittest.TestCase): class TestConv2DAPI_Error(unittest.TestCase):
def test_api(self): def test_api(self):
input = fluid.layers.data( input = fluid.layers.data(
name="input", name="input",
......
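The create_test_cudnn_class, create_test_padding_SAME_class, create_test_cudnn_fp16_class and related helpers called above all follow one dynamic-subclassing pattern, which is why this rename only has to touch the parent class names passed in. The helper bodies are not reproduced in this diff, so the sketch below is an assumption about their shape rather than the exact implementation:

import unittest
import paddle.fluid.core as core

def create_test_cudnn_class(parent):
    # Derive a cuDNN variant of any conv test case and register it by name.
    @unittest.skipIf(not core.is_compiled_with_cuda(),
                     "core is not compiled with CUDA")
    class TestCUDNNCase(parent):
        def init_kernel_type(self):
            self.use_cudnn = True

    cls_name = "{0}_{1}".format(parent.__name__, "CUDNN")
    TestCUDNNCase.__name__ = cls_name
    globals()[cls_name] = TestCUDNNCase

The padding, channel-last and fp16 variants presumably differ only in which init_* hooks they override and in extra keyword arguments such as grad_check=False.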
...@@ -155,7 +155,7 @@ class Conv2DTransposeTestCase(unittest.TestCase): ...@@ -155,7 +155,7 @@ class Conv2DTransposeTestCase(unittest.TestCase):
else: else:
output_size = self.output_size output_size = self.output_size
conv = nn.ConvTranspose2d( conv = nn.Conv2DTranspose(
self.num_channels, self.num_channels,
self.num_filters, self.num_filters,
self.filter_size, self.filter_size,
......
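Under the new naming, the dygraph layer is nn.Conv2DTranspose instead of nn.ConvTranspose2d, with the same positional arguments the test above passes (num_channels, num_filters, filter_size). A minimal usage sketch, with illustrative shapes that are not taken from this diff:

import paddle
import paddle.nn as nn

paddle.disable_static()
x = paddle.rand([2, 3, 8, 8])        # NCHW input
conv = nn.Conv2DTranspose(3, 6, 3)   # num_channels=3, num_filters=6, 3x3 kernel
y = conv(x)
print(y.shape)                       # [2, 6, 10, 10] with default stride and padding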
...@@ -111,7 +111,7 @@ def conv2dtranspose_forward_naive(input_, filter_, attrs): ...@@ -111,7 +111,7 @@ def conv2dtranspose_forward_naive(input_, filter_, attrs):
return out return out
class TestConv2dTransposeOp(OpTest): class TestConv2DTransposeOp(OpTest):
def setUp(self): def setUp(self):
# init as conv transpose # init as conv transpose
self.dtype = np.float64 self.dtype = np.float64
...@@ -211,7 +211,7 @@ class TestConv2dTransposeOp(OpTest): ...@@ -211,7 +211,7 @@ class TestConv2dTransposeOp(OpTest):
self.op_type = "conv2d_transpose" self.op_type = "conv2d_transpose"
class TestWithSymmetricPad(TestConv2dTransposeOp): class TestWithSymmetricPad(TestConv2DTransposeOp):
def init_test_case(self): def init_test_case(self):
self.pad = [1, 1] self.pad = [1, 1]
self.stride = [1, 1] self.stride = [1, 1]
...@@ -222,7 +222,7 @@ class TestWithSymmetricPad(TestConv2dTransposeOp): ...@@ -222,7 +222,7 @@ class TestWithSymmetricPad(TestConv2dTransposeOp):
self.filter_size = [f_c, 6, 3, 3] self.filter_size = [f_c, 6, 3, 3]
class TestWithAsymmetricPad(TestConv2dTransposeOp): class TestWithAsymmetricPad(TestConv2DTransposeOp):
def init_test_case(self): def init_test_case(self):
self.pad = [1, 0, 1, 2] self.pad = [1, 0, 1, 2]
self.stride = [1, 1] self.stride = [1, 1]
...@@ -233,7 +233,7 @@ class TestWithAsymmetricPad(TestConv2dTransposeOp): ...@@ -233,7 +233,7 @@ class TestWithAsymmetricPad(TestConv2dTransposeOp):
self.filter_size = [f_c, 6, 3, 3] self.filter_size = [f_c, 6, 3, 3]
class TestWithSAMEPad(TestConv2dTransposeOp): class TestWithSAMEPad(TestConv2DTransposeOp):
def init_test_case(self): def init_test_case(self):
self.stride = [2, 1] self.stride = [2, 1]
self.dilations = [1, 2] self.dilations = [1, 2]
...@@ -244,7 +244,7 @@ class TestWithSAMEPad(TestConv2dTransposeOp): ...@@ -244,7 +244,7 @@ class TestWithSAMEPad(TestConv2dTransposeOp):
self.padding_algorithm = 'SAME' self.padding_algorithm = 'SAME'
class TestWithVALIDPad(TestConv2dTransposeOp): class TestWithVALIDPad(TestConv2DTransposeOp):
def init_test_case(self): def init_test_case(self):
self.stride = [1, 1] self.stride = [1, 1]
self.dilations = [1, 1] self.dilations = [1, 1]
...@@ -255,7 +255,7 @@ class TestWithVALIDPad(TestConv2dTransposeOp): ...@@ -255,7 +255,7 @@ class TestWithVALIDPad(TestConv2dTransposeOp):
self.padding_algorithm = 'VALID' self.padding_algorithm = 'VALID'
class TestWithGroups(TestConv2dTransposeOp): class TestWithGroups(TestConv2DTransposeOp):
def init_test_case(self): def init_test_case(self):
self.pad = [1, 1] self.pad = [1, 1]
self.stride = [1, 1] self.stride = [1, 1]
...@@ -266,7 +266,7 @@ class TestWithGroups(TestConv2dTransposeOp): ...@@ -266,7 +266,7 @@ class TestWithGroups(TestConv2dTransposeOp):
self.filter_size = [f_c, 3, 3, 3] self.filter_size = [f_c, 3, 3, 3]
class TestWithStride(TestConv2dTransposeOp): class TestWithStride(TestConv2DTransposeOp):
def init_test_case(self): def init_test_case(self):
self.pad = [1, 1] self.pad = [1, 1]
self.stride = [2, 2] self.stride = [2, 2]
...@@ -277,7 +277,7 @@ class TestWithStride(TestConv2dTransposeOp): ...@@ -277,7 +277,7 @@ class TestWithStride(TestConv2dTransposeOp):
self.filter_size = [f_c, 6, 3, 3] self.filter_size = [f_c, 6, 3, 3]
class TestWithDilation(TestConv2dTransposeOp): class TestWithDilation(TestConv2DTransposeOp):
def init_test_case(self): def init_test_case(self):
self.pad = [1, 1] self.pad = [1, 1]
self.stride = [1, 1] self.stride = [1, 1]
...@@ -288,7 +288,7 @@ class TestWithDilation(TestConv2dTransposeOp): ...@@ -288,7 +288,7 @@ class TestWithDilation(TestConv2dTransposeOp):
self.filter_size = [f_c, 6, 3, 3] self.filter_size = [f_c, 6, 3, 3]
class TestWithEvenUpsample(TestConv2dTransposeOp): class TestWithEvenUpsample(TestConv2DTransposeOp):
def init_test_case(self): def init_test_case(self):
self.pad = [2, 2] self.pad = [2, 2]
self.stride = [2, 2] self.stride = [2, 2]
...@@ -300,7 +300,7 @@ class TestWithEvenUpsample(TestConv2dTransposeOp): ...@@ -300,7 +300,7 @@ class TestWithEvenUpsample(TestConv2dTransposeOp):
self.filter_size = [f_c, 6, 5, 5] self.filter_size = [f_c, 6, 5, 5]
class TestWithEvenUpsampleOutputPadding(TestConv2dTransposeOp): class TestWithEvenUpsampleOutputPadding(TestConv2DTransposeOp):
def init_test_case(self): def init_test_case(self):
self.pad = [2, 2] self.pad = [2, 2]
self.stride = [2, 2] self.stride = [2, 2]
...@@ -312,7 +312,7 @@ class TestWithEvenUpsampleOutputPadding(TestConv2dTransposeOp): ...@@ -312,7 +312,7 @@ class TestWithEvenUpsampleOutputPadding(TestConv2dTransposeOp):
self.filter_size = [f_c, 6, 5, 5] self.filter_size = [f_c, 6, 5, 5]
class Test_NHWC(TestConv2dTransposeOp): class Test_NHWC(TestConv2DTransposeOp):
def init_test_case(self): def init_test_case(self):
self.pad = [0, 0] self.pad = [0, 0]
self.stride = [1, 1] self.stride = [1, 1]
...@@ -324,7 +324,7 @@ class Test_NHWC(TestConv2dTransposeOp): ...@@ -324,7 +324,7 @@ class Test_NHWC(TestConv2dTransposeOp):
self.data_format = 'NHWC' self.data_format = 'NHWC'
class TestWithSymmetricPad_NHWC(TestConv2dTransposeOp): class TestWithSymmetricPad_NHWC(TestConv2DTransposeOp):
def init_test_case(self): def init_test_case(self):
self.pad = [1, 1] self.pad = [1, 1]
self.stride = [1, 1] self.stride = [1, 1]
...@@ -336,7 +336,7 @@ class TestWithSymmetricPad_NHWC(TestConv2dTransposeOp): ...@@ -336,7 +336,7 @@ class TestWithSymmetricPad_NHWC(TestConv2dTransposeOp):
self.data_format = 'NHWC' self.data_format = 'NHWC'
class TestWithAsymmetricPad_NHWC(TestConv2dTransposeOp): class TestWithAsymmetricPad_NHWC(TestConv2DTransposeOp):
def init_test_case(self): def init_test_case(self):
self.pad = [1, 0, 1, 2] self.pad = [1, 0, 1, 2]
self.stride = [1, 1] self.stride = [1, 1]
...@@ -348,7 +348,7 @@ class TestWithAsymmetricPad_NHWC(TestConv2dTransposeOp): ...@@ -348,7 +348,7 @@ class TestWithAsymmetricPad_NHWC(TestConv2dTransposeOp):
self.data_format = 'NHWC' self.data_format = 'NHWC'
class TestWithGroups_NHWC(TestConv2dTransposeOp): class TestWithGroups_NHWC(TestConv2DTransposeOp):
def init_test_case(self): def init_test_case(self):
self.pad = [1, 1] self.pad = [1, 1]
self.stride = [1, 1] self.stride = [1, 1]
...@@ -360,7 +360,7 @@ class TestWithGroups_NHWC(TestConv2dTransposeOp): ...@@ -360,7 +360,7 @@ class TestWithGroups_NHWC(TestConv2dTransposeOp):
self.data_format = 'NHWC' self.data_format = 'NHWC'
class TestWithStride_NHWC(TestConv2dTransposeOp): class TestWithStride_NHWC(TestConv2DTransposeOp):
def init_test_case(self): def init_test_case(self):
self.pad = [1, 1] self.pad = [1, 1]
self.stride = [2, 2] self.stride = [2, 2]
...@@ -372,7 +372,7 @@ class TestWithStride_NHWC(TestConv2dTransposeOp): ...@@ -372,7 +372,7 @@ class TestWithStride_NHWC(TestConv2dTransposeOp):
self.data_format = 'NHWC' self.data_format = 'NHWC'
class TestWithDilation_NHWC(TestConv2dTransposeOp): class TestWithDilation_NHWC(TestConv2DTransposeOp):
def init_test_case(self): def init_test_case(self):
self.pad = [1, 1] self.pad = [1, 1]
self.stride = [1, 1] self.stride = [1, 1]
...@@ -384,7 +384,7 @@ class TestWithDilation_NHWC(TestConv2dTransposeOp): ...@@ -384,7 +384,7 @@ class TestWithDilation_NHWC(TestConv2dTransposeOp):
self.data_format = 'NHWC' self.data_format = 'NHWC'
class TestWithEvenUpsample_NHWC(TestConv2dTransposeOp): class TestWithEvenUpsample_NHWC(TestConv2DTransposeOp):
def init_test_case(self): def init_test_case(self):
self.pad = [2, 2] self.pad = [2, 2]
self.stride = [2, 2] self.stride = [2, 2]
...@@ -397,7 +397,7 @@ class TestWithEvenUpsample_NHWC(TestConv2dTransposeOp): ...@@ -397,7 +397,7 @@ class TestWithEvenUpsample_NHWC(TestConv2dTransposeOp):
self.data_format = 'NHWC' self.data_format = 'NHWC'
class TestWithEvenUpsample_NHWC_output_padding(TestConv2dTransposeOp): class TestWithEvenUpsample_NHWC_output_padding(TestConv2DTransposeOp):
def init_test_case(self): def init_test_case(self):
self.pad = [2, 2] self.pad = [2, 2]
self.stride = [2, 2] self.stride = [2, 2]
...@@ -413,7 +413,7 @@ class TestWithEvenUpsample_NHWC_output_padding(TestConv2dTransposeOp): ...@@ -413,7 +413,7 @@ class TestWithEvenUpsample_NHWC_output_padding(TestConv2dTransposeOp):
# ------------ test_cudnn ------------ # ------------ test_cudnn ------------
@unittest.skipIf(not core.is_compiled_with_cuda(), @unittest.skipIf(not core.is_compiled_with_cuda(),
"core is not compiled with CUDA") "core is not compiled with CUDA")
class TestCUDNN(TestConv2dTransposeOp): class TestCUDNN(TestConv2DTransposeOp):
def init_op_type(self): def init_op_type(self):
self.use_cudnn = True self.use_cudnn = True
self.op_type = "conv2d_transpose" self.op_type = "conv2d_transpose"
...@@ -547,7 +547,7 @@ class TestCUDNNWithEvenUpsample(TestWithEvenUpsample): ...@@ -547,7 +547,7 @@ class TestCUDNNWithEvenUpsample(TestWithEvenUpsample):
@unittest.skipIf(not core.is_compiled_with_cuda(), @unittest.skipIf(not core.is_compiled_with_cuda(),
"core is not compiled with CUDA") "core is not compiled with CUDA")
class TestCUDNN_NHWC(TestConv2dTransposeOp): class TestCUDNN_NHWC(TestConv2DTransposeOp):
def init_test_case(self): def init_test_case(self):
self.pad = [0, 0] self.pad = [0, 0]
self.stride = [1, 1] self.stride = [1, 1]
...@@ -654,7 +654,7 @@ class TestCUDNNWithEvenUpsample_NHWC(TestWithEvenUpsample): ...@@ -654,7 +654,7 @@ class TestCUDNNWithEvenUpsample_NHWC(TestWithEvenUpsample):
self.op_type = "conv2d_transpose" self.op_type = "conv2d_transpose"
class TestDepthwiseConvTranspose(TestConv2dTransposeOp): class TestDepthwiseConvTranspose(TestConv2DTransposeOp):
def init_test_case(self): def init_test_case(self):
self.pad = [1, 1] self.pad = [1, 1]
self.stride = [2, 2] self.stride = [2, 2]
...@@ -667,7 +667,7 @@ class TestDepthwiseConvTranspose(TestConv2dTransposeOp): ...@@ -667,7 +667,7 @@ class TestDepthwiseConvTranspose(TestConv2dTransposeOp):
self.op_type = "depthwise_conv2d_transpose" self.op_type = "depthwise_conv2d_transpose"
class TestDepthwiseConvTransposeAsymmetricPad(TestConv2dTransposeOp): class TestDepthwiseConvTransposeAsymmetricPad(TestConv2DTransposeOp):
def init_test_case(self): def init_test_case(self):
self.pad = [1, 0, 1, 2] self.pad = [1, 0, 1, 2]
self.stride = [2, 2] self.stride = [2, 2]
...@@ -681,7 +681,7 @@ class TestDepthwiseConvTransposeAsymmetricPad(TestConv2dTransposeOp): ...@@ -681,7 +681,7 @@ class TestDepthwiseConvTransposeAsymmetricPad(TestConv2dTransposeOp):
self.data_format = 'NCHW' self.data_format = 'NCHW'
class TestDepthwiseConvTransposeSAMEPad(TestConv2dTransposeOp): class TestDepthwiseConvTransposeSAMEPad(TestConv2DTransposeOp):
def init_test_case(self): def init_test_case(self):
self.stride = [2, 2] self.stride = [2, 2]
self.dilations = [1, 1] self.dilations = [1, 1]
...@@ -694,7 +694,7 @@ class TestDepthwiseConvTransposeSAMEPad(TestConv2dTransposeOp): ...@@ -694,7 +694,7 @@ class TestDepthwiseConvTransposeSAMEPad(TestConv2dTransposeOp):
self.padding_algorithm = 'SAME' self.padding_algorithm = 'SAME'
class TestDepthwiseConvTransposeVALIDPad(TestConv2dTransposeOp): class TestDepthwiseConvTransposeVALIDPad(TestConv2DTransposeOp):
def init_test_case(self): def init_test_case(self):
self.stride = [2, 2] self.stride = [2, 2]
self.dilations = [1, 1] self.dilations = [1, 1]
...@@ -707,7 +707,7 @@ class TestDepthwiseConvTransposeVALIDPad(TestConv2dTransposeOp): ...@@ -707,7 +707,7 @@ class TestDepthwiseConvTransposeVALIDPad(TestConv2dTransposeOp):
self.padding_algorithm = 'VALID' self.padding_algorithm = 'VALID'
class TestDepthwiseConvTranspose_NHWC_4x4kernel(TestConv2dTransposeOp): class TestDepthwiseConvTranspose_NHWC_4x4kernel(TestConv2DTransposeOp):
def init_test_case(self): def init_test_case(self):
self.pad = [1, 1] self.pad = [1, 1]
self.stride = [2, 2] self.stride = [2, 2]
...@@ -721,7 +721,7 @@ class TestDepthwiseConvTranspose_NHWC_4x4kernel(TestConv2dTransposeOp): ...@@ -721,7 +721,7 @@ class TestDepthwiseConvTranspose_NHWC_4x4kernel(TestConv2dTransposeOp):
self.data_format = 'NHWC' self.data_format = 'NHWC'
class TestDepthwiseConvTranspose_NHWC_3x3kernel(TestConv2dTransposeOp): class TestDepthwiseConvTranspose_NHWC_3x3kernel(TestConv2DTransposeOp):
def init_test_case(self): def init_test_case(self):
self.pad = [1, 1] self.pad = [1, 1]
self.stride = [2, 2] self.stride = [2, 2]
...@@ -735,7 +735,7 @@ class TestDepthwiseConvTranspose_NHWC_3x3kernel(TestConv2dTransposeOp): ...@@ -735,7 +735,7 @@ class TestDepthwiseConvTranspose_NHWC_3x3kernel(TestConv2dTransposeOp):
self.data_format = 'NHWC' self.data_format = 'NHWC'
class TestDepthwiseConvTransposeAsymmetricPad_NHWC(TestConv2dTransposeOp): class TestDepthwiseConvTransposeAsymmetricPad_NHWC(TestConv2DTransposeOp):
def init_test_case(self): def init_test_case(self):
self.pad = [1, 0, 1, 2] self.pad = [1, 0, 1, 2]
self.stride = [2, 2] self.stride = [2, 2]
...@@ -751,7 +751,7 @@ class TestDepthwiseConvTransposeAsymmetricPad_NHWC(TestConv2dTransposeOp): ...@@ -751,7 +751,7 @@ class TestDepthwiseConvTransposeAsymmetricPad_NHWC(TestConv2dTransposeOp):
@unittest.skipIf(not core.is_compiled_with_cuda(), @unittest.skipIf(not core.is_compiled_with_cuda(),
"core is not compiled with CUDA") "core is not compiled with CUDA")
class TestCUDNN_FP16(TestConv2dTransposeOp): class TestCUDNN_FP16(TestConv2DTransposeOp):
def init_test_case(self): def init_test_case(self):
self.dtype = np.float16 self.dtype = np.float16
self.pad = [1, 1] self.pad = [1, 1]
...@@ -867,7 +867,7 @@ class TestCUDNNWithEvenUpsample_NHWC_FP16(TestCUDNN_FP16): ...@@ -867,7 +867,7 @@ class TestCUDNNWithEvenUpsample_NHWC_FP16(TestCUDNN_FP16):
self.data_format = 'NHWC' self.data_format = 'NHWC'
class TestConv2dTransposeAPI(unittest.TestCase): class TestConv2DTransposeAPI(unittest.TestCase):
def test_case1(self): def test_case1(self):
data1 = fluid.layers.data( data1 = fluid.layers.data(
name='data1', shape=[3, 5, 5], dtype='float32') name='data1', shape=[3, 5, 5], dtype='float32')
...@@ -945,7 +945,7 @@ class TestConv2dTransposeAPI(unittest.TestCase): ...@@ -945,7 +945,7 @@ class TestConv2dTransposeAPI(unittest.TestCase):
self.assertIsNotNone(results[6]) self.assertIsNotNone(results[6])
class TestConv2dTransposeOpException(unittest.TestCase): class TestConv2DTransposeOpException(unittest.TestCase):
def test_exception(self): def test_exception(self):
data = fluid.layers.data(name='data', shape=[3, 5, 5], dtype="float32") data = fluid.layers.data(name='data', shape=[3, 5, 5], dtype="float32")
......
...@@ -135,7 +135,7 @@ class Conv3DTestCase(unittest.TestCase): ...@@ -135,7 +135,7 @@ class Conv3DTestCase(unittest.TestCase):
def paddle_nn_layer(self): def paddle_nn_layer(self):
x_var = dg.to_variable(self.input) x_var = dg.to_variable(self.input)
conv = nn.Conv3d( conv = nn.Conv3D(
self.num_channels, self.num_channels,
self.num_filters, self.num_filters,
self.filter_size, self.filter_size,
......
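The 3D convolution layer follows the same convention: nn.Conv3d becomes nn.Conv3D. A minimal usage sketch with illustrative shapes:

import paddle
import paddle.nn as nn

paddle.disable_static()
x = paddle.rand([2, 3, 8, 8, 8])     # NCDHW input
conv = nn.Conv3D(3, 6, 3)            # num_channels=3, num_filters=6, 3x3x3 kernel
y = conv(x)
print(y.shape)                       # [2, 6, 6, 6, 6] with default stride and padding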
...@@ -228,7 +228,7 @@ def create_test_cudnn_channel_last_class(parent): ...@@ -228,7 +228,7 @@ def create_test_cudnn_channel_last_class(parent):
globals()[cls_name] = TestCudnnChannelLastCase globals()[cls_name] = TestCudnnChannelLastCase
class TestConv3dOp(OpTest): class TestConv3DOp(OpTest):
def setUp(self): def setUp(self):
self.op_type = "conv3d" self.op_type = "conv3d"
self.use_cudnn = False self.use_cudnn = False
...@@ -334,7 +334,7 @@ class TestConv3dOp(OpTest): ...@@ -334,7 +334,7 @@ class TestConv3dOp(OpTest):
pass pass
class TestCase1(TestConv3dOp): class TestCase1(TestConv3DOp):
def init_test_case(self): def init_test_case(self):
self.pad = [1, 1, 1] self.pad = [1, 1, 1]
self.stride = [1, 1, 1] self.stride = [1, 1, 1]
...@@ -344,7 +344,7 @@ class TestCase1(TestConv3dOp): ...@@ -344,7 +344,7 @@ class TestCase1(TestConv3dOp):
self.filter_size = [6, f_c, 3, 3, 3] self.filter_size = [6, f_c, 3, 3, 3]
class TestWithGroup1(TestConv3dOp): class TestWithGroup1(TestConv3DOp):
def init_group(self): def init_group(self):
self.groups = 3 self.groups = 3
...@@ -354,7 +354,7 @@ class TestWithGroup2(TestCase1): ...@@ -354,7 +354,7 @@ class TestWithGroup2(TestCase1):
self.groups = 3 self.groups = 3
class TestWith1x1(TestConv3dOp): class TestWith1x1(TestConv3DOp):
def init_test_case(self): def init_test_case(self):
self.pad = [0, 0, 0] self.pad = [0, 0, 0]
self.stride = [1, 1, 1] self.stride = [1, 1, 1]
...@@ -370,7 +370,7 @@ class TestWith1x1(TestConv3dOp): ...@@ -370,7 +370,7 @@ class TestWith1x1(TestConv3dOp):
self.groups = 3 self.groups = 3
class TestWithInput1x1Filter1x1(TestConv3dOp): class TestWithInput1x1Filter1x1(TestConv3DOp):
def init_test_case(self): def init_test_case(self):
self.pad = [0, 0, 0] self.pad = [0, 0, 0]
self.stride = [1, 1, 1] self.stride = [1, 1, 1]
...@@ -386,7 +386,7 @@ class TestWithInput1x1Filter1x1(TestConv3dOp): ...@@ -386,7 +386,7 @@ class TestWithInput1x1Filter1x1(TestConv3dOp):
self.groups = 3 self.groups = 3
class TestWithDilation(TestConv3dOp): class TestWithDilation(TestConv3DOp):
def init_test_case(self): def init_test_case(self):
self.pad = [0, 0, 0] self.pad = [0, 0, 0]
self.stride = [1, 1, 1] self.stride = [1, 1, 1]
...@@ -402,19 +402,19 @@ class TestWithDilation(TestConv3dOp): ...@@ -402,19 +402,19 @@ class TestWithDilation(TestConv3dOp):
self.groups = 3 self.groups = 3
#---------------- Conv3dCUDNN ---------------- #---------------- Conv3DCUDNN ----------------
@unittest.skipIf(not core.is_compiled_with_cuda(), @unittest.skipIf(not core.is_compiled_with_cuda(),
"core is not compiled with CUDA") "core is not compiled with CUDA")
class TestCUDNN(TestConv3dOp): class TestCUDNN(TestConv3DOp):
def init_kernel_type(self): def init_kernel_type(self):
self.use_cudnn = True self.use_cudnn = True
@unittest.skipIf(not core.is_compiled_with_cuda(), @unittest.skipIf(not core.is_compiled_with_cuda(),
"core is not compiled with CUDA") "core is not compiled with CUDA")
class TestFP16CUDNN(TestConv3dOp): class TestFP16CUDNN(TestConv3DOp):
def init_kernel_type(self): def init_kernel_type(self):
self.use_cudnn = True self.use_cudnn = True
self.dtype = np.float16 self.dtype = np.float16
...@@ -519,7 +519,7 @@ class TestCUDNNExhaustiveSearch(TestCUDNN): ...@@ -519,7 +519,7 @@ class TestCUDNNExhaustiveSearch(TestCUDNN):
# ---- test asymmetric padding ---- # ---- test asymmetric padding ----
class TestConv3dOp_2(OpTest): class TestConv3DOp_2(OpTest):
def setUp(self): def setUp(self):
self.op_type = "conv3d" self.op_type = "conv3d"
self.use_cudnn = False self.use_cudnn = False
...@@ -624,7 +624,7 @@ class TestConv3dOp_2(OpTest): ...@@ -624,7 +624,7 @@ class TestConv3dOp_2(OpTest):
self.data_format = "NCDHW" self.data_format = "NCDHW"
class TestConv3dOp_AsyPadding(TestConv3dOp_2): class TestConv3DOp_AsyPadding(TestConv3DOp_2):
def init_test_case(self): def init_test_case(self):
self.stride = [1, 1, 2] self.stride = [1, 1, 2]
self.input_size = [2, 3, 4, 4, 4] # NCDHW self.input_size = [2, 3, 4, 4, 4] # NCDHW
...@@ -637,7 +637,7 @@ class TestConv3dOp_AsyPadding(TestConv3dOp_2): ...@@ -637,7 +637,7 @@ class TestConv3dOp_AsyPadding(TestConv3dOp_2):
self.padding_algorithm = "EXPLICIT" self.padding_algorithm = "EXPLICIT"
class TestConv3dOp_DiffDataInDiffDim(TestConv3dOp_2): class TestConv3DOp_DiffDataInDiffDim(TestConv3DOp_2):
def init_test_case(self): def init_test_case(self):
self.stride = [1, 1, 2] self.stride = [1, 1, 2]
self.input_size = [2, 3, 4, 5, 5] # NCDHW self.input_size = [2, 3, 4, 5, 5] # NCDHW
...@@ -650,12 +650,12 @@ class TestConv3dOp_DiffDataInDiffDim(TestConv3dOp_2): ...@@ -650,12 +650,12 @@ class TestConv3dOp_DiffDataInDiffDim(TestConv3dOp_2):
self.padding_algorithm = "EXPLICIT" self.padding_algorithm = "EXPLICIT"
create_test_padding_SAME_class(TestConv3dOp_DiffDataInDiffDim) create_test_padding_SAME_class(TestConv3DOp_DiffDataInDiffDim)
create_test_padding_VALID_class(TestConv3dOp_DiffDataInDiffDim) create_test_padding_VALID_class(TestConv3DOp_DiffDataInDiffDim)
create_test_channel_last_class(TestConv3dOp_DiffDataInDiffDim) create_test_channel_last_class(TestConv3DOp_DiffDataInDiffDim)
class TestCase1_AsyPadding(TestConv3dOp_2): class TestCase1_AsyPadding(TestConv3DOp_2):
def init_test_case(self): def init_test_case(self):
self.stride = [1, 1, 1] self.stride = [1, 1, 1]
self.input_size = [2, 3, 4, 4, 4] # NCDHW self.input_size = [2, 3, 4, 4, 4] # NCDHW
...@@ -668,7 +668,7 @@ class TestCase1_AsyPadding(TestConv3dOp_2): ...@@ -668,7 +668,7 @@ class TestCase1_AsyPadding(TestConv3dOp_2):
self.padding_algorithm = "EXPLICIT" self.padding_algorithm = "EXPLICIT"
class TestWithGroup1_AsyPadding(TestConv3dOp_2): class TestWithGroup1_AsyPadding(TestConv3DOp_2):
def init_group(self): def init_group(self):
self.groups = 3 self.groups = 3
...@@ -677,7 +677,7 @@ class TestWithGroup1_AsyPadding(TestConv3dOp_2): ...@@ -677,7 +677,7 @@ class TestWithGroup1_AsyPadding(TestConv3dOp_2):
self.padding_algorithm = "EXPLICIT" self.padding_algorithm = "EXPLICIT"
class TestWithGroup2_AsyPadding(TestConv3dOp_2): class TestWithGroup2_AsyPadding(TestConv3DOp_2):
def init_test_case(self): def init_test_case(self):
self.stride = [1, 1, 1] self.stride = [1, 1, 1]
self.input_size = [2, 3, 4, 4, 4] # NCDHW self.input_size = [2, 3, 4, 4, 4] # NCDHW
...@@ -693,7 +693,7 @@ class TestWithGroup2_AsyPadding(TestConv3dOp_2): ...@@ -693,7 +693,7 @@ class TestWithGroup2_AsyPadding(TestConv3dOp_2):
self.padding_algorithm = "EXPLICIT" self.padding_algorithm = "EXPLICIT"
class TestWith1x1_AsyPadding(TestConv3dOp_2): class TestWith1x1_AsyPadding(TestConv3DOp_2):
def init_test_case(self): def init_test_case(self):
self.stride = [1, 1, 1] self.stride = [1, 1, 1]
self.input_size = [2, 3, 4, 4, 4] self.input_size = [2, 3, 4, 4, 4]
...@@ -712,7 +712,7 @@ class TestWith1x1_AsyPadding(TestConv3dOp_2): ...@@ -712,7 +712,7 @@ class TestWith1x1_AsyPadding(TestConv3dOp_2):
self.padding_algorithm = "EXPLICIT" self.padding_algorithm = "EXPLICIT"
class TestWithDilation_AsyPadding(TestConv3dOp_2): class TestWithDilation_AsyPadding(TestConv3DOp_2):
def init_test_case(self): def init_test_case(self):
self.stride = [1, 1, 1] self.stride = [1, 1, 1]
self.input_size = [2, 3, 6, 6, 6] self.input_size = [2, 3, 6, 6, 6]
...@@ -731,41 +731,41 @@ class TestWithDilation_AsyPadding(TestConv3dOp_2): ...@@ -731,41 +731,41 @@ class TestWithDilation_AsyPadding(TestConv3dOp_2):
self.padding_algorithm = "EXPLICIT" self.padding_algorithm = "EXPLICIT"
create_test_cudnn_class(TestConv3dOp_AsyPadding) create_test_cudnn_class(TestConv3DOp_AsyPadding)
create_test_cudnn_class(TestWithGroup1_AsyPadding) create_test_cudnn_class(TestWithGroup1_AsyPadding)
create_test_cudnn_class(TestWithGroup2_AsyPadding) create_test_cudnn_class(TestWithGroup2_AsyPadding)
create_test_cudnn_class(TestWith1x1_AsyPadding) create_test_cudnn_class(TestWith1x1_AsyPadding)
create_test_cudnn_class(TestWithDilation_AsyPadding) create_test_cudnn_class(TestWithDilation_AsyPadding)
create_test_padding_SAME_class(TestConv3dOp_AsyPadding) create_test_padding_SAME_class(TestConv3DOp_AsyPadding)
create_test_padding_SAME_class(TestWithGroup1_AsyPadding) create_test_padding_SAME_class(TestWithGroup1_AsyPadding)
create_test_padding_SAME_class(TestWith1x1_AsyPadding) create_test_padding_SAME_class(TestWith1x1_AsyPadding)
create_test_padding_VALID_class(TestConv3dOp_AsyPadding) create_test_padding_VALID_class(TestConv3DOp_AsyPadding)
create_test_padding_VALID_class(TestWithGroup1_AsyPadding) create_test_padding_VALID_class(TestWithGroup1_AsyPadding)
create_test_padding_VALID_class(TestWith1x1_AsyPadding) create_test_padding_VALID_class(TestWith1x1_AsyPadding)
create_test_cudnn_padding_SAME_class(TestConv3dOp_AsyPadding) create_test_cudnn_padding_SAME_class(TestConv3DOp_AsyPadding)
create_test_cudnn_padding_SAME_class(TestWithGroup1_AsyPadding) create_test_cudnn_padding_SAME_class(TestWithGroup1_AsyPadding)
create_test_cudnn_padding_SAME_class(TestWith1x1_AsyPadding) create_test_cudnn_padding_SAME_class(TestWith1x1_AsyPadding)
create_test_cudnn_padding_VALID_class(TestConv3dOp_AsyPadding) create_test_cudnn_padding_VALID_class(TestConv3DOp_AsyPadding)
create_test_cudnn_padding_VALID_class(TestWithGroup1_AsyPadding) create_test_cudnn_padding_VALID_class(TestWithGroup1_AsyPadding)
create_test_cudnn_padding_VALID_class(TestWith1x1_AsyPadding) create_test_cudnn_padding_VALID_class(TestWith1x1_AsyPadding)
create_test_channel_last_class(TestConv3dOp_AsyPadding) create_test_channel_last_class(TestConv3DOp_AsyPadding)
create_test_channel_last_class(TestWithGroup1_AsyPadding) create_test_channel_last_class(TestWithGroup1_AsyPadding)
create_test_channel_last_class(TestWith1x1_AsyPadding) create_test_channel_last_class(TestWith1x1_AsyPadding)
create_test_cudnn_channel_last_class(TestConv3dOp_AsyPadding) create_test_cudnn_channel_last_class(TestConv3DOp_AsyPadding)
create_test_cudnn_channel_last_class(TestWithGroup1_AsyPadding) create_test_cudnn_channel_last_class(TestWithGroup1_AsyPadding)
create_test_cudnn_channel_last_class(TestWith1x1_AsyPadding) create_test_cudnn_channel_last_class(TestWith1x1_AsyPadding)
...@@ -777,7 +777,7 @@ create_test_cudnn_channel_last_class(TestWith1x1_AsyPadding) ...@@ -777,7 +777,7 @@ create_test_cudnn_channel_last_class(TestWith1x1_AsyPadding)
# --------- test python API --------------- # --------- test python API ---------------
class TestConv3dAPI(unittest.TestCase): class TestConv3DAPI(unittest.TestCase):
def test_api(self): def test_api(self):
input_NDHWC = fluid.layers.data( input_NDHWC = fluid.layers.data(
...@@ -853,7 +853,7 @@ class TestConv3dAPI(unittest.TestCase): ...@@ -853,7 +853,7 @@ class TestConv3dAPI(unittest.TestCase):
data_format="NCDHW") data_format="NCDHW")
class TestConv3dAPI_Error(unittest.TestCase): class TestConv3DAPI_Error(unittest.TestCase):
def test_api(self): def test_api(self):
input = fluid.layers.data( input = fluid.layers.data(
name="input", name="input",
......
...@@ -139,7 +139,7 @@ class Conv3DTransposeTestCase(unittest.TestCase): ...@@ -139,7 +139,7 @@ class Conv3DTransposeTestCase(unittest.TestCase):
def paddle_nn_layer(self): def paddle_nn_layer(self):
x_var = dg.to_variable(self.input) x_var = dg.to_variable(self.input)
conv = nn.ConvTranspose3d( conv = nn.Conv3DTranspose(
self.num_channels, self.num_channels,
self.num_filters, self.num_filters,
self.filter_size, self.filter_size,
......
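Likewise nn.ConvTranspose3d becomes nn.Conv3DTranspose. A minimal sketch, assuming the 2.0rc constructor also accepts a stride keyword as the layer tests suggest:

import paddle
import paddle.nn as nn

paddle.disable_static()
x = paddle.rand([2, 3, 4, 4, 4])               # NCDHW input
conv = nn.Conv3DTranspose(3, 6, 3, stride=2)   # 3x3x3 kernel, stride 2
y = conv(x)
print(y.shape)                                 # [2, 6, 9, 9, 9]: (4 - 1) * 2 + 3 = 9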
...@@ -107,7 +107,7 @@ def conv3dtranspose_forward_naive(input_, filter_, attrs): ...@@ -107,7 +107,7 @@ def conv3dtranspose_forward_naive(input_, filter_, attrs):
return out return out
class TestConv3dTransposeOp(OpTest): class TestConv3DTransposeOp(OpTest):
def setUp(self): def setUp(self):
# init as conv transpose # init as conv transpose
self.use_cudnn = False self.use_cudnn = False
...@@ -200,7 +200,7 @@ class TestConv3dTransposeOp(OpTest): ...@@ -200,7 +200,7 @@ class TestConv3dTransposeOp(OpTest):
self.op_type = "conv3d_transpose" self.op_type = "conv3d_transpose"
class TestWithSymmetricPad(TestConv3dTransposeOp): class TestWithSymmetricPad(TestConv3DTransposeOp):
def init_test_case(self): def init_test_case(self):
self.check_no_input = True self.check_no_input = True
self.pad = [1, 1, 1] self.pad = [1, 1, 1]
...@@ -212,7 +212,7 @@ class TestWithSymmetricPad(TestConv3dTransposeOp): ...@@ -212,7 +212,7 @@ class TestWithSymmetricPad(TestConv3dTransposeOp):
self.filter_size = [f_c, 6, 3, 3, 3] self.filter_size = [f_c, 6, 3, 3, 3]
class TestWithAsymmetricPad(TestConv3dTransposeOp): class TestWithAsymmetricPad(TestConv3DTransposeOp):
def init_test_case(self): def init_test_case(self):
self.pad = [1, 0, 1, 0, 1, 2] self.pad = [1, 0, 1, 0, 1, 2]
self.stride = [1, 1, 1] self.stride = [1, 1, 1]
...@@ -223,7 +223,7 @@ class TestWithAsymmetricPad(TestConv3dTransposeOp): ...@@ -223,7 +223,7 @@ class TestWithAsymmetricPad(TestConv3dTransposeOp):
self.filter_size = [f_c, 6, 3, 3, 3] self.filter_size = [f_c, 6, 3, 3, 3]
class TestWithSAMEPad(TestConv3dTransposeOp): class TestWithSAMEPad(TestConv3DTransposeOp):
def init_test_case(self): def init_test_case(self):
self.stride = [1, 1, 2] self.stride = [1, 1, 2]
self.dilations = [1, 2, 1] self.dilations = [1, 2, 1]
...@@ -234,7 +234,7 @@ class TestWithSAMEPad(TestConv3dTransposeOp): ...@@ -234,7 +234,7 @@ class TestWithSAMEPad(TestConv3dTransposeOp):
self.padding_algorithm = 'SAME' self.padding_algorithm = 'SAME'
class TestWithVALIDPad(TestConv3dTransposeOp): class TestWithVALIDPad(TestConv3DTransposeOp):
def init_test_case(self): def init_test_case(self):
self.stride = [2, 1, 1] self.stride = [2, 1, 1]
self.dilations = [1, 1, 1] self.dilations = [1, 1, 1]
...@@ -245,7 +245,7 @@ class TestWithVALIDPad(TestConv3dTransposeOp): ...@@ -245,7 +245,7 @@ class TestWithVALIDPad(TestConv3dTransposeOp):
self.padding_algorithm = 'VALID' self.padding_algorithm = 'VALID'
class TestWithStride(TestConv3dTransposeOp): class TestWithStride(TestConv3DTransposeOp):
def init_test_case(self): def init_test_case(self):
self.check_no_filter = True self.check_no_filter = True
self.pad = [1, 1, 1] self.pad = [1, 1, 1]
...@@ -257,7 +257,7 @@ class TestWithStride(TestConv3dTransposeOp): ...@@ -257,7 +257,7 @@ class TestWithStride(TestConv3dTransposeOp):
self.filter_size = [f_c, 6, 3, 3, 3] self.filter_size = [f_c, 6, 3, 3, 3]
class TestWithGroups(TestConv3dTransposeOp): class TestWithGroups(TestConv3DTransposeOp):
def init_test_case(self): def init_test_case(self):
self.pad = [1, 1, 1] self.pad = [1, 1, 1]
self.stride = [1, 1, 1] self.stride = [1, 1, 1]
...@@ -268,7 +268,7 @@ class TestWithGroups(TestConv3dTransposeOp): ...@@ -268,7 +268,7 @@ class TestWithGroups(TestConv3dTransposeOp):
self.filter_size = [f_c, 3, 3, 3, 3] self.filter_size = [f_c, 3, 3, 3, 3]
class TestWithDilation(TestConv3dTransposeOp): class TestWithDilation(TestConv3DTransposeOp):
def init_test_case(self): def init_test_case(self):
self.pad = [1, 1, 1] self.pad = [1, 1, 1]
self.stride = [1, 1, 1] self.stride = [1, 1, 1]
...@@ -279,7 +279,7 @@ class TestWithDilation(TestConv3dTransposeOp): ...@@ -279,7 +279,7 @@ class TestWithDilation(TestConv3dTransposeOp):
self.filter_size = [f_c, 6, 3, 3, 3] self.filter_size = [f_c, 6, 3, 3, 3]
class Test_NHWC(TestConv3dTransposeOp): class Test_NHWC(TestConv3DTransposeOp):
def init_test_case(self): def init_test_case(self):
self.pad = [0, 0, 0] self.pad = [0, 0, 0]
self.stride = [1, 1, 1] self.stride = [1, 1, 1]
...@@ -294,7 +294,7 @@ class Test_NHWC(TestConv3dTransposeOp): ...@@ -294,7 +294,7 @@ class Test_NHWC(TestConv3dTransposeOp):
# ------------ test_cudnn ------------ # ------------ test_cudnn ------------
@unittest.skipIf(not core.is_compiled_with_cuda(), @unittest.skipIf(not core.is_compiled_with_cuda(),
"core is not compiled with CUDA") "core is not compiled with CUDA")
class TestCUDNN(TestConv3dTransposeOp): class TestCUDNN(TestConv3DTransposeOp):
def init_op_type(self): def init_op_type(self):
self.use_cudnn = True self.use_cudnn = True
self.op_type = "conv3d_transpose" self.op_type = "conv3d_transpose"
...@@ -419,7 +419,7 @@ class TestCUDNNWithGroups(TestWithGroups): ...@@ -419,7 +419,7 @@ class TestCUDNNWithGroups(TestWithGroups):
@unittest.skipIf(not core.is_compiled_with_cuda(), @unittest.skipIf(not core.is_compiled_with_cuda(),
"core is not compiled with CUDA") "core is not compiled with CUDA")
class TestCUDNN_NHWC(TestConv3dTransposeOp): class TestCUDNN_NHWC(TestConv3DTransposeOp):
def init_test_case(self): def init_test_case(self):
self.pad = [0, 0, 0] self.pad = [0, 0, 0]
self.stride = [1, 1, 1] self.stride = [1, 1, 1]
......
...@@ -20,10 +20,10 @@ import numpy as np ...@@ -20,10 +20,10 @@ import numpy as np
import paddle.fluid.core as core import paddle.fluid.core as core
import paddle.fluid as fluid import paddle.fluid as fluid
from op_test import OpTest from op_test import OpTest
from test_conv3d_transpose_op import conv3dtranspose_forward_naive, TestConv3dTransposeOp from test_conv3d_transpose_op import TestConv3DTransposeOp
class TestWithSymmetricPad_NHWC(TestConv3dTransposeOp): class TestWithSymmetricPad_NHWC(TestConv3DTransposeOp):
def init_test_case(self): def init_test_case(self):
self.pad = [1, 1, 1] self.pad = [1, 1, 1]
self.stride = [1, 1, 1] self.stride = [1, 1, 1]
...@@ -35,7 +35,7 @@ class TestWithSymmetricPad_NHWC(TestConv3dTransposeOp): ...@@ -35,7 +35,7 @@ class TestWithSymmetricPad_NHWC(TestConv3dTransposeOp):
self.data_format = 'NHWC' self.data_format = 'NHWC'
class TestWithAsymmetricPad_NHWC(TestConv3dTransposeOp): class TestWithAsymmetricPad_NHWC(TestConv3DTransposeOp):
def init_test_case(self): def init_test_case(self):
self.pad = [1, 0, 1, 0, 1, 2] self.pad = [1, 0, 1, 0, 1, 2]
self.stride = [1, 1, 1] self.stride = [1, 1, 1]
...@@ -47,7 +47,7 @@ class TestWithAsymmetricPad_NHWC(TestConv3dTransposeOp): ...@@ -47,7 +47,7 @@ class TestWithAsymmetricPad_NHWC(TestConv3dTransposeOp):
self.data_format = 'NHWC' self.data_format = 'NHWC'
class TestWithGroups_NHWC(TestConv3dTransposeOp): class TestWithGroups_NHWC(TestConv3DTransposeOp):
def init_test_case(self): def init_test_case(self):
self.check_no_filter = True self.check_no_filter = True
self.pad = [1, 1, 1] self.pad = [1, 1, 1]
...@@ -60,7 +60,7 @@ class TestWithGroups_NHWC(TestConv3dTransposeOp): ...@@ -60,7 +60,7 @@ class TestWithGroups_NHWC(TestConv3dTransposeOp):
self.data_format = 'NHWC' self.data_format = 'NHWC'
class TestWithStride_NHWC(TestConv3dTransposeOp): class TestWithStride_NHWC(TestConv3DTransposeOp):
def init_test_case(self): def init_test_case(self):
self.pad = [1, 1, 1] self.pad = [1, 1, 1]
self.stride = [2, 2, 2] self.stride = [2, 2, 2]
...@@ -72,7 +72,7 @@ class TestWithStride_NHWC(TestConv3dTransposeOp): ...@@ -72,7 +72,7 @@ class TestWithStride_NHWC(TestConv3dTransposeOp):
self.data_format = 'NHWC' self.data_format = 'NHWC'
class TestWithDilation_NHWC(TestConv3dTransposeOp): class TestWithDilation_NHWC(TestConv3DTransposeOp):
def init_test_case(self): def init_test_case(self):
self.check_no_input = True self.check_no_input = True
self.pad = [1, 1, 1] self.pad = [1, 1, 1]
...@@ -85,7 +85,7 @@ class TestWithDilation_NHWC(TestConv3dTransposeOp): ...@@ -85,7 +85,7 @@ class TestWithDilation_NHWC(TestConv3dTransposeOp):
self.data_format = 'NHWC' self.data_format = 'NHWC'
class TestConv3dTransposeAPI(unittest.TestCase): class TestConv3DTransposeAPI(unittest.TestCase):
def test_case1(self): def test_case1(self):
data1 = fluid.layers.data( data1 = fluid.layers.data(
name='data1', shape=[3, 5, 5, 5], dtype='float32') name='data1', shape=[3, 5, 5, 5], dtype='float32')
...@@ -164,7 +164,7 @@ class TestConv3dTransposeAPI(unittest.TestCase): ...@@ -164,7 +164,7 @@ class TestConv3dTransposeAPI(unittest.TestCase):
self.assertIsNotNone(results[6]) self.assertIsNotNone(results[6])
class TestConv3dTransposeOpException(unittest.TestCase): class TestConv3DTransposeOpException(unittest.TestCase):
def test_exception(self): def test_exception(self):
data = fluid.layers.data( data = fluid.layers.data(
name='data', shape=[3, 5, 5, 5], dtype="float32") name='data', shape=[3, 5, 5, 5], dtype="float32")
......
...@@ -438,7 +438,7 @@ class TestConv3DDoubleGradCheck_ChannelLast(unittest.TestCase): ...@@ -438,7 +438,7 @@ class TestConv3DDoubleGradCheck_ChannelLast(unittest.TestCase):
self.func(p) self.func(p)
class TestConv3dDoubleGradCheck_ChannelLast_AsyPadding(unittest.TestCase): class TestConv3DDoubleGradCheck_ChannelLast_AsyPadding(unittest.TestCase):
@prog_scope() @prog_scope()
def func(self, place): def func(self, place):
shape = [2, 2, 2, 2, 3] shape = [2, 2, 2, 2, 3]
......
@@ -31,7 +31,7 @@ class TestGeneratorSeed(unittest.TestCase):
    """
    def test_gen_dropout_dygraph(self):
-        gen = paddle.manual_seed(12343)
+        gen = paddle.seed(12343)
        fluid.enable_dygraph()
@@ -70,13 +70,13 @@ class TestGeneratorSeed(unittest.TestCase):
        """Test Generator seed."""
        fluid.enable_dygraph()
-        paddle.manual_seed(12312321111)
+        paddle.seed(12312321111)
        x = fluid.layers.gaussian_random([120], dtype="float32")
        st1 = paddle.get_cuda_rng_state()
        x1 = fluid.layers.gaussian_random([120], dtype="float32")
        paddle.set_cuda_rng_state(st1)
        x2 = fluid.layers.gaussian_random([120], dtype="float32")
-        paddle.manual_seed(12312321111)
+        paddle.seed(12312321111)
        x3 = fluid.layers.gaussian_random([120], dtype="float32")
        x_np = x.numpy()
        x1_np = x1.numpy()
@@ -93,13 +93,13 @@ class TestGeneratorSeed(unittest.TestCase):
        fluid.enable_dygraph()
-        gen = paddle.manual_seed(12312321111)
+        gen = paddle.seed(12312321111)
        x = paddle.randint(low=10, shape=[10], dtype="int32")
        st1 = gen.get_state()
        x1 = paddle.randint(low=10, shape=[10], dtype="int32")
        gen.set_state(st1)
        x2 = paddle.randint(low=10, shape=[10], dtype="int32")
-        paddle.manual_seed(12312321111)
+        paddle.seed(12312321111)
        x3 = paddle.randint(low=10, shape=[10], dtype="int32")
        x_np = x.numpy()
        x1_np = x1.numpy()
@@ -114,7 +114,7 @@ class TestGeneratorSeed(unittest.TestCase):
    def test_gen_TruncatedNormal_initializer(self):
        fluid.disable_dygraph()
-        gen = paddle.manual_seed(123123143)
+        gen = paddle.seed(123123143)
        cur_state = paddle.get_cuda_rng_state()
        startup_program = fluid.Program()
@@ -140,7 +140,7 @@ class TestGeneratorSeed(unittest.TestCase):
                           feed={},
                           fetch_list=[result_1, result_2])
-        paddle.manual_seed(123123143)
+        paddle.seed(123123143)
        with fluid.program_guard(train_program, startup_program):
            exe.run(startup_program)
            out2 = exe.run(train_program,
...
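The renamed entry point still returns the global Generator, so the get_state/set_state replay pattern exercised above keeps working. A minimal sketch, assuming a CPU run (a CUDA run would use paddle.get_cuda_rng_state/paddle.set_cuda_rng_state exactly as in the test):

import paddle

paddle.disable_static()
gen = paddle.seed(12312321111)                  # replaces paddle.manual_seed(...)
x1 = paddle.randint(low=10, shape=[10], dtype="int32")
st = gen.get_state()                            # snapshot the generator state
x2 = paddle.randint(low=10, shape=[10], dtype="int32")
gen.set_state(st)                               # rewind to the snapshot
x3 = paddle.randint(low=10, shape=[10], dtype="int32")
# x2 and x3 are drawn from the same state, so they should be equal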
@@ -34,7 +34,7 @@ def random_reader():
def simple_fc_net(places, use_legacy_py_reader, use_double_buffer):
-    paddle.manual_seed(1)
+    paddle.seed(1)
    paddle.framework.random._manual_program_seed(1)
    startup_prog = fluid.Program()
    main_prog = fluid.Program()
...
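Nearly every test file touched by this PR pairs the renamed global seed with the private program-seed helper: the first call seeds the dygraph generators, the second pins the random_seed of newly created static Programs so both execution modes stay reproducible. The idiom, as used throughout this diff:

import paddle

paddle.seed(1)                                    # was paddle.manual_seed(1)
paddle.framework.random._manual_program_seed(1)   # internal helper for static Programs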
@@ -286,7 +286,7 @@ class TestModulatedDeformableConvInvalidInput(unittest.TestCase):
        self.assertRaises(TypeError, test_invalid_offset)
-class TestDeformConv2dAPI(unittest.TestCase):
+class TestDeformConv2DAPI(unittest.TestCase):
    def test_api(self):
        def test_deform_conv2d_v1():
            paddle.enable_static()
...
@@ -487,7 +487,7 @@ class TestDropoutCAPI(unittest.TestCase):
        self.assertTrue(np.allclose(result.numpy(), result_np))
-class TestDropout2dFAPI(unittest.TestCase):
+class TestDropout2DFAPI(unittest.TestCase):
    def setUp(self):
        np.random.seed(123)
        self.places = [fluid.CPUPlace()]
@@ -535,7 +535,7 @@ class TestDropout2dFAPI(unittest.TestCase):
        self.assertTrue(np.allclose(res.numpy(), res_np))
-class TestDropout2dFAPIError(unittest.TestCase):
+class TestDropout2DFAPIError(unittest.TestCase):
    def test_errors(self):
        with program_guard(Program(), Program()):
@@ -554,7 +554,7 @@ class TestDropout2dFAPIError(unittest.TestCase):
        self.assertRaises(ValueError, test_dataformat)
-class TestDropout2dCAPI(unittest.TestCase):
+class TestDropout2DCAPI(unittest.TestCase):
    def setUp(self):
        np.random.seed(123)
        self.places = [fluid.CPUPlace()]
@@ -567,13 +567,13 @@ class TestDropout2dCAPI(unittest.TestCase):
            input_np = np.random.random([2, 3, 4, 5]).astype("float32")
            result_np = input_np
            input = fluid.dygraph.to_variable(input_np)
-            m = paddle.nn.Dropout2d(p=0.)
+            m = paddle.nn.Dropout2D(p=0.)
            m.eval()
            result = m(input)
            self.assertTrue(np.allclose(result.numpy(), result_np))
-class TestDropout3dFAPI(unittest.TestCase):
+class TestDropout3DFAPI(unittest.TestCase):
    def setUp(self):
        np.random.seed(123)
        self.places = [fluid.CPUPlace()]
@@ -621,7 +621,7 @@ class TestDropout3dFAPI(unittest.TestCase):
        self.assertTrue(np.allclose(res.numpy(), res_np))
-class TestDropout3dFAPIError(unittest.TestCase):
+class TestDropout3DFAPIError(unittest.TestCase):
    def test_errors(self):
        with program_guard(Program(), Program()):
@@ -640,7 +640,7 @@ class TestDropout3dFAPIError(unittest.TestCase):
        self.assertRaises(ValueError, test_dataformat)
-class TestDropout3dCAPI(unittest.TestCase):
+class TestDropout3DCAPI(unittest.TestCase):
    def setUp(self):
        np.random.seed(123)
        self.places = [fluid.CPUPlace()]
@@ -653,7 +653,7 @@ class TestDropout3dCAPI(unittest.TestCase):
            input_np = np.random.random([2, 3, 4, 5, 6]).astype("float32")
            result_np = input_np
            input = fluid.dygraph.to_variable(input_np)
-            m = paddle.nn.Dropout3d(p=0.)
+            m = paddle.nn.Dropout3D(p=0.)
            m.eval()
            result = m(input)
            self.assertTrue(np.allclose(result.numpy(), result_np))
...
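Dropout2d/Dropout3d become Dropout2D/Dropout3D with unchanged semantics: in eval mode the layer is an identity, which is exactly what the p=0. cases above rely on. A minimal sketch:

import numpy as np
import paddle

paddle.disable_static()
x = paddle.to_tensor(np.random.random([2, 3, 4, 5]).astype("float32"))
m = paddle.nn.Dropout2D(p=0.5)   # capitalized 2.0rc name
m.eval()                         # eval mode: dropout becomes a no-op
y = m(x)
assert np.allclose(y.numpy(), x.numpy())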
@@ -110,7 +110,7 @@ class TestDygraphMultiForward(unittest.TestCase):
        epoch_num = 1
        with fluid.dygraph.guard():
-            paddle.manual_seed(SEED)
+            paddle.seed(SEED)
            paddle.framework.random._manual_program_seed(SEED)
            mnist = MNIST()
            sgd = SGDOptimizer(
@@ -143,7 +143,7 @@ class TestDygraphMultiForward(unittest.TestCase):
                dy_param_init_value[param.name] = param.numpy()
        with new_program_scope():
-            paddle.manual_seed(SEED)
+            paddle.seed(SEED)
            paddle.framework.random._manual_program_seed(SEED)
            exe = fluid.Executor(fluid.CPUPlace(
            ) if not core.is_compiled_with_cuda() else fluid.CUDAPlace(0))
...
@@ -117,7 +117,7 @@ class TestDygraphWeightNorm(unittest.TestCase):
    def test_check_output(self):
        fluid.enable_imperative()
-        linear = paddle.nn.Conv2d(2, 3, 3)
+        linear = paddle.nn.Conv2D(2, 3, 3)
        before_weight = linear.weight.numpy()
        if self.dim == None:
            self.dim = -1
@@ -179,7 +179,7 @@ class TestDygraphRemoveWeightNorm(unittest.TestCase):
    def test_check_output(self):
        fluid.enable_imperative()
-        linear = paddle.nn.Conv2d(2, 3, 3)
+        linear = paddle.nn.Conv2D(2, 3, 3)
        before_weight = linear.weight
        wn = weight_norm(linear, dim=self.dim)
        rwn = remove_weight_norm(linear)
...
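The weight-norm tests only swap the layer name; the wrapper itself is unchanged. A minimal sketch of the pattern being tested, assuming weight_norm and remove_weight_norm are imported from paddle.nn.utils as in these tests:

import paddle
from paddle.nn.utils import weight_norm, remove_weight_norm

paddle.disable_static()
conv = paddle.nn.Conv2D(2, 3, 3)
conv = weight_norm(conv, dim=0)      # splits weight into weight_g and weight_v
y = conv(paddle.rand([1, 2, 8, 8]))
remove_weight_norm(conv)             # folds the factors back into a single weight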
@@ -466,7 +466,7 @@ class PaddingRNNTestBase(unittest.TestCase):
        pass
    def _prepare_program(self, config, parallel=True):
-        paddle.manual_seed(config.random_seed)
+        paddle.seed(config.random_seed)
        self.main_program = fluid.Program()
        self.startup_program = fluid.Program()
        with fluid.program_guard(self.main_program, self.startup_program):
...
@@ -39,7 +39,7 @@ class TestEmbeddingIdStopGradientBase(unittest.TestCase):
    def run_program(self, place, stop_gradient=False):
        np.random.seed(1)
-        paddle.manual_seed(1)
+        paddle.seed(1)
        paddle.framework.random._manual_program_seed(1)
        startup_program = fluid.Program()
...
@@ -137,7 +137,7 @@ class TestFCOpWithPadding(TestFCOp):
class TestFcOp_NumFlattenDims_NegOne(unittest.TestCase):
    def test_api(self):
        def run_program(num_flatten_dims):
-            paddle.manual_seed(SEED)
+            paddle.seed(SEED)
            startup_program = Program()
            main_program = Program()
...
@@ -57,7 +57,7 @@ class TestFuseBatchNormActPass(unittest.TestCase):
        return x, y, loss
    def check(self, place, use_cuda):
-        paddle.manual_seed(1)
+        paddle.seed(1)
        paddle.framework.random._manual_program_seed(1)
        main_program = fluid.Program()
        startup_program = fluid.Program()
...
@@ -158,7 +158,7 @@ class TestFusedBnAddActAPI(unittest.TestCase):
        return x, y, loss
    def check(self, place, use_cuda):
-        paddle.manual_seed(1)
+        paddle.seed(1)
        paddle.framework.random._manual_program_seed(1)
        iters = 5
        batch_size = 16
...
@@ -38,7 +38,7 @@ class TestGaussianRandomOp(OpTest):
            "seed": 10,
            "use_mkldnn": self.use_mkldnn
        }
-        paddle.manual_seed(10)
+        paddle.seed(10)
        self.outputs = {'Out': np.zeros((123, 92), dtype='float32')}
...
@@ -30,8 +30,6 @@ class TestGenerator(unittest.TestCase):
        """Test basic generator."""
        gen = generator.Generator()
        gen.manual_seed(123123143)
-        s = gen.initial_seed()
-        s = gen.seed()
        st = gen.get_state()
        gen.set_state(st)
        gen.random()
...
@@ -35,7 +35,7 @@ def random_reader():
def simple_fc_net(places, use_legacy_py_reader, use_double_buffer):
-    paddle.manual_seed(1)
+    paddle.seed(1)
    paddle.framework.random._manual_program_seed(1)
    startup_prog = fluid.Program()
    main_prog = fluid.Program()
...
@@ -269,7 +269,7 @@ class TestHSigmoidOpWithSparseGrad(unittest.TestCase):
    def training_test(self, is_sparse):
        with fluid.program_guard(fluid.Program(), fluid.Program()):
-            paddle.manual_seed(1)
+            paddle.seed(1)
            start_up = fluid.default_startup_program()
            x = np.arange(6).reshape(6)
            path_table = np.array([(1, 2, -1), (1, 2, -1)]).astype('int64')
...
@@ -120,7 +120,7 @@ class TestAmpScaler(unittest.TestCase):
        inp_np = np.random.random(size=[1, 3, 128, 128]).astype(np.float32)
        def run_simple_conv(inp_np, use_scaler=True):
-            paddle.manual_seed(10)
+            paddle.seed(10)
            paddle.framework.random._manual_program_seed(10)
            with fluid.dygraph.guard():
                model = SimpleConv(
@@ -205,7 +205,7 @@ class TestResnet2(unittest.TestCase):
        paddle.disable_static()
-        paddle.manual_seed(seed)
+        paddle.seed(seed)
        paddle.framework.random._manual_program_seed(seed)
        resnet = ResNet(use_cudnn=True)
@@ -282,7 +282,7 @@ class TestResnet(unittest.TestCase):
        batch_num = 1
        with fluid.dygraph.guard():
-            paddle.manual_seed(seed)
+            paddle.seed(seed)
            paddle.framework.random._manual_program_seed(seed)
            resnet = ResNet(use_cudnn=True)
...
@@ -206,7 +206,7 @@ class TestDygraphDeepCF(unittest.TestCase):
        else:
            (users_np, items_np, labels_np, num_users, num_items,
             matrix) = get_data()
-        paddle.manual_seed(seed)
+        paddle.seed(seed)
        paddle.framework.random._manual_program_seed(seed)
        startup = fluid.Program()
        main = fluid.Program()
@@ -243,7 +243,7 @@ class TestDygraphDeepCF(unittest.TestCase):
        sys.stderr.write('static loss %s\n' % static_loss)
        with fluid.dygraph.guard():
-            paddle.manual_seed(seed)
+            paddle.seed(seed)
            paddle.framework.random._manual_program_seed(seed)
            deepcf = DeepCF(num_users, num_items, matrix)
@@ -268,7 +268,7 @@ class TestDygraphDeepCF(unittest.TestCase):
            sys.stderr.write('dynamic loss: %s %s\n' % (slice, dy_loss))
        with fluid.dygraph.guard():
-            paddle.manual_seed(seed)
+            paddle.seed(seed)
            paddle.framework.random._manual_program_seed(seed)
            deepcf2 = DeepCF(num_users, num_items, matrix)
...
@@ -311,7 +311,7 @@ class TestDygraphDoubleGradVisitedUniq(TestCase):
        fluid.set_flags({'FLAGS_sort_sum_gradient': True})
        with fluid.dygraph.guard():
-            paddle.manual_seed(123)
+            paddle.seed(123)
            paddle.framework.random._manual_program_seed(123)
            a = fluid.dygraph.to_variable(value)
            a.stop_gradient = False
@@ -328,7 +328,7 @@ class TestDygraphDoubleGradVisitedUniq(TestCase):
            grad_1 = dx[0].numpy()
        with fluid.dygraph.guard():
-            paddle.manual_seed(123)
+            paddle.seed(123)
            paddle.framework.random._manual_program_seed(123)
            a = fluid.dygraph.to_variable(value)
            a.stop_gradient = False
...
@@ -56,7 +56,7 @@ class Generator(fluid.Layer):
class TestDygraphGAN(unittest.TestCase):
    def test_gan_float32(self):
        seed = 90
-        paddle.manual_seed(1)
+        paddle.seed(1)
        paddle.framework.random._manual_program_seed(1)
        startup = fluid.Program()
        discriminate_p = fluid.Program()
@@ -131,7 +131,7 @@ class TestDygraphGAN(unittest.TestCase):
        dy_params = dict()
        with fluid.dygraph.guard():
-            paddle.manual_seed(1)
+            paddle.seed(1)
            paddle.framework.random._manual_program_seed(1)
            discriminator = Discriminator()
@@ -176,7 +176,7 @@ class TestDygraphGAN(unittest.TestCase):
        dy_params2 = dict()
        with fluid.dygraph.guard():
            fluid.set_flags({'FLAGS_sort_sum_gradient': True})
-            paddle.manual_seed(1)
+            paddle.seed(1)
            paddle.framework.random._manual_program_seed(1)
            discriminator2 = Discriminator()
            generator2 = Generator()
...
@@ -61,7 +61,7 @@ class GCN(fluid.Layer):
class TestDygraphGNN(unittest.TestCase):
    def test_gnn_float32(self):
-        paddle.manual_seed(90)
+        paddle.seed(90)
        paddle.framework.random._manual_program_seed(90)
        startup = fluid.Program()
        main = fluid.Program()
@@ -112,7 +112,7 @@ class TestDygraphGNN(unittest.TestCase):
            scope.find_var(model.gc.weight.name).get_tensor())
        with fluid.dygraph.guard():
-            paddle.manual_seed(90)
+            paddle.seed(90)
            paddle.framework.random._manual_program_seed(90)
            features = np.ones([1, 100, 50], dtype=np.float32)
@@ -138,7 +138,7 @@ class TestDygraphGNN(unittest.TestCase):
            model_gc_weight_value = model.gc.weight.numpy()
        with fluid.dygraph.guard():
-            paddle.manual_seed(90)
+            paddle.seed(90)
            paddle.framework.random._manual_program_seed(90)
            features2 = np.ones([1, 100, 50], dtype=np.float32)
...
@@ -28,11 +28,11 @@ class LeNetDygraph(fluid.dygraph.Layer):
        super(LeNetDygraph, self).__init__()
        self.num_classes = num_classes
        self.features = nn.Sequential(
-            nn.Conv2d(
+            nn.Conv2D(
                1, 6, 3, stride=1, padding=1),
            nn.ReLU(),
            paddle.fluid.dygraph.Pool2D(2, 'max', 2),
-            nn.Conv2d(
+            nn.Conv2D(
                6, 16, 5, stride=1, padding=0),
            nn.ReLU(),
            paddle.fluid.dygraph.Pool2D(2, 'max', 2))
@@ -60,7 +60,7 @@ def init_weights(layer):
        new_bias = paddle.fluid.layers.fill_constant(
            layer.bias.shape, layer.bias.dtype, value=-0.1)
        layer.bias.set_value(new_bias)
-    elif type(layer) == nn.Conv2d:
+    elif type(layer) == nn.Conv2D:
        new_weight = paddle.fluid.layers.fill_constant(
            layer.weight.shape, layer.weight.dtype, value=0.7)
        layer.weight.set_value(new_weight)
@@ -80,7 +80,7 @@ class TestLayerApply(unittest.TestCase):
            if type(layer) == nn.Linear:
                np.testing.assert_allclose(layer.weight.numpy(), 0.9)
                np.testing.assert_allclose(layer.bias.numpy(), -0.1)
-            elif type(layer) == nn.Conv2d:
+            elif type(layer) == nn.Conv2D:
                np.testing.assert_allclose(layer.weight.numpy(), 0.7)
                np.testing.assert_allclose(layer.bias.numpy(), -0.2)
...
@@ -27,11 +27,11 @@ class LeNetDygraph(fluid.dygraph.Layer):
    def __init__(self):
        super(LeNetDygraph, self).__init__()
        self.features = nn.Sequential(
-            nn.Conv2d(
+            nn.Conv2D(
                1, 6, 3, stride=1, padding=1),
            nn.ReLU(),
            paddle.fluid.dygraph.Pool2D(2, 'max', 2),
-            nn.Conv2d(
+            nn.Conv2D(
                6, 16, 5, stride=1, padding=0),
            nn.ReLU(),
            paddle.fluid.dygraph.Pool2D(2, 'max', 2))
...
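Both LeNet fixtures keep fluid.dygraph.Pool2D next to the renamed nn.Conv2D. A sketch of the same feature stack written purely against paddle.nn, assuming nn.MaxPool2D(2, 2) is an adequate stand-in for Pool2D(2, 'max', 2):

import paddle
import paddle.nn as nn

paddle.disable_static()
features = nn.Sequential(
    nn.Conv2D(1, 6, 3, stride=1, padding=1),
    nn.ReLU(),
    nn.MaxPool2D(2, 2),
    nn.Conv2D(6, 16, 5, stride=1, padding=0),
    nn.ReLU(),
    nn.MaxPool2D(2, 2))
x = paddle.rand([4, 1, 28, 28])
print(features(x).shape)  # [4, 16, 5, 5]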
@@ -95,7 +95,7 @@ class TestDygraphSimpleNet(unittest.TestCase):
        for is_sort_sum_gradient in [True, False]:
            with fluid.dygraph.guard(place):
-                paddle.manual_seed(seed)
+                paddle.seed(seed)
                paddle.framework.random._manual_program_seed(seed)
                simple_net = SimpleNet(
@@ -140,7 +140,7 @@ class TestDygraphSimpleNet(unittest.TestCase):
                dy_loss_value = dy_loss.numpy()
            with new_program_scope():
-                paddle.manual_seed(seed)
+                paddle.seed(seed)
                paddle.framework.random._manual_program_seed(seed)
                simple_net = SimpleNet(
...
@@ -403,7 +403,7 @@ class TestDygraphOCRAttention(unittest.TestCase):
        with fluid.dygraph.guard():
            fluid.set_flags({'FLAGS_sort_sum_gradient': True})
-            paddle.manual_seed(seed)
+            paddle.seed(seed)
            paddle.framework.random._manual_program_seed(seed)
            ocr_attention = OCRAttention()
@@ -454,7 +454,7 @@ class TestDygraphOCRAttention(unittest.TestCase):
                dy_param_value[param.name] = param.numpy()
        with new_program_scope():
-            paddle.manual_seed(seed)
+            paddle.seed(seed)
            paddle.framework.random._manual_program_seed(seed)
            exe = fluid.Executor(fluid.CPUPlace(
            ) if not core.is_compiled_with_cuda() else fluid.CUDAPlace(0))
...
@@ -74,7 +74,7 @@ class TestImperativeOptimizerBase(unittest.TestCase):
        with fluid.dygraph.guard(place):
            try:
-                paddle.manual_seed(seed)
+                paddle.seed(seed)
                paddle.framework.random._manual_program_seed(seed)
                mlp = MLP()
                optimizer = self.get_optimizer_dygraph(
@@ -91,7 +91,7 @@ class TestImperativeOptimizerBase(unittest.TestCase):
        ) else fluid.CUDAPlace(0)
        with fluid.dygraph.guard(place):
-            paddle.manual_seed(seed)
+            paddle.seed(seed)
            paddle.framework.random._manual_program_seed(seed)
            mlp = MLP()
@@ -132,7 +132,7 @@ class TestImperativeOptimizerBase(unittest.TestCase):
                dy_param_value[param.name] = param.numpy()
        with new_program_scope():
-            paddle.manual_seed(seed)
+            paddle.seed(seed)
            paddle.framework.random._manual_program_seed(seed)
            if place == None:
...
@@ -74,7 +74,7 @@ class TestImperativeOptimizerBase(unittest.TestCase):
            try:
                paddle.disable_static()
-                paddle.manual_seed(seed)
+                paddle.seed(seed)
                paddle.framework.random._manual_program_seed(seed)
                mlp = MLP()
                optimizer = self.get_optimizer_dygraph(
@@ -93,7 +93,7 @@ class TestImperativeOptimizerBase(unittest.TestCase):
        ) else fluid.CUDAPlace(0)
        paddle.disable_static(place)
-        paddle.manual_seed(seed)
+        paddle.seed(seed)
        paddle.framework.random._manual_program_seed(seed)
        mlp = MLP()
@@ -142,7 +142,7 @@ class TestImperativeOptimizerBase(unittest.TestCase):
        paddle.enable_static()
        with new_program_scope():
-            paddle.manual_seed(seed)
+            paddle.seed(seed)
            paddle.framework.random._manual_program_seed(seed)
            if place == None:
...
@@ -226,7 +226,7 @@ class TestDygraphPtbRnn(unittest.TestCase):
        traced_layer = None
        with fluid.dygraph.guard():
-            paddle.manual_seed(seed)
+            paddle.seed(seed)
            paddle.framework.random._manual_program_seed(seed)
            # TODO: marsyang1993 Change seed to
            ptb_model = PtbModel(
@@ -294,7 +294,7 @@ class TestDygraphPtbRnn(unittest.TestCase):
            dy_last_hidden_value = last_hidden.numpy()
        with new_program_scope():
-            paddle.manual_seed(seed)
+            paddle.seed(seed)
            paddle.framework.random._manual_program_seed(seed)
            ptb_model = PtbModel(
                hidden_size=hidden_size,
...
@@ -45,7 +45,7 @@ class TestDygraphPtbRnnSortGradient(unittest.TestCase):
        with fluid.dygraph.guard():
            fluid.set_flags({'FLAGS_sort_sum_gradient': True})
-            paddle.manual_seed(seed)
+            paddle.seed(seed)
            paddle.framework.random._manual_program_seed(seed)
            # TODO: marsyang1993 Change seed to
@@ -95,7 +95,7 @@ class TestDygraphPtbRnnSortGradient(unittest.TestCase):
            dy_last_hidden_value = last_hidden.numpy()
        with new_program_scope():
-            paddle.manual_seed(seed)
+            paddle.seed(seed)
            paddle.framework.random._manual_program_seed(seed)
            ptb_model = PtbModel(
...
@@ -64,7 +64,7 @@ class TestImperativeMnist(unittest.TestCase):
        mask = np.array(mask_list).astype("float32")
        with fluid.dygraph.guard():
-            paddle.manual_seed(seed)
+            paddle.seed(seed)
            paddle.framework.random._manual_program_seed(seed)
            policy = Policy(input_size=4)
@@ -105,7 +105,7 @@ class TestImperativeMnist(unittest.TestCase):
                dy_param_value[param.name] = param.numpy()
        with new_program_scope():
-            paddle.manual_seed(seed)
+            paddle.seed(seed)
            paddle.framework.random._manual_program_seed(seed)
            exe = fluid.Executor(fluid.CPUPlace(
...
@@ -251,7 +251,7 @@ class TestDygraphResnet(unittest.TestCase):
        traced_layer = None
        with fluid.dygraph.guard():
-            paddle.manual_seed(seed)
+            paddle.seed(seed)
            paddle.framework.random._manual_program_seed(seed)
            resnet = ResNet()
@@ -334,7 +334,7 @@ class TestDygraphResnet(unittest.TestCase):
            dy_param_value[param.name] = param.numpy()
        with new_program_scope():
-            paddle.manual_seed(seed)
+            paddle.seed(seed)
            paddle.framework.random._manual_program_seed(seed)
            exe = fluid.Executor(fluid.CPUPlace(
...
@@ -78,7 +78,7 @@ class TestDygraphResnetSortGradient(unittest.TestCase):
        batch_num = 10
        with fluid.dygraph.guard():
            fluid.set_flags({'FLAGS_sort_sum_gradient': True})
-            paddle.manual_seed(seed)
+            paddle.seed(seed)
            paddle.framework.random._manual_program_seed(seed)
            resnet = ResNet()
@@ -137,7 +137,7 @@ class TestDygraphResnetSortGradient(unittest.TestCase):
            dy_param_value[param.name] = param.numpy()
        with new_program_scope():
-            paddle.manual_seed(seed)
+            paddle.seed(seed)
            paddle.framework.random._manual_program_seed(seed)
            exe = fluid.Executor(fluid.CPUPlace(
...
@@ -219,7 +219,7 @@ class TestDygraphPtbRnn(unittest.TestCase):
        batch_num = 200
        with fluid.dygraph.guard():
-            paddle.manual_seed(seed)
+            paddle.seed(seed)
            paddle.framework.random._manual_program_seed(seed)
            # TODO: marsyang1993 Change seed to
            ptb_model = PtbModel(
@@ -305,7 +305,7 @@ class TestDygraphPtbRnn(unittest.TestCase):
        batch_num = 200
        with fluid.dygraph.guard():
-            paddle.manual_seed(seed)
+            paddle.seed(seed)
            paddle.framework.random._manual_program_seed(seed)
            # TODO: marsyang1993 Change seed to
            ptb_model = PtbModel(
@@ -414,7 +414,7 @@ class TestDygraphPtbRnn(unittest.TestCase):
        batch_num = 200
        with fluid.dygraph.guard():
-            paddle.manual_seed(seed)
+            paddle.seed(seed)
            paddle.framework.random._manual_program_seed(seed)
            # TODO: marsyang1993 Change seed to
            ptb_model = PtbModel(
@@ -521,7 +521,7 @@ class TestDygraphPtbRnn(unittest.TestCase):
        batch_num = 200
        with fluid.dygraph.guard():
-            paddle.manual_seed(seed)
+            paddle.seed(seed)
            paddle.framework.random._manual_program_seed(seed)
            # TODO: marsyang1993 Change seed to
            ptb_model = PtbModel(
@@ -711,7 +711,7 @@ class TestDygraphPtbRnn(unittest.TestCase):
        batch_num = 200
        with fluid.dygraph.guard():
-            paddle.manual_seed(seed)
+            paddle.seed(seed)
            paddle.framework.random._manual_program_seed(seed)
            # TODO: marsyang1993 Change seed to
            ptb_model = PtbModel(
@@ -802,7 +802,7 @@ class TestDygraphPtbRnn(unittest.TestCase):
        batch_num = 200
        with fluid.dygraph.guard():
-            paddle.manual_seed(seed)
+            paddle.seed(seed)
            paddle.framework.random._manual_program_seed(seed)
            # TODO: marsyang1993 Change seed to
...
@@ -219,7 +219,7 @@ class TestDygraphPtbRnn(unittest.TestCase):
        batch_num = 200
        with fluid.dygraph.guard():
-            paddle.manual_seed(seed)
+            paddle.seed(seed)
            paddle.framework.random._manual_program_seed(seed)
            # TODO: marsyang1993 Change seed to
            ptb_model = PtbModel(
@@ -308,7 +308,7 @@ class TestDygraphPtbRnn(unittest.TestCase):
        batch_num = 200
        with fluid.dygraph.guard():
-            paddle.manual_seed(seed)
+            paddle.seed(seed)
            paddle.framework.random._manual_program_seed(seed)
            # TODO: marsyang1993 Change seed to
            ptb_model = PtbModel(
@@ -416,7 +416,7 @@ class TestDygraphPtbRnn(unittest.TestCase):
        batch_num = 200
        with fluid.dygraph.guard():
-            paddle.manual_seed(seed)
+            paddle.seed(seed)
            paddle.framework.random._manual_program_seed(seed)
            # TODO: marsyang1993 Change seed to
            ptb_model = PtbModel(
@@ -524,7 +524,7 @@ class TestDygraphPtbRnn(unittest.TestCase):
        batch_num = 200
        with fluid.dygraph.guard():
-            paddle.manual_seed(seed)
+            paddle.seed(seed)
            paddle.framework.random._manual_program_seed(seed)
            # TODO: marsyang1993 Change seed to
            ptb_model = PtbModel(
@@ -638,7 +638,7 @@ class TestDygraphPtbRnn(unittest.TestCase):
        batch_num = 200
        with fluid.dygraph.guard():
-            paddle.manual_seed(seed)
+            paddle.seed(seed)
            paddle.framework.random._manual_program_seed(seed)
            # TODO: marsyang1993 Change seed to
            ptb_model = PtbModel(
@@ -717,7 +717,7 @@ class TestDygraphPtbRnn(unittest.TestCase):
        batch_num = 200
        with fluid.dygraph.guard():
-            paddle.manual_seed(seed)
+            paddle.seed(seed)
            paddle.framework.random._manual_program_seed(seed)
            # TODO: marsyang1993 Change seed to
            ptb_model = PtbModel(
@@ -808,7 +808,7 @@ class TestDygraphPtbRnn(unittest.TestCase):
        batch_num = 200
        with fluid.dygraph.guard():
-            paddle.manual_seed(seed)
+            paddle.seed(seed)
            paddle.framework.random._manual_program_seed(seed)
            # TODO: marsyang1993 Change seed to
            ptb_model = PtbModel(
...
@@ -311,7 +311,7 @@ class TestImperativeResneXt(unittest.TestCase):
        batch_num = 1
        epoch_num = 1
        with fluid.dygraph.guard():
-            paddle.manual_seed(seed)
+            paddle.seed(seed)
            paddle.framework.random._manual_program_seed(seed)
            se_resnext = SeResNeXt()
@@ -372,7 +372,7 @@ class TestImperativeResneXt(unittest.TestCase):
                dy_param_value[param.name] = param.numpy()
        with new_program_scope():
-            paddle.manual_seed(seed)
+            paddle.seed(seed)
            paddle.framework.random._manual_program_seed(seed)
            exe = fluid.Executor(fluid.CPUPlace(
...
@@ -102,7 +102,7 @@ class TestDygraphSimpleNet(unittest.TestCase):
        for is_sort_sum_gradient in [True, False]:
            traced_layer = None
            with fluid.dygraph.guard(place):
-                paddle.manual_seed(seed)
+                paddle.seed(seed)
                paddle.framework.random._manual_program_seed(seed)
                simple_net = SimpleNet(
@@ -146,7 +146,7 @@ class TestDygraphSimpleNet(unittest.TestCase):
                dy_loss_value = dy_loss.numpy()
            with new_program_scope():
-                paddle.manual_seed(seed)
+                paddle.seed(seed)
                paddle.framework.random._manual_program_seed(seed)
                simple_net = SimpleNet(
...
@@ -468,7 +468,7 @@ def build_optimizer(layer, cfg, loss=None):
class DyGraphTrainModel(object):
    def __init__(self, cfg):
-        paddle.manual_seed(1)
+        paddle.seed(1)
        paddle.framework.random._manual_program_seed(1)
        self.generator = Generator(cfg)
@@ -529,7 +529,7 @@ class StaticGraphTrainModel(object):
                shape=[None, cfg.c_dim], dtype='float32', name='label_trg')
            return image_real, label_org, label_trg
-        paddle.manual_seed(cfg.seed)
+        paddle.seed(cfg.seed)
        paddle.framework.random._manual_program_seed(cfg.seed)
        self.gen_program = fluid.Program()
        gen_startup_program = fluid.Program()
...
@@ -951,7 +951,7 @@ class TestDygraphTransformerSortGradient(unittest.TestCase):
        with guard():
            fluid.set_flags({'FLAGS_sort_sum_gradient': True})
-            paddle.manual_seed(seed)
+            paddle.seed(seed)
            paddle.framework.random._manual_program_seed(seed)
            transformer = TransFormer(
                ModelHyperParams.src_vocab_size,
@@ -1035,7 +1035,7 @@ class TestDygraphTransformerSortGradient(unittest.TestCase):
            dy_token_num_value = dy_token_num.numpy()
        with new_program_scope():
-            paddle.manual_seed(seed)
+            paddle.seed(seed)
            paddle.framework.random._manual_program_seed(seed)
            transformer = TransFormer(
                ModelHyperParams.src_vocab_size,
...
@@ -80,7 +80,7 @@ class TestInplaceAddto(unittest.TestCase):
    def test_result(self):
        def run_program(enable_addto):
            np.random.seed(10)
-            paddle.manual_seed(10)
+            paddle.seed(10)
            paddle.framework.random._manual_program_seed(10)
            if fluid.core.is_compiled_with_cuda():
                fluid.set_flags({"FLAGS_cudnn_deterministic": True})
...
@@ -35,22 +35,22 @@ class TestInstanceNorm(unittest.TestCase):
        def error1d():
            x_data_4 = np.random.random(size=(2, 1, 3, 3)).astype('float32')
-            instance_norm1d = paddle.nn.InstanceNorm1d(1)
+            instance_norm1d = paddle.nn.InstanceNorm1D(1)
            instance_norm1d(fluid.dygraph.to_variable(x_data_4))
        def error2d():
            x_data_3 = np.random.random(size=(2, 1, 3)).astype('float32')
-            instance_norm2d = paddle.nn.InstanceNorm2d(1)
+            instance_norm2d = paddle.nn.InstanceNorm2D(1)
            instance_norm2d(fluid.dygraph.to_variable(x_data_3))
        def error3d():
            x_data_4 = np.random.random(size=(2, 1, 3, 3)).astype('float32')
-            instance_norm3d = paddle.nn.BatchNorm3d(1)
+            instance_norm3d = paddle.nn.BatchNorm3D(1)
            instance_norm3d(fluid.dygraph.to_variable(x_data_4))
        def weight_bias_false():
            x_data_4 = np.random.random(size=(2, 1, 3, 3)).astype('float32')
-            instance_norm3d = paddle.nn.BatchNorm3d(
+            instance_norm3d = paddle.nn.BatchNorm3D(
                1, weight_attr=False, bias_attr=False)
        with fluid.dygraph.guard(p):
@@ -75,7 +75,7 @@ class TestInstanceNorm(unittest.TestCase):
        def compute_v2(x):
            with fluid.dygraph.guard(p):
-                bn = paddle.nn.InstanceNorm2d(shape[1])
+                bn = paddle.nn.InstanceNorm2D(shape[1])
                y = bn(fluid.dygraph.to_variable(x))
            return y.numpy()
@@ -104,7 +104,7 @@ class TestInstanceNorm(unittest.TestCase):
        def compute_v2(x_np):
            with program_guard(Program(), Program()):
-                ins = paddle.nn.InstanceNorm2d(shape[1])
+                ins = paddle.nn.InstanceNorm2D(shape[1])
                x = fluid.data(name='x', shape=x_np.shape, dtype=x_np.dtype)
                y = ins(x)
                exe.run(fluid.default_startup_program())
...
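The error1d/error2d cases above feed tensors whose rank does not match the layer's dimension tag. The valid case for the renamed 2-D layer, as a minimal sketch:

import numpy as np
import paddle

paddle.disable_static()
x = paddle.to_tensor(np.random.random((2, 3, 8, 8)).astype("float32"))
inorm = paddle.nn.InstanceNorm2D(3)   # num_features = channel dim of the NCHW input
y = inorm(x)                          # expects a 4-D tensor
print(y.shape)  # [2, 3, 8, 8]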
@@ -37,7 +37,7 @@ class TestIrMemoryOptimizeIfElseOp(unittest.TestCase):
                   use_cuda=True,
                   use_mem_opt=False,
                   iter_num=5):
-        paddle.manual_seed(100)
+        paddle.seed(100)
        paddle.framework.random._manual_program_seed(100)
        prog = Program()
        startup_prog = Program()
...
@@ -222,7 +222,7 @@ class TestJitSaveLoad(unittest.TestCase):
        # enable dygraph mode
        fluid.enable_dygraph()
        # config seed
-        paddle.manual_seed(SEED)
+        paddle.seed(SEED)
        paddle.framework.random._manual_program_seed(SEED)
    def train_and_save_model(self, model_path=None):
@@ -370,7 +370,7 @@ class TestJitSaveLoadConfig(unittest.TestCase):
        # enable dygraph mode
        fluid.enable_dygraph()
        # config seed
-        paddle.manual_seed(SEED)
+        paddle.seed(SEED)
        paddle.framework.random._manual_program_seed(SEED)
    def test_output_spec(self):
@@ -429,7 +429,7 @@ class TestJitMultipleLoading(unittest.TestCase):
        # enable dygraph mode
        fluid.enable_dygraph()
        # config seed
-        paddle.manual_seed(SEED)
+        paddle.seed(SEED)
        paddle.framework.random._manual_program_seed(SEED)
        # train and save base model
        self.train_and_save_orig_model()
@@ -457,7 +457,7 @@ class TestJitPruneModelAndLoad(unittest.TestCase):
        # enable dygraph mode
        fluid.enable_dygraph()
        # config seed
-        paddle.manual_seed(SEED)
+        paddle.seed(SEED)
        paddle.framework.random._manual_program_seed(SEED)
    def train_and_save(self):
@@ -512,7 +512,7 @@ class TestJitSaveMultiCases(unittest.TestCase):
        # enable dygraph mode
        fluid.enable_dygraph()
        # config seed
-        paddle.manual_seed(SEED)
+        paddle.seed(SEED)
        paddle.framework.random._manual_program_seed(SEED)
    def verify_inference_correctness(self, layer, model_path, with_label=False):
...
@@ -57,7 +57,7 @@ class LayerTest(unittest.TestCase):
    @contextlib.contextmanager
    def static_graph(self):
        with new_program_scope():
-            paddle.manual_seed(self.seed)
+            paddle.seed(self.seed)
            paddle.framework.random._manual_program_seed(self.seed)
            yield
@@ -77,7 +77,7 @@ class LayerTest(unittest.TestCase):
    def dynamic_graph(self, force_to_use_cpu=False):
        with fluid.dygraph.guard(
                self._get_place(force_to_use_cpu=force_to_use_cpu)):
-            paddle.manual_seed(self.seed)
+            paddle.seed(self.seed)
            paddle.framework.random._manual_program_seed(self.seed)
            yield
...
@@ -17,16 +17,16 @@ import unittest
import paddle
import paddle.fluid as fluid
-from paddle.framework import manual_seed
+from paddle.framework import seed
from paddle.fluid.framework import Program, default_main_program, default_startup_program
import numpy as np
class TestManualSeed(unittest.TestCase):
-    def test_manual_seed(self):
+    def test_seed(self):
        fluid.enable_dygraph()
-        gen = paddle.manual_seed(12312321111)
+        gen = paddle.seed(12312321111)
        x = fluid.layers.gaussian_random([10], dtype="float32")
        st1 = gen.get_state()
        x1 = fluid.layers.gaussian_random([10], dtype="float32")
...
@@ -18,7 +18,7 @@ import paddle
import copy
np.random.seed(10)
-paddle.manual_seed(10)
+paddle.seed(10)
class TestNormalAPI(unittest.TestCase):
@@ -61,7 +61,8 @@ class TestNormalAPI(unittest.TestCase):
        if isinstance(self.mean, np.ndarray) \
                and isinstance(self.std, np.ndarray):
            with paddle.static.program_guard(paddle.static.Program()):
-                mean = paddle.fluid.data('Mean', self.mean.shape, self.mean.dtype)
+                mean = paddle.fluid.data('Mean', self.mean.shape,
+                                         self.mean.dtype)
                std = paddle.fluid.data('Std', self.std.shape, self.std.dtype)
                out = paddle.normal(mean, std, self.shape)
@@ -76,7 +77,8 @@ class TestNormalAPI(unittest.TestCase):
            return ret_all
        elif isinstance(self.mean, np.ndarray):
            with paddle.static.program_guard(paddle.static.Program()):
-                mean = paddle.fluid.data('Mean', self.mean.shape, self.mean.dtype)
+                mean = paddle.fluid.data('Mean', self.mean.shape,
+                                         self.mean.dtype)
                out = paddle.normal(mean, self.std, self.shape)
                exe = paddle.static.Executor(self.place)
...
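paddle.normal accepts tensor or scalar mean/std; with tensor arguments the output follows their shape, while scalar arguments need an explicit shape, which is what this test drives through paddle.fluid.data placeholders. A minimal dygraph sketch, assuming that signature:

import paddle

paddle.disable_static()
paddle.seed(10)
mean = paddle.to_tensor([0.0, 1.0, 2.0])
std = paddle.to_tensor([1.0, 1.0, 1.0])
out1 = paddle.normal(mean, std)        # shape follows mean/std: [3]
out2 = paddle.normal(0.0, 1.0, [3])    # scalar mean/std need an explicit shape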
@@ -73,7 +73,7 @@ class TestSaveLoad(unittest.TestCase):
        paddle.disable_static()
        # config seed
-        paddle.manual_seed(SEED)
+        paddle.seed(SEED)
        paddle.framework.random._manual_program_seed(SEED)
    def build_and_train_model(self):
...
@@ -105,7 +105,7 @@ def avg_pool1D_forward_naive(x,
    return out
-class TestPool1d_API(unittest.TestCase):
+class TestPool1D_API(unittest.TestCase):
    def setUp(self):
        np.random.seed(123)
        self.places = [fluid.CPUPlace()]
@@ -138,7 +138,7 @@ class TestPool1d_API(unittest.TestCase):
            self.assertTrue(np.allclose(result.numpy(), result_np))
-            avg_pool1d_dg = paddle.nn.layer.AvgPool1d(
+            avg_pool1d_dg = paddle.nn.layer.AvgPool1D(
                kernel_size=2, stride=None, padding=0)
            result = avg_pool1d_dg(input)
            self.assertTrue(np.allclose(result.numpy(), result_np))
@@ -159,7 +159,7 @@ class TestPool1d_API(unittest.TestCase):
            self.assertTrue(np.allclose(result.numpy(), result_np))
-            avg_pool1d_dg = paddle.nn.AvgPool1d(
+            avg_pool1d_dg = paddle.nn.AvgPool1D(
                kernel_size=2, stride=None, padding=1, count_include_pad=True)
            result = avg_pool1d_dg(input)
            self.assertTrue(np.allclose(result.numpy(), result_np))
@@ -190,7 +190,7 @@ class TestPool1d_API(unittest.TestCase):
            self.assertTrue(np.allclose(result.numpy(), result_np))
-            max_pool1d_dg = paddle.nn.layer.MaxPool1d(
+            max_pool1d_dg = paddle.nn.layer.MaxPool1D(
                kernel_size=2, stride=None, padding=0)
            result = max_pool1d_dg(input)
            self.assertTrue(np.allclose(result.numpy(), result_np))
@@ -207,7 +207,7 @@ class TestPool1d_API(unittest.TestCase):
            self.assertTrue(np.allclose(result.numpy(), result_np))
-            max_pool1d_dg = paddle.nn.layer.MaxPool1d(
+            max_pool1d_dg = paddle.nn.layer.MaxPool1D(
                kernel_size=2, stride=None, padding=0)
            result = max_pool1d_dg(input)
            self.assertTrue(np.allclose(result.numpy(), result_np))
@@ -248,7 +248,7 @@ class TestPool1d_API(unittest.TestCase):
            self.check_max_dygraph_return_index_results(place)
-class TestPool2dError_API(unittest.TestCase):
+class TestPool2DError_API(unittest.TestCase):
    def test_error_api(self):
        def run1():
            with fluid.dygraph.guard():
...
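The pooling renames are again case-only; stride=None still defaults to the kernel size. A minimal sketch with the capitalized 1-D layers used above:

import paddle

paddle.disable_static()
x = paddle.rand([1, 3, 32])   # NCL input
avg_pool1d = paddle.nn.AvgPool1D(kernel_size=2, stride=None, padding=0)
max_pool1d = paddle.nn.MaxPool1D(kernel_size=2, stride=None, padding=0)
print(avg_pool1d(x).shape, max_pool1d(x).shape)  # [1, 3, 16] [1, 3, 16]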
(The diffs of 16 more files are collapsed and not shown.)