From 3ce727a81ccb0c61d1665cac146b36d1dd9f0c7d Mon Sep 17 00:00:00 2001
From: huangjun12 <2399845970@qq.com>
Date: Mon, 31 Aug 2020 18:40:02 +0800
Subject: [PATCH] rename Dropout2D/3D to Dropout2d/3d, test=develop (#26796)

rename Dropout2D/3D to Dropout2d/3d
---
 .../fluid/tests/unittests/test_dropout_op.py |  8 ++++----
 python/paddle/nn/__init__.py                 |  4 ++--
 python/paddle/nn/layer/__init__.py           |  4 ++--
 python/paddle/nn/layer/common.py             | 16 ++++++++--------
 4 files changed, 16 insertions(+), 16 deletions(-)

diff --git a/python/paddle/fluid/tests/unittests/test_dropout_op.py b/python/paddle/fluid/tests/unittests/test_dropout_op.py
index d18c8e25974..ceec1190279 100644
--- a/python/paddle/fluid/tests/unittests/test_dropout_op.py
+++ b/python/paddle/fluid/tests/unittests/test_dropout_op.py
@@ -530,7 +530,7 @@ class TestDropout2dFAPIError(unittest.TestCase):
         self.assertRaises(ValueError, test_dataformat)
 
 
-class TestDropout2DCAPI(unittest.TestCase):
+class TestDropout2dCAPI(unittest.TestCase):
     def setUp(self):
         np.random.seed(123)
         self.places = [fluid.CPUPlace()]
@@ -543,7 +543,7 @@ class TestDropout2DCAPI(unittest.TestCase):
                 input_np = np.random.random([2, 3, 4, 5]).astype("float32")
                 result_np = input_np
                 input = fluid.dygraph.to_variable(input_np)
-                m = paddle.nn.Dropout2D(p=0.)
+                m = paddle.nn.Dropout2d(p=0.)
                 m.eval()
                 result = m(input)
                 self.assertTrue(np.allclose(result.numpy(), result_np))
@@ -616,7 +616,7 @@ class TestDropout3dFAPIError(unittest.TestCase):
         self.assertRaises(ValueError, test_dataformat)
 
 
-class TestDropout3DCAPI(unittest.TestCase):
+class TestDropout3dCAPI(unittest.TestCase):
     def setUp(self):
         np.random.seed(123)
         self.places = [fluid.CPUPlace()]
@@ -629,7 +629,7 @@ class TestDropout3DCAPI(unittest.TestCase):
                 input_np = np.random.random([2, 3, 4, 5, 6]).astype("float32")
                 result_np = input_np
                 input = fluid.dygraph.to_variable(input_np)
-                m = paddle.nn.Dropout3D(p=0.)
+                m = paddle.nn.Dropout3d(p=0.)
                 m.eval()
                 result = m(input)
                 self.assertTrue(np.allclose(result.numpy(), result_np))
diff --git a/python/paddle/nn/__init__.py b/python/paddle/nn/__init__.py
index 5cc9f6d32f9..66caba540f2 100644
--- a/python/paddle/nn/__init__.py
+++ b/python/paddle/nn/__init__.py
@@ -94,8 +94,8 @@ from .layer.common import UpsamplingNearest2d #DEFINE_ALIAS
 from .layer.common import UpsamplingBilinear2d #DEFINE_ALIAS
 from .layer.common import Bilinear #DEFINE_ALIAS
 from .layer.common import Dropout #DEFINE_ALIAS
-from .layer.common import Dropout2D #DEFINE_ALIAS
-from .layer.common import Dropout3D #DEFINE_ALIAS
+from .layer.common import Dropout2d #DEFINE_ALIAS
+from .layer.common import Dropout3d #DEFINE_ALIAS
 from .layer.common import AlphaDropout #DEFINE_ALIAS
 
 from .layer.pooling import AvgPool1d #DEFINE_ALIAS
diff --git a/python/paddle/nn/layer/__init__.py b/python/paddle/nn/layer/__init__.py
index 6eac15cd694..7d7a392ebe8 100644
--- a/python/paddle/nn/layer/__init__.py
+++ b/python/paddle/nn/layer/__init__.py
@@ -63,8 +63,8 @@ from .common import UpSample #DEFINE_ALIAS
 from .common import UpsamplingNearest2d #DEFINE_ALIAS
 from .common import UpsamplingBilinear2d #DEFINE_ALIAS
 from .common import Dropout #DEFINE_ALIAS
-from .common import Dropout2D #DEFINE_ALIAS
-from .common import Dropout3D #DEFINE_ALIAS
+from .common import Dropout2d #DEFINE_ALIAS
+from .common import Dropout3d #DEFINE_ALIAS
 from .common import AlphaDropout #DEFINE_ALIAS
 from .pooling import AvgPool1d #DEFINE_ALIAS
 from .pooling import AvgPool2d #DEFINE_ALIAS
diff --git a/python/paddle/nn/layer/common.py b/python/paddle/nn/layer/common.py
index a1e6508c67d..8641e28e37b 100644
--- a/python/paddle/nn/layer/common.py
+++ b/python/paddle/nn/layer/common.py
@@ -41,8 +41,8 @@ __all__ = [
     'ReplicationPad3d',
     'CosineSimilarity',
     'Dropout',
-    'Dropout2D',
-    'Dropout3D',
+    'Dropout2d',
+    'Dropout3d',
     'Bilinear',
     'AlphaDropout',
 ]
@@ -766,7 +766,7 @@ class Dropout(layers.Layer):
         return out
 
 
-class Dropout2D(layers.Layer):
+class Dropout2d(layers.Layer):
     """
     Randomly zero out entire channels (in the batched input 4d tensor with the shape `NCHW` ,
     a channel is a 2D feature map with the shape `HW`). Each channel will be zeroed out independently
@@ -798,7 +798,7 @@
             paddle.disable_static()
             x = np.random.random(size=(2, 3, 4, 5)).astype('float32')
             x = paddle.to_tensor(x)
-            m = paddle.nn.Dropout2D(p=0.5)
+            m = paddle.nn.Dropout2d(p=0.5)
             y_train = m(x)
             m.eval()  # switch the model to test phase
             y_test = m(x)
@@ -808,7 +808,7 @@
     """
 
     def __init__(self, p=0.5, data_format='NCHW', name=None):
-        super(Dropout2D, self).__init__()
+        super(Dropout2d, self).__init__()
 
         self.p = p
         self.data_format = data_format
@@ -824,7 +824,7 @@
         return out
 
 
-class Dropout3D(layers.Layer):
+class Dropout3d(layers.Layer):
     """
     Randomly zero out entire channels (in the batched input 5d tensor with the shape `NCDHW` ,
     a channel is a 3D feature map with the shape `DHW` ). Each channel will be zeroed out independently
@@ -856,7 +856,7 @@ class Dropout3D(layers.Layer):
             paddle.disable_static()
            x = np.random.random(size=(2, 3, 4, 5, 6)).astype('float32')
            x = paddle.to_tensor(x)
-            m = paddle.nn.Dropout3D(p=0.5)
+            m = paddle.nn.Dropout3d(p=0.5)
            y_train = m(x)
            m.eval()  # switch the model to test phase
            y_test = m(x)
@@ -866,7 +866,7 @@
     """
 
     def __init__(self, p=0.5, data_format='NCDHW', name=None):
-        super(Dropout3D, self).__init__()
+        super(Dropout3d, self).__init__()
 
         self.p = p
         self.data_format = data_format
-- 
GitLab
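For reference, a minimal usage sketch of the layers under their new spelling, mirroring the docstring examples and the renamed TestDropout2dCAPI / TestDropout3dCAPI tests in this patch. The variable names are illustrative, and the eval-mode comment assumes the pass-through behavior those tests assert for p=0.:

    import numpy as np
    import paddle

    paddle.disable_static()  # dygraph mode, as in the docstring examples

    # 4-D input in NCHW layout; Dropout2d zeroes whole HW feature maps per channel.
    x = paddle.to_tensor(np.random.random((2, 3, 4, 5)).astype('float32'))
    m = paddle.nn.Dropout2d(p=0.5)
    y_train = m(x)   # training phase: each channel is dropped with probability p

    m.eval()         # switch to test phase
    y_test = m(x)    # assumed pass-through; the renamed tests assert equality for p=0.

    # 5-D input in NCDHW layout uses the renamed Dropout3d the same way.
    x3 = paddle.to_tensor(np.random.random((2, 3, 4, 5, 6)).astype('float32'))
    y3 = paddle.nn.Dropout3d(p=0.5)(x3)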