diff --git a/python/paddle/nn/functional/conv.py b/python/paddle/nn/functional/conv.py
index 55c9b70c8a21ba0eb98286a8b04a0c1747d3df78..d2e4ee2ac9d11595ec6e8c01e26e7f7b36991376 100644
--- a/python/paddle/nn/functional/conv.py
+++ b/python/paddle/nn/functional/conv.py
@@ -438,20 +438,14 @@ def conv2d(x,
           import paddle
           import paddle.nn.functional as F
-          import numpy as np
-
-          x = np.random.randn(2, 3, 8, 8).astype(np.float32)
-          w = np.random.randn(6, 3, 3, 3).astype(np.float32)
-          paddle.disable_static()
+          x_var = paddle.randn((2, 3, 8, 8), dtype='float32')
+          w_var = paddle.randn((6, 3, 3, 3), dtype='float32')
 
-          x_var = paddle.to_tensor(x)
-          w_var = paddle.to_tensor(w)
           y_var = F.conv2d(x_var, w_var)
           y_np = y_var.numpy()
           print(y_np.shape)
-          # (2, 6, 6, 6)
     """
     # entry checks
@@ -946,20 +940,16 @@ def conv_transpose2d(x,
     Examples:
         .. code-block:: python
 
-          import numpy as np
           import paddle
           import paddle.nn.functional as F
 
-          x = np.random.randn(2, 3, 8, 8).astype(np.float32)
-          w = np.random.randn(3, 6, 3, 3).astype(np.float32)
+          x_var = paddle.randn((2, 3, 8, 8), dtype='float32')
+          w_var = paddle.randn((3, 6, 3, 3), dtype='float32')
 
-          paddle.disable_static()
-          x_var = paddle.to_tensor(x)
-          w_var = paddle.to_tensor(w)
           y_var = F.conv_transpose2d(x_var, w_var)
           y_np = y_var.numpy()
-          print(y_np.shape)
+          print(y_np.shape) # (2, 6, 10, 10)
 
     """
@@ -1166,20 +1156,16 @@ def conv3d(x,
     Examples:
         .. code-block:: python
 
-          import numpy as np
           import paddle
           import paddle.nn.functional as F
 
-          x = np.random.randn(2, 3, 8, 8, 8).astype(np.float32)
-          w = np.random.randn(6, 3, 3, 3, 3).astype(np.float32)
+          x_var = paddle.randn((2, 3, 8, 8, 8), dtype='float32')
+          w_var = paddle.randn((6, 3, 3, 3, 3), dtype='float32')
 
-          paddle.disable_static()
-          x_var = paddle.to_tensor(x)
-          w_var = paddle.to_tensor(w)
           y_var = F.conv3d(x_var, w_var)
           y_np = y_var.numpy()
-          print(y_np.shape)
+          print(y_np.shape) # (2, 6, 6, 6, 6)
 
     """
     # entry check
@@ -1399,22 +1385,16 @@ def conv_transpose3d(x,
     Examples:
         .. code-block:: python
 
-          import numpy as np
-
           import paddle
           import paddle.nn.functional as F
 
-          x = np.random.randn(2, 3, 8, 8, 8).astype(np.float32)
-          w = np.random.randn(3, 6, 3, 3, 3).astype(np.float32)
+          x_var = paddle.randn((2, 3, 8, 8, 8), dtype='float32')
+          w_var = paddle.randn((3, 6, 3, 3, 3), dtype='float32')
 
-          paddle.disable_static()
-
-          x_var = paddle.to_tensor(x)
-          w_var = paddle.to_tensor(w)
           y_var = F.conv_transpose3d(x_var, w_var)
           y_np = y_var.numpy()
-          print(y_np.shape)
+          print(y_np.shape) # (2, 6, 10, 10, 10)
 
     """
     # entry checks
diff --git a/python/paddle/nn/layer/conv.py b/python/paddle/nn/layer/conv.py
index 3cc6a5a15b66c89ca7d0d35638c0e2e086adbeb9..baa89798b7fc3a4eca7b954c40f77ce46b24adb7 100644
--- a/python/paddle/nn/layer/conv.py
+++ b/python/paddle/nn/layer/conv.py
@@ -534,18 +534,15 @@ class Conv2d(_ConvNd):
         .. code-block:: python
 
-          import numpy as np
           import paddle
           import paddle.nn as nn
-          x = np.random.uniform(-1, 1, (2, 4, 8, 8)).astype('float32')
+
+          x_var = paddle.uniform((2, 4, 8, 8), dtype='float32', min=-1., max=1.)
 
-          paddle.disable_static()
-          x_var = paddle.to_tensor(x)
           conv = nn.Conv2d(4, 6, (3, 3))
           y_var = conv(x_var)
           y_np = y_var.numpy()
           print(y_np.shape)
-          # (2, 6, 6, 6)
 
     """
@@ -702,17 +699,15 @@ class ConvTranspose2d(_ConvNd):
         .. code-block:: python
 
-          import numpy as np
           import paddle
           import paddle.nn as nn
-          x = np.random.uniform(-1, 1, (2, 4, 8, 8)).astype('float32')
-          paddle.disable_static()
-          x_var = paddle.to_tensor(x)
+
+          x_var = paddle.uniform((2, 4, 8, 8), dtype='float32', min=-1., max=1.)
+
           conv = nn.ConvTranspose2d(4, 6, (3, 3))
           y_var = conv(x_var)
           y_np = y_var.numpy()
           print(y_np.shape)
-          # (2, 6, 10, 10)
 
     """
@@ -856,19 +851,15 @@ class Conv3d(_ConvNd):
         .. code-block:: python
 
-          import numpy as np
-
           import paddle
           import paddle.nn as nn
-          x = np.random.uniform(-1, 1, (2, 4, 8, 8, 8)).astype('float32')
+
+          x_var = paddle.uniform((2, 4, 8, 8, 8), dtype='float32', min=-1., max=1.)
 
-          paddle.disable_static()
-          x_var = dg.to_variable(x)
           conv = nn.Conv3d(4, 6, (3, 3, 3))
           y_var = conv(x_var)
           y_np = y_var.numpy()
           print(y_np.shape)
-          # (2, 6, 6, 6, 6)
 
     """
@@ -1042,18 +1033,15 @@ class ConvTranspose3d(_ConvNd):
         .. code-block:: python
 
-          import numpy as np
           import paddle
           import paddle.nn as nn
-          x = np.random.uniform(-1, 1, (2, 4, 8, 8, 8)).astype('float32')
+
+          x_var = paddle.uniform((2, 4, 8, 8, 8), dtype='float32', min=-1., max=1.)
 
-          paddle.disable_static()
-          x_var = paddle.to_tensor(x)
           conv = nn.ConvTranspose3d(4, 6, (3, 3, 3))
           y_var = conv(x_var)
           y_np = y_var.numpy()
           print(y_np.shape)
-          # (2, 6, 10, 10, 10)
 
     """
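
A quick sanity check of the updated docstring examples, as a minimal sketch: it assumes Paddle 2.0+, where dynamic graph mode is on by default, so paddle.disable_static() and the numpy round-trip are no longer needed. The shapes in the comments follow the usual conv arithmetic (3x3 kernel, stride 1, no padding shrinks 8 to 6; the transpose with the same settings grows 8 to 10).

    import paddle
    import paddle.nn.functional as F

    # random input and filter, replacing the old numpy-based setup
    x_var = paddle.randn((2, 3, 8, 8), dtype='float32')
    w_var = paddle.randn((6, 3, 3, 3), dtype='float32')

    # forward conv: spatial size 8 -> 6 with a 3x3 kernel, stride 1, padding 0
    y_var = F.conv2d(x_var, w_var)
    assert tuple(y_var.shape) == (2, 6, 6, 6)

    # transpose conv: weight is (in_channels, out_channels, kH, kW),
    # spatial size 8 -> 10 with the same kernel/stride/padding
    wt_var = paddle.randn((3, 6, 3, 3), dtype='float32')
    yt_var = F.conv_transpose2d(x_var, wt_var)
    assert tuple(yt_var.shape) == (2, 6, 10, 10)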