From 20db5221ece0329e62eb6e5a1c9664b0af6439ac Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Kevin=E5=90=B4=E5=98=89=E6=96=87?= <417333277@qq.com> Date: Wed, 2 Nov 2022 13:46:29 +0800 Subject: [PATCH] Remove redundant numpy import (#47483) --- python/paddle/incubate/nn/functional/fused_transformer.py | 1 - python/paddle/nn/layer/pooling.py | 2 -- python/paddle/optimizer/adagrad.py | 1 - python/paddle/regularizer.py | 1 - 4 files changed, 5 deletions(-) diff --git a/python/paddle/incubate/nn/functional/fused_transformer.py b/python/paddle/incubate/nn/functional/fused_transformer.py index dffddb8b9e..0887cd56ae 100644 --- a/python/paddle/incubate/nn/functional/fused_transformer.py +++ b/python/paddle/incubate/nn/functional/fused_transformer.py @@ -947,7 +947,6 @@ def fused_multi_transformer( # required: gpu import paddle import paddle.incubate.nn.functional as F - import numpy as np # input: [batch_size, seq_len, embed_dim] x = paddle.rand(shape=(2, 4, 128), dtype="float32") diff --git a/python/paddle/nn/layer/pooling.py b/python/paddle/nn/layer/pooling.py index a9b5af5199..3c3abe5e39 100755 --- a/python/paddle/nn/layer/pooling.py +++ b/python/paddle/nn/layer/pooling.py @@ -1171,7 +1171,6 @@ class MaxUnPool1D(Layer): import paddle import paddle.nn.functional as F - import numpy as np data = paddle.rand(shape=[1, 3, 16]) pool_out, indices = F.max_pool1d(data, kernel_size=2, stride=2, padding=0, return_mask=True) @@ -1351,7 +1350,6 @@ class MaxUnPool3D(Layer): import paddle import paddle.nn.functional as F - import numpy as np data = paddle.rand(shape=[1, 1, 4, 4, 6]) pool_out, indices = F.max_pool3d(data, kernel_size=2, stride=2, padding=0, return_mask=True) diff --git a/python/paddle/optimizer/adagrad.py b/python/paddle/optimizer/adagrad.py index a4d9416e93..522ca753a9 100644 --- a/python/paddle/optimizer/adagrad.py +++ b/python/paddle/optimizer/adagrad.py @@ -70,7 +70,6 @@ class Adagrad(Optimizer): .. code-block:: python import paddle - import numpy as np inp = paddle.rand(shape=[10, 10]) linear = paddle.nn.Linear(10, 10) diff --git a/python/paddle/regularizer.py b/python/paddle/regularizer.py index 395ec08a36..38060b8233 100644 --- a/python/paddle/regularizer.py +++ b/python/paddle/regularizer.py @@ -105,7 +105,6 @@ class L2Decay(fluid.regularizer.L2Decay): # Example1: set Regularizer in optimizer import paddle from paddle.regularizer import L2Decay - import numpy as np linear = paddle.nn.Linear(10, 10) inp = paddle.rand(shape=[10, 10], dtype="float32") out = linear(inp) -- GitLab