Unverified commit 726c78f2, authored by XiaoguangHu, committed by GitHub

clean redundant API alias in 2.0 - part 1 (#29928)

* rm check_import_scipy, rm chunk_eval and mean_iou in paddle.metric.__init__.py

* Revert "rm check_import_scipy, rm chunk_eval and mean_iou in paddle.metric.__init__.py"

This reverts commit 179ba8c2b22bc31fe8d8a126e31820792cbd0f4e.

* delete paddle.metric.chunk_eval and paddle.metric.mean_iou

* delete paddle.nn.clip and paddle.nn.clip_by_norm

* delete paddle.nn.functional.activation.hard_sigmoid and paddle.nn.functional.activation.hard_swish

* delete paddle.nn.Pool2D, paddle.nn.BilinearTensorProduct, paddle.nn.RowConv, paddle.nn.functional.row_conv

* fix extension import error

* fix unittest for row_conv and Pool2D
Parent 181ea187
@@ -26,7 +26,7 @@ from paddle.fluid.contrib.slim.quantization import ImperativeQuantAware
 from paddle.fluid.contrib.slim.quantization import QuantizationTransformPass
 from paddle.nn import Sequential
 from paddle.fluid.dygraph import Conv2D
-from paddle.nn import Pool2D
+from paddle.fluid.dygraph import Pool2D
 from paddle.fluid.dygraph import Linear
 from paddle.fluid.log_helper import get_logger
......
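This hunk keeps the quantization code importing Pool2D from paddle.fluid.dygraph, the class the removed paddle.nn alias pointed at. For 2.0-style code, the dedicated pooling layers are the more natural substitutes; a small sketch, assuming paddle.nn.MaxPool2D (not part of this patch):

    # Sketch only: alternatives to the removed paddle.nn.Pool2D alias.
    import paddle
    from paddle.fluid.dygraph import Pool2D  # legacy class, still importable

    x = paddle.rand([1, 3, 32, 32])
    legacy = Pool2D(pool_size=2, pool_type='max', pool_stride=2)
    new = paddle.nn.MaxPool2D(kernel_size=2, stride=2)  # assumed 2.0 layer
    print(legacy(x).shape, new(x).shape)  # both [1, 3, 16, 16]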
@@ -12352,10 +12352,11 @@ def clip_by_norm(x, max_norm, name=None):
         .. code-block:: python
 
             import paddle
-            import numpy as np
+            import paddle.fluid as fluid
 
-            input = paddle.to_tensor(data=np.array([[0.1, 0.2], [0.3, 0.4]]), dtype="float32")
-            reward = paddle.nn.clip_by_norm(x=input, max_norm=1.0)
+            input = paddle.to_tensor([[2.0, 2.0], [2.0, 2.0]], dtype='float32')
+            reward = fluid.layers.clip_by_norm(x=input, max_norm=1.0)
+            # [[0.5, 0.5], [0.5, 0.5]]
     """
     if in_dygraph_mode():
......
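The commented output in the new docstring follows from clip_by_norm rescaling the whole tensor: the L2 norm of [[2, 2], [2, 2]] is sqrt(4 * 2.0^2) = 4.0, which exceeds max_norm = 1.0, so every entry is multiplied by 1.0 / 4.0 and becomes 0.5. A quick numpy cross-check, written as a sketch from the documented behaviour rather than taken from the patch:

    # Sketch: reproduce the docstring's expected output with numpy.
    import numpy as np

    x = np.array([[2.0, 2.0], [2.0, 2.0]], dtype=np.float32)
    max_norm = 1.0
    norm = np.sqrt((x ** 2).sum())            # 4.0
    out = x * max_norm / max(norm, max_norm)  # scale only if norm > max_norm
    print(out)                                # [[0.5 0.5] [0.5 0.5]]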
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from paddle import fluid, nn
import paddle.fluid.dygraph as dg
import paddle.fluid.initializer as I
import paddle.nn.functional as F
import unittest
class RowConvTestCase(unittest.TestCase):
def __init__(self,
methodName='runTest',
batch_size=4,
num_channels=8,
time_steps=12,
context_size=3,
act=None,
dtype="float32"):
super(RowConvTestCase, self).__init__(methodName=methodName)
self.batch_size = batch_size
self.num_channels = num_channels
self.time_steps = time_steps
self.context_size = context_size
self.act = act
self.dtype = dtype
def setUp(self):
input_shape = (self.batch_size, self.time_steps, self.num_channels)
self.input = np.random.uniform(size=input_shape).astype(self.dtype)
self.weight_shape = weight_shape = (self.context_size + 1,
self.num_channels)
self.weight = np.random.uniform(size=weight_shape).astype(self.dtype)
def fluid_layer(self, place):
main = fluid.Program()
start = fluid.Program()
with fluid.unique_name.guard():
with fluid.program_guard(main, start):
x = fluid.data(
"input", [-1, -1, self.num_channels], dtype=self.dtype)
y = fluid.layers.row_conv(
x,
self.context_size,
param_attr=I.NumpyArrayInitializer(self.weight),
act=self.act)
exe = fluid.Executor(place)
exe.run(start)
y_np, = exe.run(main, feed={"input": self.input}, fetch_list=[y])
return y_np
def functional_declarative(self, place):
main = fluid.Program()
start = fluid.Program()
with fluid.unique_name.guard():
with fluid.program_guard(main, start):
x = fluid.data(
"input", [-1, -1, self.num_channels], dtype=self.dtype)
w = fluid.data("weight", self.weight_shape, dtype=self.dtype)
y = F.extension.row_conv(x, w, act=self.act)
exe = fluid.Executor(place)
exe.run(start)
y_np, = exe.run(main,
feed={"input": self.input,
"weight": self.weight},
fetch_list=[y])
return y_np
def functional_imperative(self, place):
with dg.guard(place):
x_var = dg.to_variable(self.input)
w_var = dg.to_variable(self.weight)
y_var = F.extension.row_conv(x_var, w_var, act=self.act)
y_np = y_var.numpy()
return y_np
def nn_layer(self, place):
with dg.guard(place):
x_var = dg.to_variable(self.input)
conv = nn.RowConv(
self.num_channels,
self.context_size,
param_attr=I.NumpyArrayInitializer(self.weight),
act=self.act,
dtype=self.dtype)
y_var = conv(x_var)
y_np = y_var.numpy()
return y_np
def _test_equivalence(self, place):
result1 = self.fluid_layer(place)
result2 = self.functional_declarative(place)
result3 = self.functional_imperative(place)
result4 = self.nn_layer(place)
np.testing.assert_array_almost_equal(result1, result2)
np.testing.assert_array_almost_equal(result2, result3)
np.testing.assert_array_almost_equal(result3, result4)
def runTest(self):
place = fluid.CPUPlace()
self._test_equivalence(place)
if fluid.core.is_compiled_with_cuda():
            place = fluid.CUDAPlace(0)
self._test_equivalence(place)
def load_tests(loader, standard_tests, pattern):
suite = unittest.TestSuite()
suite.addTest(RowConvTestCase(methodName="runTest"))
suite.addTest(RowConvTestCase(methodName="runTest", act="sigmoid"))
suite.addTest(
RowConvTestCase(
methodName="runTest", context_size=5, act="sigmoid"))
return suite
if __name__ == "__main__":
unittest.main()
@@ -15,9 +15,4 @@
 from .metrics import *
 from . import metrics
 
-from ..fluid.layers.nn import chunk_eval, mean_iou
-__all__ = metrics.__all__ + [
-    'chunk_eval',
-    'mean_iou',
-]
+__all__ = metrics.__all__
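After this change paddle.metric exposes only the Metric classes; chunk_eval and mean_iou stay reachable through the fluid module the deleted import pointed at. A one-line sketch (illustrative, not from the patch):

    # Sketch only: the underlying ops remain available under fluid.
    from paddle.fluid.layers.nn import chunk_eval, mean_iou  # noqa: F401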
@@ -34,9 +34,6 @@ __all__ += weight_norm_hook.__all__
 from .clip import ClipGradByGlobalNorm #DEFINE_ALIAS
 from .clip import ClipGradByNorm #DEFINE_ALIAS
 from .clip import ClipGradByValue #DEFINE_ALIAS
-# from .clip import set_gradient_clip #DEFINE_ALIAS
-from .clip import clip #DEFINE_ALIAS
-from .clip import clip_by_norm #DEFINE_ALIAS
 # from .control_flow import cond #DEFINE_ALIAS
 # from .control_flow import DynamicRNN #DEFINE_ALIAS
 # from .control_flow import StaticRNN #DEFINE_ALIAS
@@ -71,8 +68,6 @@ from .layer.activation import Tanhshrink #DEFINE_ALIAS
 from .layer.activation import ThresholdedReLU #DEFINE_ALIAS
 from .layer.activation import LogSoftmax #DEFINE_ALIAS
 from .layer.activation import Maxout #DEFINE_ALIAS
-from .layer.common import BilinearTensorProduct #DEFINE_ALIAS
-from .layer.common import Pool2D #DEFINE_ALIAS
 from .layer.common import Pad1D #DEFINE_ALIAS
 from .layer.common import Pad2D #DEFINE_ALIAS
 from .layer.common import Pad3D #DEFINE_ALIAS
@@ -108,7 +103,6 @@ from .layer.conv import Conv2DTranspose #DEFINE_ALIAS
 from .layer.conv import Conv3DTranspose #DEFINE_ALIAS
 # from .layer.conv import TreeConv #DEFINE_ALIAS
 # from .layer.conv import Conv1D #DEFINE_ALIAS
-from .layer.extension import RowConv #DEFINE_ALIAS
 from .layer.common import Linear
 # from .layer.loss import NCELoss #DEFINE_ALIAS
 from .layer.loss import BCEWithLogitsLoss #DEFINE_ALIAS
......
@@ -16,16 +16,5 @@
 from ..fluid.clip import ClipGradByGlobalNorm #DEFINE_ALIAS
 from ..fluid.clip import ClipGradByNorm #DEFINE_ALIAS
 from ..fluid.clip import ClipGradByValue #DEFINE_ALIAS
-from ..fluid.layers import clip #DEFINE_ALIAS
-from ..fluid.layers import clip_by_norm #DEFINE_ALIAS
 
-__all__ = [
-    # 'ErrorClipByValue',
-    'ClipGradByGlobalNorm',
-    'ClipGradByNorm',
-    'ClipGradByValue',
-    # 'set_gradient_clip',
-    'clip',
-    'clip_by_norm'
-]
+__all__ = ['ClipGradByGlobalNorm', 'ClipGradByNorm', 'ClipGradByValue']
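Only the ClipGrad* classes survive in paddle.nn.clip, which matches how 2.0 expects clipping to be wired into the optimizer; element-wise clipping lives on the tensor API instead. A sketch assuming paddle.clip and the optimizer's grad_clip argument, neither of which is introduced by this patch:

    # Sketch: gradient clipping via the remaining ClipGradByNorm class, and
    # element-wise clipping via paddle.clip (assumed 2.0 replacements for the
    # removed paddle.nn.clip / paddle.nn.clip_by_norm aliases).
    import paddle

    linear = paddle.nn.Linear(4, 4)
    clip = paddle.nn.ClipGradByNorm(clip_norm=1.0)
    opt = paddle.optimizer.SGD(learning_rate=0.1,
                               parameters=linear.parameters(),
                               grad_clip=clip)

    x = paddle.rand([2, 4])
    y = paddle.clip(x, min=0.2, max=0.8)  # element-wise value clipping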
@@ -88,7 +88,6 @@ from .conv import conv3d_transpose #DEFINE_ALIAS
 # from .extension import multiclass_nms #DEFINE_ALIAS
 # from .extension import polygon_box_transform #DEFINE_ALIAS
 # from .extension import random_crop #DEFINE_ALIAS
-# from .extension import row_conv #DEFINE_ALIAS
 # from .extension import rpn_target_assign #DEFINE_ALIAS
 # from .extension import similarity_focus #DEFINE_ALIAS
 # from .extension import target_assign #DEFINE_ALIAS
......
@@ -15,8 +15,6 @@
 # TODO: define activation functions of neural network
 from ...fluid.layers import brelu #DEFINE_ALIAS
 # from ...fluid.layers import erf #DEFINE_ALIAS
-from ...fluid.layers import hard_sigmoid #DEFINE_ALIAS
-from ...fluid.layers import hard_swish #DEFINE_ALIAS
 from ...fluid.layers import maxout #DEFINE_ALIAS
 # from ...fluid.layers import soft_relu #DEFINE_ALIAS
 from ...fluid.layers import swish #DEFINE_ALIAS
@@ -24,6 +22,7 @@ from ...fluid.layers import sigmoid #DEFINE_ALIAS
 from ...tensor.math import tanh #DEFINE_ALIAS
 
 __all__ = [
+    'brelu',
     'elu',
     'gelu',
     'hardshrink',
......
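With the hard_sigmoid / hard_swish aliases dropped from paddle.nn.functional, the natural calls in 2.0-style code are the renamed functional ops. A sketch assuming paddle.nn.functional.hardsigmoid and hardswish; these names are not introduced by this patch:

    # Sketch: assumed 2.0 replacements for the removed aliases.
    import paddle
    import paddle.nn.functional as F

    x = paddle.to_tensor([-4.0, -1.0, 0.0, 1.0, 4.0])
    y1 = F.hardsigmoid(x)  # piecewise-linear approximation of sigmoid
    y2 = F.hardswish(x)    # x * relu6(x + 3) / 6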
@@ -14,7 +14,7 @@
 # TODO: define the extention functions
-__all__ = ['diag_embed', 'row_conv']
+__all__ = ['diag_embed']
 
 import numpy as np
 from ...fluid.data_feeder import check_dtype
@@ -138,64 +138,3 @@ def diag_embed(input, offset=0, dim1=-2, dim2=-1):
         outputs={'Out': [out]})
     out.stop_gradient = True
     return out
-
-
-@templatedoc()
-def row_conv(input, weight, act=None):
-    """
-    ${comment}
-
-    Args:
-        input (Tensor): the input(X) is a LodTensor or tensor, LodTensor(X)
-            supports variable time-length input sequences. The underlying
-            tensor in this LoDTensor is a matrix with shape (T, D), where
-            T is the total time steps in this mini-batch and D is the input
-            data dimension.
-            If the input is a padded minibatch, the shape of the input is
-            (N, T, D), N is batch size, T is the max time steps in the batch,
-            D is the input data dimension.
-        weight (Tensor): The weight. A Tensor with shape
-            (future_context_size + 1, D), where future_context_size is the
-            context size of the RowConv operator.
-        act (str): Non-linear activation to be applied to output variable.
-
-    Returns:
-        ${out_comment}.
-
-    Examples:
-        .. code-block:: python
-
-            from paddle import fluid, nn
-            import paddle.nn.functional as F
-            import numpy as np
-
-            batch_size = 4
-            time_steps = 8
-            feature_size = 6
-            context_size = 4
-            x = np.random.randn(batch_size, time_steps, feature_size).astype(np.float32)
-            weight = np.random.randn(context_size + 1, feature_size).astype(np.float32)
-
-            x_var = paddle.to_tensor(x)
-            w_var = paddle.to_tensor(weight)
-            y_var = F.extension.row_conv(x_var, w_var)
-            print(y_var.shape)
-            # [4, 8, 6]
-    """
-    if in_dygraph_mode():
-        pre_act = core.ops.row_conv(input, weight)
-        out = dygraph_utils._append_activation_in_dygraph(pre_act, act)
-        return out
-    else:
-        helper = LayerHelper('row_conv', **locals())
-        dtype = helper.input_dtype()
-        inputs = {'X': [input], 'Filter': [weight]}
-        pre_act = helper.create_variable_for_type_inference(dtype)
-        outputs = {'Out': [pre_act]}
-        helper.append_op(type='row_conv', inputs=inputs, outputs=outputs)
-        out = helper.append_activation(pre_act)
-        return out
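paddle.nn.functional.extension.row_conv disappears with this hunk; the static-graph op exercised by the unittest above (fluid.layers.row_conv) is untouched, so it remains a fallback. A minimal static-graph sketch, mirroring the test's fluid_layer helper rather than anything added by the patch:

    # Sketch only: the fluid static-graph row_conv op is untouched.
    import numpy as np
    import paddle
    import paddle.fluid as fluid

    paddle.enable_static()
    main, start = fluid.Program(), fluid.Program()
    with fluid.program_guard(main, start):
        x = fluid.data("x", [-1, -1, 6], dtype="float32")
        y = fluid.layers.row_conv(x, 4)  # context size of 4
    exe = fluid.Executor(fluid.CPUPlace())
    exe.run(start)
    out, = exe.run(main,
                   feed={"x": np.random.randn(2, 8, 6).astype("float32")},
                   fetch_list=[y])
    print(out.shape)  # (2, 8, 6)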
@@ -17,7 +17,6 @@
 from . import activation
 from . import loss
 from . import conv
-from . import extension
 from . import activation
 from . import norm
 from . import rnn
@@ -28,7 +27,6 @@ from . import transformer
 from .activation import *
 from .loss import *
 from .conv import *
-from .extension import *
 from .activation import *
 from .norm import *
 from .rnn import *
@@ -41,9 +39,7 @@ from .activation import LeakyReLU #DEFINE_ALIAS
 from .activation import Sigmoid #DEFINE_ALIAS
 from .activation import Softmax #DEFINE_ALIAS
 from .activation import LogSoftmax #DEFINE_ALIAS
-from .common import BilinearTensorProduct #DEFINE_ALIAS
 from .common import Bilinear #DEFINE_ALIAS
-from .common import Pool2D #DEFINE_ALIAS
 from .common import Pad1D #DEFINE_ALIAS
 from .common import Pad2D #DEFINE_ALIAS
 from .common import Pad3D #DEFINE_ALIAS
@@ -79,7 +75,6 @@ from .conv import Conv2DTranspose #DEFINE_ALIAS
 from .conv import Conv3DTranspose #DEFINE_ALIAS
 # from .conv import TreeConv #DEFINE_ALIAS
 # from .conv import Conv1D #DEFINE_ALIAS
-from .extension import RowConv #DEFINE_ALIAS
 # from .loss import NCELoss #DEFINE_ALIAS
 from .loss import BCEWithLogitsLoss #DEFINE_ALIAS
 from .loss import CrossEntropyLoss #DEFINE_ALIAS
......
@@ -14,16 +14,12 @@
 # TODO: define the common classes to build a neural network
 import paddle
-from ...fluid.dygraph import BilinearTensorProduct #DEFINE_ALIAS
-from ...fluid.dygraph import Pool2D #DEFINE_ALIAS
 from ...fluid.dygraph import Flatten #DEFINE_ALIAS
 from ...fluid.dygraph import layers
 from .. import functional as F
 from ...fluid.framework import _dygraph_tracer
 
 __all__ = [
-    'BilinearTensorProduct',
-    'Pool2D',
     'Embedding',
     'Linear',
     'Upsample',
......
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__all__ = ['RowConv']
from ...fluid.dygraph import layers
from .. import functional as F
class RowConv(layers.Layer):
"""
**Row-convolution operator**
The row convolution is called lookahead convolution. This operator was
introduced in the following paper for
`DeepSpeech2 <http://www.cs.cmu.edu/~dyogatam/papers/wang+etal.iclrworkshop2016.pdf>`_.
The main motivation is that a bidirectional RNN, useful in DeepSpeech like
speech models, learns representation for a sequence by performing a
forward and a backward pass through the entire sequence. However, unlike
unidirectional RNNs, bidirectional RNNs are challenging to deploy in an online
and low-latency setting. The lookahead convolution incorporates information
from future subsequences in a computationally efficient manner to improve
unidirectional recurrent neural networks. The row convolution operator is
different from the 1D sequence convolution, and is computed as follows:
Given an input sequence X of length t and input dimension D, and a filter
(W) of size context * D.
    For more details about row_conv, please refer to the design document
    `<https://github.com/PaddlePaddle/Paddle/issues/2228#issuecomment-303903645>`_ .
Parameters:
num_channels (int): input data's feature size.
future_context_size (int): Future context size. Please note, the shape
of convolution kernel is [future_context_size + 1, D].
param_attr (ParamAttr): Attributes of parameters, including
name, initializer etc. Default: None.
act (str): Non-linear activation to be applied to output tensor. Default: None.
dtype (str, optional): Data type, it can be "float32". Default: "float32".
Attributes:
weight (Parameter): shape [future_context_size + 1, D], the learnable
weight (convolution kernel) of this layer.
Returns:
None
Examples:
.. code-block:: python
            import paddle
            from paddle import nn
            import paddle.nn.functional as F
            import numpy as np
batch_size = 4
time_steps = 8
feature_size = 6
context_size = 4
x = np.random.randn(batch_size, time_steps, feature_size).astype(np.float32)
x = paddle.to_tensor(x)
conv = nn.RowConv(feature_size, context_size)
y = conv(x)
print(y.shape)
# [4, 8, 6]
"""
def __init__(self,
num_channels,
future_context_size,
param_attr=None,
act=None,
dtype="float32"):
super(RowConv, self).__init__()
self._dtype = dtype
self._param_attr = param_attr
self._act = act
filter_shape = [future_context_size + 1, num_channels]
self.weight = self.create_parameter(
filter_shape, attr=param_attr, dtype=dtype)
def forward(self, input):
out = F.extension.row_conv(input, self.weight, act=self._act)
return out
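Reading the description above, the lookahead (row) convolution for an input X of shape (T, D) and a kernel W of shape (context + 1, D) mixes each time step with up to `context` future steps, channel by channel. A small numpy sketch of that computation, written from the docstring rather than from the removed kernels, so treat it as an illustration only:

    # Illustration only: out[t, d] = sum_j W[j, d] * X[t + j, d], with steps
    # past the end of the sequence treated as zero.
    import numpy as np

    def row_conv_ref(x, w):
        time_steps, channels = x.shape
        context = w.shape[0] - 1
        out = np.zeros_like(x)
        for t in range(time_steps):
            for j in range(context + 1):
                if t + j < time_steps:
                    out[t] += w[j] * x[t + j]
        return out

    x = np.random.randn(8, 6).astype(np.float32)  # (time_steps, feature_size)
    w = np.random.randn(5, 6).astype(np.float32)  # (context + 1, feature_size)
    print(row_conv_ref(x, w).shape)               # (8, 6)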