Unverified commit f6f104d5 authored by J JYChen, committed by GitHub

remove some leftover APIs in fluid.nn (#52503)

Parent c045d17e
@@ -61,44 +61,6 @@ __all__ = [
'autoincreased_step_counter',
]
OP_NAMEMAPPING = {
'elementwise_max': 'maximum',
'elementwise_min': 'minimum',
'elementwise_pow': 'elementwise_pow',
'elementwise_floordiv': 'floor_divide',
'elementwise_add': 'add',
'elementwise_sub': 'subtract',
'elementwise_mul': 'multiply',
'elementwise_div': 'divide',
'elementwise_mod': 'remainder',
}
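OP_NAMEMAPPING translates legacy elementwise op names to their paddle 2.x tensor-API counterparts. A minimal dispatch sketch, assuming dynamic mode (the helper below is illustrative, not this file's actual code path):
import paddle

def _run_mapped_op(op_name, x, y):
    # e.g. 'elementwise_add' -> 'add' -> paddle.add(x, y)
    return getattr(paddle, OP_NAMEMAPPING[op_name])(x, y)

out = _run_mapped_op('elementwise_add', paddle.to_tensor([1.0, 2.0]), paddle.to_tensor([3.0, 4.0]))
print(out)  # [4., 6.]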
def _get_reduce_dim(dim, input):
"""
Internal helper for reduce_sum, reduce_mean, and reduce_prod.
It computes the value of the reduce_all attribute based on the given axis.
"""
if dim is not None and not isinstance(dim, list):
if isinstance(dim, (tuple, range)):
dim = list(dim)
elif isinstance(dim, int):
dim = [dim]
else:
raise TypeError(
"The type of dim must be int, list, tuple or range, but received {}".format(
type(dim)
)
)
if dim is None:
dim = []
if dim == [] or len(dim) == len(input.shape):
reduce_all = True
else:
reduce_all = False
return reduce_all, dim
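A quick sketch of the values this helper yields, assuming it is called from this module on a 2-D static-graph tensor:
import paddle
paddle.enable_static()
x = paddle.static.data(name='x', shape=[2, 4], dtype='float32')
assert _get_reduce_dim(None, x) == (True, [])        # no axis: reduce everything
assert _get_reduce_dim(1, x) == (False, [1])         # a single int axis
assert _get_reduce_dim((0, 1), x) == (True, [0, 1])  # tuple covering every axis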
@deprecated(since="2.0.0", update_to="paddle.nn.functional.embedding")
def embedding(
@@ -265,154 +227,6 @@ def embedding(
return tmp
def _pull_sparse(
input,
size,
table_id,
accessor_class,
name="embedding",
ctr_label_name="",
padding_id=0,
dtype='float32',
scale_sparse_grad=True,
):
r"""
**Pull Fleet Sparse Layer**
This layer is used to look up the embeddings of the IDs provided by :attr:`input` in
a Fleet lookup table. The result of this lookup is the embedding of each ID in
:attr:`input`.
Args:
input(Variable|list of Variable): The input, a Tensor<int64> Variable that
contains the IDs.
size(int): The embedding size, i.e. the length of each embedding vector.
table_id(int): The fleet table id of this embedding.
accessor_class(str): The pslib accessor of the table; default is DownpourCtrAccessor.
ctr_label_name(str): The layer name of the click label.
padding_id(int): The padding id used during lookup; default is 0.
dtype(str): The data type of the output tensor. Only float32 is
supported for now.
scale_sparse_grad(bool): Whether to scale the sparse gradient by batch size;
default is True.
Returns:
Variable|list of Variable: The tensor variable storing the embeddings of the \
supplied inputs.
Examples:
.. code-block:: python
import paddle
import paddle.fluid as fluid
paddle.enable_static()
data = paddle.static.data(name='sequence', shape=[-1, 1], dtype='int64', lod_level=1)
emb = fluid.layers.nn._pull_sparse(
input=data, size=11, table_id=0, accessor_class="DownpourCtrAccessor")
"""
helper = LayerHelper(name, **locals())
inputs = helper.multiple_input()
outs = [helper.create_variable_for_type_inference(dtype)]
input_names = [i.name for i in inputs]
attrs = {
'EmbeddingDim': size,
'TableId': table_id,
'AccessorClass': accessor_class,
'CtrLabelName': ctr_label_name,
'PaddingId': padding_id,
'ScaleSparseGrad': scale_sparse_grad,
'InputNames': input_names,
# this is only for compatibility with the embedding op
'is_distributed': True,
}
# this is only for compatibility with the embedding op
w, _ = helper.create_or_get_global_variable(
name=name, shape=[size], dtype=dtype, is_bias=False, persistable=True
)
helper.append_op(
type='pull_sparse',
inputs={'Ids': inputs, 'W': w},
outputs={'Out': outs},
attrs=attrs,
)
if len(outs) == 1:
return outs[0]
return outs
def _pull_sparse_v2(
input,
size,
table_id,
accessor_class,
name="embedding",
ctr_label_name="",
padding_id=0,
dtype='float32',
scale_sparse_grad=True,
):
r"""
**Pull Fleet Sparse Layer**
This layer is used to look up the embeddings of the IDs provided by :attr:`input` in
a Fleet lookup table. The result of this lookup is the embedding of each ID in
:attr:`input`.
Args:
input(Variable|list of Variable): The input, a Tensor<int64> Variable that
contains the IDs.
size(int): The embedding size, i.e. the length of each embedding vector.
table_id(int): The pslib table id of this embedding.
accessor_class(str): The fleet accessor of the table; default is DownpourCtrAccessor.
ctr_label_name(str): The layer name of the click label.
padding_id(int): The padding id used during lookup; default is 0.
dtype(str): The data type of the output tensor. Only float32 is
supported for now.
scale_sparse_grad(bool): Whether to scale the sparse gradient by batch size;
default is True.
Returns:
Variable|list of Variable: The tensor variable storing the embeddings of the \
supplied inputs.
Examples:
.. code-block:: python
import paddle
import paddle.fluid as fluid
paddle.enable_static()
data = paddle.static.data(name='sequence', shape=[-1, 1], dtype='int64', lod_level=1)
emb = fluid.layers.nn._pull_sparse_v2(
input=data, size=11, table_id=0, accessor_class="DownpourCtrAccessor")
"""
helper = LayerHelper(name, **locals())
inputs = helper.multiple_input()
outs = [helper.create_variable_for_type_inference(dtype)]
input_names = [i.name for i in inputs]
attrs = {
'EmbeddingDim': size,
'TableId': table_id,
'AccessorClass': accessor_class,
'CtrLabelName': ctr_label_name,
'PaddingId': padding_id,
'ScaleSparseGrad': scale_sparse_grad,
'InputNames': input_names,
# this is only for compatibility with the embedding op
'is_distributed': True,
}
# this is only for compatibility with the embedding op
w, _ = helper.create_or_get_global_variable(
name=name, shape=[size], dtype=dtype, is_bias=False, persistable=True
)
helper.append_op(
type='pull_sparse_v2',
inputs={'Ids': inputs, 'W': w},
outputs={'Out': outs},
attrs=attrs,
)
if len(outs) == 1:
return outs[0]
return outs
def _pull_gpups_sparse(
input, size, dtype='float32', is_distributed=False, is_sparse=False
):
@@ -535,83 +349,6 @@ def _pull_box_sparse(
return outs
def reduce_sum(input, dim=None, keep_dim=False, name=None):
"""
Computes the sum of tensor elements over the given dimension.
Args:
input (Variable): The input variable, a Tensor whose data type is float32,
float64, int32, or int64.
dim (list|int, optional): The dimensions along which the sum is performed. If
:attr:`None`, sum all elements of :attr:`input` and return a
Tensor variable with a single element; otherwise each entry must be in the
range :math:`[-rank(input), rank(input))`. If :math:`dim[i] < 0`,
the dimension to reduce is :math:`rank + dim[i]`.
keep_dim (bool, optional): Whether to keep the reduced dimensions in the
output Tensor. The result tensor has one fewer dimension per reduced
axis than :attr:`input` unless :attr:`keep_dim` is true. Default
value is False.
name (str, optional): The default value is None. Normally there is no need for
the user to set this property. For more information, please refer to :ref:`api_guide_Name`.
Returns:
Variable: Tensor, the result of the summation over the specified dim of the input tensor;
its data type is the same as the input Tensor's.
Raises:
TypeError: if the output data type differs from the input data type.
Examples:
.. code-block:: python
import paddle.fluid as fluid
import paddle
paddle.enable_static()
# x is a Tensor variable with following elements:
# [[0.2, 0.3, 0.5, 0.9]
# [0.1, 0.2, 0.6, 0.7]]
# Each example is followed by the corresponding output tensor.
x = paddle.static.data(name='x', shape=[2, 4], dtype='float32')
fluid.layers.nn.reduce_sum(x) # [3.5]
fluid.layers.nn.reduce_sum(x, dim=0) # [0.3, 0.5, 1.1, 1.6]
fluid.layers.nn.reduce_sum(x, dim=-1) # [1.9, 1.6]
fluid.layers.nn.reduce_sum(x, dim=1, keep_dim=True) # [[1.9], [1.6]]
# y is a Tensor variable with shape [2, 2, 2] and elements as below:
# [[[1, 2], [3, 4]],
# [[5, 6], [7, 8]]]
# Each example is followed by the corresponding output tensor.
y = paddle.static.data(name='y', shape=[2, 2, 2], dtype='float32')
fluid.layers.nn.reduce_sum(y, dim=[1, 2]) # [10, 26]
fluid.layers.nn.reduce_sum(y, dim=[0, 1]) # [16, 20]
"""
reduce_all, dim = _get_reduce_dim(dim, input)
if in_dygraph_mode():
return _C_ops.sum(input, dim, None, keep_dim)
else:
attrs = {'dim': dim, 'keep_dim': keep_dim, 'reduce_all': reduce_all}
check_variable_and_dtype(
input,
'input',
['float16', 'float32', 'float64', 'int32', 'int64'],
'reduce_sum',
)
helper = LayerHelper('reduce_sum', **locals())
out = helper.create_variable_for_type_inference(
dtype=helper.input_dtype()
)
helper.append_op(
type='reduce_sum',
inputs={'X': input},
outputs={'Out': out},
attrs=attrs,
)
return out
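Since this commit removes reduce_sum from fluid.layers.nn, migrating callers would use paddle.sum, its paddle 2.x equivalent; a minimal sketch in dynamic mode, reusing the docstring's example data:
import paddle
x = paddle.to_tensor([[0.2, 0.3, 0.5, 0.9],
                      [0.1, 0.2, 0.6, 0.7]])
paddle.sum(x)                        # [3.5]
paddle.sum(x, axis=0)                # [0.3, 0.5, 1.1, 1.6]
paddle.sum(x, axis=-1)               # [1.9, 1.6]
paddle.sum(x, axis=1, keepdim=True)  # [[1.9], [1.6]]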
def autoincreased_step_counter(counter_name=None, begin=1, step=1):
"""
:api_attr: Static Graph
@@ -663,146 +400,3 @@ def autoincreased_step_counter(counter_name=None, begin=1, step=1):
counter.stop_gradient = True
return counter
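autoincreased_step_counter remains exported (see __all__ above); a minimal usage sketch, assuming a static-graph program:
import paddle
import paddle.fluid as fluid
paddle.enable_static()
# Creates (or reuses) a persistable counter variable that increases by
# `step` each time the program runs.
global_step = fluid.layers.autoincreased_step_counter(begin=0, step=1)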
def unsqueeze(input, axes, name=None):
"""
Inserts single-dimensional entries into the shape of a Tensor. Takes one
required argument, axes, a list of dimensions to insert.
Dimension indices in axes are as seen in the output tensor.
For example:
.. code-block:: text
Given a tensor with shape [3, 4, 5],
the unsqueezed tensor with axes=[0, 4] has shape [1, 3, 4, 5, 1].
Args:
input (Variable): The input Tensor to be unsqueezed. Supported data types: float32, float64, bool, int8, int32, int64.
axes (int|list|tuple|Variable): Indicates the dimensions to be inserted. The data type is ``int32``. If ``axes`` is a list or tuple, its elements should be integers or Tensors with shape [1]. If ``axes`` is a Variable, it should be a 1-D Tensor.
name (str|None): Name for this layer.
Returns:
Variable: Unsqueezed Tensor, with the same data type as input.
Examples:
.. code-block:: python
import paddle
import paddle.fluid as fluid
paddle.enable_static()
x = paddle.static.data(name='x', shape=[-1, 5, 10], dtype="float32")
y = fluid.layers.unsqueeze(input=x, axes=[1])
"""
if in_dygraph_mode():
if isinstance(axes, int):
axes = [axes]
elif isinstance(axes, Variable):
axes = axes.tolist()
elif isinstance(axes, (list, tuple)):
axes = [
item.item(0) if isinstance(item, Variable) else item
for item in axes
]
return _C_ops.unsqueeze(input, axes)
else:
check_type(axes, 'axis/axes', (int, list, tuple, Variable), 'unsqueeze')
check_variable_and_dtype(
input,
'input',
[
'float16',
'float32',
'float64',
'bool',
'int8',
'int16',
'int32',
'int64',
'complex64',
'complex128',
],
'unsqueeze',
)
helper = LayerHelper("unsqueeze2", **locals())
inputs = {"X": input}
attrs = {}
if isinstance(axes, int):
axes = [axes]
if isinstance(axes, Variable):
axes.stop_gradient = True
inputs["AxesTensor"] = axes
elif isinstance(axes, (list, tuple)):
if paddle.utils._contain_var(axes):
inputs["AxesTensorList"] = paddle.utils._convert_to_tensor_list(
axes
)
else:
attrs["axes"] = axes
out = helper.create_variable_for_type_inference(dtype=input.dtype)
x_shape = helper.create_variable_for_type_inference(dtype=input.dtype)
helper.append_op(
type="unsqueeze2",
inputs=inputs,
attrs=attrs,
outputs={"Out": out, "XShape": x_shape},
)
return out
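The paddle 2.x counterpart of this removed API is paddle.unsqueeze; a minimal sketch in dynamic mode:
import paddle
x = paddle.rand([3, 4, 5])
y = paddle.unsqueeze(x, axis=[0, 4])
print(y.shape)  # [1, 3, 4, 5, 1]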
def _logical_op(op_name, x, y, out=None, name=None, binary_op=True):
if in_dygraph_mode():
op = getattr(_legacy_C_ops, op_name)
if binary_op:
return op(x, y)
else:
return op(x)
else:
check_variable_and_dtype(
x,
"x",
["bool", "int8", "int16", "int32", "int64", "float32", "float64"],
op_name,
)
if y is not None:
check_variable_and_dtype(
y,
"y",
[
"bool",
"int8",
"int16",
"int32",
"int64",
"float32",
"float64",
],
op_name,
)
if out is not None:
check_type(out, "out", Variable, op_name)
helper = LayerHelper(op_name, **locals())
if binary_op and x.dtype != y.dtype:
raise ValueError(
"(InvalidArgument) The DataType of %s Op's Variable must be consistent, but received %s and %s."
% (op_name, x.dtype, y.dtype)
)
if out is None:
out = helper.create_variable_for_type_inference(dtype=x.dtype)
if binary_op:
helper.append_op(
type=op_name, inputs={"X": x, "Y": y}, outputs={"Out": out}
)
else:
helper.append_op(
type=op_name, inputs={"X": x}, outputs={"Out": out}
)
return out
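_logical_op is the shared backend for the element-wise logical ops. A hedged sketch of how public wrappers typically drive it (the wrapper bodies below are illustrative assumptions, not code from this diff):
def logical_and(x, y, out=None, name=None):
    # binary op: both X and Y feed the underlying kernel
    return _logical_op(op_name="logical_and", x=x, y=y, out=out, name=name, binary_op=True)

def logical_not(x, out=None, name=None):
    # unary op: only X is consumed
    return _logical_op(op_name="logical_not", x=x, y=None, out=out, name=name, binary_op=False)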
@@ -23,6 +23,8 @@ from paddle.incubate.distributed.fleet.base import Mode
from paddle.incubate.distributed.fleet.base import DistributedOptimizer
from paddle.incubate.distributed.fleet.role_maker import MPISymetricRoleMaker
from paddle.incubate.distributed.fleet.role_maker import HeterRoleMaker
from paddle.common_ops_import import LayerHelper
import paddle
@@ -961,6 +963,51 @@ def _fleet_embedding(
param_attr(ParamAttr): Specifies the weight parameter property.
dtype(str): The data type of the output.
"""
def _pull_sparse(
input,
size,
table_id,
accessor_class,
name="embedding",
ctr_label_name="",
padding_id=0,
dtype='float32',
scale_sparse_grad=True,
):
helper = LayerHelper(name, **locals())
inputs = helper.multiple_input()
outs = [helper.create_variable_for_type_inference(dtype)]
input_names = [i.name for i in inputs]
attrs = {
'EmbeddingDim': size,
'TableId': table_id,
'AccessorClass': accessor_class,
'CtrLabelName': ctr_label_name,
'PaddingId': padding_id,
'ScaleSparseGrad': scale_sparse_grad,
'InputNames': input_names,
# this is only for compatibility with the embedding op
'is_distributed': True,
}
# this is only for compatibility with the embedding op
w, _ = helper.create_or_get_global_variable(
name=name,
shape=[size],
dtype=dtype,
is_bias=False,
persistable=True,
)
helper.append_op(
type='pull_sparse',
inputs={'Ids': inputs, 'W': w},
outputs={'Out': outs},
attrs=attrs,
)
if len(outs) == 1:
return outs[0]
return outs
# check and set params
_prepare_params(
input, size, is_sparse, is_distributed, padding_idx, param_attr, dtype
@@ -970,7 +1017,8 @@ def _fleet_embedding(
if padding_idx is None:
padding_idx = 0
global FLEET_GLOBAL_DICT
return paddle.static.nn._pull_sparse(
return _pull_sparse(
input=input,
size=size,
table_id=FLEET_GLOBAL_DICT["emb_to_table"][name],
@@ -1003,6 +1051,51 @@ def _fleet_embedding_v2(
param_attr(ParamAttr): Specifies the weight parameter property.
dtype(str): The data type of the output.
"""
def _pull_sparse_v2(
input,
size,
table_id,
accessor_class,
name="embedding",
ctr_label_name="",
padding_id=0,
dtype='float32',
scale_sparse_grad=True,
):
helper = LayerHelper(name, **locals())
inputs = helper.multiple_input()
outs = [helper.create_variable_for_type_inference(dtype)]
input_names = [i.name for i in inputs]
attrs = {
'EmbeddingDim': size,
'TableId': table_id,
'AccessorClass': accessor_class,
'CtrLabelName': ctr_label_name,
'PaddingId': padding_id,
'ScaleSparseGrad': scale_sparse_grad,
'InputNames': input_names,
# this is only for compatibility with the embedding op
'is_distributed': True,
}
# this is only for compatibility with the embedding op
w, _ = helper.create_or_get_global_variable(
name=name,
shape=[size],
dtype=dtype,
is_bias=False,
persistable=True,
)
helper.append_op(
type='pull_sparse_v2',
inputs={'Ids': inputs, 'W': w},
outputs={'Out': outs},
attrs=attrs,
)
if len(outs) == 1:
return outs[0]
return outs
# check and set params
_prepare_params(
input, size, is_sparse, is_distributed, padding_idx, param_attr, dtype
@@ -1012,7 +1105,7 @@ def _fleet_embedding_v2(
if padding_idx is None:
padding_idx = 0
return paddle.static.nn._pull_sparse_v2(
return _pull_sparse_v2(
input=input,
size=size,
table_id=FLEET_GLOBAL_DICT["emb_to_table"][name],
......
@@ -41,8 +41,6 @@ from .common import layer_norm  # noqa: F401
from .common import embedding # noqa: F401
from ...fluid.contrib.layers import sparse_embedding # noqa: F401
from ...fluid.layers import StaticRNN # noqa: F401
from ...fluid.layers.nn import _pull_sparse # noqa: F401
from ...fluid.layers.nn import _pull_sparse_v2 # noqa: F401
from .sequence_lod import sequence_conv # noqa: F401
from .sequence_lod import sequence_softmax # noqa: F401
......