Unverified commit 5675c7d5 authored by ccrrong, committed by GitHub

remove range from fluid (#48086)

* remove range
Parent f38e09f0
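For call sites being migrated, here is a minimal sketch of the replacement pattern this commit applies throughout (the example values come from the removed docstring further below; it assumes Paddle 2.x, where `paddle.arange` is available):

```python
import paddle

# fluid.layers.range(start, end, step, dtype) is removed by this commit;
# paddle.arange produces the same half-open interval [start, end).
out = paddle.arange(0, 10, 2, dtype='int32')
# out -> [0, 2, 4, 6, 8]
```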
@@ -20,7 +20,6 @@ from paddle.fluid.dygraph.dygraph_to_static.variable_trans_func import (
)
from paddle.fluid.framework import core, Variable
from paddle.fluid.layers import Assert, Print
from paddle.fluid.layers import range as paddle_range
from paddle.fluid.layers import (
array_length,
array_read,
@@ -570,7 +569,7 @@ class VariableTuple:
self.var = var
self.len = convert_len(var)
if isinstance(self.len, Variable):
self.rag = paddle_range(start, start + self.len, 1, paddle.int64)
self.rag = paddle.arange(start, start + self.len, 1, paddle.int64)
else:
self.rag = range(start, start + self.len)
@@ -592,11 +591,11 @@ def convert_range(*args):
has_variable = any(map(lambda x: isinstance(x, Variable), args))
if has_variable:
if len(args) == 1:
return paddle_range(0, args[0], 1, paddle.int64)
return paddle.arange(0, args[0], 1, paddle.int64)
if len(args) == 2:
return paddle_range(args[0], args[1], 1, paddle.int64)
return paddle.arange(args[0], args[1], 1, paddle.int64)
if len(args) == 3:
return paddle_range(args[0], args[1], args[2], paddle.int64)
return paddle.arange(args[0], args[1], args[2], paddle.int64)
return range(*args)
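The dispatch above only builds a graph op when an argument is a `Variable`; plain Python ints still fall through to the builtin `range`. A rough illustration of the two paths (hypothetical inputs, assuming Paddle 2.x dygraph):

```python
import paddle

# Tensor bound -> the paddle.arange branch keeps the loop bound in the graph.
n = paddle.to_tensor(5, dtype='int64')
tensor_iter = paddle.arange(0, n, 1, paddle.int64)

# Plain ints -> the ordinary Python range branch.
python_iter = range(0, 5)
```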
......
@@ -1163,7 +1163,7 @@ class BeamSearchDecoder(Decoder):
batch_size.stop_gradient = True # TODO: remove this
batch_pos = paddle.tile(
nn.unsqueeze(
tensor.range(0, batch_size, 1, dtype=indices.dtype), [1]
paddle.arange(0, batch_size, 1, dtype=indices.dtype), [1]
),
[1, self.beam_size],
)
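The tiled `arange` above supplies row (batch) indices that are paired with the top-k column indices when gathering beam parents. A standalone sketch with assumed toy sizes, not the decoder's exact code:

```python
import paddle

batch_size, beam_size = 3, 2  # assumed toy sizes
batch_pos = paddle.tile(
    paddle.unsqueeze(paddle.arange(0, batch_size, 1, dtype='int64'), [1]),
    [1, beam_size],
)
# batch_pos -> [[0, 0], [1, 1], [2, 2]]; stacked with the top-k indices it
# gives per-example (row, col) coordinates for the gather.
```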
......
@@ -66,7 +66,6 @@ __all__ = [
'has_inf',
'has_nan',
'isfinite',
'range',
'linspace',
'zeros_like',
'ones_like',
@@ -1635,108 +1634,6 @@ def isfinite(x):
return out
def range(start, end, step, dtype, name=None):
"""
This OP returns a 1-D Tensor with spaced values within a given interval.
Values are generated into the half-open interval [``start``, ``end``) with
the ``step``. (the interval including ``start`` but excluding ``end``).
If ``dtype`` is float32 or float64, we advise adding a small epsilon to
``end`` to avoid floating point rounding errors when comparing against ``end``.
Parameters:
start(float|int|Tensor): Start of interval. The interval includes this
value. If ``start`` is a Tensor, it is a 1-D Tensor with shape [1],
with data type int32, int64, float32, float64.
end(float|int|Tensor): End of interval. The interval does not include
this value. If ``end`` is a Tensor, it is a 1-D Tensor with shape
[1], with data type int32, int64, float32, float64.
step(float|int|Tensor): Spacing between values. For any out, it is
the distance between two adjacent values, out[i+1] - out[i]. If
``step`` is a Tensor, it is a 1-D Tensor with shape [1], with data
type int32, int64, float32, float64.
dtype(str|np.dtype|core.VarDesc.VarType, optional): The data type of the
output tensor. Supported data types: int32, int64, float32, float64.
name(str, optional): The default value is None. Normally there is no
need for user to set this property. For more information, please
refer to :ref:`api_guide_Name`.
Returns:
Tensor: A 1-D Tensor with values from the interval [``start``, ``end``)
taken with common difference ``step`` beginning from ``start``. Its
data type is set by ``dtype``.
Raises:
TypeError: If ``dtype`` is not int32, int64, float32, float64.
Examples:
.. code-block:: python
import paddle.fluid as fluid
out1 = fluid.layers.range(0, 10, 2, 'int32')
# [0, 2, 4, 6, 8]
start_var = fluid.layers.fill_constant([1], 'int64', 3)
out2 = fluid.layers.range(start_var, 7, 1, 'int64')
# [3, 4, 5, 6]
"""
out_shape = None
if (
not isinstance(start, Variable)
and not isinstance(end, Variable)
and not isinstance(step, Variable)
):
out_shape = [int(math.ceil((end - start) / step))]
if not isinstance(dtype, core.VarDesc.VarType):
dtype = convert_np_dtype_to_dtype_(dtype)
if not isinstance(start, Variable):
with device_guard("cpu"):
start = fill_constant([1], dtype, start, force_cpu=True)
elif start.dtype != dtype:
start = cast(start, dtype)
if not isinstance(end, Variable):
with device_guard("cpu"):
end = fill_constant([1], dtype, end, force_cpu=True)
elif end.dtype != dtype:
end = cast(end, dtype)
if not isinstance(step, Variable):
with device_guard("cpu"):
step = fill_constant([1], dtype, step, force_cpu=True)
elif step.dtype != dtype:
step = cast(step, dtype)
if in_dygraph_mode():
return _C_ops.arange(start, end, step, dtype, _current_expected_place())
if _in_legacy_dygraph():
out = _legacy_C_ops.range(start, end, step)
out.stop_gradient = True
return out
check_dtype(
dtype, 'dtype', ['float32', 'float64', 'int32', 'int64'], 'range/arange'
)
helper = LayerHelper('range', **locals())
out = helper.create_variable_for_type_inference(dtype, shape=out_shape)
helper.append_op(
type='range',
inputs={'Start': start, 'End': end, 'Step': step},
outputs={'Out': out},
)
out.stop_gradient = True
if out_shape is not None:
out.desc.set_shape(out_shape)
return out
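One practical note from the removed docstring carries over to `paddle.arange`: with float dtypes, pad ``end`` with a small epsilon so rounding does not drop the final element. A small sketch (the epsilon value is an arbitrary assumption; anything well below ``step`` works):

```python
import paddle

eps = 1e-4  # assumed tolerance, much smaller than the step
out = paddle.arange(0.1, 1.0 + eps, 0.3, dtype='float32')
# roughly [0.1, 0.4, 0.7, 1.0]; without eps, float rounding decides
# whether the last element is kept.
```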
def linspace(start, stop, num, dtype=None, name=None):
r"""
This OP returns a fixed number of evenly spaced values within a given interval.
......
@@ -13,6 +13,7 @@
# limitations under the License.
import numpy as np
import paddle
import paddle.fluid as fluid
import paddle.fluid.layers as layers
import unittest
@@ -43,7 +44,7 @@ def build_and_run_program(place, batch_size, beam_size, stop_gradient=False):
bs = layers.cast(bs, 'int64')
bs.stop_gradient = stop_gradient
batch_pos = layers.expand(
layers.unsqueeze(layers.range(0, bs, 1, dtype=bs.dtype), [1]),
layers.unsqueeze(paddle.arange(0, bs, 1, dtype=bs.dtype), [1]),
[1, beam_size],
)
topk_coordinates = paddle.stack([batch_pos, indices], axis=2)
......
@@ -4023,13 +4023,13 @@ class TestBook(LayerTest):
with program_guard(
fluid.default_main_program(), fluid.default_startup_program()
):
layers.range(0, 10, 2, 'int32')
layers.range(0.1, 10.0, 0.2, 'float32')
layers.range(0.1, 10.0, 0.2, 'float64')
paddle.arange(0, 10, 2, 'int32')
paddle.arange(0.1, 10.0, 0.2, 'float32')
paddle.arange(0.1, 10.0, 0.2, 'float64')
start = layers.fill_constant(shape=[1], value=0.1, dtype="float32")
end = layers.fill_constant(shape=[1], value=10.0, dtype="float32")
step = layers.fill_constant(shape=[1], value=0.2, dtype="float32")
y = layers.range(start, end, step, 'float64')
y = paddle.arange(start, end, step, 'float64')
return y
def make_spectral_norm(self):
......
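The updated `TestBook` case above also exercises `paddle.arange` with Tensor-valued ``start``/``end``/``step``. A standalone version of that usage (variable names are illustrative):

```python
import paddle

start = paddle.full([1], 0.1, dtype='float32')
end = paddle.full([1], 10.0, dtype='float32')
step = paddle.full([1], 0.2, dtype='float32')
y = paddle.arange(start, end, step, dtype='float64')  # Tensor bounds are accepted
```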