Unverified commit 23299c70 authored by H heyanru, committed by GitHub

[Fluid Clean] remove argsort, linspace, diag (#48456)

Parent ef575d6a
......@@ -58,10 +58,7 @@ __all__ = [
'fill_constant',
'argmin',
'argmax',
'argsort',
'zeros',
'linspace',
'diag',
]
......@@ -1140,102 +1137,6 @@ def argmax(x, axis=0):
return out
def argsort(input, axis=-1, descending=False, name=None):
"""
:alias_main: paddle.argsort
:alias: paddle.argsort,paddle.tensor.argsort,paddle.tensor.search.argsort
:old_api: paddle.fluid.layers.argsort
This OP sorts the input along the given axis, and returns the sorted output
data Variable and its corresponding index Variable, both with the same shape as
:attr:`input`.
Args:
input(Variable): An input N-D Tensor with type float32, float64, int16,
int32, int64, uint8.
axis(int, optional): Axis to compute indices along. The effective range
is [-R, R), where R is Rank(x). When axis < 0, it works the same way
as axis + R. Default is -1.
descending(bool, optional): If set to True, the algorithm sorts in
descending order; otherwise it sorts in ascending order. Default is
False.
name(str, optional): The default value is None. Normally there is no
need for user to set this property. For more information, please
refer to :ref:`api_guide_Name`.
Returns:
tuple: A tuple of the sorted data Variable (with the same shape and data
type as the input) and the sorted indices (with the same shape as the
input and with data type int64).
Examples:
.. code-block:: python
import paddle.fluid as fluid
import numpy as np
in1 = np.array([[[5,8,9,5],
[0,0,1,7],
[6,9,2,4]],
[[5,2,4,2],
[4,7,7,9],
[1,7,0,6]]]).astype(np.float32)
with fluid.dygraph.guard():
x = fluid.dygraph.to_variable(in1)
out1 = fluid.layers.argsort(input=x, axis=-1)
out2 = fluid.layers.argsort(input=x, axis=0)
out3 = fluid.layers.argsort(input=x, axis=1)
print(out1[0].numpy())
# [[[5. 5. 8. 9.]
# [0. 0. 1. 7.]
# [2. 4. 6. 9.]]
# [[2. 2. 4. 5.]
# [4. 7. 7. 9.]
# [0. 1. 6. 7.]]]
print(out1[1].numpy())
# [[[0 3 1 2]
# [0 1 2 3]
# [2 3 0 1]]
# [[1 3 2 0]
# [0 1 2 3]
# [2 0 3 1]]]
print(out2[0].numpy())
# [[[5. 2. 4. 2.]
# [0. 0. 1. 7.]
# [1. 7. 0. 4.]]
# [[5. 8. 9. 5.]
# [4. 7. 7. 9.]
# [6. 9. 2. 6.]]]
print(out3[0].numpy())
# [[[0. 0. 1. 4.]
# [5. 8. 2. 5.]
# [6. 9. 9. 7.]]
# [[1. 2. 0. 2.]
# [4. 7. 4. 6.]
# [5. 7. 7. 9.]]]
"""
check_variable_and_dtype(
input,
'input',
['float32', 'float64', 'int16', 'int32', 'int64', 'uint8'],
'argsort',
)
helper = LayerHelper("argsort", **locals())
out = helper.create_variable_for_type_inference(
dtype=input.dtype, stop_gradient=True
)
ids = helper.create_variable_for_type_inference(
VarDesc.VarType.INT64, stop_gradient=True
)
helper.append_op(
type='argsort',
inputs={'X': input},
outputs={'Out': out, 'Indices': ids},
attrs={'axis': axis, 'descending': descending},
)
return out, ids
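A minimal migration sketch, assuming Paddle 2.x dygraph mode: the (values, indices) pair that the removed fluid.layers.argsort returned in one call is now obtained from paddle.sort and paddle.argsort separately.

import paddle

x = paddle.to_tensor([[5.0, 8.0, 9.0, 5.0],
                      [0.0, 0.0, 1.0, 7.0]])
# Old: out, ids = fluid.layers.argsort(input=x, axis=-1)
sorted_x = paddle.sort(x, axis=-1, descending=False)    # sorted values, matching the old `out`
indices = paddle.argsort(x, axis=-1, descending=False)  # int64 indices, matching the old `ids`
print(sorted_x.numpy())  # [[5. 5. 8. 9.] [0. 0. 1. 7.]]
print(indices.numpy())   # [[0 3 1 2] [0 1 2 3]]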
def zeros(shape, dtype, force_cpu=False, name=None):
"""
The OP creates a tensor of specified :attr:`shape` and :attr:`dtype`, and fills it with 0.
......@@ -1265,171 +1166,3 @@ def zeros(shape, dtype, force_cpu=False, name=None):
data1 = fluid.layers.zeros(shape=shape, dtype='int32') #[[0, 0], [0, 0]]
"""
return fill_constant(value=0.0, **locals())
def linspace(start, stop, num, dtype=None, name=None):
r"""
This OP returns a fixed number of evenly spaced values within a given interval.
Args:
start(int|float|Tensor): The start of the range. It is a scalar, \
or a Tensor of shape [1] with data type int32, int64, float32 or float64.
stop(int|float|Tensor): The end of the range. It is a scalar, \
or a Tensor of shape [1] with data type int32, int64, float32 or float64.
num(int|Tensor): The number of values in the sequence. It is an int scalar, \
or a Tensor of shape [1] with data type int32.
dtype(np.dtype|str, optional): The data type of the output tensor; it can be
int32, int64, float32 or float64. Default: if None, the data type is float32.
name(str, optional): Normally there is no need for the user to set this property.
For more information, please refer to :ref:`api_guide_Name`. Default: None.
Returns:
Tensor: A 1-D tensor of shape :math:`[num]` holding the evenly spaced values; \
the data type is set by :attr:`dtype`. If :attr:`num` is set to 1, the output tensor \
contains only the value of :attr:`start`.
Examples:
.. code-block:: python
import paddle
data = paddle.linspace(0, 10, 5, 'float32') # [0.0, 2.5, 5.0, 7.5, 10.0]
data = paddle.linspace(0, 10, 1, 'float32') # [0.0]
"""
if dtype is None:
dtype = 'float32'
tensor_num = num
tensor_start = start
tensor_stop = stop
if not isinstance(num, Variable):
check_type(num, 'num', (int), 'linspace')
if not isinstance(dtype, core.VarDesc.VarType):
dtype = convert_np_dtype_to_dtype_(dtype)
if not isinstance(start, Variable):
with device_guard("cpu"):
tensor_start = fill_constant([1], dtype, start)
if not isinstance(stop, Variable):
with device_guard("cpu"):
tensor_stop = fill_constant([1], dtype, stop)
if not isinstance(num, Variable):
with device_guard("cpu"):
tensor_num = fill_constant([1], 'int32', num)
if in_dygraph_mode():
return _C_ops.linspace(
tensor_start,
tensor_stop,
tensor_num,
dtype,
_current_expected_place(),
)
if _in_legacy_dygraph():
return _legacy_C_ops.linspace(
tensor_start, tensor_stop, tensor_num, 'dtype', dtype
)
helper = LayerHelper("linspace", **locals())
start_dtype = convert_dtype(tensor_start.dtype)
stop_dtype = convert_dtype(tensor_stop.dtype)
out_dtype = convert_dtype(dtype)
if isinstance(start, Variable):
check_dtype(
start.dtype,
'start',
['float32', 'float64', 'int32', 'int64'],
'linspace',
)
else:
check_type(start, 'start', (int, float), 'linspace')
if isinstance(stop, Variable):
check_dtype(
stop.dtype,
'stop',
['float32', 'float64', 'int32', 'int64'],
'linspace',
)
else:
check_type(stop, 'stop', (int, float), 'linspace')
if isinstance(num, Variable):
check_dtype(num.dtype, 'num', ['int32'], 'linspace')
check_dtype(
dtype, 'dtype', ['int32', 'int64', 'float32', 'float64'], 'linspace'
)
if (
(stop_dtype == "float64" or start_dtype == "float64")
and out_dtype in ["float32", "int32"]
) or (
(stop_dtype == "int64" or start_dtype == "int64")
and out_dtype == "int32"
):
raise ValueError(
"The dtype of start/stop is {}/{} but the attr(dtype) of linspace is {}, "
"which may cause data type overflows. Please reset attr(dtype) of linspace.".format(
start_dtype, stop_dtype, dtype
)
)
out = helper.create_variable_for_type_inference(dtype=dtype)
helper.append_op(
type='linspace',
inputs={'Start': tensor_start, 'Stop': tensor_stop, 'Num': tensor_num},
attrs={'dtype': dtype},
outputs={'Out': [out]},
)
if isinstance(num, int):
out.desc.set_shape((num,))
return out
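A minimal replacement sketch, assuming Paddle 2.x: existing call sites only need to switch from fluid.layers.linspace to paddle.linspace, which keeps the (start, stop, num, dtype) signature shown above.

import paddle

# Same arguments as the removed fluid.layers.linspace examples above.
data = paddle.linspace(0, 10, 5, dtype='float32')    # [0.0, 2.5, 5.0, 7.5, 10.0]
single = paddle.linspace(0, 10, 1, dtype='float32')  # [0.0]
print(data.numpy(), single.numpy())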
@deprecated(since="2.0.0", update_to="paddle.diag")
def diag(diagonal):
r"""
:alias_main: paddle.diag
:alias: paddle.diag,paddle.tensor.diag,paddle.tensor.creation.diag
:old_api: paddle.fluid.layers.diag
This OP creates a square matrix which has diagonal values specified by input :attr:`diagonal`.
Args:
diagonal(Variable|numpy.ndarray): The input should be a 1-D tensor of shape :math:`[N]`, \
giving the diagonal values. The input data type should be float32, float64, int32 or int64.
Returns:
Variable: The square matrix of shape :math:`[N, N]` whose diagonal values are specified by \
input :attr:`diagonal`. The output data type is the same as the input data type.
Examples:
.. code-block:: python
# [[3, 0, 0],
#  [0, 4, 0],
#  [0, 0, 5]]
import paddle.fluid as fluid
import numpy as np
diagonal = np.arange(3, 6, dtype='int32')
data = fluid.layers.diag(diagonal)
# diagonal.shape=(3,) data.shape=(3, 3)
"""
check_type(diagonal, 'diagonal', (Variable, numpy.ndarray), 'diag')
check_dtype(
diagonal.dtype,
'diagonal',
['float32', 'float64', 'int32', 'int64'],
'diag',
)
helper = LayerHelper("diag", **locals())
if not isinstance(diagonal, Variable):
diagonal = assign(diagonal)
out = helper.create_variable_for_type_inference(dtype=diagonal.dtype)
helper.append_op(
type='diag', inputs={'Diagonal': [diagonal]}, outputs={'Out': [out]}
)
out.stop_gradient = True
return out
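A minimal replacement sketch, assuming Paddle 2.x: paddle.diag builds the same square matrix from a 1-D tensor; the numpy input is converted explicitly with paddle.to_tensor here, since a Tensor argument is the documented path of the public API.

import numpy as np
import paddle

diagonal = paddle.to_tensor(np.arange(3, 6, dtype='int32'))
data = paddle.diag(diagonal)   # shape (3, 3)
print(data.numpy())
# [[3 0 0]
#  [0 4 0]
#  [0 0 5]]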
......@@ -50,7 +50,7 @@ class TestBase(IPUOpTest):
x = paddle.static.data(
name=self.feed_list[0], shape=self.feed_shape[0], dtype='float32'
)
out, _ = paddle.fluid.layers.argsort(x, **self.attrs)
out = paddle.argsort(x, **self.attrs)
self.fetch_list = [out.name]
def run_model(self, exec_mode):
......
......@@ -91,8 +91,11 @@ class TestArgsortOpCPU(unittest.TestCase):
label = fluid.layers.data(
name="label", shape=self.input_shape, dtype=self.dtype
)
self.sorted_x, self.index = fluid.layers.argsort(
input=x, axis=self.axis, descending=self.descending
)
self.index = paddle.argsort(
x=x, axis=self.axis, descending=self.descending
)
self.sorted_x = paddle.sort(
x=x, axis=self.axis, descending=self.descending
)
self.sorted_x.stop_gradient = False
loss = paddle.multiply(self.sorted_x, label)
......@@ -350,13 +353,13 @@ class TestArgsortErrorOnCPU(unittest.TestCase):
def test_fluid_var_type():
with fluid.program_guard(fluid.Program()):
x = [1]
output = fluid.layers.argsort(input=x)
output = paddle.argsort(x=x)
self.assertRaises(TypeError, test_fluid_var_type)
def test_paddle_var_type():
with fluid.program_guard(fluid.Program()):
x = [1]
output = paddle.argsort(input=x)
output = paddle.argsort(x=x)
self.assertRaises(TypeError, test_paddle_var_type)
......
......@@ -18,7 +18,6 @@ import numpy as np
from op_test import OpTest
import paddle
import paddle.fluid as fluid
from paddle.fluid import Program, program_guard
......@@ -50,7 +49,7 @@ class TestDiagError(unittest.TestCase):
def test_diag_type():
x = [1, 2, 3]
output = fluid.layers.diag(diag=x)
output = paddle.diag(x=x)
self.assertRaises(TypeError, test_diag_type)
......
......@@ -3445,15 +3445,6 @@ class TestBook(LayerTest):
output = layers.l2_normalize(x, axis=1)
return output
def make_argsort(self):
with program_guard(
fluid.default_main_program(), fluid.default_startup_program()
):
data = self._get_data(name='x', shape=[2, 3, 3], dtype="float32")
out, ids = layers.argsort(input=data, axis=1)
return out
return ids
def make_shape(self):
with program_guard(
fluid.default_main_program(), fluid.default_startup_program()
......@@ -3757,7 +3748,7 @@ class TestBook(LayerTest):
def test_affine_grid(self):
with self.static_graph():
data = layers.data(name='data', shape=[2, 3, 3], dtype="float32")
out, ids = layers.argsort(input=data, axis=1)
out = paddle.argsort(x=data, axis=1)
theta = layers.data(name="theta", shape=[2, 3], dtype="float32")
out_shape = layers.data(name="out_shape", shape=[-1], dtype="int32")
......@@ -3983,7 +3974,7 @@ class TestBook(LayerTest):
def test_linspace(self):
program = Program()
with program_guard(program):
out = layers.linspace(20, 10, 5, 'float64')
out = paddle.linspace(20, 10, 5, 'float64')
self.assertIsNotNone(out)
print(str(program))
......
......@@ -139,45 +139,45 @@ class TestLinspaceOpError(unittest.TestCase):
with program_guard(Program(), Program()):
def test_dtype():
fluid.layers.linspace(0, 10, 1, dtype="int8")
paddle.linspace(0, 10, 1, dtype="int8")
self.assertRaises(TypeError, test_dtype)
def test_dtype1():
fluid.layers.linspace(0, 10, 1.33, dtype="int32")
paddle.linspace(0, 10, 1.33, dtype="int32")
self.assertRaises(TypeError, test_dtype1)
def test_start_type():
fluid.layers.linspace([0], 10, 1, dtype="float32")
paddle.linspace([0], 10, 1, dtype="float32")
self.assertRaises(TypeError, test_start_type)
def test_end_type():
fluid.layers.linspace(0, [10], 1, dtype="float32")
paddle.linspace(0, [10], 1, dtype="float32")
self.assertRaises(TypeError, test_end_type)
def test_step_dtype():
fluid.layers.linspace(0, 10, [0], dtype="float32")
paddle.linspace(0, 10, [0], dtype="float32")
self.assertRaises(TypeError, test_step_dtype)
def test_start_dtype():
start = fluid.data(shape=[1], dtype="float64", name="start")
fluid.layers.linspace(start, 10, 1, dtype="float32")
paddle.linspace(start, 10, 1, dtype="float32")
self.assertRaises(ValueError, test_start_dtype)
def test_end_dtype():
end = fluid.data(shape=[1], dtype="float64", name="end")
fluid.layers.linspace(0, end, 1, dtype="float32")
paddle.linspace(0, end, 1, dtype="float32")
self.assertRaises(ValueError, test_end_dtype)
def test_num_dtype():
num = fluid.data(shape=[1], dtype="int32", name="step")
fluid.layers.linspace(0, 10, num, dtype="float32")
paddle.linspace(0, 10, num, dtype="float32")
self.assertRaises(TypeError, test_step_dtype)
......