Unverified commit 1e688e98 authored by Vvsmile, committed by GitHub

[Clean Fluid API] Remove lod_reset (#48516)

* Resolve conflicts between remove_lod_reset and the latest develop version

* Add lod_reset to no_grad_set_white_list.py
Parent fa0f6938
@@ -70,7 +70,6 @@ __all__ = [
'spectral_norm',
'one_hot',
'autoincreased_step_counter',
'lod_reset',
'clip',
'clip_by_norm',
'merge_selected_rows',
@@ -1385,110 +1384,6 @@ def unsqueeze(input, axes, name=None):
return out
def lod_reset(x, y=None, target_lod=None):
"""
Set the LoD of :attr:`x` to a new one specified by :attr:`y` or
:attr:`target_lod`. When :attr:`y` is provided, :attr:`y.lod` is used as the
target LoD if it is set; otherwise :attr:`y.data` is used. If :attr:`y` is
not provided, the target LoD must be specified by :attr:`target_lod`. When
the target LoD comes from :attr:`y.data` or :attr:`target_lod`, only a
one-level LoD is supported.
.. code-block:: text
* Example 1:
Given a 1-level LoDTensor x:
x.lod = [[ 2, 3, 1 ]]
x.data = [[1.0], [2.0], [3.0], [4.0], [5.0], [6.0]]
x.dims = [6, 1]
target_lod: [4, 2]
then we get a 1-level LoDTensor:
out.lod = [[4, 2]]
out.data = [[1.0], [2.0], [3.0], [4.0], [5.0], [6.0]]
out.dims = [6, 1]
* Example 2:
Given a 1-level LoDTensor x:
x.lod = [[2, 3, 1]]
x.data = [[1.0], [2.0], [3.0], [4.0], [5.0], [6.0]]
x.dims = [6, 1]
y is a Tensor:
y.data = [[2, 4]]
y.dims = [1, 2]
then we get a 1-level LoDTensor:
out.lod = [[2, 4]]
out.data = [[1.0], [2.0], [3.0], [4.0], [5.0], [6.0]]
out.dims = [6, 1]
* Example 3:
Given a 1-level LoDTensor x:
x.lod = [[2, 3, 1]]
x.data = [[1.0], [2.0], [3.0], [4.0], [5.0], [6.0]]
x.dims = [6, 1]
y is a 2-level LoDTensor:
y.lod = [[2, 2], [2, 2, 1, 1]]
y.data = [[1.1], [2.1], [3.1], [4.1], [5.1], [6.1]]
y.dims = [6, 1]
then we get a 2-level LoDTensor:
out.lod = [[2, 2], [2, 2, 1, 1]]
out.data = [[1.0], [2.0], [3.0], [4.0], [5.0], [6.0]]
out.dims = [6, 1]
Args:
x (Variable): Input variable which could be a Tensor or LoDTensor.
The data type should be int32, int64, float32 or float64.
y (Variable, optional): If provided, the output's LoD is derived from :attr:`y`.
If y's LoD level > 0, the data type can be any type.
If y's LoD level = 0, the data type should be int32.
target_lod (list|tuple, optional): A one-level LoD used as the target LoD
when :attr:`y` is not provided.
Returns:
Variable: Output variable with LoD specified by this layer.
Raises:
ValueError: If :attr:`y` and :attr:`target_lod` are both None.
Examples:
.. code-block:: python
import paddle.fluid as fluid
x = fluid.layers.data(name='x', shape=[10])
y = fluid.layers.data(name='y', shape=[10, 20], lod_level=2)
out = fluid.layers.lod_reset(x=x, y=y)
"""
check_variable_and_dtype(
x, 'x', ['float32', 'float64', 'int32', 'int64'], 'lod_reset'
)
helper = LayerHelper("lod_reset", **locals())
out = helper.create_variable_for_type_inference(dtype=x.dtype)
if y is not None:
check_type(y, 'y', (Variable), 'lod_reset')
# TODO: check y.lod_level = 0 dtype
helper.append_op(
type="lod_reset", inputs={'X': x, 'Y': y}, outputs={'Out': out}
)
elif target_lod is not None:
helper.append_op(
type="lod_reset",
inputs={'X': x},
attrs={'target_lod': target_lod},
outputs={'Out': out},
)
else:
raise ValueError("y and target_lod should not both be None.")
return out
def _logical_op(op_name, x, y, out=None, name=None, binary_op=True):
if _non_static_mode():
op = getattr(_legacy_C_ops, op_name)
......
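For readers unfamiliar with length-based LoD, the removed docstring's Example 1 can be mirrored with plain NumPy: resetting the LoD from [2, 3, 1] to [4, 2] only changes how the six rows are partitioned into sequences, never the data itself. This is a minimal illustrative sketch, not the Paddle API; the helper split_by_lengths is hypothetical.

import numpy as np

def split_by_lengths(data, lengths):
    # Partition flat rows into sequences according to a length-based LoD.
    offsets = np.cumsum([0] + list(lengths))
    return [data[offsets[i]:offsets[i + 1]] for i in range(len(lengths))]

x = np.array([[1.0], [2.0], [3.0], [4.0], [5.0], [6.0]])  # x.data, dims = [6, 1]
print(split_by_lengths(x, [2, 3, 1]))  # original LoD: sequences of 2, 3 and 1 rows
print(split_by_lengths(x, [4, 2]))     # after resetting to target_lod = [4, 2]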
@@ -500,7 +500,6 @@ set(TEST_OPS_WITH_GC
test_gather_op
test_gather_nd_op
test_linear_chain_crf_op
test_lod_reset_op
test_lookup_table_op
test_mean_op
test_pad2d_op
......
@@ -2593,25 +2593,6 @@ class TestBook(LayerTest):
out = paddle.nn.functional.square_error_cost(input=x, label=y)
return out
def test_lod_reset(self):
# TODO(minqiyang): dygraph does not support lod yet
with self.static_graph():
# case 1
x = layers.data(name='x', shape=[10], dtype='float32')
y = layers.data(
name='y', shape=[10, 20], dtype='float32', lod_level=2
)
z = layers.lod_reset(x=x, y=y)
self.assertTrue(z.lod_level == 2)
# case 2
lod_tensor_in = layers.data(name='lod_in', shape=[1], dtype='int32')
z = layers.lod_reset(x=x, y=lod_tensor_in)
self.assertTrue(z.lod_level == 1)
# case 3
z = layers.lod_reset(x=x, target_lod=[1, 2, 3])
self.assertTrue(z.lod_level == 1)
return z
def test_affine_grid(self):
with self.static_graph():
data = layers.data(name='data', shape=[2, 3, 3], dtype="float32")
......
@@ -17,9 +17,6 @@ import unittest
import numpy as np
from op_test import OpTest
import paddle.fluid as fluid
from paddle.fluid import Program, program_guard
class TestLodResetOpByAttr(OpTest):
def setUp(self):
@@ -134,24 +131,5 @@ class TestLodAppendOpByAttr(OpTest):
self.check_grad(["X"], "Out", check_dygraph=False)
class TestLodResetOpError(unittest.TestCase):
def test_errors(self):
with program_guard(Program(), Program()):
# The input must be a Variable.
x1 = np.array([0.9383, 0.1983, 3.2, 1.2]).astype("float64")
target_lod = [2, 2]
self.assertRaises(TypeError, fluid.layers.lod_reset, x1, target_lod)
# Input(x) dtype must be float32 or float64 or int32 or int64
for dtype in ["bool", "float16"]:
x2 = fluid.layers.data(
name='x2' + dtype, shape=[4], dtype=dtype
)
y2 = fluid.layers.data(
name='y2' + dtype, shape=[4], dtype='int32', lod_level=2
)
self.assertRaises(TypeError, fluid.layers.lod_reset, x2, y2)
if __name__ == '__main__':
unittest.main()