未验证 提交 18f921e9 编写于 作者: F feifei-111 提交者: GitHub

fluid API migration : array_read, array_write (#49022)

* del array_write & array_read

* fix import err

* fix import err

* fix example codes
上级 170a31f9
......@@ -56,16 +56,16 @@ class HybridParallelInferenceHelper:
while_op = layers.While(cond, is_test=True)
# init global lod_tensor_array for generation task
arr = layers.array_write(data, step_idx)
arr = paddle.tensor.array_write(data, step_idx)
with while_op.block():
with paddle.fluid.device_guard(f'{device}:all'):
# read data from global lod_tensor_array
element_in_arr = layers.array_read(array=arr, i=step_idx)
element_in_arr = paddle.tensor.array_read(array=arr, i=step_idx)
# write placehold data to global lod_tensor_array,
# it need for send_v2 of lod_tensor_array
paddle.increment(x=step_idx, value=1.0)
layers.array_write(element_in_arr, i=step_idx, array=arr)
paddle.tensor.array_write(element_in_arr, i=step_idx, array=arr)
with paddle.fluid.device_guard(f'{device}:0'):
... some code
......@@ -77,7 +77,7 @@ class HybridParallelInferenceHelper:
# generate some data in while block and write to global lod_tensor_array
# that they are read in next while step.
# we will using send_v2 to send global lod_tensor_array to other pipeline and sync
layers.array_write(other_var, i=step_idx, array=arr)
paddle.tensor.array_write(other_var, i=step_idx, array=arr)
# update cond and assign to cond_int, we will sync cond_int
layers.assign(layers.cast(cond, dtype="int32"), cond_int)
......@@ -128,7 +128,7 @@ class HybridParallelInferenceHelper:
step_idx = layers.fill_constant(
shape=[1], dtype="int64", value=0, force_cpu=False, name="i")
data = layers.array_write(X, step_idx)
data = paddle.tensor.array_write(X, step_idx)
cond_int = layers.fill_constant(shape=[1], dtype="int64", value=0, force_cpu=False, name="cond_int")
cond = paddle.less_than(x=step_idx, y=max_len)
......@@ -136,9 +136,9 @@ class HybridParallelInferenceHelper:
with while_op.block():
with paddle.fluid.device_guard(f'{device}:all'):
input = layers.array_read(array=data, i=step_idx)
input = paddle.tensor.array_read(array=data, i=step_idx)
paddle.increment(x=step_idx, value=1.0)
layers.array_write(input, i=step_idx, array=data)
paddle.tensor.array_write(input, i=step_idx, array=data)
with paddle.fluid.device_guard(f'{device}:0'):
param_attr = paddle.ParamAttr(initializer=paddle.nn.initializer.Constant(1.0))
......@@ -152,7 +152,7 @@ class HybridParallelInferenceHelper:
shape=[5, 2], dtype='float32', attr=param_attr, is_bias=False)
hidden2 = paddle.matmul(hidden1, weight2)
layers.array_write(hidden2, i=step_idx, array=data)
paddle.tensor.array_write(hidden2, i=step_idx, array=data)
# update cond and assign to cond_int, we will sync cond_int
paddle.assign(paddle.less_than(x=step_idx, y=max_len), cond)
......
......@@ -1480,7 +1480,7 @@ class Executor:
adam = paddle.optimizer.Adam()
adam.minimize(loss)
i = paddle.zeros(shape=[1], dtype='int64')
array = paddle.fluid.layers.array_write(x=loss, i=i)
array = paddle.tensor.array_write(x=loss, i=i)
# Run the startup program once and only once.
exe.run(paddle.static.default_startup_program())
......
......@@ -53,8 +53,6 @@ from paddle import _C_ops, _legacy_C_ops
__all__ = [
'Switch',
'array_write',
'array_read',
'StaticRNN',
'Print',
'while_loop',
......@@ -1362,196 +1360,6 @@ def _deal_with_undefined_var(output_vars, loop_vars):
return results
def array_write(x, i, array=None):
    """
    Write the input ``x`` into the i-th position of ``array``
    (:ref:`api_fluid_LoDTensorArray`) and return the modified array.
    If ``array`` is None, a new LoDTensorArray is created and returned.
    This OP is often used together with :ref:`api_fluid_layers_array_read`.

    Args:
        x (Variable): The input data to be written into the array. A
            multi-dimensional Tensor or LoDTensor. Data type: float32,
            float64, int32, int64.
        i (Variable): 1-D Tensor with shape [1] giving the position into
            which ``x`` is written. Data type: int64.
        array (LoDTensorArray, optional): The LoDTensorArray into which
            ``x`` is written. Default is None, in which case a new
            LoDTensorArray is created and returned.

    Returns:
        Variable: The input ``array`` after ``x`` has been written into it.

    Raises:
        TypeError: If ``array`` is given but is not a LOD_TENSOR_ARRAY
            variable (static mode only).

    Examples:
        .. code-block:: python

            import paddle.fluid as fluid
            tmp = fluid.layers.fill_constant(shape=[3, 2], dtype='int64', value=5)
            i = fluid.layers.fill_constant(shape=[1], dtype='int64', value=10)
            # Write tmp into position 10 of arr and return arr.
            arr = fluid.layers.array_write(tmp, i=i)
            # arr is now a LoDTensorArray of length 11; array_read can read
            # back the element at subscript 10.
            item = fluid.layers.array_read(arr, i=i)
    """
    if _non_static_mode():
        # In dygraph mode a LoDTensorArray is emulated by a plain Python
        # list, so validate the inputs and mutate the list directly.
        assert isinstance(
            x, Variable
        ), "The input data 'x' in array_write must be Variable in dygraph mode"
        assert isinstance(
            i, Variable
        ), "The index 'i' in array_write must be Variable in dygraph mode"
        assert i.shape == [
            1
        ], "The shape of index 'i' should be [1] in dygraph mode"
        # Extract the scalar index from the 1-D index tensor.
        i = i.numpy().item(0)
        if array is None:
            array = paddle.tensor.create_array(x.dtype)
        assert isinstance(
            array, list
        ), "The 'array' in array_write must be a list in dygraph mode"
        # Writing is only allowed at an existing slot or one-past-the-end.
        assert i <= len(
            array
        ), "The index 'i' should not be greater than the length of 'array' in dygraph mode"
        if i < len(array):
            array[i] = x
        else:
            array.append(x)
        return array

    # Static-graph path: validate inputs, then append a write_to_array op.
    check_variable_and_dtype(i, 'i', ['int64'], 'array_write')
    check_type(x, 'x', (Variable), 'array_write')
    helper = LayerHelper('array_write', **locals())
    if array is not None:
        if (
            not isinstance(array, Variable)
            or array.type != core.VarDesc.VarType.LOD_TENSOR_ARRAY
        ):
            # Fixed typo in the error message: "vairable" -> "variable".
            raise TypeError(
                "array should be tensor array variable in array_write Op"
            )
    if array is None:
        array = helper.create_variable(
            name="{0}.out".format(helper.name),
            type=core.VarDesc.VarType.LOD_TENSOR_ARRAY,
            dtype=x.dtype,
        )
    helper.append_op(
        type='write_to_array',
        inputs={'X': [x], 'I': [i]},
        outputs={'Out': [array]},
    )
    return array
def array_read(array, i):
    """
    Read the data at the specified position from the input ``array``
    (:ref:`api_fluid_LoDTensorArray`). ``array`` is the input array and
    ``i`` is the read position. This OP is often used together with
    :ref:`api_fluid_layers_array_write`.

    Case 1:
    ::
        Input:
            The shape of the first three tensors is [1], and that of the
            last one is [1, 2]:
                array = ([0.6], [0.1], [0.3], [0.4, 0.2])
            And:
                i = [3]

        Output:
            output = [0.4, 0.2]

    Args:
        array (LoDTensorArray): The input LoDTensorArray.
        i (Variable): 1-D Tensor with shape [1] and dtype int64, giving the
            read position within ``array``.

    Returns:
        Variable: The LoDTensor or Tensor read at position ``i`` of ``array``.

    Raises:
        TypeError: If ``array`` is not a LOD_TENSOR_ARRAY variable
            (static mode only).

    Examples:
        .. code-block:: python

            import paddle.fluid as fluid
            arr = fluid.layers.create_array(dtype='float32')
            tmp = fluid.layers.fill_constant(shape=[3, 2], dtype='int64', value=5)
            i = fluid.layers.fill_constant(shape=[1], dtype='int64', value=10)
            # Write tmp into position 10 of arr; arr then has length 11.
            arr = fluid.layers.array_write(tmp, i, array=arr)
            # Read back the element at subscript 10.
            item = fluid.layers.array_read(arr, i)
    """
    if _non_static_mode():
        # In dygraph mode the LoDTensorArray is a plain Python list, so a
        # read is just list indexing after validating the index tensor.
        assert isinstance(
            array, list
        ), "The 'array' in array_read must be list in dygraph mode"
        assert isinstance(
            i, Variable
        ), "The index 'i' in array_read must be Variable in dygraph mode"
        assert i.shape == [
            1
        ], "The shape of index 'i' should be [1] in dygraph mode"
        # Extract the scalar index from the 1-D index tensor.
        i = i.numpy().item(0)
        return array[i]

    # Static-graph path: validate inputs, then append a read_from_array op.
    check_variable_and_dtype(i, 'i', ['int64'], 'array_read')
    helper = LayerHelper('array_read', **locals())
    if (
        not isinstance(array, Variable)
        or array.type != core.VarDesc.VarType.LOD_TENSOR_ARRAY
    ):
        # Fixed typo in the error message: "vairable" -> "variable".
        raise TypeError("array should be tensor array variable")
    out = helper.create_variable_for_type_inference(dtype=array.dtype)
    helper.append_op(
        type='read_from_array',
        inputs={'X': [array], 'I': [i]},
        outputs={'Out': [out]},
    )
    return out
class ConditionalBlockGuard(BlockGuard):
"""
ConditionalBlockGuard is derived from BlockGuard. It is dedicated for
......
......@@ -18,7 +18,6 @@ import inspect
from .. import core
from ..framework import Variable, unique_name, static_only
from .layer_function_generator import OpProtoHolder
from .control_flow import array_write
from paddle.fluid.dygraph.base import in_declarative_mode
_supported_int_dtype_ = [
......@@ -246,7 +245,7 @@ def monkey_patch_variable():
self.type
)
)
from paddle.tensor.array import array_length
from paddle.tensor.array import array_length, array_write
array_write(x=var, i=array_length(self), array=self)
......
......@@ -218,10 +218,10 @@ def _dynamic_decode_declarative(
else:
# inputs and states of all steps must be saved for backward and training
inputs_arrays = map_structure(
lambda x: control_flow.array_write(x, step_idx), initial_inputs
lambda x: paddle.tensor.array_write(x, step_idx), initial_inputs
)
states_arrays = map_structure(
lambda x: control_flow.array_write(x, step_idx), initial_states
lambda x: paddle.tensor.array_write(x, step_idx), initial_states
)
def _maybe_copy(state, new_state, step_mask):
......@@ -260,11 +260,11 @@ def _dynamic_decode_declarative(
with while_op.block():
if not is_test:
inputs = map_structure(
lambda array: control_flow.array_read(array, step_idx),
lambda array: paddle.tensor.array_read(array, step_idx),
inputs_arrays,
)
states = map_structure(
lambda array: control_flow.array_read(array, step_idx),
lambda array: paddle.tensor.array_read(array, step_idx),
states_arrays,
)
(outputs, next_states, next_inputs, next_finished) = decoder.step(
......@@ -303,7 +303,7 @@ def _dynamic_decode_declarative(
)
map_structure(
lambda x, x_array: control_flow.array_write(
lambda x, x_array: paddle.tensor.array_write(
x, i=step_idx, array=x_array
),
outputs,
......@@ -320,14 +320,14 @@ def _dynamic_decode_declarative(
map_structure(tensor.assign, next_states, global_states)
else:
map_structure(
lambda x, x_array: control_flow.array_write(
lambda x, x_array: paddle.tensor.array_write(
x, i=step_idx, array=x_array
),
next_inputs,
inputs_arrays,
)
map_structure(
lambda x, x_array: control_flow.array_write(
lambda x, x_array: paddle.tensor.array_write(
x, i=step_idx, array=x_array
),
next_states,
......@@ -352,7 +352,7 @@ def _dynamic_decode_declarative(
final_states = global_states
else:
final_states = map_structure(
lambda array: control_flow.array_read(array, step_idx),
lambda array: paddle.tensor.array_read(array, step_idx),
states_arrays,
)
......
......@@ -331,14 +331,15 @@ def tensor_array_to_tensor(input, axis=1, name=None, use_stack=False):
Examples:
.. code-block:: python
import paddle
import paddle.fluid as fluid
import numpy as np
x0 = fluid.layers.assign(np.random.rand(2, 2).astype("float32"))
x1 = fluid.layers.assign(np.random.rand(2, 2).astype("float32"))
i = fluid.layers.fill_constant(shape=[1], dtype="int64", value=0)
array = fluid.layers.create_array(dtype='float32')
fluid.layers.array_write(x0, i, array)
fluid.layers.array_write(x1, i + 1, array)
array = paddle.tensor.create_array(dtype='float32')
paddle.tensor.array_write(x0, i, array)
paddle.tensor.array_write(x1, i + 1, array)
output, output_index = fluid.layers.tensor_array_to_tensor(input=array)
"""
if _non_static_mode():
......
......@@ -163,7 +163,7 @@ def get_program():
)
pred = mlp_start(input)
input_array = fluid.layers.array_write(pred, i)
input_array = paddle.tensor.array_write(pred, i)
# TODO: check whether this annotation is needed
# auto.shard_tensor(input_array,
# dist_attr={
......@@ -177,7 +177,7 @@ def get_program():
while_op = paddle.static.nn.control_flow.While(cond=cond)
with while_op.block():
pre_input = fluid.layers.array_read(array=input_array, i=i)
pre_input = paddle.tensor.array_read(array=input_array, i=i)
auto.shard_tensor(pre_input, _g_process_mesh, [None, None, None])
mlp_while = MLPLayer(
......@@ -190,10 +190,10 @@ def get_program():
# 更新循环条件
i = paddle.increment(x=i, value=1)
fluid.layers.array_write(cur_pred, array=input_array, i=i)
paddle.tensor.array_write(cur_pred, array=input_array, i=i)
paddle.assign(paddle.less_than(x=i, y=loop_len), cond)
end_pred = fluid.layers.array_read(array=input_array, i=i)
end_pred = paddle.tensor.array_read(array=input_array, i=i)
auto.shard_tensor(end_pred, _g_process_mesh, [None, None, None])
mlp_end = MLPLayer(
......
......@@ -51,8 +51,8 @@ class TestCollectiveSendRecv(TestCollectiveRunnerBase):
)
tensor_array = paddle.tensor.create_array(dtype='float32')
i = fluid.layers.fill_constant(shape=[1], dtype='int64', value=0)
fluid.layers.array_write(data1, i, tensor_array)
fluid.layers.array_write(data2, i + 1, tensor_array)
paddle.tensor.array_write(data1, i, tensor_array)
paddle.tensor.array_write(data2, i + 1, tensor_array)
if self.rank == 0:
main_prog.global_block().append_op(
type="send_v2",
......
......@@ -73,7 +73,7 @@ class TestHybridParallelInferenceHelperClass(unittest.TestCase):
shape=[1], dtype="int64", value=0, force_cpu=False, name="i"
)
data = layers.array_write(X, step_idx)
data = paddle.tensor.array_write(X, step_idx)
cond_int = layers.fill_constant(
shape=[1],
......@@ -90,9 +90,9 @@ class TestHybridParallelInferenceHelperClass(unittest.TestCase):
with while_op.block():
with paddle.fluid.device_guard(f'{device}:all'):
input = layers.array_read(array=data, i=step_idx)
input = paddle.tensor.array_read(array=data, i=step_idx)
paddle.increment(x=step_idx, value=1.0)
layers.array_write(input, i=step_idx, array=data)
paddle.tensor.array_write(input, i=step_idx, array=data)
with paddle.fluid.device_guard(f'{device}:0'):
param_attr = paddle.ParamAttr(
......@@ -118,7 +118,7 @@ class TestHybridParallelInferenceHelperClass(unittest.TestCase):
)
hidden2 = paddle.matmul(hidden1, weight2)
layers.array_write(hidden2, i=step_idx, array=data)
paddle.tensor.array_write(hidden2, i=step_idx, array=data)
# update cond and assign to cond_int, we will sync cond_int
paddle.assign(paddle.less_than(x=step_idx, y=max_len), cond)
......
......@@ -228,10 +228,10 @@ class BaseModel(fluid.dygraph.Layer):
enc_cell = paddle.tensor.create_array(dtype="float32")
for i in range(self.num_layers):
index = zero + i
enc_hidden = fluid.layers.array_write(
enc_hidden = paddle.tensor.array_write(
enc_hidden_0, index, array=enc_hidden
)
enc_cell = fluid.layers.array_write(
enc_cell = paddle.tensor.array_write(
enc_cell_0, index, array=enc_cell
)
......@@ -330,10 +330,10 @@ class BaseModel(fluid.dygraph.Layer):
enc_cell = paddle.tensor.create_array(dtype="float32")
for j in range(self.num_layers):
index = zero + j
enc_hidden = fluid.layers.array_write(
enc_hidden = paddle.tensor.array_write(
enc_hidden_0, index, array=enc_hidden
)
enc_cell = fluid.layers.array_write(
enc_cell = paddle.tensor.array_write(
enc_cell_0, index, array=enc_cell
)
......@@ -720,7 +720,7 @@ class AttentionModel(fluid.dygraph.Layer):
print(" ^" * 10, "_change_size_for_array")
print("array : ", array)
for i, state in enumerate(array):
fluid.layers.array_write(func(state), i, array)
paddle.tensor.array_write(func(state), i, array)
return array
......@@ -747,10 +747,10 @@ class AttentionModel(fluid.dygraph.Layer):
enc_cell = paddle.tensor.create_array(dtype="float32")
for i in range(self.num_layers):
index = zero + i
enc_hidden = fluid.layers.array_write(
enc_hidden = paddle.tensor.array_write(
enc_hidden_0, index, array=enc_hidden
)
enc_cell = fluid.layers.array_write(
enc_cell = paddle.tensor.array_write(
enc_cell_0, index, array=enc_cell
)
......
......@@ -36,7 +36,7 @@ def len_with_lod_tensor_array(x):
x = fluid.dygraph.to_variable(x)
i = fluid.layers.fill_constant(shape=[1], dtype='int64', value=0)
arr = fluid.layers.array_write(x, i=i)
arr = paddle.tensor.array_write(x, i=i)
arr_len = len(arr)
return arr_len
......
......@@ -176,7 +176,7 @@ class TestConcatAPIWithLoDTensorArray(unittest.TestCase):
)
for i in range(self.iter_num):
fluid.layers.array_write(input, zero + i, tensor_array)
paddle.tensor.array_write(input, zero + i, tensor_array)
self.out_var = fluid.layers.concat(tensor_array, axis=self.axis)
else:
......@@ -190,7 +190,7 @@ class TestConcatAPIWithLoDTensorArray(unittest.TestCase):
for i in range(self.iter_num):
# Api array_write is not supported in paddle 2.0 yet.
fluid.layers.array_write(input, zero + i, tensor_array)
paddle.tensor.array_write(input, zero + i, tensor_array)
self.out_var = paddle.concat(tensor_array, axis=self.axis)
......
......@@ -142,7 +142,7 @@ class TestStackAPIWithLoDTensorArray(unittest.TestCase):
zero = fluid.layers.fill_constant(shape=[1], value=0, dtype="int64")
for i in range(self.iter_num):
fluid.layers.array_write(input, zero + i, tensor_array)
paddle.tensor.array_write(input, zero + i, tensor_array)
self.out_var = paddle.stack(tensor_array, axis=self.axis)
......@@ -180,7 +180,7 @@ class TestTensorStackAPIWithLoDTensorArray(unittest.TestCase):
zero = fluid.layers.fill_constant(shape=[1], value=0, dtype="int64")
for i in range(self.iter_num):
fluid.layers.array_write(input, zero + i, tensor_array)
paddle.tensor.array_write(input, zero + i, tensor_array)
self.out_var = paddle.stack(tensor_array, axis=self.axis)
......
......@@ -41,12 +41,12 @@ class TestWhileOp(unittest.TestCase):
i = layers.cast(i, 'int64')
i.stop_gradient = True
init = layers.zeros(shape=[10], dtype='float32')
mem_array = layers.array_write(x=init, i=i)
data_array = layers.array_write(x=d0, i=i)
mem_array = paddle.tensor.array_write(x=init, i=i)
data_array = paddle.tensor.array_write(x=d0, i=i)
i = paddle.increment(i)
layers.array_write(d1, i, array=data_array)
paddle.tensor.array_write(d1, i, array=data_array)
i = paddle.increment(i)
layers.array_write(d2, i, array=data_array)
paddle.tensor.array_write(d2, i, array=data_array)
i = layers.zeros(shape=[1], dtype='int32')
i = layers.cast(i, 'int64')
i.stop_gradient = True
......@@ -67,23 +67,23 @@ class TestWhileOp(unittest.TestCase):
while_op = paddle.static.nn.control_flow.While(cond=cond)
while_op2 = paddle.static.nn.control_flow.While(cond=cond2)
with while_op.block():
d = layers.array_read(array=data_array, i=i)
prev = layers.array_read(array=mem_array, i=i)
d = paddle.tensor.array_read(array=data_array, i=i)
prev = paddle.tensor.array_read(array=mem_array, i=i)
result = layers.sums(input=[d, prev])
i = paddle.increment(x=i)
layers.array_write(result, i=i, array=mem_array)
paddle.tensor.array_write(result, i=i, array=mem_array)
paddle.assign(paddle.less_than(x=i, y=array_len), cond)
with while_op2.block():
d2 = layers.array_read(array=data_array, i=j)
prev2 = layers.array_read(array=mem_array, i=j)
d2 = paddle.tensor.array_read(array=data_array, i=j)
prev2 = paddle.tensor.array_read(array=mem_array, i=j)
result2 = layers.sums(input=[d2, prev2])
j = paddle.increment(x=j)
layers.array_write(result2, i=j, array=mem_array)
paddle.tensor.array_write(result2, i=j, array=mem_array)
paddle.assign(paddle.less_than(x=j, y=array_len2), cond2)
sum_result = layers.array_read(array=mem_array, i=j)
sum_result = paddle.tensor.array_read(array=mem_array, i=j)
loss = paddle.mean(sum_result)
return loss, sum_result
......
......@@ -29,19 +29,19 @@ from paddle.fluid.framework import default_main_program
def _test_read_write(x):
i = layers.zeros(shape=[1], dtype='int64')
i.stop_gradient = False
arr = layers.array_write(x=x[0], i=i)
arr = paddle.tensor.array_write(x=x[0], i=i)
i = paddle.increment(x=i)
arr = layers.array_write(x=x[1], i=i, array=arr)
arr = paddle.tensor.array_write(x=x[1], i=i, array=arr)
i = paddle.increment(x=i)
arr = layers.array_write(x=x[2], i=i, array=arr)
arr = paddle.tensor.array_write(x=x[2], i=i, array=arr)
i = layers.zeros(shape=[1], dtype='int64')
i.stop_gradient = False
a0 = layers.array_read(array=arr, i=i)
a0 = paddle.tensor.array_read(array=arr, i=i)
i = paddle.increment(x=i)
a1 = layers.array_read(array=arr, i=i)
a1 = paddle.tensor.array_read(array=arr, i=i)
i = paddle.increment(x=i)
a2 = layers.array_read(array=arr, i=i)
a2 = paddle.tensor.array_read(array=arr, i=i)
mean_a0 = paddle.mean(a0)
mean_a1 = paddle.mean(a1)
......@@ -60,6 +60,7 @@ def _test_read_write(x):
class TestArrayReadWrite(unittest.TestCase):
def test_read_write(self):
paddle.enable_static()
x = [
layers.data(name='x0', shape=[100]),
layers.data(name='x1', shape=[100]),
......@@ -131,39 +132,18 @@ class TestArrayReadWrite(unittest.TestCase):
class TestArrayReadWriteOpError(unittest.TestCase):
def _test_errors(self, use_fluid_api=True):
if use_fluid_api:
with program_guard(Program(), Program()):
x1 = np.random.randn(2, 4).astype('int32')
x2 = fluid.layers.fill_constant(
shape=[1], dtype='int32', value=1
)
x3 = np.random.randn(2, 4).astype('int32')
self.assertRaises(
TypeError, fluid.layers.array_read, array=x1, i=x2
)
self.assertRaises(
TypeError, fluid.layers.array_write, array=x1, i=x2, out=x3
)
else:
with program_guard(Program(), Program()):
x1 = np.random.randn(2, 4).astype('int32')
x2 = paddle.ones(shape=[1], dtype='int32')
x3 = np.random.randn(2, 4).astype('int32')
self.assertRaises(
TypeError, paddle.tensor.array_read, array=x1, i=x2
)
self.assertRaises(
TypeError, paddle.tensor.array_write, array=x1, i=x2, out=x3
)
def test_fluid_api(self):
self._test_errors(use_fluid_api=True)
def test_paddle_api(self):
self._test_errors(use_fluid_api=False)
def test_errors(self):
with program_guard(Program(), Program()):
x1 = np.random.randn(2, 4).astype('int32')
x2 = paddle.ones(shape=[1], dtype='int32')
x3 = np.random.randn(2, 4).astype('int32')
self.assertRaises(
TypeError, paddle.tensor.array_read, array=x1, i=x2
)
self.assertRaises(
TypeError, paddle.tensor.array_write, array=x1, i=x2, out=x3
)
class TestArrayReadWriteApi(unittest.TestCase):
......
......@@ -87,9 +87,9 @@ class TestAssignOpWithLoDTensorArray(unittest.TestCase):
)
z = paddle.add(x=x, y=y)
i = fluid.layers.fill_constant(shape=[1], dtype='int64', value=0)
init_array = fluid.layers.array_write(x=z, i=i)
init_array = paddle.tensor.array_write(x=z, i=i)
array = fluid.layers.assign(init_array)
sums = fluid.layers.array_read(array=init_array, i=i)
sums = paddle.tensor.array_read(array=init_array, i=i)
mean = paddle.mean(sums)
append_backward(mean)
fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": False})
......@@ -141,9 +141,9 @@ class TestAssignOApi(unittest.TestCase):
)
z = paddle.add(x=x, y=y)
i = fluid.layers.fill_constant(shape=[1], dtype='int64', value=0)
init_array = fluid.layers.array_write(x=z, i=i)
init_array = paddle.tensor.array_write(x=z, i=i)
array = paddle.assign(init_array)
sums = fluid.layers.array_read(array=init_array, i=i)
sums = paddle.tensor.array_read(array=init_array, i=i)
mean = paddle.mean(sums)
append_backward(mean)
......
......@@ -63,9 +63,9 @@ def convolutional_neural_network(use_py_reader):
avg_loss = paddle.mean(loss)
acc = paddle.static.accuracy(input=prediction, label=label)
i = fluid.layers.zeros(shape=[1], dtype='int64')
array = fluid.layers.array_write(x=prediction, i=i)
array = paddle.tensor.array_write(x=prediction, i=i)
paddle.increment(i)
fluid.layers.array_write(x=acc, i=i, array=array)
paddle.tensor.array_write(x=acc, i=i, array=array)
return array, img, label, prediction, avg_loss, acc, py_reader
......
......@@ -420,7 +420,7 @@ class TestConcatAPIWithLoDTensorArray(unittest.TestCase):
)
for i in range(self.iter_num):
fluid.layers.array_write(input, zero + i, tensor_array)
paddle.tensor.array_write(input, zero + i, tensor_array)
self.out_var = fluid.layers.concat(tensor_array, axis=self.axis)
else:
......@@ -434,7 +434,7 @@ class TestConcatAPIWithLoDTensorArray(unittest.TestCase):
for i in range(self.iter_num):
# Api array_write is not supported in paddle 2.0 yet.
fluid.layers.array_write(input, zero + i, tensor_array)
paddle.tensor.array_write(input, zero + i, tensor_array)
self.out_var = paddle.concat(tensor_array, axis=self.axis)
......
......@@ -40,7 +40,7 @@ def build_and_run_program(place, batch_size, beam_size, stop_gradient=False):
)
cond = paddle.less_than(x=step_idx, y=max_len)
while_op = paddle.static.nn.control_flow.While(cond)
scores = layers.array_write(x, step_idx)
scores = paddle.tensor.array_write(x, step_idx)
with while_op.block():
bs = layers.cast(paddle.shape(x)[0], "int64")
for _ in range(20):
......@@ -54,7 +54,7 @@ def build_and_run_program(place, batch_size, beam_size, stop_gradient=False):
topk_coordinates.stop_gradient = stop_gradient
score = paddle.gather_nd(x, topk_coordinates)
paddle.increment(x=step_idx, value=1.0)
layers.array_write(score, i=step_idx, array=scores)
paddle.tensor.array_write(score, i=step_idx, array=scores)
length_cond = paddle.less_than(x=step_idx, y=max_len)
layers.assign(length_cond, cond)
......
......@@ -80,14 +80,14 @@ class TestEagerDeletionWhileOpBase(unittest.TestCase):
i.stop_gradient = True
init = layers.zeros(shape=[10], dtype='float32')
mem_array = layers.array_write(x=init, i=i)
data_array = layers.array_write(x=d0, i=i)
mem_array = paddle.tensor.array_write(x=init, i=i)
data_array = paddle.tensor.array_write(x=d0, i=i)
i = paddle.increment(i)
layers.array_write(d1, i, array=data_array)
paddle.tensor.array_write(d1, i, array=data_array)
i = paddle.increment(i)
layers.array_write(d2, i, array=data_array)
paddle.tensor.array_write(d2, i, array=data_array)
i = layers.zeros(shape=[1], dtype='int64')
i.stop_gradient = True
......@@ -106,27 +106,27 @@ class TestEagerDeletionWhileOpBase(unittest.TestCase):
while_op = paddle.static.nn.control_flow.While(cond=cond)
while_op2 = paddle.static.nn.control_flow.While(cond=cond2)
with while_op.block():
d = layers.array_read(array=data_array, i=i)
prev = layers.array_read(array=mem_array, i=i)
d = paddle.tensor.array_read(array=data_array, i=i)
prev = paddle.tensor.array_read(array=mem_array, i=i)
d = paddle.reshape(d, shape=[10])
prev = paddle.reshape(prev, shape=[10])
result = layers.sums(input=[d, prev])
i = paddle.increment(x=i)
layers.array_write(result, i=i, array=mem_array)
paddle.tensor.array_write(result, i=i, array=mem_array)
paddle.assign(paddle.less_than(x=i, y=array_len), cond)
with while_op2.block():
d2 = layers.array_read(array=data_array, i=j)
prev2 = layers.array_read(array=mem_array, i=j)
d2 = paddle.tensor.array_read(array=data_array, i=j)
prev2 = paddle.tensor.array_read(array=mem_array, i=j)
d2 = paddle.reshape(d2, shape=[10])
prev2 = paddle.reshape(prev2, shape=[10])
result2 = layers.sums(input=[d2, prev2])
j = paddle.increment(x=j)
layers.array_write(result2, i=j, array=mem_array)
paddle.tensor.array_write(result2, i=j, array=mem_array)
paddle.assign(paddle.less_than(x=j, y=array_len2), cond2)
sum_result = layers.array_read(array=mem_array, i=j)
sum_result = paddle.tensor.array_read(array=mem_array, i=j)
sum_result.persistable = True
tmp = layers.unsqueeze(sum_result, axes=[0])
tmp = paddle.expand(tmp, [10, -1])
......
......@@ -18,7 +18,8 @@ import numpy as np
import paddle
from paddle.fluid.executor import Executor
from paddle.fluid.layers import array_write, data, mul, zeros
from paddle.fluid.layers import data, mul, zeros
from paddle.tensor import array_write
class TestExecutor(unittest.TestCase):
......
......@@ -35,11 +35,11 @@ class TestFetchLoDTensorArray(unittest.TestCase):
opt = fluid.optimizer.SGD(learning_rate=0.001)
opt.minimize(loss)
array = layers.array_write(x=img, i=i)
array = paddle.tensor.array_write(x=img, i=i)
i = paddle.increment(i)
layers.array_write(x=label, i=i, array=array)
paddle.tensor.array_write(x=label, i=i, array=array)
i = paddle.increment(i)
layers.array_write(x=loss, i=i, array=array)
paddle.tensor.array_write(x=loss, i=i, array=array)
return loss, array
......
......@@ -27,7 +27,7 @@ class TestLoDArrayLength(unittest.TestCase):
def test_array_length(self):
tmp = layers.zeros(shape=[10], dtype='int32')
i = layers.fill_constant(shape=[1], dtype='int64', value=10)
arr = layers.array_write(tmp, i=i)
arr = paddle.tensor.array_write(tmp, i=i)
arr_len = paddle.tensor.array_length(arr)
cpu = core.CPUPlace()
exe = Executor(cpu)
......
......@@ -44,16 +44,16 @@ class TestProfiler(unittest.TestCase):
shape=[1], dtype='int64', force_cpu=True
)
until = layers.fill_constant([1], dtype='int64', value=10)
data_arr = layers.array_write(hidden1, i)
data_arr = paddle.tensor.array_write(hidden1, i)
cond = paddle.less_than(x=counter, y=until)
while_op = paddle.static.nn.control_flow.While(cond=cond)
with while_op.block():
hidden_n = fluid.layers.fc(input=hidden1, size=64, act='relu')
layers.array_write(hidden_n, i, data_arr)
paddle.tensor.array_write(hidden_n, i, data_arr)
paddle.increment(x=counter, value=1)
paddle.assign(paddle.less_than(x=counter, y=until), cond)
hidden_n = layers.array_read(data_arr, i)
hidden_n = paddle.tensor.array_read(data_arr, i)
hidden2 = fluid.layers.fc(input=hidden_n, size=64, act='relu')
predict = fluid.layers.fc(input=hidden2, size=10, act='softmax')
label = fluid.layers.data(name='y', shape=[1], dtype='int64')
......
......@@ -693,7 +693,7 @@ class TestSliceApiWithLoDTensorArray(unittest.TestCase):
arr = paddle.tensor.create_array(dtype="float32")
for i in range(3):
idx = paddle.tensor.array_length(arr)
arr = layers.array_write(x=x[i], i=idx, array=arr)
arr = paddle.tensor.array_write(x=x[i], i=idx, array=arr)
if case_num == 1:
self.sliced_arr = output = arr[0]
......
......@@ -172,7 +172,7 @@ class TestStackAPIWithLoDTensorArray(unittest.TestCase):
zero = fluid.layers.fill_constant(shape=[1], value=0, dtype="int64")
for i in range(self.iter_num):
fluid.layers.array_write(input, zero + i, tensor_array)
paddle.tensor.array_write(input, zero + i, tensor_array)
self.out_var = paddle.stack(tensor_array, axis=self.axis)
......@@ -210,7 +210,7 @@ class TestTensorStackAPIWithLoDTensorArray(unittest.TestCase):
zero = fluid.layers.fill_constant(shape=[1], value=0, dtype="int64")
for i in range(self.iter_num):
fluid.layers.array_write(input, zero + i, tensor_array)
paddle.tensor.array_write(input, zero + i, tensor_array)
self.out_var = paddle.stack(tensor_array, axis=self.axis)
......
......@@ -197,7 +197,7 @@ class TestLoDTensorArrayStack(unittest.TestCase):
idx = fluid.layers.fill_constant(shape=[1], dtype="int64", value=0)
for i, x in enumerate(self.inputs):
x = fluid.layers.assign(x)
fluid.layers.array_write(x, idx + i, array)
paddle.tensor.array_write(x, idx + i, array)
output, output_index = fluid.layers.tensor_array_to_tensor(
input=array, **self.attrs
)
......@@ -239,8 +239,8 @@ class TestTensorArrayToTensorAPI(unittest.TestCase):
x1.stop_gradient = False
i = fluid.layers.fill_constant(shape=[1], dtype="int64", value=0)
array = paddle.tensor.create_array(dtype='float32')
fluid.layers.array_write(x0, i, array)
fluid.layers.array_write(x1, i + 1, array)
paddle.tensor.array_write(x0, i, array)
paddle.tensor.array_write(x1, i + 1, array)
output_stack, output_index_stack = fluid.layers.tensor_array_to_tensor(
input=array, axis=1, use_stack=True
)
......@@ -280,14 +280,14 @@ class TestTensorArrayToTensorAPI(unittest.TestCase):
array = paddle.tensor.create_array(dtype='float32')
inp0 = np.random.rand(2, 3, 4).astype("float32")
x0 = fluid.layers.assign(inp0)
fluid.layers.array_write(x0, zero, array)
paddle.tensor.array_write(x0, zero, array)
def cond(i, end, array):
return paddle.less_than(i, end)
def body(i, end, array):
prev = fluid.layers.array_read(array, i - 1)
fluid.layers.array_write(prev, i, array)
prev = paddle.tensor.array_read(array, i - 1)
paddle.tensor.array_write(prev, i, array)
return i + 1, end, array
_, _, array = paddle.static.nn.while_loop(
......@@ -297,7 +297,7 @@ class TestTensorArrayToTensorAPI(unittest.TestCase):
self.assertTrue(paddle.tensor.array_length(array), 10)
last = fluid.layers.fill_constant(shape=[1], dtype='int64', value=9)
np.testing.assert_array_equal(
fluid.layers.array_read(array, last).numpy(), inp0
paddle.tensor.array_read(array, last).numpy(), inp0
)
......
......@@ -320,20 +320,20 @@ class TestApiWhileLoop_NestedWithBackwardAndLoDTensorArray(unittest.TestCase):
return paddle.less_than(j, array_len2)
def internal_body(j, x, mem_array):
inner_data = layers.array_read(array=data_array, i=j)
inner_prev = layers.array_read(array=mem_array, i=j)
inner_data = paddle.tensor.array_read(array=data_array, i=j)
inner_prev = paddle.tensor.array_read(array=mem_array, i=j)
inner_sum_0 = paddle.add(x=inner_data, y=inner_prev)
inner_sum_1 = paddle.add(x=x, y=inner_sum_0)
j = paddle.increment(x=j)
layers.array_write(inner_sum_1, i=j, array=mem_array)
paddle.tensor.array_write(inner_sum_1, i=j, array=mem_array)
return [j, x, mem_array]
outer_data = layers.array_read(array=data_array, i=i)
outer_prev = layers.array_read(array=mem_array, i=i)
outer_data = paddle.tensor.array_read(array=data_array, i=i)
outer_prev = paddle.tensor.array_read(array=mem_array, i=i)
outer_sum_0 = paddle.add(x=outer_data, y=outer_prev)
outer_sum_1 = paddle.add(x=x, y=outer_sum_0)
i = paddle.increment(x=i)
layers.array_write(outer_sum_1, i=i, array=mem_array)
paddle.tensor.array_write(outer_sum_1, i=i, array=mem_array)
j, x, mem_array = paddle.static.nn.while_loop(
internal_cond, internal_body, [j, x, mem_array]
)
......@@ -350,12 +350,12 @@ class TestApiWhileLoop_NestedWithBackwardAndLoDTensorArray(unittest.TestCase):
i = layers.zeros(shape=[1], dtype='int64')
i.stop_gradient = True
init = layers.zeros(shape=[10], dtype='float32')
mem_array = layers.array_write(x=init, i=i)
data_array = layers.array_write(x=d0, i=i)
mem_array = paddle.tensor.array_write(x=init, i=i)
data_array = paddle.tensor.array_write(x=d0, i=i)
i = paddle.increment(i)
layers.array_write(d1, i, array=data_array)
paddle.tensor.array_write(d1, i, array=data_array)
i = paddle.increment(i)
layers.array_write(d2, i, array=data_array)
paddle.tensor.array_write(d2, i, array=data_array)
i = layers.zeros(shape=[1], dtype='int64')
i.stop_gradient = True
array_len = layers.fill_constant(shape=[1], dtype='int64', value=1)
......@@ -367,7 +367,7 @@ class TestApiWhileLoop_NestedWithBackwardAndLoDTensorArray(unittest.TestCase):
external_cond, external_body, [i, j, x, mem_array]
)
sum_result = layers.array_read(array=mem_array, i=j)
sum_result = paddle.tensor.array_read(array=mem_array, i=j)
mean = paddle.mean(sum_result)
append_backward(mean)
......
......@@ -40,12 +40,12 @@ class TestWhileOp(unittest.TestCase):
i = layers.zeros(shape=[1], dtype='int64')
i.stop_gradient = True
init = layers.zeros(shape=[10], dtype='float32')
mem_array = layers.array_write(x=init, i=i)
data_array = layers.array_write(x=d0, i=i)
mem_array = paddle.tensor.array_write(x=init, i=i)
data_array = paddle.tensor.array_write(x=d0, i=i)
i = paddle.increment(i)
layers.array_write(d1, i, array=data_array)
paddle.tensor.array_write(d1, i, array=data_array)
i = paddle.increment(i)
layers.array_write(d2, i, array=data_array)
paddle.tensor.array_write(d2, i, array=data_array)
i = layers.zeros(shape=[1], dtype='int64')
i.stop_gradient = True
array_len = layers.fill_constant(shape=[1], dtype='int64', value=1)
......@@ -59,23 +59,23 @@ class TestWhileOp(unittest.TestCase):
while_op = paddle.static.nn.control_flow.While(cond=cond)
while_op2 = paddle.static.nn.control_flow.While(cond=cond2)
with while_op.block():
d = layers.array_read(array=data_array, i=i)
prev = layers.array_read(array=mem_array, i=i)
d = paddle.tensor.array_read(array=data_array, i=i)
prev = paddle.tensor.array_read(array=mem_array, i=i)
result = layers.sums(input=[d, prev])
i = paddle.increment(x=i)
layers.array_write(result, i=i, array=mem_array)
paddle.tensor.array_write(result, i=i, array=mem_array)
paddle.assign(paddle.less_than(x=i, y=array_len), cond)
with while_op2.block():
d2 = layers.array_read(array=data_array, i=j)
prev2 = layers.array_read(array=mem_array, i=j)
d2 = paddle.tensor.array_read(array=data_array, i=j)
prev2 = paddle.tensor.array_read(array=mem_array, i=j)
result2 = layers.sums(input=[d2, prev2])
j = paddle.increment(x=j)
layers.array_write(result2, i=j, array=mem_array)
paddle.tensor.array_write(result2, i=j, array=mem_array)
paddle.assign(paddle.less_than(x=j, y=array_len2), cond2)
sum_result = layers.array_read(array=mem_array, i=j)
sum_result = paddle.tensor.array_read(array=mem_array, i=j)
loss = paddle.mean(sum_result)
return loss, sum_result
......
......@@ -49,9 +49,9 @@ class TestAssignOpWithLoDTensorArray(unittest.TestCase):
shape=[100, 10], dtype='float32', value=1)
z = fluid.layers.elementwise_add(x=x, y=y)
i = fluid.layers.fill_constant(shape=[1], dtype='int64', value=0)
init_array = fluid.layers.array_write(x=z, i=i)
init_array = paddle.tensor.array_write(x=z, i=i)
array = fluid.layers.assign(init_array)
sums = fluid.layers.array_read(array=init_array, i=i)
sums = paddle.tensor.array_read(array=init_array, i=i)
mean = paddle.mean(sums)
append_backward(mean)
......
......@@ -39,12 +39,12 @@ class TestWhileOp(unittest.TestCase):
i = layers.zeros(shape=[1], dtype='int64')
i.stop_gradient = True
init = layers.zeros(shape=[10], dtype='float32')
mem_array = layers.array_write(x=init, i=i)
data_array = layers.array_write(x=d0, i=i)
mem_array = paddle.tensor.array_write(x=init, i=i)
data_array = paddle.tensor.array_write(x=d0, i=i)
i = paddle.increment(i)
layers.array_write(d1, i, array=data_array)
paddle.tensor.array_write(d1, i, array=data_array)
i = paddle.increment(i)
layers.array_write(d2, i, array=data_array)
paddle.tensor.array_write(d2, i, array=data_array)
i = layers.zeros(shape=[1], dtype='int64')
i.stop_gradient = True
array_len = layers.fill_constant(shape=[1], dtype='int64', value=1)
......@@ -58,23 +58,23 @@ class TestWhileOp(unittest.TestCase):
while_op = paddle.static.nn.control_flow.While(cond=cond)
while_op2 = paddle.static.nn.control_flow.While(cond=cond2)
with while_op.block():
d = layers.array_read(array=data_array, i=i)
prev = layers.array_read(array=mem_array, i=i)
d = paddle.tensor.array_read(array=data_array, i=i)
prev = paddle.tensor.array_read(array=mem_array, i=i)
result = layers.sums(input=[d, prev])
i = paddle.increment(x=i)
layers.array_write(result, i=i, array=mem_array)
paddle.tensor.array_write(result, i=i, array=mem_array)
paddle.assign(paddle.less_than(x=i, y=array_len), cond)
with while_op2.block():
d2 = layers.array_read(array=data_array, i=j)
prev2 = layers.array_read(array=mem_array, i=j)
d2 = paddle.tensor.array_read(array=data_array, i=j)
prev2 = paddle.tensor.array_read(array=mem_array, i=j)
result2 = layers.sums(input=[d2, prev2])
j = paddle.increment(x=j)
layers.array_write(result2, i=j, array=mem_array)
paddle.tensor.array_write(result2, i=j, array=mem_array)
paddle.assign(paddle.less_than(x=j, y=array_len2), cond2)
sum_result = layers.array_read(array=mem_array, i=j)
sum_result = paddle.tensor.array_read(array=mem_array, i=j)
loss = paddle.mean(sum_result)
return loss, sum_result
......
......@@ -17,15 +17,7 @@ import re
import paddle
from paddle.fluid.data_feeder import convert_dtype
from paddle.fluid.framework import Variable, core
from paddle.fluid.layers import (
Print,
array_read,
array_write,
assign,
cast,
control_flow,
fill_constant,
)
from paddle.fluid.layers import Print, assign, cast, control_flow, fill_constant
from paddle.fluid.layers.control_flow import while_loop
from paddle.fluid.layers.utils import copy_mutable_vars
from paddle.jit.dy2static.utils import (
......@@ -775,8 +767,10 @@ def _run_paddle_pop(array, *args):
return paddle.less_than(i, arr_len)
def body(i, new_array):
item = array_read(array=array, i=i)
array_write(item, paddle.tensor.array_length(new_array), new_array)
item = paddle.tensor.array_read(array=array, i=i)
paddle.tensor.array_write(
item, paddle.tensor.array_length(new_array), new_array
)
i = paddle.increment(i)
return i, new_array
......@@ -787,7 +781,7 @@ def _run_paddle_pop(array, *args):
else:
idx = fill_constant(shape=[1], dtype="int64", value=idx)
pop_item = array_read(array, idx)
pop_item = paddle.tensor.array_read(array, idx)
new_array = _slice_tensor_array(array, 0, idx)
i = idx + 1
......
......@@ -48,7 +48,7 @@ def Assert(cond, data=None, summarize=20, name=None):
number of the elements in the tensors to print.
Args:
cond (Variable): The boolean condition tensor whose numel should be 1.
cond (Tensor): The boolean condition tensor whose numel should be 1.
data (list|tuple, optional): list or tuple of tensors to print when
condition is not true. If it's ``None``, no tensor will be printed.
The default value is ``None``.
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册