Unverified commit 27a9d97c, authored by 石晓伟 and committed by GitHub

fix API.spec conflicts, test=develop, test=document_preview, test=document_fix (#20540)

Parent 4667bba4
@@ -95,12 +95,12 @@ paddle.fluid.io.DataLoader.from_dataset (ArgSpec(args=['dataset', 'places', 'dro
paddle.fluid.io.DataLoader.from_generator (ArgSpec(args=['feed_list', 'capacity', 'use_double_buffer', 'iterable', 'return_list'], varargs=None, keywords=None, defaults=(None, None, True, True, False)), ('document', 'e3bdde36774236c3e381d2218e9cc09e'))
paddle.fluid.io.cache (ArgSpec(args=['reader'], varargs=None, keywords=None, defaults=None), ('document', '1676886070eb607cb608f7ba47be0d3c'))
paddle.fluid.io.map_readers (ArgSpec(args=['func'], varargs='readers', keywords=None, defaults=None), ('document', '2d0903e1d2f00b4f1d6618e6b5310121'))
paddle.fluid.io.buffered (ArgSpec(args=['reader', 'size'], varargs=None, keywords=None, defaults=None), ('document', '0d6186f109feceb99f60ec50a0a624cb'))
paddle.fluid.io.buffered (ArgSpec(args=['reader', 'size'], varargs=None, keywords=None, defaults=None), ('document', 'e095a541160c5dc2994eada9a1c7ad56'))
paddle.fluid.io.compose (ArgSpec(args=[], varargs='readers', keywords='kwargs', defaults=None), ('document', '81c933c8da58041d91f084dcf6322349'))
paddle.fluid.io.chain (ArgSpec(args=[], varargs='readers', keywords=None, defaults=None), ('document', 'e0311508658a7e741fc39feea8be0ad2'))
paddle.fluid.io.shuffle (ArgSpec(args=['reader', 'buf_size'], varargs=None, keywords=None, defaults=None), ('document', '961d0a950cc837c8b13577301dee7bd8'))
paddle.fluid.io.firstn (ArgSpec(args=['reader', 'n'], varargs=None, keywords=None, defaults=None), ('document', 'db83c761a5530a05c1ffe2f6f78198f4'))
paddle.fluid.io.xmap_readers (ArgSpec(args=['mapper', 'reader', 'process_num', 'buffer_size', 'order'], varargs=None, keywords=None, defaults=(False,)), ('document', '9c804a42f8a4dbaa76b3c98e0ab7f796'))
paddle.fluid.io.xmap_readers (ArgSpec(args=['mapper', 'reader', 'process_num', 'buffer_size', 'order'], varargs=None, keywords=None, defaults=(False,)), ('document', '17a1d4e59c4260a9416ff269c5e347a3'))
paddle.fluid.io.multiprocess_reader (ArgSpec(args=['readers', 'use_pipe', 'queue_size'], varargs=None, keywords=None, defaults=(True, 1000)), ('document', '7d8b3a96e592107c893d5d51ce968ba0'))
paddle.fluid.initializer.ConstantInitializer ('paddle.fluid.initializer.ConstantInitializer', ('document', '911263fc30c516c55e89cd72086a23f8'))
paddle.fluid.initializer.ConstantInitializer.__init__ (ArgSpec(args=['self', 'value', 'force_cpu'], varargs=None, keywords=None, defaults=(0.0, False)), ('document', '6adf97f83acf6453d4a6a4b1070f3754'))
@@ -172,7 +172,7 @@ paddle.fluid.layers.split (ArgSpec(args=['input', 'num_or_sections', 'dim', 'nam
paddle.fluid.layers.ctc_greedy_decoder (ArgSpec(args=['input', 'blank', 'input_length', 'padding_value', 'name'], varargs=None, keywords=None, defaults=(None, 0, None)), ('document', '31e0cbec2898efae95853034adadfe2b'))
paddle.fluid.layers.edit_distance (ArgSpec(args=['input', 'label', 'normalized', 'ignored_tokens', 'input_length', 'label_length'], varargs=None, keywords=None, defaults=(True, None, None, None)), ('document', '25f0dd786a98aac31490020725604fe1'))
paddle.fluid.layers.l2_normalize (ArgSpec(args=['x', 'axis', 'epsilon', 'name'], varargs=None, keywords=None, defaults=(1e-12, None)), ('document', '30eeab67154ef09ab3e884117a8d4aee'))
paddle.fluid.layers.matmul (ArgSpec(args=['x', 'y', 'transpose_x', 'transpose_y', 'alpha', 'name'], varargs=None, keywords=None, defaults=(False, False, 1.0, None)), ('document', '3720b4a386585094435993deb028b592'))
paddle.fluid.layers.matmul (ArgSpec(args=['x', 'y', 'transpose_x', 'transpose_y', 'alpha', 'name'], varargs=None, keywords=None, defaults=(False, False, 1.0, None)), ('document', '8de6d8c13f8fa54ac77e51c5f6bc4cf2'))
paddle.fluid.layers.topk (ArgSpec(args=['input', 'k', 'name'], varargs=None, keywords=None, defaults=(None,)), ('document', 'e50940f3ce5a08cc477b72f517491bf3'))
paddle.fluid.layers.warpctc (ArgSpec(args=['input', 'label', 'blank', 'norm_by_times', 'input_length', 'label_length'], varargs=None, keywords=None, defaults=(0, False, None, None)), ('document', '79aaea078ddea57a82ed7906d71dedc7'))
paddle.fluid.layers.sequence_reshape (ArgSpec(args=['input', 'new_dim'], varargs=None, keywords=None, defaults=None), ('document', 'eeb1591cfc854c6ffdac77b376313c44'))
@@ -194,7 +194,7 @@ paddle.fluid.layers.autoincreased_step_counter (ArgSpec(args=['counter_name', 'b
paddle.fluid.layers.reshape (ArgSpec(args=['x', 'shape', 'actual_shape', 'act', 'inplace', 'name'], varargs=None, keywords=None, defaults=(None, None, False, None)), ('document', 'd7a6d59e464a7ef1184eb6caefeb49f1'))
paddle.fluid.layers.squeeze (ArgSpec(args=['input', 'axes', 'name'], varargs=None, keywords=None, defaults=(None,)), ('document', 'ebbac07662a6e22e8e299ced880c7775'))
paddle.fluid.layers.unsqueeze (ArgSpec(args=['input', 'axes', 'name'], varargs=None, keywords=None, defaults=(None,)), ('document', 'b9bd3129d36a70e7c4385df51ff71c62'))
paddle.fluid.layers.lod_reset (ArgSpec(args=['x', 'y', 'target_lod'], varargs=None, keywords=None, defaults=(None, None)), ('document', '74498d37dd622ac472cb36887fce09ea'))
paddle.fluid.layers.lod_reset (ArgSpec(args=['x', 'y', 'target_lod'], varargs=None, keywords=None, defaults=(None, None)), ('document', 'f1f04ae9bdcf8f3adc0658db6904aa0e'))
paddle.fluid.layers.lod_append (ArgSpec(args=['x', 'level'], varargs=None, keywords=None, defaults=None), ('document', '37663c7c179e920838a250ea0e28d909'))
paddle.fluid.layers.lrn (ArgSpec(args=['input', 'n', 'k', 'alpha', 'beta', 'name'], varargs=None, keywords=None, defaults=(5, 1.0, 0.0001, 0.75, None)), ('document', 'fa565b65fb98d3ca82361c79f41b06b2'))
paddle.fluid.layers.pad (ArgSpec(args=['x', 'paddings', 'pad_value', 'name'], varargs=None, keywords=None, defaults=(0.0, None)), ('document', '46b3ada86dd2c79042dca90a55e08f66'))
@@ -269,7 +269,7 @@ paddle.fluid.layers.logical_xor (ArgSpec(args=['x', 'y', 'out', 'name'], varargs
paddle.fluid.layers.logical_not (ArgSpec(args=['x', 'out', 'name'], varargs=None, keywords=None, defaults=(None, None)), ('document', '75fa78bea3ba82366dd99d2f92da56ef'))
paddle.fluid.layers.clip (ArgSpec(args=['x', 'min', 'max', 'name'], varargs=None, keywords=None, defaults=(None,)), ('document', '4ad0d96a149f023cb72199ded4ce6e9d'))
paddle.fluid.layers.clip_by_norm (ArgSpec(args=['x', 'max_norm', 'name'], varargs=None, keywords=None, defaults=(None,)), ('document', 'a5f4917fda557ceb834168cdbec6d51b'))
paddle.fluid.layers.mean (ArgSpec(args=['x', 'name'], varargs=None, keywords=None, defaults=(None,)), ('document', '597257fb94d0597c404a6a5c91ab5258'))
paddle.fluid.layers.mean (ArgSpec(args=['x', 'name'], varargs=None, keywords=None, defaults=(None,)), ('document', 'b817a28920b04ceeb4976aa2562f94df'))
paddle.fluid.layers.mul (ArgSpec(args=['x', 'y', 'x_num_col_dims', 'y_num_col_dims', 'name'], varargs=None, keywords=None, defaults=(1, 1, None)), ('document', 'a91eb670033cd103cd8b24624fef5f69'))
paddle.fluid.layers.sigmoid_cross_entropy_with_logits (ArgSpec(args=['x', 'label', 'ignore_index', 'name', 'normalize'], varargs=None, keywords=None, defaults=(-100, None, False)), ('document', '8cdf9e34f73b6f0ed8b60b59a8207fb6'))
paddle.fluid.layers.maxout (ArgSpec(args=['x', 'groups', 'name'], varargs=None, keywords=None, defaults=(None,)), ('document', '406eee439e41988c8a0304186626a0dd'))
@@ -283,7 +283,7 @@ paddle.fluid.layers.grid_sampler (ArgSpec(args=['x', 'grid', 'name'], varargs=No
paddle.fluid.layers.log_loss (ArgSpec(args=['input', 'label', 'epsilon', 'name'], varargs=None, keywords=None, defaults=(0.0001, None)), ('document', 'ef1701e11d60508fe8f02dd2a8c60bdf'))
paddle.fluid.layers.add_position_encoding (ArgSpec(args=['input', 'alpha', 'beta', 'name'], varargs=None, keywords=None, defaults=(None,)), ('document', 'bd8b28e6c1640b13a42b0524f86f7800'))
paddle.fluid.layers.bilinear_tensor_product (ArgSpec(args=['x', 'y', 'size', 'act', 'name', 'param_attr', 'bias_attr'], varargs=None, keywords=None, defaults=(None, None, None, None)), ('document', '6755168c4b2308e1e4f54cb56fa7dcb2'))
paddle.fluid.layers.merge_selected_rows (ArgSpec(args=['x', 'name'], varargs=None, keywords=None, defaults=(None,)), ('document', 'b2b0e5d5c155ce24bafc38b78cd0b164'))
paddle.fluid.layers.merge_selected_rows (ArgSpec(args=['x', 'name'], varargs=None, keywords=None, defaults=(None,)), ('document', 'e98af04d4e8c94bae899e91f6f3ac523'))
paddle.fluid.layers.get_tensor_from_selected_rows (ArgSpec(args=['x', 'name'], varargs=None, keywords=None, defaults=(None,)), ('document', '2c568321feb4d16c41a83df43f95089d'))
paddle.fluid.layers.lstm (ArgSpec(args=['input', 'init_h', 'init_c', 'max_len', 'hidden_size', 'num_layers', 'dropout_prob', 'is_bidirec', 'is_test', 'name', 'default_initializer', 'seed'], varargs=None, keywords=None, defaults=(0.0, False, False, None, None, -1)), ('document', '5193cf1113f9d8d8f682ee5a5fc8b391'))
paddle.fluid.layers.shuffle_channel (ArgSpec(args=['x', 'group', 'name'], varargs=None, keywords=None, defaults=(None,)), ('document', '50c06087a53aee4c466afe6fca057d2b'))
@@ -332,7 +332,7 @@ paddle.fluid.layers.zeros (ArgSpec(args=['shape', 'dtype', 'force_cpu'], varargs
paddle.fluid.layers.reverse (ArgSpec(args=['x', 'axis'], varargs=None, keywords=None, defaults=None), ('document', '628135603692137d52bcf5a8d8d6816d'))
paddle.fluid.layers.has_inf (ArgSpec(args=['x'], varargs=None, keywords=None, defaults=None), ('document', 'aca8a35516cef98af836fb6a64ac8acb'))
paddle.fluid.layers.has_nan (ArgSpec(args=['x'], varargs=None, keywords=None, defaults=None), ('document', '99f4cf36db08a4e23c8c3857e2af1316'))
paddle.fluid.layers.isfinite (ArgSpec(args=['x'], varargs=None, keywords=None, defaults=None), ('document', 'b9fff4ffc8d11934cde099f4c39bf841'))
paddle.fluid.layers.isfinite (ArgSpec(args=['x'], varargs=None, keywords=None, defaults=None), ('document', '9e40eab383fbe2d76e065345cb27f140'))
paddle.fluid.layers.range (ArgSpec(args=['start', 'end', 'step', 'dtype'], varargs=None, keywords=None, defaults=None), ('document', '3e982b788b95f959eafeeb0696a3cbde'))
paddle.fluid.layers.linspace (ArgSpec(args=['start', 'stop', 'num', 'dtype'], varargs=None, keywords=None, defaults=None), ('document', '156e653497804566a43f6a53d48b08c4'))
paddle.fluid.layers.zeros_like (ArgSpec(args=['x', 'out'], varargs=None, keywords=None, defaults=(None,)), ('document', '5432543db3ff898451aa3af6bb38ab56'))
......
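Each API.spec entry pairs a signature with a ('document', '<hash>') value, which is why docstring-only edits like the ones below still touch this file and can conflict. Assuming the hash is simply the MD5 hex digest of the docstring text (the exact normalization used by the spec tooling may differ, so treat the computed value as indicative only), the mapping can be reproduced with a short sketch:

    import hashlib

    import paddle.fluid as fluid

    # Assumption: the 'document' field is the MD5 of the raw docstring text.
    doc = fluid.layers.matmul.__doc__ or ""
    print(hashlib.md5(doc.encode("utf-8")).hexdigest())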
@@ -6643,7 +6643,7 @@ def matmul(x, y, transpose_x=False, transpose_y=False, alpha=1.0, name=None):
transpose_x (bool): Whether to transpose :math:`x` before multiplication.
transpose_y (bool): Whether to transpose :math:`y` before multiplication.
alpha (float): The scale of output. Default 1.0.
name(str|None): A name for this layer(optional). If set None, the layer
name(str|optional): A name for this layer(optional). If set None, the layer
will be named automatically.
Returns:
@@ -6654,30 +6654,57 @@ def matmul(x, y, transpose_x=False, transpose_y=False, alpha=1.0, name=None):
# Examples to clarify shapes of the inputs and output
# x: [B, ..., M, K], y: [B, ..., K, N]
# fluid.layers.matmul(x, y) # out: [B, ..., M, N]
# fluid.layers.matmul(x, y)
# out: [B, ..., M, N]
# x: [B, M, K], y: [B, K, N]
# fluid.layers.matmul(x, y) # out: [B, M, N]
# fluid.layers.matmul(x, y)
# out: [B, M, N]
# x: [B, M, K], y: [K, N]
# fluid.layers.matmul(x, y) # out: [B, M, N]
# fluid.layers.matmul(x, y)
# out: [B, M, N]
# x: [M, K], y: [K, N]
# fluid.layers.matmul(x, y) # out: [M, N]
# fluid.layers.matmul(x, y)
# out: [M, N]
# x: [B, M, K], y: [K]
# fluid.layers.matmul(x, y) # out: [B, M]
# fluid.layers.matmul(x, y)
# out: [B, M]
# x: [K], y: [K]
# fluid.layers.matmul(x, y) # out: [1]
# fluid.layers.matmul(x, y)
# out: [1]
# x: [M], y: [N]
# fluid.layers.matmul(x, y, True, True) # out: [M, N]
# fluid.layers.matmul(x, y, True, True)
# out: [M, N]
import paddle.fluid as fluid
x = fluid.layers.data(name='x', shape=[2, 3], dtype='float32')
y = fluid.layers.data(name='y', shape=[3, 2], dtype='float32')
out = fluid.layers.matmul(x, y, True, True)
import numpy
# Graph Organizing
x = fluid.data(name='x', shape=[2, 3], dtype='float32')
y = fluid.data(name='y', shape=[3, 2], dtype='float32')
output = fluid.layers.matmul(x, y, True, True)
# Create an executor using CPU as an example
exe = fluid.Executor(fluid.CPUPlace())
# Execute
input_x = numpy.ones([2, 3]).astype(numpy.float32)
input_y = numpy.ones([3, 2]).astype(numpy.float32)
res, = exe.run(fluid.default_main_program(),
feed={'x':input_x, 'y':input_y},
fetch_list=[output])
print(res)
'''
Output Value:
[[2. 2. 2.]
[2. 2. 2.]
[2. 2. 2.]]
'''
"""
def __check_input(x, y):
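The example above exercises transpose_x / transpose_y but not alpha, which scales the whole product (alpha * matmul(x, y)). A minimal sketch of that path, following the same feed pattern as the docstring example; the tensor names are illustrative only:

    import numpy
    import paddle.fluid as fluid

    # Graph organizing: scale the matrix product by alpha.
    a = fluid.data(name='mat_a', shape=[2, 3], dtype='float32')
    b = fluid.data(name='mat_b', shape=[3, 2], dtype='float32')
    scaled = fluid.layers.matmul(a, b, alpha=2.0)

    exe = fluid.Executor(fluid.CPUPlace())
    res, = exe.run(fluid.default_main_program(),
                   feed={'mat_a': numpy.ones([2, 3], dtype='float32'),
                         'mat_b': numpy.ones([3, 2], dtype='float32')},
                   fetch_list=[scaled])
    print(res)  # each inner product of ones is 3.0, doubled by alpha -> [[6. 6.] [6. 6.]]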
@@ -8747,6 +8774,9 @@ def lod_reset(x, y=None, target_lod=None):
y.data = [[2, 4]]
y.dims = [1, 3]
target_lod:
This parameter is ignored when y is not None.
then we get a 1-level LoDTensor:
out.lod = [[2, 4]]
out.data = [[1.0], [2.0], [3.0], [4.0], [5.0], [6.0]]
@@ -8764,6 +8794,9 @@ def lod_reset(x, y=None, target_lod=None):
y.data = [[1.1], [2.1], [3.1], [4.1], [5.1], [6.1]]
y.dims = [6, 1]
target_lod:
This parameter is ignored when y is not None.
then we get a 2-level LoDTensor:
out.lod = [[2, 2], [2, 2, 1, 1]]
out.data = [[1.0], [2.0], [3.0], [4.0], [5.0], [6.0]]
@@ -8771,9 +8804,9 @@ def lod_reset(x, y=None, target_lod=None):
Args:
x (Variable): Input variable which could be a Tensor or LoDTensor.
y (Variable|None): If provided, output's LoD would be derived
y (Variable|optional): If provided, output's LoD would be derived
from :attr:`y`.
target_lod (list|tuple|None): One level LoD which should be considered
target_lod (list|tuple|optional): One level LoD which should be considered
as target LoD when :attr:`y` not provided.
Returns:
@@ -8786,9 +8819,35 @@ def lod_reset(x, y=None, target_lod=None):
.. code-block:: python
import paddle.fluid as fluid
x = fluid.layers.data(name='x', shape=[10])
y = fluid.layers.data(name='y', shape=[10, 20], lod_level=2)
out = fluid.layers.lod_reset(x=x, y=y)
import numpy
# Graph Organizing
x = fluid.data(name='x', shape=[6])
y = fluid.data(name='y', shape=[6], lod_level=1)
output = fluid.layers.lod_reset(x=x, y=y)
# Create an executor using CPU as an example
place = fluid.CPUPlace()
exe = fluid.Executor(place)
# Execute
x_tensor = fluid.core.LoDTensor()
x_tensor.set(numpy.ones([6]).astype(numpy.float32), place)
y_ndarray = numpy.ones([6]).astype(numpy.float32)
y_lod = [[2, 2], [2, 2, 1, 1]]
y_tensor = fluid.create_lod_tensor(y_ndarray, y_lod, place)
res, = exe.run(fluid.default_main_program(),
feed={'x':x_tensor, 'y':y_tensor},
fetch_list=[output],
return_numpy=False)
print(res)
# Output Value:
# lod: [[0, 2, 4], [0, 2, 4, 5, 6]]
# dim: 6
# layout: NCHW
# dtype: float
# data: [1 1 1 1 1 1]
"""
helper = LayerHelper("lod_reset", **locals())
out = helper.create_variable_for_type_inference(dtype=x.dtype)
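Because the notes above stress that target_lod is ignored whenever y is supplied, a minimal sketch of the other branch (y omitted, so the output LoD comes straight from target_lod) may help; the feed name is illustrative only:

    import numpy
    import paddle.fluid as fluid

    x = fluid.data(name='seq_in', shape=[6], dtype='float32')
    # With y=None, target_lod supplies the (length-based) LoD directly.
    out = fluid.layers.lod_reset(x=x, target_lod=[2, 4])

    place = fluid.CPUPlace()
    exe = fluid.Executor(place)

    x_tensor = fluid.core.LoDTensor()
    x_tensor.set(numpy.ones([6]).astype(numpy.float32), place)

    res, = exe.run(fluid.default_main_program(),
                   feed={'seq_in': x_tensor},
                   fetch_list=[out],
                   return_numpy=False)
    print(res.lod())  # expected [[0, 2, 6]], the offset form of the lengths [2, 4]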
@@ -14281,9 +14340,27 @@ def mean(x, name=None):
.. code-block:: python
import paddle.fluid as fluid
input = fluid.layers.data(
import numpy
# Graph Organizing
input = fluid.data(
name='data', shape=[2, 3], dtype='float32')
mean = fluid.layers.mean(input)
output = fluid.layers.mean(input)
# Create an executor using CPU as an example
place = fluid.CPUPlace()
exe = fluid.Executor(place)
# Execute
x_ndarray = numpy.ones([2, 3]).astype(numpy.float32)
res, = exe.run(fluid.default_main_program(),
feed={'data':x_ndarray},
fetch_list=[output])
print(res)
'''
Output Value:
[1.]
'''
"""
helper = LayerHelper("mean", **locals())
@@ -14316,11 +14393,47 @@ def merge_selected_rows(x, name=None):
.. code-block:: python
import paddle.fluid as fluid
b = fluid.default_main_program().global_block()
var = b.create_var(
name="X", dtype="float32", persistable=True,
type=fluid.core.VarDesc.VarType.SELECTED_ROWS)
import numpy
place = fluid.CPUPlace()
block = fluid.default_main_program().global_block()
var = block.create_var(name="X2",
dtype="float32",
persistable=True,
type=fluid.core.VarDesc.VarType.SELECTED_ROWS)
y = fluid.layers.merge_selected_rows(var)
z = fluid.layers.get_tensor_from_selected_rows(y)
x_rows = [0, 2, 2, 4, 19]
row_numel = 2
np_array = numpy.ones((len(x_rows), row_numel)).astype("float32")
x = fluid.global_scope().var("X2").get_selected_rows()
x.set_rows(x_rows)
x.set_height(20)
x_tensor = x.get_tensor()
x_tensor.set(np_array, place)
exe = fluid.Executor(place=place)
result = exe.run(fluid.default_main_program(), fetch_list=[z])
print("x_rows: ", x_rows)
print("np_array: ", np_array)
print("result: ", result)
'''
Output Values:
('x_rows: ', [0, 2, 2, 4, 19])
('np_array: ', array([[1., 1.],
[1., 1.],
[1., 1.],
[1., 1.],
[1., 1.]], dtype=float32))
('result: ', [array([[1., 1.],
[2., 2.],
[1., 1.],
[1., 1.]], dtype=float32)])
'''
"""
helper = LayerHelper("merge_selected_rows", **locals())
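The printed result above has four rows because merge_selected_rows accumulates values that share a row index (index 2 appears twice, so its values sum to [2., 2.]). A plain-numpy analogy of that accumulation, shown only to make the semantics explicit:

    from collections import OrderedDict

    import numpy

    rows = [0, 2, 2, 4, 19]
    values = numpy.ones((len(rows), 2), dtype=numpy.float32)

    # Sum values that share a row index, keeping first-occurrence order.
    merged = OrderedDict()
    for r, v in zip(rows, values):
        merged[r] = merged.get(r, 0) + v

    print(list(merged.keys()))                  # [0, 2, 4, 19]
    print(numpy.stack(list(merged.values())))   # [[1. 1.] [2. 2.] [1. 1.] [1. 1.]]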
......
@@ -950,11 +950,14 @@ def has_nan(x):
def isfinite(x):
"""
Test if any of x contains an infinity/NAN number. If all the elements are finite,
Test if any of x contains an infinity / nan number. If all the elements are finite,
returns true, else false.
Note: The data type of the input Tensor / LoDTensor must be one of
int32 / float / double.
Args:
x(variable): The Tensor/LoDTensor to be checked.
x(Variable): The Tensor / LoDTensor to be checked.
Returns:
Variable: The tensor variable storing the output, contains a bool value.
@@ -964,10 +967,19 @@ def isfinite(x):
.. code-block:: python
import paddle.fluid as fluid
var = fluid.layers.data(name="data",
shape=(4, 6),
dtype="float32")
out = fluid.layers.isfinite(var)
import numpy
# Graph Organizing
var = fluid.data(name="data", shape=(4, 6), dtype="float32")
output = fluid.layers.isfinite(var)
# Create an executor using CPU as an example
exe = fluid.Executor(fluid.CPUPlace())
# Execute
img = numpy.ones((4, 6)).astype(numpy.float32)
res, = exe.run(fluid.default_main_program(), feed={'data':img}, fetch_list=[output])
print(res) # Output Value: [ True]
"""
helper = LayerHelper("isfinite", **locals())
out = helper.create_variable_for_type_inference(dtype=x.dtype)
......
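The isfinite example above only shows the all-finite case; a companion sketch for the other branch, where a single inf flips the result (the feed name is illustrative):

    import numpy
    import paddle.fluid as fluid

    var = fluid.data(name='check_in', shape=(4, 6), dtype='float32')
    output = fluid.layers.isfinite(var)

    exe = fluid.Executor(fluid.CPUPlace())

    img = numpy.ones((4, 6)).astype(numpy.float32)
    img[0, 0] = numpy.inf  # one non-finite element is enough
    res, = exe.run(fluid.default_main_program(), feed={'check_in': img}, fetch_list=[output])
    print(res)  # expected [False]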
@@ -280,12 +280,33 @@ def buffered(reader, size):
buffer. Reading from the buffered data reader will proceed as long
as the buffer is not empty.
:param reader: the data reader to read from.
:type reader: callable
:param size: max buffer size.
:type size: int
:returns: the buffered data reader.
Args:
reader (callable): The data reader to read from.
size (int): Max buffer size.
Returns:
callable: The buffered data reader.
Examples:
.. code-block:: python
import paddle.reader as reader
import time
def reader_creator_10(dur):
def reader():
for i in range(10):
time.sleep(dur)
yield i
return reader
for size in range(20):
b = reader.buffered(reader_creator_10(0), size)
c = 0
for i in b():
assert i == c
c += 1
assert c == 10
"""
class EndSignal():
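The docstring example above is written as a correctness check; a plainer usage sketch of paddle.reader.buffered (the reader below is illustrative) looks like this:

    import paddle.reader as reader

    def make_reader():
        def _reader():
            for i in range(10):
                yield i
        return _reader

    # Keep up to 100 pre-read items in a background buffer.
    buffered_reader = reader.buffered(make_reader(), 100)
    print(list(buffered_reader()))  # [0, 1, 2, ..., 9]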
@@ -364,16 +385,51 @@ def xmap_readers(mapper, reader, process_num, buffer_size, order=False):
"""
Use multiple threads to map samples from the reader through a user-defined mapper.
Args:
mapper (callable): a function to map the data from reader.
reader (callable): a data reader which yields the data.
process_num (int): thread number to handle original sample.
buffer_size (int): size of the queue to read data in.
order (bool): whether to keep the data order from original reader.
Parameters:
mapper (callable): A function to map the data from reader.
reader (callable): A data reader which yields the data.
process_num (int): Thread number to handle original sample.
buffer_size (int): Size of the queue to read data in.
order (bool): Whether to keep the data order from original reader.
Default False.
Returns:
callable: a decorated reader with data mapping.
A decorated reader with data mapping.
Example:
.. code-block:: python
import paddle.reader as reader
import time
def reader_creator_10(dur):
def reader():
for i in range(10):
time.sleep(dur)
yield i
return reader
def mapper(x):
return (x + 1)
orders = (True, False)
thread_num = (1, 2, 4, 8, 16)
buffer_size = (1, 2, 4, 8, 16)
for order in orders:
for t_num in thread_num:
for size in buffer_size:
user_reader = reader.xmap_readers(mapper,
reader_creator_10(0),
t_num, size, order)
for n in range(3):
result = list()
for i in user_reader():
result.append(i)
if not order:
result.sort()
for idx, e in enumerate(result):
assert e == mapper(idx)
"""
end = XmapEndSignal()
......
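For completeness, a smaller sketch of xmap_readers than the parameter sweep above: each sample is pushed through mapper on a pool of worker threads, and order=True keeps the original sample order (the mapper and reader names below are illustrative):

    import paddle.reader as reader

    def square(x):
        return x * x

    def make_reader():
        def _reader():
            for i in range(8):
                yield i
        return _reader

    # 2 worker threads, queue size 4, preserve the input order.
    mapped = reader.xmap_readers(square, make_reader(), 2, 4, order=True)
    print(list(mapped()))  # [0, 1, 4, 9, 16, 25, 36, 49]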