提交 a07d9f3f 编写于 作者: S Stefano Cappellini 提交者: François Chollet

Fixes automatic doc generation problem with nested lists. Adds a new test (#10212)

* Fixes automatic doc generation problem with indented lists. Adds a new test

* Some style fixes on doc automatic generation files

* Fixes a bad space in convolutional_recurrent.py

* Changes the test_doc_auto_generation in order to include a doc string taken from the codebase. Allows text lines following nested lists
上级 5a48df22
......@@ -453,23 +453,42 @@ def count_leading_spaces(s):
return 0
def process_list_block(docstring, anchor, marker):
    """Extract and format the indented list block that follows `anchor`.

    The block (everything up to the next blank line) is replaced in the
    docstring by `marker` so it can be reinjected after global processing,
    and is reformatted as a Markdown list (`name: desc` -> `- __name__: desc`).

    # Arguments
        docstring: The docstring to process.
        anchor: Section header (e.g. '# Arguments') preceding the block.
        marker: Placeholder string used for later reinjection.

    # Returns
        Tuple `(docstring, block)`: the docstring with the block replaced
        by `marker`, and the reformatted block ('' if `anchor` is absent).
    """
    anchor_pos = docstring.find(anchor)
    # +1 skips the newline terminating the anchor line.
    starting_point = anchor_pos + len(anchor) + 1
    block = ''
    if anchor_pos > -1:
        ending_point = docstring.find('\n\n', starting_point)
        # find() returns -1 when no blank line follows the block: take
        # everything to the end instead of silently dropping the last
        # character via a [start:-1] slice.
        if ending_point == -1:
            ending_point = len(docstring)
        block = docstring[starting_point:ending_point]
        if block:
            # Place marker for later reinjection. Guarding on a non-empty
            # block avoids str.replace('') inserting the marker everywhere.
            docstring = docstring.replace(block, marker)
            # White spaces to be removed in order to correctly align the block.
            first_char = re.search(r'\S', block)
            # A whitespace-only block has nothing to realign or format.
            if first_char is not None:
                leading_spaces = first_char.start()
                # Remove the computed number of leading white spaces
                # from each line.
                lines = [re.sub('^' + ' ' * leading_spaces, '', line)
                         for line in block.split('\n')]
                # Format docstring lists: `name: desc` roots become
                # Markdown list items.
                lines = [re.sub(r'^([^\s\\\(]+):(.*)', r'- __\1__:\2', line)
                         for line in lines]
                block = '\n'.join(lines)
    return docstring, block
def process_list_block(docstring, starting_point, leading_spaces, marker):
    """Format one indented list block of a docstring as a Markdown list.

    # Arguments
        docstring: Full docstring being processed.
        starting_point: Index in `docstring` where the block's content
            starts (just past the section title line).
        leading_spaces: Number of leading spaces shared by the section,
            stripped from every line to realign the block.
        marker: Placeholder inserted in place of the block for later
            reinjection.

    # Returns
        Tuple `(docstring, block)`: the docstring with the block replaced
        by `marker`, and the block reformatted as Markdown.
    """
    # Block runs until the first blank line; -1 means "until the end".
    ending_point = docstring.find('\n\n', starting_point)
    # NOTE(review): `ending_point - 1` excludes the character just before
    # the blank line -- presumably the block's trailing newline; confirm
    # against a docstring whose block ends mid-line.
    block = docstring[starting_point:None if ending_point == -1 else ending_point - 1]
    # Place marker for later reinjection.
    docstring = docstring.replace(block, marker)
    lines = block.split('\n')
    # Remove the computed number of leading white spaces from each line.
    lines = [re.sub('^' + ' ' * leading_spaces, '', line) for line in lines]
    # Usually lines have at least 4 additional leading spaces.
    # These have to be removed, but first the list roots have to be detected.
    # NOTE(review): the comment above says 4 spaces but the pattern below
    # contains a single space -- whitespace may have been collapsed in this
    # copy of the file; verify against the upstream source.
    top_level_regex = r'^ ([^\s\\\(]+):(.*)'
    top_level_replacement = r'- __\1__:\2'
    lines = [re.sub(top_level_regex, top_level_replacement, line) for line in lines]
    # All the other lines get simply the 4 leading space (if present) removed
    lines = [re.sub(r'^ ', '', line) for line in lines]
    # Fix text lines after lists: a line indented less than the innermost
    # open list ends that list, so separate it with a blank line.
    indent = 0          # indentation level of the innermost open list
    text_block = False  # True while inside a post-list text paragraph
    for i in range(len(lines)):
        line = lines[i]
        spaces = re.search(r'\S', line)
        if spaces:
            # If it is a list element
            if line[spaces.start()] == '-':
                indent = spaces.start() + 1
                if text_block:
                    # List resuming right after a text paragraph:
                    # keep the two separated by a blank line.
                    text_block = False
                    lines[i] = '\n' + line
            elif spaces.start() < indent:
                # Dedented non-list line: closes the list and starts a
                # plain text paragraph.
                text_block = True
                indent = spaces.start()
                lines[i] = '\n' + line
        else:
            # A blank line resets the list-tracking state.
            text_block = False
            indent = 0
    block = '\n'.join(lines)
    return docstring, block
......@@ -513,21 +532,34 @@ def process_docstring(docstring):
tmp = tmp[index:]
# Format docstring lists.
docstring, arguments = process_list_block(docstring, "# Arguments", "$ARGUMENTS$")
docstring, returns = process_list_block(docstring, "# Returns", "$RETURNS$")
section_regex = r'\n( +)# (.*)\n'
section_idx = re.search(section_regex, docstring)
shift = 0
sections = {}
while section_idx and section_idx.group(2):
anchor = section_idx.group(2)
leading_spaces = len(section_idx.group(1))
shift += section_idx.end()
marker = '$' + anchor.replace(' ', '_') + '$'
docstring, content = process_list_block(docstring,
shift,
leading_spaces,
marker)
sections[marker] = content
section_idx = re.search(section_regex, docstring[shift:])
# Format docstring section titles.
docstring = re.sub(r'\n(\s+)# (.*)\n',
r'\n\1__\2__\n\n',
docstring)
# Strip all leading spaces.
# Strip all remaining leading spaces.
lines = docstring.split('\n')
docstring = '\n'.join([line.lstrip(' ') for line in lines])
# Reinject arguments and returns blocks.
docstring = docstring.replace("$ARGUMENTS$", arguments)
docstring = docstring.replace("$RETURNS$", returns)
# Reinject list blocks.
for marker, content in sections.items():
docstring = docstring.replace(marker, content)
# Reinject code blocks.
for i, code_block in enumerate(code_blocks):
......@@ -581,94 +613,95 @@ def render_function(function, method=True):
return '\n\n'.join(subblocks)
readme = read_file('../README.md')
index = read_file('templates/index.md')
index = index.replace('{{autogenerated}}', readme[readme.find('##'):])
with open('sources/index.md', 'w') as f:
f.write(index)
print('Starting autogeneration.')
for page_data in PAGES:
blocks = []
classes = page_data.get('classes', [])
for module in page_data.get('all_module_classes', []):
module_classes = []
for name in dir(module):
if name[0] == '_' or name in EXCLUDE:
continue
module_member = getattr(module, name)
if inspect.isclass(module_member):
cls = module_member
if cls.__module__ == module.__name__:
if cls not in module_classes:
module_classes.append(cls)
module_classes.sort(key=lambda x: id(x))
classes += module_classes
for element in classes:
if not isinstance(element, (list, tuple)):
element = (element, [])
cls = element[0]
subblocks = []
signature = get_class_signature(cls)
subblocks.append('<span style="float:right;">' +
class_to_source_link(cls) + '</span>')
if element[1]:
subblocks.append('## ' + cls.__name__ + ' class\n')
if __name__ == '__main__':
readme = read_file('../README.md')
index = read_file('templates/index.md')
index = index.replace('{{autogenerated}}', readme[readme.find('##'):])
with open('sources/index.md', 'w') as f:
f.write(index)
print('Starting autogeneration.')
for page_data in PAGES:
blocks = []
classes = page_data.get('classes', [])
for module in page_data.get('all_module_classes', []):
module_classes = []
for name in dir(module):
if name[0] == '_' or name in EXCLUDE:
continue
module_member = getattr(module, name)
if inspect.isclass(module_member):
cls = module_member
if cls.__module__ == module.__name__:
if cls not in module_classes:
module_classes.append(cls)
module_classes.sort(key=lambda x: id(x))
classes += module_classes
for element in classes:
if not isinstance(element, (list, tuple)):
element = (element, [])
cls = element[0]
subblocks = []
signature = get_class_signature(cls)
subblocks.append('<span style="float:right;">' +
class_to_source_link(cls) + '</span>')
if element[1]:
subblocks.append('## ' + cls.__name__ + ' class\n')
else:
subblocks.append('### ' + cls.__name__ + '\n')
subblocks.append(code_snippet(signature))
docstring = cls.__doc__
if docstring:
subblocks.append(process_docstring(docstring))
methods = collect_class_methods(cls, element[1])
if methods:
subblocks.append('\n---')
subblocks.append('## ' + cls.__name__ + ' methods\n')
subblocks.append('\n---\n'.join(
[render_function(method, method=True) for method in methods]))
blocks.append('\n'.join(subblocks))
functions = page_data.get('functions', [])
for module in page_data.get('all_module_functions', []):
module_functions = []
for name in dir(module):
if name[0] == '_' or name in EXCLUDE:
continue
module_member = getattr(module, name)
if inspect.isfunction(module_member):
function = module_member
if module.__name__ in function.__module__:
if function not in module_functions:
module_functions.append(function)
module_functions.sort(key=lambda x: id(x))
functions += module_functions
for function in functions:
blocks.append(render_function(function, method=False))
if not blocks:
raise RuntimeError('Found no content for page ' +
page_data['page'])
mkdown = '\n----\n\n'.join(blocks)
# save module page.
# Either insert content into existing page,
# or create page otherwise
page_name = page_data['page']
path = os.path.join('sources', page_name)
if os.path.exists(path):
template = read_file(path)
assert '{{autogenerated}}' in template, ('Template found for ' + path +
' but missing {{autogenerated}} tag.')
mkdown = template.replace('{{autogenerated}}', mkdown)
print('...inserting autogenerated content into template:', path)
else:
subblocks.append('### ' + cls.__name__ + '\n')
subblocks.append(code_snippet(signature))
docstring = cls.__doc__
if docstring:
subblocks.append(process_docstring(docstring))
methods = collect_class_methods(cls, element[1])
if methods:
subblocks.append('\n---')
subblocks.append('## ' + cls.__name__ + ' methods\n')
subblocks.append('\n---\n'.join(
[render_function(method, method=True) for method in methods]))
blocks.append('\n'.join(subblocks))
functions = page_data.get('functions', [])
for module in page_data.get('all_module_functions', []):
module_functions = []
for name in dir(module):
if name[0] == '_' or name in EXCLUDE:
continue
module_member = getattr(module, name)
if inspect.isfunction(module_member):
function = module_member
if module.__name__ in function.__module__:
if function not in module_functions:
module_functions.append(function)
module_functions.sort(key=lambda x: id(x))
functions += module_functions
for function in functions:
blocks.append(render_function(function, method=False))
if not blocks:
raise RuntimeError('Found no content for page ' +
page_data['page'])
mkdown = '\n----\n\n'.join(blocks)
# save module page.
# Either insert content into existing page,
# or create page otherwise
page_name = page_data['page']
path = os.path.join('sources', page_name)
if os.path.exists(path):
template = read_file(path)
assert '{{autogenerated}}' in template, ('Template found for ' + path +
' but missing {{autogenerated}} tag.')
mkdown = template.replace('{{autogenerated}}', mkdown)
print('...inserting autogenerated content into template:', path)
else:
print('...creating new page with autogenerated content:', path)
subdir = os.path.dirname(path)
if not os.path.exists(subdir):
os.makedirs(subdir)
with open(path, 'w') as f:
f.write(mkdown)
shutil.copyfile('../CONTRIBUTING.md', 'sources/contributing.md')
print('...creating new page with autogenerated content:', path)
subdir = os.path.dirname(path)
if not os.path.exists(subdir):
os.makedirs(subdir)
with open(path, 'w') as f:
f.write(mkdown)
shutil.copyfile('../CONTRIBUTING.md', 'sources/contributing.md')
......@@ -868,7 +868,7 @@ class ConvLSTM2D(ConvRNN2D):
5D tensor with shape:
`(samples, time, rows, cols, channels)`
# Output shape
# Output shape
- if `return_sequences`
- if data_format='channels_first'
5D tensor with shape:
......
......@@ -510,9 +510,9 @@ class ImageDataGenerator(object):
`(-width_shift_range, +width_shift_range)`
- With `width_shift_range=2` possible values
are integers `[-1, 0, +1]`,
same as with `width_shift_range=[-1, 0, +1]`,
while with `width_shift_range=1.0` possible values are floats in
the interval [-1.0, +1.0).
same as with `width_shift_range=[-1, 0, +1]`,
while with `width_shift_range=1.0` possible values are floats in
the interval [-1.0, +1.0).
height_shift_range: Float, 1-D array-like or int
- float: fraction of total height, if < 1, or pixels if >= 1.
- 1-D array-like: random elements from the array.
......@@ -520,9 +520,9 @@ class ImageDataGenerator(object):
`(-height_shift_range, +height_shift_range)`
- With `height_shift_range=2` possible values
are integers `[-1, 0, +1]`,
same as with `height_shift_range=[-1, 0, +1]`,
while with `height_shift_range=1.0` possible values are floats in
the interval [-1.0, +1.0).
same as with `height_shift_range=[-1, 0, +1]`,
while with `height_shift_range=1.0` possible values are floats in
the interval [-1.0, +1.0).
shear_range: Float. Shear Intensity
(Shear angle in counter-clockwise direction in degrees)
zoom_range: Float or [lower, upper]. Range for random zoom.
......@@ -871,12 +871,12 @@ class ImageDataGenerator(object):
- "input" will be images identical
to input images (mainly used to work with autoencoders).
- If None, no labels are returned
(the generator will only yield batches of image data,
which is useful to use with `model.predict_generator()`,
`model.evaluate_generator()`, etc.).
Please note that in case of class_mode None,
the data still needs to reside in a subdirectory
of `directory` for it to work correctly.
(the generator will only yield batches of image data,
which is useful to use with `model.predict_generator()`,
`model.evaluate_generator()`, etc.).
Please note that in case of class_mode None,
the data still needs to reside in a subdirectory
of `directory` for it to work correctly.
batch_size: Size of the batches of data (default: 32).
shuffle: Whether to shuffle the data (default: True)
seed: Optional random seed for shuffling and transformations.
......@@ -1068,18 +1068,18 @@ class ImageDataGenerator(object):
# Arguments
x: 3D tensor, single image.
transform_parameters: Dictionary with string - parameter pairs
describing the transformation. Currently, the following parameters
from the dictionary are used:
- `'theta'`: Float. Rotation angle in degrees.
- `'tx'`: Float. Shift in the x direction.
- `'ty'`: Float. Shift in the y direction.
- `'shear'`: Float. Shear angle in degrees.
- `'zx'`: Float. Zoom in the x direction.
- `'zy'`: Float. Zoom in the y direction.
- `'flip_horizontal'`: Boolean. Horizontal flip.
- `'flip_vertical'`: Boolean. Vertical flip.
- `'channel_shift_intencity'`: Float. Channel shift intensity.
- `'brightness'`: Float. Brightness shift intensity.
describing the transformation. Currently, the following parameters
from the dictionary are used:
- `'theta'`: Float. Rotation angle in degrees.
- `'tx'`: Float. Shift in the x direction.
- `'ty'`: Float. Shift in the y direction.
- `'shear'`: Float. Shear angle in degrees.
- `'zx'`: Float. Zoom in the x direction.
- `'zy'`: Float. Zoom in the y direction.
- `'flip_horizontal'`: Boolean. Horizontal flip.
- `'flip_vertical'`: Boolean. Vertical flip.
- `'channel_shift_intensity'`: Float. Channel shift intensity.
- `'brightness'`: Float. Brightness shift intensity.
# Returns
A transformed version of the input (same shape).
......
......@@ -268,9 +268,12 @@ class TimeseriesGenerator(Sequence):
stride: Period between successive output sequences.
For stride `s`, consecutive output samples would
be centered around `data[i]`, `data[i+s]`, `data[i+2*s]`, etc.
start_index, end_index: Data points earlier than `start_index`
or later than `end_index` will not be used in the output sequences.
This is useful to reserve part of the data for test or validation.
start_index: Data points earlier than `start_index` will not be used
in the output sequences. This is useful to reserve part of the
data for test or validation.
end_index: Data points later than `end_index` will not be used
in the output sequences. This is useful to reserve part of the
data for test or validation.
shuffle: Whether to shuffle output samples,
or instead draw them in chronological order.
reverse: Boolean: if `true`, timesteps in each output sample will be
......
from docs import autogen
import pytest
test_doc1 = {
'doc': """Base class for recurrent layers.
# Arguments
cell: A RNN cell instance. A RNN cell is a class that has:
- a `call(input_at_t, states_at_t)` method, returning
`(output_at_t, states_at_t_plus_1)`. The call method of the
cell can also take the optional argument `constants`, see
section "Note on passing external constants" below.
- a `state_size` attribute. This can be a single integer
(single state) in which case it is
the size of the recurrent state
(which should be the same as the size of the cell output).
This can also be a list/tuple of integers
(one size per state). In this case, the first entry
(`state_size[0]`) should be the same as
the size of the cell output.
It is also possible for `cell` to be a list of RNN cell instances,
in which cases the cells get stacked on after the other in the RNN,
implementing an efficient stacked RNN.
return_sequences: Boolean. Whether to return the last output
in the output sequence, or the full sequence.
return_state: Boolean. Whether to return the last state
in addition to the output.
go_backwards: Boolean (default False).
If True, process the input sequence backwards and return the
reversed sequence.
stateful: Boolean (default False). If True, the last state
for each sample at index i in a batch will be used as initial
state for the sample of index i in the following batch.
unroll: Boolean (default False).
If True, the network will be unrolled,
else a symbolic loop will be used.
Unrolling can speed-up a RNN,
although it tends to be more memory-intensive.
Unrolling is only suitable for short sequences.
input_dim: dimensionality of the input (integer).
This argument (or alternatively,
the keyword argument `input_shape`)
is required when using this layer as the first layer in a model.
input_length: Length of input sequences, to be specified
when it is constant.
This argument is required if you are going to connect
`Flatten` then `Dense` layers upstream
(without it, the shape of the dense outputs cannot be computed).
Note that if the recurrent layer is not the first layer
in your model, you would need to specify the input length
at the level of the first layer
(e.g. via the `input_shape` argument)
# Input shape
3D tensor with shape `(batch_size, timesteps, input_dim)`.
# Output shape
- if `return_state`: a list of tensors. The first tensor is
the output. The remaining tensors are the last states,
each with shape `(batch_size, units)`.
- if `return_sequences`: 3D tensor with shape
`(batch_size, timesteps, units)`.
- else, 2D tensor with shape `(batch_size, units)`.
# Masking
This layer supports masking for input data with a variable number
of timesteps. To introduce masks to your data,
use an [Embedding](embeddings.md) layer with the `mask_zero` parameter
set to `True`.
# Note on using statefulness in RNNs
You can set RNN layers to be 'stateful', which means that the states
computed for the samples in one batch will be reused as initial states
for the samples in the next batch. This assumes a one-to-one mapping
between samples in different successive batches.
To enable statefulness:
- specify `stateful=True` in the layer constructor.
- specify a fixed batch size for your model, by passing
if sequential model:
`batch_input_shape=(...)` to the first layer in your model.
else for functional model with 1 or more Input layers:
`batch_shape=(...)` to all the first layers in your model.
This is the expected shape of your inputs
*including the batch size*.
It should be a tuple of integers, e.g. `(32, 10, 100)`.
- specify `shuffle=False` when calling fit().
To reset the states of your model, call `.reset_states()` on either
a specific layer, or on your entire model.
# Note on specifying the initial state of RNNs
Note: that
One: You can specify the initial state of RNN layers symbolically by
calling them with the keyword argument `initial_state`.
Two: The value of `initial_state` should be a tensor or list of
tensors representing
the initial state of the RNN layer.
You can specify the initial state of RNN layers numerically by:
One: calling `reset_states`
- With the keyword argument `states`.
- The value of
`states` should be a numpy array or
list of numpy arrays representing
the initial state of the RNN layer.
# Note on passing external constants to RNNs
You can pass "external" constants to the cell using the `constants`
keyword: argument of `RNN.__call__` (as well as `RNN.call`) method.
This: requires that the `cell.call` method accepts the same keyword argument
`constants`. Such constants can be used to condition the cell
transformation on additional static inputs (not changing over time),
a.k.a. an attention mechanism.
# Examples
```python
# First, let's define a RNN Cell, as a layer subclass.
class MinimalRNNCell(keras.layers.Layer):
def __init__(self, units, **kwargs):
self.units = units
self.state_size = units
super(MinimalRNNCell, self).__init__(**kwargs)
def build(self, input_shape):
self.kernel = self.add_weight(shape=(input_shape[-1], self.units),
initializer='uniform',
name='kernel')
self.recurrent_kernel = self.add_weight(
shape=(self.units, self.units),
initializer='uniform',
name='recurrent_kernel')
self.built = True
def call(self, inputs, states):
prev_output = states[0]
h = K.dot(inputs, self.kernel)
output = h + K.dot(prev_output, self.recurrent_kernel)
return output, [output]
# Let's use this cell in a RNN layer:
cell = MinimalRNNCell(32)
x = keras.Input((None, 5))
layer = RNN(cell)
y = layer(x)
# Here's how to use the cell to build a stacked RNN:
cells = [MinimalRNNCell(32), MinimalRNNCell(64)]
x = keras.Input((None, 5))
layer = RNN(cells)
y = layer(x)
```
""",
'result': '''Base class for recurrent layers.
__Arguments__
- __cell__: A RNN cell instance. A RNN cell is a class that has:
- a `call(input_at_t, states_at_t)` method, returning
`(output_at_t, states_at_t_plus_1)`. The call method of the
cell can also take the optional argument `constants`, see
section "Note on passing external constants" below.
- a `state_size` attribute. This can be a single integer
(single state) in which case it is
the size of the recurrent state
(which should be the same as the size of the cell output).
This can also be a list/tuple of integers
(one size per state). In this case, the first entry
(`state_size[0]`) should be the same as
the size of the cell output.
It is also possible for `cell` to be a list of RNN cell instances,
in which cases the cells get stacked on after the other in the RNN,
implementing an efficient stacked RNN.
- __return_sequences__: Boolean. Whether to return the last output
in the output sequence, or the full sequence.
- __return_state__: Boolean. Whether to return the last state
in addition to the output.
- __go_backwards__: Boolean (default False).
If True, process the input sequence backwards and return the
reversed sequence.
- __stateful__: Boolean (default False). If True, the last state
for each sample at index i in a batch will be used as initial
state for the sample of index i in the following batch.
- __unroll__: Boolean (default False).
If True, the network will be unrolled,
else a symbolic loop will be used.
Unrolling can speed-up a RNN,
although it tends to be more memory-intensive.
Unrolling is only suitable for short sequences.
- __input_dim__: dimensionality of the input (integer).
This argument (or alternatively,
the keyword argument `input_shape`)
is required when using this layer as the first layer in a model.
- __input_length__: Length of input sequences, to be specified
when it is constant.
This argument is required if you are going to connect
`Flatten` then `Dense` layers upstream
(without it, the shape of the dense outputs cannot be computed).
Note that if the recurrent layer is not the first layer
in your model, you would need to specify the input length
at the level of the first layer
(e.g. via the `input_shape` argument)
__Input shape__
3D tensor with shape `(batch_size, timesteps, input_dim)`.
__Output shape__
- if `return_state`: a list of tensors. The first tensor is
the output. The remaining tensors are the last states,
each with shape `(batch_size, units)`.
- if `return_sequences`: 3D tensor with shape
`(batch_size, timesteps, units)`.
- else, 2D tensor with shape `(batch_size, units)`.
__Masking__
This layer supports masking for input data with a variable number
of timesteps. To introduce masks to your data,
use an [Embedding](embeddings.md) layer with the `mask_zero` parameter
set to `True`.
__Note on using statefulness in RNNs__
You can set RNN layers to be 'stateful', which means that the states
computed for the samples in one batch will be reused as initial states
for the samples in the next batch. This assumes a one-to-one mapping
between samples in different successive batches.
To enable statefulness:
- specify `stateful=True` in the layer constructor.
- specify a fixed batch size for your model, by passing
if sequential model:
`batch_input_shape=(...)` to the first layer in your model.
else for functional model with 1 or more Input layers:
`batch_shape=(...)` to all the first layers in your model.
This is the expected shape of your inputs
*including the batch size*.
It should be a tuple of integers, e.g. `(32, 10, 100)`.
- specify `shuffle=False` when calling fit().
To reset the states of your model, call `.reset_states()` on either
a specific layer, or on your entire model.
__Note on specifying the initial state of RNNs__
Note: that
- __One__: You can specify the initial state of RNN layers symbolically by
calling them with the keyword argument `initial_state`.
- __Two__: The value of `initial_state` should be a tensor or list of
tensors representing
the initial state of the RNN layer.
You can specify the initial state of RNN layers numerically by:
- __One__: calling `reset_states`
- With the keyword argument `states`.
- The value of
`states` should be a numpy array or
list of numpy arrays representing
the initial state of the RNN layer.
__Note on passing external constants to RNNs__
You can pass "external" constants to the cell using the `constants`
- __keyword__: argument of `RNN.__call__` (as well as `RNN.call`) method.
- __This__: requires that the `cell.call` method accepts the same keyword argument
`constants`. Such constants can be used to condition the cell
transformation on additional static inputs (not changing over time),
a.k.a. an attention mechanism.
__Examples__
```python
# First, let's define a RNN Cell, as a layer subclass.
class MinimalRNNCell(keras.layers.Layer):
def __init__(self, units, **kwargs):
self.units = units
self.state_size = units
super(MinimalRNNCell, self).__init__(**kwargs)
def build(self, input_shape):
self.kernel = self.add_weight(shape=(input_shape[-1], self.units),
initializer='uniform',
name='kernel')
self.recurrent_kernel = self.add_weight(
shape=(self.units, self.units),
initializer='uniform',
name='recurrent_kernel')
self.built = True
def call(self, inputs, states):
prev_output = states[0]
h = K.dot(inputs, self.kernel)
output = h + K.dot(prev_output, self.recurrent_kernel)
return output, [output]
# Let's use this cell in a RNN layer:
cell = MinimalRNNCell(32)
x = keras.Input((None, 5))
layer = RNN(cell)
y = layer(x)
# Here's how to use the cell to build a stacked RNN:
cells = [MinimalRNNCell(32), MinimalRNNCell(64)]
x = keras.Input((None, 5))
layer = RNN(cells)
y = layer(x)
```
'''}
def test_doc_lists():
    """Nested docstring lists must be rendered as properly nested Markdown."""
    generated = autogen.process_docstring(test_doc1['doc'])
    expected = test_doc1['result']
    assert generated == expected
# Allow running this test module directly, outside a pytest invocation.
if __name__ == '__main__':
    pytest.main([__file__])
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册