Commit d4bdbf8c (unverified)
Authored by Leo Chen on Jan 14, 2020; committed by GitHub on Jan 14, 2020
Polish nn code, test=develop (#22237)
* refine code, test=develop
* reuse contain_var, test=develop
Parent: efcdeb51
Showing 5 changed files with 95 additions and 135 deletions (+95 -135)
python/paddle/fluid/framework.py      +29 -26
python/paddle/fluid/layers/nn.py      +46 -93
python/paddle/fluid/layers/tensor.py  +3 -10
python/paddle/fluid/layers/utils.py   +11 -0
python/paddle/fluid/optimizer.py      +6 -6
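The thread running through all five files is deduplication: several APIs had each grown a local `contain_var` helper (in `reshape`, `crop_tensor`, `expand`, `slice`, `strided_slice`, `uniform_random`, and `fill_constant`), and this commit replaces every copy with a single shared `utils._contain_var`. A minimal sketch of that consolidation; the stand-in `Variable` class below is for illustration only (the real one is `paddle.fluid.framework.Variable`):

# Stand-in for paddle.fluid.framework.Variable, for illustration only.
class Variable(object):
    pass


# Before: several layers each defined a local helper like this (or spelled
# the same check inline as `not all(not isinstance(ele, Variable) ...)`).
def contain_var(one_list):
    for ele in one_list:
        if isinstance(ele, Variable):
            return True
    return False


# After: one shared helper in python/paddle/fluid/layers/utils.py.
def _contain_var(list_or_tuple):
    """
    Check whether list or tuple contains variable.
    """
    for item in list_or_tuple:
        if isinstance(item, Variable):
            return True
    return False


print(_contain_var([1, 2, 3]))           # False: plain ints only
print(_contain_var([1, Variable(), 3]))  # True: a Variable is mixed in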
python/paddle/fluid/framework.py
...
@@ -455,14 +455,14 @@ def name_scope(prefix=None):
     """
     # TODO(panyx0718): Only [0-9a-z].
     # in dygraph we don't need namescope since it will cause mem leak
-    if not in_dygraph_mode():
+    if in_dygraph_mode():
+        yield
+    else:
         assert prefix, "namescope prefix cannot be empty."
         global _name_scope
         _name_scope = _name_scope.child(prefix)
         yield
         _name_scope = _name_scope.parent()
-    else:
-        yield


 def _full_name_scope():
...
@@ -715,10 +715,9 @@ def _getitem_impl_(var, item):
     if (use_strided_slice == True):
         attrs['strides'] = []

     infer_flags = list(1 for i in range(len(slice_axis)))
     # starts
-    if not contain_var(slice_start):
-        attrs['starts'] = slice_start
-    else:
+    if contain_var(slice_start):
         inputs['StartsTensorList'] = get_new_list_tensor(slice_start)
         for i, dim in enumerate(slice_start):
             if isinstance(dim, Variable):
...
@@ -726,10 +725,11 @@ def _getitem_impl_(var, item):
                 infer_flags[i] = -1
             else:
                 attrs['starts'].append(dim)
-    # ends
-    if not contain_var(slice_end):
-        attrs['ends'] = slice_end
     else:
+        attrs['starts'] = slice_start
+
+    # ends
+    if contain_var(slice_end):
         inputs['EndsTensorList'] = get_new_list_tensor(slice_end)
         for i, dim in enumerate(slice_end):
             if isinstance(dim, Variable):
...
@@ -737,11 +737,12 @@ def _getitem_impl_(var, item):
                 infer_flags[i] = -1
             else:
                 attrs['ends'].append(dim)
+    else:
+        attrs['ends'] = slice_end
+
     # strides
     if use_strided_slice == True:
-        if not contain_var(slice_step):
-            attrs['strides'] = slice_step
-        else:
+        if contain_var(slice_step):
             inputs['StridesTensorList'] = get_new_list_tensor(slice_step)
             for i, dim in enumerate(slice_step):
                 if isinstance(dim, Variable):
...
@@ -749,6 +750,8 @@ def _getitem_impl_(var, item):
                     infer_flags[i] = -1
                 else:
                     attrs['strides'].append(dim)
+        else:
+            attrs['strides'] = slice_step
+
     # infer_flags
     attrs['infer_flags'] = infer_flags
...
@@ -2344,12 +2347,12 @@ class Block(object):
                    if isinstance(item[1], Parameter))

     def create_var(self, *args, **kwargs):
-        if not in_dygraph_mode():
+        if in_dygraph_mode():
+            var = _varbase_creator(*args, **kwargs)
+        else:
             var = Variable(block=self, *args, **kwargs)
             if 'initializer' in kwargs:
                 kwargs['initializer'](var, self)
-        else:
-            var = _varbase_creator(*args, **kwargs)
         return var

     def has_var(self, name):
...
@@ -2396,9 +2399,8 @@ class Block(object):
         # NOTE: v is destroyed by C++ after calling _rename_var.
         d = self.desc.find_var(cpt.to_bytes(new_name))
         if var_type == "Parameter":
-            if not in_dygraph_mode():
-                var = Parameter(
-                    self,
+            if in_dygraph_mode():
+                var = ParamBase(
                     d.shape(),
                     d.dtype(),
                     type=orig_var_type,
...
@@ -2410,7 +2412,8 @@ class Block(object):
                     gradient_clip_attr=gradient_clip_attr,
                     error_clip=error_clip)
             else:
-                var = ParamBase(
+                var = Parameter(
+                    self,
                     d.shape(),
                     d.dtype(),
                     type=orig_var_type,
...
@@ -2444,10 +2447,10 @@ class Block(object):
     def create_parameter(self, *args, **kwargs):
         global_block = self.program.global_block()
         param = None
-        if not in_dygraph_mode():
-            param = Parameter(global_block, *args, **kwargs)
-        else:
+        if in_dygraph_mode():
             param = ParamBase(*args, **kwargs)
+        else:
+            param = Parameter(global_block, *args, **kwargs)
         if 'initializer' in kwargs:

             def _is_inited_by(block, var):
...
@@ -2687,9 +2690,8 @@ class Block(object):
                         "same topology")
             assert isinstance(v, Variable)
             new_p = None
-            if not in_dygraph_mode():
-                new_p = Parameter(
-                    block=self,
+            if in_dygraph_mode():
+                new_p = ParamBase(
                     shape=v.shape,
                     dtype=v.dtype,
                     type=v.type,
...
@@ -2702,7 +2704,8 @@ class Block(object):
                     error_clip=p.error_clip,
                     name=v.name)
             else:
-                new_p = ParamBase(
+                new_p = Parameter(
+                    block=self,
                     shape=v.shape,
                     dtype=v.dtype,
                     type=v.type,
...
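Every framework.py hunk above applies the same mechanical rewrite: test `in_dygraph_mode()` positively and put the dygraph branch first, rather than negating the condition. A minimal sketch of the before/after shape of `create_var`; `make_varbase` and `make_variable` are hypothetical stand-ins for the `_varbase_creator` and `Variable` construction paths:

def create_var_before(dygraph, make_varbase, make_variable):
    # Old shape: negated test, static-graph branch first.
    if not dygraph:
        var = make_variable()
    else:
        var = make_varbase()
    return var


def create_var_after(dygraph, make_varbase, make_variable):
    # New shape: positive test, dygraph branch first; behavior is unchanged.
    if dygraph:
        var = make_varbase()
    else:
        var = make_variable()
    return var


# The two shapes pick the same branch in both modes.
for dygraph in (True, False):
    assert (create_var_before(dygraph, lambda: "varbase", lambda: "variable")
            == create_var_after(dygraph, lambda: "varbase", lambda: "variable"))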
python/paddle/fluid/layers/nn.py
...
@@ -4346,24 +4346,23 @@ def split(input, num_or_sections, dim=-1, name=None):
         if isinstance(num_or_sections, int):
             num = num_or_sections
             attrs['num'] = num_or_sections
-            res = core.ops.split(inputs, attrs, {}, {'Out': num})
-            return res['Out']
-        elif isinstance(num_or_sections, list):
+        elif isinstance(num_or_sections, (list, tuple)):
             num = len(num_or_sections)
             attrs['sections'] = list(
                 map(lambda ele: -1 if isinstance(ele, Variable) else ele,
                     num_or_sections))
-            contain_var = not all(not isinstance(ele, Variable)
-                                  for ele in num_or_sections)
-            if contain_var:
+            if utils._contain_var(num_or_sections):
                 raise TypeError(
-                    "The type of 'num_or_sections' in split must be int or list[int] in Dygraph mode, but "
-                    "received %s." % ('list[Variable]'))
+                    "The type of 'num_or_sections' in split must be int or list[int] or tuple[int] in Dygraph mode, but "
+                    "received %s, which contains Variable." %
+                    (type(num_or_sections)))
             else:
                 attrs['sections'] = list(num_or_sections)
         else:
             raise TypeError(
                 "The type of 'num_or_sections' in split must be int or list in Dygraph mode, but "
                 "received %s." % (type(num_or_sections)))
+        res = core.ops.split(inputs, attrs, {}, {'Out': num})
+        return res['Out']

     if not isinstance(num_or_sections, (int, list, tuple)):
         raise TypeError(
             "The type of 'num_or_sections' in split must be int, list or
...
@@ -4422,9 +4421,7 @@ def split(input, num_or_sections, dim=-1, name=None):
         attrs['sections'] = list(
             map(lambda ele: -1 if isinstance(ele, Variable) else ele,
                 num_or_sections))
-        contain_var = not all(not isinstance(ele, Variable)
-                              for ele in num_or_sections)
-        if contain_var:
+        if utils._contain_var(num_or_sections):
             inputs['SectionsTensorList'] = _get_SectionsTensorList(
                 num_or_sections)
...
@@ -5572,16 +5569,14 @@ def reshape(x, shape, actual_shape=None, act=None, inplace=False, name=None):
             # the shape of reshaped_3 is [6,8].
     """
     if in_dygraph_mode():
-        #TODO(zhiqiu): open inplace if we can.
+        #TODO(zhiqiu): enable inplace in dygraph mode.
         if inplace:
             warnings.warn(
                 "Inplace on reshape is not allowed and will be discarded in dygraph mode currently."
             )
         attrs = {}
         if isinstance(shape, (list, tuple)):
-            contain_var = not all(not isinstance(ele, Variable)
-                                  for ele in shape)
-            if contain_var:
+            if utils._contain_var(shape):
                 raise TypeError(
                     "The type of 'shape' in reshape must be list[int] or tuple(int) in Dygraph mode, but "
                     "received %s, which contains Variable." % type(shape))
...
@@ -5604,12 +5599,6 @@ def reshape(x, shape, actual_shape=None, act=None, inplace=False, name=None):
     helper = LayerHelper("reshape2", **locals())

-    def contain_var(one_list):
-        for ele in one_list:
-            if isinstance(ele, Variable):
-                return True
-        return False
-
     def get_new_shape_tensor(list_shape):
         new_shape_tensor = []
         for dim in list_shape:
...
@@ -5659,7 +5648,7 @@ def reshape(x, shape, actual_shape=None, act=None, inplace=False, name=None):
         assert len(shape) > 0, ("The size of 'shape' in reshape can't be zero, "
                                 "but received %s." % len(shape))
         attrs["shape"] = get_attr_shape(shape)
-        if contain_var(shape):
+        if utils._contain_var(shape):
             inputs['ShapeTensor'] = get_new_shape_tensor(shape)
     elif isinstance(actual_shape, Variable):
         actual_shape.stop_gradient = True
...
@@ -5804,8 +5793,7 @@ def unsqueeze(input, axes, name=None):
         axes.stop_gradient = True
         inputs["AxesTensor"] = axes
     elif isinstance(axes, (list, tuple)):
-        contain_var = not all(not isinstance(ele, Variable) for ele in axes)
-        if contain_var:
+        if utils._contain_var(axes):
             inputs["AxesTensorList"] = _to_Variable_list(axes)
         else:
             attrs["axes"] = axes
...
@@ -8256,12 +8244,6 @@ def crop_tensor(x, shape=None, offsets=None, name=None):
     ipts = {'X': x}
     attrs = {}

-    def _contain_var(input_list):
-        for ele in input_list:
-            if isinstance(ele, Variable):
-                return True
-        return False
-
     def _attr_shape_check(shape_val):
         if not isinstance(shape_val, int):
             raise TypeError(
...
@@ -8290,7 +8272,7 @@ def crop_tensor(x, shape=None, offsets=None, name=None):
         offsets.stop_gradient = True
         ipts['Offsets'] = offsets
         attrs['offsets'] = [-1] * len(x.shape)
-    elif _contain_var(offsets):
+    elif utils._contain_var(offsets):
         new_offsets_tensor = []
         offsets_attr = []
         for dim in offsets:
...
@@ -8314,7 +8296,7 @@ def crop_tensor(x, shape=None, offsets=None, name=None):
     if isinstance(shape, Variable):
         shape.stop_gradient = True
         ipts['Shape'] = shape
-    elif _contain_var(shape):
+    elif utils._contain_var(shape):
         new_shape_tensor = []
         shape_attr = []
         for dim_size in shape:
...
@@ -9344,20 +9326,12 @@ def expand(x, expand_times, name=None):
         expanded_2 = fluid.layers.expand(data_2, expand_times=expand_times)
         # the shape of expanded_2 is [48, 56].
     """
-
-    def contain_var(expand_times):
-        for ele in expand_times:
-            if isinstance(ele, Variable):
-                return True
-        return False
-
     inputs = {"X": [x]}
     attrs = {}
     if in_dygraph_mode():
         if isinstance(expand_times, (list, tuple)):
-            contain_var = contain_var(expand_times)
-            if contain_var:
+            if utils._contain_var(expand_times):
                 raise TypeError(
                     "The type of 'expand_times' in expand must be list[int] or tuple(int) in Dygraph mode, but "
                     "received %s, which contains Variable." % type(shape))
...
@@ -9404,18 +9378,14 @@ def expand(x, expand_times, name=None):
             new_expand_times_tensor.append(temp_out)
         return new_expand_times_tensor

-    if in_dygraph_mode():
-        inputs = {'X': x}
-        attrs = {'expand_times': expand_times}
-    else:
-        if isinstance(expand_times, Variable):
-            expand_times.stop_gradient = True
-            inputs['ExpandTimes'] = expand_times
-        elif isinstance(expand_times, (list, tuple)):
-            attrs['expand_times'] = get_attr_expand_times(expand_times)
-            if contain_var(expand_times):
-                inputs['expand_times_tensor'] = get_new_expand_times_tensor(
-                    expand_times)
+    if isinstance(expand_times, Variable):
+        expand_times.stop_gradient = True
+        inputs['ExpandTimes'] = expand_times
+    elif isinstance(expand_times, (list, tuple)):
+        attrs['expand_times'] = get_attr_expand_times(expand_times)
+        if utils._contain_var(expand_times):
+            inputs['expand_times_tensor'] = get_new_expand_times_tensor(
+                expand_times)

     dtype = helper.input_dtype(input_param_name='x')
     out = helper.create_variable_for_type_inference(dtype)
...
@@ -9912,19 +9882,12 @@ def slice(input, axes, starts, ends):
         sliced_2 = fluid.layers.slice(input, axes=axes, starts=[minus_3, 0, 2], ends=ends)
         # sliced_2 is input[0:3, 0:2, 2:4].
     """
-
-    def contain_var(one_list):
-        for ele in one_list:
-            if isinstance(ele, Variable):
-                return True
-        return False
-
     if in_dygraph_mode():
         infer_flags = list(1 for i in range(len(axes)))
         inputs = {'Input': [input]}

         if isinstance(starts, (list, tuple)):
-            if contain_var(starts):
+            if utils._contain_var(starts):
                 raise TypeError(
                     "The type of 'starts' in slice must be list[int] or tuple(int) in Dygraph mode, but "
                     "received %s, which contains Variable." % type(shape))
...
@@ -9934,7 +9897,7 @@ def slice(input, axes, starts, ends):
                     "received %s." % type(shape))

         if isinstance(ends, (list, tuple)):
-            if contain_var(ends):
+            if utils._contain_var(ends):
                 raise TypeError(
                     "The type of 'ends' in slice must be list[int] or tuple(int) in Dygraph mode, but "
                     "received %s, which contains Variable." % type(shape))
...
@@ -9985,9 +9948,7 @@ def slice(input, axes, starts, ends):
         infer_flags = list(-1 for i in range(len(axes)))
     elif isinstance(starts, (list, tuple)):
         attrs['starts'] = []
-        if not contain_var(starts):
-            attrs['starts'] = starts
-        else:
+        if utils._contain_var(starts):
             inputs['StartsTensorList'] = get_new_list_tensor(starts)
             for i, dim in enumerate(starts):
                 if isinstance(dim, Variable):
...
@@ -9995,6 +9956,8 @@ def slice(input, axes, starts, ends):
                     infer_flags[i] = -1
                 else:
                     attrs['starts'].append(dim)
+        else:
+            attrs['starts'] = starts

     # ends
     if isinstance(ends, Variable):
...
@@ -10003,9 +9966,7 @@ def slice(input, axes, starts, ends):
         infer_flags = list(-1 for i in range(len(axes)))
     elif isinstance(ends, (list, tuple)):
         attrs['ends'] = []
-        if not contain_var(ends):
-            attrs['ends'] = ends
-        else:
+        if utils._contain_var(ends):
             inputs['EndsTensorList'] = get_new_list_tensor(ends)
             for i, dim in enumerate(ends):
                 if isinstance(dim, Variable):
...
@@ -10013,6 +9974,9 @@ def slice(input, axes, starts, ends):
                     infer_flags[i] = -1
                 else:
                     attrs['ends'].append(dim)
+        else:
+            attrs['ends'] = ends
+
     # infer_flags
     attrs['infer_flags'] = infer_flags
     out = helper.create_variable_for_type_inference(
...
@@ -10130,12 +10094,6 @@ def strided_slice(input, axes, starts, ends, strides):
     helper = LayerHelper('strided_slice', **locals())

-    def contain_var(one_list):
-        for ele in one_list:
-            if isinstance(ele, Variable):
-                return True
-        return False
-
     def get_new_list_tensor(old_list):
         new_list_tensor = []
         for dim in old_list:
...
@@ -10169,9 +10127,7 @@ def strided_slice(input, axes, starts, ends, strides):
         inputs['StartsTensor'] = starts
     elif isinstance(starts, (list, tuple)):
         attrs['starts'] = []
-        if not contain_var(starts):
-            attrs['starts'] = starts
-        else:
+        if utils._contain_var(starts):
             inputs['StartsTensorList'] = get_new_list_tensor(starts)
             for i, dim in enumerate(starts):
                 if isinstance(dim, Variable):
...
@@ -10179,6 +10135,8 @@ def strided_slice(input, axes, starts, ends, strides):
                     infer_flags[i] = -1
                 else:
                     attrs['starts'].append(dim)
+        else:
+            attrs['starts'] = starts

     # ends
     if isinstance(ends, Variable):
...
@@ -10186,9 +10144,7 @@ def strided_slice(input, axes, starts, ends, strides):
         inputs['EndsTensor'] = ends
     elif isinstance(ends, (list, tuple)):
         attrs['ends'] = []
-        if not contain_var(ends):
-            attrs['ends'] = ends
-        else:
+        if utils._contain_var(ends):
             inputs['EndsTensorList'] = get_new_list_tensor(ends)
             for i, dim in enumerate(ends):
                 if isinstance(dim, Variable):
...
@@ -10196,15 +10152,16 @@ def strided_slice(input, axes, starts, ends, strides):
                     infer_flags[i] = -1
                 else:
                     attrs['ends'].append(dim)
+        else:
+            attrs['ends'] = ends

     # strides
     if isinstance(strides, Variable):
         strides.stop_gradient = True
         inputs['StridesTensor'] = strides
     elif isinstance(strides, (list, tuple)):
         attrs['strides'] = []
-        if not contain_var(strides):
-            attrs['strides'] = strides
-        else:
+        if utils._contain_var(strides):
             inputs['StridesTensorList'] = get_new_list_tensor(strides)
             for i, dim in enumerate(strides):
                 if isinstance(dim, Variable):
...
@@ -10212,6 +10169,8 @@ def strided_slice(input, axes, starts, ends, strides):
                     infer_flags[i] = -1
                 else:
                     attrs['strides'].append(dim)
+        else:
+            attrs['strides'] = strides
+
     attrs['infer_flags'] = infer_flags
     out = helper.create_variable_for_type_inference(
         dtype=helper.input_dtype('input'))
...
@@ -13894,12 +13853,6 @@ def uniform_random(shape, dtype='float32', min=-1.0, max=1.0, seed=0):
     dtype = convert_np_dtype_to_dtype_(dtype)
     check_dtype(dtype, 'dtype', ['float32', 'float64'], 'uniform_random')

-    def contain_var(one_list):
-        for ele in one_list:
-            if isinstance(ele, Variable):
-                return True
-        return False
-
     def get_new_shape_tensor(list_shape):
         new_shape_tensor = []
         for dim in list_shape:
...
@@ -13939,7 +13892,7 @@ def uniform_random(shape, dtype='float32', min=-1.0, max=1.0, seed=0):
         assert len(shape) > 0, (
             "The size of argument(shape) can't be zero.")
         attrs["shape"] = get_attr_shape(shape)
-        if contain_var(shape):
+        if utils._contain_var(shape):
             inputs['ShapeTensorList'] = get_new_shape_tensor(shape)

     out = helper.create_variable_for_type_inference(dtype)
...
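The `slice` docstring kept in the hunks above already shows the case `utils._contain_var` has to detect: a `starts` list mixing plain ints with a Variable. A usage sketch of that static-graph path, assuming a 1.x-era `paddle.fluid` install (the exact version is an assumption, not stated in the diff):

import paddle.fluid as fluid

# Static graph: when utils._contain_var finds a Variable in `starts`,
# the list is routed into the StartsTensorList input instead of the
# plain 'starts' attribute, and infer_flags[i] is set to -1 for it.
x = fluid.data(name='x', shape=[3, 4, 5], dtype='float32')
minus_3 = fluid.layers.fill_constant(shape=[1], dtype='int32', value=-3)
sliced = fluid.layers.slice(
    x, axes=[0, 1, 2], starts=[minus_3, 0, 2], ends=[3, 2, 4])

# In dygraph mode the same utils._contain_var check raises TypeError
# instead, since starts/ends must be plain ints there.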
python/paddle/fluid/layers/tensor.py
...
@@ -22,6 +22,7 @@ from ..initializer import Constant, force_init_on_cpu
 from ..core import VarDesc
 from .. import core
 from .layer_function_generator import templatedoc
+from . import utils
 from ..data_feeder import check_type_and_dtype, check_type, check_dtype, convert_dtype
 import numpy
 import warnings
...
@@ -552,13 +553,6 @@ def fill_constant(shape, dtype, value, force_cpu=False, out=None):
           shape = fluid.layers.fill_constant([1,2], "int32", 2) # shape=[2,2]
           data4 = fluid.layers.fill_constant(shape=shape, dtype='bool', value=True) # data4=[[True,True],[True,True]]
     """

-    def _contain_var(one_list):
-        for ele in one_list:
-            if isinstance(ele, Variable):
-                return True
-        return False
-
     attrs = {
         'value': float(value),
         'force_cpu': force_cpu or force_init_on_cpu()
...
@@ -571,8 +565,7 @@ def fill_constant(shape, dtype, value, force_cpu=False, out=None):
     if in_dygraph_mode():
         if isinstance(shape, (list, tuple)):
-            contain_var = _contain_var(shape)
-            if contain_var:
+            if utils._contain_var(shape):
                 raise TypeError(
                     "The type of 'shape' in fill_constant must be list[int] or tuple(int) in Dygraph mode, but "
                     "received %s, which contains Variable." % type(shape))
...
@@ -644,7 +637,7 @@ def fill_constant(shape, dtype, value, force_cpu=False, out=None):
             "The size of 'shape' in fill_constant can't be zero, "
             "but received %s." % len(shape))
     attrs["shape"] = _get_attr_shape(shape)
-    if _contain_var(shape):
+    if utils._contain_var(shape):
         inputs['ShapeTensorList'] = _get_shape_tensor(shape)

     if out is None:
...
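The docstring lines preserved in the `fill_constant` hunk double as a usage example for the path the shared helper guards: a `shape` argument that is itself a tensor. Reproduced as a sketch, under the same 1.x-era `fluid` API assumption as above:

import paddle.fluid as fluid

# From the fill_constant docstring above: build the shape as a tensor
# first, then pass that tensor as the `shape` argument.
shape = fluid.layers.fill_constant([1, 2], "int32", 2)  # shape tensor: [2, 2]
data4 = fluid.layers.fill_constant(shape=shape, dtype='bool', value=True)
# data4 = [[True, True], [True, True]]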
python/paddle/fluid/layers/utils.py
...
@@ -16,6 +16,7 @@ from __future__ import print_function
 import collections
 import six
 import numpy as np
+from ..framework import Variable


 def convert_to_list(value, n, name, dtype=np.int):
...
@@ -244,3 +245,13 @@ def _is_symmetric_padding(padding, data_dim):
         if padding[i * 2] != padding[i * 2 + 1]:
             is_sys = False
     return is_sys
+
+
+def _contain_var(list_or_tuple):
+    """
+    Check whether list or tuple contains variable.
+    """
+    for item in list_or_tuple:
+        if isinstance(item, Variable):
+            return True
+    return False
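The new helper is a drop-in replacement for the inline idiom removed from `split`, `reshape`, and `unsqueeze`. A quick equivalence check, again assuming a 1.x-era install where `layers.utils` exposes `_contain_var`:

import paddle.fluid as fluid
from paddle.fluid.layers import utils

v = fluid.data(name='v', shape=[1], dtype='int32')  # a real Variable

# The inline spelling this commit deletes ...
inline = not all(not isinstance(ele, fluid.framework.Variable)
                 for ele in [1, v, 3])
# ... agrees with the shared helper.
assert inline == utils._contain_var([1, v, 3])
assert not utils._contain_var([1, 2, 3])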
python/paddle/fluid/optimizer.py
...
@@ -134,11 +134,11 @@ class Optimizer(object):
         # global step if use lr decay
         if isinstance(self._learning_rate, LearningRateDecay):
             var_tmp = None
-            if not framework.in_dygraph_mode():
-                var_temp = Variable(None, name='global_step', dtype='int32')
-            else:
+            if framework.in_dygraph_mode():
                 var_temp = framework._varbase_creator(
                     None, name='global_step', dtype='int32')
+            else:
+                var_temp = Variable(None, name='global_step', dtype='int32')

             tensor.fill_constant(
                 [1], "int32", self._learning_rate.step_num, out=var_temp)
...
@@ -546,10 +546,10 @@ class Optimizer(object):
         See examples in ``apply_gradients``.
         """
         act_no_grad_set = None
-        if not framework.in_dygraph_mode():
-            act_no_grad_set = self._get_no_grad_set(loss, no_grad_set)
-        else:
+        if framework.in_dygraph_mode():
             pass
+        else:
+            act_no_grad_set = self._get_no_grad_set(loss, no_grad_set)

         self._dtype = loss.dtype
         if framework.in_dygraph_mode():
...