Commit 7d5c9d52
Project: magicwindyyd/mindspore (fork of MindSpore/mindspore, in sync with upstream)
Author: Wei Luning
Committed: July 20, 2020
Parent: bbfcbbe2

fix geir export bugs

Showing 7 changed files with 24 additions and 17 deletions (+24 -17)
mindspore/ccsrc/pipeline/jit/pipeline.cc            +1  -1
mindspore/ccsrc/transform/graph_ir/op_declare.cc    +5  -4
mindspore/nn/layer/quant.py                         +10 -5
mindspore/ops/operations/_inner_ops.py              +1  -0
mindspore/ops/operations/math_ops.py                +2  -2
mindspore/ops/operations/nn_ops.py                  +4  -4
tests/ut/python/train/quant/test_quant.py           +1  -1
mindspore/ccsrc/pipeline/jit/pipeline.cc

@@ -395,7 +395,7 @@ void ExecutorPy::GetGeBackendPolicy() const {
 bool IsPhaseExportGeir(const std::string &phase_s) {
   auto phase_to_export = "export.geir";
-  return phase_s.rfind(phase_to_export, 0) != std::string::npos;
+  return phase_s.rfind(phase_to_export) != std::string::npos;
 }

 std::vector<ActionItem> GetPipline(const ResourcePtr &resource, const std::string &phase_s, bool use_vm) {
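Note on the one-line change: std::string::rfind(needle, 0) only reports a match when the needle sits at index 0, so the old line was a strict prefix test, while rfind(needle) with no position argument accepts the marker anywhere in the phase label. A minimal Python sketch of the two behaviours, using hypothetical phase names (the real phase strings are not shown in this hunk):

def is_export_geir_old(phase_s):
    # old behaviour: "export.geir" must be the prefix of the phase label
    return phase_s.startswith("export.geir")

def is_export_geir_new(phase_s):
    # new behaviour: the marker may appear anywhere in the phase label
    return "export.geir" in phase_s

print(is_export_geir_old("train.export.geir.0"))  # False -- missed before the fix
print(is_export_geir_new("train.export.geir.0"))  # True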
mindspore/ccsrc/transform/graph_ir/op_declare.cc

@@ -757,7 +757,7 @@ ATTR_MAP(ExtractImagePatches) = {{"ksizes", ATTR_DESC(ksizes, AnyTraits<int>(),
 OUTPUT_MAP(ExtractImagePatches) = {{0, OUTPUT_DESC(y)}};

 // Conv2D
-INPUT_MAP(Conv2D) = {{1, INPUT_DESC(x)}, {2, INPUT_DESC(filter)}};
+INPUT_MAP(Conv2D) = {{1, INPUT_DESC(x)}, {2, INPUT_DESC(filter)}, {3, INPUT_DESC(bias)}};
 ATTR_MAP(Conv2D) = {
   {"stride", ATTR_DESC(strides, AnyTraits<std::vector<int64_t>>(), AnyTraits<std::vector<int64_t>>())},
   {"pad_list", ATTR_DESC(pads, AnyTraits<std::vector<int64_t>>(), AnyTraits<std::vector<int64_t>>())},

@@ -794,7 +794,7 @@ ATTR_MAP(Conv2DBackpropFilterD) = {
 OUTPUT_MAP(Conv2DBackpropFilterD) = {{0, OUTPUT_DESC(y)}};

 // DepthwiseConv2D
-INPUT_MAP(DepthwiseConv2D) = {{1, INPUT_DESC(x)}, {2, INPUT_DESC(filter)}};
+INPUT_MAP(DepthwiseConv2D) = {{1, INPUT_DESC(x)}, {2, INPUT_DESC(filter)}, {3, INPUT_DESC(bias)}};
 ATTR_MAP(DepthwiseConv2D) = {
   {"stride", ATTR_DESC(strides, AnyTraits<std::vector<int64_t>>(), AnyTraits<std::vector<int64_t>>())},
   {"pads", ATTR_DESC(pads, AnyTraits<std::vector<int64_t>>(), AnyTraits<std::vector<int64_t>>())},

@@ -826,7 +826,7 @@ ATTR_MAP(DepthwiseConv2DBackpropFilterD) = {
 OUTPUT_MAP(DepthwiseConv2DBackpropFilterD) = {{0, OUTPUT_DESC(filter_grad)}};

 // MatMulV2
-INPUT_MAP(MatMulV2) = {{1, INPUT_DESC(x1)}, {2, INPUT_DESC(x2)}};
+INPUT_MAP(MatMulV2) = {{1, INPUT_DESC(x1)}, {2, INPUT_DESC(x2)}, {3, INPUT_DESC(bias)}};
 ATTR_MAP(MatMulV2) = {{"transpose_a", ATTR_DESC(transpose_x1, AnyTraits<bool>())},
                       {"transpose_b", ATTR_DESC(transpose_x2, AnyTraits<bool>())}};
 OUTPUT_MAP(MatMulV2) = {{0, OUTPUT_DESC(y)}};

@@ -1347,7 +1347,8 @@ OUTPUT_MAP(AscendQuant) = {{0, OUTPUT_DESC(y)}};
 // AscendDequant
 INPUT_MAP(AscendDequant) = {{1, INPUT_DESC(x)}, {2, INPUT_DESC(deq_scale)}};
 ATTR_MAP(AscendDequant) = {{"sqrt_mode", ATTR_DESC(sqrt_mode, AnyTraits<bool>())},
-                           {"relu_flag", ATTR_DESC(relu_flag, AnyTraits<bool>())}};
+                           {"relu_flag", ATTR_DESC(relu_flag, AnyTraits<bool>())},
+                           {"dtype", ATTR_DESC(dtype, AnyTraits<GEType>())}};
 OUTPUT_MAP(AscendDequant) = {{0, OUTPUT_DESC(y)}};
 #ifdef ENABLE_GE
 // Print
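A hedged reading of the INPUT_MAP entries above (an assumption drawn from the entries themselves; the macro machinery lives elsewhere in graph_ir): keys are the 1-based positions of the MindSpore primitive's inputs, and values name the GE operator input they are wired to during GEIR export. Adding the third entry means a bias tensor supplied to Conv2D, DepthwiseConv2D, or MatMulV2 is carried into the exported graph instead of being dropped. The Python dictionaries below are illustrative only, not MindSpore code:

# 1-based MindSpore input position -> GE operator input (illustrative)
conv2d_inputs    = {1: "x",  2: "filter", 3: "bias"}   # bias entry added by this commit
depthwise_inputs = {1: "x",  2: "filter", 3: "bias"}
matmulv2_inputs  = {1: "x1", 2: "x2",     3: "bias"}

# AscendDequant now also exports a fixed output-dtype attribute, matching the
# add_prim_attr("dtype", mstype.float16) added to Dequant in _inner_ops.py below.
ascend_dequant_attrs = ["sqrt_mode", "relu_flag", "dtype"]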
mindspore/nn/layer/quant.py

@@ -28,8 +28,8 @@ from mindspore._checkparam import check_int_positive, check_bool, twice
 from mindspore._checkparam import Rel
 import mindspore.context as context
-from .normalization import BatchNorm2d
-from .activation import get_activation
+from .normalization import BatchNorm2d, BatchNorm1d
+from .activation import get_activation, ReLU
 from ..cell import Cell
 from . import conv, basic
 from ..._checkparam import ParamValidator as validator

@@ -206,7 +206,7 @@ class DenseBnAct(Cell):
         self.has_bn = validator.check_bool("has_bn", has_bn)
         self.has_act = activation is not None
         if has_bn:
-            self.batchnorm = BatchNorm2d(out_channels)
+            self.batchnorm = BatchNorm1d(out_channels)
         self.activation = get_activation(activation)

     def construct(self, x):

@@ -1156,13 +1156,18 @@ class QuantBlock(Cell):
         self.has_bias = bias is not None
         self.activation = activation
         self.has_act = activation is not None
+        if isinstance(activation, ReLU):
+            self.activation = None
+            self.has_act = False
+            self.dequant.add_prim_attr("relu_flag", True)
         self.bias_add = P.BiasAdd()

     def construct(self, x):
         x = self.quant(x)
-        x = self.core_op(x, self.weight)
         if self.has_bias:
-            x = self.bias_add(x, self.bias)
+            x = self.core_op(x, self.weight, self.bias)
+        else:
+            x = self.core_op(x, self.weight)
         if self.has_act:
             x = self.activation(x)
         x = self.dequant(x, self.dequant_scale)
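Two behavioural fixes sit in this file: DenseBnAct now normalizes the 2-D output of its Dense layer with BatchNorm1d instead of BatchNorm2d, and QuantBlock folds a trailing ReLU into the dequant op via the relu_flag attribute while feeding the bias straight into core_op, so the fused GE operator receives it as its third input (matching the new INPUT_MAP entries above). A minimal sketch of why the BatchNorm swap matters, assuming the standard mindspore.nn shape conventions:

import numpy as np
import mindspore.nn as nn
from mindspore import Tensor

# A Dense layer emits a 2-D (batch, features) tensor; BatchNorm2d expects a
# 4-D (N, C, H, W) input, so DenseBnAct needs BatchNorm1d here.
x = Tensor(np.ones((32, 16)).astype(np.float32))
bn = nn.BatchNorm1d(num_features=16)
y = bn(x)  # shape stays (32, 16)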
mindspore/ops/operations/_inner_ops.py

@@ -383,6 +383,7 @@ class Dequant(PrimitiveWithInfer):
     def __init__(self, sqrt_mode=False, relu_flag=False):
         self.sqrt_mode = validator.check_value_type("sqrt_mode", sqrt_mode, [bool], self.name)
         self.relu_flag = validator.check_value_type("relu_flag", relu_flag, [bool], self.name)
+        self.add_prim_attr("dtype", mstype.float16)

     def infer_shape(self, x_shape, deq_scale_shape):
         return x_shape
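The new fixed attribute pins the dequant output type, which the GE exporter uses to fill the "dtype" entry just added to ATTR_MAP(AscendDequant). A minimal inspection sketch, assuming the internal module path and the Primitive.attrs accessor:

from mindspore.ops.operations import _inner_ops as inner

deq = inner.Dequant(sqrt_mode=False, relu_flag=False)
# "dtype" is now a recorded primitive attribute (float16), alongside the
# user-facing sqrt_mode / relu_flag flags.
print(deq.attrs.get("dtype"))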
mindspore/ops/operations/math_ops.py

@@ -596,7 +596,7 @@ class MatMul(PrimitiveWithInfer):
             raise ValueError('MatMul input x, y should be the same dimension size and should be '
                              + f'equal to 2, while x size = {len(x)}, y size= {len(y)}')

-    def infer_shape(self, x, y):
+    def infer_shape(self, x, y, bias=None):
         self.check_shape_size(x, y)
         cls_name = self.name
         # expected dimension of x, y, x:[...,a,b] y:[..., c,d], the dim size should be the same except the last two

@@ -621,7 +621,7 @@ class MatMul(PrimitiveWithInfer):
         ret_dims = x[: -2] + [x_last[self.transpose_a], y_last[not self.transpose_b]]
         return ret_dims

-    def infer_dtype(self, x, y):
+    def infer_dtype(self, x, y, bias=None):
         args = {"x": x, "y": y}
         validator.check_tensor_type_same(args, mstype.float_type + mstype.int_type, self.name)
         if x.element_type() == mstype.int8:
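infer_shape and infer_dtype now tolerate a third, optional bias input so MatMul can be fed a bias during GEIR export; the output shape and dtype rules are unchanged because the bias is broadcast over the product. A NumPy-only illustration of that shape rule (no MindSpore involved):

import numpy as np

x = np.ones((4, 3), dtype=np.float32)
y = np.ones((3, 5), dtype=np.float32)
bias = np.ones((5,), dtype=np.float32)

out = x @ y                 # (4, 5)
out_biased = x @ y + bias   # bias broadcasts over rows; shape is still (4, 5)
assert out.shape == out_biased.shape == (4, 5)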
mindspore/ops/operations/nn_ops.py

@@ -842,7 +842,7 @@ class Conv2D(PrimitiveWithInfer):
         self.group = validator.check_integer('group', group, 0, Rel.GT, self.name)
         self.add_prim_attr('offset_a', 0)

-    def infer_shape(self, x_shape, w_shape):
+    def infer_shape(self, x_shape, w_shape, b_shape=None):
         validator.check_integer("weight rank", len(w_shape), 4, Rel.EQ, self.name)
         validator.check_integer("x rank", len(x_shape), 4, Rel.EQ, self.name)
         validator.check(f"x_shape[1] / group", x_shape[1] // self.group, "w_shape[1]", w_shape[1], Rel.EQ, self.name)

@@ -887,7 +887,7 @@ class Conv2D(PrimitiveWithInfer):
         out_shape = [x_shape[0], out_channel, h_out, w_out]
         return out_shape

-    def infer_dtype(self, x_dtype, w_dtype):
+    def infer_dtype(self, x_dtype, w_dtype, b_dtype=None):
         args = {'x': x_dtype, 'w': w_dtype}
         valid_types = [mstype.int8, mstype.int32, mstype.float16, mstype.float32]
         validator.check_tensor_type_same(args, valid_types, self.name)

@@ -968,7 +968,7 @@ class DepthwiseConv2dNative(PrimitiveWithInfer):
         self.group = validator.check_integer("group", group, 0, Rel.GT, self.name)
         self.add_prim_attr('offset_a', 0)

-    def infer_shape(self, x_shape, w_shape):
+    def infer_shape(self, x_shape, w_shape, b_shape=None):
         validator.check_integer("weight rank", len(w_shape), 4, Rel.EQ, self.name)
         validator.check_integer("x rank", len(x_shape), 4, Rel.EQ, self.name)
         validator.check("x_shape[1]", x_shape[1], "w_shape[1]", w_shape[1], Rel.EQ, self.name)

@@ -1011,7 +1011,7 @@ class DepthwiseConv2dNative(PrimitiveWithInfer):
         out_shape = [x_shape[0], out_channel, h_out, w_out]
         return out_shape

-    def infer_dtype(self, x_dtype, w_dtype):
+    def infer_dtype(self, x_dtype, w_dtype, b_dtype=None):
         args = {'x': x_dtype, 'w': w_dtype}
         validator.check_tensor_type_same(args, mstype.number_type, self.name)
         if x_dtype.element_type() == mstype.int8:
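Conv2D and DepthwiseConv2dNative get the same treatment: their infer functions accept an optional bias without changing the reported output shape or dtype. For reference, the spatial size that infer_shape ultimately reports follows the standard convolution arithmetic; the helper below is a plain-Python illustration of that rule, not MindSpore's implementation:

def conv_out_dim(in_dim, kernel, stride, dilation, pad_before, pad_after):
    """Standard output-size rule for one spatial dimension (explicit padding)."""
    effective_kernel = dilation * (kernel - 1) + 1
    return (in_dim + pad_before + pad_after - effective_kernel) // stride + 1

# e.g. a 3x3, stride-2 convolution on a 224x224 input with padding 1 -> 112
print(conv_out_dim(224, kernel=3, stride=2, dilation=1, pad_before=1, pad_after=1))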
tests/ut/python/train/quant/test_quant.py

@@ -78,7 +78,7 @@ def test_qat_lenet():
 def test_qat_mobile_per_channel_tf():
     network = mobilenetV2(num_classes=1000)
     img = Tensor(np.ones((1, 3, 224, 224)).astype(np.float32))
-    network = qat.convert_quant_network(network, bn_fold=True, per_channel=[False, True], symmetric=[True, False])
+    network = qat.convert_quant_network(network, bn_fold=True, per_channel=[True, False], symmetric=[True, False])
     # should load the checkpoint. mock here
     for param in network.get_parameters():
         param.init_data()