Commit 0cdaafea (unverified)
Authored June 27, 2023 by zhangyuqin1998; committed via GitHub on June 27, 2023
Parent: 7c2c965d

delete swish_raw (#54536)

* delete swish_raw
* fix
* Update activation_kernel.cc
* fix

Showing 18 changed files with 49 additions and 52 deletions (+49 −52)
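Context for the change: swish(x, beta) = x * sigmoid(beta * x), and the `swish_raw` kernel was the variant that carried the configurable beta. After this commit the kernels pin beta to 1.0, which reduces swish to the SiLU form that Paddle's public swish API computes. A minimal standalone C++ sketch of the math (illustrative only, not Paddle code):

#include <cmath>
#include <cstdio>

// swish(x, beta) = x * sigmoid(beta * x); the kernels in this commit pin
// beta = 1.0, which makes swish identical to SiLU.
double swish(double x, double beta = 1.0) {
  return x / (1.0 + std::exp(-beta * x));
}

int main() {
  for (double x : {-2.0, 0.0, 2.0}) {
    std::printf("swish(%+.1f) = %+.6f\n", x, swish(x));
  }
  return 0;
}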
Files changed:

paddle/phi/api/yaml/op_compat.yaml                                             +1 −1
paddle/phi/api/yaml/static_backward.yaml                                       +1 −1
paddle/phi/api/yaml/static_ops.yaml                                            +2 −2
paddle/phi/kernels/activation_kernel.cc                                        +0 −22
paddle/phi/kernels/activation_kernel.h                                         +0 −1
paddle/phi/kernels/cpu/activation_kernel.cc                                    +11 −2
paddle/phi/kernels/gpu/activation_kernel.cu                                    +11 −2
paddle/phi/kernels/onednn/activation_kernel.cc                                 +9 −2
paddle/phi/kernels/xpu/activation_kernel.cc                                    +5 −10
test/ir/inference/test_conv_act_onednn_fuse_pass.py                            +1 −1
test/ir/inference/test_mkldnn_matmul_activation_fuse_pass.py                   +1 −1
test/ir/inference/test_mkldnn_matmul_elementwise_add_activation_fuse_pass.py   +1 −1
test/ir/inference/test_mkldnn_matmul_v2_activation_fuse_pass.py                +1 −1
test/ir/inference/test_onednn_conv_concat_activation_fuse_pass.py              +1 −1
test/ir/inference/test_onednn_elementwise_add_activation_fuse_pass.py          +1 −1
test/ir/inference/test_onednn_fc_activation_fuse_pass.py                       +1 −1
test/ir/inference/test_onednn_softplus_activation_fuse_pass.py                 +1 −1
test/ir/inference/test_trt_convert_swish.py                                    +1 −1
paddle/phi/api/yaml/op_compat.yaml
@@ -2541,7 +2541,7 @@
   outputs :
     out : Out
   extra :
-    attrs : [bool use_mkldnn = false]
+    attrs : [bool use_mkldnn = false, float beta = 1.0]
 
 - op : sync_batch_norm
   backward : sync_batch_norm_grad
paddle/phi/api/yaml/static_backward.yaml
@@ -288,7 +288,7 @@
   backward : sum_double_grad
 
 - backward_op : swish_grad
-  forward : swish (Tensor x, float beta = 1.0f) -> Tensor(out)
+  forward : swish (Tensor x) -> Tensor(out)
   args : (Tensor x, Tensor out_grad)
   output : Tensor(x_grad)
   infer_meta :
paddle/phi/api/yaml/static_ops.yaml
@@ -540,13 +540,13 @@
   backward : sum_grad
 
 - op : swish
-  args : (Tensor x, float beta = 1.0f)
+  args : (Tensor x)
   output : Tensor(out)
   infer_meta :
     func : UnchangedInferMeta
     param : [x]
   kernel :
-    func : swish_raw
+    func : swish
   backward : swish_grad
 
 - op : tril_indices
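Note that the `kernel : func :` rename above has to track the registrations removed and added in the C++ files below, because phi resolves kernels by the registered name string. A minimal sketch of that lookup contract, with a hypothetical registry map standing in for phi's real kernel factory:

#include <cassert>
#include <functional>
#include <map>
#include <string>

// Hypothetical stand-in for phi's kernel factory: ops declared in the yaml
// are resolved by the string name used at PD_REGISTER_KERNEL time.
std::map<std::string, std::function<void()>> kernel_registry;

int main() {
  kernel_registry["swish"] = [] {};                 // PD_REGISTER_KERNEL(swish, ...)
  assert(kernel_registry.count("swish") == 1);      // yaml: kernel : func : swish
  assert(kernel_registry.count("swish_raw") == 0);  // removed by this commit
  return 0;
}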
paddle/phi/kernels/activation_kernel.cc
@@ -26,19 +26,11 @@ void Relu6Kernel(const Context& dev_ctx,
   Relu6RawKernel<T, Context>(dev_ctx, x, 6, out);
 }
 
-template <typename T, typename Context>
-void SwishKernel(const Context& dev_ctx,
-                 const DenseTensor& x,
-                 DenseTensor* out) {
-  SwishRawKernel<T, Context>(dev_ctx, x, 1.0, out);
-}
-
 }  // namespace phi
 
 using complex64 = ::phi::dtype::complex<float>;
 using complex128 = ::phi::dtype::complex<double>;
 
 PD_REGISTER_KERNEL(relu6, CPU, ALL_LAYOUT, phi::Relu6Kernel, float, double) {}
-PD_REGISTER_KERNEL(swish, CPU, ALL_LAYOUT, phi::SwishKernel, float, double) {}
 
 #if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
 PD_REGISTER_KERNEL(relu6,
@@ -49,28 +41,14 @@ PD_REGISTER_KERNEL(relu6,
                    double,
                    phi::dtype::float16,
                    phi::dtype::bfloat16) {}
-PD_REGISTER_KERNEL(swish,
-                   GPU,
-                   ALL_LAYOUT,
-                   phi::SwishKernel,
-                   float,
-                   double,
-                   phi::dtype::float16,
-                   phi::dtype::bfloat16) {}
 #endif
 
 #if defined PADDLE_WITH_XPU
 PD_REGISTER_KERNEL(
     relu6, XPU, ALL_LAYOUT, phi::Relu6Kernel, float, phi::dtype::float16) {}
-PD_REGISTER_KERNEL(
-    swish, XPU, ALL_LAYOUT, phi::SwishKernel, float, phi::dtype::float16) {}
 #endif
 
 #ifdef PADDLE_WITH_MKLDNN
 PD_REGISTER_KERNEL(
     relu6, OneDNN, ONEDNN, phi::Relu6Kernel, float, phi::dtype::bfloat16) {}
-PD_REGISTER_KERNEL(
-    swish, OneDNN, ONEDNN, phi::SwishKernel, float, phi::dtype::bfloat16) {}
 #endif
paddle/phi/kernels/activation_kernel.h
@@ -81,7 +81,6 @@ DECLARE_ACTIVATION_KERNEL_WITH_ONE_ATTRS(Mish, threshold)
 DECLARE_ACTIVATION_KERNEL_WITH_ONE_ATTRS(HardShrink, threshold)
 DECLARE_ACTIVATION_KERNEL_WITH_ONE_ATTRS(SoftShrink, lambda)
 DECLARE_ACTIVATION_KERNEL_WITH_ONE_ATTRS(Elu, alpha)
-DECLARE_ACTIVATION_KERNEL_WITH_ONE_ATTRS(SwishRaw, beta)
 DECLARE_ACTIVATION_KERNEL_WITH_ONE_ATTRS(Celu, alpha)
 DECLARE_ACTIVATION_KERNEL_WITH_ONE_ATTRS(Logit, eps)
paddle/phi/kernels/cpu/activation_kernel.cc
@@ -114,7 +114,6 @@ DEFINE_CPU_ACT_KERNEL_WITH_ONE_ATTRS(Mish, MishFunctor, threshold)
 DEFINE_CPU_ACT_KERNEL_WITH_ONE_ATTRS(HardShrink, HardShrinkFunctor, threshold)
 DEFINE_CPU_ACT_KERNEL_WITH_ONE_ATTRS(SoftShrink, SoftShrinkFunctor, lambda)
 DEFINE_CPU_ACT_KERNEL_WITH_ONE_ATTRS(Elu, ELUFunctor, alpha)
-DEFINE_CPU_ACT_KERNEL_WITH_ONE_ATTRS(SwishRaw, SwishFunctor, beta)
 DEFINE_CPU_ACT_KERNEL_WITH_ONE_ATTRS(Celu, CELUFunctor, alpha)
 
 DEFINE_CPU_ACT_KERNEL_WITH_TWO_ATTRS(HardTanh, HardTanhFunctor, t_min, t_max)
@@ -141,6 +140,16 @@ void HardSwishKernel(const Context& dev_ctx,
       dev_ctx, x, out, functor);
 }
 
+template <typename T, typename Context>
+void SwishKernel(const Context& dev_ctx,
+                 const DenseTensor& x,
+                 DenseTensor* out) {
+  funcs::SwishFunctor<T> functor;
+  auto attrs = functor.GetAttrs();
+  *(attrs[0].second) = 1.0;
+  ActivationImpl<T, T, Context, funcs::SwishFunctor<T>>(
+      dev_ctx, x, out, functor);
+}
+
 }  // namespace phi
 
 PD_REGISTER_KERNEL(relu, CPU, ALL_LAYOUT, phi::ReluKernel, float, double) {}
@@ -202,6 +211,7 @@ PD_REGISTER_ACTIVATION_KERNEL(softsign, SoftsignKernel)
 PD_REGISTER_ACTIVATION_KERNEL(sigmoid, SigmoidKernel)
 PD_REGISTER_ACTIVATION_KERNEL(logsigmoid, LogSigmoidKernel)
 PD_REGISTER_ACTIVATION_KERNEL(hard_sigmoid, HardSigmoidKernel)
+PD_REGISTER_ACTIVATION_KERNEL(swish, SwishKernel)
 
 PD_REGISTER_KERNEL(log,
                    CPU,
@@ -244,7 +254,6 @@ PD_REGISTER_KERNEL(log1p,
                    phi::dtype::float16,
                    phi::dtype::bfloat16) {}
 
-PD_REGISTER_ACTIVATION_KERNEL(swish_raw, SwishRawKernel)
 PD_REGISTER_ACTIVATION_KERNEL(hardswish, HardSwishKernel)
 PD_REGISTER_ACTIVATION_KERNEL(round, RoundKernel)
 PD_REGISTER_ACTIVATION_KERNEL(floor, FloorKernel)
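The new CPU SwishKernel pins beta through the functor's attribute table: GetAttrs() returns (name, pointer) pairs into the functor's own fields, so writing through attrs[0].second sets beta before the element-wise launch. A self-contained sketch of that pattern, with a hypothetical type standing in for funcs::SwishFunctor (not Paddle's actual headers):

#include <cassert>
#include <string>
#include <utility>
#include <vector>

// Hypothetical mirror of the BaseActivationFunctor attribute pattern:
// GetAttrs() hands out named pointers to the functor's attribute fields.
struct SwishFunctorSketch {
  float beta = 0.0f;
  std::vector<std::pair<std::string, float*>> GetAttrs() {
    return {{"beta", &beta}};
  }
  float operator()(float x) const { return x; }  // body elided; attrs are the point
};

int main() {
  SwishFunctorSketch functor;
  auto attrs = functor.GetAttrs();
  *(attrs[0].second) = 1.0f;     // same move as the kernel above: pin beta = 1.0
  assert(functor.beta == 1.0f);  // the write lands in the functor itself
  return 0;
}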
paddle/phi/kernels/gpu/activation_kernel.cu
@@ -132,7 +132,6 @@ DEFINE_GPU_ACT_KERNEL_WITH_ONE_ATTRS(HardShrink,
                                      threshold)
 DEFINE_GPU_ACT_KERNEL_WITH_ONE_ATTRS(SoftShrink, CudaSoftShrinkFunctor, lambda)
 DEFINE_GPU_ACT_KERNEL_WITH_ONE_ATTRS(Elu, CudaELUFunctor, alpha)
-DEFINE_GPU_ACT_KERNEL_WITH_ONE_ATTRS(SwishRaw, CudaSwishFunctor, beta)
 DEFINE_GPU_ACT_KERNEL_WITH_ONE_ATTRS(Mish, CudaMishFunctor, threshold)
 DEFINE_GPU_ACT_KERNEL_WITH_ONE_ATTRS(Celu, CudaCELUFunctor, alpha)
@@ -167,6 +166,16 @@ void HardSwishKernel(const Context& dev_ctx,
       dev_ctx, x, out, functor);
 }
 
+template <typename T, typename Context>
+void SwishKernel(const Context& dev_ctx,
+                 const DenseTensor& x,
+                 DenseTensor* out) {
+  funcs::CudaSwishFunctor<T> functor;
+  auto attrs = functor.GetAttrs();
+  *(attrs[0].second) = 1.0;
+  ActivationGPUImpl<T, Context, funcs::CudaSwishFunctor<T>>(
+      dev_ctx, x, out, functor);
+}
+
 }  // namespace phi
 
 #ifdef PADDLE_WITH_HIP
@@ -262,7 +271,7 @@ PD_REGISTER_ACTIVATION_KERNEL(sigmoid, SigmoidKernel)
 PD_REGISTER_ACTIVATION_KERNEL(logsigmoid, LogSigmoidKernel)
 PD_REGISTER_ACTIVATION_KERNEL(hard_sigmoid, HardSigmoidKernel)
 PD_REGISTER_ACTIVATION_KERNEL(hardswish, HardSwishKernel)
-PD_REGISTER_ACTIVATION_KERNEL(swish_raw, SwishRawKernel)
+PD_REGISTER_ACTIVATION_KERNEL(swish, SwishKernel)
 PD_REGISTER_ACTIVATION_KERNEL(round, RoundKernel)
 PD_REGISTER_ACTIVATION_KERNEL(floor, FloorKernel)
 PD_REGISTER_ACTIVATION_KERNEL(ceil, CeilKernel)
paddle/phi/kernels/onednn/activation_kernel.cc
@@ -154,7 +154,6 @@ DEFINE_ONEDNN_ACTIVATION_KERNEL(Round, RoundOneDNNFunctor)
 DEFINE_ONEDNN_ACT_KERNEL_WITH_ONE_ATTRS(Elu, EluOneDNNFunctor, alpha)
 DEFINE_ONEDNN_ACT_KERNEL_WITH_ONE_ATTRS(LeakyRelu, ReluOneDNNFunctor, alpha)
 DEFINE_ONEDNN_ACT_KERNEL_WITH_ONE_ATTRS(Mish, MishOneDNNFunctor, threshold)
-DEFINE_ONEDNN_ACT_KERNEL_WITH_ONE_ATTRS(SwishRaw, SwishOneDNNFunctor, beta)
 
 template <typename T, typename Context>
 void HardSwishKernel(const Context& dev_ctx,
@@ -187,6 +186,14 @@ void Relu6RawKernel(const Context& dev_ctx,
   functor(dev_ctx, x, 0, threshold, out);
 }
 
+template <typename T, typename Context>
+void SwishKernel(const Context& dev_ctx,
+                 const DenseTensor& x,
+                 DenseTensor* out) {
+  SwishOneDNNFunctor<T> functor;
+  functor(dev_ctx, x, 1.0, 0, out);
+}
+
 }  // namespace phi
 
 PD_REGISTER_KERNEL(round, OneDNN, ONEDNN, phi::RoundKernel, float) {}
@@ -206,5 +213,5 @@ PD_REGISTER_ACTIVATION_KERNEL(relu, ReluKernel)
 PD_REGISTER_ACTIVATION_KERNEL(relu6_raw, Relu6RawKernel)
 PD_REGISTER_ACTIVATION_KERNEL(sigmoid, SigmoidKernel)
 PD_REGISTER_ACTIVATION_KERNEL(sqrt, SqrtKernel)
-PD_REGISTER_ACTIVATION_KERNEL(swish_raw, SwishRawKernel)
+PD_REGISTER_ACTIVATION_KERNEL(swish, SwishKernel)
 PD_REGISTER_ACTIVATION_KERNEL(tanh, TanhKernel)
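Unlike the CPU/GPU versions, the OneDNN SwishKernel passes the pinned beta positionally. Comparing it with Relu6RawKernel's functor(dev_ctx, x, 0, threshold, out) one hunk above suggests a (ctx, x, alpha, beta, out) convention, with swish routing its beta through the first scalar slot. That mapping is an inference from the diff, not from Paddle's headers; a toy illustration:

#include <cstdio>

// Toy stand-in for the inferred OneDNN eltwise functor call convention
// functor(dev_ctx, x, alpha, beta, out); real signatures may differ.
void eltwise_call(const char* op, float alpha, float beta) {
  std::printf("%-6s alpha=%.1f beta=%.1f\n", op, alpha, beta);
}

int main() {
  eltwise_call("swish", 1.0f, 0.0f);  // SwishKernel: pinned beta rides in the alpha slot
  eltwise_call("relu6", 0.0f, 6.0f);  // Relu6: the threshold rides in the beta slot
  return 0;
}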
paddle/phi/kernels/xpu/activation_kernel.cc
@@ -403,10 +403,9 @@ struct XPUMishFunctor : public funcs::BaseActivationFunctor<T> {
 };
 
 template <typename T, typename Context>
-void SwishRawKernel(const Context& dev_ctx,
-                    const DenseTensor& x,
-                    float beta,
-                    DenseTensor* out) {
+void SwishKernel(const Context& dev_ctx,
+                 const DenseTensor& x,
+                 DenseTensor* out) {
   using XPUType = typename XPUTypeTrait<T>::Type;
   dev_ctx.template Alloc<T>(out);
   int r = xpu::swish(dev_ctx.x_context(),
@@ -542,12 +541,8 @@ PD_REGISTER_KERNEL(
     silu, XPU, ALL_LAYOUT, phi::SiluKernel, float, phi::dtype::float16) {}
 PD_REGISTER_KERNEL(
     sigmoid, XPU, ALL_LAYOUT, phi::SigmoidKernel, float, phi::dtype::float16) {}
-PD_REGISTER_KERNEL(swish_raw,
-                   XPU,
-                   ALL_LAYOUT,
-                   phi::SwishRawKernel,
-                   float,
-                   phi::dtype::float16) {}
+PD_REGISTER_KERNEL(
+    swish, XPU, ALL_LAYOUT, phi::SwishKernel, float, phi::dtype::float16) {}
 PD_REGISTER_KERNEL(hard_sigmoid,
                    XPU,
                    ALL_LAYOUT,
test/ir/inference/test_conv_act_onednn_fuse_pass.py
@@ -180,7 +180,7 @@ class TestConvActOneDNNFusePass(PassAutoScanTest):
                 'swish',
                 inputs={'X': ['conv2d_out']},
                 outputs={'Out': ['swish_out']},
-                beta=draw(st.floats(min_value=0.1, max_value=1.0)),
+                beta=1.0,
             )
         elif act_type == 'clip':
             act_op = OpConfig(
test/ir/inference/test_mkldnn_matmul_activation_fuse_pass.py
@@ -107,7 +107,7 @@ class TestMatmulActivationMkldnnFusePass(PassAutoScanTest):
                 activation_type,
                 inputs={"X": ["matmul_output"]},
                 outputs={"Out": ["activation_output"]},
-                beta=draw(st.floats(min_value=0.1, max_value=1.0)),
+                beta=1.0,
             )
         elif activation_type == "clip":
             activation_op = OpConfig(
test/ir/inference/test_mkldnn_matmul_elementwise_add_activation_fuse_pass.py
@@ -95,7 +95,7 @@ class TestMatmulElementwiseAddActivationMkldnnFusePass(PassAutoScanTest):
                 activation_type,
                 inputs={"X": ["elementwise_add_output"]},
                 outputs={"Out": ["activation_output"]},
-                beta=draw(st.floats(min_value=0.1, max_value=1.0)),
+                beta=1.0,
             )
         elif activation_type == "clip":
             activation_op = OpConfig(
test/ir/inference/test_mkldnn_matmul_v2_activation_fuse_pass.py
@@ -111,7 +111,7 @@ class TestMatmulv2ActivationMkldnnFusePass(PassAutoScanTest):
                 activation_type,
                 inputs={'X': ['matmul_output']},
                 outputs={'Out': ['activation_output']},
-                beta=draw(st.floats(min_value=0.1, max_value=1.0)),
+                beta=1.0,
             )
         elif activation_type == 'clip':
             activation_op = OpConfig(
test/ir/inference/test_onednn_conv_concat_activation_fuse_pass.py
@@ -113,7 +113,7 @@ class TestOneDNNConvConcatActivationFusePass(PassAutoScanTest):
                 activation_type,
                 inputs={'X': ['concat_output']},
                 outputs={'Out': ['activation_output']},
-                beta=draw(st.floats(min_value=0.1, max_value=1.0)),
+                beta=1.0,
             )
         elif activation_type == 'clip':
             activation_op = OpConfig(
test/ir/inference/test_onednn_elementwise_add_activation_fuse_pass.py
@@ -83,7 +83,7 @@ class TestElementwiseAddActivationOneDNNFusePass(PassAutoScanTest):
                 activation_type,
                 inputs={'X': ['eltwise_output']},
                 outputs={'Out': ['activation_output']},
-                beta=draw(st.floats(min_value=0.1, max_value=1.0)),
+                beta=1.0,
             )
         elif activation_type == 'clip':
             activation_op = OpConfig(
test/ir/inference/test_onednn_fc_activation_fuse_pass.py
@@ -103,7 +103,7 @@ class TestFCActivationOneDNNFusePass(PassAutoScanTest):
                 activation_type,
                 inputs={"X": ["fc_output"]},
                 outputs={"Out": ["activation_output"]},
-                beta=draw(st.floats(min_value=0.1, max_value=10.0)),
+                beta=1.0,
             )
         else:
             activation_op = OpConfig(
test/ir/inference/test_onednn_softplus_activation_fuse_pass.py
@@ -92,7 +92,7 @@ class TestSoftplusActivationOneDNNFusePass(PassAutoScanTest):
                 activation_type,
                 inputs={'X': ['softplus_out']},
                 outputs={'Out': ['activation_output']},
-                beta=draw(st.floats(min_value=0.1, max_value=10.0)),
+                beta=1.0,
             )
         else:
             activation_op = OpConfig(
test/ir/inference/test_trt_convert_swish.py
@@ -41,7 +41,7 @@ class TrtConvertSwishTest(TrtLayerAutoScanTest):
             return np.ones([1, 3, 64, 64]).astype(np.float32)
 
         for dims in [0, 1, 2, 3, 4]:
-            for beta in [1.0, 2.0, 3.0]:
+            for beta in [1.0]:
                 self.dims = dims
                 dics = [{"beta": beta}]