Commit 10be5005
Authored on Aug 19, 2020 by mindspore-ci-bot; committed by Gitee on Aug 19, 2020.
!4716 fix fp16 mean transpose
Merge pull request !4716 from zhaozhenlong/lite/issue/fix_fp16_transpose
Parents: 7a8fbbbb, 6bfc483b
Showing 8 changed files with 102 additions and 88 deletions (+102, -88).
Changed files:
mindspore/lite/src/runtime/kernel/arm/fp16/concat_fp16.cc     +27  -25
mindspore/lite/src/runtime/kernel/arm/fp16/concat_fp16.h      +1   -0
mindspore/lite/src/runtime/kernel/arm/fp16/reduce_fp16.cc     +35  -27
mindspore/lite/src/runtime/kernel/arm/fp16/reduce_fp16.h      +1   -1
mindspore/lite/src/runtime/kernel/arm/fp16/reshape_fp16.cc    +1   -1
mindspore/lite/src/runtime/kernel/arm/fp16/split_fp16.cc      +0   -1
mindspore/lite/src/runtime/kernel/arm/fp16/transpose_fp16.cc  +35  -33
mindspore/lite/src/runtime/kernel/arm/fp16/transpose_fp16.h   +2   -0
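Across all four kernels the commit applies one pattern, visible in the diffs below: fp16 staging buffers are owned by the kernel only when a tensor's dtype is fp32 (kNumberTypeFloat32/kNumberTypeFloat) and needs casting; allocation moves into a MallocTmpBuffer()/MallocFp16Buffer() helper called from ReSize(); and release moves into a matching void FreeTmpBuffer()/FreeFp16Buffer() that is safe to call repeatedly. A minimal self-contained sketch of that lifecycle follows; Allocator, SketchKernel, and the dtype flag are illustrative stand-ins, not MindSpore Lite's actual types:

#include <cstddef>
#include <cstdlib>

// Illustrative stand-ins only; not MindSpore Lite's real types.
typedef unsigned short half_t;  // 16-bit storage stand-in for float16_t

struct Allocator {
  void *Malloc(std::size_t size) { return std::malloc(size); }
  void Free(void *ptr) { std::free(ptr); }
};

struct SketchKernel {
  Allocator *allocator = nullptr;
  half_t *fp16_buf = nullptr;
  std::size_t elements = 0;
  bool owns_cast_buffer = false;  // true when the tensor dtype is fp32 and needs casting

  int MallocTmpBuffer() {
    if (owns_cast_buffer) {
      fp16_buf = static_cast<half_t *>(allocator->Malloc(elements * sizeof(half_t)));
      if (fp16_buf == nullptr) {
        return 1;  // RET_ERROR stand-in
      }
    }
    return 0;  // RET_OK stand-in
  }

  // void and idempotent: freeing twice is harmless because the pointer is nulled.
  void FreeTmpBuffer() {
    if (owns_cast_buffer && fp16_buf != nullptr) {
      allocator->Free(fp16_buf);
      fp16_buf = nullptr;
    }
  }

  // The shape every ReSize() takes after this commit: drop stale buffers,
  // try to reallocate, and clean up again on failure.
  int ReSize() {
    FreeTmpBuffer();
    int ret = MallocTmpBuffer();
    if (ret != 0) {
      FreeTmpBuffer();
      return ret;
    }
    return 0;
  }
};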
mindspore/lite/src/runtime/kernel/arm/fp16/concat_fp16.cc
@@ -43,7 +43,15 @@ int ConcatFp16CPUKernel::Init() {
int ConcatFp16CPUKernel::ReSize() {
  FreeTmpBuffer();
+ auto ret = MallocTmpBuffer();
+ if (ret != RET_OK) {
+   FreeTmpBuffer();
+   return ret;
+ }
+ return ConcatBaseCPUKernel::ReSize();
+}
+
+int ConcatFp16CPUKernel::MallocTmpBuffer() {
  for (const auto &in_tensor : in_tensors_) {
    float16_t *ptr = nullptr;
    if (in_tensor->data_type() == kNumberTypeFloat32 || in_tensor->data_type() == kNumberTypeFloat) {
@@ -58,10 +66,6 @@ int ConcatFp16CPUKernel::ReSize() {
  auto &out_tensor = out_tensors_.at(0);
  if (out_tensor->data_type() == kNumberTypeFloat32 || out_tensor->data_type() == kNumberTypeFloat) {
-   if (fp16_output_ != nullptr) {
-     context_->allocator->Free(fp16_output_);
-     fp16_output_ = nullptr;
-   }
    fp16_output_ =
      reinterpret_cast<float16_t *>(context_->allocator->Malloc(sizeof(float16_t) * out_tensors_[0]->ElementsNum()));
    if (fp16_output_ == nullptr) {
@@ -70,17 +74,29 @@ int ConcatFp16CPUKernel::ReSize() {
    }
  }
- return ConcatBaseCPUKernel::ReSize();
+ return RET_OK;
}

void ConcatFp16CPUKernel::FreeTmpBuffer() {
- for (auto ptr : fp16_inputs_) {
-   if (ptr != nullptr) {
-     context_->allocator->Free(ptr);
-     ptr = nullptr;
+ for (auto i = 0; i < fp16_inputs_.size(); i++) {
+   auto &in_tensor = in_tensors_.at(i);
+   auto in_ptr = fp16_inputs_.at(i);
+   if (in_tensor->data_type() == kNumberTypeFloat32 || in_tensor->data_type() == kNumberTypeFloat) {
+     if (in_ptr != nullptr) {
+       context_->allocator->Free(in_ptr);
+       in_ptr = nullptr;
+     }
    }
  }
  fp16_inputs_.clear();
+ auto &out_tensor = out_tensors_.at(0);
+ if (out_tensor->data_type() == kNumberTypeFloat32 || out_tensor->data_type() == kNumberTypeFloat) {
+   if (fp16_output_ != nullptr) {
+     context_->allocator->Free(fp16_output_);
+     fp16_output_ = nullptr;
+   }
+ }
}

int ConcatFp16CPUKernel::Run() {
@@ -119,24 +135,10 @@ int ConcatFp16CPUKernel::Run() {
  ConcatFp16(reinterpret_cast<void **>(fp16_inputs_.data()), input_num, axis_, inputs_output_shape.data(),
             output_shape.size(), reinterpret_cast<void *>(fp16_output_));
- // free fp16 in out buffer
  if (out_tensors_.at(0)->data_type() == kNumberTypeFloat32 || out_tensors_.at(0)->data_type() == kNumberTypeFloat) {
    Float16ToFloat32(fp16_output_, reinterpret_cast<float *>(output_addr), out_tensors_.at(0)->ElementsNum());
-   context_->allocator->Free(fp16_output_);
-   fp16_output_ = nullptr;
  }
- for (auto i = 0; i < fp16_inputs_.size(); i++) {
-   const auto in_tensor = in_tensors_[i];
-   if (in_tensor->data_type() == kNumberTypeFloat || in_tensor->data_type() == kNumberTypeFloat32) {
-     auto ptr = fp16_inputs_[i];
-     if (ptr != nullptr) {
-       context_->allocator->Free(ptr);
-       ptr = nullptr;
-     }
-   }
- }
- fp16_inputs_.clear();
+ FreeTmpBuffer();
  return RET_OK;
}
@@ -164,5 +166,5 @@ kernel::LiteKernel *CpuConcatFp16KernelCreator(const std::vector<lite::tensor::T
  }
  return kernel;
}
- // REG_KERNEL(kCPU, kNumberTypeFloat16, PrimitiveType_Concat, CpuConcatFp16KernelCreator)
+ REG_KERNEL(kCPU, kNumberTypeFloat16, PrimitiveType_Concat, CpuConcatFp16KernelCreator)
}  // namespace mindspore::kernel
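Note that this hunk also uncomments REG_KERNEL, re-registering the fp16 Concat kernel; the same change appears for Reduce, Mean, and Transpose below, which is what actually makes these kernels reachable again. As a rough mental model only — this sketch is illustrative, not MindSpore Lite's real registration machinery — a macro of this shape usually expands to a file-scope object whose constructor inserts a creator into a global registry keyed by (device, dtype, primitive):

#include <functional>
#include <map>
#include <tuple>

// Hypothetical sketch of what a REG_KERNEL-style macro does.
enum Device { kCPUSketch };
enum DataType { kFp16Sketch };
enum Primitive { kConcatSketch };

struct LiteKernel;  // opaque here
using KernelCreator = std::function<LiteKernel *()>;
using Key = std::tuple<Device, DataType, Primitive>;

std::map<Key, KernelCreator> &Registry() {
  static std::map<Key, KernelCreator> reg;  // one global creator table
  return reg;
}

struct KernelRegistrar {
  KernelRegistrar(Device d, DataType t, Primitive p, KernelCreator c) {
    Registry()[std::make_tuple(d, t, p)] = std::move(c);  // runs at static-init time
  }
};

// Expanding to a file-scope object is why commenting the REG_KERNEL line
// out silently removes the kernel from the runtime's lookup table.
#define REG_KERNEL_SKETCH(dev, dtype, prim, creator) \
  static KernelRegistrar g_##prim##_registrar(dev, dtype, prim, creator)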
mindspore/lite/src/runtime/kernel/arm/fp16/concat_fp16.h
@@ -41,6 +41,7 @@ class ConcatFp16CPUKernel : public ConcatBaseCPUKernel {
  int Run() override;

 private:
+ int MallocTmpBuffer();
  void FreeTmpBuffer();

 private:
mindspore/lite/src/runtime/kernel/arm/fp16/reduce_fp16.cc
@@ -58,20 +58,13 @@ int ReduceFp16CPUKernel::Init() {
}

int ReduceFp16CPUKernel::ReSize() {
- if (fp16_input_ != nullptr) {
-   context_->allocator->Free(fp16_input_);
-   fp16_input_ = nullptr;
- }
- auto in_tensor = in_tensors_.front();
- if (in_tensor->data_type() == kNumberTypeFloat32 || in_tensor->data_type() == kNumberTypeFloat) {
-   fp16_input_ =
-     reinterpret_cast<float16_t *>(context_->allocator->Malloc(in_tensor->ElementsNum() * sizeof(float16_t)));
-   if (fp16_input_ == nullptr) {
-     return RET_ERROR;
-   }
-   Float32ToFloat16(reinterpret_cast<float *>(in_tensor->Data()), fp16_input_, in_tensor->ElementsNum());
+ FreeTmpBuffer();
+ auto ret = MallocTmpBuffer();
+ if (ret != RET_OK) {
+   FreeTmpBuffer();
+   return ret;
  }
- return MallocTmpBuffer();
+ return RET_OK;
}

int ReduceFp16CPUKernel::CallReduceUnit(int task_id) {
@@ -99,9 +92,13 @@ int ReduceFp16CPUKernel::Run() {
  tmp_shape_ = in_tensors_.at(0)->shape();
  auto in_tensor = in_tensors_.at(0);
- if (in_tensor->data_type() == kNumberTypeFloat16) {
+ if (in_tensor->data_type() == kNumberTypeFloat32 || in_tensor->data_type() == kNumberTypeFloat) {
+   auto input_data = reinterpret_cast<float *>(in_tensor->Data());
+   Float32ToFloat16(input_data, fp16_input_, in_tensor->ElementsNum());
+ } else {
    fp16_input_ = reinterpret_cast<float16_t *>(in_tensor->Data());
  }
  fp16_src_data_ = fp16_input_;
  for (int i = 0; i < data_buffers_.size(); ++i) {
    fp16_dst_data_ = data_buffers_[i];
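Run() reduces one axis at a time: each pass writes from fp16_src_data_ into the next scratch buffer in data_buffers_, and that destination becomes the source for the following axis. A minimal sketch of that buffer chaining (plain float, scalar mean, illustrative only; the real kernel works in fp16 and dispatches through CallReduceUnit):

#include <cstddef>
#include <vector>

// Mean over the last axis of a [rows, cols] buffer: one pass of the
// src -> dst chaining that ReduceFp16CPUKernel::Run() applies per axis.
void MeanLastAxis(const float *src, float *dst, std::size_t rows, std::size_t cols) {
  for (std::size_t r = 0; r < rows; ++r) {
    float sum = 0.0f;
    for (std::size_t c = 0; c < cols; ++c) {
      sum += src[r * cols + c];
    }
    dst[r] = sum / static_cast<float>(cols);
  }
}

// Chaining several axes: each pass consumes the previous pass's output.
// (The kernel keeps one pre-allocated scratch buffer per reduced axis.)
void MeanBothAxes(const std::vector<float> &data, std::size_t rows, std::size_t cols, float *out) {
  std::vector<float> scratch(rows);
  MeanLastAxis(data.data(), scratch.data(), rows, cols);  // reduce axis 1
  MeanLastAxis(scratch.data(), out, 1, rows);             // then reduce axis 0
}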
@@ -117,6 +114,7 @@ int ReduceFp16CPUKernel::Run() {
    axis_size_ = tmp_shape_[axis];
    auto error_code = LiteBackendParallelLaunch(ReduceImpl, this, context_->thread_num_);
    if (error_code != RET_OK) {
+     FreeTmpBuffer();
      MS_LOG(ERROR) << "Reduce run error, error_code[" << error_code << "]";
      return RET_ERROR;
    }
@@ -132,16 +130,11 @@ int ReduceFp16CPUKernel::Run() {
    memcpy(out_tensor->Data(), fp16_dst_data_, out_tensor->ElementsNum() * sizeof(float16_t));
  }
- if (in_tensor->data_type() == kNumberTypeFloat32 || in_tensor->data_type() == kNumberTypeFloat) {
-   context_->allocator->Free(fp16_input_);
- }
- fp16_input_ = nullptr;
+ FreeTmpBuffer();
  return RET_OK;
}

-int ReduceFp16CPUKernel::FreeTmpBuffer() {
+void ReduceFp16CPUKernel::FreeTmpBuffer() {
  for (auto buffer : data_buffers_) {
    if (buffer != nullptr) {
      context_->allocator->Free(buffer);
@@ -149,12 +142,17 @@ int ReduceFp16CPUKernel::FreeTmpBuffer() {
    }
  }
  data_buffers_.clear();
- return RET_OK;
+ auto in_tensor = in_tensors_.at(0);
+ if (in_tensor->data_type() == kNumberTypeFloat32 || in_tensor->data_type() == kNumberTypeFloat) {
+   if (fp16_input_ != nullptr) {
+     context_->allocator->Free(fp16_input_);
+     fp16_input_ = nullptr;
+   }
+ }
}

int ReduceFp16CPUKernel::MallocTmpBuffer() {
- auto ret = FreeTmpBuffer();
  auto input_shape = in_tensors_.at(0)->shape();
  for (auto i = 0; i < num_axes_; i++) {
    int axis = axes_[i];
@@ -166,13 +164,23 @@ int ReduceFp16CPUKernel::MallocTmpBuffer() {
    }
    float16_t *buffer = reinterpret_cast<float16_t *>(context_->allocator->Malloc(size * sizeof(float16_t)));
    if (buffer == nullptr) {
-     MS_LOG(ERROR) << "Malloc data failed.";
+     MS_LOG(ERROR) << "Malloc data failed";
      return RET_ERROR;
    }
    data_buffers_.emplace_back(buffer);
    input_shape[axis] = 1;
  }
- return ret;
+ auto in_tensor = in_tensors_.front();
+ if (in_tensor->data_type() == kNumberTypeFloat32 || in_tensor->data_type() == kNumberTypeFloat) {
+   fp16_input_ =
+     reinterpret_cast<float16_t *>(context_->allocator->Malloc(in_tensor->ElementsNum() * sizeof(float16_t)));
+   if (fp16_input_ == nullptr) {
+     MS_LOG(ERROR) << "Malloc data failed";
+     return RET_ERROR;
+   }
+ }
+ return RET_OK;
}

kernel::LiteKernel *CpuReduceFp16KernelCreator(const std::vector<lite::tensor::Tensor *> &inputs,
@@ -235,6 +243,6 @@ kernel::LiteKernel *CpuMeanFp16KernelCreator(const std::vector<lite::tensor::Ten
  return kernel;
}
- // REG_KERNEL(kCPU, kNumberTypeFloat16, PrimitiveType_Reduce, CpuReduceFp16KernelCreator)
- // REG_KERNEL(kCPU, kNumberTypeFloat16, PrimitiveType_Mean, CpuMeanFp16KernelCreator)
+ REG_KERNEL(kCPU, kNumberTypeFloat16, PrimitiveType_Reduce, CpuReduceFp16KernelCreator)
+ REG_KERNEL(kCPU, kNumberTypeFloat16, PrimitiveType_Mean, CpuMeanFp16KernelCreator)
}  // namespace mindspore::kernel
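The reduce kernel now converts fp32 inputs into its fp16 scratch buffer inside Run() via Float32ToFloat16, and the concat/transpose kernels call Float16ToFloat32 on the way back out. Assuming IEEE half-precision storage and a compiler with a native half type (__fp16 on AArch64 GCC/Clang), a scalar sketch of the semantics of such helpers — the real ones are presumably vectorized with NEON:

#include <cstddef>

#if defined(__aarch64__)
using half = __fp16;

void Float32ToFloat16Sketch(const float *src, half *dst, std::size_t n) {
  for (std::size_t i = 0; i < n; ++i) {
    dst[i] = static_cast<half>(src[i]);  // round-to-nearest narrowing
  }
}

void Float16ToFloat32Sketch(const half *src, float *dst, std::size_t n) {
  for (std::size_t i = 0; i < n; ++i) {
    dst[i] = static_cast<float>(src[i]);  // exact widening
  }
}
#endif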
mindspore/lite/src/runtime/kernel/arm/fp16/reduce_fp16.h
@@ -52,7 +52,7 @@ class ReduceFp16CPUKernel : public ReduceBaseCPUKernel {
 private:
  int MallocTmpBuffer();
- int FreeTmpBuffer();
+ void FreeTmpBuffer();
};
}  // namespace mindspore::kernel
mindspore/lite/src/runtime/kernel/arm/fp16/reshape_fp16.cc
@@ -72,5 +72,5 @@ int ReshapeCPUKernel::Run() {
    context_->allocator->Free(input_ptr);
  }
  return RET_OK;
-}  // namespace mindspore::kernel
+}
}  // namespace mindspore::kernel
mindspore/lite/src/runtime/kernel/arm/fp16/split_fp16.cc
@@ -140,5 +140,4 @@ kernel::LiteKernel *CpuSplitFp16KernelCreator(const std::vector<lite::tensor::Te
  return kernel;
}
-
REG_KERNEL(kCPU, kNumberTypeFloat16, PrimitiveType_Split, CpuSplitFp16KernelCreator)
}  // namespace mindspore::kernel
mindspore/lite/src/runtime/kernel/arm/fp16/transpose_fp16.cc
@@ -59,10 +59,19 @@ int TransposeFp16CPUKernel::ReSize() {
    param->out_strides_[i] = out_shape[i + 1] * param->out_strides_[i + 1];
  }
- if (fp16_in_data_ != nullptr) {
-   context_->allocator->Free(fp16_in_data_);
-   fp16_in_data_ = nullptr;
- }
+ FreeFp16Buffer();
+ auto ret = MallocFp16Buffer();
+ if (ret != RET_OK) {
+   FreeFp16Buffer();
+   return ret;
+ }
+ return RET_OK;
+}
+
+int TransposeFp16CPUKernel::MallocFp16Buffer() {
+ auto &in_tensor = in_tensors_.front();
+ auto &out_tensor = out_tensors_.front();
  if (in_tensor->data_type() == kNumberTypeFloat || in_tensor->data_type() == kNumberTypeFloat32) {
    fp16_in_data_ =
      reinterpret_cast<float16_t *>(context_->allocator->Malloc(sizeof(float16_t) * in_tensor->ElementsNum()));
@@ -71,11 +80,6 @@ int TransposeFp16CPUKernel::ReSize() {
      return RET_ERROR;
    }
  }
- if (fp16_out_data_ != nullptr) {
-   context_->allocator->Free(fp16_out_data_);
-   fp16_out_data_ = nullptr;
- }
  if (out_tensor->data_type() == kNumberTypeFloat || out_tensor->data_type() == kNumberTypeFloat32) {
    fp16_out_data_ =
      reinterpret_cast<float16_t *>(context_->allocator->Malloc(sizeof(float16_t) * out_tensor->ElementsNum()));
@@ -87,6 +91,24 @@ int TransposeFp16CPUKernel::ReSize() {
  return RET_OK;
}

+void TransposeFp16CPUKernel::FreeFp16Buffer() {
+ auto &in_tensor = in_tensors_.front();
+ auto &out_tensor = out_tensors_.front();
+ if (in_tensor->data_type() == kNumberTypeFloat || in_tensor->data_type() == kNumberTypeFloat32) {
+   if (fp16_in_data_ != nullptr) {
+     context_->allocator->Free(fp16_in_data_);
+     fp16_in_data_ = nullptr;
+   }
+ }
+ if (out_tensor->data_type() == kNumberTypeFloat || out_tensor->data_type() == kNumberTypeFloat32) {
+   if (fp16_out_data_ != nullptr) {
+     context_->allocator->Free(fp16_out_data_);
+     fp16_out_data_ = nullptr;
+   }
+ }
+}
+
int TransposeFp16CPUKernel::TransposeParallel(int task_id) {
  int num_unit_thread = MSMIN(thread_h_stride_, num_unit_ - task_id * thread_h_stride_);
  if (num_unit_thread <= 0) {
@@ -95,13 +117,6 @@ int TransposeFp16CPUKernel::TransposeParallel(int task_id) {
  int thread_offset = task_id * thread_h_stride_;
  TransposeParameter *param = reinterpret_cast<TransposeParameter *>(this->op_parameter_);
- if (in_tensors_.at(0)->data_type() == kNumberTypeFloat16) {
-   fp16_in_data_ = reinterpret_cast<float16_t *>(in_tensors_.at(0)->Data());
- }
- if (out_tensors_.at(0)->data_type() == kNumberTypeFloat16) {
-   fp16_out_data_ = reinterpret_cast<float16_t *>(out_tensors_.at(0)->Data());
- }
  auto ret = DoTranspose(fp16_in_data_, fp16_out_data_, in_shape_, out_shape_, param, thread_offset,
                         thread_offset + num_unit_thread);
  if (ret != RET_OK) {
@@ -109,12 +124,6 @@ int TransposeFp16CPUKernel::TransposeParallel(int task_id) {
    return RET_ERROR;
  }
- if (in_tensors_.at(0)->data_type() == kNumberTypeFloat32 || in_tensors_.at(0)->data_type() == kNumberTypeFloat) {
-   context_->allocator->Free(fp16_in_data_);
- }
- if (out_tensors_.at(0)->data_type() == kNumberTypeFloat32 || out_tensors_.at(0)->data_type() == kNumberTypeFloat) {
-   context_->allocator->Free(fp16_out_data_);
- }
  return RET_OK;
}
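TransposeParallel() splits num_unit_ units of work into chunks of thread_h_stride_ per task: task task_id covers [task_id * thread_h_stride_, task_id * thread_h_stride_ + num_unit_thread), and the MSMIN clamp handles the ragged last chunk. For example, with num_unit_ = 10 and thread_h_stride_ = 4, tasks 0-2 process 4, 4, and 2 units, and any further task gets num_unit_thread <= 0 and returns early. A standalone sketch of just that partition arithmetic:

#include <algorithm>
#include <cstdio>

// Chunked work partition as used by TransposeParallel():
// each task takes up to `stride` units starting at task_id * stride.
void RunTask(int task_id, int num_unit, int stride) {
  int count = std::min(stride, num_unit - task_id * stride);  // MSMIN equivalent
  if (count <= 0) {
    return;  // task beyond the end of the work: no-op
  }
  int offset = task_id * stride;
  std::printf("task %d: units [%d, %d)\n", task_id, offset, offset + count);
}

int main() {
  // num_unit_ = 10, thread_h_stride_ = 4 -> chunks of 4, 4, 2
  for (int t = 0; t < 4; ++t) {
    RunTask(t, 10, 4);
  }
  return 0;
}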
@@ -139,7 +148,8 @@ int TransposeFp16CPUKernel::Run() {
  auto &in_tensor = in_tensors_.front();
  auto &out_tensor = out_tensors_.front();
  if (in_tensor == nullptr || out_tensor == nullptr) {
-   MS_LOG(ERROR) << "null pointer dreferencing.";
+   MS_LOG(ERROR) << "null pointer referencing.";
+   FreeFp16Buffer();
    return RET_ERROR;
  }
@@ -159,23 +169,15 @@ int TransposeFp16CPUKernel::Run() {
  ret = LiteBackendParallelLaunch(TransposeRun, this, thread_h_num_);
  if (ret != RET_OK) {
    MS_LOG(ERROR) << "Tranpose error error_code[" << ret << "]";
+   FreeFp16Buffer();
    return ret;
  }
- if (in_tensor->data_type() == kNumberTypeFloat || in_tensor->data_type() == kNumberTypeFloat32) {
-   context_->allocator->Free(fp16_in_data_);
-   fp16_in_data_ = nullptr;
- }
  if (out_tensor->data_type() == kNumberTypeFloat || out_tensor->data_type() == kNumberTypeFloat32) {
    out_data_ = reinterpret_cast<float *>(out_tensor->Data());
    if (out_data_ == nullptr) {
      return RET_ERROR;
    }
    Float16ToFloat32(fp16_out_data_, out_data_, out_tensor->ElementsNum());
-   context_->allocator->Free(fp16_out_data_);
-   fp16_out_data_ = nullptr;
  }
+ FreeFp16Buffer();
  return ret;
}
@@ -206,5 +208,5 @@ kernel::LiteKernel *CpuTransposeFp16KernelCreator(const std::vector<lite::tensor
  return kernel;
}
- // REG_KERNEL(kCPU, kNumberTypeFloat16, PrimitiveType_Transpose, CpuTransposeFp16KernelCreator)
+ REG_KERNEL(kCPU, kNumberTypeFloat16, PrimitiveType_Transpose, CpuTransposeFp16KernelCreator)
}  // namespace mindspore::kernel
mindspore/lite/src/runtime/kernel/arm/fp16/transpose_fp16.h
@@ -36,6 +36,8 @@ class TransposeFp16CPUKernel : public LiteKernel {
  int ReSize() override;
  int Run() override;
  int TransposeParallel(int task_id);
+ void FreeFp16Buffer();
+ int MallocFp16Buffer();

 private:
  int thread_num_;