Skip to content
体验新版
项目
组织
正在加载...
登录
切换导航
打开侧边栏
PaddlePaddle
DeepSpeech
提交
6b129661
D
DeepSpeech
项目概览
PaddlePaddle
/
DeepSpeech
大约 1 年 前同步成功
通知
206
Star
8425
Fork
1598
代码
文件
提交
分支
Tags
贡献者
分支图
Diff
Issue
245
列表
看板
标记
里程碑
合并请求
3
Wiki
0
Wiki
分析
仓库
DevOps
项目成员
Pages
D
DeepSpeech
项目概览
项目概览
详情
发布
仓库
仓库
文件
提交
分支
标签
贡献者
分支图
比较
Issue
245
Issue
245
列表
看板
标记
里程碑
合并请求
3
合并请求
3
Pages
分析
分析
仓库分析
DevOps
Wiki
0
Wiki
成员
成员
收起侧边栏
关闭侧边栏
动态
分支图
创建新Issue
提交
Issue看板
体验新版 GitCode,发现更多精彩内容 >>
未验证
提交
6b129661
编写于
11月 16, 2021
作者:
J
Jackwaterveg
提交者:
GitHub
11月 16, 2021
浏览文件
操作
浏览文件
下载
差异文件
Merge pull request #1002 from mmglove/add_conformer_1110
Add conformer 1110
上级
4a28751d
3ecb8cbf
变更
5
隐藏空白更改
内联
并排
Showing
5 changed files
with
40 additions
and
375 deletions
+40
-375
tests/benchmark/conformer/README.md
tests/benchmark/conformer/README.md
+0
-10
tests/benchmark/conformer/analysis.py
tests/benchmark/conformer/analysis.py
+0
-345
tests/benchmark/conformer/prepare.sh
tests/benchmark/conformer/prepare.sh
+3
-2
tests/benchmark/conformer/run.sh
tests/benchmark/conformer/run.sh
+20
-9
tests/benchmark/conformer/run_benchmark.sh
tests/benchmark/conformer/run_benchmark.sh
+17
-9
未找到文件。
tests/benchmark/conformer/README.md
浏览文件 @
6b129661
...
...
@@ -43,16 +43,6 @@ bash prepare.sh
bash run.sh
```
### Analyse the sp
```
bash run_analysis_sp.sh
```
### Analyse the mp
```
bash run_analysis_mp.sh
```
### The log
```
{"log_file": "recoder_sp_bs16_fp32_ngpu1.txt",
...
...
tests/benchmark/conformer/analysis.py
已删除
100644 → 0
浏览文件 @
4a28751d
# copyright (c) 2019 PaddlePaddle Authors. All Rights Reserve.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from
__future__
import
print_function
import
argparse
import
json
import
re
import
traceback
def parse_args():
    """Build and parse the command-line options for benchmark-log analysis.

    Returns:
        argparse.Namespace: the parsed options. ``separator`` is normalized so
        that the literal string "None" (as passed by the benchmark harness)
        becomes a real ``None``, which makes the log split on whitespace.
    """
    parser = argparse.ArgumentParser(description=__doc__)
    parser.add_argument(
        "--filename", type=str, help="The name of log which need to analysis.")
    parser.add_argument(
        "--log_with_profiler",
        type=str,
        help="The path of train log with profiler")
    parser.add_argument(
        "--profiler_path", type=str, help="The path of profiler timeline log.")
    parser.add_argument(
        "--keyword", type=str, help="Keyword to specify analysis data")
    parser.add_argument(
        "--separator",
        type=str,
        default=None,
        help="Separator of different field in log")
    parser.add_argument(
        '--position', type=int, default=None, help='The position of data field')
    parser.add_argument(
        '--range',
        type=str,
        default="",
        help='The range of data field to intercept')
    parser.add_argument(
        '--base_batch_size', type=int, help='base_batch size on gpu')
    parser.add_argument(
        '--skip_steps',
        type=int,
        default=0,
        help='The number of steps to be skipped')
    parser.add_argument(
        '--model_mode',
        type=int,
        default=-1,
        help='Analysis mode, default value is -1')
    parser.add_argument('--ips_unit', type=str, default=None, help='IPS unit')
    # BUG FIX: these two options declare type=str but previously used the int
    # default 0. argparse does not pass defaults through ``type``, so every
    # downstream consumer (e.g. the JSON emitted by __main__) received an int
    # instead of a string. Use the equivalent string "0" as the default.
    parser.add_argument(
        '--model_name',
        type=str,
        default="0",
        help='training model_name, transformer_base')
    parser.add_argument(
        '--mission_name', type=str, default="0", help='training mission name')
    parser.add_argument(
        '--direction_id', type=int, default=0, help='training direction_id')
    parser.add_argument(
        '--run_mode',
        type=str,
        default="sp",
        help='multi process or single process')
    parser.add_argument(
        '--index',
        type=int,
        default=1,
        help='{1: speed, 2:mem, 3:profiler, 6:max_batch_size}')
    parser.add_argument(
        '--gpu_num', type=int, default=1, help='nums of training gpus')
    parser.add_argument(
        '--use_num', type=int, default=1, help='nums of used recoders')
    args = parser.parse_args()
    # The harness passes the literal string "None" to request the default
    # whitespace split; translate it into a real None here.
    args.separator = None if args.separator == "None" else args.separator
    return args
def
_is_number
(
num
):
pattern
=
re
.
compile
(
r
'^[-+]?[-0-9]\d*\.\d*|[-+]?\.?[0-9]\d*$'
)
result
=
pattern
.
match
(
num
)
if
result
:
return
True
else
:
return
False
class TimeAnalyzer(object):
    """Extract numeric records from a benchmark log and derive throughput.

    NOTE(review): ``_distil`` reads the module-level ``args`` namespace
    (``args.position``, ``args.use_num``), so instances only work after
    ``parse_args()`` has populated that global — confirm before reuse.
    """

    def __init__(self,
                 filename,
                 keyword=None,
                 separator=None,
                 position=None,
                 range="-1"):
        # ``range`` (shadowing the builtin in this scope) is the slice spec
        # applied to each extracted field: "" (whole field), a number
        # (prefix length), or "start:end".
        if filename is None:
            raise Exception("Please specify the filename!")
        if keyword is None:
            raise Exception("Please specify the keyword!")
        self.filename = filename  # path of the log file to scan
        self.keyword = keyword  # substring that marks a data line
        self.separator = separator  # field separator; None -> whitespace
        self.position = position  # index of the data field, if fixed
        self.range = range  # character range inside the field
        self.records = None  # sorted list of extracted floats
        self._distil()  # eagerly parse the log on construction

    def _distil(self):
        """Scan the log file and fill ``self.records`` with parsed floats."""
        self.records = []
        with open(self.filename, "r") as f_object:
            lines = f_object.readlines()
            for line in lines:
                # Only lines containing the keyword carry data.
                if self.keyword not in line:
                    continue
                try:
                    result = None
                    # Distil the string from a line.
                    line = line.strip()
                    line_words = line.split(
                        self.separator) if self.separator else line.split()
                    print("line_words", line_words)
                    # NOTE(review): the guard tests the global ``args.position``
                    # but indexes with ``self.position`` — presumably both come
                    # from the same CLI flag; verify against callers.
                    if args.position:
                        result = line_words[self.position]
                    else:
                        # Distil the string following the keyword.
                        for i in range(len(line_words) - 1):
                            if line_words[i] == self.keyword:
                                result = line_words[i + 1]
                                break
                    # Distil the result from the picked string.
                    if not self.range:
                        result = result[0:]
                    elif _is_number(self.range):
                        result = result[0:int(self.range)]
                    else:
                        result = result[int(self.range.split(":")[0]):int(
                            self.range.split(":")[1])]
                    self.records.append(float(result))
                except Exception as exc:
                    # Malformed lines are skipped on purpose (best effort).
                    pass
                    #print("line is: {}; separator={}; position={}".format(line, self.separator, self.position))
        # Keep only the ``args.use_num`` smallest records.
        self.records.sort()
        self.records = self.records[:args.use_num]
        print("records", self.records)
        print("Extract {} records: separator={}; position={}".format(
            len(self.records), self.separator, self.position))

    def _get_fps(self,
                 mode,
                 batch_size,
                 gpu_num,
                 avg_of_records,
                 run_mode,
                 unit=None):
        """Convert an averaged record value into ``(fps, unit)`` per mode."""
        if mode == -1 and run_mode == 'sp':
            assert unit, "Please set the unit when mode is -1."
            fps = gpu_num * avg_of_records
        elif mode == -1 and run_mode == 'mp':
            assert unit, "Please set the unit when mode is -1."
            fps = gpu_num * avg_of_records  #temporarily, not used now
            print("------------this is mp")
        elif mode == 0:
            # s/step -> samples/s
            fps = (batch_size * gpu_num) / avg_of_records
            unit = "samples/s"
        elif mode == 1:
            # steps/s -> steps/s
            fps = avg_of_records
            unit = "steps/s"
        elif mode == 2:
            # s/step -> steps/s
            fps = 1 / avg_of_records
            unit = "steps/s"
        elif mode == 3:
            # steps/s -> samples/s
            fps = batch_size * gpu_num * avg_of_records
            unit = "samples/s"
        elif mode == 4:
            # s/epoch -> s/epoch
            fps = avg_of_records
            unit = "s/epoch"
        else:
            # NOTE(review): missing ``raise`` — this builds the exception but
            # never raises it, so an unknown mode falls through and hits a
            # NameError on ``fps`` at the return below.
            ValueError("Unsupported analysis mode.")
        return fps, unit

    def analysis(self,
                 batch_size,
                 gpu_num=1,
                 skip_steps=0,
                 mode=-1,
                 run_mode='sp',
                 unit=None):
        """Average the records (with and without the first ``skip_steps``),
        print a human-readable report, and return ``(fps_skipped, unit)``.

        Returns ``(0, '')`` when the input is unusable (non-positive batch
        size, or not enough records to skip).
        """
        if batch_size <= 0:
            print("base_batch_size should larger than 0.")
            return 0, ''
        if len(
                self.records
        ) <= skip_steps:  # to address the condition which item of log equals to skip_steps
            print("no records")
            return 0, ''
        sum_of_records = 0
        sum_of_records_skipped = 0
        skip_min = self.records[skip_steps]
        skip_max = self.records[skip_steps]
        count = len(self.records)
        # Single pass: full sum, skipped sum, and min/max over the kept tail.
        for i in range(count):
            sum_of_records += self.records[i]
            if i >= skip_steps:
                sum_of_records_skipped += self.records[i]
                if self.records[i] < skip_min:
                    skip_min = self.records[i]
                if self.records[i] > skip_max:
                    skip_max = self.records[i]
        avg_of_records = sum_of_records / float(count)
        avg_of_records_skipped = sum_of_records_skipped / float(count -
                                                                skip_steps)
        fps, fps_unit = self._get_fps(mode, batch_size, gpu_num,
                                      avg_of_records, run_mode, unit)
        fps_skipped, _ = self._get_fps(mode, batch_size, gpu_num,
                                       avg_of_records_skipped, run_mode, unit)
        if mode == -1:
            # Records are already ips values.
            print("average ips of %d steps, skip 0 step:" % count)
            print("\tAvg: %.3f %s" % (avg_of_records, fps_unit))
            print("\tFPS: %.3f %s" % (fps, fps_unit))
            if skip_steps > 0:
                print("average ips of %d steps, skip %d steps:" %
                      (count, skip_steps))
                print("\tAvg: %.3f %s" % (avg_of_records_skipped, fps_unit))
                print("\tMin: %.3f %s" % (skip_min, fps_unit))
                print("\tMax: %.3f %s" % (skip_max, fps_unit))
                print("\tFPS: %.3f %s" % (fps_skipped, fps_unit))
        elif mode == 1 or mode == 3:
            # Records are steps/s latencies.
            print("average latency of %d steps, skip 0 step:" % count)
            print("\tAvg: %.3f steps/s" % avg_of_records)
            print("\tFPS: %.3f %s" % (fps, fps_unit))
            if skip_steps > 0:
                print("average latency of %d steps, skip %d steps:" %
                      (count, skip_steps))
                print("\tAvg: %.3f steps/s" % avg_of_records_skipped)
                print("\tMin: %.3f steps/s" % skip_min)
                print("\tMax: %.3f steps/s" % skip_max)
                print("\tFPS: %.3f %s" % (fps_skipped, fps_unit))
        elif mode == 0 or mode == 2:
            # Records are s/step latencies.
            print("average latency of %d steps, skip 0 step:" % count)
            print("\tAvg: %.3f s/step" % avg_of_records)
            print("\tFPS: %.3f %s" % (fps, fps_unit))
            if skip_steps > 0:
                print("average latency of %d steps, skip %d steps:" %
                      (count, skip_steps))
                print("\tAvg: %.3f s/step" % avg_of_records_skipped)
                print("\tMin: %.3f s/step" % skip_min)
                print("\tMax: %.3f s/step" % skip_max)
                print("\tFPS: %.3f %s" % (fps_skipped, fps_unit))
        return round(fps_skipped, 3), fps_unit
if __name__ == "__main__":
    # Entry point: parse CLI options, run the requested analysis, and emit
    # the result dict as a single JSON line for the benchmark database.
    args = parse_args()
    run_info = dict()
    run_info["log_file"] = args.filename
    run_info["model_name"] = args.model_name
    run_info["mission_name"] = args.mission_name
    run_info["direction_id"] = args.direction_id
    run_info["run_mode"] = args.run_mode
    run_info["index"] = args.index
    run_info["gpu_num"] = args.gpu_num
    run_info["FINAL_RESULT"] = 0
    run_info["JOB_FAIL_FLAG"] = 0
    try:
        if args.index == 1:
            # Speed analysis: extract the keyword-tagged records and compute
            # throughput via TimeAnalyzer.analysis.
            if args.gpu_num == 1:
                run_info["log_with_profiler"] = args.log_with_profiler
                run_info["profiler_path"] = args.profiler_path
            analyzer = TimeAnalyzer(args.filename, args.keyword,
                                    args.separator, args.position, args.range)
            run_info["FINAL_RESULT"], run_info["UNIT"] = analyzer.analysis(
                batch_size=args.base_batch_size,
                gpu_num=args.gpu_num,
                skip_steps=args.skip_steps,
                mode=args.model_mode,
                run_mode=args.run_mode,
                unit=args.ips_unit)
            # if int(os.getenv('job_fail_flag')) == 1 or int(run_info["FINAL_RESULT"]) == 0:
            #     run_info["JOB_FAIL_FLAG"] = 1
        elif args.index == 3:
            # Profiler analysis: for each timeline item keep the first (i.e.
            # smallest, since records are sorted) extracted value, or 0 when
            # the item never appears in the log.
            run_info["FINAL_RESULT"] = {}
            records_fo_total = TimeAnalyzer(args.filename,
                                            'Framework overhead', None, 3,
                                            '').records
            records_fo_ratio = TimeAnalyzer(args.filename,
                                            'Framework overhead', None,
                                            5).records
            records_ct_total = TimeAnalyzer(args.filename, 'Computation time',
                                            None, 3, '').records
            records_gm_total = TimeAnalyzer(args.filename, 'GpuMemcpy Calls',
                                            None, 4, '').records
            records_gm_ratio = TimeAnalyzer(args.filename, 'GpuMemcpy Calls',
                                            None, 6).records
            records_gmas_total = TimeAnalyzer(args.filename,
                                              'GpuMemcpyAsync Calls', None, 4,
                                              '').records
            records_gms_total = TimeAnalyzer(args.filename,
                                             'GpuMemcpySync Calls', None, 4,
                                             '').records
            run_info["FINAL_RESULT"]["Framework_Total"] = records_fo_total[
                0] if records_fo_total else 0
            run_info["FINAL_RESULT"]["Framework_Ratio"] = records_fo_ratio[
                0] if records_fo_ratio else 0
            run_info["FINAL_RESULT"][
                "ComputationTime_Total"] = records_ct_total[
                    0] if records_ct_total else 0
            run_info["FINAL_RESULT"]["GpuMemcpy_Total"] = records_gm_total[
                0] if records_gm_total else 0
            run_info["FINAL_RESULT"]["GpuMemcpy_Ratio"] = records_gm_ratio[
                0] if records_gm_ratio else 0
            run_info["FINAL_RESULT"][
                "GpuMemcpyAsync_Total"] = records_gmas_total[
                    0] if records_gmas_total else 0
            run_info["FINAL_RESULT"]["GpuMemcpySync_Total"] = records_gms_total[
                0] if records_gms_total else 0
        else:
            print("Not support!")
    except Exception:
        # Never let a parsing failure kill the benchmark job; report what we
        # have (FINAL_RESULT stays 0) and show the traceback for debugging.
        traceback.print_exc()
    print("{}".format(json.dumps(run_info)))  # it's required, for the log file path insert to the database
tests/benchmark/conformer/prepare.sh
浏览文件 @
6b129661
source
../../../tools/venv/bin/activate
cd
../../../
pip
install
-e
.
# 安装pdspeech
cd
-
#Enter the example dir
pushd
../../../examples/aishell/s1
...
...
tests/benchmark/conformer/run.sh
浏览文件 @
6b129661
# 提供可稳定复现性能的脚本,默认在标准docker环境内py37执行: paddlepaddle/paddle:latest-gpu-cuda10.1-cudnn7 paddle=2.1.2 py=37
# 执行目录:需说明
CUR_DIR
=
${
PWD
}
source
../../../tools/venv/bin/activate
CUR_DIR
=
${
PWD
}
# PaddleSpeech/tests/benchmark/conformer
cd
../../../
log_path
=
${
LOG_PATH_INDEX_DIR
:-
$(
pwd
)
}
# benchmark系统指定该参数,不需要跑profile时,log_path指向存speed的目录
cd
${
CUR_DIR
}
sed
-i
'/set\ -xe/d'
run_benchmark.sh
#cd **
pushd
../../../examples/aishell/s1
# 1 安装该模型需要的依赖 (如需开启优化策略请注明)
...
...
@@ -11,26 +15,33 @@ pushd ../../../examples/aishell/s1
source
path.sh
source
${
MAIN_ROOT
}
/utils/parse_options.sh
||
exit
1
;
mkdir
-p
conf/benchmark
#yq e ".training.accum_grad=1" conf/conformer.yaml > conf/benchmark/conformer.yaml
cp
conf/conformer.yaml conf/benchmark/conformer.yaml
sed
-i
"s/ accum_grad: 2/ accum_grad: 1/g"
conf/benchmark/conformer.yaml
fp_item_list
=(
fp32
)
bs_item
=(
16 30
)
config_path
=
conf/conformer.yaml
config_path
=
conf/
benchmark/
conformer.yaml
seed
=
0
output
=
exp/conformer
profiler_options
=
None
model_item
=
conformer
for
fp_item
in
${
fp_item_list
[@]
}
;
do
for
b
atch_size
in
${
bs_item
[@]
}
for
b
s_item
in
${
bs_item
[@]
}
do
rm
exp
-rf
log_name
=
speech_
${
model_item
}
_bs
${
bs_item
}
_
${
fp_item
}
# 如:clas_MobileNetv1_mp_bs32_fp32_8
echo
"index is speed, 8gpus, run_mode is multi_process, begin, conformer"
run_mode
=
mp
ngpu
=
8
CUDA_VISIBLE_DEVICES
=
0,1,2,3,4,5,6,7 bash
${
CUR_DIR
}
/run_benchmark.sh
${
run_mode
}
${
config_path
}
${
output
}
${
seed
}
${
ngpu
}
${
profiler_options
}
${
batch_size
}
${
fp_item
}
${
CUR_DIR
}
rm
exp
-rf
echo
"index is speed, 1gpus, begin, conformer"
CUDA_VISIBLE_DEVICES
=
0,1,2,3,4,5,6,7 bash
${
CUR_DIR
}
/run_benchmark.sh
${
run_mode
}
${
config_path
}
${
output
}
${
seed
}
${
ngpu
}
${
profiler_options
}
${
bs_item
}
${
fp_item
}
${
model_item
}
|
tee
${
log_path
}
/
${
log_name
}
_speed_8gpus8p 2>&1
sleep
60
log_name
=
speech_
${
model_item
}
_bs
${
bs_item
}
_
${
fp_item
}
# 如:clas_MobileNetv1_mp_bs32_fp32_8
echo
"index is speed, 1gpus, begin,
${
log_name
}
"
run_mode
=
sp
ngpu
=
1
CUDA_VISIBLE_DEVICES
=
0 bash
${
CUR_DIR
}
/run_benchmark.sh
${
run_mode
}
${
config_path
}
${
output
}
${
seed
}
${
ngpu
}
${
profiler_options
}
${
batch_size
}
${
fp_item
}
${
CUR_DIR
}
CUDA_VISIBLE_DEVICES
=
0 bash
${
CUR_DIR
}
/run_benchmark.sh
${
run_mode
}
${
config_path
}
${
output
}
${
seed
}
${
ngpu
}
${
profiler_options
}
${
bs_item
}
${
fp_item
}
${
model_item
}
|
tee
${
log_path
}
/
${
log_name
}
_speed_1gpus 2>&1
# (5min)
sleep
60
done
done
...
...
tests/benchmark/conformer/run_benchmark.sh
浏览文件 @
6b129661
...
...
@@ -12,17 +12,24 @@ function _set_params(){
profiler_options
=
${
6
:-
"None"
}
batch_size
=
${
7
:-
"32"
}
fp_item
=
${
8
:-
"fp32"
}
TRAIN_LOG_DIR
=
${
9
:-
$(
pwd
)
}
model_item
=
${
9
:-
"conformer"
}
benchmark_max_step
=
0
run_log_path
=
${
TRAIN_LOG_DIR
:-
$(
pwd
)
}
# TRAIN_LOG_DIR 后续QA设置该参数
# 添加日志解析需要的参数
base_batch_size
=
${
batch_size
}
mission_name
=
"语音识别"
direction_id
=
"1"
ips_unit
=
"sent./sec"
skip_steps
=
10
# 解析日志,有些模型前几个step耗时长,需要跳过 (必填)
keyword
=
"ips:"
# 解析日志,筛选出数据所在行的关键字 (必填)
index
=
"1"
model_name
=
${
model_item
}
_bs
${
batch_size
}
_
${
fp_item
}
# 以下不用修改
device
=
${
CUDA_VISIBLE_DEVICES
//,/
}
arr
=(
${
device
}
)
num_gpu_devices
=
${#
arr
[*]
}
log_file
=
${
run_log_path
}
/recoder_
${
run_mode
}
_bs
${
batch_size
}
_
${
fp_item
}
_ngpu
${
ngpu
}
.txt
log_file
=
${
run_log_path
}
/recoder_
${
model_item
}
_
${
run_mode
}
_bs
${
batch_size
}
_
${
fp_item
}
_ngpu
${
ngpu
}
}
function
_train
(){
...
...
@@ -36,11 +43,9 @@ function _train(){
--benchmark-batch-size
${
batch_size
}
--benchmark-max-step
${
benchmark_max_step
}
"
echo
"run_mode "
${
run_mode
}
case
${
run_mode
}
in
sp
)
train_cmd
=
"python
3
-u
${
BIN_DIR
}
/train.py "
${
train_cmd
}
;;
mp
)
train_cmd
=
"python
3
-u
${
BIN_DIR
}
/train.py "
${
train_cmd
}
;;
sp
)
train_cmd
=
"python -u
${
BIN_DIR
}
/train.py "
${
train_cmd
}
;;
mp
)
train_cmd
=
"python -u
${
BIN_DIR
}
/train.py "
${
train_cmd
}
;;
*
)
echo
"choose run_mode(sp or mp)"
;
exit
1
;
esac
echo
${
train_cmd
}
...
...
@@ -61,5 +66,8 @@ function _train(){
fi
}
source
${
BENCHMARK_ROOT
}
/scripts/run_model.sh
# 在该脚本中会对符合benchmark规范的log使用analysis.py 脚本进行性能数据解析;该脚本在连调时可从benchmark repo中下载https://github.com/PaddlePaddle/benchmark/blob/master/scripts/run_model.sh;如果不联调只想要产出训练log可以注掉本行,提交时需打开
_set_params
$@
_train
# _train # 如果只想产出训练log,不解析,可取消注释
_run
# 该函数在run_model.sh中,执行时会调用_train; 如果不联调只想要产出训练log可以注掉本行,提交时需打开
编辑
预览
Markdown
is supported
0%
请重试
或
添加新附件
.
添加附件
取消
You are about to add
0
people
to the discussion. Proceed with caution.
先完成此消息的编辑!
取消
想要评论请
注册
或
登录