Unverified commit fddef391 authored by xiaoting, committed by GitHub

Merge branch 'dygraph' into autolog_rec

......@@ -29,7 +29,7 @@ deploy/hubserving/ocr_system/
### 1. Prepare the environment
```shell
# Install paddlehub
pip3 install paddlehub==1.8.3 --upgrade -i https://pypi.tuna.tsinghua.edu.cn/simple
pip3 install paddlehub==2.1.0 --upgrade -i https://pypi.tuna.tsinghua.edu.cn/simple
```
### 2. Download the inference model
......
......@@ -30,7 +30,7 @@ The following steps take the 2-stage series service as an example. If only the d
### 1. Prepare the environment
```shell
# Install paddlehub
pip3 install paddlehub==1.8.3 --upgrade -i https://pypi.tuna.tsinghua.edu.cn/simple
pip3 install paddlehub==2.1.0 --upgrade -i https://pypi.tuna.tsinghua.edu.cn/simple
```
### 2. Download inference model
......
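Once the service module and the inference model are in place, the module is installed and launched through PaddleHub's CLI. A minimal sketch — the module name `ocr_system` comes from the path shown above; the port number is an assumed example:
```shell
# Install the service module from the repository checkout
hub install deploy/hubserving/ocr_system/
# Start the service; -p sets the port (8868 is an assumed example)
hub serving start -m ocr_system -p 8868
```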
......@@ -101,7 +101,7 @@ def main():
quanter = QAT(config=quant_config)
quanter.quantize(model)
init_model(config, model, logger)
init_model(config, model)
model.eval()
# build metric
......@@ -113,7 +113,7 @@ def main():
use_srn = config['Architecture']['algorithm'] == "SRN"
model_type = config['Architecture']['model_type']
# start eval
metirc = program.eval(model, valid_dataloader, post_process_class,
metric = program.eval(model, valid_dataloader, post_process_class,
eval_class, model_type, use_srn)
logger.info('metric eval ***************')
......
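The hunk above wraps the model with PaddleSlim's `QAT` quantizer before evaluation. For orientation, exporting such a quantization-aware model for inference goes through the export script that appears later in this diff; a sketch, with assumed checkpoint and output paths:
```shell
# Export a quantization-aware-trained detection model (paths are illustrative)
python3 deploy/slim/quantization/export_model.py \
    -c configs/det/det_mv3_db.yml \
    -o Global.checkpoints=./output/quant_model/best_accuracy \
       Global.save_inference_dir=./output/quant_inference_model
```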
......@@ -375,7 +375,9 @@ PaddleOCR currently supports recognition of 80 languages (besides Chinese); `configs/rec/multi
For more supported languages, please refer to: [Multi-language models](https://github.com/PaddlePaddle/PaddleOCR/blob/release/2.1/doc/doc_ch/multi_languages.md#%E8%AF%AD%E7%A7%8D%E7%BC%A9%E5%86%99)
The multi-language models are trained the same way as the Chinese model; the training set in each case is 1M (100w) synthetic images. A small set of fonts can be downloaded from [Baidu Netdisk](https://pan.baidu.com/s/1bS_u207Rm7YbY33wOECKDA), extraction code: frgi.
The multi-language models are trained the same way as the Chinese model; the training set in each case is 1M (100w) synthetic images. A small set of fonts can be downloaded in either of the following two ways.
* [Baidu Netdisk](https://pan.baidu.com/s/1bS_u207Rm7YbY33wOECKDA), extraction code: frgi.
* [Google Drive](https://drive.google.com/file/d/18cSWX7wXSy4G0tbKJ0d9PuIaiwRLHpjA/view)
If you want to fine-tune on top of the existing models, modify the configuration file as described below:
......
......@@ -375,7 +375,9 @@ Currently, the multi-language algorithms supported by PaddleOCR are:
For more supported languages, please refer to : [Multi-language model](https://github.com/PaddlePaddle/PaddleOCR/blob/release/2.1/doc/doc_en/multi_languages_en.md#4-support-languages-and-abbreviations)
The multi-language model training method is the same as for the Chinese model. The training set is 100w synthetic images. A small amount of fonts and test data can be downloaded from [Baidu Netdisk](https://pan.baidu.com/s/1bS_u207Rm7YbY33wOECKDA), extraction code: frgi.
The multi-language model training method is the same as for the Chinese model. The training set is 100w synthetic images. A small amount of fonts and test data can be downloaded in either of the following two ways.
* [Baidu Netdisk](https://pan.baidu.com/s/1bS_u207Rm7YbY33wOECKDA), extraction code: frgi.
* [Google Drive](https://drive.google.com/file/d/18cSWX7wXSy4G0tbKJ0d9PuIaiwRLHpjA/view)
If you want to finetune on the basis of the existing model effect, please refer to the following instructions to modify the configuration file:
......
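To make the fine-tuning note concrete: training starts from one of the multi-language configs with pretrained weights passed via `-o`. A sketch, where the config name and weight path are assumptions rather than values taken from this diff:
```shell
# Fine-tune a multi-language recognition model from existing weights (illustrative paths)
python3 tools/train.py \
    -c configs/rec/multi_language/rec_french_lite_train.yml \
    -o Global.pretrained_model=./pretrain_models/french_mobile_v2.0_rec_train/best_accuracy
```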
model_name:ocr_det
python:python3.7
gpu_list:0|0,1
Global.auto_cast:False
Global.auto_cast:null
Global.epoch_num:10
Global.save_model_dir:./output/
Global.save_inference_dir:./output/
Train.loader.batch_size_per_card:
Global.use_gpu
Global.pretrained_model
Global.use_gpu:
Global.pretrained_model:null
trainer:norm|pact
norm_train:tools/train.py -c configs/det/det_mv3_db.yml -o Global.pretrained_model=./pretrain_models/MobileNetV3_large_x0_5_pretrained
......@@ -17,6 +16,8 @@ distill_train:null
eval:tools/eval.py -c configs/det/det_mv3_db.yml -o
Global.save_inference_dir:./output/
Global.pretrained_model:
norm_export:tools/export_model.py -c configs/det/det_mv3_db.yml -o
quant_export:deploy/slim/quantization/export_model.py -c configs/det/det_mv3_db.yml -o
fpgm_export:deploy/slim/prune/export_prune_model.py
......@@ -29,7 +30,6 @@ inference:tools/infer/predict_det.py
--rec_batch_num:1
--use_tensorrt:True|False
--precision:fp32|fp16|int8
--det_model_dir
--image_dir
--save_log_path
--det_model_dir:./inference/ch_ppocr_mobile_v2.0_det_infer/
--image_dir:./inference/ch_det_data_50/all-sum-510/
--save_log_path:./test/output/
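Each `key:value` line in this params file is spliced into the commands assembled by test.sh, so one expansion of the file above is an inference call like the following (a sketch; the test loops iterate over the `|`-separated alternatives):
```shell
# One concrete expansion of the params file (flags and paths taken from the lines above)
python3.7 tools/infer/predict_det.py \
    --use_tensorrt=False --precision=fp32 \
    --det_model_dir=./inference/ch_ppocr_mobile_v2.0_det_infer/ \
    --image_dir=./inference/ch_det_data_50/all-sum-510/ \
    --save_log_path=./test/output/ \
    --benchmark=True
```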
......@@ -26,9 +26,9 @@ IFS=$'\n'
# The training params
model_name=$(func_parser_value "${lines[0]}")
train_model_list=$(func_parser_value "${lines[0]}")
trainer_list=$(func_parser_value "${lines[10]}")
echo $train_model_list
# MODE must be one of ['lite_train_infer' 'whole_infer' 'whole_train_infer']
MODE=$2
# prepare pretrained weights and dataset
......@@ -68,8 +68,8 @@ else
rm -rf ./train_data/icdar2015
wget -nc -P ./train_data https://paddleocr.bj.bcebos.com/dygraph_v2.0/test/ch_det_data_50.tar
if [ ${model_name} = "ocr_det" ]; then
eval_model_name="ch_ppocr_mobile_v2.0_det_train"
wget -nc -P ./inference https://paddleocr.bj.bcebos.com/dygraph_v2.0/ch/ch_ppocr_mobile_v2.0_det_train.tar
eval_model_name="ch_ppocr_mobile_v2.0_det_infer"
wget -nc -P ./inference https://paddleocr.bj.bcebos.com/dygraph_v2.0/ch/ch_ppocr_mobile_v2.0_det_infer.tar
cd ./inference && tar xf ${eval_model_name}.tar && cd ../
else
eval_model_name="ch_ppocr_mobile_v2.0_rec_train"
......@@ -102,7 +102,7 @@ for train_model in ${train_model_list[*]}; do
# eval
for slim_trainer in ${trainer_list[*]}; do
if [ ${slim_trainer} = "norm" ]; then
if [ ${model_name} = "ocr_det" ]; then
if [ ${model_name} = "det" ]; then
eval_model_name="ch_ppocr_mobile_v2.0_det_train"
wget -nc -P ./inference https://paddleocr.bj.bcebos.com/dygraph_v2.0/ch/ch_ppocr_mobile_v2.0_det_train.tar
cd ./inference && tar xf ${eval_model_name}.tar && cd ../
......@@ -112,7 +112,7 @@ for train_model in ${train_model_list[*]}; do
cd ./inference && tar xf ${eval_model_name}.tar && cd ../
fi
elif [ ${slim_trainer} = "pact" ]; then
if [ ${model_name} = "ocr_det" ]; then
if [ ${model_name} = "det" ]; then
eval_model_name="ch_ppocr_mobile_v2.0_det_quant_train"
wget -nc -P ./inference https://paddleocr.bj.bcebos.com/dygraph_v2.0/slim/ch_ppocr_mobile_v2.0_det_quant_train.tar
cd ./inference && tar xf ${eval_model_name}.tar && cd ../
......@@ -122,7 +122,7 @@ for train_model in ${train_model_list[*]}; do
cd ./inference && tar xf ${eval_model_name}.tar && cd ../
fi
elif [ ${slim_trainer} = "distill" ]; then
if [ ${model_name} = "ocr_det" ]; then
if [ ${model_name} = "det" ]; then
eval_model_name="ch_ppocr_mobile_v2.0_det_distill_train"
wget -nc -P ./inference https://paddleocr.bj.bcebos.com/dygraph_v2.0/slim/ch_ppocr_mobile_v2.0_det_distill_train.tar
cd ./inference && tar xf ${eval_model_name}.tar && cd ../
......@@ -132,7 +132,7 @@ for train_model in ${train_model_list[*]}; do
cd ./inference && tar xf ${eval_model_name}.tar && cd ../
fi
elif [ ${slim_trainer} = "fpgm" ]; then
if [ ${model_name} = "ocr_det" ]; then
if [ ${model_name} = "det" ]; then
eval_model_name="ch_ppocr_mobile_v2.0_det_prune_train"
wget -nc -P ./inference https://paddleocr.bj.bcebos.com/dygraph_v2.0/slim/ch_ppocr_mobile_v2.0_det_prune_train.tar
cd ./inference && tar xf ${eval_model_name}.tar && cd ../
......
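prepare.sh is driven by the same params file plus a MODE argument (see `MODE=$2` above), and is typically paired with test.sh. A usage sketch, where the params file name is an assumption:
```shell
# MODE is one of: lite_train_infer, whole_infer, whole_train_infer, infer
bash tests/prepare.sh tests/ocr_det_params.txt lite_train_infer
bash tests/test.sh tests/ocr_det_params.txt lite_train_infer
```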
......@@ -41,59 +41,51 @@ gpu_list=$(func_parser_value "${lines[2]}")
autocast_list=$(func_parser_value "${lines[3]}")
autocast_key=$(func_parser_key "${lines[3]}")
epoch_key=$(func_parser_key "${lines[4]}")
epoch_num=$(func_parser_value "${lines[4]}")
save_model_key=$(func_parser_key "${lines[5]}")
save_infer_key=$(func_parser_key "${lines[6]}")
train_batch_key=$(func_parser_key "${lines[7]}")
train_use_gpu_key=$(func_parser_key "${lines[8]}")
pretrain_model_key=$(func_parser_key "${lines[9]}")
trainer_list=$(func_parser_value "${lines[10]}")
norm_trainer=$(func_parser_value "${lines[11]}")
pact_trainer=$(func_parser_value "${lines[12]}")
fpgm_trainer=$(func_parser_value "${lines[13]}")
distill_trainer=$(func_parser_value "${lines[14]}")
eval_py=$(func_parser_value "${lines[15]}")
norm_export=$(func_parser_value "${lines[16]}")
pact_export=$(func_parser_value "${lines[17]}")
fpgm_export=$(func_parser_value "${lines[18]}")
distill_export=$(func_parser_value "${lines[19]}")
inference_py=$(func_parser_value "${lines[20]}")
use_gpu_key=$(func_parser_key "${lines[21]}")
use_gpu_list=$(func_parser_value "${lines[21]}")
use_mkldnn_key=$(func_parser_key "${lines[22]}")
use_mkldnn_list=$(func_parser_value "${lines[22]}")
cpu_threads_key=$(func_parser_key "${lines[23]}")
cpu_threads_list=$(func_parser_value "${lines[23]}")
batch_size_key=$(func_parser_key "${lines[24]}")
batch_size_list=$(func_parser_value "${lines[24]}")
use_trt_key=$(func_parser_key "${lines[25]}")
use_trt_list=$(func_parser_value "${lines[25]}")
precision_key=$(func_parser_key "${lines[26]}")
precision_list=$(func_parser_value "${lines[26]}")
model_dir_key=$(func_parser_key "${lines[27]}")
image_dir_key=$(func_parser_key "${lines[28]}")
save_log_key=$(func_parser_key "${lines[29]}")
train_batch_key=$(func_parser_key "${lines[6]}")
train_use_gpu_key=$(func_parser_key "${lines[7]}")
pretrain_model_key=$(func_parser_key "${lines[8]}")
pretrain_model_value=$(func_parser_value "${lines[8]}")
trainer_list=$(func_parser_value "${lines[9]}")
norm_trainer=$(func_parser_value "${lines[10]}")
pact_trainer=$(func_parser_value "${lines[11]}")
fpgm_trainer=$(func_parser_value "${lines[12]}")
distill_trainer=$(func_parser_value "${lines[13]}")
eval_py=$(func_parser_value "${lines[14]}")
save_infer_key=$(func_parser_key "${lines[15]}")
export_weight=$(func_parser_key "${lines[16]}")
norm_export=$(func_parser_value "${lines[17]}")
pact_export=$(func_parser_value "${lines[18]}")
fpgm_export=$(func_parser_value "${lines[19]}")
distill_export=$(func_parser_value "${lines[20]}")
inference_py=$(func_parser_value "${lines[21]}")
use_gpu_key=$(func_parser_key "${lines[22]}")
use_gpu_list=$(func_parser_value "${lines[22]}")
use_mkldnn_key=$(func_parser_key "${lines[23]}")
use_mkldnn_list=$(func_parser_value "${lines[23]}")
cpu_threads_key=$(func_parser_key "${lines[24]}")
cpu_threads_list=$(func_parser_value "${lines[24]}")
batch_size_key=$(func_parser_key "${lines[25]}")
batch_size_list=$(func_parser_value "${lines[25]}")
use_trt_key=$(func_parser_key "${lines[26]}")
use_trt_list=$(func_parser_value "${lines[26]}")
precision_key=$(func_parser_key "${lines[27]}")
precision_list=$(func_parser_value "${lines[27]}")
infer_model_key=$(func_parser_key "${lines[28]}")
infer_model=$(func_parser_value "${lines[28]}")
image_dir_key=$(func_parser_key "${lines[29]}")
infer_img_dir=$(func_parser_value "${lines[29]}")
save_log_key=$(func_parser_key "${lines[30]}")
LOG_PATH="./test/output"
mkdir -p ${LOG_PATH}
status_log="${LOG_PATH}/results.log"
if [ ${MODE} = "lite_train_infer" ]; then
export infer_img_dir="./train_data/icdar2015/text_localization/ch4_test_images/"
export epoch_num=10
elif [ ${MODE} = "whole_infer" ]; then
export infer_img_dir="./train_data/icdar2015/text_localization/ch4_test_images/"
export epoch_num=10
elif [ ${MODE} = "whole_train_infer" ]; then
export infer_img_dir="./train_data/icdar2015/text_localization/ch4_test_images/"
export epoch_num=300
else
export infer_img_dir="./inference/ch_det_data_50/all-sum-510"
export infer_model_dir="./inference/ch_ppocr_mobile_v2.0_det_train/best_accuracy"
fi
function func_inference(){
IFS='|'
......@@ -109,8 +101,8 @@ function func_inference(){
for use_mkldnn in ${use_mkldnn_list[*]}; do
for threads in ${cpu_threads_list[*]}; do
for batch_size in ${batch_size_list[*]}; do
_save_log_path="${_log_path}/infer_cpu_usemkldnn_${use_mkldnn}_threads_${threads}_batchsize_${batch_size}"
command="${_python} ${_script} ${use_gpu_key}=${use_gpu} ${use_mkldnn_key}=${use_mkldnn} ${cpu_threads_key}=${threads} ${model_dir_key}=${_model_dir} ${batch_size_key}=${batch_size} ${image_dir_key}=${_img_dir} ${save_log_key}=${_save_log_path} --benchmark=True"
_save_log_path="${_log_path}/infer_cpu_usemkldnn_${use_mkldnn}_threads_${threads}_batchsize_${batch_size}.log"
command="${_python} ${_script} ${use_gpu_key}=${use_gpu} ${use_mkldnn_key}=${use_mkldnn} ${cpu_threads_key}=${threads} ${infer_model_key}=${_model_dir} ${batch_size_key}=${batch_size} ${image_dir_key}=${_img_dir} ${save_log_key}=${_save_log_path} --benchmark=True"
eval $command
status_check $? "${command}" "${status_log}"
done
......@@ -123,8 +115,8 @@ function func_inference(){
continue
fi
for batch_size in ${batch_size_list[*]}; do
_save_log_path="${_log_path}/infer_gpu_usetrt_${use_trt}_precision_${precision}_batchsize_${batch_size}"
command="${_python} ${_script} ${use_gpu_key}=${use_gpu} ${use_trt_key}=${use_trt} ${precision_key}=${precision} ${model_dir_key}=${_model_dir} ${batch_size_key}=${batch_size} ${image_dir_key}=${_img_dir} ${save_log_key}=${_save_log_path} --benchmark=True"
_save_log_path="${_log_path}/infer_gpu_usetrt_${use_trt}_precision_${precision}_batchsize_${batch_size}.log"
command="${_python} ${_script} ${use_gpu_key}=${use_gpu} ${use_trt_key}=${use_trt} ${precision_key}=${precision} ${infer_model_key}=${_model_dir} ${batch_size_key}=${batch_size} ${image_dir_key}=${_img_dir} ${save_log_key}=${_save_log_path} --benchmark=True"
eval $command
status_check $? "${command}" "${status_log}"
done
......@@ -138,12 +130,13 @@ if [ ${MODE} != "infer" ]; then
IFS="|"
for gpu in ${gpu_list[*]}; do
train_use_gpu=True
use_gpu=True
if [ ${gpu} = "-1" ];then
train_use_gpu=False
use_gpu=False
env=""
elif [ ${#gpu} -le 1 ];then
env="export CUDA_VISIBLE_DEVICES=${gpu}"
eval ${env}
elif [ ${#gpu} -le 15 ];then
IFS=","
array=(${gpu})
......@@ -155,6 +148,7 @@ for gpu in ${gpu_list[*]}; do
ips=${array[0]}
gpu=${array[1]}
IFS="|"
env=" "
fi
for autocast in ${autocast_list[*]}; do
for trainer in ${trainer_list[*]}; do
......@@ -179,13 +173,32 @@ for gpu in ${gpu_list[*]}; do
continue
fi
save_log="${LOG_PATH}/${trainer}_gpus_${gpu}_autocast_${autocast}"
if [ ${#gpu} -le 2 ];then # epoch_num #TODO
cmd="${python} ${run_train} ${train_use_gpu_key}=${train_use_gpu} ${autocast_key}=${autocast} ${epoch_key}=${epoch_num} ${save_model_key}=${save_log} "
elif [ ${#gpu} -le 15 ];then
cmd="${python} -m paddle.distributed.launch --gpus=${gpu} ${run_train} ${autocast_key}=${autocast} ${epoch_key}=${epoch_num} ${save_model_key}=${save_log}"
# do not set autocast when autocast is null
if [ ${autocast} = "null" ]; then
set_autocast=" "
else
set_autocast="${autocast_key}=${autocast}"
fi
# do not set epoch_num when MODE is whole_train_infer
if [ ${MODE} != "whole_train_infer" ]; then
set_epoch="${epoch_key}=${epoch_num}"
else
set_epoch=" "
fi
# set pretrain
if [ ${pretrain_model_value} != "null" ]; then
set_pretrain="${pretrain_model_key}=${pretrain_model_value}"
else
cmd="${python} -m paddle.distributed.launch --ips=${ips} --gpus=${gpu} ${run_train} ${autocast_key}=${autocast} ${epoch_key}=${epoch_num} ${save_model_key}=${save_log}"
set_pretrain=" "
fi
save_log="${LOG_PATH}/${trainer}_gpus_${gpu}_autocast_${autocast}"
if [ ${#gpu} -le 2 ];then # train with cpu or single gpu
cmd="${python} ${run_train} ${train_use_gpu_key}=${use_gpu} ${save_model_key}=${save_log} ${set_epoch} ${set_pretrain} ${set_autocast}"
elif [ ${#gpu} -le 15 ];then # train with multi-gpu
cmd="${python} -m paddle.distributed.launch --gpus=${gpu} ${run_train} ${save_model_key}=${save_log} ${set_epoch} ${set_pretrain} ${set_autocast}"
else # train with multi-machine
cmd="${python} -m paddle.distributed.launch --ips=${ips} --gpus=${gpu} ${run_train} ${save_model_key}=${save_log} ${set_pretrain} ${set_epoch} ${set_autocast}"
fi
# run train
eval $cmd
......@@ -198,24 +211,27 @@ for gpu in ${gpu_list[*]}; do
# run export model
save_infer_path="${save_log}"
export_cmd="${python} ${run_export} ${save_model_key}=${save_log} ${pretrain_model_key}=${save_log}/latest ${save_infer_key}=${save_infer_path}"
export_cmd="${python} ${run_export} ${save_model_key}=${save_log} ${export_weight}=${save_log}/latest ${save_infer_key}=${save_infer_path}"
eval $export_cmd
status_check $? "${export_cmd}" "${status_log}"
# run inference
eval $env
save_infer_path="${save_log}"
func_inference "${python}" "${inference_py}" "${save_infer_path}" "${LOG_PATH}" "${infer_img_dir}"
eval "unset CUDA_VISIBLE_DEVICES"
done
done
done
else
save_infer_path="${LOG_PATH}/${MODE}"
run_export=${norm_export}
export_cmd="${python} ${run_export} ${save_model_key}=${save_infer_path} ${pretrain_model_key}=${infer_model_dir} ${save_infer_key}=${save_infer_path}"
eval $export_cmd
status_check $? "${export_cmd}" "${status_log}"
GPUID=$3
if [ ${#GPUID} -le 0 ];then
env=" "
else
env="export CUDA_VISIBLE_DEVICES=${GPUID}"
fi
echo $env
# run inference
func_inference "${python}" "${inference_py}" "${save_infer_path}" "${LOG_PATH}" "${infer_img_dir}"
func_inference "${python}" "${inference_py}" "${infer_model}" "${LOG_PATH}" "${infer_img_dir}"
fi
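Throughout test.sh, `func_parser_key` and `func_parser_value` split one `key:value` line of the params file into its two halves. They are defined in the test utilities rather than in this diff; a minimal sketch of the assumed implementation:
```shell
# Return the part before ':' of a "key:value" params line (assumed implementation)
function func_parser_key(){
    strs=$1
    IFS=":"
    array=(${strs})
    echo ${array[0]}
}
# Return the part after ':' of a "key:value" params line (assumed implementation)
function func_parser_value(){
    strs=$1
    IFS=":"
    array=(${strs})
    echo ${array[1]}
}
```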
......@@ -19,7 +19,29 @@
### 2.1 Training
TBD
#### Data preparation
Training uses the public [PubTabNet](https://arxiv.org/abs/1911.10683) dataset, which can be downloaded from the [official site](https://github.com/ibm-aur-nlp/PubTabNet). PubTabNet contains roughly 500k images of tables, together with HTML-format annotations for each image.
#### Start training
*If you installed the CPU version, set the `use_gpu` field in the configuration file to false.*
```shell
# Single-machine, single-GPU training
python3 tools/train.py -c configs/table/table_mv3.yml
# Single-machine, multi-GPU training; set the GPU IDs to use with --gpus
python3 -m paddle.distributed.launch --gpus '0,1,2,3' tools/train.py -c configs/table/table_mv3.yml
```
In the commands above, -c selects configs/table/table_mv3.yml as the training configuration file. For a detailed explanation of the configuration file, see [this document](./config.md).
#### Resume training
If the training program is interrupted and you want to load the interrupted model to resume training, specify the path of the model to load via Global.checkpoints:
```shell
python3 tools/train.py -c configs/table/table_mv3.yml -o Global.checkpoints=./your/trained/model
```
**Note**: `Global.checkpoints` takes precedence over `Global.pretrain_weights`. When both parameters are specified, the model given by `Global.checkpoints` is loaded first; if that path is invalid, the model given by `Global.pretrain_weights` is loaded instead.
### 2.2 Evaluation
First cd into the PaddleOCR/ppstructure directory.
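The evaluation steps themselves are elided here. As a generic sketch only, the standard eval entry point can be pointed at the table config with the trained checkpoint (the dedicated ppstructure evaluation flow, if any, may differ):
```shell
# Generic evaluation sketch run from the PaddleOCR root; the actual ppstructure
# table evaluation flow is elided in this diff and may use its own script
python3 tools/eval.py -c configs/table/table_mv3.yml -o Global.checkpoints=./your/trained/model
```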
......
......@@ -175,7 +175,7 @@ class TextDetector(object):
st = time.time()
if args.benchmark:
if self.args.benchmark:
self.autolog.times.start()
data = transform(data, self.preprocess_op)
......@@ -186,7 +186,7 @@ class TextDetector(object):
shape_list = np.expand_dims(shape_list, axis=0)
img = img.copy()
if args.benchmark:
if self.args.benchmark:
self.autolog.times.stamp()
self.input_tensor.copy_from_cpu(img)
......@@ -195,7 +195,7 @@ class TextDetector(object):
for output_tensor in self.output_tensors:
output = output_tensor.copy_to_cpu()
outputs.append(output)
if args.benchmark:
if self.args.benchmark:
self.autolog.times.stamp()
preds = {}
......@@ -220,7 +220,7 @@ class TextDetector(object):
else:
dt_boxes = self.filter_tag_det_res(dt_boxes, ori_im.shape)
if args.benchmark:
if self.args.benchmark:
self.autolog.times.end(stamp=True)
et = time.time()
return dt_boxes, et - st
......
......@@ -174,8 +174,6 @@ def main(args):
logger.info("The predict total time is {}".format(time.time() - _st))
logger.info("\nThe predict total time is {}".format(total_time))
img_num = text_sys.text_detector.det_times.img_num
if __name__ == "__main__":
args = utility.parse_args()
......
......@@ -37,7 +37,7 @@ def init_args():
parser.add_argument("--use_gpu", type=str2bool, default=True)
parser.add_argument("--ir_optim", type=str2bool, default=True)
parser.add_argument("--use_tensorrt", type=str2bool, default=False)
parser.add_argument("--min_subgraph_size", type=int, default=3)
parser.add_argument("--min_subgraph_size", type=int, default=10)
parser.add_argument("--precision", type=str, default="fp32")
parser.add_argument("--gpu_mem", type=int, default=500)
......@@ -164,7 +164,7 @@ def create_predictor(args, mode, logger):
config.enable_use_gpu(args.gpu_mem, 0)
if args.use_tensorrt:
config.enable_tensorrt_engine(
precision_mode=inference.PrecisionType.Float32,
precision_mode=precision,
max_batch_size=args.max_batch_size,
min_subgraph_size=args.min_subgraph_size)
# skip the minimum trt subgraph
......
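With this change, the TensorRT engine honors the user-selected `--precision` instead of hard-coding Float32, so FP16/INT8 runs become possible from the CLI. A sketch of such a call, reusing the model and data paths from the test params above (illustrative, not from this diff):
```shell
# FP16 inference through TensorRT (paths reused from the test params above)
python3 tools/infer/predict_det.py \
    --use_gpu=True --use_tensorrt=True --precision=fp16 \
    --det_model_dir=./inference/ch_ppocr_mobile_v2.0_det_infer/ \
    --image_dir=./inference/ch_det_data_50/all-sum-510/
```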