未验证 提交 f656733f 编写于 作者: B Bin Lu 提交者: GitHub

Merge pull request #1754 from Intsigstephon/lite_demo_with_retrieval

Lite demo with faiss retrieval
......@@ -17,7 +17,6 @@ ${info LITE_ROOT: $(abspath ${LITE_ROOT})}
THIRD_PARTY_DIR=third_party
${info THIRD_PARTY_DIR: $(abspath ${THIRD_PARTY_DIR})}
OPENCV_VERSION=opencv4.1.0
OPENCV_LIBS = ${THIRD_PARTY_DIR}/${OPENCV_VERSION}/${ARM_PLAT}/libs/libopencv_imgcodecs.a \
${THIRD_PARTY_DIR}/${OPENCV_VERSION}/${ARM_PLAT}/libs/libopencv_imgproc.a \
......@@ -32,6 +31,8 @@ OPENCV_LIBS = ${THIRD_PARTY_DIR}/${OPENCV_VERSION}/${ARM_PLAT}/libs/libopencv_im
${THIRD_PARTY_DIR}/${OPENCV_VERSION}/${ARM_PLAT}/3rdparty/libs/libtbb.a \
${THIRD_PARTY_DIR}/${OPENCV_VERSION}/${ARM_PLAT}/3rdparty/libs/libcpufeatures.a
FAISS_VERSION=faiss1.5.3
FAISS_LIBS = ${THIRD_PARTY_DIR}/${FAISS_VERSION}/libs/${ARM_PLAT}/libfaiss.a
LITE_LIBS = -L${LITE_ROOT}/cxx/lib/ -lpaddle_light_api_shared
###############################################################
......@@ -45,7 +46,7 @@ LITE_LIBS = -L${LITE_ROOT}/cxx/lib/ -lpaddle_light_api_shared
# 2. Undo comment below line using `libpaddle_api_light_bundled.a`
# LITE_LIBS = ${LITE_ROOT}/cxx/lib/libpaddle_api_light_bundled.a
CXX_LIBS = $(LITE_LIBS) ${OPENCV_LIBS} $(SYSTEM_LIBS)
CXX_LIBS = $(LITE_LIBS) ${OPENCV_LIBS} ${FAISS_LIBS} $(SYSTEM_LIBS)
LOCAL_DIRSRCS=$(wildcard src/*.cc)
LOCAL_SRCS=$(notdir $(LOCAL_DIRSRCS))
......@@ -53,9 +54,17 @@ LOCAL_OBJS=$(patsubst %.cpp, %.o, $(patsubst %.cc, %.o, $(LOCAL_SRCS)))
JSON_OBJS = json_reader.o json_value.o json_writer.o
pp_shitu: $(LOCAL_OBJS) $(JSON_OBJS) fetch_opencv
pp_shitu: $(LOCAL_OBJS) $(JSON_OBJS) fetch_opencv fetch_faiss
$(CC) $(SYSROOT_LINK) $(CXXFLAGS_LINK) $(LOCAL_OBJS) $(JSON_OBJS) -o pp_shitu $(CXX_LIBS) $(LDFLAGS)
fetch_faiss:
@ test -d ${THIRD_PARTY_DIR} || mkdir ${THIRD_PARTY_DIR}
@ test -e ${THIRD_PARTY_DIR}/${FAISS_VERSION}.tar.gz || \
(echo "fetch faiss libs" && \
wget -P ${THIRD_PARTY_DIR} https://paddle-inference-dist.bj.bcebos.com/${FAISS_VERSION}.tar.gz)
@ test -d ${THIRD_PARTY_DIR}/${FAISS_VERSION} || \
tar -xf ${THIRD_PARTY_DIR}/${FAISS_VERSION}.tar.gz -C ${THIRD_PARTY_DIR}
fetch_opencv:
@ test -d ${THIRD_PARTY_DIR} || mkdir ${THIRD_PARTY_DIR}
@ test -e ${THIRD_PARTY_DIR}/${OPENCV_VERSION}.tar.gz || \
......@@ -74,11 +83,12 @@ fetch_json_code:
LOCAL_INCLUDES = -I./ -Iinclude
OPENCV_INCLUDE = -I${THIRD_PARTY_DIR}/${OPENCV_VERSION}/${ARM_PLAT}/include
FAISS_INCLUDE = -I${THIRD_PARTY_DIR}/${FAISS_VERSION}/include
JSON_INCLUDE = -I${THIRD_PARTY_DIR}/jsoncpp_code/include
CXX_INCLUDES = ${LOCAL_INCLUDES} ${INCLUDES} ${OPENCV_INCLUDE} ${JSON_INCLUDE} -I$(LITE_ROOT)/cxx/include
CXX_INCLUDES = ${LOCAL_INCLUDES} ${INCLUDES} ${OPENCV_INCLUDE} ${FAISS_INCLUDE} ${JSON_INCLUDE} -I$(LITE_ROOT)/cxx/include
$(LOCAL_OBJS): %.o: src/%.cc fetch_opencv fetch_json_code
$(LOCAL_OBJS): %.o: src/%.cc fetch_opencv fetch_json_code fetch_faiss
$(CC) $(SYSROOT_COMPLILE) $(CXX_DEFINES) $(CXX_INCLUDES) $(CXX_FLAGS) -c $< -o $@
$(JSON_OBJS): %.o: ${THIRD_PARTY_DIR}/jsoncpp_code/%.cpp fetch_json_code
......
......@@ -191,16 +191,28 @@ cd deploy/lite_shitu
```shell
# 如果测试单张图像
python generate_json_config.py --det_model_path ppshitu_lite_models_v1.0/mainbody_PPLCNet_x2_5_640_quant_v1.0_lite.nb --rec_model_path ppshitu_lite_models_v1.0/general_PPLCNet_x2_5_quant_v1.0_lite.nb --rec_label_path ppshitu_lite_models_v1.0/label.txt --img_path images/demo.jpg
python generate_json_config.py --det_model_path ppshitu_lite_models_v1.0/mainbody_PPLCNet_x2_5_640_quant_v1.0_lite.nb --rec_model_path ppshitu_lite_models_v1.0/general_PPLCNet_x2_5_lite_v1.0_infer.nb --img_path images/demo.jpg
# or
# 如果测试多张图像
python generate_json_config.py --det_model_path ppshitu_lite_models_v1.0/mainbody_PPLCNet_x2_5_640_quant_v1.0_lite.nb --rec_model_path ppshitu_lite_models_v1.0/general_PPLCNet_x2_5_quant_v1.0_lite.nb --rec_label_path ppshitu_lite_models_v1.0/label.txt --img_dir images
python generate_json_config.py --det_model_path ppshitu_lite_models_v1.0/mainbody_PPLCNet_x2_5_640_quant_v1.0_lite.nb --rec_model_path ppshitu_lite_models_v1.0/general_PPLCNet_x2_5_lite_v1.0_infer.nb --img_dir images
# 执行完成后,会在lite_shitu目录下生成shitu_config.json配置文件
```
### 2.3 index字典转换
由于python的检索库字典使用`pickle`进行序列化存储,C++不方便直接读取,因此需要先将其转换为C++可读取的文本格式
```shell
# 下载瓶装饮料数据集
wget https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/rec/data/drink_dataset_v1.0.tar && tar -xf drink_dataset_v1.0.tar
rm -rf drink_dataset_v1.0.tar
# 转化id_map.pkl为id_map.txt
python transform_id_map.py -c ../configs/inference_drink.yaml
```
转换成功后,会在`IndexProcess.index_dir`目录下生成`id_map.txt`
### 2.3 与手机联调
### 2.4 与手机联调
首先需要进行一些准备工作。
1. 准备一台arm8的安卓手机,如果编译的预测库是armv7,则需要arm7的手机,并修改Makefile中`ARM_ABI=arm7`
......@@ -252,6 +264,7 @@ make ARM_ABI=arm8
```shell
mkdir deploy
mv ppshitu_lite_models_v1.0 deploy/
mv drink_dataset_v1.0 deploy/
mv images deploy/
mv shitu_config.json deploy/
cp pp_shitu deploy/
......@@ -265,12 +278,12 @@ cp ../../../cxx/lib/libpaddle_light_api_shared.so deploy/
```shell
deploy/
|-- ppshitu_lite_models_v1.0/
| |--mainbody_PPLCNet_x2_5_640_v1.0_lite.nb 优化后的主体检测模型文件
| |--general_PPLCNet_x2_5_quant_v1.0_lite.nb 优化后的识别模型文件
| |--label.txt 识别模型的label文件
| |--mainbody_PPLCNet_x2_5_lite_v1.0_infer.nb 优化后的主体检测模型文件
| |--general_PPLCNet_x2_5_quant_v1.0_lite.nb 优化后的识别模型文件
|-- images/
| |--demo.jpg 图片文件
| ... 图片文件
|-- drink_dataset_v1.0/ 瓶装饮料demo数据
| |--index 检索index目录
|-- pp_shitu 生成的移动端执行文件
|-- shitu_config.json 执行时参数配置文件
|-- libpaddle_light_api_shared.so Paddle-Lite库文件
......@@ -298,8 +311,10 @@ chmod 777 pp_shitu
如果对代码做了修改,则需要重新编译并push到手机上。
运行效果如下:
![](../../docs/images/ppshitu_lite_demo.png)
```
images/demo.jpg:
result0: bbox[253, 275, 1146, 872], score: 0.974196, label: 伊藤园_果蔬汁
```
## FAQ
Q1:如果想更换模型怎么办,需要重新按照流程走一遍吗?
......
......@@ -130,6 +130,8 @@ def main():
y["type"] = k
config_json["RecPreProcess"]["transform_ops"].append(y)
# set IndexProces
config_json["IndexProcess"] = config_yaml["IndexProcess"]
with open('shitu_config.json', 'w') as fd:
json.dump(config_json, fd, indent=4)
......
......@@ -36,10 +36,9 @@ struct RESULT {
float score;
};
class Recognition {
class FeatureExtract {
public:
explicit Recognition(const Json::Value &config_file) {
explicit FeatureExtract(const Json::Value &config_file) {
MobileConfig config;
if (config_file["Global"]["rec_model_path"].as<std::string>().empty()) {
std::cout << "Please set [rec_model_path] in config file" << std::endl;
......@@ -53,29 +52,8 @@ public:
std::cout << "Please set [rec_label_path] in config file" << std::endl;
exit(-1);
}
LoadLabel(config_file["Global"]["rec_label_path"].as<std::string>());
SetPreProcessParam(config_file["RecPreProcess"]["transform_ops"]);
if (!config_file["Global"].isMember("return_k")){
this->topk = config_file["Global"]["return_k"].as<int>();
}
printf("rec model create!\n");
}
void LoadLabel(std::string path) {
std::ifstream file;
std::vector<std::string> label_list;
file.open(path);
while (file) {
std::string line;
std::getline(file, line);
std::string::size_type pos = line.find(" ");
if (pos != std::string::npos) {
line = line.substr(pos);
}
this->label_list.push_back(line);
}
file.clear();
file.close();
printf("feature extract model create!\n");
}
void SetPreProcessParam(const Json::Value &config_file) {
......@@ -97,19 +75,17 @@ public:
}
}
std::vector<RESULT> RunRecModel(const cv::Mat &img, double &cost_time);
std::vector<RESULT> PostProcess(const float *output_data, int output_size,
cv::Mat &output_image);
void RunRecModel(const cv::Mat &img, double &cost_time, std::vector<float> &feature);
//void PostProcess(std::vector<float> &feature);
cv::Mat ResizeImage(const cv::Mat &img);
void NeonMeanScale(const float *din, float *dout, int size);
private:
std::shared_ptr<PaddlePredictor> predictor;
std::vector<std::string> label_list;
//std::vector<std::string> label_list;
std::vector<float> mean = {0.485f, 0.456f, 0.406f};
std::vector<float> std = {1 / 0.229f, 1 / 0.224f, 1 / 0.225f};
double scale = 0.00392157;
float size = 224;
int topk = 5;
};
} // namespace PPShiTu
......@@ -16,7 +16,7 @@
#include <algorithm>
#include <ctime>
#include <include/recognition.h>
#include <include/feature_extractor.h>
#include <memory>
#include <numeric>
#include <string>
......
// Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#pragma once
#ifdef WIN32
#define OS_PATH_SEP "\\"
#else
#define OS_PATH_SEP "/"
#endif
#include "json/json.h"
#include <cstring>
#include <faiss/Index.h>
#include <faiss/index_io.h>
#include <map>
namespace PPShiTu {
struct SearchResult {
std::vector<faiss::Index::idx_t> I;
std::vector<float> D;
int return_k;
};
class VectorSearch {
public:
explicit VectorSearch(const Json::Value &config) {
// IndexProcess
this->index_dir = config["IndexProcess"]["index_dir"].as<std::string>();
this->return_k = config["IndexProcess"]["return_k"].as<int>();
this->score_thres = config["IndexProcess"]["score_thres"].as<float>();
this->max_query_number = config["Global"]["max_det_results"].as<int>() + 1;
LoadIdMap();
LoadIndexFile();
this->I.resize(this->return_k * this->max_query_number);
this->D.resize(this->return_k * this->max_query_number);
printf("faiss index load success!\n");
};
void LoadIdMap();
void LoadIndexFile();
const SearchResult &Search(float *feature, int query_number);
const std::string &GetLabel(faiss::Index::idx_t ind);
const float &GetThreshold() { return this->score_thres; }
private:
std::string index_dir;
int return_k = 5;
float score_thres = 0.5;
std::map<long int, std::string> id_map;
faiss::Index *index;
int max_query_number = 6;
std::vector<float> D;
std::vector<faiss::Index::idx_t> I;
SearchResult sr;
};
}
......@@ -12,12 +12,12 @@
// See the License for the specific language governing permissions and
// limitations under the License.
#include "include/recognition.h"
#include "include/feature_extractor.h"
namespace PPShiTu {
std::vector<RESULT> Recognition::RunRecModel(const cv::Mat &img,
double &cost_time) {
void FeatureExtract::RunRecModel(const cv::Mat &img,
double &cost_time,
std::vector<float> &feature) {
// Read img
cv::Mat resize_image = ResizeImage(img);
......@@ -38,8 +38,7 @@ std::vector<RESULT> Recognition::RunRecModel(const cv::Mat &img,
// Get output and post process
std::unique_ptr<const Tensor> output_tensor(
std::move(this->predictor->GetOutput(1)));
auto *output_data = output_tensor->data<float>();
std::move(this->predictor->GetOutput(0))); //only one output
auto end = std::chrono::system_clock::now();
auto duration =
std::chrono::duration_cast<std::chrono::microseconds>(end - start);
......@@ -47,17 +46,27 @@ std::vector<RESULT> Recognition::RunRecModel(const cv::Mat &img,
std::chrono::microseconds::period::num /
std::chrono::microseconds::period::den;
//do postprocess
int output_size = 1;
for (auto dim : output_tensor->shape()) {
output_size *= dim;
}
feature.resize(output_size);
output_tensor->CopyToCpu(feature.data());
cv::Mat output_image;
auto results = PostProcess(output_data, output_size, output_image);
return results;
//postprocess include sqrt or binarize.
//PostProcess(feature);
return;
}
void Recognition::NeonMeanScale(const float *din, float *dout, int size) {
// void FeatureExtract::PostProcess(std::vector<float> &feature){
// float feature_sqrt = std::sqrt(std::inner_product(
// feature.begin(), feature.end(), feature.begin(), 0.0f));
// for (int i = 0; i < feature.size(); ++i)
// feature[i] /= feature_sqrt;
// }
void FeatureExtract::NeonMeanScale(const float *din, float *dout, int size) {
if (this->mean.size() != 3 || this->std.size() != 3) {
std::cerr << "[ERROR] mean or scale size must equal to 3\n";
......@@ -99,45 +108,9 @@ void Recognition::NeonMeanScale(const float *din, float *dout, int size) {
}
}
cv::Mat Recognition::ResizeImage(const cv::Mat &img) {
// Scale the crop to the fixed square network input (size x size),
// ignoring the original aspect ratio.
cv::Mat FeatureExtract::ResizeImage(const cv::Mat &img) {
  cv::Mat resized;
  cv::resize(img, resized, cv::Size(this->size, this->size));
  return resized;
}
std::vector<RESULT> Recognition::PostProcess(const float *output_data,
int output_size,
cv::Mat &output_image) {
int max_indices[this->topk];
double max_scores[this->topk];
for (int i = 0; i < this->topk; i++) {
max_indices[i] = 0;
max_scores[i] = 0;
}
for (int i = 0; i < output_size; i++) {
float score = output_data[i];
int index = i;
for (int j = 0; j < this->topk; j++) {
if (score > max_scores[j]) {
index += max_indices[j];
max_indices[j] = index - max_indices[j];
index -= max_indices[j];
score += max_scores[j];
max_scores[j] = score - max_scores[j];
score -= max_scores[j];
}
}
}
std::vector<RESULT> results(this->topk);
for (int i = 0; i < results.size(); i++) {
results[i].class_name = "Unknown";
if (max_indices[i] >= 0 && max_indices[i] < this->label_list.size()) {
results[i].class_name = this->label_list[max_indices[i]];
}
results[i].score = max_scores[i];
results[i].class_id = max_indices[i];
}
return results;
}
}
......@@ -24,9 +24,10 @@
#include <vector>
#include "include/config_parser.h"
#include "include/feature_extractor.h"
#include "include/object_detector.h"
#include "include/preprocess_op.h"
#include "include/recognition.h"
#include "include/vector_search.h"
#include "json/json.h"
Json::Value RT_Config;
......@@ -111,14 +112,18 @@ void DetPredictImage(const std::vector<cv::Mat> &batch_imgs,
}
}
void PrintResult(const std::string &image_path,
std::vector<PPShiTu::ObjectResult> &det_result) {
printf("%s:\n", image_path.c_str());
void PrintResult(std::string &img_path,
std::vector<PPShiTu::ObjectResult> &det_result,
PPShiTu::VectorSearch &vector_search,
PPShiTu::SearchResult &search_result) {
printf("%s:\n", img_path.c_str());
for (int i = 0; i < det_result.size(); ++i) {
int t = i;
printf("\tresult%d: bbox[%d, %d, %d, %d], score: %f, label: %s\n", i,
det_result[i].rect[0], det_result[i].rect[1], det_result[i].rect[2],
det_result[i].rect[3], det_result[i].rec_result[0].score,
det_result[i].rec_result[0].class_name.c_str());
det_result[t].rect[0], det_result[t].rect[1], det_result[t].rect[2],
det_result[t].rect[3], det_result[t].confidence,
vector_search.GetLabel(search_result.I[search_result.return_k * t])
.c_str());
}
}
......@@ -159,11 +164,16 @@ int main(int argc, char **argv) {
RT_Config["Global"]["cpu_num_threads"].as<int>(),
RT_Config["Global"]["batch_size"].as<int>());
// create rec model
PPShiTu::Recognition rec(RT_Config);
PPShiTu::FeatureExtract rec(RT_Config);
PPShiTu::VectorSearch searcher(RT_Config);
// Do inference on input image
std::vector<PPShiTu::ObjectResult> det_result;
std::vector<cv::Mat> batch_imgs;
// for vector search
std::vector<float> feature;
std::vector<float> features;
double rec_time;
if (!RT_Config["Global"]["infer_imgs"].as<std::string>().empty() ||
!img_dir.empty()) {
......@@ -178,8 +188,7 @@ int main(int argc, char **argv) {
return -1;
}
} else {
cv::glob(img_dir,
cv_all_img_paths);
cv::glob(img_dir, cv_all_img_paths);
for (const auto &img_path : cv_all_img_paths) {
all_img_paths.push_back(img_path);
}
......@@ -199,24 +208,25 @@ int main(int argc, char **argv) {
RT_Config["Global"]["max_det_results"].as<int>(), false, &det);
// add the whole image for recognition to improve recall
PPShiTu::ObjectResult result_whole_img = {
{0, 0, srcimg.cols, srcimg.rows}, 0, 1.0};
det_result.push_back(result_whole_img);
// PPShiTu::ObjectResult result_whole_img = {
// {0, 0, srcimg.cols, srcimg.rows}, 0, 1.0};
// det_result.push_back(result_whole_img);
// get rec result
PPShiTu::SearchResult search_result;
for (int j = 0; j < det_result.size(); ++j) {
int w = det_result[j].rect[2] - det_result[j].rect[0];
int h = det_result[j].rect[3] - det_result[j].rect[1];
cv::Rect rect(det_result[j].rect[0], det_result[j].rect[1], w, h);
cv::Mat crop_img = srcimg(rect);
std::vector<PPShiTu::RESULT> result =
rec.RunRecModel(crop_img, rec_time);
det_result[j].rec_result.assign(result.begin(), result.end());
rec.RunRecModel(crop_img, rec_time, feature);
features.insert(features.end(), feature.begin(), feature.end());
}
// rec nms
PPShiTu::nms(det_result,
RT_Config["Global"]["rec_nms_thresold"].as<float>(), true);
PrintResult(img_path, det_result);
// do vectore search
search_result = searcher.Search(features.data(), det_result.size());
PrintResult(img_path, det_result, searcher, search_result);
batch_imgs.clear();
det_result.clear();
}
......
// Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "include/vector_search.h"
#include <cstdio>
#include <faiss/index_io.h>
#include <fstream>
#include <iostream>
#include <regex>
namespace PPShiTu {
// Deserialize the faiss index from "<index_dir>/vector.index".
void VectorSearch::LoadIndexFile() {
  const std::string index_path =
      this->index_dir + OS_PATH_SEP + "vector.index";
  this->index = faiss::read_index(index_path.c_str(), 0);
}
// Load "<index_dir>/id_map.txt" into id_map. Each line must contain exactly
// two whitespace-separated fields: "<numeric_id> <label>". A malformed line
// aborts the program; a missing file silently yields an empty map
// (NOTE(review): GetLabel() will then throw on every lookup -- confirm this
// best-effort behavior is intended).
void VectorSearch::LoadIdMap() {
  std::string file_path = this->index_dir + OS_PATH_SEP + "id_map.txt";
  std::ifstream in(file_path);
  std::string line;
  if (in) {
    // Hoisted out of the loop: the separator pattern never changes.
    const std::regex ws_re("\\s+");
    while (getline(in, line)) {
      std::vector<std::string> v(
          std::sregex_token_iterator(line.begin(), line.end(), ws_re, -1),
          std::sregex_token_iterator());
      if (v.size() != 2) {
        // Fix: a space was missing before "must", producing e.g.
        // ".../id_map.txtmust be 2" in the error output.
        std::cout << "The number of element for each line in : " << file_path
                  << " must be 2, exit the program..." << std::endl;
        exit(1);
      } else {
        this->id_map.insert(std::pair<long int, std::string>(
            std::stol(v[0], nullptr, 10), v[1]));
      }
    }
  }
}
// Run a top-k search for query_number feature vectors (stored contiguously
// in `feature`) and cache the ids/distances in `sr`. The returned reference
// is invalidated by the next call to Search().
const SearchResult &VectorSearch::Search(float *feature, int query_number) {
  const int k = this->return_k;
  // Size the scratch buffers to exactly k results per query.
  this->D.resize(k * query_number);
  this->I.resize(k * query_number);
  this->index->search(query_number, feature, k, this->D.data(),
                      this->I.data());
  this->sr.return_k = k;
  this->sr.D = this->D;
  this->sr.I = this->I;
  return this->sr;
}
// Translate a faiss neighbor id into its human-readable label.
// map::at() deliberately throws std::out_of_range for unknown ids.
const std::string &VectorSearch::GetLabel(faiss::Index::idx_t ind) {
  const std::string &label = this->id_map.at(ind);
  return label;
}
}
\ No newline at end of file
import argparse
import os
import pickle
import yaml
def parse_args():
    """Parse command-line options; -c/--config (path to a YAML file) is required."""
    parser = argparse.ArgumentParser()
    parser.add_argument('-c', '--config', type=str, required=True)
    return parser.parse_args()
def main():
    """Convert the pickled retrieval dictionary (id_map.pkl) into a plain-text
    id_map.txt that the C++ demo can read.

    Reads IndexProcess.index_dir from the YAML config given via -c/--config,
    loads <index_dir>/id_map.pkl, and writes one "<id> <label>" line per entry
    to <index_dir>/id_map.txt.  Exits with status 1 when the config lacks
    IndexProcess.index_dir; raises AssertionError when id_map.pkl is missing.
    """
    args = parse_args()
    with open(args.config) as fd:
        config = yaml.load(fd.read(), yaml.FullLoader)
    try:
        index_dir = config["IndexProcess"]["index_dir"]
    except (KeyError, TypeError):
        # KeyError: key missing; TypeError: config is empty/not a mapping.
        # (Fix: message typo "dose" -> "does"; dropped the unused exception
        # binding and the redundant index_dir = "" pre-assignment.)
        print("The IndexProcess.index_dir in config_file does not exist")
        exit(1)
    id_map_path = os.path.join(index_dir, "id_map.pkl")
    assert os.path.exists(
        id_map_path), "The id_map file does not exist: {}".format(id_map_path)
    with open(id_map_path, "rb") as fd:
        ids = pickle.load(fd)
    with open(os.path.join(index_dir, "id_map.txt"), "w") as fd:
        for k, v in ids.items():
            # Each pickled value is "<path>\t<label>"; keep only the label.
            v = v.split("\t")[1]
            fd.write(str(k) + " " + v + "\n")
    print('Transform id_map success')
# Script entry point: run the id_map conversion when executed directly.
if __name__ == "__main__":
    main()
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册