Commit 2c8f3aa6 authored by leaves-zwx

Merge branch 'dev_quick_dirty_object_detection' of https://github.com/Oneflow-Inc/oneflow into dev_tensor_list
......@@ -16,7 +16,7 @@ DataLoader::DataLoader(const DataLoadOpConf& op_conf, const DataLoadKernelConf&
sampler_ctx_.num_replicas_ = kernel_conf.parallel_num();
sampler_ctx_.rank_ = kernel_conf.parallel_id();
sampler_ctx_.epoch_ = 0;
sampler_ctx_.iter_ = kernel_conf.parallel_id();
sampler_ctx_.offset_ = kernel_conf.parallel_id();
sampler_ctx_.count_ = 0;
dataset_->SubmitSamplerContext(&sampler_ctx_);
load_thrd_ = std::thread([this] {
......
......@@ -2,6 +2,33 @@
namespace oneflow {
namespace data {
namespace {
// eop: epoch offset pair
using eop_cache_func_t = std::function<void(size_t, size_t)>;
using eop_iscached_func_t = std::function<bool(size_t, size_t)>;
std::pair<eop_cache_func_t, eop_iscached_func_t> MakeEpochOffsetPairCacheFunctions() {
using eop_t = std::pair<size_t, size_t>;
static thread_local HashSet<eop_t> epoch_offset_pair_set;
auto Cache = [&](size_t epoch, size_t offset) {
CHECK(epoch_offset_pair_set.insert({epoch, offset}).second);
};
auto IsCachedAndTryClear = [&](size_t epoch, size_t offset) {
auto it = epoch_offset_pair_set.find({epoch, offset});
if (it != epoch_offset_pair_set.end()) {
epoch_offset_pair_set.erase(it);
return true;
}
return false;
};
return {Cache, IsCachedAndTryClear};
}
} // namespace
DataSampler::DataSampler(Dataset* dataset)
: dataset_(dataset), max_count_(0), gen_(dataset->dataset_proto().random_seed()) {}
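The anonymous-namespace helper above returns two closures over one thread-local set of (epoch, offset) pairs: one records a pair, the other tests for it and erases it in the same call. A rough Python sketch of the same idea, purely for illustration (the names make_epoch_offset_cache / cache / is_cached_and_clear are mine, not from the patch):
def make_epoch_offset_cache():
    seen = set()  # stands in for the thread_local HashSet<eop_t>
    def cache(epoch, offset):
        assert (epoch, offset) not in seen  # mirrors CHECK(...insert(...).second)
        seen.add((epoch, offset))
    def is_cached_and_clear(epoch, offset):
        if (epoch, offset) in seen:
            seen.remove((epoch, offset))
            return True
        return False
    return cache, is_cached_and_clear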
......@@ -50,14 +77,14 @@ std::vector<int64_t> DataSampler::FetchBatchIndexSequence(DataSamplerContext* ct
std::vector<int64_t> batch_index_seq(batch_size);
size_t i = 0;
while (i < batch_size) {
if (ctx->iter_ >= dataset()->Size()) {
if (ctx->offset_ >= dataset()->Size()) {
CheckIndexSequenceRanOut(ctx);
ctx->epoch_ += 1;
ctx->iter_ %= dataset()->Size();
ctx->offset_ %= dataset()->Size();
ctx->count_ = 0;
}
batch_index_seq[i] = AcquireGetOrGenEpochIndexSequence(ctx->epoch_).at(ctx->iter_);
ctx->iter_ += ctx->num_replicas_;
batch_index_seq[i] = AcquireGetOrGenEpochIndexSequence(ctx->epoch_).at(ctx->offset_);
ctx->offset_ += ctx->num_replicas_;
ctx->count_ += 1;
i += 1;
}
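For reference, the loop above amounts to a strided walk over the epoch's shuffled index sequence: each rank starts at its own offset, steps by num_replicas_, and rolls over into the next epoch's sequence once the offset passes the dataset size. A minimal Python sketch of that access pattern (all names here are illustrative, not the real API):
def fetch_batch(epoch_index_seq, dataset_size, num_replicas, state, batch_size):
    """state: dict with 'epoch' and 'offset'; 'offset' starts at the rank id."""
    batch = []
    while len(batch) < batch_size:
        if state["offset"] >= dataset_size:
            state["epoch"] += 1
            state["offset"] %= dataset_size
        seq = epoch_index_seq(state["epoch"])  # shuffled indices of this epoch
        batch.append(seq[state["offset"]])
        state["offset"] += num_replicas
    return batch
For example, with dataset_size=10 and num_replicas=4, a rank starting at offset 1 visits positions 1, 5, 9, then wraps to position 3 of the next epoch's sequence.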
......@@ -71,41 +98,51 @@ GroupedDataSampler::GroupedDataSampler(Dataset* dataset)
std::vector<int64_t> GroupedDataSampler::FetchBatchIndexSequence(DataSamplerContext* ctx,
size_t batch_size) {
std::function<void(size_t, size_t)> CacheFetched;
std::function<bool(size_t, size_t)> IsFetched;
std::tie(CacheFetched, IsFetched) = MakeEpochOffsetPairCacheFunctions();
std::vector<int64_t> seq(batch_size);
size_t fetch_count = 0;
bool skip_happened = false;
int64_t group_id = -1;
size_t iter = ctx->iter_;
size_t offset = ctx->offset_;
size_t epoch = ctx->epoch_;
// fetch indices
int64_t first_group_id = -1;
bool skip_happened = false;
while (fetch_count < batch_size) {
if (iter >= dataset()->Size()) {
epoch += 1;
iter %= dataset()->Size();
}
// stop updating ctx once a skip has happened
if (!skip_happened) {
if (ctx->epoch_ == epoch) {
ctx->count_ += 1;
bool is_feteched = IsFetched(epoch, offset);
if (!is_feteched) {
int64_t index = AcquireGetOrGenEpochIndexSequence(epoch).at(offset);
int64_t group_id = group_ids_.at(index);
if (first_group_id == -1) { first_group_id = group_id; }
if (first_group_id == group_id) {
seq.at(fetch_count) = index;
fetch_count += 1;
is_feteched = true;
if (skip_happened) { CacheFetched(epoch, offset); }
} else {
skip_happened = true;
}
}
if (is_feteched && !skip_happened) {
ctx->count_ += 1;
ctx->offset_ += ctx->num_replicas_;
if (ctx->offset_ >= dataset()->Size()) {
CheckIndexSequenceRanOut(ctx);
ctx->epoch_ = epoch;
ctx->count_ = 0;
ctx->offset_ %= dataset()->Size();
ctx->epoch_ += 1;
}
ctx->iter_ = iter;
}
int64_t index = AcquireGetOrGenEpochIndexSequence(epoch).at(iter);
if (group_id == -1) { group_id = group_ids_.at(index); }
if (group_id == group_ids_.at(index)) {
// fetch index with the same group_id
seq.at(fetch_count) = index;
fetch_count += 1;
} else {
// record skip happened
skip_happened = true;
offset += ctx->num_replicas_;
if (offset >= dataset()->Size()) {
epoch += 1;
offset %= dataset()->Size();
}
iter += ctx->num_replicas_;
}
return seq;
}
......
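In plain terms, GroupedDataSampler::FetchBatchIndexSequence keeps each batch homogeneous in group id: the first index taken fixes the group, later positions with a different group id are skipped, and positions consumed while looking ahead past a skip are remembered via the cache pair so a later call does not emit them twice (ctx_ itself only advances over positions taken before the first skip). A much-simplified Python sketch of just the selection rule, ignoring that bookkeeping (names are illustrative):
def fetch_grouped_batch(seq, group_ids, start, step, batch_size):
    """seq: this epoch's shuffled index sequence; group_ids[index] -> group id."""
    batch, first_group, pos = [], None, start
    while len(batch) < batch_size:
        index = seq[pos % len(seq)]
        if first_group is None:
            first_group = group_ids[index]
        if group_ids[index] == first_group:
            batch.append(index)  # same group: take it
        # different group: skipped here; the real code caches (epoch, offset)
        pos += step  # the real code steps by num_replicas_
    return batch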
......@@ -8,7 +8,7 @@ struct DataSamplerContext final {
size_t num_replicas_;
size_t rank_;
size_t epoch_;
size_t iter_;
size_t offset_;
size_t count_;
};
......
#include "oneflow/core/common/str_util.h"
#include "oneflow/core/job_completer/optimizer.h"
namespace oneflow {
......@@ -7,11 +8,27 @@ namespace {
void GenerateOptimizerOpConf(const VariableOp& op, const ParallelConf& parallel_conf,
JobBuilder* job_builder, const LogicalBlobId& diff_lbi_of_var_out,
const LogicalBlobId& total_loss_instance_num_lbi) {
const std::string op_name = op.op_name() + "-momentum";
OperatorConf momentum_var(op.op_conf());
InitializerConf constant_initializer;
constant_initializer.mutable_constant_conf()->set_value(0.f);
*(momentum_var.mutable_variable_conf()->mutable_initializer()) = constant_initializer;
momentum_var.set_name(op.op_name() + "-momentum");
const bool has_snapshot_path =
job_builder->job().job_conf().has_default_initialize_with_snapshot_path();
std::string file_path = "";
if (has_snapshot_path) {
file_path = JoinPath(job_builder->job().job_conf().default_initialize_with_snapshot_path(),
op_name, "out");
}
if (has_snapshot_path && SnapshotFS()->FileExists(file_path)) {
LOG(INFO) << "file_path: " << file_path;
momentum_var.mutable_variable_conf()->mutable_initialize_with_snapshot()->set_path(
JoinPath(job_builder->job().job_conf().default_initialize_with_snapshot_path(), op_name));
momentum_var.mutable_variable_conf()->mutable_initialize_with_snapshot()->set_key("out");
} else {
if (has_snapshot_path) { LOG(INFO) << file_path << " not found, will be initialized"; }
InitializerConf constant_initializer;
constant_initializer.mutable_constant_conf()->set_value(0.f);
*(momentum_var.mutable_variable_conf()->mutable_initializer()) = constant_initializer;
}
momentum_var.set_name(op_name);
momentum_var.mutable_variable_conf()->set_out("out");
job_builder->AddOps(parallel_conf, {momentum_var});
......
......@@ -15,7 +15,6 @@ from rpn import RPNHead, RPNLoss, RPNProposal
from box_head import BoxHead
from mask_head import MaskHead
from eval.bounding_box import BoxList
from eval.box_head_inference import PostProcessor
from eval.mask_head_inference import MaskPostProcessor
from eval.coco import COCODataset
from eval.coco_eval import do_coco_evaluation
......
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
import numpy as np
import torch
from .bounding_box import BoxList
from maskrcnn_benchmark.layers import nms as _box_nms
def boxlist_nms(boxlist, nms_thresh, max_proposals=-1, score_field="scores"):
"""
Performs non-maximum suppression on a boxlist, with scores specified
in a boxlist field via score_field.
Arguments:
boxlist(BoxList)
nms_thresh (float)
max_proposals (int): if > 0, then only the top max_proposals are kept
after non-maximum suppression
score_field (str)
"""
if nms_thresh <= 0:
return boxlist
mode = boxlist.mode
boxlist = boxlist.convert("xyxy")
boxes = boxlist.bbox
score = boxlist.get_field(score_field)
keep = (
_box_nms(
torch.tensor(boxes, device=torch.device("cuda")),
torch.tensor(score, device=torch.device("cuda")),
nms_thresh,
)
.cpu()
.numpy()
)
if max_proposals > 0:
keep = keep[:max_proposals]
boxlist = boxlist[keep]
return boxlist.convert(mode)
def remove_small_boxes(boxlist, min_size):
......
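A small usage sketch for this eval copy of boxlist_nms, which keeps boxes and scores as numpy arrays and only wraps them in CUDA tensors for the nms kernel; it assumes the local eval BoxList mirrors maskrcnn_benchmark's constructor and add_field API, which is not shown in this diff:
import numpy as np
boxes = np.array([[0, 0, 10, 10], [1, 1, 11, 11], [50, 50, 60, 60]], dtype=np.float32)
scores = np.array([0.9, 0.8, 0.7], dtype=np.float32)
boxlist = BoxList(boxes, (100, 100), mode="xyxy")  # assumed signature
boxlist.add_field("scores", scores)
kept = boxlist_nms(boxlist, nms_thresh=0.5)  # the near-duplicate of box 0 is dropped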
......@@ -37,8 +37,15 @@ def has_valid_annotation(anno):
class COCODataset():
def __init__(self, ann_file, remove_images_without_annotations=False, transforms=None):
self.coco = COCO(ann_file)
# sort indices for reproducible results
self.ids = list(sorted(self.coco.imgs.keys()))
to_remove = set([])
for cat_id, _ in self.coco.cats.items():
if cat_id > 80:
to_remove |= set(self.coco.catToImgs[cat_id])
self.ids = list(set(self.ids) - to_remove)
# sort indices for reproducible results
self.ids = sorted(self.ids)
# filter images without detection annotations
......
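The added filter drops every image that carries an annotation whose category id is above 80; pycocotools' catToImgs maps a category id to the image ids annotated with it. A toy illustration of the set arithmetic (the annotation contents below are made up; 84 would be "book" in the standard COCO label map):
cat_to_imgs = {1: [10, 11], 84: [11, 12]}
ids = sorted({10, 11, 12})
to_remove = set()
for cat_id, img_ids in cat_to_imgs.items():
    if cat_id > 80:
        to_remove |= set(img_ids)
ids = sorted(set(ids) - to_remove)  # -> [10]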
import altair as alt
import pandas as pd
import numpy as np
COLUMNS = [
"loss_rpn_box_reg",
"loss_objectness",
"loss_box_reg",
"loss_classifier",
"loss_mask",
]
def plot_loss(df):
legends = df["legend"].unique()
poly_data = pd.DataFrame(
{"iter": np.linspace(df["iter"].min(), df["iter"].max(), 1000)}
)
for legend in legends:
poly_data[legend + "-fit"] = np.poly1d(
np.polyfit(
df[df["legend"] == legend]["iter"],
df[df["legend"] == legend]["loss"],
2,
)
)(poly_data["iter"])
base = alt.Chart(df).interactive()
chart = base.mark_line().encode(x="iter", y="loss", color="legend:N")
polynomial_fit = (
alt.Chart(poly_data)
.transform_fold(
[legend + "-fit" for legend in legends], as_=["legend", "loss"]
)
.mark_line()
.encode(x="iter:Q", y="loss:Q", color="legend:N")
)
chart += polynomial_fit
chart.display()
if len(legends) == 2:
loss_ratio_df = pd.DataFrame(
{
"metrics": poly_data[legends[0] + "-fit"]
/ poly_data[legends[1] + "-fit"],
"iter": poly_data["iter"],
"legend": "loss_ratio",
}
)
loss_diff_df = pd.DataFrame(
{
"metrics": poly_data[legends[0] + "-fit"]
- poly_data[legends[1] + "-fit"],
"iter": poly_data["iter"],
"legend": "loss_diff",
}
)
loss_compare_df = pd.concat([loss_ratio_df, loss_diff_df], axis=0)
base = alt.Chart(loss_compare_df).interactive()
chart = (
base.mark_line()
.encode(x="iter:Q", y="metrics:Q", color="legend:N")
.mark_line()
)
chart.display()
def plot_lr(df):
base = alt.Chart(df).interactive()
chart = base.mark_line().encode(x="iter:Q", y="lr:Q", color="legend:N")
chart.display()
def take_every_n(ndarray, n):
return ndarray[np.mod(np.arange(ndarray.shape[0]), n) == 0]
def make_loss_frame(hisogram, column_index, legend="undefined"):
assert column_index < len(COLUMNS)
ndarray = np.array(hisogram)[:, [column_index, len(COLUMNS)]]
return pd.DataFrame(
{"legend": legend, "loss": ndarray[:, 0], "iter": ndarray[:, 1]}
)
def make_lr_frame(hisogram, legend="undefined"):
if hisogram.shape[1] == 7:
ndarray = np.array(hisogram)[:, [6, 5]]
return pd.DataFrame(
{"legend": legend, "lr": ndarray[:, 0], "iter": ndarray[:, 1]}
)
else:
return pd.DataFrame()
def plot5(losses_hisogram, source="undefined"):
for column_index, column_name in enumerate(COLUMNS):
plot_loss(make_loss_frame(losses_hisogram, column_index, column_name))
def plot5in1(losses_hisogram, source="undefined"):
cat = pd.concat(
[
make_loss_frame(
losses_hisogram,
column_index,
legend="{}-{}".format(column_name, source),
)
for column_index, column_name in enumerate(COLUMNS)
],
axis=0,
)
plot_loss(cat)
def plot2(losses_hisogram_dict):
cat_lr = pd.concat(
[
make_lr_frame(losses_hisogram, legend="{}-{}".format("lr-", source))
for source, losses_hisogram in losses_hisogram_dict.items()
],
axis=0,
)
plot_lr(cat_lr)
for column_index, column_name in enumerate(COLUMNS):
cat = pd.concat(
[
make_loss_frame(
losses_hisogram,
column_index,
legend="{}-{}".format(column_name, source),
)
for source, losses_hisogram in losses_hisogram_dict.items()
],
axis=0,
)
plot_loss(cat)
plot2(
{
"OneFlow-sunday": take_every_n(
np.load(
"/Users/jackal/Downloads/loss-19999-batch_size-8-gpu-4-image_dir-train2017-2019-12-09--01-02-54.npy"
)[:6000],
3,
),
"PyTorch-no_contigous": take_every_n(
np.load(
"/Users/jackal/Downloads/pytorch_maskrcnn_losses_no_contigous.npy"
)[:6000],
3,
),
}
)
# plot2(
# {
# "PyTorch-no_contigous": take_every_n(np.load("/Users/jackal/Downloads/pytorch_maskrcnn_losses_no_contigous.npy")[:6000],3),
# "PyTorch": take_every_n(np.load("/Users/jackal/Downloads/pytorch_maskrcnn_losses.npy")[:6000], 3)
# }
# )
# plot2(
# {
# "OneFlow-sunday": np.load("/Users/jackal/Downloads/loss-1999-batch_size-8-gpu-4-image_dir-train2017-2019-12-08--18-55-06.npy")[:1600, :],
# "OneFlow-friday": np.load("/Users/jackal/Downloads/loss-1999-batch_size-8-gpu-4-image_dir-train2017.npy")[:1600, :],
# }
# )
# plot5in1(take_every_n(np.load("/tmp/shared_with_zwx/pytorch_maskrcnn_losses.npy"), 100), "PyTorch")
# plot5(take_every_n(np.load("/Users/jackal/Downloads/pytorch_maskrcnn_losses.npy"), 100), "PyTorch")
......@@ -15,7 +15,6 @@ from box_head import BoxHead
from mask_head import MaskHead
from eval.bounding_box import BoxList
from eval.box_head_inference import PostProcessor
from eval.mask_head_inference import MaskPostProcessor
from eval.coco import COCODataset
from eval.coco_eval import do_coco_evaluation
......@@ -197,10 +196,10 @@ if __name__ == "__main__":
for i in range(terminal_args.iter_num):
if terminal_args.fake_img:
if i == 0:
f = open("/dataset/mask_rcnn/maskrcnn_eval_net_10/fake_image_list.pkl", "rb")
f = open("/dataset/mask_rcnn/maskrcnn_eval_net_50/fake_image_list.pkl", "rb")
fake_image_list = pickle.load(f)
images = fake_image_list[i].transpose((0, 2, 3, 1)).copy()
results = maskrcnn_eval_job(fake_image_list[i]).get()
results = maskrcnn_eval_job(images).get()
else:
results = maskrcnn_eval_job().get()
predictions, image_ids = GenPredictionsAndImageIds(results)
......
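The new images = fake_image_list[i].transpose((0, 2, 3, 1)).copy() line converts the pickled batch from NCHW to the NHWC layout the eval job consumes before feeding it in; for example (the shape below is illustrative):
import numpy as np
nchw = np.zeros((2, 3, 800, 1344), dtype=np.float32)  # N, C, H, W
nhwc = nchw.transpose((0, 2, 3, 1)).copy()            # .copy() gives a C-contiguous array
assert nhwc.shape == (2, 800, 1344, 3)                # N, H, W, C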
......@@ -15,7 +15,6 @@ from rpn import RPNHead, RPNLoss, RPNProposal
from box_head import BoxHead
from mask_head import MaskHead
from blob_watcher import save_blob_watched, blob_watched, diff_blob_watched
from distribution import distribute_execute
parser = argparse.ArgumentParser()
parser.add_argument(
......@@ -75,6 +74,13 @@ parser.add_argument(
default=0,
required=False,
)
parser.add_argument(
"-save_loss_npy_every_n_batch",
"--save_loss_npy_every_n_batch",
type=int,
default=0,
required=False,
)
parser.add_argument(
"-v", "--verbose", default=False, action="store_true", required=False
)
......@@ -122,6 +128,13 @@ parser.add_argument(
action="store_true",
required=False,
)
parser.add_argument(
"-pr",
"--print_loss_each_rank",
default=False,
action="store_true",
required=False,
)
terminal_args = parser.parse_args()
......@@ -458,7 +471,7 @@ def init_config():
return config
def save_model(i):
def save_model(check_point, i):
if not os.path.exists(terminal_args.model_save_dir):
os.makedirs(terminal_args.model_save_dir)
model_dst = os.path.join(terminal_args.model_save_dir, "iter-" + str(i))
......@@ -532,7 +545,7 @@ if terminal_args.train_with_real_dataset:
data_loader.add_blob(
"gt_bbox",
data_util.DataSourceCase.kObjectBoundingBox,
shape=(64, 4),
shape=(128, 4),
dtype=flow.float,
tensor_list_variable_axis=0,
is_dynamic=True,
......@@ -540,7 +553,7 @@ if terminal_args.train_with_real_dataset:
data_loader.add_blob(
"gt_labels",
data_util.DataSourceCase.kObjectLabel,
shape=(64,),
shape=(128,),
dtype=flow.int32,
tensor_list_variable_axis=0,
is_dynamic=True,
......@@ -556,7 +569,7 @@ if terminal_args.train_with_real_dataset:
data_loader.add_blob(
"gt_segm",
data_util.DataSourceCase.kObjectSegmentationAlignedMask,
shape=(64, 1344, 800),
shape=(128, 1344, 800),
dtype=flow.int8,
tensor_list_variable_axis=0,
is_dynamic=True,
......@@ -661,7 +674,7 @@ if __name__ == "__main__":
check_point = flow.train.CheckPoint()
check_point.init()
if terminal_args.model_save_every_n_batch > 0:
save_model(0)
save_model(check_point, 0)
else:
check_point = flow.train.CheckPoint()
# check_point.load(terminal_args.model_load_dir)
......@@ -702,21 +715,34 @@ if __name__ == "__main__":
save_blob_watched(i)
if (i + 1) % 10 == 0:
save_model(i + 1)
save_model(check_point, i + 1)
elif terminal_args.train_with_real_dataset:
print(
"{:<8} {:<8} {:<16} {:<16} {:<16} {:<16} {:<16} {:<16}".format(
"iter",
"rank",
"elapsed_time",
"loss_rpn_box_reg",
"loss_objectness",
"loss_box_reg",
"loss_classifier",
"loss_mask",
if terminal_args.print_loss_each_rank:
print(
"{:<8} {:<8} {:<16} {:<16} {:<16} {:<16} {:<16} {:<16}".format(
"iter",
"rank",
"elapsed_time",
"loss_rpn_box_reg",
"loss_objectness",
"loss_box_reg",
"loss_classifier",
"loss_mask",
)
)
else:
print(
"{:<8} {:<16} {:<16} {:<16} {:<16} {:<16} {:<16}".format(
"iter",
"elapsed_time",
"loss_rpn_box_reg",
"loss_objectness",
"loss_box_reg",
"loss_classifier",
"loss_mask",
)
)
)
losses_hisogram = []
start_time = time.time()
......@@ -737,31 +763,53 @@ if __name__ == "__main__":
(i + 1) % terminal_args.model_save_every_n_batch == 0
or i + 1 == terminal_args.iter_num
):
save_model(i + 1)
fmt = "{:<8} {:<8} {:<16} " + "{:<16.10f} " * len(losses)
for rank, loss_tup in enumerate(zip(*losses)):
frame = [loss.mean() for loss in loss_tup]
elapsed_time_str = (
"{:.6f}".format(elapsed_time) if rank == 0 else ""
)
print(fmt.format(i, rank, elapsed_time_str, *frame))
frame.append(i)
losses_hisogram.append(frame)
save_model(check_point, i + 1)
elapsed_time_str = "{:.6f}".format(elapsed_time)
if terminal_args.print_loss_each_rank:
for rank, loss_tup in enumerate(zip(*losses)):
fmt = "{:<8} {:<8} {:<16} " + "{:<16.10f} " * len(
loss_tup
)
loss_per_rank = [loss.mean() for loss in loss_tup]
print(
fmt.format(
i,
rank,
elapsed_time_str if rank == 0 else "",
*loss_per_rank
)
)
else:
loss_per_batch = []
for loss_list in losses:
rank_loss_list = [
loss_per_rank.mean() for loss_per_rank in loss_list
]
loss_per_batch.append(
sum(rank_loss_list) / len(rank_loss_list)
)
fmt = "{:<8} {:<16} " + "{:<16.10f} " * len(loss_per_batch)
print(fmt.format(i, elapsed_time_str, *loss_per_batch))
loss_per_batch.append(i)
losses_hisogram.append(loss_per_batch)
if (terminal_args.save_loss_npy_every_n_batch > 0 and (i + 1) % terminal_args.save_loss_npy_every_n_batch == 0) or i + 1 == terminal_args.iter_num:
npy_file_name = "loss-{}-batch_size-{}-gpu-{}-image_dir-{}-{}".format(i, terminal_args.batch_size, terminal_args.gpu_num_per_node, terminal_args.image_dir ,str(datetime.now().strftime("%Y-%m-%d--%H-%M-%S")))
np.save(npy_file_name, np.array(losses_hisogram))
print("saved: {}.npy".format(npy_file_name))
save_blob_watched(i)
print(
"median of elapsed time per batch:",
statistics.median(elapsed_times),
)
npy_file_name = "loss-{}".format(i)
np.save(npy_file_name, np.array(losses_hisogram))
print("saved: {}.npy".format(npy_file_name))
if terminal_args.jupyter:
import altair as alt
import pandas as pd
columns=[
columns = [
"loss_rpn_box_reg",
"loss_objectness",
"loss_box_reg",
......@@ -771,10 +819,7 @@ if __name__ == "__main__":
for column_index, column_name in enumerate(columns):
loss_data_frame = pd.DataFrame(
np.array(losses_hisogram)[:, [column_index, -1]],
columns=[
column_name,
"iter",
],
columns=[column_name, "iter"],
)
base = (
......@@ -782,8 +827,6 @@ if __name__ == "__main__":
.mark_line()
.encode(x="petalLength", y="petalWidth")
)
chart = (
base.mark_line().encode(x="iter", y=column_name)
)
chart = base.mark_line().encode(x="iter", y=column_name)
chart.display()
......@@ -22,6 +22,8 @@ def _Conv2d(
dilation_rate=[1, 1],
activation=activation,
use_bias=True,
kernel_initializer=flow.random_normal_initializer(mean=0.0, stddev=0.01),
bias_initializer=flow.constant_initializer(0),
name=name,
weight_name=weight_name,
bias_name=bias_name,
......
......@@ -5,6 +5,6 @@ python maskrcnn_eval.py -load="/model_zoo/detection/mask_rcnn_R_50_FPN_1x/snapsh
-cp=19237 \
-bz=2 \
-dataset_dir="/dataset/mscoco_2017" \
-anno="annotations/sample_10_instances_val2017.json" \
-anno="annotations/sample_50_instances_val2017.json" \
-imgd="val2017" \
-i=5
-i=15 -f
......@@ -35,11 +35,11 @@ def get_variable(
root_path = compile_context.cur_job_conf.default_initialize_with_snapshot_path
dir_path = os.path.join(root_path, name)
file_path = os.path.join(dir_path, "out")
if root_path is not None and os.path.isfile(file_path):
if root_path and os.path.isfile(file_path):
op_conf.variable_conf.initialize_with_snapshot.path = dir_path
op_conf.variable_conf.initialize_with_snapshot.key = "out"
else:
if root_path is not None:
if root_path:
print("{} not found, will be initialized".format(file_path))
if initializer is not None:
op_conf.variable_conf.initializer.CopyFrom(initializer)
......
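The switch from is not None to a plain truthiness test matters when no snapshot path is configured: a protobuf string field reads back as an empty string, which is not None but is falsy, so the old check would still reach the "not found, will be initialized" print for every variable. A small illustration (assuming the unset field reads back as ""):
root_path = ""                 # what an unset proto string field reads back as
print(root_path is not None)   # True  -> old code still reached the "not found" print
print(bool(root_path))         # False -> new code skips the snapshot branch silently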
......@@ -126,6 +126,7 @@ def conv2d(
if kernel_initializer is not None
else flow.constant_initializer(0),
trainable=trainable,
model_name="weight",
)
output = flow.nn.conv2d(
inputs, weight, strides, padding, data_format, dilation_rate, name
......@@ -139,6 +140,7 @@ def conv2d(
if bias_initializer is not None
else flow.constant_initializer(0),
trainable=trainable,
model_name="bias",
)
output = flow.nn.bias_add(
output, bias, data_format, name=name_prefix + "-bias_add"
......