Auto Commit

Parent becd727a
# Default ignored files
/shelf/
/workspace.xml
<component name="InspectionProjectProfileManager">
<settings>
<option name="USE_PROJECT_PROFILE" value="false" />
<version value="1.0" />
</settings>
</component>
<?xml version="1.0" encoding="UTF-8"?>
<project version="4">
<component name="ProjectRootManager" version="2" project-jdk-name="Python 3.7 (yolov5) (4)" project-jdk-type="Python SDK" />
</project>
<?xml version="1.0" encoding="UTF-8"?>
<project version="4">
<component name="ProjectModuleManager">
<modules>
<module fileurl="file://$PROJECT_DIR$/.idea/test.iml" filepath="$PROJECT_DIR$/.idea/test.iml" />
</modules>
</component>
</project>
<?xml version="1.0" encoding="UTF-8"?>
<module type="PYTHON_MODULE" version="4">
<component name="NewModuleRootManager">
<content url="file://$MODULE_DIR$" />
<orderEntry type="jdk" jdkName="Python 3.7 (yolov5) (4)" jdkType="Python SDK" />
<orderEntry type="sourceFolder" forTests="false" />
</component>
</module>
print('欢迎来到 InsCode')
import pandas as pd
import numpy as np
import os
from PyEMD import EEMD
import torch
from torch import nn
from torch.utils.data import Dataset,DataLoader, random_split
from ptflops import get_model_complexity_info
import seaborn as sns
from sklearn.metrics import r2_score
import time
import matplotlib.pyplot as plt
from model2 import GRUModel, LSTMmdoel, LSTM_att_Model
plt.rcParams['font.sans-serif'] = ['SimHei']   # render Chinese characters in figures
plt.rcParams['axes.unicode_minus'] = False     # render the minus sign correctly
# Modify hyperparameters here
class paramte():
def __init__(self):
self.params1 = 25 # epoch
self.params2 = 0.001 # lr
self.params3 = 150 # point
self.params4 = [64,128,128] # hidden size
self.params5 = 0.8 # split data
self.params6 = 16 # batch_size
self.params7 = 2 # output_size
self.params8 = r'datasets\train'
self.params9 = r'datasets\evaluate'
self.params10 = r'datasets\predict'
self.params = [self.params1,self.params2,self.params3,self.params4,self.params5
,self.params6,self.params7,self.params8,self.params9,self.params10]
def solve_p(*a):
    """Convert paired (cos, sin) values into a phase fraction in [0, 1)."""
    PP = []
    cos = a[0]
    sin = a[1]
for i in range(len(cos)):
x1 = cos[i]
x2 = sin[i]
sita = np.arctan(x2/x1)
if (x1 > 0) and (x2 >= 0):
p = sita / ( 2 * np.pi )
elif x1 < 0:
p = 1 / 2 + sita / (2* np.pi)
elif (x1 > 0) and (x2 < 0):
p = 1 + sita / (2* np.pi)
PP.append(np.round(p,3))
return PP
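# A hedged worked example (not part of the original code): the quadrant handling above maps a
# point a quarter of the way through the cycle to p ~ 0.25 and three quarters through to p ~ 0.75:
#     solve_p([1.0, 0.001, -1.0, 0.001], [0.0, 1.0, 0.0, -1.0])   # ~ [0.0, 0.25, 0.5, 0.75]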
def load_data(path, names):
    """Read every CSV under `path`, concatenate the rows, and append the phase column P."""
    frames = [pd.read_csv(os.path.join(path, name)) for name in os.listdir(path)]
    data = pd.concat(frames, ignore_index=True)
a = solve_p(data.iloc[:,0].values, data.iloc[:,1].values)
data['P'] = a
return data.loc[:,names].values , data.loc[:,names]
def plot(train_data , name):
plt.figure()
for i in range(train_data.shape[1]):
plt.subplot(train_data.shape[1] //2 +1, 2, i + 1)
plt.plot(np.arange(len(train_data[:, i])), train_data[:, i])
plt.grid()
plt.ylabel(name[i])
plt.tight_layout()
plt.show()
# Filtering
'''
In an EEMD decomposition each signal is split into several intrinsic mode functions (IMFs):
the first IMF captures the highest-frequency content of the signal and the last IMF the
lowest-frequency content. Which IMF to use therefore depends on the signal characteristics you
want to analyse and on the purpose of the application. In general the choice should be guided
by domain knowledge and by analysing the data: the first few IMFs for transient behaviour, the
last few for periodic behaviour, and the middle IMFs for the overall trend. In practice it also
helps to inspect the spectrum and energy distribution of each IMF before choosing one.
'''
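# A minimal, optional sketch (not part of the original pipeline) of one way to act on the note
# above: decompose a signal, then compare the dominant frequency and the energy share of each
# IMF before deciding which one to keep. The sampling rate `fs` is an assumption, and this
# helper is never called by the training code.
def inspect_imfs(signal, max_imf=7, fs=1.0):
    """Decompose `signal` with EEMD and print each IMF's dominant frequency and energy share."""
    imfs = EEMD().eemd(signal, max_imf=max_imf)
    total_energy = np.sum(imfs ** 2)
    for k, imf in enumerate(imfs):
        spectrum = np.abs(np.fft.rfft(imf))
        freqs = np.fft.rfftfreq(len(imf), d=1.0 / fs)
        print(f'IMF {k}: dominant frequency {freqs[np.argmax(spectrum)]:.4f}, '
              f'energy share {np.sum(imf ** 2) / total_energy:.2%}')
    return imfs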
def filter(train_data,names):
eemd = EEMD()
E_IMFs = eemd.eemd(train_data[:,-2], max_imf=7)
# plot(np.concatenate((E_IMFs.T ,
# np.expand_dims(train_data[:,-2],1) ,
# np.expand_dims(train_data[:,-2],1)),
# axis = 1) ,
# [0,1,2,3,4,5,6,7,'ori','ori'])
    # new data
    train_data[:, -2] = E_IMFs.T[:, 6]   # replace Velocity_thigh with the 7th (low-frequency) IMF
    X = train_data[:, :]
    # features after the replacement
# plot(X,name = names )
return X
# slide_window
def slide_window(X,n,point):
float_X1 = np.zeros((len(X) - point , point , n-2))
float_X2 = np.zeros((len(X) - point , 10))
float_Y = np.zeros((len(X) - point ,2))
for i in range(len(float_X1)):
# extract the slide_window sequences for model to learn the useful features
float_X1[i] = X[i : i+point , 2:n]
# synthetic features
a = X[i: i + point, 2:4]
float_X2[i , 0:2] = a.mean(axis = 0)
float_X2[i, 2:4] = a.std(axis = 0)
float_X2[i , 4:6] = a.max(axis = 0)
float_X2[i , 6:8] = a.min(axis = 0)
float_X2[i , 8:10] = a[-1,:]
# the training label
float_Y[i] = X[i+point , :2]
return float_X1 , float_X2 , float_Y
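# Shape sketch (hedged, assuming the defaults used below: point = 150, n = 3, and X holding the
# five columns [Index_COS, Index_SIN, Position_thigh, Velocity_thigh, P]):
#   float_X1: (len(X) - 150, 150, 1)  sliding windows over column 2 (Position_thigh)
#   float_X2: (len(X) - 150, 10)      mean / std / max / min / last value of columns 2:4 per window
#   float_Y : (len(X) - 150, 2)       the (Index_COS, Index_SIN) pair one step after each window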
class MyDataset(torch.utils.data.Dataset):
def __init__(self, data , targets):
self.data = torch.tensor(data).float()
self.targets = torch.tensor(targets).float()
def __getitem__(self, index):
data ,target = self.data[index], self.targets[index]
return data, target
def __len__(self):
return len(self.data)
def pred(test_x , test_y):
r2 = []
x = test_x
y = test_y
x , y = x.to(device) , y.to(device)
pred = model(x)
pred = pred.cpu().detach().numpy()
y = y.cpu().detach().numpy()
num = range(len(x))
r2.append(r2_score(y, pred))
y = y * st[:2] + me[:2]
pred = pred * st[:2] + me[:2]
    # compute the phase P from (cos, sin)
P_y = solve_p(y[:,0] , y[:,1])
P_pred = solve_p(pred[:,0] , pred[:,1])
plt.figure()
plt.subplot(2, 1, 1)
plt.plot(num , y[:,0] , label = 'true_cos')
plt.plot(num , pred[:,0] , label = 'pred_cos')
plt.plot(num , y[:,1] , label = 'true_sin')
plt.plot(num , pred[:,1], label = 'pred_sin')
plt.title(f'R2:{r2[-1]}')
plt.legend()
plt.subplot(2,1,2)
    plt.plot(num, P_y, label='P_true')
plt.plot(num , P_pred , label = 'P_pred')
plt.legend()
    plt.tight_layout()
    mae = np.sum(np.abs(y - pred), axis=0) / len(y)
    rmse = np.sqrt(np.sum(np.abs(y - pred) ** 2, axis=0) / len(y))
    print(f'pred:{pred} \ntrue:{y}')
    print('R2 %.2f_mae %.2f_rmse %.2f' % (np.mean(r2), np.mean(mae), np.mean(rmse)))
    # save before show() so the written figure is not blank
    plt.savefig('pred R2 %.2f_mae %.2f_rmse %.2f.png' % (np.mean(r2), np.mean(mae), np.mean(rmse)), dpi=600)
    plt.show()
def plot_1():
    # density plot
    plt.figure(figsize=(30, 30))
    sns.histplot(data.iloc[:, -1].values, kde=True)
    plt.savefig('density_plot.png', dpi=600)
    plt.show()
    # correlation analysis
    corr = data.corr()
    highest_corr_features = corr.index[abs(corr["Index_COS"]) > 0.5]
    plt.figure(figsize=(30, 30))
    g = sns.heatmap(data[highest_corr_features].corr(), annot=True, cmap="RdYlGn")
    plt.savefig('correlation.png', dpi=600)
    plt.show()
    # training history
    plt.figure()
    x = range(len(train_loss))
    plt.plot(x, train_loss, label='train_loss')
    plt.plot(x, val_loss, label='val_loss')
    # plt.ylim((0, 0.04))
    plt.legend()
    plt.title('history')
    plt.grid()
    plt.savefig('training_history.png', dpi=600)
    plt.show()
def grad_clipping(net, theta):
    """
    Gradient clipping: rescale all gradients so that their global L2 norm does not exceed theta.
    """
if isinstance(net, nn.Module):
params = [p for p in net.parameters() if p.requires_grad]
else:
params = net.params
norm = torch.sqrt(sum(torch.sum((p.grad ** 2)) for p in params))
if norm > theta:
for param in params:
param.grad[:] *= theta / norm
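# In symbols: if the global norm ||g||_2 = sqrt(sum_p ||grad_p||^2) exceeds theta, every gradient
# is rescaled in place by theta / ||g||_2, so the clipped norm equals theta. The (optional) call
# site is between loss.backward() and updater.step(), as shown commented out inside train() below.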
def train(train_iter ,test_iter, num_epochs , updater , loss, model):
a = []
b = []
iter = 0
for epoch in range(num_epochs):
for i ,(x,y) in enumerate(train_iter):
x,y = x.to(device) , y.to(device)
y_hat = model(x)
updater.zero_grad()
l = loss(y_hat , y)
l.backward()
# grad_clipping(model, 1) # 梯度裁剪
updater.step()
iter += 1
a.append(l.item())
for x,y in test_iter:
x, y = x.to(device), y.to(device)
y_hat = model(x)
l2 = loss(y_hat, y)
b.append(l2.item())
print(f'epoch : {epoch} , loss : {l.item()} , val_loss: {l2.item()}')
return a,b
def try_gpu(i=0):
"""Return gpu(i) if exists, otherwise return cpu().
Defined in :numref:`sec_use_gpu`"""
if torch.cuda.device_count() >= i + 1:
return torch.device(f'cuda:{i}')
return torch.device('cpu')
if __name__ == '__main__':
para = paramte().params
# original data
# ['H_mean' , 'T_mean' , 'H_std' , 'T_std' , 'H_max' ,'T_max' , 'H_min' , 'T_min' ,'H_fin' , 'T_fin']
dict = {'names' : ['Index_COS','Index_SIN','Position_thigh','Velocity_thigh','P'],
'n' : 3,
'feature_name' : ['T_mean' , 'T_std' , 'T_max' , 'T_min' , 'T_fin']}
train_data , data = load_data(para[7],dict['names'])
eva_data , _= load_data(para[8],dict['names'])
pre_data ,_ = load_data(para[9],dict['names'])
# plot(train_data[:3000,:],dict['names'])
# filtering the original data
X = filter(train_data[:3000,:] , names = dict['names'])
test_x = filter(eva_data[:1500,:] , names = dict['names'])
# standardization
me = np.mean(X,axis = 0)
st = np.std(X , axis = 0)
X = (X - me) / st
test_x = (test_x - me) / st
# sliding the filter data X
float_X1 , float_X2 , float_Y = slide_window(X,n=dict['n'],point=para[2])
test_x , _,test_y = slide_window(test_x , n=dict['n'],point=para[2])
# plot(float_X2 , name = ['H_mean' , 'T_mean' , 'H_std' , 'T_std' , 'H_max' ,'T_max' , 'H_min' , 'T_min' ,'H_fin' , 'T_fin'])
# plot(float_Y , name = ['COS' , 'SIN'])
    # build the dataset
    float_data = torch.tensor(float_X1)
    label = torch.tensor(np.array(float_Y))
    dataset = MyDataset(float_data, label)
    # split the dataset into a training set and a validation set
    train_size = int(para[4] * len(dataset))
    val_size = len(dataset) - train_size
    train_, test_ = random_split(dataset, [train_size, val_size], generator=torch.Generator().manual_seed(42))
    # create the data loaders
    train_iter = DataLoader(train_, batch_size=para[5], shuffle=True)
    test_iter = DataLoader(test_, batch_size=para[5], shuffle=False)
test_x = torch.tensor(test_x[-800:-1]).float()
test_y = torch.tensor(test_y[-800 : -1 , :]).float()
    # hyperparameters
    lr = para[1]
    num_epochs = para[0]
    device = try_gpu()
    # device = torch.device('cpu')
    # build the model
    # model = LSTM_att_Model(input_size=float_data.shape[-1], output_size=para[6], hidden_size=para[3], num_layers=1)
    # model = GRUModel(input_size=float_data.shape[-1], output_size=para[6], hidden_size=para[3], num_layers=1)
    model = LSTMmdoel(input_size=float_data.shape[-1], output_size=para[6], hidden_size=para[3], num_layers=1)
    model = model.to(device)
    # model complexity: MACs and parameter count
    ops, params = get_model_complexity_info(model, (150, 1), as_strings=True, print_per_layer_stat=True, verbose=True)
    # loss function
    loss = nn.MSELoss()
    # optimizer
    updater = torch.optim.Adam(model.parameters(), lr)
    # training
model.train()
train_loss , val_loss = train(train_iter ,test_iter, num_epochs , updater , loss, model)
    # save the trained weights
    times = time.localtime()
    path = "%04d-%02d-%02d_%02d_%02d_%02d" % (times.tm_year, times.tm_mon, times.tm_mday, times.tm_hour, times.tm_min, times.tm_sec)
    os.makedirs('model', exist_ok=True)
    torch.save(model.state_dict(), 'model\\' + path + '.pth')
    # load a saved checkpoint instead of training, if desired
    # state_dict = torch.load(r'model\GRU\2023-06-10_10_55_23.pth')
    # model.load_state_dict(state_dict)
    # model.eval()
    # prediction results
pred(test_x , test_y)
    # plots
    plot_1()
plt.show(block=True)
    # attention heatmaps (only meaningful for the attention-based models)
    if hasattr(model, 'attention'):
        from d2l import torch as d2l
        # for i in [20, 30, 50, 90, 140, 320, 500]:
        #     d2l.show_heatmaps(model.attention.attention_weights[i].cpu().reshape((1, 1, 150, 150)), xlabel='Keys', ylabel='Queries')
        d2l.show_heatmaps(model.attention.attention_weights.mean(axis=0).cpu().reshape((1, 1, 150, 150)), xlabel='Keys', ylabel='Queries')
Warning: module dot_att is treated as a zero-op.
Warning: module NonDynamicallyQuantizableLinear is treated as a zero-op.
Warning: module GRUModel is treated as a zero-op.
GRUModel(
269.76 k, 100.000% Params, 30.8 MMac, 100.000% MACs,
(GRU1): GRU(12.86 k, 4.769% Params, 2.0 MMac, 6.484% MACs, 1, 64, batch_first=True)
(GRU2): GRU(74.5 k, 27.615% Params, 11.31 MMac, 36.721% MACs, 64, 128, batch_first=True)
(bn1): BatchNorm1d(256, 0.095% Params, 0.0 Mac, 0.000% MACs, 128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(GRU3): GRU(99.07 k, 36.726% Params, 15.0 MMac, 48.691% MACs, 128, 128, batch_first=True)
(bn2): BatchNorm1d(256, 0.095% Params, 0.0 Mac, 0.000% MACs, 128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(attention): dot_att(
16.51 k, 6.121% Params, 2.46 MMac, 7.980% MACs,
(attention): Linear(16.51 k, 6.121% Params, 2.46 MMac, 7.980% MACs, in_features=128, out_features=128, bias=True)
)
(attention2): MultiheadAttention(
66.05 k, 24.484% Params, 0.0 Mac, 0.000% MACs,
(out_proj): NonDynamicallyQuantizableLinear(0, 0.000% Params, 0.0 Mac, 0.000% MACs, in_features=128, out_features=128, bias=True)
)
(fc): Linear(258, 0.096% Params, 38.4 KMac, 0.125% MACs, in_features=128, out_features=2, bias=True)
)
LSTMmdoel(
199.68 k, 100.000% Params, 30.3 MMac, 100.000% MACs,
(lstm1): LSTM(67.07 k, 33.589% Params, 10.25 MMac, 33.840% MACs, 1, 128, batch_first=True)
(bn1): BatchNorm1d(256, 0.128% Params, 0.0 Mac, 0.000% MACs, 128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(lstm2): LSTM(132.1 k, 66.153% Params, 20.01 MMac, 66.033% MACs, 128, 128, batch_first=True)
(fc): Linear(258, 0.129% Params, 38.4 KMac, 0.127% MACs, in_features=128, out_features=2, bias=True)
)
Warning: module dot_att is treated as a zero-op.
Warning: module NonDynamicallyQuantizableLinear is treated as a zero-op.
Warning: module LSTMModel is treated as a zero-op.
LSTMModel(
331.91 k, 100.000% Params, 40.26 MMac, 100.000% MACs,
(lstm1): LSTM(17.15 k, 5.168% Params, 2.67 MMac, 6.628% MACs, 1, 64, batch_first=True)
(lstm2): LSTM(99.33 k, 29.927% Params, 15.09 MMac, 37.482% MACs, 64, 128, batch_first=True)
(bn1): BatchNorm1d(256, 0.077% Params, 0.0 Mac, 0.000% MACs, 128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(lstm3): LSTM(132.1 k, 39.799% Params, 20.01 MMac, 49.690% MACs, 128, 128, batch_first=True)
(bn2): BatchNorm1d(256, 0.077% Params, 0.0 Mac, 0.000% MACs, 128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(attention): dot_att(
16.51 k, 4.975% Params, 2.46 MMac, 6.104% MACs,
(attention): Linear(16.51 k, 4.975% Params, 2.46 MMac, 6.104% MACs, in_features=128, out_features=128, bias=True)
)
(attention2): MultiheadAttention(
66.05 k, 19.900% Params, 0.0 Mac, 0.000% MACs,
(out_proj): NonDynamicallyQuantizableLinear(0, 0.000% Params, 0.0 Mac, 0.000% MACs, in_features=128, out_features=128, bias=True)
)
(fc): Linear(258, 0.078% Params, 38.4 KMac, 0.095% MACs, in_features=128, out_features=2, bias=True)
)
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
'''
@Project :test
@File :model2.py
@IDE :PyCharm
@Author :WJY
@Date :2023/6/10 10:07
'''
from torch import nn
import torch
import numpy as np
import torch.nn.functional as F
class dot_att(nn.Module):
    """Scaled dot-product attention."""
    def __init__(self, input_hidden, output_hidden, **kwargs):
        super(dot_att, self).__init__(**kwargs)
        self.attention = nn.Linear(input_hidden, output_hidden)  # t = 24, h = 1
    # queries shape: (batch_size, number of queries, d)
    # keys shape: (batch_size, number of key-value pairs, d)
    # values shape: (batch_size, number of key-value pairs, value dimension)
    # valid_lens shape: (batch_size,) or (batch_size, number of queries)
def forward(self, x):
x = self.attention(x) # b s h - b s t
e = torch.bmm(x, x.permute(0, 2, 1)) # bst*bts=bss
e = e / np.sqrt(x.shape[2])
self.attention_weights = F.softmax(e, dim=-1) # b s s
out = torch.bmm(self.attention_weights, x) # bss * bst ---> bst
out = F.relu(out)
return out
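# A minimal shape check for dot_att (hedged, illustrative only; the sizes below are arbitrary
# and this helper is never called by the training pipeline):
def _demo_dot_att():
    att = dot_att(128, 128)
    out = att(torch.randn(16, 150, 128))      # (batch, seq, hidden) -> (batch, seq, hidden)
    print(out.shape)                          # torch.Size([16, 150, 128])
    print(att.attention_weights.shape)        # torch.Size([16, 150, 150])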
def try_gpu(i=0):
"""Return gpu(i) if exists, otherwise return cpu().
Defined in :numref:`sec_use_gpu`"""
if torch.cuda.device_count() >= i + 1:
return torch.device(f'cuda:{i}')
return torch.device('cpu')
class GRUModel(nn.Module):
def __init__(self , input_size , output_size , hidden_size , num_layers):
super(GRUModel,self).__init__()
self.num_layers = num_layers
self.hidden_size = hidden_size
self.input_size = input_size
self.output_size = output_size
self.num_directions = 1
self.GRU1 = nn.GRU(self.input_size, self.hidden_size[0], self.num_layers, batch_first=True , bidirectional=False)
self.GRU2 = nn.GRU(self.hidden_size[0], self.hidden_size[1], self.num_layers, batch_first=True)
self.bn1 = nn.BatchNorm1d(self.hidden_size[1])
self.GRU3 = nn.GRU(self.hidden_size[1], self.hidden_size[2], self.num_layers, batch_first=True)
self.bn2 = nn.BatchNorm1d(self.hidden_size[2])
self.attention = dot_att(hidden_size[2], hidden_size[2])
self.attention2 = nn.MultiheadAttention(hidden_size[2], num_heads=8)
self.fc = nn.Linear(self.hidden_size[2], self.output_size)
def forward(self,x , device = try_gpu()):
batch_size = x.shape[0]
h_0 = torch.randn(self.num_directions * self.num_layers, batch_size, self.hidden_size[0]).to(device)
output, _ = self.GRU1(x.float(), h_0)
h_1 = torch.randn(self.num_directions * self.num_layers, batch_size, self.hidden_size[1]).to(device)
output, _ = self.GRU2(output.float(), h_1)
h_2 = torch.randn(self.num_directions * self.num_layers, batch_size, self.hidden_size[2]).to(device)
output, _ = self.GRU3(output.float(), h_2)
output = self.attention(output)
pred = self.fc(output)
pred = pred[:, -1, :]
return pred
class LSTM_att_Model(nn.Module):
def __init__(self , input_size , output_size , hidden_size , num_layers):
super(LSTM_att_Model , self).__init__()
self.num_layers = num_layers
self.hidden_size = hidden_size
self.input_size = input_size
self.output_size = output_size
self.num_directions = 1
self.lstm1 = nn.LSTM(self.input_size, self.hidden_size[0], self.num_layers,batch_first=True, bidirectional=False)
self.lstm2 = nn.LSTM(self.hidden_size[0] , self.hidden_size[1],self.num_layers,batch_first=True)
self.bn1 = nn.BatchNorm1d(self.hidden_size[1])
self.lstm3 = nn.LSTM(self.hidden_size[1], self.hidden_size[2], self.num_layers, batch_first=True)
self.bn2 = nn.BatchNorm1d(self.hidden_size[2])
self.attention = dot_att(hidden_size[2] , hidden_size[2])
self.attention2 = nn.MultiheadAttention(hidden_size[2], num_heads=8)
self.fc = nn.Linear(self.hidden_size[2] , self.output_size)
def forward(self, input_seq, device = try_gpu()):
# print(input_seq.size())
h_0 = torch.randn(self.num_directions * self.num_layers, input_seq.size(0), self.hidden_size[0]).to(device)
c_0 = torch.randn(self.num_directions * self.num_layers, input_seq.size(0), self.hidden_size[0]).to(device)
output, _ = self.lstm1(input_seq.float(), (h_0, c_0))
h_1 = torch.randn(self.num_directions * self.num_layers , output.size(0) , self.hidden_size[1]).to(device)
c_1 = torch.randn(self.num_directions * self.num_layers, output.size(0), self.hidden_size[1]).to(device)
output , _ = self.lstm2(output.float() , (h_1 , c_1))
h_2 = torch.randn(self.num_directions * self.num_layers , output.size(0) , self.hidden_size[2]).to(device)
c_2 = torch.randn(self.num_directions * self.num_layers, output.size(0), self.hidden_size[2]).to(device)
output , _ = self.lstm3(output.float() , (h_2 , c_2))
output = self.attention(output)
pred = self.fc(output)
pred = pred[:, -1, :]
return pred
class LSTMmdoel(nn.Module):
def __init__(self , input_size , output_size , hidden_size , num_layers):
super(LSTMmdoel , self).__init__()
self.num_layers = num_layers
self.hidden_size = hidden_size
self.input_size = input_size
self.output_size = output_size
self.num_directions = 1
self.lstm1 = nn.LSTM(self.input_size, self.hidden_size[1], self.num_layers,batch_first=True, bidirectional=False)
self.bn1 = nn.BatchNorm1d(self.hidden_size[1])
self.lstm2 = nn.LSTM(self.hidden_size[1], self.hidden_size[2], self.num_layers, batch_first=True)
self.fc = nn.Linear(self.hidden_size[2] , self.output_size)
def forward(self, input_seq, device = try_gpu()):
# print(input_seq.size())
h_0 = torch.randn(self.num_directions * self.num_layers, input_seq.size(0), self.hidden_size[1]).to(device)
c_0 = torch.randn(self.num_directions * self.num_layers, input_seq.size(0), self.hidden_size[1]).to(device)
output, _ = self.lstm1(input_seq.float(), (h_0, c_0))
h_2 = torch.randn(self.num_directions * self.num_layers , output.size(0) , self.hidden_size[2]).to(device)
c_2 = torch.randn(self.num_directions * self.num_layers, output.size(0), self.hidden_size[2]).to(device)
output , _ = self.lstm2(output.float() , (h_2 , c_2))
pred = self.fc(output)
pred = pred[:, -1, :]
return pred
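# A hedged usage sketch mirroring how main.py builds these models (the hidden sizes below are
# assumed to match paramte's [64, 128, 128]; this helper is illustrative and never called on import):
def _demo_models():
    hidden = [64, 128, 128]
    for cls in (GRUModel, LSTM_att_Model, LSTMmdoel):
        net = cls(input_size=1, output_size=2, hidden_size=hidden, num_layers=1)
        out = net(torch.randn(4, 150, 1), device=torch.device('cpu'))   # (batch, window, features)
        print(cls.__name__, out.shape)                                  # torch.Size([4, 2]) for each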