Commit e7f4d240 authored by H hypox64

Plot video

Parent 5436233f
import torch
from torch import nn
from .micro_multi_scale_resnet_1d import Multi_Scale_ResNet
import torch.nn.functional as F
# class Autoencoder(nn.Module):
# def __init__(self, input_nc,num_feature,num_classes,datasize):
# super(Autoencoder, self).__init__()
# self.datasize = datasize
# self.finesize = (1+datasize//1024)*1024
# self.prepad = nn.ReflectionPad1d((self.finesize-self.datasize)//2)
# # encoder
# self.encoder = Multi_Scale_ResNet(input_nc, num_feature)
# self.class_fc = nn.Linear(num_feature, num_classes)
# # decoder
# self.decoder = nn.Sequential(
# nn.Linear(num_feature, 512),
# nn.Linear(512, datasize)
# )
# def forward(self, x):
# #print(x.size())
# x = self.prepad(x)
# #print(x.size())
# feature = self.encoder(x)
# x = self.decoder(feature)
# #x = x[:,:,(self.finesize-self.datasize)//2:(self.finesize-self.datasize)//2+self.datasize]
# #print(x.size())
# return x,feature
# class Autoencoder(nn.Module):
# def __init__(self, input_nc,num_feature,num_classes,datasize):
# super(Autoencoder, self).__init__()
# self.datasize = datasize
# self.finesize = (1+datasize//1024)*1024
# self.prepad = nn.ReflectionPad1d((self.finesize-self.datasize)//2)
# # encoder
# encoder = [nn.ReflectionPad1d(3),
# nn.Conv1d(input_nc, 64, kernel_size=7, padding=0, bias=False),
# nn.BatchNorm1d(64),
# nn.ReLU(True)]
# n_downsampling = 4
# for i in range(n_downsampling): # add downsampling layers
# mult = 2 ** i
# encoder += [nn.Conv1d(64 * mult, 64 * mult * 2, kernel_size=3, stride=2, padding=1, bias=False),
# nn.BatchNorm1d(64 * mult * 2),
# nn.ReLU(True),
# nn.MaxPool1d(2)]
# encoder += [nn.AvgPool1d(8)]
# self.encoder = nn.Sequential(*encoder)
# self.fc1 = nn.Linear(self.finesize//2, num_feature)
# self.class_fc = nn.Linear(num_feature, num_classes)
# self.fc2 = nn.Linear(num_feature,self.finesize//2)
# # decoder
# decoder = [nn.Upsample(scale_factor = 8, mode='nearest')]
# for i in range(n_downsampling): # add upsampling layers
# mult = 2 ** (n_downsampling - i)
# decoder += [
# nn.Upsample(scale_factor = 2, mode='nearest'),
# nn.ConvTranspose1d(64 * mult, int(64 * mult / 2),
# kernel_size=3, stride=2,
# padding=1, output_padding=1,
# bias=False),
# nn.BatchNorm1d(int(64 * mult / 2)),
# nn.ReLU(True)]
# decoder += [nn.ReflectionPad1d(3)]
# decoder += [nn.Conv1d(64, input_nc, kernel_size=7, padding=0)]
# self.decoder = nn.Sequential(*decoder)
# def forward(self, x):
# #print(x.size())
# x = self.prepad(x)
# #print(x.size())
# x = self.encoder(x)
# #print(x.size())
# x = x.view(x.size(0), -1)
# #print(x.size())
# feature = self.fc1(x)
# out_class = self.class_fc(feature)
# #print(feature.size())
# x = self.fc2(feature)
# x = x.view(x.size(0), -1, 1)
# #print(x.size())
# x = self.decoder(x)
# x = x[:,:,(self.finesize-self.datasize)//2:(self.finesize-self.datasize)//2+self.datasize]
# #print(x.size())
# return x,feature,out_class
class Autoencoder(nn.Module):
def __init__(self, input_nc,num_feature,num_classes,datasize):
super(Autoencoder, self).__init__()
# encoder
self.encoder = nn.Sequential(
nn.Linear(datasize, 128),
nn.Tanh(),
nn.Linear(128, 64),
nn.Tanh(),
nn.Linear(64, 12),
nn.Tanh(),
nn.Linear(12, num_feature),
)
# decoder
self.decoder = nn.Sequential(
nn.Linear(num_feature, 12),
nn.Tanh(),
nn.Linear(12, 64),
nn.Tanh(),
nn.Linear(64, 128),
nn.Tanh(),
nn.Linear(128, datasize),
)
def forward(self, x):
feature = self.encoder(x)
x = self.decoder(feature)
return x,feature
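# Shape sketch (illustrative only; sizes assume a config like --input_nc 1
# --feature 3 --finesize 1800 rather than anything fixed by the model):
#   net = Autoencoder(input_nc=1, num_feature=3, num_classes=5, datasize=1800)
#   x = torch.randn(16, 1, 1800)    # (batch, channel, datasize); Linear acts on the last dim
#   recon, feature = net(x)         # recon: (16, 1, 1800), feature: (16, 1, 3)
#   loss = F.mse_loss(recon, x)     # reconstruction objective used by the training script
# note: input_nc/num_classes are kept for interface parity; this minimal version does not use them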
\ No newline at end of file
from torch import nn
from . import cnn_1d,densenet,dfcnn,lstm,mobilenet,resnet,resnet_1d,squeezenet, \
    multi_scale_resnet,multi_scale_resnet_1d,micro_multi_scale_resnet_1d,autoencoder
# from models import cnn_1d,densenet,dfcnn,lstm,mobilenet,resnet,resnet_1d,squeezenet
# from models import multi_scale_resnet,multi_scale_resnet_1d,micro_multi_scale_resnet_1d
def CreatNet(opt):
    name = opt.model_name
    #encoder
    if name =='autoencoder':
        net = autoencoder.Autoencoder(opt.input_nc, opt.feature, opt.label, opt.finesize)
    #1d
    elif name =='lstm':
        net = lstm.lstm(opt.input_size,opt.time_step,input_nc=opt.input_nc,num_classes=opt.label)
    elif name == 'cnn_1d':
        net = cnn_1d.cnn(opt.input_nc,num_classes=opt.label)
    elif name == 'resnet18_1d':
        net = resnet_1d.resnet18()
        net.conv1 = nn.Conv1d(opt.input_nc, 64, 7, 2, 3, bias=False)
        net.fc = nn.Linear(512, opt.label)
    elif name == 'resnet34_1d':
        net = resnet_1d.resnet34()
        net.conv1 = nn.Conv1d(opt.input_nc, 64, 7, 2, 3, bias=False)
        net.fc = nn.Linear(512, opt.label)
    elif name == 'multi_scale_resnet_1d':
        net = multi_scale_resnet_1d.Multi_Scale_ResNet(inchannel=opt.input_nc, num_classes=opt.label)
    elif name == 'micro_multi_scale_resnet_1d':
        net = micro_multi_scale_resnet_1d.Multi_Scale_ResNet(inchannel=opt.input_nc, num_classes=opt.label)
    elif name == 'multi_scale_resnet':
        net = multi_scale_resnet.Multi_Scale_ResNet(inchannel=opt.input_nc, num_classes=opt.label)
    #2d
    elif name == 'dfcnn':
        net = dfcnn.dfcnn(num_classes = opt.label)
    elif name in ['resnet101','resnet50','resnet18']:
        if name =='resnet101':
            net = resnet.resnet101(pretrained=False)
            net.fc = nn.Linear(2048, opt.label)
        elif name =='resnet50':
            net = resnet.resnet50(pretrained=False)
            net.fc = nn.Linear(2048, opt.label)
        elif name =='resnet18':
            net = resnet.resnet18(pretrained=False)
            net.fc = nn.Linear(512, opt.label)
        net.conv1 = nn.Conv2d(opt.input_nc, 64, 7, 2, 3, bias=False)
    elif 'densenet' in name:
        if name =='densenet121':
            net = densenet.densenet121(pretrained=False,num_classes=opt.label)
        elif name == 'densenet201':
            net = densenet.densenet201(pretrained=False,num_classes=opt.label)
    elif name =='squeezenet':
        net = squeezenet.squeezenet1_1(pretrained=False,num_classes=opt.label,inchannel = 1)
    return net
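# Usage sketch (option values hypothetical; CreatNet only reads the opt fields used above):
#   opt = options.Options().getparse()   # e.g. --model_name autoencoder --feature 3
#   net = CreatNet(opt)                  # -> autoencoder.Autoencoder(input_nc, feature, label, finesize)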
\ No newline at end of file
@@ -7,7 +7,7 @@ from torch import nn, optim
import warnings
warnings.filterwarnings("ignore")
from util import util,transformer,dataloader,statistics,plot,options,scatter3d
from models import creatnet
opt = options.Options().getparse()
@@ -52,29 +52,45 @@ if not opt.no_cudnn:
torch.backends.cudnn.benchmark = True
optimizer = torch.optim.Adam(net.parameters(), lr=opt.lr)
criterion_class = nn.CrossEntropyLoss(weight)
criterion_auto = nn.MSELoss()
torch.save(net.cpu().state_dict(),os.path.join(opt.save_dir,'tmp.pth'))
def evalnet(net,signals,labels,sequences,epoch,plot_result={}):
    # net.eval()
    confusion_mat = np.zeros((opt.label,opt.label), dtype=int)
    features = np.zeros((len(sequences)//opt.batchsize*opt.batchsize,opt.feature+1))
    epoch_loss = 0
    batch_num = len(sequences)//opt.batchsize  # number of full batches
    for i in range(batch_num):
        signal,label = transformer.batch_generator(signals, labels, sequences[i*opt.batchsize:(i+1)*opt.batchsize])
        signal = transformer.ToInputShape(signal,opt,test_flag =True)
        signal,label = transformer.ToTensor(signal,label,no_cuda =opt.no_cuda)
        with torch.no_grad():
            if opt.model_name == 'autoencoder':
                out,feature = net(signal)
                loss = criterion_auto(out, signal)
                features[i*opt.batchsize:(i+1)*opt.batchsize,:opt.feature] = (feature.data.cpu().numpy()).reshape(opt.batchsize,-1)
                features[i*opt.batchsize:(i+1)*opt.batchsize,opt.feature] = label.data.cpu().numpy()
            else:
                out = net(signal)
                loss = criterion_class(out, label)
                pred = (torch.max(out, 1)[1]).data.cpu().numpy()
                label = label.data.cpu().numpy()
                for x in range(len(pred)):
                    confusion_mat[label[x]][pred[x]] += 1
            epoch_loss += loss.item()
    if opt.model_name != 'autoencoder':
        recall,acc,sp,err,k = statistics.report(confusion_mat)
        plot.draw_heatmap(confusion_mat,opt,name = 'current_test')
        print('epoch:'+str(epoch),' macro-prec,reca,F1,err,kappa: '+str(statistics.report(confusion_mat)))
    else:
        plot.draw_autoencoder_result(signal.data.cpu().numpy(), out.data.cpu().numpy(),opt)
        print('epoch:'+str(epoch),' loss: '+str(round(epoch_loss/batch_num,5)))  # mean loss over full batches
        plot.draw_scatter(features, opt)
    plot_result['test'].append(epoch_loss/batch_num)
    return plot_result,confusion_mat
print('begin to train ...')
@@ -92,36 +108,49 @@ for fold in range(opt.k_fold):
final_confusion_mat = np.zeros((opt.label,opt.label), dtype=int)
confusion_mat = np.zeros((opt.label,opt.label), dtype=int)
features = np.zeros((len(train_sequences[fold])//opt.batchsize*opt.batchsize,opt.feature+1))
confusion_mats = []
plot_result = {'train':[],'test':[]}
for epoch in range(opt.epochs):
    epoch_loss = 0
    t1 = time.time()
    np.random.shuffle(train_sequences[fold])
    net.train()
    for i in range(len(train_sequences[fold])//opt.batchsize):
        signal,label = transformer.batch_generator(signals, labels, train_sequences[fold][i*opt.batchsize:(i+1)*opt.batchsize])
        signal = transformer.ToInputShape(signal,opt,test_flag =False)
        signal,label = transformer.ToTensor(signal,label,no_cuda =opt.no_cuda)
        if opt.model_name == 'autoencoder':
            out,feature = net(signal)
            loss = criterion_auto(out, signal)
            features[i*opt.batchsize:(i+1)*opt.batchsize,:opt.feature] = (feature.data.cpu().numpy()).reshape(opt.batchsize,-1)
            features[i*opt.batchsize:(i+1)*opt.batchsize,opt.feature] = label.data.cpu().numpy()
        else:
            out = net(signal)
            loss = criterion_class(out, label)
            pred = (torch.max(out, 1)[1]).data.cpu().numpy()
            label = label.data.cpu().numpy()
            for x in range(len(pred)):
                confusion_mat[label[x]][pred[x]] += 1
        epoch_loss += loss.item()
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        # iter_cnt += 1
        # if iter_cnt%opt.plotfreq==0 and i>(len(train_sequences[fold])//opt.batchsize)/2:
        #     plot_result['train'].append(epoch_loss/i)
        #     plot.draw_loss(plot_result,epoch+i/(train_sequences.shape[1]/opt.batchsize),opt)
        #     if opt.model_name != 'autoencoder':
        #         plot.draw_heatmap(confusion_mat,opt,name = 'current_train')
        #         confusion_mat[:]=0

    ###tmp
    plot_result['train'].append(epoch_loss/(i+1))
    plot.draw_loss(plot_result,epoch+(i+1)/(train_sequences.shape[1]/opt.batchsize),opt)
    #plot.draw_scatter(features, opt)
    ###
    plot_result,confusion_mat_eval = evalnet(net,signals,labels,test_sequences[fold],epoch+1,plot_result)
    confusion_mats.append(confusion_mat_eval)
@@ -136,22 +165,22 @@ for fold in range(opt.k_fold):
if epoch+1==1:
util.writelog('>>> per epoch cost time:'+str(round((t2-t1),2))+'s',opt,True)
# #save result
# pos = plot_result['test'].index(min(plot_result['test']))-1
# final_confusion_mat = confusion_mats[pos]
# if opt.k_fold==1:
# statistics.statistics(final_confusion_mat, opt, 'final', 'final_test')
# np.save(os.path.join(opt.save_dir,'confusion_mat.npy'), final_confusion_mat)
# else:
# fold_final_confusion_mat += final_confusion_mat
# util.writelog('fold -> macro-prec,reca,F1,err,kappa: '+str(statistics.report(final_confusion_mat)),opt,True)
# util.writelog('confusion_mat:\n'+str(final_confusion_mat)+'\n',opt,True)
# plot.draw_heatmap(final_confusion_mat,opt,name = 'fold'+str(fold+1)+'_test')
# if opt.k_fold != 1:
# statistics.statistics(fold_final_confusion_mat, opt, 'final', 'k-fold-final_test')
# np.save(os.path.join(opt.save_dir,'confusion_mat.npy'), fold_final_confusion_mat)
# if opt.mergelabel:
# mat = statistics.mergemat(fold_final_confusion_mat, opt.mergelabel)
# statistics.statistics(mat, opt, 'merge', 'mergelabel_final')
import numpy as np
def interp(y,length):
xp = np.linspace(0, len(y)-1,num = len(y))
fp = y
x = np.linspace(0, len(y)-1,num = length)
return np.interp(x, xp, fp)
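# Worked example: interp(np.array([0., 2., 4.]), 5) -> array([0., 1., 2., 3., 4.])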
def pad(data,padding,mod='zero'):
if mod == 'zero':
pad_data = np.zeros(padding, dtype = data.dtype)
return np.append(data, pad_data)
elif mod == 'repeat':
out_data = data.copy()
repeat_num = int(padding/len(data))
for i in range(repeat_num):
out_data = np.append(out_data, data)
pad_data = data[:padding-repeat_num*len(data)]
return np.append(out_data, pad_data)
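# Worked examples:
#   pad(np.array([1, 2, 3]), 2)               -> array([1, 2, 3, 0, 0])
#   pad(np.array([1, 2, 3]), 4, mod='repeat') -> array([1, 2, 3, 1, 2, 3, 1])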
def normliaze(data,mod = 'norm',sigma = 0,dtype=np.float64,truncated = 1):
'''
mod: norm | std | maxmin | 5_95
norm: (data-mean)/sigma, sigma must be passed in and nonzero
std: zero-mean, unit-std;  maxmin: scale to [0,1];  5_95: robust percentile scaling
dtype : np.float64,np.float16...
'''
data = data.astype(dtype)
if mod == 'norm':
result = (data-np.mean(data))/sigma
elif mod == 'std':
mu = np.mean(data, axis=0)
sigma = np.std(data, axis=0)
result = (data - mu) / sigma
elif mod == 'maxmin':
result = (data-np.min(data))/(np.max(data)-np.min(data))
elif mod == '5_95':
data_sort = np.sort(data)
th5 = data_sort[int(0.05*len(data_sort))]
th95 = data_sort[int(0.95*len(data_sort))]
baseline = (th5+th95)/2
sigma = (th95-th5)/2
if sigma == 0:
sigma =1
result = (data-baseline)/sigma
if truncated > 1:
result = np.clip(result, (-truncated), (truncated))
return result.astype(dtype)
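# Usage sketch (values illustrative):
#   x = np.array([1., 2., 3., 4.])
#   normliaze(x, mod='std')                # zero-mean, unit-std scaling
#   normliaze(x, mod='5_95', truncated=3)  # 5th/95th-percentile scaling, clipped to [-3, 3]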
def diff1d(indata,stride=1,padding=1,bias=False):
pad_data = np.zeros(padding)  # avoid shadowing the module-level pad()
indata = np.append(indata, pad_data)
if bias:
if np.min(indata)<0:
indata = indata - np.min(indata)
outdata = np.zeros(int(len(indata)/stride)-1)
for i in range(int(len(indata)/stride)-1):
outdata[i]=indata[i*stride+stride]-indata[i*stride]
return outdata
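# Worked example: diff1d(np.array([0., 1., 3., 2.])) -> array([ 1.,  2., -1., -2.])
# (stride-1 first difference; the last value is taken against the appended zero padding)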
def findpeak(indata,ismax=False,interval=2):
'''
return:indexs
'''
diff = diff1d(indata)
indexs = []
if ismax:
return np.array([np.argmax(indata)])
rise = True
if diff[0] <=0:
rise = False
for i in range(len(diff)):
if rise==True and diff[i]<=0:
index = i
ok_flag = True
for x in range(interval):
if indata[np.clip(index-x,0,len(indata)-1)]>indata[index] or indata[np.clip(index+x,0,len(indata)-1)]>indata[index]:
ok_flag = False
if ok_flag:
indexs.append(index)
if diff[i] <=0:
rise = False
else:
rise = True
return np.array(indexs)
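# Worked example: findpeak(np.array([0., 1., 0., 2., 0.])) -> array([1, 3])
# (indices of local maxima that dominate their +/-interval neighborhood)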
def get_crossing(line1,line2):
cross_pos = []
dif = line1-line2
flag = 1
if dif[0]<0:
dif = -dif
for i in range(int(len(dif))):
if flag == 1:
if dif[i] <= 0:
cross_pos.append(i)
flag = 0
else:
if dif[i] >= 0:
cross_pos.append(i)
flag = 1
return cross_pos
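# Worked example: get_crossing(np.array([1., .5, -.5, -1., .5]), np.zeros(5)) -> [2, 4]
# (indices where line1-line2 changes sign)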
def get_y(indexs,fun):
y = []
for index in indexs:
y.append(fun[index])
return np.array(y)
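# fillnone repairs runs of the placeholder value `flag` using values taken from
# just before and just after each gap: gaps no longer than 2*num are split
# between the two sides; longer gaps are patched only `num` samples deep from
# each end. It returns the repaired array and, for every position, the index of
# the sample it was copied from.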
def fillnone(arr_in,flag,num = 7):
arr = arr_in.copy()
index = np.linspace(0,len(arr)-1,len(arr),dtype='int')
cnt = 0
for i in range(2,len(arr)-2):
if arr[i] != flag:
arr[i] = arr[i]
if cnt != 0:
if cnt <= num*2:
arr[i-cnt:round(i-cnt/2)] = arr[i-cnt-1-2]
arr[round(i-cnt/2):i] = arr[i+2]
index[i-cnt:round(i-cnt/2)] = i-cnt-1-2
index[round(i-cnt/2):i] = i+2
else:
arr[i-cnt:i-cnt+num] = arr[i-cnt-1-2]
arr[i-num:i] = arr[i+2]
index[i-cnt:i-cnt+num] = i-cnt-1-2
index[i-num:i] = i+2
cnt = 0
else:
cnt += 1
return arr,index
def main():
a = [0,2,4,6,8,10]
print(interp(a, 6))
if __name__ == '__main__':
main()
\ No newline at end of file
@@ -14,13 +14,17 @@ class Options():
self.parser.add_argument('--gpu_id', type=int, default=0,help='choose which gpu want to use, 0 | 1 | 2 ...')
self.parser.add_argument('--no_cudnn', action='store_true', help='if specified, do not use cudnn')
self.parser.add_argument('--label', type=int, default=5,help='number of labels')
self.parser.add_argument('--input_nc', type=int, default=3, help='number of input channels')
self.parser.add_argument('--label_name', type=str, default='auto',help='name of labels,example:"a,b,c,d,e,f"')
self.parser.add_argument('--model_name', type=str, default='micro_multi_scale_resnet_1d',help='Choose model lstm | multi_scale_resnet_1d | resnet18 | micro_multi_scale_resnet_1d...')
# ------------
# for lstm
self.parser.add_argument('--input_size', type=int, default=100,help='input_size of LSTM')
self.parser.add_argument('--time_step', type=int, default=270,help='time_step of LSTM')
# for autoencoder
self.parser.add_argument('--finesize', type=int, default=1800, help='crop your data into this size')
self.parser.add_argument('--feature', type=int, default=3, help='number of encoder features')
# ------------
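# e.g. a hypothetical invocation of the training script for the autoencoder path:
#   python3 train.py --model_name autoencoder --feature 3 --finesize 1800 --label 5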
self.parser.add_argument('--pretrained', action='store_true', help='if specified, use pretrained models')
self.parser.add_argument('--continue_train', action='store_true', help='if specified, continue train')
self.parser.add_argument('--lr', type=float, default=0.001,help='learning rate')
......
import os
import time
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
colors= ['blue','orange','green','red','purple','brown','pink','gray','olive','cyan']
markers = ['o','^','.',',','v','<','>','1','2','3','4','s','p','*','h','H','+','x','D','d','|','_']
#---------------------------------heatmap---------------------------------
'''
heatmap: https://matplotlib.org/gallery/images_contours_and_fields/image_annotated_heatmap.html#sphx-glr-gallery-images-contours-and-fields-image-annotated-heatmap-py
choose color:https://matplotlib.org/tutorials/colors/colormaps.html?highlight=wistia
recommend: YlGn Wistia Blues YlOrBr
'''
def create_heatmap(data, row_labels, col_labels, ax=None,
cbar_kw={}, cbarlabel="", **kwargs):
"""
@@ -130,7 +134,7 @@ def annotate_heatmap(im, data=None, valfmt="{x:.2f}",
return texts
def draw_heatmap(mat,opt,name = 'train'):
if 'merge' in name:
label_name = opt.mergelabel_name
else:
@@ -153,10 +157,108 @@ def draw(mat,opt,name = 'train'):
fig.tight_layout()
# plt.show()
plt.savefig(os.path.join(opt.save_dir,name+'_heatmap.png'))
plt.close('all')
#---------------------------------loss---------------------------------
def draw_loss(plot_result,epoch,opt):
train = np.array(plot_result['train'])
test = np.array(plot_result['test'])
plt.figure('running loss')
plt.clf()
train_x = np.linspace(0,epoch,len(train))
test_x = np.linspace(0,int(epoch),len(test))
plt.xlabel('Epoch')
plt.ylabel('loss')
if epoch <10:
plt.xlim((0,10))
else:
plt.xlim((0,epoch))
plt.plot(train_x,train,label='train',linewidth = 1.5)
plt.plot(test_x,test,label='test', linewidth = 1.5)
plt.legend(loc=1)
plt.title('Running loss',fontsize='large')
plt.savefig(os.path.join(opt.save_dir,'running_loss'+'%06d' % plotcnt+'.png'))
#---------------------------------scatter---------------------------------
plotcnt = 0
def label_statistics(labels):
labels = (np.array(labels)).astype(np.int64)
label_num = np.max(labels)+1
label_cnt = np.zeros(label_num,dtype=np.int64)
for i in range(len(labels)):
label_cnt[labels[i]] += 1
label_cnt_per = label_cnt/len(labels)
return label_cnt,label_cnt_per,label_num
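# Worked example: label_statistics([0, 0, 1, 2])
#   -> (array([2, 1, 1]), array([0.5 , 0.25, 0.25]), 3)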
def draw_scatter(data,opt):
label_cnt,_,label_num = label_statistics(data[:,-1])
fig = plt.figure(figsize=(12,9))
cnt = 0
data_dimension = data.shape[1]-1
if data_dimension>3:
from sklearn.decomposition import PCA
pca=PCA(n_components=3)
data=pca.fit_transform(data[:,:-1])
data_dimension = 3
if data_dimension == 2:
for i in range(label_num):
plt.scatter(data[cnt:cnt+label_cnt[i],0], data[cnt:cnt+label_cnt[i],1],
)
cnt += label_cnt[i]
elif data_dimension == 3:
ax = fig.add_subplot(111, projection='3d')
for i in range(label_num):
ax.scatter(data[cnt:cnt+label_cnt[i],0], data[cnt:cnt+label_cnt[i],1], data[cnt:cnt+label_cnt[i],2],
)
cnt += label_cnt[i]
global plotcnt
plotcnt += 1
plt.xlim(-1.5,1.5)
plt.ylim(-1.5,1.5)
plt.savefig(os.path.join(opt.save_dir,'feature_scatter'+'%06d' % plotcnt+'.png'))
np.save(os.path.join(opt.save_dir,'feature_scatter.npy'), data)
plt.close('all')
def draw_autoencoder_result(true_signal,pred_signal,opt):
plt.subplot(211)
plt.plot(true_signal[0][0])
plt.title('True')
plt.subplot(212)
plt.plot(pred_signal[0][0])
plt.title('Pred')
plt.savefig(os.path.join(opt.save_dir,'autoencoder_result'+'%06d' % plotcnt+'.png'))
plt.close('all')
def showscatter3d(data):
label_cnt,_,label_num = label_statistics(data[:,3])
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
cnt = 0
for i in range(label_num):
ax.scatter(data[cnt:cnt+label_cnt[i],0], data[cnt:cnt+label_cnt[i],1], data[cnt:cnt+label_cnt[i],2],
c = colors[i%10],marker = markers[i//10])
cnt += label_cnt[i]
plt.show()
def main():
data = np.load('../checkpoints/au/feature_scatter.npy')
showscatter3d(data)
#heatmap test
'''
vegetables = ["cucumber", "tomato", "lettuce", "asparagus",
"potato", "wheat", "barley"]
farmers = ["Farmer Joe", "Upland Bros.", "Smith Gardening",
@@ -171,6 +273,6 @@ def main():
[0.1, 2.0, 0.0, 1.4, 0.0, 1.9, 6.3]])
draw(harvest,vegetables,farmers,name = 'train')
'''
if __name__ == '__main__':
main()
\ No newline at end of file
import numpy as np
import matplotlib.pyplot as plt
import os
from mpl_toolkits.mplot3d import Axes3D
def label_statistics(labels):
labels = (np.array(labels)).astype(np.int64)
label_num = np.max(labels)+1
label_cnt = np.zeros(label_num,dtype=np.int64)
for i in range(len(labels)):
label_cnt[labels[i]] += 1
label_cnt_per = label_cnt/len(labels)
return label_cnt,label_cnt_per,label_num
colors= ['blue','orange','green','red','purple','brown','pink','gray','olive','cyan']
markers = ['o','^','.',',','v','<','>','1','2','3','4','s','p','*','h','H','+','x','D','d','|','_']
def draw(data,opt):
label_cnt,_,label_num = label_statistics(data[:,3])
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
cnt = 0
for i in range(label_num):
ax.scatter(data[cnt:cnt+label_cnt[i],0], data[cnt:cnt+label_cnt[i],1], data[cnt:cnt+label_cnt[i],2],
c = colors[i%10],marker = markers[i//10])
cnt += label_cnt[i]
plt.savefig(os.path.join(opt.save_dir,'scatter3d.png'))
np.save(os.path.join(opt.save_dir,'scatter3d.npy'), data)
plt.close('all')
def show(data):
label_cnt,_,label_num = label_statistics(data[:,3])
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
cnt = 0
for i in range(label_num):
ax.scatter(data[cnt:cnt+label_cnt[i],0], data[cnt:cnt+label_cnt[i],1], data[cnt:cnt+label_cnt[i],2],
c = colors[i%10],marker = markers[i//10])
cnt += label_cnt[i]
plt.show()
if __name__ == '__main__':
data = np.load('../checkpoints/au/scatter3d.npy')
show(data)
\ No newline at end of file
import numpy as np
import matplotlib.pyplot as plt
import util
import os
from . import plot
def label_statistics(labels):
#for sleep label: N3->0 N2->1 N1->2 REM->3 W->4
# if not opt:
# s = set()
# labels = (np.array(labels)).astype(np.int64)
# for label in labels:
# s.add(label)
# label_num = len(list(s))
# else:
# label_num = opt.label
labels = (np.array(labels)).astype(np.int64)
label_num = np.max(labels)+1
label_cnt = np.zeros(label_num,dtype=np.int64)
for i in range(len(labels)):
label_cnt[labels[i]] += 1
label_cnt_per = label_cnt/len(labels)
@@ -91,32 +95,12 @@ def report(mat,print_sub=False):
k = Kappa(mat)
return round(Macro_precision,4),round(Macro_recall,4),round(Macro_F1,4),round(err,4),round(k, 4)
def statistics(mat,opt,logname,heatmapname):
util.writelog('------------------------------ '+logname+' result ------------------------------',opt,True)
util.writelog(logname+' -> macro-prec,reca,F1,err,kappa: '+str(report(mat)),opt,True)
util.writelog('confusion_mat:\n'+str(mat)+'\n',opt,True)
plot.draw_heatmap(mat,opt,name = heatmapname)
def main():
......
@@ -3,6 +3,7 @@ import random
import numpy as np
import torch
from . import dsp
from . import array_operation as arr
# import dsp
def trimdata(data,num):
@@ -80,6 +81,10 @@ def ToTensor(data,target=None,no_cuda = False):
def random_transform_1d(data,finesize,test_flag):
batch_size,ch,length = data.shape
# if finesize>length:
# result = np.zeros((batch_size,ch,length), dtype=data.dtype)
# for i in range(batchsize)
# result[i] = arr.p
if test_flag:
move = int((length-finesize)*0.5)
@@ -129,9 +134,10 @@ def ToInputShape(data,opt,test_flag = False):
if opt.model_name in['lstm','cnn_1d','resnet18_1d','resnet34_1d','multi_scale_resnet_1d','micro_multi_scale_resnet_1d']:
result = random_transform_1d(data, _finesize, test_flag=test_flag)
# result = np.zeros((batchsize,opt.input_nc,_finesize),dtype=np.float64)
# for i in range(0,batchsize):
# result[i]=random_transform_1d(data[i],finesize = _finesize,test_flag=test_flag)
elif opt.model_name == 'autoencoder':
# _finesize = loadsize
result = random_transform_1d(data, opt.finesize, test_flag=test_flag)
# unsupported now
# elif opt.model_name=='lstm':
......