diff --git a/Code/.DS_Store b/Code/.DS_Store new file mode 100644 index 0000000000000000000000000000000000000000..530dfc5d41f9275e207926dc884d731fa3a98229 Binary files /dev/null and b/Code/.DS_Store differ diff --git a/Code/1_data_prepare/1_2_split_dataset.py b/Code/1_data_prepare/1_2_split_dataset.py index 21db34e70ad6a472aa0dfa507c3c4354a6c50ff2..da01167351b439f20253b4788b8ad95c354bf994 100644 --- a/Code/1_data_prepare/1_2_split_dataset.py +++ b/Code/1_data_prepare/1_2_split_dataset.py @@ -8,10 +8,10 @@ import glob import random import shutil -dataset_dir = '../../Data/cifar-10-png/raw_test/' -train_dir = '../../Data/train/' -valid_dir = '../../Data/valid/' -test_dir = '../../Data/test/' +dataset_dir = os.path.join("..", "..", "Data", "cifar-10-png", "raw_test") +train_dir = os.path.join("..", "..", "Data", "train") +valid_dir = os.path.join("..", "..", "Data", "valid") +test_dir = os.path.join("..", "..", "Data", "test") train_per = 0.8 valid_per = 0.1 @@ -27,7 +27,7 @@ if __name__ == '__main__': for root, dirs, files in os.walk(dataset_dir): for sDir in dirs: - imgs_list = glob.glob(os.path.join(root, sDir)+'/*.png') + imgs_list = glob.glob(os.path.join(root, sDir, '*.png')) random.seed(666) random.shuffle(imgs_list) imgs_num = len(imgs_list) @@ -37,14 +37,14 @@ if __name__ == '__main__': for i in range(imgs_num): if i < train_point: - out_dir = train_dir + sDir + '/' + out_dir = os.path.join(train_dir, sDir) elif i < valid_point: - out_dir = valid_dir + sDir + '/' + out_dir = os.path.join(valid_dir, sDir) else: - out_dir = test_dir + sDir + '/' + out_dir = os.path.join(test_dir, sDir) makedir(out_dir) - out_path = out_dir + os.path.split(imgs_list[i])[-1] + out_path = os.path.join(out_dir, os.path.split(imgs_list[i])[-1]) shutil.copy(imgs_list[i], out_path) print('Class:{}, train:{}, valid:{}, test:{}'.format(sDir, train_point, valid_point-train_point, imgs_num-valid_point)) diff --git a/Code/1_data_prepare/1_3_generate_txt.py
b/Code/1_data_prepare/1_3_generate_txt.py index c588b72e28e2e5620a8bd5780b23be5735cf4ff3..057f0d6eec8b8ca590ed73fee818eb9b3634d406 100644 --- a/Code/1_data_prepare/1_3_generate_txt.py +++ b/Code/1_data_prepare/1_3_generate_txt.py @@ -4,11 +4,11 @@ import os 为数据集生成对应的txt文件 ''' -train_txt_path = '../../Data/train.txt' -train_dir = '../../Data/train/' +train_txt_path = os.path.join("..", "..", "Data", "train.txt") +train_dir = os.path.join("..", "..", "Data", "train") -valid_txt_path = '../../Data/valid.txt' -valid_dir = '../../Data/valid/' +valid_txt_path = os.path.join("..", "..", "Data", "valid.txt") +valid_dir = os.path.join("..", "..", "Data", "valid") def gen_txt(txt_path, img_dir): @@ -30,4 +30,5 @@ def gen_txt(txt_path, img_dir): if __name__ == '__main__': gen_txt(train_txt_path, train_dir) - gen_txt(valid_txt_path, valid_dir) \ No newline at end of file + gen_txt(valid_txt_path, valid_dir) + diff --git a/Code/2_model/2_finetune.py b/Code/2_model/2_finetune.py index 90407e8f73f339379f61bec7d60c64727908d1c6..c6307c4c7b0e82de87561520e63f37abb30f84e5 100644 --- a/Code/2_model/2_finetune.py +++ b/Code/2_model/2_finetune.py @@ -14,8 +14,8 @@ sys.path.append("..") from utils.utils import MyDataset, validate, show_confMat from datetime import datetime -train_txt_path = '../../Data/train.txt' -valid_txt_path = '../../Data/valid.txt' +train_txt_path = os.path.join("..", "..", "Data", "train.txt") +valid_txt_path = os.path.join("..", "..", "Data", "valid.txt") classes_name = ['plane', 'car', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck'] @@ -25,7 +25,7 @@ lr_init = 0.001 max_epoch = 1 # log -result_dir = '../../Result/' +result_dir = os.path.join("..", "..", "Result") now_time = datetime.now() time_str = datetime.strftime(now_time, '%m-%d_%H-%M-%S') diff --git a/Code/4_viewer/.DS_Store b/Code/4_viewer/.DS_Store new file mode 100644 index 0000000000000000000000000000000000000000..759af63da55621238aeec06bba251b2ba5572c01 Binary files /dev/null and 
b/Code/4_viewer/.DS_Store differ diff --git a/Code/4_viewer/1_tensorboardX_demo.py b/Code/4_viewer/1_tensorboardX_demo.py index 9da8523b66653f485196b3132c8d402151044c97..acb60ba4c950209124bf06371719d91889e17f6a 100644 --- a/Code/4_viewer/1_tensorboardX_demo.py +++ b/Code/4_viewer/1_tensorboardX_demo.py @@ -1,4 +1,5 @@ # coding: utf-8 +import os import torch import torchvision.utils as vutils import numpy as np @@ -7,7 +8,7 @@ from torchvision import datasets from tensorboardX import SummaryWriter resnet18 = models.resnet18(False) -writer = SummaryWriter('../../Result/runs') +writer = SummaryWriter(os.path.join("..", "..", "Result", "runs")) sample_rate = 44100 freqs = [262, 294, 330, 349, 392, 440, 440, 440, 440, 440, 440] @@ -23,10 +24,10 @@ for n_iter in range(100): s1 = torch.rand(1) # value to keep s2 = torch.rand(1) # data grouping by `slash` - writer.add_scalar('data/scalar_systemtime', s1[0], n_iter) + writer.add_scalar('data/scalar_systemtime', s1[0], n_iter) # data grouping by `slash` - writer.add_scalar('data/scalar_customtime', s1[0], n_iter, walltime=n_iter) - writer.add_scalars('data/scalar_group', {"xsinx": n_iter * np.sin(n_iter), + writer.add_scalar('data/scalar_customtime', s1[0], n_iter, walltime=n_iter) + writer.add_scalars('data/scalar_group', {"xsinx": n_iter * np.sin(n_iter), "xcosx": n_iter * np.cos(n_iter), "arctanx": np.arctan(n_iter)}, n_iter) x = torch.rand(32, 3, 64, 64) # output from network @@ -56,15 +57,15 @@ for n_iter in range(100): precision, recall, n_iter) # export scalar data to JSON for external processing -writer.export_scalars_to_json("../../Result/all_scalars.json") +writer.export_scalars_to_json(os.path.join("..", "..", "Result", "all_scalars.json")) -dataset = datasets.MNIST('../../Data/mnist', train=False, download=True) +dataset = datasets.MNIST(os.path.join("..", "..", "Data", "mnist"), train=False, download=True) images = dataset.test_data[:100].float() label =
dataset.test_labels[:100] features = images.view(100, 784) writer.add_embedding(features, metadata=label, label_img=images.unsqueeze(1)) writer.add_embedding(features, global_step=1, tag='noMetadata') -dataset = datasets.MNIST('../../Data/mnist', train=True, download=True) +dataset = datasets.MNIST(os.path.join("..", "..", "Data", "mnist"), train=True, download=True) images_train = dataset.train_data[:100].float() labels_train = dataset.train_labels[:100] features_train = images_train.view(100, 784) diff --git a/Code/4_viewer/2_visual_weights.py b/Code/4_viewer/2_visual_weights.py index cc1e340da720b4b6fc2d2cb921860117360a7629..fa9fda727805ae73c689dfbaaa3c1358f6399cfe 100644 --- a/Code/4_viewer/2_visual_weights.py +++ b/Code/4_viewer/2_visual_weights.py @@ -1,4 +1,5 @@ # coding: utf-8 +import os import torch import torchvision.utils as vutils from tensorboardX import SummaryWriter @@ -42,10 +43,10 @@ class Net(nn.Module): net = Net() # 创建一个网络 -pretrained_dict = torch.load('../2_model/net_params.pkl') +pretrained_dict = torch.load(os.path.join("..", "2_model", "net_params.pkl")) net.load_state_dict(pretrained_dict) -writer = SummaryWriter(log_dir='../../Result/visual_weights') +writer = SummaryWriter(log_dir=os.path.join("..", "..", "Result", "visual_weights")) params = net.state_dict() for k, v in params.items(): if 'conv' in k and 'weight' in k: diff --git a/Code/4_viewer/3_visual_featuremaps.py b/Code/4_viewer/3_visual_featuremaps.py index 07ef0aaf7ebaaa135f085536e8c42e5beeca75e9..a1dd4d3cc9d421cfd73934eba12c7fc2b559afb0 100644 --- a/Code/4_viewer/3_visual_featuremaps.py +++ b/Code/4_viewer/3_visual_featuremaps.py @@ -1,4 +1,5 @@ # coding: utf-8 +import os import torch import torchvision.utils as vutils import numpy as np @@ -12,9 +13,9 @@ from torch.utils.data import DataLoader vis_layer = 'conv1' -log_dir = '../../Result/visual_featuremaps' -txt_path = '../../Data/visual.txt' -pretrained_path = '../../Data/net_params_72p.pkl' +log_dir = os.path.join("..", "..",
"Result", "visual_featuremaps") +txt_path = os.path.join("..", "..", "Data", "visual.txt") +pretrained_path = os.path.join("..", "..", "Data", "net_params_72p.pkl") net = Net() pretrained_dict = torch.load(pretrained_path) diff --git a/Code/4_viewer/4_hist_grad_weight.py b/Code/4_viewer/4_hist_grad_weight.py index 677df7845c1425b7f51efb79ec80020245021351..79b00d8e9b1d2a780a976b4d9669f38f8ad86ab4 100644 --- a/Code/4_viewer/4_hist_grad_weight.py +++ b/Code/4_viewer/4_hist_grad_weight.py @@ -9,13 +9,14 @@ from torch.autograd import Variable import torch.nn as nn import torch.optim as optim import sys +import os sys.path.append("..") from utils.utils import MyDataset, validate, show_confMat, Net from tensorboardX import SummaryWriter from datetime import datetime -train_txt_path = '../../Data/train.txt' -valid_txt_path = '../../Data/valid.txt' +train_txt_path = os.path.join("..", "..", "Data", "train.txt") +valid_txt_path = os.path.join("..", "..", "Data", "valid.txt") classes_name = ['plane', 'car', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck'] @@ -25,7 +26,7 @@ lr_init = 0.001 max_epoch = 1 # log -log_dir = '../../Result/hist_grad_weight' +log_dir = os.path.join("..", "..", "Result", "hist_grad_weight") writer = SummaryWriter(log_dir=log_dir) diff --git a/Code/4_viewer/6_hook_for_grad_cam.py b/Code/4_viewer/6_hook_for_grad_cam.py index b6926d3dd1f44fd9cfb9eeeaba6121a29a4b7b0b..faf4b8802b679fd07b97fa67b91ccfc70f54eb59 100644 --- a/Code/4_viewer/6_hook_for_grad_cam.py +++ b/Code/4_viewer/6_hook_for_grad_cam.py @@ -130,9 +130,9 @@ def gen_cam(feature_map, grads): if __name__ == '__main__': BASE_DIR = os.path.dirname(os.path.abspath(__file__)) - path_img = os.path.join(BASE_DIR, "..", "..", "Data/cam_img/", "test_img_8.png") - path_net = os.path.join(BASE_DIR, "..", "..", "Data/", "net_params_72p.pkl") - output_dir = os.path.join(BASE_DIR, "..", "..", "Result/backward_hook_cam/") + path_img = os.path.join(BASE_DIR, "..", "..", "Data", "cam_img", 
"test_img_8.png") + path_net = os.path.join(BASE_DIR, "..", "..", "Data", "net_params_72p.pkl") + output_dir = os.path.join(BASE_DIR, "..", "..", "Result", "backward_hook_cam") classes = ('plane', 'car', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck') fmap_block = list() diff --git a/Code/main_training/main.py b/Code/main_training/main.py index 13f3cf720eac28992b43964a1dbad0705b0f4641..8fd0c526df67da939c8f362edea56725c1be4a71 100644 --- a/Code/main_training/main.py +++ b/Code/main_training/main.py @@ -15,8 +15,8 @@ from utils.utils import MyDataset, validate, show_confMat from tensorboardX import SummaryWriter from datetime import datetime -train_txt_path = '../../Data/train.txt' -valid_txt_path = '../../Data/valid.txt' +train_txt_path = os.path.join("..", "..", "Data", "train.txt") +valid_txt_path = os.path.join("..", "..", "Data", "valid.txt") classes_name = ['plane', 'car', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck'] @@ -26,7 +26,7 @@ lr_init = 0.001 max_epoch = 1 # log -result_dir = '../../Result/' +result_dir = os.path.join("..", "..", "Result") now_time = datetime.now() time_str = datetime.strftime(now_time, '%m-%d_%H-%M-%S')