# print('WARNING: According to the original paper, only one filter should be transformed in each iteration, but the current iteration transforms {} filter(s).'.format(count))
"Warning: The number of the whole data ({}) is smaller than the batch_size ({}), and drop_last is turnning on, so nothing will feed in program, Terminated now. Please reset batch_size to a smaller number or feed more data!"
.format(len(full_lines),settings.batch_size))
os._exit(1)
if num_trainers > 1 and mode == "train":
    assert self.shuffle_seed is not None, "multiprocess train, shuffle seed must be set!"
    np.random.RandomState(self.shuffle_seed).shuffle(full_lines)
elif shuffle:
    assert self.shuffle_seed is not None, "multiprocess train, shuffle seed must be set!"
    np.random.RandomState(self.shuffle_seed).shuffle(full_lines)
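# Why a shared shuffle_seed matters for multi-trainer runs (illustrative sketch,
# kept in comments so it does not execute inside this reader; the seed value 42
# is made up): two independent RandomState objects built from the same seed
# produce the same permutation, so every trainer sees full_lines in the same order.
#   a = list(range(10)); b = list(range(10))
#   np.random.RandomState(42).shuffle(a)   # trainer 0
#   np.random.RandomState(42).shuffle(b)   # trainer 1
#   assert a == b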
batch_data = []
for line in full_lines:
    img_path, label = line.split()
    img_path = os.path.join(data_dir, img_path)
    batch_data.append([img_path, int(label)])
    if len(batch_data) == batch_size:
        if mode == 'train' or mode == 'val' or mode == 'test':
            yield batch_data
        batch_data = []
return read_file_list

data_reader = reader()
if mode == 'train' and num_trainers > 1:
    assert self.shuffle_seed is not None, \
        "If num_trainers > 1, the shuffle_seed must be set, because " \
        "the order of batch data generated by the reader must be the same across trainers."
)==0,"please support correct batch_size({}), which can be divided by available cards({}), you can change the number of cards by indicating: export CUDA_VISIBLE_DEVICES= ".format(
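# Illustrative sketch of the per-card batch split implied by the assert above;
# the names global_batch_size and device_num are assumptions, not this repo's API.
def split_batch_size(global_batch_size, device_num):
    # every visible card gets an equal share, so the global batch size must be
    # divisible by the number of cards selected via CUDA_VISIBLE_DEVICES
    assert global_batch_size % device_num == 0, \
        "batch_size {} is not divisible by {} cards".format(global_batch_size, device_num)
    return global_batch_size // device_num

# Example: a global batch of 256 on 8 cards gives 32 samples per card:
# split_batch_size(256, 8) -> 32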
When the mixup process is used in training, it returns 5 results: data_loader, image, y_a (label), y_b (label), and lamda; otherwise it returns 3 results: data_loader, image, and label.
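# Minimal sketch of how a mixup batch can be formed, matching the 5-output description
# above (numpy only; make_mixup_batch and alpha=0.2 are assumed names/values, not this
# repo's API). images is a float ndarray [N, ...] and labels an int ndarray [N].
import numpy as np

def make_mixup_batch(images, labels, alpha=0.2):
    lam = np.random.beta(alpha, alpha)          # mixup coefficient (the "lamda" above)
    index = np.random.permutation(len(images))  # random partner for every sample
    mixed = lam * images + (1.0 - lam) * images[index]
    y_a, y_b = labels, labels[index]
    return mixed, y_a, y_b, lam

# The training loss is then combined as lam * loss(pred, y_a) + (1 - lam) * loss(pred, y_b);
# without mixup the batch is simply (image, label).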