Commit efde1c17 authored by 欲游山河十万里

Made some optimizations and improvements to the code.

Parent 6d0af1ed
@@ -17,9 +17,9 @@ def listPathAllfiles(dirname):
 if __name__ == '__main__':
-    annos_path = r"E:\06服饰\Deepfashion2\train\train\annos"   # change to the required path
-    image_path = r"E:\06服饰\Deepfashion2\train\train\images"  # change to the required path
-    labels_path = r"E:\06服饰\Deepfashion2\train\train\labels" # change to the required path
+    annos_path = r"F:\Deepfashion2\validation\annos"   # change to the required path
+    image_path = r"F:\Deepfashion2\validation\images"  # change to the required path
+    labels_path = r"F:\Deepfashion2\validation\labels" # change to the required path
     num_images = len(os.listdir(annos_path))
@@ -37,7 +37,7 @@ if __name__ == '__main__':
             if i == 'source' or i == 'pair_id':
                 continue
             else:
-                box = temp[i]['bounding_box']
+                box = temp[i]['segmentation']
                 x_1 = round((box[0] + box[2]) / 2 / width, 6)
                 y_1 = round((box[1] + box[3]) / 2 / height, 6)
                 w = round((box[2] - box[0]) / width, 6)
...
# coding:utf-8
import json
import os
import os.path

from PIL import Image
from tqdm import tqdm


def listPathAllfiles(dirname):
    result = []
    for maindir, subdir, file_name_list in os.walk(dirname):
        for filename in file_name_list:
            apath = os.path.join(maindir, filename)
            result.append(apath)
    return result


if __name__ == '__main__':
    annos_path = r"F:\Deepfashion2\validation\annos"   # change to the required path
    image_path = r"F:\Deepfashion2\validation\images"  # change to the required path
    labels_path = r"F:\Deepfashion2\validation\labels" # change to the required path

    num_images = len(os.listdir(annos_path))
    for num in tqdm(range(1, num_images + 1)):
        json_name = os.path.join(annos_path, str(num).zfill(6) + '.json')
        image_name = os.path.join(image_path, str(num).zfill(6) + '.jpg')
        txtfile = os.path.join(labels_path, str(num).zfill(6) + '.txt')

        imag = Image.open(image_name)
        width, height = imag.size

        res = []
        with open(json_name, 'r') as f:
            temp = json.loads(f.read())
            for i in temp:
                if i == 'source' or i == 'pair_id':
                    continue
                else:
                    box = temp[i]['segmentation']  # list of polygons: [[x1, y1, x2, y2, ...], ...]
                    category_id = int(temp[i]['category_id'] - 1)
                    list_nums = [str(category_id)]
                    for index in range(len(box)):
                        for indexj in range(len(box[index])):
                            # YOLO segmentation labels expect coordinates normalized to [0, 1]:
                            # even positions are x (divide by width), odd positions are y (divide by height).
                            if indexj % 2 == 0:
                                list_nums.append(str(round(box[index][indexj] / width, 6)))
                            else:
                                list_nums.append(str(round(box[index][indexj] / height, 6)))
                    res.append(" ".join(list_nums))
        with open(txtfile, "w") as fw:
            fw.write("\n".join(res))
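For reference, each line of a generated label follows the Ultralytics segmentation format `class x1 y1 x2 y2 ...` with coordinates normalized to [0, 1]. Below is a minimal sanity-check sketch (not part of the commit; the file paths are placeholders) that converts one generated label back to pixel coordinates so it can be eyeballed against the source DeepFashion2 JSON.

# Sanity-check sketch: denormalize one label file back to pixel coordinates.
import json
from PIL import Image

label_file = r"F:\Deepfashion2\validation\labels\000001.txt"  # placeholder path
image_file = r"F:\Deepfashion2\validation\images\000001.jpg"  # placeholder path

width, height = Image.open(image_file).size
with open(label_file) as f:
    for line in f:
        parts = line.split()
        category_id = int(parts[0])
        coords = list(map(float, parts[1:]))
        # even positions are x (scale by width), odd positions are y (scale by height)
        pixels = [v * width if k % 2 == 0 else v * height for k, v in enumerate(coords)]
        print(category_id, pixels[:6], "...")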
@@ -2,7 +2,7 @@ import os
 import random
 import sys
-root_path = r'E:/byc/mythree_traindatasets'  # change this
+root_path = r'F:/Deepfashion2/fours_seg/'    # change this
 label_path = root_path + r'/labels'          # location of the txt label files (change this)
 fenpei_xinxi_path = root_path + r'/fenpei'   # where the split information is saved
@@ -10,7 +10,7 @@ if not os.path.exists(fenpei_xinxi_path):
     os.makedirs(fenpei_xinxi_path)
 train_test_percent = 1.0   # (train+valid)/(train+valid+test) = 1, i.e. no test set
-train_valid_percent = 1.0  # train/(train+valid)
+train_valid_percent = 0.8  # train/(train+valid)
 total_txt = os.listdir(label_path)
 num = len(total_txt)
...
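The split script itself is only partially visible in this diff; it draws a random train/valid assignment from the label directory and stores it under fenpei. A minimal sketch of how such an 80/20 split can be drawn with random.sample follows; the output file names (train.txt, valid.txt) are illustrative assumptions, not necessarily the six files the real script writes.

import os
import random

root_path = r'F:/Deepfashion2/fours_seg/'        # same root as above
label_path = os.path.join(root_path, 'labels')
fenpei_xinxi_path = os.path.join(root_path, 'fenpei')
os.makedirs(fenpei_xinxi_path, exist_ok=True)

train_valid_percent = 0.8                        # train / (train + valid)
total_txt = sorted(os.listdir(label_path))
num = len(total_txt)
train_idx = set(random.sample(range(num), int(num * train_valid_percent)))

# Write one stem per line; file names here are placeholders for the real fenpei outputs.
with open(os.path.join(fenpei_xinxi_path, 'train.txt'), 'w') as f_train, \
     open(os.path.join(fenpei_xinxi_path, 'valid.txt'), 'w') as f_valid:
    for i, name in enumerate(total_txt):
        (f_train if i in train_idx else f_valid).write(os.path.splitext(name)[0] + '\n')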
@@ -64,7 +64,7 @@ def chonggou(root_path, fenpei_path, img_path, label_path):
             shutil.copy(label_path + str(label_txt_cg_valid[q]), new_dataset_validl)
 if __name__ == "__main__":
-    root_path = r'E:/byc/mythree_traindatasets'  # root directory
+    root_path = r'F:/Deepfashion2/fours_seg/'    # root directory
     fenpei_path = root_path + r'/fenpei'         # split information output by step 2 (6 txt files)
     img_path = root_path + '\\images\\'          # location of the original images
     label_path = root_path + '\\labels\\'        # location of the original txt labels
...
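The chonggou() function is also only partially shown; it copies images and labels into the new train/valid dataset folders according to the split files. A minimal sketch of that copy step, assuming the usual Ultralytics dataset layout (directory names, list-file names, and the .jpg extension are assumptions), is:

import os
import shutil

def copy_split(root_path, split_file, img_out, label_out):
    # Copy the images and labels listed in split_file into the target directories.
    img_path = os.path.join(root_path, 'images')
    label_path = os.path.join(root_path, 'labels')
    os.makedirs(img_out, exist_ok=True)
    os.makedirs(label_out, exist_ok=True)
    with open(split_file) as f:
        for stem in (line.strip() for line in f if line.strip()):
            shutil.copy(os.path.join(img_path, stem + '.jpg'), img_out)
            shutil.copy(os.path.join(label_path, stem + '.txt'), label_out)

root = r'F:/Deepfashion2/fours_seg/'
copy_split(root, os.path.join(root, 'fenpei', 'train.txt'),
           os.path.join(root, 'new_dataset', 'images', 'train'),
           os.path.join(root, 'new_dataset', 'labels', 'train'))
copy_split(root, os.path.join(root, 'fenpei', 'valid.txt'),
           os.path.join(root, 'new_dataset', 'images', 'val'),
           os.path.join(root, 'new_dataset', 'labels', 'val'))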
 from ultralytics import RTDETR
 from ultralytics import YOLO
 # Load a model
-model = RTDETR("E:/fourworkplace/yolov8v4/ultralytics/cfg/models/v8/yolov8-swinV2-RTViT.yaml")  # build a new model from scratch
+model = RTDETR("D:/BProjectAll/workplace/yolov8V4/ultralytics/cfg/models/v8/yolov8-swinV2-RTViT.yaml")  # build a new model from scratch
 model.info()
-model1=RTDETR("E:/fourworkplace/yolov8v4/ultralytics/cfg/models/v8/yolov8-swinV2-RT.yaml")
+model1=RTDETR("D:/BProjectAll/workplace/yolov8V4/ultralytics/cfg/models/v8/yolov8-swinV2-RT.yaml")
 model1.info()
-model2=RTDETR("E:/fourworkplace/yolov8v4/ultralytics/cfg/models/v5/yolov5-swinV2-RTViT.yaml")
+model2=RTDETR("D:/BProjectAll/workplace/yolov8V4/ultralytics/cfg/models/v5/yolov5-swinV2-RTViT.yaml")
 model2.info()
-model3=RTDETR("E:/fourworkplace/yolov8v4/ultralytics/cfg/models/v3/yolov3-swinV2-RTViT.yaml")
+model3=RTDETR("D:/BProjectAll/workplace/yolov8V4/ultralytics/cfg/models/v3/yolov3-swinV2-RTViT.yaml")
 model3.info()
...
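Since this change only swaps hard-coded drive letters, one way to keep the model-summary script portable is to build the config paths from a single base directory. A minimal sketch, assuming the custom SwinV2/RTViT modules are registered in your local ultralytics checkout (the base path below is a placeholder):

import os
from ultralytics import RTDETR

# Placeholder: point this at your local ultralytics/cfg/models directory.
CFG_DIR = r"D:/BProjectAll/workplace/yolov8V4/ultralytics/cfg/models"

for cfg in ("v8/yolov8-swinV2-RTViT.yaml",
            "v8/yolov8-swinV2-RT.yaml",
            "v5/yolov5-swinV2-RTViT.yaml",
            "v3/yolov3-swinV2-RTViT.yaml"):
    model = RTDETR(os.path.join(CFG_DIR, cfg))  # build the model from its yaml definition
    model.info()                                # print the layer/parameter summary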
 from ultralytics import RTDETR
 from ultralytics import YOLO
-epoch_set=100
+epoch_set=300
 workers_set=14
+model_yaml_path=""
+data_yaml_path=""
 # Baseline training with the original YOLOv5
-model = YOLO("E:/fourworkplace/yolov8v4/ultralytics/cfg/models/v5/yolov5.yaml")  # build a new model from scratch
-model.train(data="E:/fourworkplace/yolov8v4/ultralytics/cfg/datasets/yolov8_four.yaml",
+model = YOLO(model_yaml_path+"v5/yolov5.yaml")  # build a new model from scratch
+model.train(data=data_yaml_path+"datasets/yolov8_four.yaml",
             pretrained=True,epochs=epoch_set,batch=64,patience=200,resume=True,workers=workers_set)
 # Effect of this paper's improvements (CNN+SwinV2+RTViT) applied to YOLOv5
-model = YOLO("E:/fourworkplace/yolov8v4/ultralytics/cfg/models/v5/yolov5-swinV2-RTViT.yaml")  # build a new model from scratch
-model.train(data="E:/fourworkplace/yolov8v4/ultralytics/cfg/datasets/yolov8_four.yaml",
+model = RTDETR(model_yaml_path+"v5/yolov5-swinV2-RTViT.yaml")  # build a new model from scratch
+model.train(data=data_yaml_path+"datasets/yolov8_four.yaml",
             pretrained=True,epochs=epoch_set,batch=64,patience=200,resume=True,workers=workers_set)
 # Baseline training with the original YOLOv8
-model = YOLO("E:/fourworkplace/yolov8v4/ultralytics/cfg/models/v8/yolov8.yaml")  # build a new model from scratch
-model.train(data="E:/fourworkplace/yolov8v4/ultralytics/cfg/datasets/yolov8_four.yaml",
+model = YOLO(model_yaml_path+"v8/yolov8.yaml")  # build a new model from scratch
+model.train(data=data_yaml_path+"datasets/yolov8_four.yaml",
             pretrained=True,epochs=epoch_set,batch=64,patience=200,resume=True,workers=workers_set)
 # Train YOLOv8 with the Swin replacement (CNN+SwinV2)
-model = YOLO("E:/fourworkplace/yolov8v4/ultralytics/cfg/models/v8/yolov8-swinV2-detect.yaml")  # build a new model from scratch
-model.train(data="E:/fourworkplace/yolov8v4/ultralytics/cfg/datasets/yolov8_four.yaml",
+model = YOLO(model_yaml_path+"v8/yolov8-swinV2-detect.yaml")  # build a new model from scratch
+model.train(data=data_yaml_path+"datasets/yolov8_four.yaml",
             pretrained=True,epochs=epoch_set,batch=64,patience=200,resume=True,workers=workers_set)
 # Train YOLOv8 with the Swin replacement (CNN+SwinV2+RT)
-model = YOLO("E:/fourworkplace/yolov8v4/ultralytics/cfg/models/v8/yolov8-swinV2-RT.yaml")  # build a new model from scratch
-model.train(data="E:/fourworkplace/yolov8v4/ultralytics/cfg/datasets/yolov8_four.yaml",
+model = RTDETR(model_yaml_path+"v8/yolov8-swinV2-RT.yaml")  # build a new model from scratch
+model.train(data=data_yaml_path+"datasets/yolov8_four.yaml",
+            pretrained=True,epochs=epoch_set,batch=64,patience=200,resume=True,workers=workers_set)
+# Train YOLOv8-rtdetr
+model = RTDETR(model_yaml_path+"v8/yolov8-rtdetr.yaml")  # build a new model from scratch
+model.train(data=data_yaml_path+"datasets/yolov8_four.yaml",
             pretrained=True,epochs=epoch_set,batch=64,patience=200,resume=True,workers=workers_set)
 # Train YOLOv8 with the Swin replacement (CNN+SwinV2+RTViT)
-model = YOLO("E:/fourworkplace/yolov8v4/ultralytics/cfg/models/v8/yolov8-swinV2-RTViT.yaml")  # build a new model from scratch
-model.train(data="E:/fourworkplace/yolov8v4/ultralytics/cfg/datasets/yolov8_four.yaml",
+model = RTDETR(model_yaml_path+"v8/yolov8-swinV2-RTViT.yaml")  # build a new model from scratch
+model.train(data=data_yaml_path+"datasets/yolov8_four.yaml",
+            pretrained=True,epochs=epoch_set,batch=64,patience=200,resume=True,workers=workers_set)
+# Train YOLOv3 and apply our improvements to it, to highlight the advantage of the proposed scheme.
+# Train the official original YOLOv3; given YOLOv3's particularities, the batch size needs special care, keep it small, e.g. 32.
+model = YOLO(model_yaml_path+"v3/yolov3.yaml")  # build a new model from scratch
+model.train(data=data_yaml_path+"datasets/yolov8_four.yaml",
             pretrained=True,epochs=epoch_set,batch=64,patience=200,resume=True,workers=workers_set)
 # Train YOLOv3 and apply our improvements to it, to highlight the advantage of the proposed scheme.
+# Train the official YOLOv3 + CNN+Swin+RTViT; given YOLOv3's particularities, the batch size needs special care, keep it small, e.g. 32.
+model = RTDETR(model_yaml_path+"v3/yolov3-swinV2-RTViT.yaml")  # build a new model from scratch
+model.train(data=data_yaml_path+"datasets/yolov8_four.yaml",
+            pretrained=True,epochs=epoch_set,batch=64,patience=200,resume=True,workers=workers_set)
+# Next, the image segmentation setup
+model = YOLO(model_yaml_path+"v8/yolov8-seg.yaml")  # build a new model from scratch
+model.train(data=data_yaml_path+"datasets/yolov8_seg_four.yaml",
+            pretrained=True,epochs=epoch_set,batch=64,patience=200,resume=True,workers=workers_set)
 # Next, the image segmentation setup
+model = YOLO(model_yaml_path+"v8/yolov8-seg-swinV2.yaml")  # build a new model from scratch
+model.train(data=data_yaml_path+"datasets/yolov8_seg_four.yaml",
+            pretrained=True,epochs=epoch_set,batch=64,patience=200,resume=True,workers=workers_set)
\ No newline at end of file
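Note that the commit leaves model_yaml_path and data_yaml_path empty. For the string concatenations above to resolve, model_yaml_path must point at the local ultralytics/cfg/models/ directory and data_yaml_path at ultralytics/cfg/, both ending with a path separator. A minimal sketch of one run with these placeholders filled in (the directories below are examples, not part of the commit):

from ultralytics import YOLO

# Example values only: point these at your own ultralytics/cfg directories.
model_yaml_path = "D:/BProjectAll/workplace/yolov8V4/ultralytics/cfg/models/"  # must end with '/'
data_yaml_path  = "D:/BProjectAll/workplace/yolov8V4/ultralytics/cfg/"         # must end with '/'

model = YOLO(model_yaml_path + "v8/yolov8.yaml")  # build the baseline YOLOv8 from its yaml
model.train(data=data_yaml_path + "datasets/yolov8_four.yaml",
            pretrained=True, epochs=300, batch=64, patience=200,
            resume=True, workers=14)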
# Ultralytics YOLO 🚀, AGPL-3.0 license
# YOLOv8-seg instance segmentation model. For Usage examples see https://docs.ultralytics.com/tasks/segment

# Parameters
nc: 80  # number of classes
scales:  # model compound scaling constants, i.e. 'model=yolov8n-seg.yaml' will call yolov8-seg.yaml with scale 'n'
  # [depth, width, max_channels]
  n: [0.33, 0.25, 1024]
  s: [0.33, 0.50, 1024]
  m: [0.67, 0.75, 768]
  l: [1.00, 1.00, 512]
  x: [1.00, 1.25, 512]

# YOLOv8.0n backbone
backbone:
  # [from, repeats, module, args]
  - [-1, 1, Conv, [64, 3, 2]]  # 0-P1/2
  - [-1, 1, Conv, [128, 3, 2]]  # 1-P2/4
  - [-1, 3, SwinV2_CSPB, [128, True]]
  - [-1, 1, Conv, [256, 3, 2]]  # 3-P3/8
  - [-1, 6, SwinV2_CSPB, [256, True]]
  - [-1, 1, Conv, [512, 3, 2]]  # 5-P4/16
  - [-1, 6, SwinV2_CSPB, [512, True]]
  - [-1, 1, Conv, [1024, 3, 2]]  # 7-P5/32
  - [-1, 3, SwinV2_CSPB, [1024, True]]
  - [-1, 1, SPPF, [1024, 5]]  # 9

# YOLOv8.0n head
head:
  - [-1, 1, nn.Upsample, [None, 2, 'nearest']]
  - [[-1, 6], 1, Concat, [1]]  # cat backbone P4
  - [-1, 3, SwinV2_CSPB, [512]]  # 12
  - [-1, 1, nn.Upsample, [None, 2, 'nearest']]
  - [[-1, 4], 1, Concat, [1]]  # cat backbone P3
  - [-1, 3, SwinV2_CSPB, [256]]  # 15 (P3/8-small)
  - [-1, 1, Conv, [256, 3, 2]]
  - [[-1, 12], 1, Concat, [1]]  # cat head P4
  - [-1, 3, SwinV2_CSPB, [512]]  # 18 (P4/16-medium)
  - [-1, 1, Conv, [512, 3, 2]]
  - [[-1, 9], 1, Concat, [1]]  # cat head P5
  - [-1, 3, SwinV2_CSPB, [1024]]  # 21 (P5/32-large)
  - [[15, 18, 21], 1, Segment, [nc, 32, 256]]  # Segment(P3, P4, P5)
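This config corresponds to the v8/yolov8-seg-swinV2.yaml file loaded by the training script above. A minimal, hedged usage sketch, assuming the SwinV2_CSPB module is registered in your ultralytics fork (paths are placeholders, and nc is typically overridden by the class count in the dataset yaml):

from ultralytics import YOLO

# Placeholder paths; adjust to your local checkout and dataset yaml.
model = YOLO("ultralytics/cfg/models/v8/yolov8-seg-swinV2.yaml")   # build the SwinV2 segmentation model
model.train(data="ultralytics/cfg/datasets/yolov8_seg_four.yaml",  # dataset yaml referenced by the training script
            epochs=300, batch=64, workers=14)
results = model.val()  # evaluate box/mask metrics on the validation split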