
[Converting a YOLO (txt) dataset to VOC (xml) format] and [Building a VOC-format dataset]

Source: https://blog.csdn.net/zzzyyy8/article/details/148041658

1. txt -> xml conversion code

If you only have a dataset with YOLO labels, the first step in building a VOC-format dataset is converting the labels. The script below does the conversion.
Here picPath is the folder containing the images;
txtPath is the folder containing the YOLO txt label files;
xmlPath is the folder where the generated xml files will be written.
Also replace the class information in the script with your own classes.
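As a reminder of the input format: each line of a YOLO txt file holds a class_id followed by the normalized center x, center y, width and height of one box. The snippet below is a minimal sketch of the corner-coordinate arithmetic the script performs; the label line and image size are made up for illustration.

# Sketch of the box conversion done inside makexml; values are invented for illustration.
line = "2 0.5 0.5 0.25 0.4"      # one YOLO label line: class_id, normalized center x/y, width, height
cls_id, xc, yc, w, h = line.split()
img_w, img_h = 640, 480          # image size (the script reads it with cv2)
xmin = int(float(xc) * img_w + 1 - float(w) * 0.5 * img_w)
ymin = int(float(yc) * img_h + 1 - float(h) * 0.5 * img_h)
xmax = int(float(xc) * img_w + 1 + float(w) * 0.5 * img_w)
ymax = int(float(yc) * img_h + 1 + float(h) * 0.5 * img_h)
print(cls_id, xmin, ymin, xmax, ymax)   # 2 241 145 401 337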

from xml.dom.minidom import Document
import os
import cv2


# def makexml(txtPath, xmlPath, picPath):   # earlier argument order, kept for reference
def makexml(picPath, txtPath, xmlPath):  # picPath: image folder, txtPath: txt label folder, xmlPath: xml output folder
    """Convert YOLO-format txt annotation files into VOC-format xml annotation files.
    Under your own annotation folder, create three subfolders named picture, txt and xml.
    """
    # names: ['-1', 'bearing', 'bolt', 'flange', 'gear', 'nut', 'retaining_ring', 'spring', 'washer']
    dic = {'0': "Double hexagonal column",  # dictionary mapping class indices to class names;
           '1': "Flange nut",               # it must match your own classes.txt, in the same order
           '2': "Hexagon nut",
           '3': "Hexagon pillar",
           '4': "Hexagon screw",
           '5': "Hexagonal steel columnc",
           '6': "Hexagonal steel column",
           '7': "Keybar",
           '8': 'Plastic cushion pillar',
           '9': 'Rectangular nut',
           '10': 'Round head screw',
           '11': 'Spring washer',
           '12': 'T-shaped screw',
           }
    files = os.listdir(txtPath)
    for i, name in enumerate(files):
        print(f"[{i + 1}/{len(files)}] processing: {name}")
        xmlBuilder = Document()
        annotation = xmlBuilder.createElement("annotation")  # create the <annotation> root element
        xmlBuilder.appendChild(annotation)
        txtFile = open(txtPath + name)
        txtList = txtFile.readlines()
        txtFile.close()
        # img = cv2.imread(picPath + name[0:-4] + ".jpg")
        # automatically try .jpg, .png and .jpeg images
        img = None
        img_extensions = [".jpg", ".png", ".jpeg"]
        for ext in img_extensions:
            img_path = os.path.join(picPath, name[0:-4] + ext)
            if os.path.exists(img_path):
                img = cv2.imread(img_path)
                if img is not None:
                    break
        # skip this txt file if the image could not be read
        if img is None:
            print(f"Could not read image {name[0:-4]} (supported formats: .jpg/.png/.jpeg), skipping")
            continue
        else:
            print(f"Image {name[0:-4]} read successfully")
        Pheight, Pwidth, Pdepth = img.shape

        folder = xmlBuilder.createElement("folder")  # <folder>
        foldercontent = xmlBuilder.createTextNode("driving_annotation_dataset")
        folder.appendChild(foldercontent)
        annotation.appendChild(folder)

        filename = xmlBuilder.createElement("filename")  # <filename> (note: always written with a .jpg extension)
        filenamecontent = xmlBuilder.createTextNode(name[0:-4] + ".jpg")
        filename.appendChild(filenamecontent)
        annotation.appendChild(filename)

        size = xmlBuilder.createElement("size")  # <size>
        width = xmlBuilder.createElement("width")  # <width>
        widthcontent = xmlBuilder.createTextNode(str(Pwidth))
        width.appendChild(widthcontent)
        size.appendChild(width)
        height = xmlBuilder.createElement("height")  # <height>
        heightcontent = xmlBuilder.createTextNode(str(Pheight))
        height.appendChild(heightcontent)
        size.appendChild(height)
        depth = xmlBuilder.createElement("depth")  # <depth>
        depthcontent = xmlBuilder.createTextNode(str(Pdepth))
        depth.appendChild(depthcontent)
        size.appendChild(depth)
        annotation.appendChild(size)

        for j in txtList:
            oneline = j.strip().split(" ")
            object = xmlBuilder.createElement("object")  # <object>
            picname = xmlBuilder.createElement("name")  # <name>
            namecontent = xmlBuilder.createTextNode(dic[oneline[0]])
            picname.appendChild(namecontent)
            object.appendChild(picname)
            pose = xmlBuilder.createElement("pose")  # <pose>
            posecontent = xmlBuilder.createTextNode("Unspecified")
            pose.appendChild(posecontent)
            object.appendChild(pose)
            truncated = xmlBuilder.createElement("truncated")  # <truncated>
            truncatedContent = xmlBuilder.createTextNode("0")
            truncated.appendChild(truncatedContent)
            object.appendChild(truncated)
            difficult = xmlBuilder.createElement("difficult")  # <difficult>
            difficultcontent = xmlBuilder.createTextNode("0")
            difficult.appendChild(difficultcontent)
            object.appendChild(difficult)
            bndbox = xmlBuilder.createElement("bndbox")  # <bndbox>
            xmin = xmlBuilder.createElement("xmin")  # <xmin>
            mathData = int(((float(oneline[1])) * Pwidth + 1) - (float(oneline[3])) * 0.5 * Pwidth)
            xminContent = xmlBuilder.createTextNode(str(mathData))
            xmin.appendChild(xminContent)
            bndbox.appendChild(xmin)
            ymin = xmlBuilder.createElement("ymin")  # <ymin>
            mathData = int(((float(oneline[2])) * Pheight + 1) - (float(oneline[4])) * 0.5 * Pheight)
            yminContent = xmlBuilder.createTextNode(str(mathData))
            ymin.appendChild(yminContent)
            bndbox.appendChild(ymin)
            xmax = xmlBuilder.createElement("xmax")  # <xmax>
            mathData = int(((float(oneline[1])) * Pwidth + 1) + (float(oneline[3])) * 0.5 * Pwidth)
            xmaxContent = xmlBuilder.createTextNode(str(mathData))
            xmax.appendChild(xmaxContent)
            bndbox.appendChild(xmax)
            ymax = xmlBuilder.createElement("ymax")  # <ymax>
            mathData = int(((float(oneline[2])) * Pheight + 1) + (float(oneline[4])) * 0.5 * Pheight)
            ymaxContent = xmlBuilder.createTextNode(str(mathData))
            ymax.appendChild(ymaxContent)
            bndbox.appendChild(ymax)
            object.appendChild(bndbox)  # close <bndbox>
            annotation.appendChild(object)  # close <object>

        f = open(xmlPath + name[0:-4] + ".xml", 'w')
        xmlBuilder.writexml(f, indent='\t', newl='\n', addindent='\t', encoding='utf-8')
        f.close()
        print(f"Saved XML file: {name[0:-4]}.xml\n")


if __name__ == "__main__":
    picPath = "/home/yu/wz/duibi/retinanet-pytorch-master/VOCdeckit/VOC2007/JPEGImages/"  # image folder; keep the trailing /
    txtPath = "/home/yu/wz/duibi/retinanet-pytorch-master/VOCdeckit/VOC2007/YOLO/"  # txt label folder; keep the trailing /
    xmlPath = "/home/yu/wz/duibi/retinanet-pytorch-master/VOCdeckit/VOC2007/Annotations/"  # xml output folder; keep the trailing /
    makexml(picPath, txtPath, xmlPath)
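To spot-check the conversion, you can parse one of the generated xml files and print its boxes; a minimal sketch, with a hypothetical file path:

import xml.etree.ElementTree as ET

# Quick sanity check of a single converted annotation; the path is hypothetical.
tree = ET.parse("path/to/Annotations/000001.xml")
for obj in tree.getroot().iter("object"):
    name = obj.find("name").text
    box = obj.find("bndbox")
    coords = [int(box.find(tag).text) for tag in ("xmin", "ymin", "xmax", "ymax")]
    print(name, coords)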

2. Processing the VOC-format dataset

A VOC-format dataset uses the folder layout below.
The contents of ImageSets are generated later by the script; at the start you only need to organize Annotations and JPEGImages, as sketched below.
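The layout the script expects (inferred from the paths it uses) can be prepared like this; a minimal sketch, assuming the default VOCdevkit_path of 'VOCdevkit' in the project root:

import os

# Expected layout (VOCdevkit_path defaults to 'VOCdevkit'):
#   VOCdevkit/VOC2007/Annotations     <- xml annotation files from step 1
#   VOCdevkit/VOC2007/JPEGImages      <- images
#   VOCdevkit/VOC2007/ImageSets/Main  <- split txt files generated by the script below
for sub in ("Annotations", "JPEGImages", "ImageSets/Main"):
    os.makedirs(os.path.join("VOCdevkit", "VOC2007", sub), exist_ok=True)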

import os
import random
import xml.etree.ElementTree as ET

import numpy as np

from utils.utils import get_classes

#--------------------------------------------------------------------------------------------------------------------------------#
#   annotation_mode selects what this script computes when it is run
#   annotation_mode = 0: the whole label-processing pipeline, i.e. the txt files in VOCdevkit/VOC2007/ImageSets plus the 2007_train.txt and 2007_val.txt used for training
#   annotation_mode = 1: only the txt files in VOCdevkit/VOC2007/ImageSets
#   annotation_mode = 2: only the 2007_train.txt and 2007_val.txt used for training
#--------------------------------------------------------------------------------------------------------------------------------#
annotation_mode     = 0
#-------------------------------------------------------------------#
#   Must be modified: used to write the object information into 2007_train.txt and 2007_val.txt
#   Keep it identical to the classes_path used for training and prediction
#   If the generated 2007_train.txt contains no object information,
#   the classes were not set correctly
#   Only effective when annotation_mode is 0 or 2
#-------------------------------------------------------------------#
classes_path        = 'model_data/voc_classes.txt'
#--------------------------------------------------------------------------------------------------------------------------------#
#   trainval_percent sets the ratio of (train + val) to test; by default (train + val) : test = 9 : 1
#   train_percent sets the ratio of train to val within (train + val); by default train : val = 9 : 1
#   Only effective when annotation_mode is 0 or 1
#--------------------------------------------------------------------------------------------------------------------------------#
trainval_percent    = 1.0
train_percent       = 0.8
#-------------------------------------------------------#
#   Points to the folder containing the VOC dataset
#   Defaults to the VOC dataset in the project root directory
#-------------------------------------------------------#
VOCdevkit_path  = 'VOCdevkit'

VOCdevkit_sets  = [('2007', 'train'), ('2007', 'val')]
classes, _      = get_classes(classes_path)

#-------------------------------------------------------#
#   Count the number of objects per class
#-------------------------------------------------------#
photo_nums  = np.zeros(len(VOCdevkit_sets))
nums        = np.zeros(len(classes))
def convert_annotation(year, image_id, list_file):
    in_file = open(os.path.join(VOCdevkit_path, 'VOC%s/Annotations/%s.xml' % (year, image_id)), encoding='utf-8')
    tree = ET.parse(in_file)
    root = tree.getroot()

    for obj in root.iter('object'):
        difficult = 0
        if obj.find('difficult') != None:
            difficult = obj.find('difficult').text
        cls = obj.find('name').text
        if cls not in classes or int(difficult) == 1:
            continue
        cls_id = classes.index(cls)
        xmlbox = obj.find('bndbox')
        b = (int(float(xmlbox.find('xmin').text)), int(float(xmlbox.find('ymin').text)),
             int(float(xmlbox.find('xmax').text)), int(float(xmlbox.find('ymax').text)))
        list_file.write(" " + ",".join([str(a) for a in b]) + ',' + str(cls_id))

        nums[classes.index(cls)] = nums[classes.index(cls)] + 1


if __name__ == "__main__":
    random.seed(0)
    if " " in os.path.abspath(VOCdevkit_path):
        raise ValueError("The dataset folder path and the image file names must not contain spaces, otherwise training will not work correctly; please rename them.")

    if annotation_mode == 0 or annotation_mode == 1:
        print("Generate txt in ImageSets.")
        xmlfilepath     = os.path.join(VOCdevkit_path, 'VOC2007/Annotations')
        saveBasePath    = os.path.join(VOCdevkit_path, 'VOC2007/ImageSets/Main')
        temp_xml        = os.listdir(xmlfilepath)
        total_xml       = []
        for xml in temp_xml:
            if xml.endswith(".xml"):
                total_xml.append(xml)

        num      = len(total_xml)
        list     = range(num)
        tv       = int(num * trainval_percent)
        tr       = int(tv * train_percent)
        trainval = random.sample(list, tv)
        train    = random.sample(trainval, tr)

        print("train and val size", tv)
        print("train size", tr)
        ftrainval   = open(os.path.join(saveBasePath, 'trainval.txt'), 'w')
        ftest       = open(os.path.join(saveBasePath, 'test.txt'), 'w')
        ftrain      = open(os.path.join(saveBasePath, 'train.txt'), 'w')
        fval        = open(os.path.join(saveBasePath, 'val.txt'), 'w')

        for i in list:
            name = total_xml[i][:-4] + '\n'
            if i in trainval:
                ftrainval.write(name)
                if i in train:
                    ftrain.write(name)
                else:
                    fval.write(name)
            else:
                ftest.write(name)

        ftrainval.close()
        ftrain.close()
        fval.close()
        ftest.close()
        print("Generate txt in ImageSets done.")

    if annotation_mode == 0 or annotation_mode == 2:
        print("Generate 2007_train.txt and 2007_val.txt for train.")
        type_index = 0
        for year, image_set in VOCdevkit_sets:
            image_ids = open(os.path.join(VOCdevkit_path, 'VOC%s/ImageSets/Main/%s.txt' % (year, image_set)), encoding='utf-8').read().strip().split()
            list_file = open('%s_%s.txt' % (year, image_set), 'w', encoding='utf-8')
            for image_id in image_ids:
                list_file.write('%s/VOC%s/JPEGImages/%s.jpg' % (os.path.abspath(VOCdevkit_path), year, image_id))

                convert_annotation(year, image_id, list_file)
                list_file.write('\n')
            photo_nums[type_index] = len(image_ids)
            type_index += 1
            list_file.close()
        print("Generate 2007_train.txt and 2007_val.txt for train done.")

        def printTable(List1, List2):
            for i in range(len(List1[0])):
                print("|", end=' ')
                for j in range(len(List1)):
                    print(List1[j][i].rjust(int(List2[j])), end=' ')
                print("|", end=' ')
                print()

        str_nums = [str(int(x)) for x in nums]
        tableData = [classes, str_nums]
        colWidths = [0] * len(tableData)
        len1 = 0
        for i in range(len(tableData)):
            for j in range(len(tableData[i])):
                if len(tableData[i][j]) > colWidths[i]:
                    colWidths[i] = len(tableData[i][j])
        printTable(tableData, colWidths)

        if photo_nums[0] <= 500:
            print("The training set has fewer than 500 images, which is quite small; set a larger number of epochs so there are enough gradient-descent steps.")

        if np.sum(nums) == 0:
            print("No objects were found in the dataset. Make sure classes_path matches your own dataset and that the label names are correct, otherwise training will have no effect!")
            print("No objects were found in the dataset. Make sure classes_path matches your own dataset and that the label names are correct, otherwise training will have no effect!")
            print("No objects were found in the dataset. Make sure classes_path matches your own dataset and that the label names are correct, otherwise training will have no effect!")
            print("(Important things are said three times.)")
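With the values above (trainval_percent = 1.0, train_percent = 0.8), every annotated image goes into trainval, which is then split 8:2 into train and val, and the test list stays empty. Each line of the generated 2007_train.txt / 2007_val.txt holds the absolute image path followed by one xmin,ymin,xmax,ymax,class_id group per object; a minimal parsing sketch, with an invented path and invented box values:

# Illustration only: the path and numbers below are invented.
line = "/home/user/VOCdevkit/VOC2007/JPEGImages/000001.jpg 48,240,195,371,11 8,12,352,498,2"
parts = line.split()
image_path = parts[0]
boxes = [list(map(int, box.split(","))) for box in parts[1:]]  # each entry: [xmin, ymin, xmax, ymax, class_id]
print(image_path, boxes)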

Modify the settings at the top of the script, in particular classes_path (and VOCdevkit_path if your dataset lives elsewhere), to match your own setup.
voc_classes.txt holds your own class labels.
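For instance, with the classes used by the conversion script in section 1, the file would contain one class name per line:

Double hexagonal column
Flange nut
Hexagon nut
Hexagon pillar
Hexagon screw
Hexagonal steel columnc
Hexagonal steel column
Keybar
Plastic cushion pillar
Rectangular nut
Round head screw
Spring washer
T-shaped screw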
Running the script generates the split txt files under VOC2007/ImageSets/Main. If the console prints a table with the object count for each class, the run succeeded.
At the same time, 2007_train.txt and 2007_val.txt are generated in the project root directory.
Open these two files and check that they contain your dataset information (image paths followed by the box coordinates and class ids).
With that, our dataset is ready!
