MRI 脊椎分割数据集/脊椎分割项目解决
# -*- coding: utf-8 -*-
"""MRI spine segmentation with YOLOv5 — reconstructed from a garbled web paste.

Dataset: MRI spine segmentation, 2460 images with matching labels (code is for
reference only).  The project covers: structure, dependency installation, data
preparation, training, evaluation, and a GUI for inference.

Project layout::

    spine_segmentation/
    ├── main.py
    ├── train.py
    ├── evaluate.py
    ├── infer.py
    ├── ui_files/
    │   ├── infer_ui.ui
    │   ├── infer_ui.qrc
    │   └── infer_ui_rc.py
    ├── datasets/
    │   └── spine/
    │       ├── images/
    │       ├── labels/
    │       ├── train.txt
    │       └── val.txt
    ├── best_spine.pt
    ├── requirements.txt
    └── data.yaml

requirements.txt (NOTE: the original listed ``shutil``, which is part of the
Python standard library and must not appear in a pip requirements file)::

    opencv-python==4.5.3.56
    torch==1.9.0+cu111
    PyQt5==5.15.4
    labelme
    matplotlib
    scikit-image
    numpy
    pandas

data.yaml::

    train: ./datasets/spine/images/train
    val: ./datasets/spine/images/val
    nc: 1
    names: ['vertebra']

train.py::

    import torch
    from yolov5 import train

    torch.manual_seed(42)  # reproducibility
    dataset_config = 'data.yaml'
    results = train.run(imgsz=640, batch=16, epochs=50, data=dataset_config,
                        weights='yolov5s.pt', name='spine', project='runs/train')
    print(results)

evaluate.py::

    from yolov5 import val

    model_path = 'runs/train/spine/weights/best.pt'
    results = val.run(data='data.yaml', weights=model_path, imgsz=640, task='val')
    print(results)

Usage summary::

    conda create --name spine_env python=3.8 && conda activate spine_env
    pip install -r requirements.txt
    git clone https://github.com/ultralytics/yolov5.git
    cd yolov5 && pip install -r requirements.txt && cd ..
    # put images in datasets/spine/images, YOLO-format .txt labels in
    # datasets/spine/labels, then split into train.txt / val.txt
    python train.py      # train
    python evaluate.py   # evaluate on the val split
    python infer.py      # GUI: single image / folder batch / video / camera

The remainder of this module is ``infer.py``: a PyQt5 window that runs the
trained model on an image, every image in a folder, a video file, or a live
camera feed, and draws the detections on screen.
"""

# --------------------------------------------------------------- infer.py ---
import os       # BUGFIX: used by select_folder but missing from the original
import random   # BUGFIX: used by plot_one_box but missing from the original
import sys

import cv2
import numpy as np
import torch
from PyQt5.QtCore import Qt, QTimer  # BUGFIX: Qt was used but never imported
from PyQt5.QtGui import QImage, QPixmap
from PyQt5.QtWidgets import (QApplication, QFileDialog, QLabel, QMainWindow,
                             QProgressBar, QPushButton, QVBoxLayout, QWidget)

from yolov5.models.experimental import attempt_load
from yolov5.utils.general import non_max_suppression, scale_coords
from yolov5.utils.torch_utils import select_device


def letterbox(img, new_shape=(640, 640), color=(114, 114, 114), auto=True,
              scaleFill=False, scaleup=True, stride=32):
    """Resize ``img`` to fit ``new_shape`` keeping aspect ratio, padding with ``color``.

    Args:
        img: HxWxC BGR image.
        new_shape: target (height, width); an int means a square target
            (BUGFIX: callers pass ``self.imgsz`` as an int, which the original
            indexed with ``new_shape[0]`` and crashed on).
        auto: pad only up to a multiple of ``stride`` (YOLO "minimum rectangle").
        scaleFill: stretch to ``new_shape`` with no padding.
        scaleup: allow upscaling; disable for better test mAP.

    Returns:
        (padded_img, (w_ratio, h_ratio), (dw, dh)) where dw/dh are the
        per-side paddings.
    """
    shape = img.shape[:2]  # current shape [height, width]
    if isinstance(new_shape, int):
        new_shape = (new_shape, new_shape)
    r = min(new_shape[0] / shape[0], new_shape[1] / shape[1])
    if not scaleup:  # only scale down, do not scale up
        r = min(r, 1.0)
    ratio = r, r  # width, height ratios
    new_unpad = int(round(shape[1] * r)), int(round(shape[0] * r))
    dw, dh = new_shape[1] - new_unpad[0], new_shape[0] - new_unpad[1]  # wh padding
    if auto:  # minimum rectangle: pad only to the next stride multiple
        dw, dh = np.mod(dw, stride), np.mod(dh, stride)
    elif scaleFill:  # stretch, no padding at all
        dw, dh = 0.0, 0.0
        new_unpad = (new_shape[1], new_shape[0])
        ratio = new_shape[1] / shape[1], new_shape[0] / shape[0]
    dw /= 2  # split padding between the two sides
    dh /= 2
    if shape[::-1] != new_unpad:  # resize only when needed
        img = cv2.resize(img, new_unpad, interpolation=cv2.INTER_LINEAR)
    top, bottom = int(round(dh - 0.1)), int(round(dh + 0.1))
    left, right = int(round(dw - 0.1)), int(round(dw + 0.1))
    img = cv2.copyMakeBorder(img, top, bottom, left, right,
                             cv2.BORDER_CONSTANT, value=color)
    return img, ratio, (dw, dh)


def plot_one_box(x, img, color=None, label=None, line_thickness=None):
    """Draw one xyxy box (and optional label) on ``img`` in place."""
    tl = line_thickness or round(0.002 * (img.shape[0] + img.shape[1]) / 2) + 1
    color = color or [random.randint(0, 255) for _ in range(3)]
    c1, c2 = (int(x[0]), int(x[1])), (int(x[2]), int(x[3]))
    cv2.rectangle(img, c1, c2, color, thickness=tl, lineType=cv2.LINE_AA)
    if label:
        tf = max(tl - 1, 1)  # font thickness
        t_size = cv2.getTextSize(label, 0, fontScale=tl / 3, thickness=tf)[0]
        c2 = c1[0] + t_size[0], c1[1] - t_size[1] - 3
        cv2.rectangle(img, c1, c2, color, -1, cv2.LINE_AA)  # filled label bg
        cv2.putText(img, label, (c1[0], c1[1] - 2), 0, tl / 3, [225, 255, 255],
                    thickness=tf, lineType=cv2.LINE_AA)


class MainWindow(QMainWindow):
    """Inference GUI: detect vertebrae on an image, folder, video, or camera."""

    def __init__(self):
        super(MainWindow, self).__init__()
        self.setWindowTitle('MRI 脊椎分割')
        self.setGeometry(100, 100, 800, 600)

        # Load the trained YOLOv5 model once, up front.
        self.device = select_device()
        self.model = attempt_load('runs/train/spine/weights/best.pt',
                                  map_location=self.device)
        self.stride = int(self.model.stride.max())  # model stride
        self.imgsz = 640
        # Network input HxW of the most recent detect() call; needed to map
        # boxes back to the original frame (see draw_annotations).
        self._infer_shape = (self.imgsz, self.imgsz)

        # Widgets.
        self.label_display = QLabel(self)
        self.label_display.setAlignment(Qt.AlignCenter)
        self.button_select_image = QPushButton('选择图片', self)
        self.button_select_folder = QPushButton('选择文件夹', self)
        self.button_select_video = QPushButton('选择视频', self)
        self.button_start_camera = QPushButton('开始摄像头', self)
        self.button_stop_camera = QPushButton('停止摄像头', self)
        self.progress_bar = QProgressBar(self)
        self.progress_bar.setVisible(False)

        layout = QVBoxLayout()
        for widget in (self.label_display, self.button_select_image,
                       self.button_select_folder, self.button_select_video,
                       self.button_start_camera, self.button_stop_camera,
                       self.progress_bar):
            layout.addWidget(widget)
        container = QWidget()
        container.setLayout(layout)
        self.setCentralWidget(container)

        # Signal wiring.
        self.button_select_image.clicked.connect(self.select_image)
        self.button_select_folder.clicked.connect(self.select_folder)
        self.button_select_video.clicked.connect(self.select_video)
        self.button_start_camera.clicked.connect(self.start_camera)
        self.button_stop_camera.clicked.connect(self.stop_camera)

        self.timer = QTimer()
        self.timer.timeout.connect(self.update_frame)
        self.cap = None       # active cv2.VideoCapture, if any
        self.results = []     # (source, annotated_frame) history

    def load_image(self, image_path):
        """Read one MRI image, run detection, return the annotated BGR frame.

        Raises:
            FileNotFoundError: if OpenCV cannot read ``image_path``
                (BUGFIX: the original crashed later with an opaque cv2 error).
        """
        frame = cv2.imread(image_path, cv2.IMREAD_GRAYSCALE)
        if frame is None:
            raise FileNotFoundError(f'cannot read image: {image_path}')
        frame = cv2.cvtColor(frame, cv2.COLOR_GRAY2BGR)  # model wants 3 channels
        results = self.detect(frame)
        return self.draw_annotations(frame, results)

    def detect(self, img0):
        """Run the model on a BGR frame; return NMS-filtered predictions."""
        img = letterbox(img0, new_shape=self.imgsz, stride=self.stride)[0]
        img = img[:, :, ::-1].transpose(2, 0, 1)  # BGR -> RGB, HWC -> CHW
        img = np.ascontiguousarray(img)
        img = torch.from_numpy(img).to(self.device)
        img = img.float() / 255.0  # uint8 -> [0.0, 1.0]
        if img.ndimension() == 3:
            img = img.unsqueeze(0)  # add batch dimension
        self._infer_shape = img.shape[2:]  # remember net input size for rescale
        pred = self.model(img, augment=False)[0]
        pred = non_max_suppression(pred, 0.25, 0.45, classes=None, agnostic=False)
        return pred

    def draw_annotations(self, frame, results):
        """Scale detections back to ``frame`` and draw labelled boxes in place."""
        for det in results:
            if len(det):
                # BUGFIX: scale from the network input size; the original used
                # frame.shape[2:], which is an empty tuple for an HxWx3 array.
                det[:, :4] = scale_coords(self._infer_shape, det[:, :4],
                                          frame.shape).round()
                for *xyxy, conf, cls in reversed(det):
                    label = f'{self.model.names[int(cls)]} {conf:.2f}'
                    plot_one_box(xyxy, frame, label=label, color=(0, 255, 0),
                                 line_thickness=3)
        return frame

    def display_image(self, frame):
        """Show a BGR frame in the central QLabel, scaled to the label size."""
        rgb_image = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        h, w, ch = rgb_image.shape
        qt_image = QImage(rgb_image.data, w, h, ch * w, QImage.Format_RGB888)
        pixmap = QPixmap.fromImage(qt_image)
        self.label_display.setPixmap(pixmap.scaled(self.label_display.width(),
                                                   self.label_display.height()))

    def select_image(self):
        """Pick one image file and run detection on it."""
        options = QFileDialog.Options()
        file_path, _ = QFileDialog.getOpenFileName(
            self, '选择图片', '',
            '图片 (*.jpg *.jpeg *.png *.tif);;所有文件 (*)', options=options)
        if file_path:
            annotated_frame = self.load_image(file_path)
            self.display_image(annotated_frame)
            self.results.append((file_path, annotated_frame))

    def select_folder(self):
        """Batch-detect every image in a chosen folder, with a progress bar."""
        folder_path = QFileDialog.getExistingDirectory(self, '选择文件夹')
        if not folder_path:
            return
        files = [os.path.join(folder_path, f) for f in os.listdir(folder_path)
                 if f.endswith(('.jpg', '.jpeg', '.png', '.tif'))]
        self.progress_bar.setMaximum(len(files))
        self.progress_bar.setValue(0)
        self.progress_bar.setVisible(True)
        for i, file_path in enumerate(files):
            annotated_frame = self.load_image(file_path)
            self.display_image(annotated_frame)
            self.results.append((file_path, annotated_frame))
            self.progress_bar.setValue(i + 1)
        self.progress_bar.setVisible(False)

    def select_video(self):
        """Pick a video file and run detection frame by frame."""
        options = QFileDialog.Options()
        file_path, _ = QFileDialog.getOpenFileName(
            self, '选择视频', '',
            '视频 (*.mp4 *.avi);;所有文件 (*)', options=options)
        if file_path:
            self.process_video(file_path)

    def process_video(self, video_path):
        """Detect on every frame of ``video_path``; 'q' aborts early."""
        self.cap = cv2.VideoCapture(video_path)
        while self.cap.isOpened():
            ret, frame = self.cap.read()
            if not ret:
                break
            results = self.detect(frame)
            annotated_frame = self.draw_annotations(frame, results)
            self.display_image(annotated_frame)
            self.results.append((video_path, annotated_frame))
            if cv2.waitKey(1) & 0xFF == ord('q'):
                break
        self.cap.release()

    def start_camera(self):
        """Open the default camera and start the ~30 ms refresh timer."""
        self.cap = cv2.VideoCapture(0)
        self.timer.start(30)

    def stop_camera(self):
        """Stop the timer, release the camera, and clear the display."""
        self.timer.stop()
        if self.cap is not None:
            self.cap.release()
        self.label_display.clear()

    def update_frame(self):
        """Timer callback: grab one camera frame, detect, and display it."""
        ret, frame = self.cap.read()
        if not ret:
            return
        results = self.detect(frame)
        annotated_frame = self.draw_annotations(frame, results)
        self.display_image(annotated_frame)
        self.results.append(('camera', annotated_frame))


if __name__ == '__main__':
    app = QApplication(sys.argv)
    window = MainWindow()
    window.show()
    sys.exit(app.exec_())
本文来自互联网用户投稿,该文观点仅代表作者本人,不代表本站立场。本站仅提供信息存储空间服务,不拥有所有权,不承担相关法律责任。如若转载,请注明出处:http://www.coloradmin.cn/o/2525188.html
如若内容造成侵权/违法违规/事实不符,请联系多彩编程网进行投诉反馈,一经查实,立即删除!