yolov8模型实现西红柿成熟程度检测系统
用于实现基于YOLOv8的西红柿成熟程度检测系统。该系统支持图片、文件夹和视频等多种输入方式,并且可以实时检测西红柿的成熟程度。
# -*- coding: utf-8 -*-
"""YOLOv8-based tomato maturity detection system.

Three run modes:
  * train    -- fine-tune YOLOv8n on the tomato maturity dataset
  * evaluate -- validate the best checkpoint and print metrics
  * infer    -- PyQt5 GUI supporting single images, folders of images,
                video files, and a live camera feed
"""

import os
import sys

import cv2
import numpy as np  # kept from original imports for downstream extensions
import torch
from PyQt5.QtCore import Qt, QTimer
from PyQt5.QtGui import QImage, QPixmap
from PyQt5.QtWidgets import (
    QApplication,
    QFileDialog,
    QHBoxLayout,
    QLabel,
    QMainWindow,
    QPushButton,
    QVBoxLayout,
    QWidget,
)
from ultralytics import YOLO

# Dataset configuration (data.yaml) consumed by ultralytics.
DATA_YAML_CONTENT = """
train: ./datasets/tomato_maturity/train/images
val: ./datasets/tomato_maturity/val/images

nc: 4  # number of classes (including background)
names: ['background', 'unripe', 'half_ripe', 'ripe']  # class names
"""


def _write_dataset_config(path='data.yaml'):
    """Write the ultralytics dataset config file and return its path.

    Called on demand (train/evaluate) instead of at import time, so merely
    importing this module has no filesystem side effects.
    """
    with open(path, 'w') as f:
        f.write(DATA_YAML_CONTENT)
    return path


def train_model():
    """Fine-tune a pretrained YOLOv8n model on the tomato maturity dataset."""
    torch.manual_seed(42)  # reproducibility
    dataset_config = _write_dataset_config()
    model = YOLO('yolov8n.pt')  # COCO-pretrained nano model
    model.train(
        data=dataset_config,
        epochs=100,
        imgsz=640,
        batch=16,
        name='tomato_maturity',
        project='runs/train',
    )
    model.val()  # validate the final weights
    best_model_weights = 'runs/train/tomato_maturity/weights/best.pt'
    print(f"Best model weights saved to {best_model_weights}")


def evaluate_model():
    """Validate the best trained checkpoint and print its metrics."""
    _write_dataset_config()  # val() resolves the dataset via data.yaml
    model = YOLO('runs/train/tomato_maturity/weights/best.pt')
    metrics = model.val()
    print(metrics)


class MainWindow(QMainWindow):
    """Inference GUI: image / folder / video / live-camera detection."""

    # Accepted image extensions (checked case-insensitively).
    IMAGE_EXTENSIONS = ('.jpg', '.jpeg', '.png')

    def __init__(self):
        super(MainWindow, self).__init__()
        self.setWindowTitle("Tomato Maturity Detection")
        self.setGeometry(100, 100, 800, 600)

        # Best checkpoint produced by train_model().
        self.model = YOLO('runs/train/tomato_maturity/weights/best.pt')
        # Must match the `names` order used at training time (data.yaml).
        self.class_names = ['background', 'unripe', 'half_ripe', 'ripe']

        # --- widgets ----------------------------------------------------
        self.label_display = QLabel(self)
        self.label_display.setAlignment(Qt.AlignCenter)

        self.button_select_image = QPushButton("Select Image", self)
        self.button_select_folder = QPushButton("Select Folder", self)
        self.button_select_video = QPushButton("Select Video", self)
        self.button_start_camera = QPushButton("Start Camera", self)
        self.button_stop_camera = QPushButton("Stop Camera", self)

        self.button_select_image.clicked.connect(self.select_image)
        self.button_select_folder.clicked.connect(self.select_folder)
        self.button_select_video.clicked.connect(self.select_video)
        self.button_start_camera.clicked.connect(self.start_camera)
        self.button_stop_camera.clicked.connect(self.stop_camera)

        # Timer drives the live-camera refresh loop.
        self.timer = QTimer()
        self.timer.timeout.connect(self.update_frame)
        self.cap = None  # active cv2.VideoCapture, if any

        # --- layout -----------------------------------------------------
        layout = QVBoxLayout()
        layout.addWidget(self.label_display)

        button_layout = QHBoxLayout()
        for button in (self.button_select_image, self.button_select_folder,
                       self.button_select_video, self.button_start_camera,
                       self.button_stop_camera):
            button_layout.addWidget(button)
        layout.addLayout(button_layout)

        container = QWidget()
        container.setLayout(layout)
        self.setCentralWidget(container)

    def select_image(self):
        """Pick a single image file and run detection on it."""
        options = QFileDialog.Options()
        file_path, _ = QFileDialog.getOpenFileName(
            self, "选择图片", "",
            "Images (*.jpg *.jpeg *.png);;All Files (*)", options=options)
        if file_path:
            self.process_image(file_path)

    def process_image(self, image_path):
        """Run the model on one image file and display the annotated result."""
        frame = cv2.imread(image_path)
        if frame is None:  # unreadable/corrupt file -- skip instead of crashing
            print(f"Could not read image: {image_path}")
            return
        results = self.model(frame)
        annotated_frame = self.draw_annotations(frame, results)
        self.display_image(annotated_frame)

    def select_folder(self):
        """Pick a folder and run detection on every image inside it."""
        folder_path = QFileDialog.getExistingDirectory(self, "选择文件夹")
        if not folder_path:
            return
        image_files = [
            os.path.join(folder_path, f)
            for f in sorted(os.listdir(folder_path))
            if f.lower().endswith(self.IMAGE_EXTENSIONS)
        ]
        for image_file in image_files:
            self.process_image(image_file)
            # Let the UI repaint so each result is actually visible.
            QApplication.processEvents()

    def select_video(self):
        """Pick a video file and run detection frame by frame."""
        options = QFileDialog.Options()
        file_path, _ = QFileDialog.getOpenFileName(
            self, "选择视频", "",
            "Videos (*.mp4 *.avi);;All Files (*)", options=options)
        if file_path:
            self.process_video(file_path)

    def process_video(self, video_path):
        """Detect on every frame of a video until it ends or 'q' is pressed."""
        self.cap = cv2.VideoCapture(video_path)
        while self.cap.isOpened():
            ret, frame = self.cap.read()
            if not ret:
                break
            results = self.model(frame)
            annotated_frame = self.draw_annotations(frame, results)
            self.display_image(annotated_frame)
            # Keep the Qt event loop responsive during this blocking loop.
            QApplication.processEvents()
            if cv2.waitKey(1) & 0xFF == ord('q'):
                break
        self.cap.release()
        self.cap = None

    def start_camera(self):
        """Open the default camera and start the ~33 fps refresh timer."""
        self.cap = cv2.VideoCapture(0)
        self.timer.start(30)

    def stop_camera(self):
        """Stop the refresh timer, release the camera, and clear the view."""
        self.timer.stop()
        if self.cap is not None:
            self.cap.release()
            self.cap = None  # avoid double-release on a later stop
        self.label_display.clear()

    def update_frame(self):
        """Timer callback: grab one camera frame, detect, and display it."""
        if self.cap is None:  # camera already stopped
            return
        ret, frame = self.cap.read()
        if not ret:
            return
        results = self.model(frame)
        annotated_frame = self.draw_annotations(frame, results)
        self.display_image(annotated_frame)

    def draw_annotations(self, frame, results):
        """Draw bounding boxes and class/confidence labels onto the frame."""
        for result in results:
            boxes = result.boxes.cpu().numpy()
            for box in boxes:
                r = box.xyxy[0].astype(int)
                cls = int(box.cls[0])
                conf = box.conf[0]
                label = f"{self.class_names[cls]} {conf:.2f}"
                color = (0, 255, 0)  # BGR green
                cv2.rectangle(frame, (r[0], r[1]), (r[2], r[3]), color, 2)
                cv2.putText(frame, label, (r[0], r[1] - 10),
                            cv2.FONT_HERSHEY_SIMPLEX, 0.9, color, 2)
        return frame

    def display_image(self, frame):
        """Convert a BGR frame to a QPixmap and show it scaled in the label."""
        rgb_image = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        h, w, ch = rgb_image.shape
        bytes_per_line = ch * w
        qt_image = QImage(rgb_image.data, w, h, bytes_per_line,
                          QImage.Format_RGB888)
        pixmap = QPixmap.fromImage(qt_image)
        self.label_display.setPixmap(pixmap.scaled(
            self.label_display.width(), self.label_display.height(),
            Qt.KeepAspectRatio))


if __name__ == "__main__":
    # Accept the mode from the command line (`python main.py train|evaluate|infer`,
    # matching the documented usage), falling back to an interactive prompt.
    mode = sys.argv[1] if len(sys.argv) > 1 else input(
        "Enter mode (train, evaluate, infer): ")
    if mode == "train":
        train_model()
    elif mode == "evaluate":
        evaluate_model()
    elif mode == "infer":
        app = QApplication(sys.argv)
        window = MainWindow()
        window.show()
        sys.exit(app.exec_())
    else:
        print("Invalid mode. Please choose from 'train', 'evaluate', 'infer'.")

文件结构
tomato_maturity_detection/
├── main.py
├── datasets/
│   └── tomato_maturity/
│       ├── train/
│       │   ├── images/
│       │   │   ├── image1.jpg
│       │   │   └── ...
│       │   └── labels/
│       │       ├── image1.txt
│       │       └── ...
│       └── val/
│           ├── images/
│           └── labels/
├── data.yaml
└── requirements.txt

安装依赖项
首先,确保你已经安装了所有必要的依赖项。你可以通过以下命令安装:
pip install -r requirements.txt
requirements.txt 内容如下:
opencv-python
torch>=1.8
ultralytics
PyQt5

数据集格式
假设你的数据集已经按照YOLOv5/YOLOv8的格式进行了标注。每个图像文件对应一个文本文件,其中包含边界框信息。例如:
image1.jpg
1 0.5 0.5 0.2 0.2
2 0.7 0.7 0.1 0.1
3 0.3 0.3 0.1 0.1
这表示图像中有三个对象,第一个对象的类别是1(未熟),第二个对象的类别是2(半熟),第三个对象的类别是3(熟透)。
配置说明

数据集目录结构:
- datasets/tomato_maturity/train/images 与 datasets/tomato_maturity/val/images: 分别存放训练集和验证集的图片文件。
- datasets/tomato_maturity/train/labels 与 datasets/tomato_maturity/val/labels: 存放对应的标注文件(.txt 格式)。

数据集配置文件 (data.yaml):创建一个 data.yaml 文件来配置数据集路径和类别信息。
运行步骤总结克隆项目仓库(如果有的话):
git clone https://github.com/yourusername/tomato_maturity_detection.git
cd tomato_maturity_detection
安装依赖项:
pip install -r requirements.txt
训练模型:
python main.py train
评估模型:
python main.py evaluate
运行推理界面:
python main.py infer
操作界面:
选择图片进行检测。选择文件夹中的所有图片进行批量检测。选择视频进行检测。使用摄像头进行实时检测。以上是完整的基于YOLOv8的西红柿成熟程度检测系统的项目介绍和代码实现。该项目支持图片、文件夹、视频等多种输入方式,并且可以实时检测西红柿的成熟程度。希望这些详细的信息和代码能够帮助你顺利实施和优化你的西红柿成熟程度检测系统。
相关知识
基于YOLOv8/YOLOv7/YOLOv6/YOLOv5的玉米病虫害检测系统(Python+PySide6界面+训练代码)
基于深度学习的植物病害检测系统(网页版+YOLOv8/v7/v6/v5代码+训练数据集)
基于YOLOv8的稻田虫害检测系统【附源码+可远程安装部署】
YOLOv11 vs YOLOv8:谁才是真正的AI检测之王?
YOLOv8系列】(七)毕设实战:YOLOv8+Pyqt5实现鲜花智能分类系统
基于深度学习YOLOv8\YOLOv5的花卉识别鲜花识别检测分类系统设计
<项目代码>YOLOv8 番茄叶片病害识别<目标检测>
基于改进YOLOv8的水稻病虫害检测方法及系统
基于深度学习的田间杂草检测系统(网页版+YOLOv8/v7/v6/v5代码+训练数据集)
基于深度学习的农作物害虫检测系统(网页版+YOLOv8/v7/v6/v5代码+训练数据集)
网址: 如何实现基于Yolov8的西红柿/番茄成熟度检测系统 yolov8番茄成熟度检测 该系统支持图片、文件夹和视频等多种输入方式,并且可以实时检测西红柿的成熟程度。 https://m.huajiangbk.com/newsview1347464.html
上一篇: 一种基于视觉识别的百香果成熟度检 |
下一篇: 不同成熟度木奶果果实品质及耐贮性 |