12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667 |
- from PyQt5.QtCore import QThread # 引入多线程,设备是多个,一个设备一个任务
- import cv2
- import numpy as np
- from ultralytics import YOLO # 1
- # from cv2 import VideoCapture
- # 1. 定义信号(引入)
- from PyQt5.QtCore import pyqtSignal
class CameraDev(QThread):
    """Camera-capture worker thread (one device = one thread).

    Continuously grabs frames from camera 0, runs a YOLO model on each
    frame, draws the top detection onto the image, and hands the annotated
    frame to the UI through the ``sig_video`` Qt signal.
    """

    # Signal payload: raw image bytes, height (int), width (int),
    # channel count (int) — enough for the receiver to rebuild the frame.
    sig_video = pyqtSignal(bytes, int, int, int)

    def __init__(self):
        super(CameraDev, self).__init__()
        # Open camera index 0 via the DirectShow backend (Windows-specific;
        # CAP_DSHOW avoids the slow MSMF startup on many webcams).
        self.cam = cv2.VideoCapture(0, cv2.CAP_DSHOW)
        # Cooperative stop flag checked by run(); set by close().
        self.isOver = False
        # NOTE(review): path is relative to the process working directory —
        # launching from another directory will fail to find the weights.
        self.model = YOLO("mods/best.pt")

    def run(self):
        """Thread body: capture -> detect -> annotate -> emit, until stopped."""
        while not self.isOver:
            status, img = self.cam.read()
            if status:
                # Run inference on the raw BGR frame.
                results = self.model(img)
                boxes = results[0].boxes
                names = results[0].names
                if len(boxes) >= 1:
                    # Annotate only the first (highest-confidence) detection.
                    cls = int(boxes.cls[0].cpu().item())
                    conf = boxes.conf[0].cpu().item()
                    x1, y1, x2, y2 = boxes.xyxy[0].cpu().numpy().astype(np.int32)
                    img = cv2.rectangle(img, (x1, y1), (x2, y2), color=(0, 0, 255), thickness=5)
                    img = cv2.putText(img, F"{names[cls]}:{conf:.2f}", (x1, y1), 0, 3, color=(255, 0, 0), thickness=3)
                # Ship the annotated frame to the UI thread via the signal.
                self.sig_video.emit(img.tobytes(), img.shape[0], img.shape[1], img.shape[2])
            # Sleep every iteration (even on failed reads, so a dead camera
            # does not busy-spin). 100000 us = 0.1 s -> ~10 fps cap.
            QThread.usleep(100000)

    def close(self):
        """Request the capture loop to stop, join the thread, free the camera."""
        self.isOver = True
        # BUGFIX: the original spun in `while self.isRunning(): pass`,
        # burning a full CPU core until run() returned. QThread.wait()
        # blocks properly until the thread finishes.
        self.wait()
        print("线程终止")
        # Release the camera only after run() has exited, so nothing is
        # still reading from it.
        self.cam.release()
        print("设备释放")
-
|