from PyQt5.QtCore import QThread  # Multithreading: one capture task per camera device
from PyQt5.QtCore import pyqtSignal  # 1. Signal definition (import)
import cv2
import numpy as np


class CameraDev(QThread):
    # Signal definition: the signal carries the frame's raw bytes plus
    # image height (int), width (int), and channel count (int)
    sig_video = pyqtSignal(bytes, int, int, int)

    def __init__(self):
        super(CameraDev, self).__init__()
        # Initialize the capture task: open the camera
        self.cam = cv2.VideoCapture(
            0,              # camera index, counting from 0
            cv2.CAP_DSHOW   # use the DirectShow backend for capture
        )
        self.isOver = False

    def run(self):
        # A simple vertical-edge kernel; deep learning essentially searches for
        # kernels whose responses are features useful for classification.
        kernel = np.array([
            [1, 0, -1],
            [1, 0, -1],
            [1, 0, -1]
        ])
        # The device thread's task: when run() returns, the thread ends.
        while not self.isOver:
            # Repeatedly grab and process frames
            status, img = self.cam.read()  # read one frame from the camera
            if status:
                # Call the image-processing (AI) step here
                # img = cv2.GaussianBlur(img, (9, 9), 5.0)
                img = cv2.filter2D(img, -1, kernel, delta=200.0)
                # 2. Emit the signal with the frame data
                self.sig_video.emit(img.tobytes(), img.shape[0], img.shape[1], img.shape[2])
            QThread.usleep(100000)  # 100000 microseconds = 0.1 seconds between frames

    def close(self):
        # Stop the capture loop
        self.isOver = True
        # Wait for the thread to finish
        while self.isRunning():
            pass
        print("Thread terminated")
        # Release the camera device
        self.cam.release()
        print("Device released")