Jelajahi Sumber

first commit

Gq 1 tahun lalu
melakukan
a7c7cb6897
93 mengubah file dengan 1832 tambahan dan 0 penghapusan
  1. TEMPAT SAMPAH
      .ipynb_checkpoints/1-checkpoint.jpg
  2. TEMPAT SAMPAH
      1.jpg
  3. TEMPAT SAMPAH
      Day02/.ipynb_checkpoints/1-checkpoint.jpg
  4. 139 0
      Day02/.ipynb_checkpoints/图像处理与图像特征-checkpoint.ipynb
  5. TEMPAT SAMPAH
      Day02/1.jpg
  6. 7 0
      Day02/ex01.py
  7. TEMPAT SAMPAH
      Day02/gpu.bmp
  8. TEMPAT SAMPAH
      Day02/sea.jpg
  9. 139 0
      Day02/图像处理与图像特征.ipynb
  10. 33 0
      Day03/.ipynb_checkpoints/day03卷积特征-checkpoint.ipynb
  11. 110 0
      Day03/.ipynb_checkpoints/day03笔记-checkpoint.ipynb
  12. 28 0
      Day03/.ipynb_checkpoints/zuoye-checkpoint.ipynb
  13. TEMPAT SAMPAH
      Day03/2.jpg
  14. 27 0
      Day03/codes/ex_qt01.py
  15. 26 0
      Day03/codes/notes.txt
  16. 33 0
      Day03/day03卷积特征.ipynb
  17. 110 0
      Day03/day03笔记.ipynb
  18. TEMPAT SAMPAH
      Day03/gpu.bmp
  19. 28 0
      Day03/zuoye.ipynb
  20. TEMPAT SAMPAH
      Day03/zuoye.jpg
  21. 7 0
      Day04/monitor/main.py
  22. 63 0
      Day04/monitor/ui/monitor.ui
  23. 46 0
      Day04/monitor/ui/monitor_ui.py
  24. 1 0
      Day05/aiapp/app.bat
  25. TEMPAT SAMPAH
      Day05/aiapp/dev/__pycache__/camera.cpython-310.pyc
  26. 53 0
      Day05/aiapp/dev/camera.py
  27. TEMPAT SAMPAH
      Day05/aiapp/frame/__pycache__/app.cpython-310.pyc
  28. TEMPAT SAMPAH
      Day05/aiapp/frame/__pycache__/win.cpython-310.pyc
  29. 10 0
      Day05/aiapp/frame/app.py
  30. 48 0
      Day05/aiapp/frame/win.py
  31. 9 0
      Day05/aiapp/main.py
  32. 10 0
      Day05/aiapp/traffic_ui.py
  33. TEMPAT SAMPAH
      Day05/aiapp/ui/__pycache__/traffic_ui.cpython-310.pyc
  34. 63 0
      Day05/aiapp/ui/monitor.ui
  35. 1 0
      Day05/aiapp/ui/tools.bat
  36. 63 0
      Day05/aiapp/ui/traffic.ui
  37. 46 0
      Day05/aiapp/ui/traffic_ui.py
  38. TEMPAT SAMPAH
      Day06/infer/01_0.jpg
  39. TEMPAT SAMPAH
      Day06/infer/02_4.jpg
  40. TEMPAT SAMPAH
      Day06/infer/05_2.jpg
  41. TEMPAT SAMPAH
      Day06/infer/__pycache__/model.cpython-310.pyc
  42. 38 0
      Day06/infer/infer.py
  43. TEMPAT SAMPAH
      Day06/infer/lenet5.pt
  44. 80 0
      Day06/infer/model.py
  45. TEMPAT SAMPAH
      Day06/lenet/00_5.jpg
  46. TEMPAT SAMPAH
      Day06/lenet/01_0.jpg
  47. TEMPAT SAMPAH
      Day06/lenet/02_4.jpg
  48. TEMPAT SAMPAH
      Day06/lenet/03_1.jpg
  49. TEMPAT SAMPAH
      Day06/lenet/04_9.jpg
  50. TEMPAT SAMPAH
      Day06/lenet/05_2.jpg
  51. TEMPAT SAMPAH
      Day06/lenet/06_1.jpg
  52. TEMPAT SAMPAH
      Day06/lenet/07_3.jpg
  53. TEMPAT SAMPAH
      Day06/lenet/08_1.jpg
  54. TEMPAT SAMPAH
      Day06/lenet/09_4.jpg
  55. TEMPAT SAMPAH
      Day06/lenet/__pycache__/model.cpython-310.pyc
  56. TEMPAT SAMPAH
      Day06/lenet/datasets/MNIST/raw/t10k-images-idx3-ubyte
  57. TEMPAT SAMPAH
      Day06/lenet/datasets/MNIST/raw/t10k-images-idx3-ubyte.gz
  58. TEMPAT SAMPAH
      Day06/lenet/datasets/MNIST/raw/t10k-labels-idx1-ubyte
  59. TEMPAT SAMPAH
      Day06/lenet/datasets/MNIST/raw/t10k-labels-idx1-ubyte.gz
  60. TEMPAT SAMPAH
      Day06/lenet/datasets/MNIST/raw/train-images-idx3-ubyte
  61. TEMPAT SAMPAH
      Day06/lenet/datasets/MNIST/raw/train-images-idx3-ubyte.gz
  62. TEMPAT SAMPAH
      Day06/lenet/datasets/MNIST/raw/train-labels-idx1-ubyte
  63. TEMPAT SAMPAH
      Day06/lenet/datasets/MNIST/raw/train-labels-idx1-ubyte.gz
  64. 28 0
      Day06/lenet/ds.py
  65. TEMPAT SAMPAH
      Day06/lenet/lenet5.pt
  66. 80 0
      Day06/lenet/model.py
  67. 79 0
      Day06/lenet/train.py
  68. TEMPAT SAMPAH
      Day07/2.jpg
  69. TEMPAT SAMPAH
      Day07/IMG_3052.JPG
  70. TEMPAT SAMPAH
      Day07/best.pt
  71. 10 0
      Day07/dataset.yaml
  72. TEMPAT SAMPAH
      Day07/last.pt
  73. 35 0
      Day07/pred.py
  74. 6 0
      Day07/test.py
  75. 7 0
      Day07/train.py
  76. TEMPAT SAMPAH
      Day07/yolov8n.pt
  77. 46 0
      README.md
  78. 1 0
      aiapp/app.bat
  79. TEMPAT SAMPAH
      aiapp/dev/__pycache__/camera.cpython-310.pyc
  80. 68 0
      aiapp/dev/camera.py
  81. TEMPAT SAMPAH
      aiapp/frame/__pycache__/app.cpython-310.pyc
  82. TEMPAT SAMPAH
      aiapp/frame/__pycache__/win.cpython-310.pyc
  83. 10 0
      aiapp/frame/app.py
  84. 48 0
      aiapp/frame/win.py
  85. 9 0
      aiapp/main.py
  86. TEMPAT SAMPAH
      aiapp/mods/best.pt
  87. 10 0
      aiapp/traffic_ui.py
  88. TEMPAT SAMPAH
      aiapp/ui/__pycache__/traffic_ui.cpython-310.pyc
  89. 63 0
      aiapp/ui/monitor.ui
  90. 1 0
      aiapp/ui/tools.bat
  91. 63 0
      aiapp/ui/traffic.ui
  92. 46 0
      aiapp/ui/traffic_ui.py
  93. 4 0
      commit.bat

TEMPAT SAMPAH
.ipynb_checkpoints/1-checkpoint.jpg


TEMPAT SAMPAH
1.jpg


TEMPAT SAMPAH
Day02/.ipynb_checkpoints/1-checkpoint.jpg


File diff ditekan karena terlalu besar
+ 139 - 0
Day02/.ipynb_checkpoints/图像处理与图像特征-checkpoint.ipynb


TEMPAT SAMPAH
Day02/1.jpg


+ 7 - 0
Day02/ex01.py

@@ -0,0 +1,7 @@
+import numpy # vector/matrix (linear algebra) library, version 1.19.5
+import cv2   # opencv-python / opencv-contrib-python: image processing
+# Build the pixel data as a nested Python list
+img_array = [[[0,0,255,128] for i in range(255)] for j in range(255)] # 255 * 255 image; every pixel is BGRA [0,0,255,128] (red with half alpha)
+# cv2 only accepts numpy arrays, so convert the list first
+img_numpy = numpy.array(img_array)
+cv2.imwrite("1.jpg",img_numpy)

TEMPAT SAMPAH
Day02/gpu.bmp


TEMPAT SAMPAH
Day02/sea.jpg


File diff ditekan karena terlalu besar
+ 139 - 0
Day02/图像处理与图像特征.ipynb


File diff ditekan karena terlalu besar
+ 33 - 0
Day03/.ipynb_checkpoints/day03卷积特征-checkpoint.ipynb


File diff ditekan karena terlalu besar
+ 110 - 0
Day03/.ipynb_checkpoints/day03笔记-checkpoint.ipynb


File diff ditekan karena terlalu besar
+ 28 - 0
Day03/.ipynb_checkpoints/zuoye-checkpoint.ipynb


TEMPAT SAMPAH
Day03/2.jpg


+ 27 - 0
Day03/codes/ex_qt01.py

@@ -0,0 +1,27 @@
+# 引入模块
+from PyQt5.QtWidgets import QApplication
+from PyQt5.QtWidgets import QDialog
+from PyQt5.QtWidgets import QMainWindow
+from PyQt5.QtWidgets import QPushButton
+# 创建Qt应用
+app = QApplication([]) #参数:命令行参数
+"""
+    Qt的应用必须在App之间
+"""
+# dlg = QDialog()
+dlg = QMainWindow
+#改变对话框的大小
+dlg.resize(1000,1000) #设置窗体大小
+dlg.move(100,100) #设置窗体的位置
+dlg.setWindowTitle("我的窗体")
+
+# 创建一个按钮
+btn = QPushButton("登录",dlg)
+btn.resize(100,36)
+btn.move(200,200)
+
+btn.show()
+
+dlg.show()
+
+app.exec() #让应用程序进入消息循环

+ 26 - 0
Day03/codes/notes.txt

@@ -0,0 +1,26 @@
+1. 条件
+    安装的模块: PyQt5, PyQt5-tools
+    where python 确定python的安装路径: ${PYTHON_HOME}
+    安装的模块在
+        ${PYTHON_HOME}/Lib/site-packages/PyQt5
+        ${PYTHON_HOME}/Lib/site-packages/PyQt5-tools
+        ${PYTHON_HOME}/Lib/site-packages/qt5-application
+        ${PYTHON_HOME}/Lib/site-packages/qt5-application/Qt/bin
+            |-designer.exe GUI(Graphic User Interface)
+            |-uic.exe 把我们设计的GUI转换为python代码
+            |-Script\pyuic5.exe  =  uic.exe
+    备注:下载pip install PyQt5 -i https://pypi.tuna.tsinghua.edu.cn/simple
+             pip install PyQt5-tools  -i https://pypi.tuna.tsinghua.edu.cn/simple
+2.开发一个窗体
+  2.1. 创建QApplication
+  2.2. 创建对话框
+      窗体:主窗体(菜单),对话框(无菜单),闪屏(没有标题栏)
+3.程序开发的结构(模式)
+   main.py  #程序入口
+   根目录moniter
+        |- ui     #界面设计
+        |- form    #窗体
+        |- biz     #业务实现(人工智能推理接口)
+        |- dev     #摄像头的处理
+
+4.

File diff ditekan karena terlalu besar
+ 33 - 0
Day03/day03卷积特征.ipynb


File diff ditekan karena terlalu besar
+ 110 - 0
Day03/day03笔记.ipynb


TEMPAT SAMPAH
Day03/gpu.bmp


File diff ditekan karena terlalu besar
+ 28 - 0
Day03/zuoye.ipynb


TEMPAT SAMPAH
Day03/zuoye.jpg


+ 7 - 0
Day04/monitor/main.py

@@ -0,0 +1,7 @@
+
+
+# pyuic5 -o monitor_ui.py monitor.ui
+# -o names the output file
+# monitor_ui.py is the generated output file; the name is arbitrary
+# monitor.ui is the designed UI file; it must be translated before use
+# pyuic5 is the tool that translates a .ui file into a .py module

+ 63 - 0
Day04/monitor/ui/monitor.ui

@@ -0,0 +1,63 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<ui version="4.0">
+ <class>Dialog</class>
+ <widget class="QDialog" name="Dialog">
+  <property name="geometry">
+   <rect>
+    <x>0</x>
+    <y>0</y>
+    <width>400</width>
+    <height>300</height>
+   </rect>
+  </property>
+  <property name="windowTitle">
+   <string>智能交通监控系统</string>
+  </property>
+  <widget class="QLabel" name="label">
+   <property name="geometry">
+    <rect>
+     <x>20</x>
+     <y>20</y>
+     <width>121</width>
+     <height>91</height>
+    </rect>
+   </property>
+   <property name="styleSheet">
+    <string notr="true">border-width:2px;
+border-style:solid;
+border-color:purple;
+border-radius:10px;
+border-bottom-color:green;
+border-left-color:blue;
+border-right-color:orange</string>
+   </property>
+   <property name="text">
+    <string>视频显示区</string>
+   </property>
+  </widget>
+  <widget class="QPushButton" name="pushButton">
+   <property name="geometry">
+    <rect>
+     <x>10</x>
+     <y>170</y>
+     <width>101</width>
+     <height>41</height>
+    </rect>
+   </property>
+   <property name="styleSheet">
+    <string notr="true">border-width:1px;
+border-style:solid;
+border-radius:7px;
+border-top-color:#ffffff;
+border-left-color:#ffffff;
+border-bottom-color:#888888;
+border-right-color:#888888</string>
+   </property>
+   <property name="text">
+    <string>处理视频</string>
+   </property>
+  </widget>
+ </widget>
+ <resources/>
+ <connections/>
+</ui>

+ 46 - 0
Day04/monitor/ui/monitor_ui.py

@@ -0,0 +1,46 @@
+# -*- coding: utf-8 -*-
+
+# Form implementation generated from reading ui file 'monitor.ui'
+#
+# Created by: PyQt5 UI code generator 5.15.9
+#
+# WARNING: Any manual changes made to this file will be lost when pyuic5 is
+# run again.  Do not edit this file unless you know what you are doing.
+
+
+from PyQt5 import QtCore, QtGui, QtWidgets
+
+
+class Ui_Dialog(object):
+    def setupUi(self, Dialog):
+        Dialog.setObjectName("Dialog")
+        Dialog.resize(400, 300)
+        self.label = QtWidgets.QLabel(Dialog)
+        self.label.setGeometry(QtCore.QRect(20, 20, 121, 91))
+        self.label.setStyleSheet("border-width:2px;\n"
+"border-style:solid;\n"
+"border-color:purple;\n"
+"border-radius:10px;\n"
+"border-bottom-color:green;\n"
+"border-left-color:blue;\n"
+"border-right-color:orange")
+        self.label.setObjectName("label")
+        self.pushButton = QtWidgets.QPushButton(Dialog)
+        self.pushButton.setGeometry(QtCore.QRect(10, 170, 101, 41))
+        self.pushButton.setStyleSheet("border-width:1px;\n"
+"border-style:solid;\n"
+"border-radius:7px;\n"
+"border-top-color:#ffffff;\n"
+"border-left-color:#ffffff;\n"
+"border-bottom-color:#888888;\n"
+"border-right-color:#888888")
+        self.pushButton.setObjectName("pushButton")
+
+        self.retranslateUi(Dialog)
+        QtCore.QMetaObject.connectSlotsByName(Dialog)
+
+    def retranslateUi(self, Dialog):
+        _translate = QtCore.QCoreApplication.translate
+        Dialog.setWindowTitle(_translate("Dialog", "智能交通监控系统"))
+        self.label.setText(_translate("Dialog", "视频显示区"))
+        self.pushButton.setText(_translate("Dialog", "处理视频"))

+ 1 - 0
Day05/aiapp/app.bat

@@ -0,0 +1 @@
+python main.py

TEMPAT SAMPAH
Day05/aiapp/dev/__pycache__/camera.cpython-310.pyc


+ 53 - 0
Day05/aiapp/dev/camera.py

@@ -0,0 +1,53 @@
+from PyQt5.QtCore import QThread    # one QThread per device: each camera runs its own worker task
+import cv2
+import numpy as np
+# from cv2 import VideoCapture 
+
+# 1. Signal support (import)
+from PyQt5.QtCore import pyqtSignal
+
+class CameraDev(QThread):
+    # Signal definition
+    sig_video = pyqtSignal(bytes, int, int, int)  # payload: raw image bytes, height (int), width (int), channel count (int)
+    def __init__(self):
+        super(CameraDev, self).__init__()
+        # Initialization for the frame-grabbing task
+        # Open the camera
+        self.cam = cv2.VideoCapture(
+            0, # camera index, starting from 0
+            cv2.CAP_DSHOW # use DirectShow (DirectX) as the capture backend
+        )
+        self.isOver = False
+
+    def run(self):
+        kernel = np.array([  # edge-enhancing kernel; deep learning amounts to finding kernels whose features help classification
+            [0, -2, 0],
+            [-2, 8, -2],
+            [0, -2, 0]
+        ])
+        # The device thread's task; the thread ends when run() returns
+        while not self.isOver:
+            # Grab and process frames in a loop
+            # print("设备准备工作!")
+            status, img = self.cam.read()  # read one frame from the camera
+            if status:
+                # print(img.shape)
+                # Display/processing hook:
+                # call the AI module here to recognize/process the frame
+                # img = cv2.GaussianBlur(img, (3, 3), 2.0)
+                img = cv2.filter2D(img, -1, kernel, delta=200.0)
+                # 2. Emit the signal carrying the frame
+                self.sig_video.emit(img.tobytes(), img.shape[0], img.shape[1], img.shape[2])
+            QThread.usleep(100000)  # 100000 microseconds = 0.1 s between frames (original comment wrongly said 1 s)
+    
+    def close(self):
+        # Stop the worker task
+        self.isOver = True
+        while self.isRunning():
+            pass
+
+        print("线程终止")
+        # Release the camera device
+        self.cam.release()
+        print("设备释放")
+                

TEMPAT SAMPAH
Day05/aiapp/frame/__pycache__/app.cpython-310.pyc


TEMPAT SAMPAH
Day05/aiapp/frame/__pycache__/win.cpython-310.pyc


+ 10 - 0
Day05/aiapp/frame/app.py

@@ -0,0 +1,10 @@
+from PyQt5.QtWidgets import QApplication
+from frame.win import Win
+
+class App(QApplication):  # extend QApplication
+    def __init__(self):    # constructor
+       super(App,self).__init__([])  # call the parent constructor (argument: command-line args)
+    
+       # create and show the main window
+       self.win = Win()
+       self.win.show()

+ 48 - 0
Day05/aiapp/frame/win.py

@@ -0,0 +1,48 @@
+from PyQt5.QtWidgets import QDialog
+from PyQt5.QtGui import QImage  #,QPxmap
+from PyQt5.QtGui import QPixmap  
+# 引入
+from ui.traffic_ui import Ui_Dialog
+
+from dev.camera import CameraDev
+
+class Win(QDialog):  # 扩展QDialog(新增,覆盖功能)
+    def __init__(self):  # 实现构造器(完成初始化,数据初始化,功能初始化)
+        super(Win, self).__init__()   # 调用父类构造器
+        # 调用ui
+        # 创建对象
+        self.ui = Ui_Dialog()  
+        # 关联ui到当前窗体
+        self.ui.setupUi(self)
+        
+        # 创建一个设备对象
+        self.dev = CameraDev()
+        # 启动设备线程工作
+        self.dev.start()
+
+        # 3. 绑定信号与槽。
+        self.dev.sig_video.connect(self.showVideo)
+
+    # 3. 定义槽(Slot)函数 (Qt技术:信号与槽),一定与信号同型
+    def showVideo(self, data, h, w, c):
+        # print("(",h, ",", w, ",",c, ")")  # python格式字符串
+        # 1. 使用data,h, w, c创建QImage
+        q_img = QImage(
+            data,    # 图像的字节数组
+            w,       # 图像宽度
+            h,       # 图像高度
+            w * c,   # 图像每行字节数
+            QImage.Format_BGR888   # 图像格式BGR,每个通道8个bit,1个字节
+        )
+        # 2. 使用QImage创建QPixmap
+        pix_img = QPixmap.fromImage(q_img)  # 自动从QImage转换为QPixmap,QLabel只支持QPixmap格式
+
+        # 3. 显示QLabel:lblVideo  
+        self.ui.label.setPixmap(pix_img)
+
+        # 4 适当的缩放
+        self.ui.label.setScaledContents(True)
+        def closeEvent(self,e): # 当窗体关闭前会调用
+            self.dev.close()
+            print("窗口关闭")
+            

+ 9 - 0
Day05/aiapp/main.py

@@ -0,0 +1,9 @@
+import PyQt5.QtCore
+
+from frame.app import App
+# Enable high-DPI scaling; must be set before the QApplication is created
+PyQt5.QtCore.QCoreApplication.setAttribute(PyQt5.QtCore.Qt.AA_EnableHighDpiScaling)
+
+app =App()
+
+app.exec()  # message loop: blocks, dispatching OS events until the app quits
+print("程序正常终止")

+ 10 - 0
Day05/aiapp/traffic_ui.py

@@ -0,0 +1,10 @@
+# -*- coding: utf-8 -*-
+
+# Form implementation generated from reading ui file 'traffic.ui'
+#
+# Created by: PyQt5 UI code generator 5.15.9
+#
+# WARNING: Any manual changes made to this file will be lost when pyuic5 is
+# run again.  Do not edit this file unless you know what you are doing.
+
+

TEMPAT SAMPAH
Day05/aiapp/ui/__pycache__/traffic_ui.cpython-310.pyc


+ 63 - 0
Day05/aiapp/ui/monitor.ui

@@ -0,0 +1,63 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<ui version="4.0">
+ <class>Dialog</class>
+ <widget class="QDialog" name="Dialog">
+  <property name="geometry">
+   <rect>
+    <x>0</x>
+    <y>0</y>
+    <width>525</width>
+    <height>339</height>
+   </rect>
+  </property>
+  <property name="windowTitle">
+   <string>智能交通监控系统</string>
+  </property>
+  <widget class="QLabel" name="label">
+   <property name="geometry">
+    <rect>
+     <x>0</x>
+     <y>0</y>
+     <width>371</width>
+     <height>221</height>
+    </rect>
+   </property>
+   <property name="styleSheet">
+    <string notr="true">border-width:2px;
+border-style:solid;
+border-color:purple;
+border-radius:10px;
+border-bottom-color:green;
+border-left-color:blue;
+border-right-color:orange</string>
+   </property>
+   <property name="text">
+    <string>视频显示区</string>
+   </property>
+  </widget>
+  <widget class="QPushButton" name="pushButton">
+   <property name="geometry">
+    <rect>
+     <x>130</x>
+     <y>240</y>
+     <width>71</width>
+     <height>31</height>
+    </rect>
+   </property>
+   <property name="styleSheet">
+    <string notr="true">border-width:1px;
+border-style:solid;
+border-radius:7px;
+border-top-color:#ffffff;
+border-left-color:#ffffff;
+border-bottom-color:#888888;
+border-right-color:#888888</string>
+   </property>
+   <property name="text">
+    <string>图像处理</string>
+   </property>
+  </widget>
+ </widget>
+ <resources/>
+ <connections/>
+</ui>

+ 1 - 0
Day05/aiapp/ui/tools.bat

@@ -0,0 +1 @@
+@pyuic5 -o traffic_ui.py traffic.ui

+ 63 - 0
Day05/aiapp/ui/traffic.ui

@@ -0,0 +1,63 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<ui version="4.0">
+ <class>Dialog</class>
+ <widget class="QDialog" name="Dialog">
+  <property name="geometry">
+   <rect>
+    <x>0</x>
+    <y>0</y>
+    <width>400</width>
+    <height>300</height>
+   </rect>
+  </property>
+  <property name="windowTitle">
+   <string>智能交通监控系统</string>
+  </property>
+  <widget class="QLabel" name="label">
+   <property name="geometry">
+    <rect>
+     <x>20</x>
+     <y>20</y>
+     <width>121</width>
+     <height>91</height>
+    </rect>
+   </property>
+   <property name="styleSheet">
+    <string notr="true">border-width:2px;
+border-style:solid;
+border-color:purple;
+border-radius:10px;
+border-bottom-color:green;
+border-left-color:blue;
+border-right-color:orange</string>
+   </property>
+   <property name="text">
+    <string>视频显示区</string>
+   </property>
+  </widget>
+  <widget class="QPushButton" name="pushButton">
+   <property name="geometry">
+    <rect>
+     <x>10</x>
+     <y>170</y>
+     <width>101</width>
+     <height>41</height>
+    </rect>
+   </property>
+   <property name="styleSheet">
+    <string notr="true">border-width:1px;
+border-style:solid;
+border-radius:7px;
+border-top-color:#ffffff;
+border-left-color:#ffffff;
+border-bottom-color:#888888;
+border-right-color:#888888</string>
+   </property>
+   <property name="text">
+    <string>处理视频</string>
+   </property>
+  </widget>
+ </widget>
+ <resources/>
+ <connections/>
+</ui>

+ 46 - 0
Day05/aiapp/ui/traffic_ui.py

@@ -0,0 +1,46 @@
+# -*- coding: utf-8 -*-
+
+# Form implementation generated from reading ui file 'traffic.ui'
+#
+# Created by: PyQt5 UI code generator 5.15.9
+#
+# WARNING: Any manual changes made to this file will be lost when pyuic5 is
+# run again.  Do not edit this file unless you know what you are doing.
+
+
+from PyQt5 import QtCore, QtGui, QtWidgets
+
+
+class Ui_Dialog(object):
+    def setupUi(self, Dialog):
+        Dialog.setObjectName("Dialog")
+        Dialog.resize(400, 300)
+        self.label = QtWidgets.QLabel(Dialog)
+        self.label.setGeometry(QtCore.QRect(20, 20, 121, 91))
+        self.label.setStyleSheet("border-width:2px;\n"
+"border-style:solid;\n"
+"border-color:purple;\n"
+"border-radius:10px;\n"
+"border-bottom-color:green;\n"
+"border-left-color:blue;\n"
+"border-right-color:orange")
+        self.label.setObjectName("label")
+        self.pushButton = QtWidgets.QPushButton(Dialog)
+        self.pushButton.setGeometry(QtCore.QRect(10, 170, 101, 41))
+        self.pushButton.setStyleSheet("border-width:1px;\n"
+"border-style:solid;\n"
+"border-radius:7px;\n"
+"border-top-color:#ffffff;\n"
+"border-left-color:#ffffff;\n"
+"border-bottom-color:#888888;\n"
+"border-right-color:#888888")
+        self.pushButton.setObjectName("pushButton")
+
+        self.retranslateUi(Dialog)
+        QtCore.QMetaObject.connectSlotsByName(Dialog)
+
+    def retranslateUi(self, Dialog):
+        _translate = QtCore.QCoreApplication.translate
+        Dialog.setWindowTitle(_translate("Dialog", "智能交通监控系统"))
+        self.label.setText(_translate("Dialog", "视频显示区"))
+        self.pushButton.setText(_translate("Dialog", "处理视频"))

TEMPAT SAMPAH
Day06/infer/01_0.jpg


TEMPAT SAMPAH
Day06/infer/02_4.jpg


TEMPAT SAMPAH
Day06/infer/05_2.jpg


TEMPAT SAMPAH
Day06/infer/__pycache__/model.cpython-310.pyc


+ 38 - 0
Day06/infer/infer.py

@@ -0,0 +1,38 @@
+from model import LeNet5
+import torch
+import cv2
+import numpy as np
+
+class  DigitClassifier:
+    def __init__(self): # initialization
+        super(DigitClassifier, self).__init__()
+        # Build the network
+        self.net = LeNet5()
+        # Load the trained weights
+        state = torch.load("lenet5.pt")
+        self.net.load_state_dict(state)
+
+    
+    def recognize_file(self, digit_file): # input: path of a digit image
+        # 1. Read the file and convert to grayscale
+        #    (assumes the image is 28x28 — the view() below fails otherwise; TODO confirm callers)
+        img = cv2.imread(digit_file)
+        img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
+        # 2. Convert the image to an NCHW float tensor
+        img = img.astype(np.float32) 
+        img = img / 255.0   # scale pixel values to 0-1
+        img = torch.from_numpy(img).clone()  # matrix -> tensor
+        img = img.view(1, 1, 28, 28)  # the model expects a 4-D NCHW batch
+        # 3. Run the network
+        y = self.net(img)
+        # 4. Post-process the prediction: class id and its probability
+        cls = torch.argmax(y, dim=1).item()  # .item() extracts the Python value
+        prob = y[0][cls].item()
+        print(cls, prob)
+        return cls,  prob# return (class id, probability of that class)
+
+
+
+if __name__ == "__main__":
+    classifier = DigitClassifier() # build the classifier
+    cls, prob = classifier.recognize_file("05_2.jpg")
+    print(F"类别:{cls},概率:{prob}")

TEMPAT SAMPAH
Day06/infer/lenet5.pt


+ 80 - 0
Day06/infer/model.py

@@ -0,0 +1,80 @@
+from torch.nn import Module    # base class for our own deep network models
+from torch.nn import Conv2d, Linear   # convolution (feature extraction), fully-connected linear layer (classifier)
+from torch.nn.functional import relu, max_pool2d, avg_pool2d  # relu activation, max pooling (largest value per window)
+import torch
+
+class LeNet5(Module):
+    # Constructor
+    def __init__(self, class_num=10):  # 10 classes: the handwritten digits 0-9
+        super(LeNet5, self).__init__()
+        """
+            5 layers, input (28 * 28 * 1):
+                |- 1. conv 5*5 -> (28 * 28 * 6)    -(2, 2) pool -> (14, 14, 6)
+                |- 2. conv 5*5 -> (10 * 10 * 16)   -(2, 2) pool -> (5, 5, 16)
+                |- 3. conv 5*5 -> (1 * 1 * 120)
+                |- 4. fully connected 120 -> 84
+                |- 5. fully connected 84 -> 10, e.g. (1, 0, 0, ...); the index of the largest probability is the predicted digit
+        """
+        self.conv1 = Conv2d(in_channels=1, out_channels=6,  kernel_size=5, stride=1, padding=2)
+        self.conv2 = Conv2d(in_channels=6, out_channels=16, kernel_size=5, stride=1, padding=0)
+        self.conv3 = Conv2d(in_channels=16, out_channels=120, kernel_size=5, stride=1,padding=0)
+
+        self.fc1 = Linear(120, 84)
+        self.fc2 = Linear(84, 10)
+
+
+    # Forward/prediction pass
+    def forward(self, x):
+        """
+            x is the input image batch in [NCHW] format (the original comment said
+            NHWC, but Conv2d and the (1, 1, 28, 28) test input below are NCHW).
+        """
+        y = x
+        # Run the prediction
+        # First layer
+        y = self.conv1(y)  # (28*28*1) -> (28*28*6)
+        y = max_pool2d(y, (2, 2))  # (28*28*6) -> (14*14*6)
+        y = relu(y)        # clamp negative values to 0
+
+        # Second layer
+        y = self.conv2(y)   # (14*14*6) -> (10*10*16)
+        y = max_pool2d(y, (2, 2))   # (10*10*16) -> (5*5*16)
+        y = relu(y)         # clamp negative values to 0
+
+        # Third layer
+        y = self.conv3(y)   # (5*5*16) -> (1*1*120)
+
+        # Flatten (1*1*120) -> 120-dim vector
+        y = y.view(-1, 120)  
+
+        # Fourth layer
+        y = self.fc1(y)
+        y = relu(y)
+
+        # Fifth layer
+        y = self.fc2(y)
+        # y = relu(y)        # an activation here would serve no purpose
+
+        # Normalize the components into probabilities (0-1)
+        y = torch.softmax(y, dim=1) 
+
+        return y
+
+
+# print(__name__)
+if __name__ == "__main__":  # runs only when executed as a script
+    # Not executed when this module is imported.
+    img = torch.randint(0, 256, (1, 1, 28, 28))  # random matrix == noise image [NCHW]
+    img = img.float()    # the network input must be float
+    net = LeNet5()
+    y = net(img)
+    # y = net.forward(img)  # equivalent to y = net(img)
+
+    # Index of the largest probability
+    cls = torch.argmax(y, dim=1)
+    print(F"识别的结果是:{cls.numpy()[0]}")
+    print(y)
+
+      
+
+
+

TEMPAT SAMPAH
Day06/lenet/00_5.jpg


TEMPAT SAMPAH
Day06/lenet/01_0.jpg


TEMPAT SAMPAH
Day06/lenet/02_4.jpg


TEMPAT SAMPAH
Day06/lenet/03_1.jpg


TEMPAT SAMPAH
Day06/lenet/04_9.jpg


TEMPAT SAMPAH
Day06/lenet/05_2.jpg


TEMPAT SAMPAH
Day06/lenet/06_1.jpg


TEMPAT SAMPAH
Day06/lenet/07_3.jpg


TEMPAT SAMPAH
Day06/lenet/08_1.jpg


TEMPAT SAMPAH
Day06/lenet/09_4.jpg


TEMPAT SAMPAH
Day06/lenet/__pycache__/model.cpython-310.pyc


TEMPAT SAMPAH
Day06/lenet/datasets/MNIST/raw/t10k-images-idx3-ubyte


TEMPAT SAMPAH
Day06/lenet/datasets/MNIST/raw/t10k-images-idx3-ubyte.gz


TEMPAT SAMPAH
Day06/lenet/datasets/MNIST/raw/t10k-labels-idx1-ubyte


TEMPAT SAMPAH
Day06/lenet/datasets/MNIST/raw/t10k-labels-idx1-ubyte.gz


TEMPAT SAMPAH
Day06/lenet/datasets/MNIST/raw/train-images-idx3-ubyte


TEMPAT SAMPAH
Day06/lenet/datasets/MNIST/raw/train-images-idx3-ubyte.gz


TEMPAT SAMPAH
Day06/lenet/datasets/MNIST/raw/train-labels-idx1-ubyte


TEMPAT SAMPAH
Day06/lenet/datasets/MNIST/raw/train-labels-idx1-ubyte.gz


+ 28 - 0
Day06/lenet/ds.py

@@ -0,0 +1,28 @@
+from torchvision.datasets import MNIST
+from torchvision.transforms import Compose, ToTensor
+import numpy as np
+import cv2
+
+# Load the dataset (downloads it if missing)
+transform = Compose([ToTensor()]) # converts images to float tensors
+ds_mnist = MNIST(root="datasets", train=True, download=True, transform=transform)
+
+# print(len(ds_mnist))
+
+# print(ds_mnist[0][0].shape)
+# print(type(ds_mnist))
+# print(ds_mnist[0][1])  # class label
+
+# print(ds_mnist[0][0])
+
+# Save the first 10 images as files
+for i in range(10):  
+    img, cls = ds_mnist[i]   # img is a float tensor with values in 0-1
+
+    img = img.mul(255)  # scale to 0-255
+    img = img.numpy().copy()
+    img = img.astype(np.uint8) # convert to integers
+
+    # Reshape into a 28 * 28 * 1 image matrix
+    img = img.transpose(1, 2, 0) # 1 * 28 * 28 -> 28 * 28 * 1
+    cv2.imwrite(F"{i:02d}_{cls}.jpg", img)

TEMPAT SAMPAH
Day06/lenet/lenet5.pt


+ 80 - 0
Day06/lenet/model.py

@@ -0,0 +1,80 @@
+from torch.nn import Module    # base class for our own deep network models
+from torch.nn import Conv2d, Linear   # convolution (feature extraction), fully-connected linear layer (classifier)
+from torch.nn.functional import relu, max_pool2d, avg_pool2d  # relu activation, max pooling (largest value per window)
+import torch
+
+class LeNet5(Module):
+    # Constructor
+    def __init__(self, class_num=10):  # 10 classes: the handwritten digits 0-9
+        super(LeNet5, self).__init__()
+        """
+            5 layers, input (28 * 28 * 1):
+                |- 1. conv 5*5 -> (28 * 28 * 6)    -(2, 2) pool -> (14, 14, 6)
+                |- 2. conv 5*5 -> (10 * 10 * 16)   -(2, 2) pool -> (5, 5, 16)
+                |- 3. conv 5*5 -> (1 * 1 * 120)
+                |- 4. fully connected 120 -> 84
+                |- 5. fully connected 84 -> 10, e.g. (1, 0, 0, ...); the index of the largest probability is the predicted digit
+        """
+        self.conv1 = Conv2d(in_channels=1, out_channels=6,  kernel_size=5, stride=1, padding=2)
+        self.conv2 = Conv2d(in_channels=6, out_channels=16, kernel_size=5, stride=1, padding=0)
+        self.conv3 = Conv2d(in_channels=16, out_channels=120, kernel_size=5, stride=1,padding=0)
+
+        self.fc1 = Linear(120, 84)
+        self.fc2 = Linear(84, 10)
+
+
+    # Forward/prediction pass
+    def forward(self, x):
+        """
+            x is the input image batch in [NCHW] format (the original comment said
+            NHWC, but Conv2d and the (1, 1, 28, 28) test input below are NCHW).
+        """
+        y = x
+        # Run the prediction
+        # First layer
+        y = self.conv1(y)  # (28*28*1) -> (28*28*6)
+        y = max_pool2d(y, (2, 2))  # (28*28*6) -> (14*14*6)
+        y = relu(y)        # clamp negative values to 0
+
+        # Second layer
+        y = self.conv2(y)   # (14*14*6) -> (10*10*16)
+        y = max_pool2d(y, (2, 2))   # (10*10*16) -> (5*5*16)
+        y = relu(y)         # clamp negative values to 0
+
+        # Third layer
+        y = self.conv3(y)   # (5*5*16) -> (1*1*120)
+
+        # Flatten (1*1*120) -> 120-dim vector
+        y = y.view(-1, 120)  
+
+        # Fourth layer
+        y = self.fc1(y)
+        y = relu(y)
+
+        # Fifth layer
+        y = self.fc2(y)
+        # y = relu(y)        # an activation here would serve no purpose
+
+        # Normalize the components into probabilities (0-1)
+        y = torch.softmax(y, dim=1) 
+
+        return y
+
+
+# print(__name__)
+if __name__ == "__main__":  # runs only when executed as a script
+    # Not executed when this module is imported.
+    img = torch.randint(0, 256, (1, 1, 28, 28))  # random matrix == noise image [NCHW]
+    img = img.float()    # the network input must be float
+    net = LeNet5()
+    y = net(img)
+    # y = net.forward(img)  # equivalent to y = net(img)
+
+    # Index of the largest probability
+    cls = torch.argmax(y, dim=1)
+    print(F"识别的结果是:{cls.numpy()[0]}")
+    print(y)
+
+      
+
+
+

+ 79 - 0
Day06/lenet/train.py

@@ -0,0 +1,79 @@
+from model import LeNet5    # 引入我们写好的神经网络(也是我们要训练的网络)
+from torch.nn import CrossEntropyLoss  # 引入损失函数(用来做分类损失最佳)
+from torch.optim import Adam  # 损失函数优化器(其他损失函数的最小值:使用梯度下降法)
+from torchvision.datasets import MNIST # 数据集
+from torchvision.transforms import Compose, ToTensor  # 图像转换为张量
+from torch.utils.data import DataLoader # 批次数据集(数据集不是一次训练,而是分成多个批次)
+import torch  # 调用torch的基本函数
+import os   # 路径与文件处理
+
+class Trainer:
+    def __init__(self):
+        # 准备训练需要的数据、损失函数,优化器,学习率
+        self.lr = 0.001
+        self.m_file = "lenet5.pt"  # 模型的保存文件
+        self.net = LeNet5() # 需要训练的网络
+        self.loss_f = CrossEntropyLoss()  # 损失函数
+        self.optimizer = Adam(self.net.parameters(), lr=self.lr)  # 优化器
+
+        # 数据集 - 训练集
+        self.trans = Compose([ToTensor()])
+        self.ds_train = MNIST(root="datasets", download=True, train=True, transform=self.trans)
+        self.bt_train = DataLoader(self.ds_train, batch_size=1000, shuffle=True) 
+
+        # 数据集 - 验证集
+        self.ds_valid = MNIST(root="datasets", download=True, train=False, transform=self.trans)
+        self.bt_valid = DataLoader(self.ds_valid, batch_size=1000, shuffle=False) #  shuffle=False是否随机打乱
+
+
+
+    def train_one(self):
+        # 训练一轮epoch,60批次batch,每个批次1000张录像
+        # 循环训练每个批次
+        batch = 1
+        for x, y in self.bt_train:
+            # print(F"\t|-第{batch:02d}批次训练。")
+            y_ = self.net(x)  # 进行预测
+            # 计算误差
+            loss = self.loss_f(y_, y)   # 使用真实的标签与预测标签计算sunshi
+            # 优化卷积核与全链接矩阵
+            self.optimizer.zero_grad()
+            loss.backward()   # 使用导数计算梯度
+            self.optimizer.step()  # 更新梯度,优化网络模型
+            batch+=1
+
+    @torch.no_grad()   # 该函数中的运算都不会进行求导跟踪
+    def valid(self):
+        # 使用测试数据集验证 (准确率,损失值)
+        all_num = 0.0 # 验证的样本数
+        acc_num = 0.0 # 识别正确数量
+        all_loss = 0.0 # 累加每个样本的损失
+        for t_x, t_y in self.bt_valid:
+            # 统计样本数
+            all_num  += len(t_x)
+            #  预测
+            t_y_ = self.net(t_x)
+            # 判定准确
+            y_cls = torch.argmax(t_y_, dim=1)
+            # 统计正确率
+            acc_num  += (y_cls == t_y).float().sum()
+            # 统计损失
+            all_loss += self.loss_f(t_y_, t_y)
+        print(F"\t|- 训练损失:{all_loss/all_num:8.6f}")
+        print(F"\t|- 准确率:{acc_num * 100.0/all_num:5.2f}%")
+
+
+
+    def train(self, epoch):
+        # 训练指定的论数
+        for e in range(epoch):
+            print(F"第{e:04d}轮训练。")
+            self.train_one()
+            self.valid()
+        
+        # 保存训练的模型
+
+
+if __name__ == "__main__":
+    trainer = Trainer()
+    trainer.train(10) # 训练10轮

TEMPAT SAMPAH
Day07/2.jpg


TEMPAT SAMPAH
Day07/IMG_3052.JPG


TEMPAT SAMPAH
Day07/best.pt


+ 10 - 0
Day07/dataset.yaml

@@ -0,0 +1,10 @@
+# Please ensure that your custom dataset is put in the same parent dir as YOLOv6_DIR
+train: images/train # train images
+val: images/val # val images
+test: images/test # test images (optional)
+
+# whether it is coco dataset, only coco dataset should be set to True.
+is_coco: False
+# Classes
+nc: 4  # number of classes
+names: ['YQ', 'ZL', 'ZY', 'ZYT']  # class names

TEMPAT SAMPAH
Day07/last.pt


+ 35 - 0
Day07/pred.py

@@ -0,0 +1,35 @@
+# Object detection
+from ultralytics import YOLO
+import cv2
+import numpy as np
+
+# Load our trained model
+model = YOLO("best.pt")
+# Run inference
+result = model("IMG_3052.JPG")  # detect the objects in the image
+
+# print(result[0])   # [0] = detection result for the first image
+
+names = result[0].names # class-id -> name mapping
+print(names)
+
+boxes = result[0].boxes  # detected boxes (the first one is used below)
+
+cls = int(boxes.cls[0].cpu().item())
+conf = boxes.conf[0].cpu().item()
+
+x1, y1, x2, y2 = boxes.xyxy[0].cpu().numpy().astype(np.int32)
+
+print(cls)              # class id
+print(conf)             # detection confidence
+print(x1, y1, x2, y2)   # bounding-box region
+print(names[cls])   # name of the detected object
+
+
+# Draw the detection on the image
+# 1. open the image
+img = cv2.imread("IMG_3052.JPG")
+# 2. annotate it with the bounding box
+img = cv2.rectangle(img, (x1, y1), (x2, y2), color=(0, 0, 255), thickness=5)
+# 3. save the annotated image
+cv2.imwrite("2.jpg", img)

+ 6 - 0
Day07/test.py

@@ -0,0 +1,6 @@
+# Verify that the ultralytics installation works.
+from ultralytics import YOLO    # "You Only Look Once" detector family
+
+if __name__ == "__main__":    # without this guard, running causes problems
+    # Load a publicly pre-trained model (downloads yolov8n.pt if missing).
+    model= YOLO("yolov8n.pt")   # build the model

+ 7 - 0
Day07/train.py

@@ -0,0 +1,7 @@
+# Training script for our custom dataset.
+from ultralytics import YOLO
+
+if __name__ == "__main__":
+    model = YOLO("yolov8n.pt")   # create a model with pretrained weights (e.g. COCO / ImageNet / VOC)
+
+    model.train(data="dataset.yaml",epochs=2)  # train on the dataset described by dataset.yaml

TEMPAT SAMPAH
Day07/yolov8n.pt


+ 46 - 0
README.md

@@ -0,0 +1,46 @@
+## 第一天实训日志
+-  **1.任务**
+>
+>
+-  **2.内容**
+>
+>
+-  **3.要求**
+>
+>
+-  **4.提交**
+>
+>
+## 第二天实训日志
+
+## 第三天实训日志
+> - **任务**
+> - 任务1:
+>     - 完成UI文件
+>     - 翻译成py文件
+> - 任务2:
+>     - 拍一个照片
+>     - 利用我们今天讲的Sobel算子,输出一个浮雕效果的图像照片
+- **内容**
+> - 图像处理
+> - 卷积特征
+> - Qt的应用编程模式
+> - Qt的UI设计与翻译
+- **要求**
+> - ui文件
+> - 翻译文件  
+> - 拍的照片+程序+输出文件
+## 第四天实训日志
+## 第五天实训日志
+## 第六天实训日志
+>  **任务**
+>- 实现目标识别
+>-   标识目标
+>-  **内容**
+>-  用已有的数据集、训练、推理模型(掌握模型训练,推理侦测)
+>-  完成自己的数据集工程(标注、格式转换、描述数据集yaml)
+>-  完成推理侦测的实现
+   
+## 第七天实训日志
+## 第八天实训日志
+## 第九天实训日志
+## 第十天实训日志

+ 1 - 0
aiapp/app.bat

@@ -0,0 +1 @@
+python main.py

TEMPAT SAMPAH
aiapp/dev/__pycache__/camera.cpython-310.pyc


+ 68 - 0
aiapp/dev/camera.py

@@ -0,0 +1,68 @@
+from PyQt5.QtCore import QThread    # 引入多线程,设备是多个,一个设备一个任务
+import cv2
+import numpy as np
+from ultralytics import YOLO   # 1
+# from cv2 import VideoCapture 
+
+# 1. 定义信号(引入)
+from PyQt5.QtCore import pyqtSignal
+
+class CameraDev(QThread):
+    # 定义信号(定义)
+    sig_video = pyqtSignal(bytes, int, int, int)  # 信号传递的数据(图像的二进制数据,字节序列(bytes), 图像高度int,宽度int,通道数int)
+    def __init__(self):
+        super(CameraDev, self).__init__()
+        # 开始视频抓取的任务初始化
+        # 初始化摄像头
+        self.cam = cv2.VideoCapture(
+            0, # 摄像头的编号,从0
+            cv2.CAP_DSHOW # 视频的处理调用DirectX 3D (DirectShow)
+        )
+        self.isOver = False
+        self.model = YOLO("mods/best.pt")   # 2
+
+    def run(self):
+        # kernel = np.array([  # 深度学习就是找到一个kernel是的特征对分类有效
+        #     [0, -2, 0],
+        #     [-2, 8, -2],
+        #     [0, -2, 0]
+        # ])
+        # 设备线程的任务,run结束,则任务结束
+        while not self.isOver:
+            # 反复抓取视频处理
+            # print("设备准备工作!")
+            status, img = self.cam.read()  # 从摄像头读取图像
+            if status:
+                # print(img.shape)
+                # 显示图像
+                # 调用人工智能模块,进行图像识别(处理)
+                # img = cv2.GaussianBlur(img, (3, 3), 2.0)
+                # img = cv2.filter2D(img, -1, kernel, delta=200.0)
+                result = self.model(img)  # 3
+                # 处理结果  # 4
+                boxes = result[0].boxes
+                names = result[0].names
+                if len(boxes) >= 1:
+                    cls = int(boxes.cls[0].cpu().item())
+                    conf = boxes.conf[0].cpu().item()
+                    x1, y1, x2, y2 = boxes.xyxy[0].cpu().numpy().astype(np.int32)
+                    # 标注:目标区域,名字,概率
+                    img = cv2.rectangle(img, (x1, y1), (x2, y2), color=(0, 0, 255), thickness=5)
+                    img = cv2.putText(img, F"{names[cls]}:{conf:.2f}", (x1, y1), 0, 3, color=(255, 0, 0), thickness=3)
+
+                # 2. 发送信号
+                self.sig_video.emit(img.tobytes(), img.shape[0], img.shape[1], img.shape[2])
+            QThread.usleep(100000)  # 1000000微秒 = 1秒
+    
+    def close(self):
+        # 停止多任务
+        self.isOver = True
+        while self.isRunning():
+            pass
+
+        print("线程终止")
+        # 释放设备
+        self.cam.release()
+        print("设备释放")
+
+                

TEMPAT SAMPAH
aiapp/frame/__pycache__/app.cpython-310.pyc


TEMPAT SAMPAH
aiapp/frame/__pycache__/win.cpython-310.pyc


+ 10 - 0
aiapp/frame/app.py

@@ -0,0 +1,10 @@
+from PyQt5.QtWidgets import QApplication
+from frame.win import Win
+
+class App(QApplication):  # extend QApplication
+    def __init__(self):    # constructor: initialize the Qt application and main window
+       super(App,self).__init__([])  # call parent constructor (arg: command-line args)
+    
+       # Create and show the main window.
+       self.win = Win()
+       self.win.show()

+ 48 - 0
aiapp/frame/win.py

@@ -0,0 +1,48 @@
+from PyQt5.QtWidgets import QDialog
+from PyQt5.QtGui import QImage  #,QPxmap
+from PyQt5.QtGui import QPixmap  
+# 引入
+from ui.traffic_ui import Ui_Dialog
+
+from dev.camera import CameraDev
+
+class Win(QDialog):  # 扩展QDialog(新增,覆盖功能)
+    def __init__(self):  # 实现构造器(完成初始化,数据初始化,功能初始化)
+        super(Win, self).__init__()   # 调用父类构造器
+        # 调用ui
+        # 创建对象
+        self.ui = Ui_Dialog()  
+        # 关联ui到当前窗体
+        self.ui.setupUi(self)
+        
+        # 创建一个设备对象
+        self.dev = CameraDev()
+        # 启动设备线程工作
+        self.dev.start()
+
+        # 3. 绑定信号与槽。
+        self.dev.sig_video.connect(self.showVideo)
+
+    # 3. 定义槽(Slot)函数 (Qt技术:信号与槽),一定与信号同型
+    def showVideo(self, data, h, w, c):
+        # print("(",h, ",", w, ",",c, ")")  # python格式字符串
+        # 1. 使用data,h, w, c创建QImage
+        q_img = QImage(
+            data,    # 图像的字节数组
+            w,       # 图像宽度
+            h,       # 图像高度
+            w * c,   # 图像每行字节数
+            QImage.Format_BGR888   # 图像格式BGR,每个通道8个bit,1个字节
+        )
+        # 2. 使用QImage创建QPixmap
+        pix_img = QPixmap.fromImage(q_img)  # 自动从QImage转换为QPixmap,QLabel只支持QPixmap格式
+
+        # 3. 显示QLabel:lblVideo  
+        self.ui.label.setPixmap(pix_img)
+
+        # 4 适当的缩放
+        self.ui.label.setScaledContents(True)
+        def closeEvent(self,e): # 当窗体关闭前会调用
+            self.dev.close()
+            print("窗口关闭")
+            

+ 9 - 0
aiapp/main.py

@@ -0,0 +1,9 @@
+import PyQt5.QtCore
+
+from frame.app import App
+# Enable high-DPI scaling; must be set before the QApplication is created.
+PyQt5.QtCore.QCoreApplication.setAttribute(PyQt5.QtCore.Qt.AA_EnableHighDpiScaling)
+
+app =App()
+
+app.exec()  # message loop (blocking): dispatches OS events to the application
+print("程序正常终止")

TEMPAT SAMPAH
aiapp/mods/best.pt


+ 10 - 0
aiapp/traffic_ui.py

@@ -0,0 +1,10 @@
+# -*- coding: utf-8 -*-
+
+# Form implementation generated from reading ui file 'traffic.ui'
+#
+# Created by: PyQt5 UI code generator 5.15.9
+#
+# WARNING: Any manual changes made to this file will be lost when pyuic5 is
+# run again.  Do not edit this file unless you know what you are doing.
+
+

TEMPAT SAMPAH
aiapp/ui/__pycache__/traffic_ui.cpython-310.pyc


+ 63 - 0
aiapp/ui/monitor.ui

@@ -0,0 +1,63 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<ui version="4.0">
+ <class>Dialog</class>
+ <widget class="QDialog" name="Dialog">
+  <property name="geometry">
+   <rect>
+    <x>0</x>
+    <y>0</y>
+    <width>525</width>
+    <height>339</height>
+   </rect>
+  </property>
+  <property name="windowTitle">
+   <string>智能交通监控系统</string>
+  </property>
+  <widget class="QLabel" name="label">
+   <property name="geometry">
+    <rect>
+     <x>0</x>
+     <y>0</y>
+     <width>371</width>
+     <height>221</height>
+    </rect>
+   </property>
+   <property name="styleSheet">
+    <string notr="true">border-width:2px;
+border-style:solid;
+border-color:purple;
+border-radius:10px;
+border-bottom-color:green;
+border-left-color:blue;
+border-right-color:orange</string>
+   </property>
+   <property name="text">
+    <string>视频显示区</string>
+   </property>
+  </widget>
+  <widget class="QPushButton" name="pushButton">
+   <property name="geometry">
+    <rect>
+     <x>130</x>
+     <y>240</y>
+     <width>71</width>
+     <height>31</height>
+    </rect>
+   </property>
+   <property name="styleSheet">
+    <string notr="true">border-width:1px;
+border-style:solid;
+border-radius:7px;
+border-top-color:#ffffff;
+border-left-color:#ffffff;
+border-bottom-color:#888888;
+border-right-color:#888888</string>
+   </property>
+   <property name="text">
+    <string>图像处理</string>
+   </property>
+  </widget>
+ </widget>
+ <resources/>
+ <connections/>
+</ui>

+ 1 - 0
aiapp/ui/tools.bat

@@ -0,0 +1 @@
+@pyuic5 -o traffic_ui.py traffic.ui

+ 63 - 0
aiapp/ui/traffic.ui

@@ -0,0 +1,63 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<ui version="4.0">
+ <class>Dialog</class>
+ <widget class="QDialog" name="Dialog">
+  <property name="geometry">
+   <rect>
+    <x>0</x>
+    <y>0</y>
+    <width>400</width>
+    <height>300</height>
+   </rect>
+  </property>
+  <property name="windowTitle">
+   <string>智能交通监控系统</string>
+  </property>
+  <widget class="QLabel" name="label">
+   <property name="geometry">
+    <rect>
+     <x>20</x>
+     <y>20</y>
+     <width>121</width>
+     <height>91</height>
+    </rect>
+   </property>
+   <property name="styleSheet">
+    <string notr="true">border-width:2px;
+border-style:solid;
+border-color:purple;
+border-radius:10px;
+border-bottom-color:green;
+border-left-color:blue;
+border-right-color:orange</string>
+   </property>
+   <property name="text">
+    <string>视频显示区</string>
+   </property>
+  </widget>
+  <widget class="QPushButton" name="pushButton">
+   <property name="geometry">
+    <rect>
+     <x>10</x>
+     <y>170</y>
+     <width>101</width>
+     <height>41</height>
+    </rect>
+   </property>
+   <property name="styleSheet">
+    <string notr="true">border-width:1px;
+border-style:solid;
+border-radius:7px;
+border-top-color:#ffffff;
+border-left-color:#ffffff;
+border-bottom-color:#888888;
+border-right-color:#888888</string>
+   </property>
+   <property name="text">
+    <string>处理视频</string>
+   </property>
+  </widget>
+ </widget>
+ <resources/>
+ <connections/>
+</ui>

+ 46 - 0
aiapp/ui/traffic_ui.py

@@ -0,0 +1,46 @@
+# -*- coding: utf-8 -*-
+
+# Form implementation generated from reading ui file 'traffic.ui'
+#
+# Created by: PyQt5 UI code generator 5.15.9
+#
+# WARNING: Any manual changes made to this file will be lost when pyuic5 is
+# run again.  Do not edit this file unless you know what you are doing.
+
+
+from PyQt5 import QtCore, QtGui, QtWidgets
+
+
+class Ui_Dialog(object):
+    def setupUi(self, Dialog):
+        Dialog.setObjectName("Dialog")
+        Dialog.resize(400, 300)
+        self.label = QtWidgets.QLabel(Dialog)
+        self.label.setGeometry(QtCore.QRect(20, 20, 121, 91))
+        self.label.setStyleSheet("border-width:2px;\n"
+"border-style:solid;\n"
+"border-color:purple;\n"
+"border-radius:10px;\n"
+"border-bottom-color:green;\n"
+"border-left-color:blue;\n"
+"border-right-color:orange")
+        self.label.setObjectName("label")
+        self.pushButton = QtWidgets.QPushButton(Dialog)
+        self.pushButton.setGeometry(QtCore.QRect(10, 170, 101, 41))
+        self.pushButton.setStyleSheet("border-width:1px;\n"
+"border-style:solid;\n"
+"border-radius:7px;\n"
+"border-top-color:#ffffff;\n"
+"border-left-color:#ffffff;\n"
+"border-bottom-color:#888888;\n"
+"border-right-color:#888888")
+        self.pushButton.setObjectName("pushButton")
+
+        self.retranslateUi(Dialog)
+        QtCore.QMetaObject.connectSlotsByName(Dialog)
+
+    def retranslateUi(self, Dialog):
+        _translate = QtCore.QCoreApplication.translate
+        Dialog.setWindowTitle(_translate("Dialog", "智能交通监控系统"))
+        self.label.setText(_translate("Dialog", "视频显示区"))
+        self.pushButton.setText(_translate("Dialog", "处理视频"))

+ 4 - 0
commit.bat

@@ -0,0 +1,4 @@
+cd C:\shixun\20200404309guoqi
+git add *
+git commit -m "作业"
+git push -u origin master

Beberapa file tidak ditampilkan karena terlalu banyak file yang berubah dalam diff ini