OpenCV/Python face_recognition library: no real multithreading because of the GIL

toe95027 · posted on 2023-10-24 in Python

I am building a facial attendance application in Python. I use PyQt5 to build the UI (in the code below, PyQt5 and PyQt6 share the same syntax), OpenCV (cv2) to grab webcam frames, and the face_recognition library to encode and recognize faces. To keep the GUI responsive, I moved the face-recognition work into a worker running in a separate QThread.
Here is a code sample that demonstrates my problem:

  • WebcamThread grabs webcam frames with cv2.VideoCapture() and emits two signals to the main thread:
    • ndarray_frame_signal: emits the frame as a numpy.ndarray, to be processed with the face_recognition library,
    • qimage_frame_signal: emits the frame as a QtGui.QImage, to be displayed in the PyQt GUI.
  • EncodeFaceThread takes frames from a queue and runs some face_recognition tasks on them.
from queue import Queue
from sys import argv
from cv2 import COLOR_BGR2RGB, VideoCapture, cvtColor
from face_recognition import compare_faces, face_encodings, face_locations
from numpy import ndarray
from time import sleep
from PyQt5.QtCore import QThread, pyqtSignal, pyqtSlot
from PyQt5.QtGui import QImage, QPixmap
from PyQt5.QtWidgets import QApplication, QLabel, QMainWindow

class WebcamThread(QThread):
    ndarray_frame_signal = pyqtSignal(ndarray)
    qimage_frame_signal = pyqtSignal(QImage)

    def __init__(self):
        super().__init__()
        self.cap = VideoCapture(0)

    def run(self):
        while self.isRunning():
            # Get current webcam frame and convert it to RGB
            frame = cvtColor(self.cap.read()[1], COLOR_BGR2RGB)
            self.ndarray_frame_signal.emit(frame)
            # Convert `ndarray` frame to `QImage` frame
            # and emit it to `qimage_frame_signal`
            h, w, _ = frame.shape
            qimage_frame = QImage(frame.data, w, h, QImage.Format.Format_RGB888)
            self.qimage_frame_signal.emit(qimage_frame)

class EncodeFaceThread(QThread):
    def __init__(self):
        super().__init__()
        self.known_encodings = []
        self.queue = Queue()

    def run(self):
        while self.isRunning():
            frame = self.queue.get()
            f_locations = face_locations(frame)
            # Only accept one face in a frame
            if len(f_locations) != 1:
                continue
            f_encoding = face_encodings(frame, f_locations)[0]
            # If this face encoding doesn't match any known encoding, add it
            if not any(compare_faces(self.known_encodings, f_encoding)):
                self.known_encodings.append(f_encoding)

class MainWindow(QMainWindow):
    def __init__(self):
        super().__init__()
        # Setup UI (don't care about it)
        self.setMinimumSize(660, 500)
        self.image_display = QLabel(self)
        self.image_display.setGeometry(10, 10, 640, 480)
        # Initialize threads
        self.webcam_thread = WebcamThread()
        self.webcam_thread.ndarray_frame_signal.connect(self.push_in_queue)
        self.webcam_thread.qimage_frame_signal.connect(self.display)
        self.webcam_thread.start()
        self.encode_face_thread = EncodeFaceThread()
        self.encode_face_thread.start()

    @pyqtSlot(ndarray)
    def push_in_queue(self, frame):
        self.encode_face_thread.queue.put(frame)

    @pyqtSlot(QImage)
    def display(self, frame):
        qpixmap_frame = QPixmap.fromImage(frame)
        self.image_display.setPixmap(qpixmap_frame)
    

app = QApplication(argv)
window = MainWindow()
window.show()
app.exec()

However, the UI is still slow and laggy because of the face_recognition processing in EncodeFaceThread. Indeed, when I rewrite EncodeFaceThread like this, the webcam display no longer lags:

class EncodeFaceThread(QThread):
    def __init__(self):
        super().__init__()

    def run(self):
        while self.isRunning():
            # Do some long-running task
            sleep(10)

So, how do I implement this EncodeFaceThread properly?

kyvafyod 1#

Sure, here is how you can modify the skeleton code you provided to include the face_locations and face_encodings tasks using the face_recognition library:

import sys
from queue import Queue
import cv2
import face_recognition
from PyQt5.QtCore import QThread, pyqtSignal
from PyQt5.QtGui import QImage, QPixmap
from PyQt5.QtWidgets import QApplication, QMainWindow, QLabel, QVBoxLayout, QWidget

class WebcamThread(QThread):
    ndarray_frame_signal = pyqtSignal(object)
    qimage_frame_signal = pyqtSignal(object)
    
    def run(self):
        cap = cv2.VideoCapture(0)
        while True:
            ret, frame = cap.read()
            if not ret:
                break

            # Emit the raw BGR frame for the face-recognition worker
            self.ndarray_frame_signal.emit(frame)

            # Convert BGR to RGB before building the QImage, otherwise the
            # displayed colors come out swapped; pass the bytes-per-line and
            # copy so the QImage does not point into a reused numpy buffer
            rgb_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
            h, w, _ = rgb_frame.shape
            q_image = QImage(rgb_frame.data, w, h, 3 * w, QImage.Format_RGB888).copy()
            self.qimage_frame_signal.emit(q_image)

        cap.release()

class EncodeFaceThread(QThread):
    def __init__(self, frame_queue):
        super().__init__()
        self.frame_queue = frame_queue
    
    def run(self):
        while True:
            frame = self.frame_queue.get()  # Get frame from the queue
            if frame is None:
                break
            
            # The frame arrives as a BGR numpy array from ndarray_frame_signal;
            # face_recognition expects RGB, so convert it
            rgb_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
            
            # Perform face_locations and face_encodings tasks using face_recognition
            face_locations = face_recognition.face_locations(rgb_frame)
            face_encodings = face_recognition.face_encodings(rgb_frame, face_locations)
            
            # Handle face detection and recognition results here
            
            # Sleep briefly to manage the queue and not consume too much CPU
            self.msleep(50)

class MainApp(QMainWindow):
    def __init__(self):
        super().__init__()
        self.setWindowTitle("Facial Attendance Application")
        
        self.central_widget = QWidget(self)
        self.setCentralWidget(self.central_widget)
        
        self.layout = QVBoxLayout(self.central_widget)
        self.label = QLabel(self)
        self.layout.addWidget(self.label)
        
        self.frame_queue = Queue()  # Create a queue for frame sharing
        self.encode_thread = EncodeFaceThread(self.frame_queue)
        self.encode_thread.start()
        
        self.webcam_thread = WebcamThread()
        self.webcam_thread.qimage_frame_signal.connect(self.update_image)
        self.webcam_thread.ndarray_frame_signal.connect(self.frame_queue.put)
        self.webcam_thread.start()

    def update_image(self, qimage):
        self.label.setPixmap(QPixmap.fromImage(qimage))

if __name__ == "__main__":
    app = QApplication(sys.argv)
    main_app = MainApp()
    main_app.show()
    sys.exit(app.exec_())

In this modified version, EncodeFaceThread receives ndarray frames from the queue, converts them from BGR to RGB, and then runs the face_locations and face_encodings tasks with the face_recognition library. Remember to handle the detection and recognition results according to your application's needs. In addition, msleep() throttles the loop so the thread does not consume too much CPU.
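As a side note, the code above leaves the "Handle face detection and recognition results here" step empty. Below is a minimal sketch of what that step could look like, reusing the known_encodings / compare_faces logic from the question's original EncodeFaceThread (the known_encodings list and the single-face check are carried over from the question, not part of this answer) and relying on the imports from the snippet above:

class EncodeFaceThread(QThread):
    def __init__(self, frame_queue):
        super().__init__()
        self.frame_queue = frame_queue
        # Carried over from the question: encodings of faces seen so far
        self.known_encodings = []

    def run(self):
        while True:
            frame = self.frame_queue.get()
            if frame is None:
                break
            # face_recognition expects RGB, the queue delivers BGR frames
            rgb_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
            f_locations = face_recognition.face_locations(rgb_frame)
            # Only accept one face per frame, as in the question
            if len(f_locations) != 1:
                continue
            f_encoding = face_recognition.face_encodings(rgb_frame, f_locations)[0]
            # If this face doesn't match any known encoding, register it
            if not any(face_recognition.compare_faces(self.known_encodings, f_encoding)):
                self.known_encodings.append(f_encoding)
            # Throttle the loop as in the answer's code
            self.msleep(50)

Also note that if the lag really comes from face_recognition holding the GIL for long stretches, as the title suggests, a QThread can only help so much; moving the encoding work into a separate process (for example with multiprocessing.Process and a multiprocessing.Queue) is the usual way to sidestep the GIL entirely.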
