From 3282d4d77e4280236d5d69c1081f3953f3970f3b Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?=E9=82=93=E6=99=BA=E8=88=AA?= <23373333@buaa.edu.cn>
Date: Mon, 26 Jan 2026 17:26:33 +0800
Subject: [PATCH] Refactor the threading architecture
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
 reproject/analyzer.py | 111 ++++++++++++++++++++++++------------------
 reproject/main.py     |  12 ++---
 2 files changed, 70 insertions(+), 53 deletions(-)

diff --git a/reproject/analyzer.py b/reproject/analyzer.py
index 4766e05..93d7a14 100644
--- a/reproject/analyzer.py
+++ b/reproject/analyzer.py
@@ -2,6 +2,8 @@ import cv2
 import mediapipe as mp
 import time
 import numpy as np
+import threading
+import queue
 from collections import deque
 from geometry_utils import (
     calculate_ear,
@@ -39,9 +41,6 @@ class MonitorSystem:
         # Initialize the face library
         self.face_lib = FaceLibrary(face_db)
 
-        # State variables
-        self.current_user = None
-
         # --- Timing control ---
         self.last_identity_check_time = 0
         self.IDENTITY_CHECK_INTERVAL = 2.0
@@ -61,6 +60,13 @@ class MonitorSystem:
         # Cache the most recent detection result
         self.cached_emotion = {"label": "detecting...", "va": (0.0, 0.0)}
 
+        self.task_queue = queue.Queue(maxsize=2)
+        self.current_user = None
+        self.current_emotion = "Neutral"
+
+        self.id_emo_thread = threading.Thread(target=self._id_emo_loop, daemon=True)
+        self.id_emo_thread.start()
+
     def _get_smoothed_value(self, history, current_val):
         """Internal helper: compute the sliding-window average."""
         history.append(current_val)
@@ -182,20 +188,18 @@ class MonitorSystem:
         now = time.time()
         # --- Identity recognition ---
         if now - self.last_identity_check_time > self.IDENTITY_CHECK_INTERVAL:
-            # xs = [l.x for l in landmarks]
-            # ys = [l.y for l in landmarks]
-            # # Compute the face bounding box
-            # face_loc = (
-            #     int(min(ys) * h), int(max(xs) * w),
-            #     int(max(ys) * h), int(min(xs) * w)
-            # )
-            # pad = 20
-            # face_loc = (max(0, face_loc[0]-pad), min(w, face_loc[1]+pad),
-            #             min(h, face_loc[2]+pad), max(0, face_loc[3]-pad))
+            sface_loc = (
+                int(min(ys) * h), int(max(xs) * w),
+                int(max(ys) * h), int(min(xs) * w)
+            )
+            spad = 20
+            sface_loc = (max(0, sface_loc[0]-spad), min(w, sface_loc[1]+spad),
+                         min(h, sface_loc[2]+spad), max(0, sface_loc[3]-spad))
 
-            match_result = self.face_lib.identify(rgb_frame, face_location=face_loc)
-            if match_result:
-                self.current_user = match_result["info"]
+            if self.task_queue.full():
+                self.task_queue.get()
+            self.task_queue.put((rgb_frame.copy(), sface_loc, 0))
+
             self.last_identity_check_time = now
 
         analysis_data["identity"] = self.current_user
@@ -204,40 +208,26 @@ class MonitorSystem:
 
         if HAS_EMOTION_MODULE and (
             now - self.last_emotion_check_time > self.EMOTION_CHECK_INTERVAL
         ):
-            if results.multi_face_landmarks:
-                landmarks = results.multi_face_landmarks[0].landmark
-                xs = [l.x for l in landmarks]
-                ys = [l.y for l in landmarks]
-                # Compute crop coordinates
-                x_min = int(min(xs) * w)
-                x_max = int(max(xs) * w)
-                y_min = int(min(ys) * h)
-                y_max = int(max(ys) * h)
+            # Compute crop coordinates
+            x_min = int(min(xs) * w)
+            x_max = int(max(xs) * w)
+            y_min = int(min(ys) * h)
+            y_max = int(max(ys) * h)
 
-                pad_x = int((x_max - x_min) * 0.2)
-                pad_y = int((y_max - y_min) * 0.2)
+            pad_x = int((x_max - x_min) * 0.1)
+            pad_y = int((y_max - y_min) * 0.1)
 
-                x_min = max(0, x_min - pad_x)
-                x_max = min(w, x_max + pad_x)
-                y_min = max(0, y_min - pad_y)
-                y_max = min(h, y_max + pad_y)
-
-                face_crop = frame[y_min:y_max, x_min:x_max]
-
-                if face_crop.size > 0:
-                    try:
-                        emo_results = analyze_emotion_with_hsemotion(face_crop)
-
-                        if emo_results:
-                            top_res = emo_results[0]
-                            self.cached_emotion["label"] = top_res.get(
-                                "emotion", "unknown"
-                            )
-                            self.cached_emotion["va"] = top_res.get("vaVal", (0.0, 0.0))
-
-                    except Exception as e:
-                        print(f"Emotion analysis error: {e}")
+            x_min = max(0, x_min - pad_x)
+            x_max = min(w, x_max + pad_x)
+            y_min = max(0, y_min - pad_y)
+            y_max = min(h, y_max + pad_y)
+
+            face_loc = (y_min, x_max, y_max, x_min)
+
+            if self.task_queue.full():
+                self.task_queue.get()
+            self.task_queue.put((frame.copy(), face_loc, 1))
 
             self.last_emotion_check_time = now
 
@@ -245,3 +235,30 @@ class MonitorSystem:
         analysis_data["emotion"] = self.cached_emotion["label"]
         analysis_data["emotion_va"] = self.cached_emotion["va"]
         return analysis_data
+
+    def _id_emo_loop(self):
+        while True:
+            try:
+                frame, face_loc, task_type = self.task_queue.get()
+                if task_type == 0:
+                    match_result = self.face_lib.identify(frame, face_location=face_loc)
+                    if match_result:
+                        self.current_user = match_result["info"]
+                elif task_type == 1 and HAS_EMOTION_MODULE:
+                    face_crop = frame[face_loc[0]:face_loc[2], face_loc[3]:face_loc[1]]
+
+                    if face_crop.size > 0:
+                        try:
+                            emo_results = analyze_emotion_with_hsemotion(face_crop)
+
+                            if emo_results:
+                                top_res = emo_results[0]
+                                self.cached_emotion["label"] = top_res.get(
+                                    "emotion", "unknown"
+                                )
+                                self.cached_emotion["va"] = top_res.get("vaVal", (0.0, 0.0))
+
+                        except Exception as e:
+                            print(f"Emotion analysis error: {e}")
+            except Exception as e:
+                print(f"Worker thread error: {e}")
diff --git a/reproject/main.py b/reproject/main.py
index 5ad2399..ce792f4 100644
--- a/reproject/main.py
+++ b/reproject/main.py
@@ -262,15 +262,15 @@ def video_stream_thread():
     #     f"filesink location={filename} "
     # )
     # out = cv2.VideoWriter(gst_pipeline, cv2.CAP_GSTREAMER, 0, fps, (width, height))
-    out1 = cv2.VideoWriter('output1.mp4', fourcc, 30.0, (1280, 720))
-    out2 = cv2.VideoWriter('output2.mp4', fourcc, 30.0, (1280, 720))
+    # out1 = cv2.VideoWriter('output1.mp4', fourcc, 30.0, (1280, 720))
+    # out2 = cv2.VideoWriter('output2.mp4', fourcc, 30.0, (1280, 720))
     while not stop_event.is_set():
         try:
             frame = video_queue.get(timeout=1)
             # small_frame = cv2.resize(apply_soft_roi(frame), (1280, 720))
             server.provide_frame(frame)
-            out1.write(frame)
-            out2.write(frame)
+            # out1.write(frame)
+            # out2.write(frame)
         except queue.Empty:
             continue
         except Exception as e:
@@ -320,8 +320,8 @@ def video_stream_thread():
 
     #     except Exception as e:
     #         print(f"[Video] Reconnecting... {e}")
    #         time.sleep(3)
-    out1.release()
-    out2.release()
+    # out1.release()
+    # out2.release()
     print("[Video] thread finished")
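
--
A note on the pattern this patch adopts (not part of the patch itself): the
frame loop no longer blocks on recognition. It computes a face box, hands a
(frame copy, box, task-type) tuple to a bounded queue, and moves on; a single
daemon worker consumes tasks and publishes results back through plain
attribute writes (current_user, cached_emotion), which is safe for
whole-object replacement under CPython's GIL. Below is a minimal,
self-contained sketch of that latest-task-wins handoff; the payload strings
and the sleep-and-print step are illustrative stand-ins for
face_lib.identify and the emotion model, not the project's API:

    import queue
    import threading
    import time

    task_queue = queue.Queue(maxsize=2)  # same bound as the patch

    def worker():
        # Counterpart of _id_emo_loop: block for the next task, dispatch on
        # the type tag (0 = identity, 1 = emotion), and survive bad tasks.
        while True:
            try:
                payload, task_type = task_queue.get()
                time.sleep(0.05)  # stand-in for the real inference call
                print(f"done: type={task_type} payload={payload}")
            except Exception as e:
                print(f"worker error: {e}")

    threading.Thread(target=worker, daemon=True).start()

    for i in range(20):
        # Latest-task-wins: evict the stale pending task instead of letting
        # the frame loop block. get_nowait() avoids the hang that a bare
        # full()/get() pair can hit if the worker drains the queue between
        # the two calls.
        if task_queue.full():
            try:
                task_queue.get_nowait()
            except queue.Empty:
                pass
        task_queue.put((f"frame-{i}", i % 2))
        time.sleep(0.01)

    time.sleep(1.0)  # let the daemon worker finish before the process exits

maxsize=2 lets one identity task and one emotion task coexist without a
backlog ever forming, and frame.copy() in the patch keeps the worker off
buffers the capture loop may still be rewriting.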
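On the box math both call sites now share: landmark coordinates are
normalized to [0, 1], so they are scaled to pixels, padded, clamped to the
frame, and packed as (top, right, bottom, left), the ordering that both the
face_location= argument and the worker's crop slice expect. A small sketch
with hypothetical values; padded_face_box is an illustrative helper, not a
function in this codebase:

    import numpy as np

    def padded_face_box(xs, ys, w, h, pad_frac=0.1):
        # Scale normalized landmark coords to pixels, pad by pad_frac of
        # the box size, clamp to the frame, and return (top, right,
        # bottom, left), the tuple the patch stores in face_loc/sface_loc.
        x_min, x_max = int(min(xs) * w), int(max(xs) * w)
        y_min, y_max = int(min(ys) * h), int(max(ys) * h)
        pad_x = int((x_max - x_min) * pad_frac)
        pad_y = int((y_max - y_min) * pad_frac)
        x_min, x_max = max(0, x_min - pad_x), min(w, x_max + pad_x)
        y_min, y_max = max(0, y_min - pad_y), min(h, y_max + pad_y)
        return (y_min, x_max, y_max, x_min)

    # Hypothetical landmarks spanning the middle of a 1280x720 frame.
    top, right, bottom, left = padded_face_box(
        xs=[0.40, 0.55, 0.60], ys=[0.30, 0.45, 0.50], w=1280, h=720
    )
    frame = np.zeros((720, 1280, 3), dtype=np.uint8)
    face_crop = frame[top:bottom, left:right]  # same slicing as _id_emo_loop
    print((top, right, bottom, left), face_crop.shape)

One asymmetry worth noting: the identity path pads by a fixed 20 pixels
(spad) while the emotion path pads by 10% of the box size, so this helper
mirrors the emotion path; a pixel-padded variant would mirror the identity
path.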