Refactor the threading architecture

邓智航
2026-01-26 17:26:33 +08:00
parent b3997c2646
commit 3282d4d77e
2 changed files with 70 additions and 53 deletions
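
The change moves the two slow per-frame calls, face identification and emotion analysis, off the main loop: the loop now only packs (frame, face_loc, task_type) tuples into a bounded queue.Queue(maxsize=2), and a daemon thread (_id_emo_loop) consumes them. When the queue is full, the oldest pending task is discarded so analysis always runs on a recent frame. A minimal, self-contained sketch of that drop-oldest producer/consumer pattern (names and timings below are illustrative, not taken from the repository):

import queue
import threading
import time

# Bounded task queue, as in the commit: at most 2 pending analysis jobs.
task_queue = queue.Queue(maxsize=2)

def worker():
    # Daemon consumer: blocks on get(), so it costs nothing while idle.
    while True:
        task = task_queue.get()
        time.sleep(0.5)  # stand-in for slow face-ID / emotion inference
        print(f"processed task {task}")

threading.Thread(target=worker, daemon=True).start()

for i in range(10):  # stand-in for the per-frame capture loop
    if task_queue.full():
        # Drop-oldest policy: discard the stalest job so the freshest
        # frame is the one that gets analyzed.
        try:
            task_queue.get_nowait()
        except queue.Empty:
            pass
    task_queue.put(i)
    time.sleep(0.1)

time.sleep(2)  # give the daemon time to drain before the script exits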

View File

@@ -2,6 +2,8 @@ import cv2
 import mediapipe as mp
 import time
 import numpy as np
+import threading
+import queue
 from collections import deque
 from geometry_utils import (
     calculate_ear,
@@ -39,9 +41,6 @@ class MonitorSystem:
         # Initialize the face library
         self.face_lib = FaceLibrary(face_db)
-        # State variables
-        self.current_user = None
         # --- Timing control ---
         self.last_identity_check_time = 0
         self.IDENTITY_CHECK_INTERVAL = 2.0
@@ -61,6 +60,13 @@ class MonitorSystem:
         # Cache the most recent detection results
         self.cached_emotion = {"label": "detecting...", "va": (0.0, 0.0)}
+        self.task_queue = queue.Queue(maxsize=2)
+        self.current_user = None
+        self.current_emotion = "Neutral"
+        self.id_emo_thread = threading.Thread(target=self._id_emo_loop, daemon=True)
+        self.id_emo_thread.start()
+
     def _get_smoothed_value(self, history, current_val):
         """Internal helper: compute the sliding-window average."""
         history.append(current_val)
@@ -182,20 +188,18 @@ class MonitorSystem:
         now = time.time()
         # --- Identity check ---
         if now - self.last_identity_check_time > self.IDENTITY_CHECK_INTERVAL:
-            # xs = [l.x for l in landmarks]
-            # ys = [l.y for l in landmarks]
-            # # Compute the face bounding box
-            # face_loc = (
-            #     int(min(ys) * h), int(max(xs) * w),
-            #     int(max(ys) * h), int(min(xs) * w)
-            # )
-            # pad = 20
-            # face_loc = (max(0, face_loc[0]-pad), min(w, face_loc[1]+pad),
-            #             min(h, face_loc[2]+pad), max(0, face_loc[3]-pad))
-
-            match_result = self.face_lib.identify(rgb_frame, face_location=face_loc)
-            if match_result:
-                self.current_user = match_result["info"]
+            sface_loc = (
+                int(min(ys) * h), int(max(xs) * w),
+                int(max(ys) * h), int(min(xs) * w)
+            )
+            spad = 20
+            sface_loc = (max(0, sface_loc[0]-spad), min(w, sface_loc[1]+spad),
+                         min(h, sface_loc[2]+spad), max(0, sface_loc[3]-spad))
+
+            if self.task_queue.full():
+                self.task_queue.get()
+            self.task_queue.put((rgb_frame.copy(), sface_loc, 0))
             self.last_identity_check_time = now
         analysis_data["identity"] = self.current_user
@@ -204,40 +208,26 @@ class MonitorSystem:
         if HAS_EMOTION_MODULE and (
             now - self.last_emotion_check_time > self.EMOTION_CHECK_INTERVAL
         ):
-            if results.multi_face_landmarks:
-                landmarks = results.multi_face_landmarks[0].landmark
-                xs = [l.x for l in landmarks]
-                ys = [l.y for l in landmarks]
             # Compute crop coordinates
             x_min = int(min(xs) * w)
             x_max = int(max(xs) * w)
             y_min = int(min(ys) * h)
             y_max = int(max(ys) * h)
-            pad_x = int((x_max - x_min) * 0.2)
-            pad_y = int((y_max - y_min) * 0.2)
+            pad_x = int((x_max - x_min) * 0.1)
+            pad_y = int((y_max - y_min) * 0.1)
             x_min = max(0, x_min - pad_x)
             x_max = min(w, x_max + pad_x)
             y_min = max(0, y_min - pad_y)
             y_max = min(h, y_max + pad_y)
-            face_crop = frame[y_min:y_max, x_min:x_max]
-            if face_crop.size > 0:
-                try:
-                    emo_results = analyze_emotion_with_hsemotion(face_crop)
-                    if emo_results:
-                        top_res = emo_results[0]
-                        self.cached_emotion["label"] = top_res.get(
-                            "emotion", "unknown"
-                        )
-                        self.cached_emotion["va"] = top_res.get("vaVal", (0.0, 0.0))
-                except Exception as e:
-                    print(f"Emotion analysis failed: {e}")
+            face_loc = (y_min, x_max, y_max, x_min)
+            if self.task_queue.full():
+                self.task_queue.get()
+            self.task_queue.put((frame.copy(), face_loc, 1))
             self.last_emotion_check_time = now
@@ -245,3 +235,30 @@ class MonitorSystem:
         analysis_data["emotion_va"] = self.cached_emotion["va"]
         return analysis_data
+
+    def _id_emo_loop(self):
+        while True:
+            try:
+                frame, face_loc, task_type = self.task_queue.get()
+                if task_type == 0:
+                    match_result = self.face_lib.identify(frame, face_location=face_loc)
+                    if match_result:
+                        self.current_user = match_result["info"]
+                elif task_type == 1 and HAS_EMOTION_MODULE:
+                    face_crop = frame[face_loc[0]:face_loc[2], face_loc[3]:face_loc[1]]
+                    if face_crop.size > 0:
+                        try:
+                            emo_results = analyze_emotion_with_hsemotion(face_crop)
+                            if emo_results:
+                                top_res = emo_results[0]
+                                self.cached_emotion["label"] = top_res.get(
+                                    "emotion", "unknown"
+                                )
+                                self.cached_emotion["va"] = top_res.get("vaVal", (0.0, 0.0))
+                        except Exception as e:
+                            print(f"Emotion analysis failed: {e}")
+            except Exception as e:
+                print(f"Worker thread error: {e}")

View File

@@ -262,15 +262,15 @@ def video_stream_thread():
     #     f"filesink location={filename} "
     # )
     # out = cv2.VideoWriter(gst_pipeline, cv2.CAP_GSTREAMER, 0, fps, (width, height))
-    out1 = cv2.VideoWriter('output1.mp4', fourcc, 30.0, (1280, 720))
-    out2 = cv2.VideoWriter('output2.mp4', fourcc, 30.0, (1280, 720))
+    # out1 = cv2.VideoWriter('output1.mp4', fourcc, 30.0, (1280, 720))
+    # out2 = cv2.VideoWriter('output2.mp4', fourcc, 30.0, (1280, 720))
     while not stop_event.is_set():
         try:
             frame = video_queue.get(timeout=1)
             # small_frame = cv2.resize(apply_soft_roi(frame), (1280, 720))
             server.provide_frame(frame)
-            out1.write(frame)
-            out2.write(frame)
+            # out1.write(frame)
+            # out2.write(frame)
         except queue.Empty:
             continue
         except Exception as e:
@@ -320,8 +320,8 @@ def video_stream_thread():
     #     except Exception as e:
     #         print(f"[Video] Reconnecting... {e}")
     #         time.sleep(3)
-    out1.release()
-    out2.release()
+    # out1.release()
+    # out2.release()
     print("[Video] Thread finished")