Rework the threading architecture
@@ -2,6 +2,8 @@ import cv2
 import mediapipe as mp
 import time
 import numpy as np
+import threading
+import queue
 from collections import deque
 from geometry_utils import (
     calculate_ear,
@@ -39,9 +41,6 @@ class MonitorSystem:
         # Initialize the face library
         self.face_lib = FaceLibrary(face_db)
 
-        # State variables
-        self.current_user = None
-
         # --- Timing control ---
         self.last_identity_check_time = 0
         self.IDENTITY_CHECK_INTERVAL = 2.0
@@ -61,6 +60,13 @@ class MonitorSystem:
         # Cache the most recent detection results
         self.cached_emotion = {"label": "detecting...", "va": (0.0, 0.0)}
 
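+        # Bounded hand-off queue: maxsize=2 keeps at most two pending tasks,
+        # and producers drop the oldest entry when it is full (see the put()
+        # call sites), so the worker always operates on a recent frame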
+        self.task_queue = queue.Queue(maxsize=2)
+        self.current_user = None
+        self.current_emotion = "Neutral"
+
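+        # Daemon worker that runs identity / emotion inference off the main
+        # capture loop; daemon=True lets the process exit without joining it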
+        self.id_emo_thread = threading.Thread(target=self._id_emo_loop, daemon=True)
+        self.id_emo_thread.start()
+
     def _get_smoothed_value(self, history, current_val):
         """Internal helper: compute the moving average."""
         history.append(current_val)
@@ -182,20 +188,18 @@ class MonitorSystem:
         now = time.time()
         # --- Identity recognition ---
         if now - self.last_identity_check_time > self.IDENTITY_CHECK_INTERVAL:
-            # xs = [l.x for l in landmarks]
-            # ys = [l.y for l in landmarks]
-            # # Compute the face bounding box
-            # face_loc = (
-            #     int(min(ys) * h), int(max(xs) * w),
-            #     int(max(ys) * h), int(min(xs) * w)
-            # )
-            # pad = 20
-            # face_loc = (max(0, face_loc[0]-pad), min(w, face_loc[1]+pad),
-            #             min(h, face_loc[2]+pad), max(0, face_loc[3]-pad))
+            sface_loc = (
+                int(min(ys) * h), int(max(xs) * w),
+                int(max(ys) * h), int(min(xs) * w)
+            )
+            spad = 20
+            sface_loc = (max(0, sface_loc[0]-spad), min(w, sface_loc[1]+spad),
+                         min(h, sface_loc[2]+spad), max(0, sface_loc[3]-spad))
 
-            match_result = self.face_lib.identify(rgb_frame, face_location=face_loc)
-            if match_result:
-                self.current_user = match_result["info"]
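+            # Offload identification to the worker thread instead of calling
+            # identify() inline; task type 0 = identity check on the padded box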
+            if self.task_queue.full():
+                self.task_queue.get()
+            self.task_queue.put((rgb_frame.copy(), sface_loc, 0))
+
             self.last_identity_check_time = now
 
         analysis_data["identity"] = self.current_user
@@ -204,40 +208,26 @@ class MonitorSystem:
         if HAS_EMOTION_MODULE and (
             now - self.last_emotion_check_time > self.EMOTION_CHECK_INTERVAL
         ):
             if results.multi_face_landmarks:
                 landmarks = results.multi_face_landmarks[0].landmark
                 xs = [l.x for l in landmarks]
                 ys = [l.y for l in landmarks]
 
-                # Compute crop coordinates
-                x_min = int(min(xs) * w)
-                x_max = int(max(xs) * w)
-                y_min = int(min(ys) * h)
-                y_max = int(max(ys) * h)
+                # Compute crop coordinates
+                x_min = int(min(xs) * w)
+                x_max = int(max(xs) * w)
+                y_min = int(min(ys) * h)
+                y_max = int(max(ys) * h)
 
-                pad_x = int((x_max - x_min) * 0.2)
-                pad_y = int((y_max - y_min) * 0.2)
+                pad_x = int((x_max - x_min) * 0.1)
+                pad_y = int((y_max - y_min) * 0.1)
 
-                x_min = max(0, x_min - pad_x)
-                x_max = min(w, x_max + pad_x)
-                y_min = max(0, y_min - pad_y)
-                y_max = min(h, y_max + pad_y)
-
-                face_crop = frame[y_min:y_max, x_min:x_max]
-
-                if face_crop.size > 0:
-                    try:
-                        emo_results = analyze_emotion_with_hsemotion(face_crop)
-
-                        if emo_results:
-                            top_res = emo_results[0]
-                            self.cached_emotion["label"] = top_res.get(
-                                "emotion", "unknown"
-                            )
-                            self.cached_emotion["va"] = top_res.get("vaVal", (0.0, 0.0))
-
-                    except Exception as e:
-                        print(f"Emotion analysis error: {e}")
+                x_min = max(0, x_min - pad_x)
+                x_max = min(w, x_max + pad_x)
+                y_min = max(0, y_min - pad_y)
+                y_max = min(h, y_max + pad_y)
+
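+                # face_loc is (top, right, bottom, left), matching the slicing
+                # done by the worker in _id_emo_loop()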
+                face_loc = (y_min, x_max, y_max, x_min)
+
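+                # Task type 1 = emotion analysis; drop the oldest pending task
+                # so the worker only ever sees the newest frame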
+                if self.task_queue.full():
+                    self.task_queue.get()
+                self.task_queue.put((frame.copy(), face_loc, 1))
+
             self.last_emotion_check_time = now
@@ -245,3 +235,30 @@ class MonitorSystem:
         analysis_data["emotion_va"] = self.cached_emotion["va"]
 
         return analysis_data
+
+    def _id_emo_loop(self):
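+        # Worker loop: blocks on the task queue and dispatches on task type
+        # (0 = face identification, 1 = emotion analysis), writing results back
+        # to self.current_user / self.cached_emotion for the main loop to read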
+        while True:
+            try:
+                frame, face_loc, task_type = self.task_queue.get()
+                if task_type == 0:
+                    match_result = self.face_lib.identify(frame, face_location=face_loc)
+                    if match_result:
+                        self.current_user = match_result["info"]
+                elif task_type == 1 and HAS_EMOTION_MODULE:
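+                    # face_loc is (top, right, bottom, left), so the crop below
+                    # is frame[top:bottom, left:right]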
+                    face_crop = frame[face_loc[0]:face_loc[2], face_loc[3]:face_loc[1]]
+
+                    if face_crop.size > 0:
+                        try:
+                            emo_results = analyze_emotion_with_hsemotion(face_crop)
+
+                            if emo_results:
+                                top_res = emo_results[0]
+                                self.cached_emotion["label"] = top_res.get(
+                                    "emotion", "unknown"
+                                )
+                                self.cached_emotion["va"] = top_res.get("vaVal", (0.0, 0.0))
+
+                        except Exception as e:
+                            print(f"Emotion analysis error: {e}")
+            except Exception as e:
+                print(f"Worker thread error: {e}")