Rework the threading architecture
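
Move identity recognition and emotion analysis out of the per-frame video path and into a single background worker. The main loop now only computes face boxes and pushes (frame, face_loc, task_type) tuples into a bounded task queue; a daemon thread (_id_emo_loop) consumes the queue, runs the heavy models, and publishes results through cached state. The local video writers are commented out for now.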
@@ -2,6 +2,8 @@ import cv2
 import mediapipe as mp
 import time
 import numpy as np
+import threading
+import queue
 from collections import deque
 from geometry_utils import (
     calculate_ear,
@@ -39,9 +41,6 @@ class MonitorSystem:
         # Initialize the face database
         self.face_lib = FaceLibrary(face_db)
 
-        # State variables
-        self.current_user = None
-
         # --- Timing control ---
         self.last_identity_check_time = 0
         self.IDENTITY_CHECK_INTERVAL = 2.0
@@ -61,6 +60,13 @@ class MonitorSystem:
         # Cache the most recent detection results
         self.cached_emotion = {"label": "detecting...", "va": (0.0, 0.0)}
 
+        self.task_queue = queue.Queue(maxsize=2)
+        self.current_user = None
+        self.current_emotion = "Neutral"
+
+        self.id_emo_thread = threading.Thread(target=self._id_emo_loop, daemon=True)
+        self.id_emo_thread.start()
+
     def _get_smoothed_value(self, history, current_val):
         """Internal helper: compute a sliding-window average."""
         history.append(current_val)
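
The new design is a single-producer pipeline: the capture loop enqueues work and one daemon thread drains it. Below is a minimal, self-contained sketch of the drop-oldest bounded-queue pattern this commit uses (names and payloads are illustrative, not from this repo):

    import queue
    import threading
    import time

    task_queue = queue.Queue(maxsize=2)  # bounded: at most 2 pending tasks

    def worker():
        # Daemon loop: block until a task arrives, then process it.
        while True:
            payload = task_queue.get()
            print("processing", payload)

    threading.Thread(target=worker, daemon=True).start()

    for i in range(10):
        # Drop-oldest submit: when the queue is full, discard the stale
        # task so the worker always sees the freshest work item.
        if task_queue.full():
            try:
                task_queue.get_nowait()
            except queue.Empty:
                pass  # the worker drained it first; nothing to drop
        task_queue.put(i)
        time.sleep(0.05)

With a single producer the full()/get_nowait() pair is safe in practice; with multiple producers the check-then-act gap would need a lock.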
@@ -182,20 +188,18 @@ class MonitorSystem:
             now = time.time()
             # --- Identity recognition ---
             if now - self.last_identity_check_time > self.IDENTITY_CHECK_INTERVAL:
-                # xs = [l.x for l in landmarks]
-                # ys = [l.y for l in landmarks]
-                # # Compute the face bounding box
-                # face_loc = (
-                #     int(min(ys) * h), int(max(xs) * w),
-                #     int(max(ys) * h), int(min(xs) * w)
-                # )
-                # pad = 20
-                # face_loc = (max(0, face_loc[0]-pad), min(w, face_loc[1]+pad),
-                #             min(h, face_loc[2]+pad), max(0, face_loc[3]-pad))
+                sface_loc = (
+                    int(min(ys) * h), int(max(xs) * w),
+                    int(max(ys) * h), int(min(xs) * w)
+                )
+                spad = 20
+                sface_loc = (max(0, sface_loc[0]-spad), min(w, sface_loc[1]+spad),
+                             min(h, sface_loc[2]+spad), max(0, sface_loc[3]-spad))
+
+                if self.task_queue.full():
+                    self.task_queue.get()
+                self.task_queue.put((rgb_frame.copy(), sface_loc, 0))
 
-                match_result = self.face_lib.identify(rgb_frame, face_location=face_loc)
-                if match_result:
-                    self.current_user = match_result["info"]
                 self.last_identity_check_time = now
 
             analysis_data["identity"] = self.current_user
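
The four-tuple assembled above follows the (top, right, bottom, left) ordering that face_recognition-style identify() implementations expect. A small worked example of the same conversion, with made-up landmark values and frame size:

    # Hypothetical values: normalized landmark coords on a 1280x720 frame.
    w, h = 1280, 720
    xs = [0.40, 0.55, 0.61]
    ys = [0.30, 0.45, 0.52]

    # (top, right, bottom, left) in pixels.
    face_loc = (int(min(ys) * h), int(max(xs) * w),
                int(max(ys) * h), int(min(xs) * w))

    # Grow the box by a fixed pad, clamped to the frame borders.
    pad = 20
    face_loc = (max(0, face_loc[0] - pad), min(w, face_loc[1] + pad),
                min(h, face_loc[2] + pad), max(0, face_loc[3] - pad))
    print(face_loc)  # (196, 800, 394, 492)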
@@ -204,10 +208,6 @@ class MonitorSystem:
             if HAS_EMOTION_MODULE and (
                 now - self.last_emotion_check_time > self.EMOTION_CHECK_INTERVAL
             ):
-                if results.multi_face_landmarks:
-                    landmarks = results.multi_face_landmarks[0].landmark
-                    xs = [l.x for l in landmarks]
-                    ys = [l.y for l in landmarks]
 
                 # Compute the crop coordinates
                 x_min = int(min(xs) * w)
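
For context, MediaPipe FaceMesh landmarks are normalized to [0, 1], which is why the box math above multiplies by frame width and height. A sketch of the standard access pattern (the synthetic frame contains no face, so the branch only runs on a real capture):

    import cv2
    import mediapipe as mp
    import numpy as np

    frame = np.zeros((720, 1280, 3), dtype=np.uint8)  # stand-in for a camera frame
    h, w = frame.shape[:2]

    with mp.solutions.face_mesh.FaceMesh(static_image_mode=True) as face_mesh:
        results = face_mesh.process(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB))

    if results.multi_face_landmarks:
        landmarks = results.multi_face_landmarks[0].landmark
        xs = [l.x for l in landmarks]  # normalized; multiply by w for pixels
        ys = [l.y for l in landmarks]  # normalized; multiply by h for pixels
        print(int(min(xs) * w), int(min(ys) * h))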
@@ -215,15 +215,37 @@ class MonitorSystem:
                 y_min = int(min(ys) * h)
                 y_max = int(max(ys) * h)
 
-                pad_x = int((x_max - x_min) * 0.2)
-                pad_y = int((y_max - y_min) * 0.2)
+                pad_x = int((x_max - x_min) * 0.1)
+                pad_y = int((y_max - y_min) * 0.1)
 
                 x_min = max(0, x_min - pad_x)
                 x_max = min(w, x_max + pad_x)
                 y_min = max(0, y_min - pad_y)
                 y_max = min(h, y_max + pad_y)
 
-                face_crop = frame[y_min:y_max, x_min:x_max]
+                face_loc = (y_min, x_max, y_max, x_min)
+
+                if self.task_queue.full():
+                    self.task_queue.get()
+                self.task_queue.put((frame.copy(), face_loc, 1))
+
+                self.last_emotion_check_time = now
+
+        analysis_data["emotion_label"] = self.cached_emotion["label"]
+        analysis_data["emotion_va"] = self.cached_emotion["va"]
+
+        return analysis_data
+
+    def _id_emo_loop(self):
+        while True:
+            try:
+                frame, face_loc, task_type = self.task_queue.get()
+                if task_type == 0:
+                    match_result = self.face_lib.identify(frame, face_location=face_loc)
+                    if match_result:
+                        self.current_user = match_result["info"]
+                elif task_type == 1 and HAS_EMOTION_MODULE:
+                    face_crop = frame[face_loc[0]:face_loc[2], face_loc[3]:face_loc[1]]
 
                     if face_crop.size > 0:
                         try:
@@ -238,10 +260,5 @@ class MonitorSystem:
 
                         except Exception as e:
                             print(f"Emotion analysis error: {e}")
-
-                self.last_emotion_check_time = now
-
-        analysis_data["emotion_label"] = self.cached_emotion["label"]
-        analysis_data["emotion_va"] = self.cached_emotion["va"]
-
-        return analysis_data
+            except Exception as e:
+                print(f"Thread processing error: {e}")
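
The worker recovers the crop from the full frame with NumPy slicing; rows come from the top/bottom fields and columns from left/right. The .size guard matters because a degenerate box slices to an empty view rather than raising. A tiny illustration:

    import numpy as np

    frame = np.zeros((720, 1280, 3), dtype=np.uint8)  # (h, w, channels)

    # face_loc is (top, right, bottom, left): rows first, then columns.
    top, right, bottom, left = 196, 800, 394, 492
    face_crop = frame[top:bottom, left:right]
    print(face_crop.shape)  # (198, 308, 3)

    # A degenerate box yields an empty array instead of an error,
    # which is why the worker checks face_crop.size before inference.
    print(frame[400:300, 500:400].size)  # 0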
@@ -262,15 +262,15 @@ def video_stream_thread():
     #     f"filesink location={filename} "
     # )
    # out = cv2.VideoWriter(gst_pipeline, cv2.CAP_GSTREAMER, 0, fps, (width, height))
-    out1 = cv2.VideoWriter('output1.mp4', fourcc, 30.0, (1280, 720))
-    out2 = cv2.VideoWriter('output2.mp4', fourcc, 30.0, (1280, 720))
+    # out1 = cv2.VideoWriter('output1.mp4', fourcc, 30.0, (1280, 720))
+    # out2 = cv2.VideoWriter('output2.mp4', fourcc, 30.0, (1280, 720))
     while not stop_event.is_set():
         try:
             frame = video_queue.get(timeout=1)
             # small_frame = cv2.resize(apply_soft_roi(frame), (1280, 720))
             server.provide_frame(frame)
-            out1.write(frame)
-            out2.write(frame)
+            # out1.write(frame)
+            # out2.write(frame)
         except queue.Empty:
             continue
         except Exception as e:
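
If the disk recording is re-enabled later, the usual OpenCV writer lifecycle applies. A minimal sketch, assuming the mp4v codec and the 1280x720 size declared above (every written frame must match that size):

    import cv2
    import numpy as np

    fourcc = cv2.VideoWriter_fourcc(*'mp4v')
    out = cv2.VideoWriter('output1.mp4', fourcc, 30.0, (1280, 720))

    for _ in range(30):  # one second of synthetic frames at 30 fps
        frame = np.zeros((720, 1280, 3), dtype=np.uint8)  # BGR, 1280x720
        out.write(frame)

    out.release()  # finalize the container; skipping this can corrupt the file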
@@ -320,8 +320,8 @@ def video_stream_thread():
     # except Exception as e:
     #     print(f"[Video] Reconnecting... {e}")
     #     time.sleep(3)
 
-    out1.release()
-    out2.release()
+    # out1.release()
+    # out2.release()
 
     print("[Video] thread finished")