Files
cv_state_ana/reproject/main.py
2026-02-28 18:11:03 +08:00

677 lines
21 KiB
Python
Raw Blame History

This file contains ambiguous Unicode characters
This file contains Unicode characters that might be confused with other characters. If you think that this is intentional, you can safely ignore this warning. Use the Escape button to reveal them.
from calendar import c
from tracemalloc import stop
import cv2
import threading
import time
import queue
import socket
import json
import urllib.request
import struct
import numpy as np
import mediapipe as mp
from analyzer import MonitorSystem
from webrtc_server import WebRTCServer
from HeartRateMonitor import HeartRateMonitor
# Backend endpoint that receives the periodic state uploads.
API_URL = "http://10.128.48.48:5000/api/states"
# Seat/camera identifier attached to every payload sent from this client.
CAMERA_ID = 5
# Known-face database: key -> display name, age and reference image path.
BASIC_FACE_DB = {
    "Zhihang": {"name": "Zhihang Deng", "age": 20, "image-path": "zhihang.png"},
    "Yaoyu": {"name": "Yaoyu Zhang", "age": 20, "image-path": "yaoyu.jpg"},
}
# Scratch video file names (not referenced anywhere in this module's visible code).
VIDEO_FILE = ["video_temp1.mp4", "video_temp2.mp4"]
# Inter-thread queues. Producers drop the oldest entry when a queue is
# full so slow consumers always observe recent data.
frame_queue = queue.Queue(maxsize=2)        # capture  -> analysis (raw frames)
video_queue = queue.Queue(maxsize=10)       # analysis -> video streaming/recording
ana_video_queue = queue.Queue(maxsize=10)   # analysis -> alert thread (frames)
data_queue = queue.Queue(maxsize=10)        # analysis -> HTTP uploader (payloads)
show_queue = queue.Queue(maxsize=10)        # analysis -> local display window
front_data_queue = queue.Queue(maxsize=10)  # analysis -> front-end telemetry
ana_data_queue = queue.Queue(maxsize=10)    # analysis -> alert thread (payloads)
# Global shutdown signal observed by every worker thread.
stop_event = threading.Event()
def capture_thread():
    """Read frames from the default camera and feed the analysis queue.

    Runs until ``stop_event`` is set or the camera read fails. The queue
    always holds the freshest frames: when it is full, the oldest frame
    is discarded before the new one is enqueued, and capture never
    blocks waiting for the consumer.
    """
    cap = cv2.VideoCapture(0)
    cap.set(cv2.CAP_PROP_FRAME_WIDTH, 1280)
    cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 720)
    print("[Capture] 摄像头启动...")
    while not stop_event.is_set():
        ret, frame = cap.read()
        if not ret:
            # Camera disconnected or stream ended.
            break
        try:
            # Non-blocking put: the original full()/put() pair could
            # block if the consumer raced us between the two calls.
            frame_queue.put_nowait(frame)
        except queue.Full:
            try:
                frame_queue.get_nowait()  # drop the stale frame
                frame_queue.put_nowait(frame)
            except (queue.Empty, queue.Full):
                # Lost the race with the consumer; skip this frame.
                pass
    cap.release()
    print("[Capture] 线程结束")
def _drop_oldest_and_put(q, item):
    """Enqueue ``item`` on ``q``, discarding the oldest entry if full.

    Keeps every pipeline queue holding only fresh data so that a slow
    consumer can never stall the analysis loop.
    """
    if q.full():
        try:
            q.get_nowait()
        except queue.Empty:
            pass
    try:
        q.put_nowait(item)
    except queue.Full:
        # Lost a race with another producer; drop this item.
        pass


def analysis_thread():
    """Core analysis loop.

    1. Emits a state payload even when no face is visible (fields keep
       their zero/empty defaults).
    2. Drops the oldest entry when any output queue is full, so every
       consumer sees real-time data.
    """
    monitor = MonitorSystem(BASIC_FACE_DB)
    print("[Analysis] 分析系统启动...")
    freq = 0                # open<->close edge count in the current window
    gap = 60                # blink-frequency window length in seconds
    status = 0              # 0: eyes open, 1: eyes closed
    last_time = time.time()
    last_freq = 0           # blink frequency measured in the previous window
    heart_monitor = HeartRateMonitor()
    while not stop_event.is_set():
        try:
            frame = frame_queue.get(timeout=1)
        except queue.Empty:
            continue
        # Core per-frame analysis.
        result = monitor.process_frame(frame)
        result["eye_close_freq"] = 0
        result["heart_rate"] = 0
        _drop_oldest_and_put(video_queue, result["frame"])
        _drop_oldest_and_put(ana_video_queue, result["frame"])
        payload = {
            "seat_id": CAMERA_ID,
            "timestamp": time.time(),
            "heart_rate": 0,
            "emo_v": 0,
            "emo_a": 0,
            "pose_0": 0,  # pitch
            "pose_1": 0,  # yaw
            "pose_2": 0,  # roll
            "ear": 0,
            "mar": 0,
            "label": "",
            "eye_close_freq": 0,
            "iris_ratio_x": 0,
            "iris_ratio_y": 0,
        }
        front_data = {
            "seat_id": CAMERA_ID,
            "timestamp": time.time(),
            "label": "",
            "eye_close_freq": 0.0,
            "iris_ratio_x": 0,
            "iris_ratio_y": 0,
            "pose_0": 0,  # pitch
            "pose_1": 0,  # yaw
            "pose_2": 0,  # roll
            "heart_rate": 0,
        }
        if result["has_face"]:
            payload.update(
                {
                    "ear": result["ear"],
                    "mar": result["mar"],
                    "iris_ratio_x": result["iris_ratio"][0],
                    "iris_ratio_y": result["iris_ratio"][1],
                    "pose_0": result["pose"][0],
                    "pose_1": result["pose"][1],
                    "pose_2": result["pose"][2],
                    "label": result["emotion_label"],
                    "emo_v": result["emotion_va"][0],
                    "emo_a": result["emotion_va"][1],
                }
            )
            front_data.update(
                {
                    "label": result["emotion_label"],
                    "iris_ratio_x": result["iris_ratio"][0],
                    "iris_ratio_y": result["iris_ratio"][1],
                    "pose_0": result["pose"][0],
                    "pose_1": result["pose"][1],
                    "pose_2": result["pose"][2],
                }
            )
            # Count open<->close transitions; one blink yields two edges.
            if result["ear"] < 0.2:
                if status == 0:
                    freq += 1
                    status = 1
            elif status == 1:
                freq += 1
                status = 0
        if time.time() - last_time >= gap:
            last_freq = freq / 2  # two edges per blink
            freq = 0
            last_time = time.time()
        result["eye_close_freq"] = last_freq
        payload["eye_close_freq"] = last_freq
        front_data["eye_close_freq"] = last_freq
        # NOTE(review): called even when no face was detected — assumes
        # HeartRateMonitor tolerates an empty/None landmark; confirm.
        bpm = heart_monitor.process_frame(frame, result["landmark"])
        if bpm is not None:
            result["heart_rate"] = bpm
            payload["heart_rate"] = bpm
            front_data["heart_rate"] = bpm
        _drop_oldest_and_put(data_queue, payload)
        _drop_oldest_and_put(ana_data_queue, payload)
        _drop_oldest_and_put(front_data_queue, front_data)
        # Non-blocking as well: a stalled display window must not wedge
        # this thread (the original blocking put could deadlock shutdown).
        _drop_oldest_and_put(show_queue, (result["frame"], result))
    print("[Analysis] 分析线程结束")
def video_stream_thread(server):
    """Stream analyzed frames over WebRTC and record them to disk.

    Pulls frames from ``video_queue``, hands each one to the WebRTC
    server and appends it to a local MP4 recording. Front-end telemetry
    from ``front_data_queue`` is forwarded as JSON whenever a sample is
    available; a missing sample no longer discards the current frame
    (the original blocking get could raise Empty after the frame was
    fetched, skipping the disk write).
    """
    fourcc = cv2.VideoWriter_fourcc(*'avc1')
    out1 = cv2.VideoWriter('output1.mp4', fourcc, 30.0, (1280, 720))
    try:
        while not stop_event.is_set():
            try:
                frame = video_queue.get(timeout=1)
            except queue.Empty:
                continue
            try:
                server.provide_frame(frame)
                out1.write(frame)
                # Telemetry is optional for this cycle.
                try:
                    data = front_data_queue.get_nowait()
                except queue.Empty:
                    data = None
                if data is not None:
                    server.send_data(json.dumps(data))
            except Exception as e:
                print(f"[Video] 发送错误: {e}")
                continue
    finally:
        # Release the writer even if the loop dies unexpectedly so the
        # MP4 container is finalized.
        out1.release()
        print("[Video] 线程结束")
def data_upload_thread():
    """Periodic burst uploader.

    Cycle: sleep 30 seconds, then POST 5 samples one second apart.
    ``analysis_thread`` keeps the queue fresh, so every sample pulled
    here reflects the current state.
    """
    print("[Data] 数据上报线程启动 (周期模式: 休眠30s -> 连发5次)")
    LONG_SLEEP = 30
    BURST_COUNT = 5
    BURST_GAP = 1
    while not stop_event.is_set():
        # Phase 1: long sleep — wakes immediately when shutdown is requested.
        if stop_event.wait(LONG_SLEEP):
            break
        # Phase 2: burst of uploads.
        print(f"[Data] 开始上报周期 (连发 {BURST_COUNT} 次)...")
        # Flush stale samples so the burst only sends fresh data.
        try:
            while not data_queue.empty():
                data_queue.get_nowait()
        except queue.Empty:
            pass
        time.sleep(0.1)
        for i in range(BURST_COUNT):
            if stop_event.is_set():
                break
            try:
                data = data_queue.get(timeout=1.5)
            except queue.Empty:
                print(f"[Data] 队列为空,跳过第 {i+1} 次发送")
            else:
                try:
                    req = urllib.request.Request(
                        url=API_URL,
                        data=json.dumps(data).encode("utf-8"),
                        headers={"Content-Type": "application/json"},
                        method="POST",
                    )
                    with urllib.request.urlopen(req, timeout=2):
                        pass
                    seat = data.get("seat_id", "Unknown")
                    print(
                        f"[Data Upload {i+1}/{BURST_COUNT}] {seat} | Time:{data['timestamp']}"
                    )
                except Exception as e:
                    print(f"[Data] Upload Error: {e}")
            if i < BURST_COUNT - 1:
                stop_event.wait(BURST_GAP)
    print("[Data] 数据上报线程结束")
def alert_thread(server):
    """Aggregate per-second analysis data into fatigue/absence alerts.

    Counters run on three cadences:
      * real-time   : sleep_time, pianyi_time, no_face_time
      * per minute  : haqian_time, heart_spe, eye_close
      * per 5 min   : down_emo_time
    When any threshold trips, the video buffered since the last alert is
    replayed to the server together with a severity label.
    """
    last_record_time = time.time()
    sleep_time = 0      # consecutive seconds with eyes closed (real-time)
    haqian_time = 0     # yawn count within the current minute
    heart_spe = 0       # heart-rate sum for the current minute
    heart_num = 0       # heart-rate sample count for the current minute
    eye_close = False   # blink-frequency threshold tripped this minute
    down_emo_time = 0   # negative-emotion seconds in the 5-minute window
    pianyi_time = 0     # seconds with head pose outside limits (real-time)
    no_face_time = 0    # seconds without a detected face (real-time)
    alert_info = {
        "sleep_time": "长时间闭眼,存在睡觉可能性",
        "haqian_time": "频繁打哈欠,存在疲劳可能性",
        "heart_spe": "心率不正常,存在紧张或疲劳可能性",
        "eye_close": "频繁闭眼,存在疲劳可能性",
        "down_emo_time": "情绪低落,存在不适可能性",
        "pianyi_time": "频繁偏头,存在注意力不集中可能性",
        "no_face_time": "长时间无人脸,存在离岗或睡岗可能性",
    }
    # Bound the replay buffer: the original list grew without limit
    # between alerts. ~60 s of video assuming ~30 fps — TODO confirm rate.
    MAX_BUFFERED_FRAMES = 30 * 60
    buffered_frame = []
    alert_time = time.time()   # last per-second sampling instant
    emo_time = time.time()     # start of the current 5-minute emotion window
    alert_status = False
    alert_st = ""              # accumulated human-readable alert reasons
    level = 0                  # number of tripped conditions -> severity
    while not stop_event.is_set():
        try:
            frame = ana_video_queue.get(timeout=1)
        except queue.Empty:
            # The original let this exception escape and kill the thread.
            continue
        buffered_frame.append(frame)
        if len(buffered_frame) > MAX_BUFFERED_FRAMES:
            del buffered_frame[0]
        now = time.time()
        if now - alert_time >= 1:
            # Sample the analysis payload roughly once per second.
            try:
                data = ana_data_queue.get(timeout=1)
            except queue.Empty:
                data = None
            if data is not None:
                # sleep_time: consecutive closed-eye seconds.
                if data["ear"] < 0.2:
                    sleep_time += 1
                else:
                    sleep_time = 0
                # haqian_time: wide-open mouth counts as a yawn.
                if data["mar"] > 0.95:
                    haqian_time += 1
                # heart_spe: accumulate for the per-minute average.
                heart_spe += data["heart_rate"]
                heart_num += 1
                # eye_close: excessive blink frequency.
                if data["eye_close_freq"] > 20:
                    eye_close = True
                # down_emo_time: negative emotion labels.
                if data["label"] in ["sad", "bored", "sleepy", "angry", "annoying"]:
                    down_emo_time += 1
                # pianyi_time: head pose outside pitch/yaw limits.
                if (
                    data["pose_0"] > 25
                    or data["pose_0"] < -10
                    or data["pose_1"] > 50
                    or data["pose_1"] < -50
                ):
                    pianyi_time += 1
                # no_face_time: empty label means no face was detected.
                if data["label"] == "":
                    no_face_time += 1
                alert_time = now
        # --- real-time thresholds ---
        if sleep_time >= 20:
            alert_status = True
            alert_st += alert_info["sleep_time"] + "; "
            sleep_time = 0
            level += 1
        if pianyi_time >= 20:
            alert_status = True
            alert_st += alert_info["pianyi_time"] + "; "
            pianyi_time = 0
            level += 1
        if no_face_time >= 60:
            alert_status = True
            alert_st += alert_info["no_face_time"] + "; "
            no_face_time = 0
            level += 1
        # --- 5-minute emotion window ---
        if now - emo_time >= 300:
            if down_emo_time > 150:
                alert_status = True
                alert_st += alert_info["down_emo_time"] + "; "
                level += 1
            emo_time = now
            down_emo_time = 0
        # --- per-minute aggregates ---
        if now - last_record_time >= 60:
            heart_spe = heart_spe // heart_num if heart_num != 0 else 0
            if haqian_time > 5:
                alert_status = True
                alert_st += alert_info["haqian_time"] + ";"
                level += 1
            # NOTE(review): heart_spe == 0 (no samples this minute) also
            # trips this alert — confirm that is intended.
            if heart_spe < 60 or heart_spe > 120:
                alert_status = True
                alert_st += alert_info["heart_spe"] + ";"
                level += 1
            if eye_close:
                alert_status = True
                alert_st += alert_info["eye_close"] + ";"
                level += 1
            # TODO: also push the per-minute summary to the server.
            haqian_time = 0
            heart_spe = 0
            heart_num = 0
            eye_close = False
            last_record_time = now
        # Map the number of tripped conditions to a severity label.
        if level >= 6:
            info_level = "严重"
        elif level >= 4:
            info_level = "中等"
        else:
            info_level = "轻微"
        if alert_status:
            print(f"警报: {alert_st}")
            alert = server.alert(int(time.time()), alert_st, info_level)
            alert.start(width=1280, height=720, fps=30)
            for f in buffered_frame:
                alert.provide_frame(f)
            alert.end()
            alert_status = False
            alert_st = ""
            buffered_frame = []
            level = 0
def draw_debug_info(frame, result):
    """Overlay the per-frame analysis results on ``frame`` for debugging."""
    font = cv2.FONT_HERSHEY_SIMPLEX
    if not result["has_face"]:
        cv2.putText(frame, "NO FACE", (50, 50), font, 1, (0, 0, 255), 2)
        return
    # Identity line: green for a recognized user, yellow otherwise.
    identity = result["identity"]
    user_name = identity["name"] if identity else "Unknown"
    user_color = (0, 255, 0) if identity else (0, 255, 255)
    cv2.putText(frame, f"User: {user_name}", (20, 40), font, 0.7, user_color, 2)
    # Raw measurements.
    metric_color = (255, 255, 0)
    cv2.putText(frame, f"EAR: {result['ear']}", (20, 70), font, 0.6, metric_color, 1)
    cv2.putText(frame, f"MAR: {result['mar']}", (20, 95), font, 0.6, metric_color, 1)
    cv2.putText(
        frame, f"Iris Ratio: {result['iris_ratio']}", (20, 190), font, 0.6, metric_color, 1
    )
    cv2.putText(
        frame,
        f"Eye Close Freq: {result['eye_close_freq']}",
        (20, 170),
        font,
        0.6,
        (255, 0, 255),
        1,
    )
    cv2.putText(
        frame,
        f"Heart Rate BPM: {result['heart_rate']}",
        (20, 210),
        font,
        0.6,
        (0, 165, 255),
        1,
    )
    if result["ear"] < 0.15:
        cv2.putText(frame, "EYE CLOSE", (250, 250), font, 1, (0, 0, 255), 2)
    p, y, r = result["pose"]
    cv2.putText(frame, f"Pose: P{p} Y{y} R{r}", (20, 120), font, 0.6, (0, 255, 255), 1)
    # Emotion line, e.g. "Emo: happy (-0.50, 0.20)".
    emo = result.get("emotion_label", "N/A")
    va = result.get("emotion_va", (0, 0))
    emo_text = f"Emo: {emo} ({va[0]:.2f}, {va[1]:.2f})"
    cv2.putText(frame, emo_text, (20, 145), font, 0.6, (0, 165, 255), 1)
if __name__ == "__main__":
    # Start the WebRTC server, spawn the worker threads, then run the
    # local display loop on the main thread ('q' or Ctrl-C stops).
    server = WebRTCServer(60, 5, "ws://10.128.48.48:5000")
    server.start()
    t1 = threading.Thread(target=capture_thread, daemon=True)
    t2 = threading.Thread(target=analysis_thread, daemon=True)
    t3 = threading.Thread(target=video_stream_thread, daemon=True, args=(server,))
    t4 = threading.Thread(target=data_upload_thread, daemon=True)
    t5 = threading.Thread(target=alert_thread, daemon=True, args=(server,))
    workers = (t1, t2, t3, t4, t5)
    for t in workers:
        t.start()
    try:
        while not stop_event.is_set():
            try:
                frame, result = show_queue.get(timeout=1)
            except queue.Empty:
                continue
            # Draw on a copy so the raw frame kept by other consumers
            # stays clean.
            display_frame = frame.copy()
            draw_debug_info(display_frame, result)
            cv2.imshow("Monitor Client", display_frame)
            if cv2.waitKey(1) & 0xFF == ord("q"):
                stop_event.set()
    except KeyboardInterrupt:
        print("停止程序...")
        stop_event.set()
    finally:
        cv2.destroyAllWindows()
        # Workers are daemons; a bounded join avoids hanging forever on a
        # thread that is blocked in a queue operation after shutdown.
        for t in workers:
            t.join(timeout=3)