import cv2
import threading
import time
import queue
import socket
import json
import urllib.request
import struct

from analyzer import MonitorSystem

SERVER_HOST = '10.128.50.6'                        # TCP server that receives the JPEG video stream
SERVER_PORT = 65432
API_URL = "http://10.128.50.6:5000/api/states"     # HTTP endpoint for state uploads
CAMERA_ID = "23373333"                             # identifier sent with every video packet and payload

# Local face database handed to MonitorSystem
BASIC_FACE_DB = {
    "Zhihang": {"name": "Zhihang Deng", "age": 20, "image-path": "zhihang.png"},
    "Yaoyu": {"name": "Yaoyu Zhang", "age": 20, "image-path": "yaoyu.jpg"},
}

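# Illustrative sketch (assumption): MonitorSystem is defined in analyzer.py, which is not
# shown here. This stub only documents the interface the client relies on below; the keys
# and placeholder values are inferred from how process_frame's result is used in this file.
class MonitorSystemStub:
    """Hypothetical stand-in for analyzer.MonitorSystem, handy for testing this client."""

    def __init__(self, face_db):
        self.face_db = face_db  # same shape as BASIC_FACE_DB above

    def process_frame(self, frame):
        # Result dict shape expected by analysis_thread and draw_debug_info.
        return {
            "has_face": False,          # True when a face is detected in `frame`
            "identity": None,           # e.g. a BASIC_FACE_DB entry when recognized, else None
            "ear": 0.0,                 # eye aspect ratio
            "mar": 0.0,                 # mouth aspect ratio
            "pose": (0.0, 0.0, 0.0),    # (pitch, yaw, roll)
            "emotion_label": "N/A",
            "emotion_va": (0.0, 0.0),   # (valence, arousal)
        }
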
frame_queue = queue.Queue(maxsize=2)    # latest frames for the analysis thread
video_queue = queue.Queue(maxsize=1)    # downsampled frames for the video stream thread
data_queue = queue.Queue(maxsize=10)    # state payloads waiting to be uploaded
stop_event = threading.Event()          # signals all threads to shut down

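# Illustrative helper (assumption, not called below): the capture and analysis threads both
# use the same "drop the oldest entry, then enqueue the newest" pattern on their queues;
# this sketch simply names that pattern in one place.
def put_latest(q, item):
    """Keep only the freshest item: discard one stale entry if the queue is full."""
    if q.full():
        try:
            q.get_nowait()
        except queue.Empty:
            pass
    q.put(item)
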
def capture_thread():
    """
    Capture thread: optimized distribution logic; the video stream is fed every other
    frame to reduce its rate.
    """
    cap = cv2.VideoCapture(0)
    cap.set(cv2.CAP_PROP_FRAME_WIDTH, 640)
    cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 480)

    print("[Capture] Camera started...")

    frame_count = 0

    while not stop_event.is_set():
        ret, frame = cap.read()
        if not ret:
            break

        # Analysis queue: keep only the newest frames
        if not frame_queue.full():
            frame_queue.put(frame)
        else:
            try:
                frame_queue.get_nowait()
                frame_queue.put(frame)
            except queue.Empty:
                pass

        # Video queue: forward every other frame to halve the stream rate
        if frame_count % 2 == 0:
            try:
                if video_queue.full():
                    video_queue.get_nowait()
                video_queue.put(frame)
            except queue.Empty:
                pass

        frame_count += 1
        time.sleep(0.01)

    cap.release()
    print("[Capture] Thread finished")

def analysis_thread():
    """
    Core analysis thread:
    1. A state payload is produced even when no face is detected (fields stay empty strings).
    2. When the queue is full, old data is dropped so the uploads stay real-time.
    """
    monitor = MonitorSystem(BASIC_FACE_DB)
    print("[Analysis] Analysis system started...")

    while not stop_event.is_set():
        try:
            frame = frame_queue.get(timeout=1)
        except queue.Empty:
            continue

        # Core analysis
        result = monitor.process_frame(frame)

        payload = {
            "id": CAMERA_ID,
            "time": time.strftime("%Y-%m-%d %H:%M:%S"),
            "name": "",
            "ear": "",
            "mar": "",
            "pose": "",
            "emo_label": "",
            "emo_va": ""
        }

        if result["has_face"]:
            payload.update({
                "name": result["identity"]["name"] if result["identity"] else "Unknown",
                "ear": result["ear"],
                "mar": result["mar"],
                "pose": result["pose"],
                "emo_label": result["emotion_label"],
                "emo_va": result["emotion_va"]
            })

        # Drop the oldest payload if the upload queue is full
        if data_queue.full():
            try:
                _ = data_queue.get_nowait()
            except queue.Empty:
                pass

        data_queue.put(payload)

        draw_debug_info(frame, result)
        cv2.imshow("Monitor Client", frame)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            stop_event.set()

    cv2.destroyAllWindows()
    print("[Analysis] Analysis thread finished")

def video_stream_thread():
    """
    Sender thread: optimized socket settings and compression parameters.
    """
    print(f"[Video] Connecting to server {SERVER_HOST}:{SERVER_PORT} ...")

    while not stop_event.is_set():
        try:
            with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
                s.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)

                s.connect((SERVER_HOST, SERVER_PORT))
                print("[Video] Connected")

                camera_id_bytes = CAMERA_ID.encode('utf-8')

                while not stop_event.is_set():
                    try:
                        frame = video_queue.get(timeout=1)

                        # Shrink and compress the frame before sending
                        small_frame = cv2.resize(frame, (320, 240))
                        ret, buffer = cv2.imencode('.jpg', small_frame, [cv2.IMWRITE_JPEG_QUALITY, 50])
                        if not ret:
                            continue

                        # Packet layout: [4-byte id length][camera id][4-byte frame length][JPEG bytes]
                        frame_bytes = buffer.tobytes()
                        header_id_len = len(camera_id_bytes).to_bytes(4, 'big')
                        header_frame_len = len(frame_bytes).to_bytes(4, 'big')

                        packet = header_id_len + camera_id_bytes + header_frame_len + frame_bytes
                        s.sendall(packet)

                    except queue.Empty:
                        continue
                    except Exception as e:
                        print(f"[Video] Send disconnected: {e}")
                        break

        except Exception as e:
            print(f"[Video] Reconnecting... {e}")
            time.sleep(3)

    print("[Video] Thread finished")

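# Receiver-side sketch (assumption, not part of this client): shows how a server could parse
# the length-prefixed packets produced by video_stream_thread above. `conn` is assumed to be
# an accepted TCP socket; the helper names here are hypothetical.
def recv_exact(conn, n):
    """Read exactly n bytes from the socket; return None if the peer closes early."""
    buf = b''
    while len(buf) < n:
        chunk = conn.recv(n - len(buf))
        if not chunk:
            return None
        buf += chunk
    return buf


def read_video_packet(conn):
    """Parse one packet: [4-byte id length][camera id][4-byte frame length][JPEG bytes]."""
    id_len_raw = recv_exact(conn, 4)
    if id_len_raw is None:
        return None
    camera_id = recv_exact(conn, int.from_bytes(id_len_raw, 'big'))
    frame_len_raw = recv_exact(conn, 4)
    if camera_id is None or frame_len_raw is None:
        return None
    jpeg_bytes = recv_exact(conn, int.from_bytes(frame_len_raw, 'big'))
    if jpeg_bytes is None:
        return None
    return camera_id.decode('utf-8'), jpeg_bytes
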
def data_upload_thread():
    """
    Periodic burst mode.
    Logic: every 30 seconds, send data 5 times in a row (1 second apart).
    Because analysis_thread keeps the queue filled with the latest data, whatever is
    taken here is the real-time state.
    """
    print("[Data] Data upload thread started (cycle: sleep 30s -> burst of 5 sends)")

    LONG_SLEEP = 30
    BURST_COUNT = 5
    BURST_GAP = 1

    while not stop_event.is_set():
        # --- Phase 1: long sleep (30 seconds) ---
        if stop_event.wait(LONG_SLEEP):
            break

        # --- Phase 2: burst send (5 times) ---
        print(f"[Data] Starting upload cycle (burst of {BURST_COUNT} sends)...")

        # Drain any stale payloads so the burst reports fresh states
        try:
            while not data_queue.empty():
                data_queue.get_nowait()
        except queue.Empty:
            pass

        time.sleep(0.1)

        for i in range(BURST_COUNT):
            if stop_event.is_set():
                break

            try:
                data = data_queue.get(timeout=1.5)
                try:
                    req = urllib.request.Request(
                        url=API_URL,
                        data=json.dumps(data).encode('utf-8'),
                        headers={'Content-Type': 'application/json'},
                        method='POST'
                    )
                    with urllib.request.urlopen(req, timeout=2) as resp:
                        pass

                    # Log the upload
                    name_info = data['name'] if data['name'] else "NO-FACE"
                    print(f"[Data Upload {i+1}/{BURST_COUNT}] {name_info} | Time:{data['time']}")

                except Exception as e:
                    print(f"[Data] Upload Error: {e}")

            except queue.Empty:
                print(f"[Data] Queue empty, skipping send {i+1}")

            if i < BURST_COUNT - 1:
                stop_event.wait(BURST_GAP)

    print("[Data] Data upload thread finished")

def draw_debug_info(frame, result):
    """Draw the live metrics on the frame."""
    if not result["has_face"]:
        cv2.putText(frame, "NO FACE", (50, 50), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2)
        return

    # Show identity
    id_text = result["identity"]["name"] if result["identity"] else "Unknown"
    color = (0, 255, 0) if result["identity"] else (0, 255, 255)
    cv2.putText(frame, f"User: {id_text}", (20, 40), cv2.FONT_HERSHEY_SIMPLEX, 0.7, color, 2)

    # Show metrics
    cv2.putText(frame, f"EAR: {result['ear']}", (20, 70), cv2.FONT_HERSHEY_SIMPLEX, 0.6, (255, 255, 0), 1)
    cv2.putText(frame, f"MAR: {result['mar']}", (20, 95), cv2.FONT_HERSHEY_SIMPLEX, 0.6, (255, 255, 0), 1)
    if result['ear'] < 0.15:
        cv2.putText(frame, "EYE CLOSE", (250, 250), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2)

    p, y, r = result["pose"]
    cv2.putText(frame, f"Pose: P{p} Y{y} R{r}", (20, 120), cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 255, 255), 1)

    emo = result.get("emotion_label", "N/A")
    va = result.get("emotion_va", (0, 0))
    # Display format: Emo: happy (-0.5, 0.2)
    emo_text = f"Emo: {emo} ({va[0]:.2f}, {va[1]:.2f})"
    cv2.putText(frame, emo_text, (20, 145), cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 165, 255), 1)

if __name__ == "__main__":
    t1 = threading.Thread(target=capture_thread, daemon=True)
    t2 = threading.Thread(target=analysis_thread, daemon=True)
    t3 = threading.Thread(target=video_stream_thread, daemon=True)
    t4 = threading.Thread(target=data_upload_thread, daemon=True)

    t1.start()
    t2.start()
    t3.start()
    t4.start()

    try:
        while not stop_event.is_set():
            time.sleep(1)
    except KeyboardInterrupt:
        print("Stopping program...")
        stop_event.set()

    t1.join()
    t2.join()
    t3.join()
    t4.join()