feat: 重构存储策略为 SQLite

- 新增 config/database.py: SQLite 数据库管理器
  - WAL 模式提升写入性能
  - 异步批量写入队列
  - 滚动清理机制(保留7天)

- 新增 core/storage_manager.py: 图片存储管理
  - 异步保存抓拍图片
  - 本地缓存断网容灾
  - 按日期分目录存储

- 更新 config/settings.py: 添加 SQLite 配置
  - SQLiteConfig 数据类
  - 环境变量支持

- 更新 core/result_reporter.py: 适配新存储
  - 使用 SQLite 替代 MySQL
  - AlertInfo 数据类重构
  - 断网自动缓存到本地
This commit is contained in:
2026-01-30 11:34:51 +08:00
parent 6dc3442cc2
commit ccb021677c
11 changed files with 1120 additions and 874 deletions

View File

@@ -1,316 +1,375 @@
"""
数据库连接配置模块
提供MySQL数据库连接池管理和操作封装
SQLite 数据库模块
边缘AI推理服务的本地数据存储
特性:
- WAL 模式Write-Ahead Logging提升写入性能
- 异步写入策略
- 滚动清理机制保留7天数据
"""
import os
import sqlite3
import threading
import queue
import time
import logging
from contextlib import contextmanager
from typing import Any, Dict, Generator, List, Optional
from sqlalchemy import create_engine, Column, String, Boolean, Integer, Float, Text, JSON, DateTime
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker, Session
from sqlalchemy.pool import QueuePool
from config.settings import get_settings, DatabaseConfig
from datetime import datetime, timedelta
from typing import Any, Dict, List, Optional, Generator
from dataclasses import dataclass, field
from pathlib import Path
logger = logging.getLogger(__name__)
Base = declarative_base()
@dataclass
class StorageConfig:
    """Local storage settings for the edge-side SQLite database.

    Defaults mirror SQLiteConfig in config/settings.py — keep the two in sync.
    """
    db_path: str = "./data/security_events.db"  # SQLite database file location
    image_dir: str = "./data/captures"          # root directory for capture images
    retention_days: int = 7                     # rolling-cleanup window, in days
    wal_mode: bool = True                       # enable PRAGMA journal_mode=WAL on connect
    batch_size: int = 100                       # flush queued writes once this many accumulate...
    flush_interval: float = 5.0                 # ...or after this many seconds, whichever first
class CameraInfo(Base):
"""摄像头信息表模型"""
__tablename__ = "camera_info"
id = Column(Integer, primary_key=True, autoincrement=True)
camera_id = Column(String(64), unique=True, nullable=False, index=True)
camera_name = Column(String(128), nullable=True)
rtsp_url = Column(String(512), nullable=False)
status = Column(Boolean, default=True)
enabled = Column(Boolean, default=True)
location = Column(String(256), nullable=True)
extra_params = Column(JSON, nullable=True)
created_at = Column(DateTime, nullable=True)
updated_at = Column(DateTime, nullable=True)
@dataclass
class AlertRecord:
    """A single alert event, as queued for asynchronous persistence.

    Field names mirror the columns of the `alert_records` table; bbox is
    stringified and datetimes are ISO-formatted when the record is
    serialized for SQLite (see SQLiteManager.queue_alert).
    """
    alert_id: str                        # unique alert identifier (UNIQUE column)
    camera_id: str                       # source camera
    roi_id: str                          # region of interest that triggered the alert
    alert_type: str                      # kind of alert raised
    target_class: Optional[str] = None   # detected object class, if any
    confidence: Optional[float] = None   # detection confidence
    bbox: Optional[List[float]] = None   # bounding box of the triggering target
    message: Optional[str] = None        # human-readable description
    image_path: Optional[str] = None     # path of the saved capture image, if saved
    status: str = "pending"              # 'pending' until processed
    created_at: datetime = field(default_factory=datetime.now)  # creation time
    processed_at: Optional[datetime] = None                     # set when status is updated
class ROIConfigModel(Base):
"""ROI配置表模型"""
__tablename__ = "roi_config"
id = Column(Integer, primary_key=True, autoincrement=True)
roi_id = Column(String(64), unique=True, nullable=False, index=True)
camera_id = Column(String(64), nullable=False, index=True)
roi_type = Column(String(32), nullable=False) # 'polygon' or 'rectangle'
coordinates = Column(JSON, nullable=False) # 多边形顶点或矩形坐标
algorithm_type = Column(String(32), nullable=False) # 'leave_post', 'intrusion', etc.
alert_threshold = Column(Integer, default=3)
alert_cooldown = Column(Integer, default=300)
enabled = Column(Boolean, default=True)
extra_params = Column(JSON, nullable=True)
created_at = Column(DateTime, nullable=True)
updated_at = Column(DateTime, nullable=True)
class AlertRecord(Base):
"""告警记录表模型"""
__tablename__ = "alert_records"
id = Column(Integer, primary_key=True, autoincrement=True)
alert_id = Column(String(64), unique=True, nullable=False, index=True)
camera_id = Column(String(64), nullable=False, index=True)
roi_id = Column(String(64), nullable=False, index=True)
alert_type = Column(String(32), nullable=False)
target_class = Column(String(64), nullable=True)
confidence = Column(Float, nullable=True)
bbox = Column(JSON, nullable=True)
message = Column(Text, nullable=True)
screenshot = Column(Text, nullable=True) # Base64编码的截图
status = Column(String(32), default="pending")
created_at = Column(DateTime, nullable=True)
processed_at = Column(DateTime, nullable=True)
class DatabaseManager:
"""数据库连接管理器类"""
class SQLiteManager:
"""SQLite 数据库管理器"""
_instance = None
_engine = None
_session_factory = None
_available = False
_lock = threading.Lock()
def __new__(cls):
def __new__(cls, config: Optional[StorageConfig] = None):
if cls._instance is None:
cls._instance = super().__new__(cls)
with cls._lock:
if cls._instance is None:
cls._instance = super().__new__(cls)
cls._instance._initialized = False
return cls._instance
def __init__(self, config: Optional[DatabaseConfig] = None):
if self._engine is not None:
def __init__(self, config: Optional[StorageConfig] = None):
if self._initialized:
return
if config is None:
settings = get_settings()
config = settings.database
config = StorageConfig()
self._init_engine(config)
self.config = config
self._conn: Optional[sqlite3.Connection] = None
self._write_queue: queue.Queue = queue.Queue()
self._running = False
self._write_thread: Optional[threading.Thread] = None
self._cleanup_thread: Optional[threading.Thread] = None
self._init_directories()
self._init_database()
self._start_background_threads()
self._initialized = True
logger.info(f"SQLite 数据库初始化成功: {config.db_path}")
def _init_engine(self, config: DatabaseConfig):
"""初始化数据库引擎"""
try:
connection_string = (
f"mysql+pymysql://{config.username}:{config.password}"
f"@{config.host}:{config.port}/{config.database}"
f"?charset=utf8mb4"
def _init_directories(self):
"""初始化目录"""
Path(self.config.db_path).parent.mkdir(parents=True, exist_ok=True)
Path(self.config.image_dir).mkdir(parents=True, exist_ok=True)
def _init_database(self):
"""初始化数据库表"""
self._conn = sqlite3.connect(
self.config.db_path,
check_same_thread=False,
timeout=30.0
)
if self.config.wal_mode:
cursor = self._conn.cursor()
cursor.execute("PRAGMA journal_mode=WAL;")
cursor.execute("PRAGMA synchronous=NORMAL;")
cursor.execute("PRAGMA cache_size=-64000;")
self._conn.commit()
cursor = self._conn.cursor()
cursor.execute("""
CREATE TABLE IF NOT EXISTS alert_records (
id INTEGER PRIMARY KEY AUTOINCREMENT,
alert_id TEXT UNIQUE NOT NULL,
camera_id TEXT NOT NULL,
roi_id TEXT NOT NULL,
alert_type TEXT NOT NULL,
target_class TEXT,
confidence REAL,
bbox TEXT,
message TEXT,
image_path TEXT,
status TEXT DEFAULT 'pending',
created_at TEXT NOT NULL,
processed_at TEXT
)
self._engine = create_engine(
connection_string,
poolclass=QueuePool,
pool_size=config.pool_size,
pool_recycle=config.pool_recycle,
echo=config.echo,
pool_pre_ping=True,
max_overflow=5,
)
self._session_factory = sessionmaker(bind=self._engine)
test_connection = self._engine.connect()
test_connection.close()
self._available = True
logger.info(f"数据库引擎初始化成功: {config.host}:{config.port}/{config.database}")
except Exception as e:
self._available = False
logger.warning(f"数据库连接失败,服务将在无数据库模式下运行: {e}")
@property
def is_available(self) -> bool:
"""检查数据库是否可用"""
return self._available
@contextmanager
def get_session(self) -> Generator[Session, None, None]:
"""获取数据库会话上下文"""
if not self._available:
logger.warning("数据库不可用,跳过数据库操作")
yield None
return
""")
session = self._session_factory()
try:
yield session
session.commit()
except Exception as e:
session.rollback()
logger.error(f"数据库操作异常: {e}")
raise
finally:
session.close()
def get_camera_info(self, camera_id: Optional[str] = None) -> List[Dict[str, Any]]:
"""获取摄像头信息"""
if not self._available:
logger.warning("数据库不可用,返回空摄像头列表")
return []
cursor.execute("""
CREATE INDEX IF NOT EXISTS idx_alert_camera
ON alert_records(camera_id)
""")
cursor.execute("""
CREATE INDEX IF NOT EXISTS idx_alert_created
ON alert_records(created_at)
""")
cursor.execute("""
CREATE INDEX IF NOT EXISTS idx_alert_status
ON alert_records(status)
""")
with self.get_session() as session:
if session is None:
return []
query = session.query(CameraInfo)
if camera_id:
query = query.filter(CameraInfo.camera_id == camera_id)
cameras = query.filter(CameraInfo.enabled == True).all()
result = []
for camera in cameras:
result.append({
"camera_id": camera.camera_id,
"camera_name": camera.camera_name,
"rtsp_url": camera.rtsp_url,
"status": camera.status,
"location": camera.location,
"extra_params": camera.extra_params,
})
return result
self._conn.commit()
def get_roi_configs(self, camera_id: Optional[str] = None,
roi_id: Optional[str] = None) -> List[Dict[str, Any]]:
"""获取ROI配置"""
if not self._available:
logger.warning("数据库不可用返回空ROI配置列表")
return []
def _start_background_threads(self):
"""启动后台线程"""
self._running = True
with self.get_session() as session:
if session is None:
return []
query = session.query(ROIConfigModel)
if camera_id:
query = query.filter(ROIConfigModel.camera_id == camera_id)
if roi_id:
query = query.filter(ROIConfigModel.roi_id == roi_id)
query = query.filter(ROIConfigModel.enabled == True)
rois = query.all()
result = []
for roi in rois:
result.append({
"roi_id": roi.roi_id,
"camera_id": roi.camera_id,
"roi_type": roi.roi_type,
"coordinates": roi.coordinates,
"algorithm_type": roi.algorithm_type,
"alert_threshold": roi.alert_threshold,
"alert_cooldown": roi.alert_cooldown,
"extra_params": roi.extra_params,
})
return result
self._write_thread = threading.Thread(
target=self._write_worker,
name="SQLiteWrite",
daemon=True
)
self._write_thread.start()
self._cleanup_thread = threading.Thread(
target=self._cleanup_worker,
name="SQLiteCleanup",
daemon=True
)
self._cleanup_thread.start()
def save_alert_record(self, alert_data: Dict[str, Any]) -> bool:
"""保存告警记录"""
if not self._available:
logger.warning("数据库不可用,跳过保存告警记录")
return False
def _write_worker(self):
"""异步写入工作线程"""
batch = []
last_flush = time.time()
try:
with self.get_session() as session:
if session is None:
return False
alert = AlertRecord(
alert_id=alert_data.get("alert_id"),
camera_id=alert_data.get("camera_id"),
roi_id=alert_data.get("roi_id"),
alert_type=alert_data.get("alert_type"),
target_class=alert_data.get("target_class"),
confidence=alert_data.get("confidence"),
bbox=alert_data.get("bbox"),
message=alert_data.get("message"),
screenshot=alert_data.get("screenshot"),
status=alert_data.get("status", "pending"),
while self._running:
try:
try:
item = self._write_queue.get(timeout=1.0)
batch.append(item)
except queue.Empty:
pass
should_flush = (
len(batch) >= self.config.batch_size or
time.time() - last_flush >= self.config.flush_interval
)
session.add(alert)
session.flush()
logger.info(f"告警记录保存成功: {alert_data.get('alert_id')}")
return True
except Exception as e:
logger.error(f"保存告警记录失败: {e}")
return False
if batch and (should_flush or len(batch) >= 1000):
self._flush_batch(batch)
batch.clear()
last_flush = time.time()
except Exception as e:
logger.error(f"SQLite 写入异常: {e}")
if batch:
self._flush_batch(batch)
def update_alert_status(self, alert_id: str, status: str) -> bool:
def _flush_batch(self, batch: List[Dict[str, Any]]):
    """Write a batch of serialized alert records in one commit.

    Uses INSERT OR REPLACE keyed on the UNIQUE alert_id column, so a
    re-queued record overwrites rather than duplicates. Called only from
    the writer thread; on error the failure is logged and the batch is
    dropped (the caller clears it either way).
    """
    try:
        cursor = self._conn.cursor()
        for record in batch:
            cursor.execute("""
                INSERT OR REPLACE INTO alert_records (
                    alert_id, camera_id, roi_id, alert_type,
                    target_class, confidence, bbox, message,
                    image_path, status, created_at, processed_at
                ) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)
            """, (
                record['alert_id'],
                record['camera_id'],
                record['roi_id'],
                record['alert_type'],
                record.get('target_class'),
                record.get('confidence'),
                record.get('bbox'),
                record.get('message'),
                record.get('image_path'),
                record.get('status', 'pending'),
                record['created_at'],
                record.get('processed_at'),
            ))
        # Single commit for the whole batch — this is the write-amortization win.
        self._conn.commit()
        logger.debug(f"批量写入 {len(batch)} 条记录")
    except Exception as e:
        logger.error(f"批量写入失败: {e}")
def _cleanup_worker(self):
    """Background loop that purges expired data.

    NOTE(review): the original comment said "once a day", but the sleep is
    3600 s, so cleanup actually runs hourly — confirm which is intended.
    The deletion itself is idempotent (cutoff-based), so hourly is safe.
    """
    while self._running:
        try:
            time.sleep(3600)  # wake hourly; re-check _running before doing work
            if self._running:
                self.cleanup_old_data()
        except Exception as e:
            logger.error(f"数据清理异常: {e}")
def queue_alert(self, alert: AlertRecord):
    """Serialize an AlertRecord into SQLite-bindable values and enqueue it.

    Persistence is asynchronous: the writer thread later picks this dict
    up and stores it via _flush_batch. Datetimes become ISO-8601 strings
    to match the TEXT columns of alert_records.
    """
    record = {
        'alert_id': alert.alert_id,
        'camera_id': alert.camera_id,
        'roi_id': alert.roi_id,
        'alert_type': alert.alert_type,
        'target_class': alert.target_class,
        'confidence': alert.confidence,
        # NOTE(review): str() of a Python list, not JSON — readers of the
        # bbox column must parse that format accordingly.
        'bbox': str(alert.bbox) if alert.bbox else None,
        'message': alert.message,
        'image_path': alert.image_path,
        'status': alert.status,
        'created_at': alert.created_at.isoformat(),
        'processed_at': alert.processed_at.isoformat() if alert.processed_at else None,
    }
    self._write_queue.put(record)
def get_alerts(
    self,
    camera_id: Optional[str] = None,
    status: Optional[str] = None,
    start_time: Optional[datetime] = None,
    end_time: Optional[datetime] = None,
    limit: int = 100
) -> List[Dict[str, Any]]:
    """Query alert records, newest first, as a list of dicts.

    All filters are optional and ANDed together; every value is passed as
    a bound SQL parameter (no string interpolation into the statement).
    Time filters compare ISO-8601 strings, which sorts correctly because
    created_at is stored in that format.

    NOTE(review): this reads the shared connection (opened with
    check_same_thread=False) without any locking against the writer
    thread — confirm concurrent use is intended.
    """
    cursor = self._conn.cursor()
    query = "SELECT * FROM alert_records WHERE 1=1"
    params = []
    if camera_id:
        query += " AND camera_id = ?"
        params.append(camera_id)
    if status:
        query += " AND status = ?"
        params.append(status)
    if start_time:
        query += " AND created_at >= ?"
        params.append(start_time.isoformat())
    if end_time:
        query += " AND created_at <= ?"
        params.append(end_time.isoformat())
    query += " ORDER BY created_at DESC LIMIT ?"
    params.append(limit)
    cursor.execute(query, params)
    rows = cursor.fetchall()
    # Column order must match the CREATE TABLE definition in _init_database.
    columns = ['id', 'alert_id', 'camera_id', 'roi_id', 'alert_type',
               'target_class', 'confidence', 'bbox', 'message', 'image_path',
               'status', 'created_at', 'processed_at']
    return [dict(zip(columns, row)) for row in rows]
def update_status(self, alert_id: str, status: str) -> bool:
"""更新告警状态"""
if not self._available:
logger.warning("数据库不可用,跳过更新告警状态")
return False
try:
with self.get_session() as session:
if session is None:
return False
from sqlalchemy import update
from datetime import datetime
stmt = update(AlertRecord).where(
AlertRecord.alert_id == alert_id
).values(
status=status,
processed_at=datetime.now()
)
session.execute(stmt)
logger.info(f"告警状态更新成功: {alert_id} -> {status}")
return True
cursor = self._conn.cursor()
cursor.execute("""
UPDATE alert_records
SET status = ?, processed_at = ?
WHERE alert_id = ?
""", (status, datetime.now().isoformat(), alert_id))
self._conn.commit()
return cursor.rowcount > 0
except Exception as e:
logger.error(f"更新告警状态失败: {e}")
logger.error(f"更新状态失败: {e}")
return False
def create_tables(self):
"""创建所有表"""
if not self._available:
logger.warning("数据库不可用,跳过创建表")
return
Base.metadata.create_all(self._engine)
logger.info("数据库表创建完成")
def cleanup_old_data(self):
    """Delete alert rows (and their capture images) past the retention window.

    Returns the number of rows deleted, or 0 on failure. Image deletion is
    best-effort: a failed unlink is silently ignored so one bad file does
    not block the database purge.
    """
    try:
        cutoff = (datetime.now() - timedelta(days=self.config.retention_days)).isoformat()
        cursor = self._conn.cursor()
        # Remove the on-disk images referenced by expiring rows first, so
        # the paths are still known before the rows disappear.
        cursor.execute("SELECT image_path FROM alert_records WHERE created_at < ?", (cutoff,))
        images = cursor.fetchall()
        for (img_path,) in images:
            if img_path and os.path.exists(img_path):
                try:
                    os.remove(img_path)
                except Exception:
                    pass
        cursor.execute("DELETE FROM alert_records WHERE created_at < ?", (cutoff,))
        deleted = cursor.rowcount
        self._conn.commit()
        logger.info(f"清理完成: 删除 {deleted} 条过期记录")
        return deleted
    except Exception as e:
        logger.error(f"数据清理失败: {e}")
        return 0
def drop_tables(self):
"""删除所有表"""
if not self._available:
return
def get_statistics(self) -> Dict[str, Any]:
"""获取统计信息"""
cursor = self._conn.cursor()
Base.metadata.drop_all(self._engine)
logger.info("数据库表删除完成")
cursor.execute("SELECT COUNT(*) FROM alert_records")
total = cursor.fetchone()[0]
cursor.execute("SELECT COUNT(*) FROM alert_records WHERE status = 'pending'")
pending = cursor.fetchone()[0]
cursor.execute("SELECT COUNT(*) FROM alert_records WHERE created_at > ?",
((datetime.now() - timedelta(hours=24)).isoformat(),))
today = cursor.fetchone()[0]
db_size = os.path.getsize(self.config.db_path) / (1024 * 1024)
return {
"total_alerts": total,
"pending_alerts": pending,
"today_alerts": today,
"db_size_mb": round(db_size, 2),
"queue_size": self._write_queue.qsize(),
"retention_days": self.config.retention_days,
}
def close(self):
"""关闭数据库连接"""
if self._engine:
self._engine.dispose()
logger.info("数据库连接已关闭")
"""关闭数据库"""
self._running = False
if self._write_thread and self._write_thread.is_alive():
self._write_thread.join(timeout=10)
if self._conn:
self._conn.close()
logger.info("SQLite 数据库已关闭")
def get_database_manager() -> DatabaseManager:
"""获取数据库管理器单例"""
return DatabaseManager()
def init_database():
"""初始化数据库"""
db_manager = get_database_manager()
db_manager.create_tables()
return db_manager
def get_sqlite_manager() -> SQLiteManager:
    """Return the process-wide SQLiteManager singleton (created on first call)."""
    return SQLiteManager()

View File

@@ -10,7 +10,7 @@ from typing import List, Optional
@dataclass
class DatabaseConfig:
"""数据库配置类"""
"""数据库配置类MySQL - 云端)"""
host: str = "localhost"
port: int = 3306
username: str = "root"
@@ -21,6 +21,17 @@ class DatabaseConfig:
echo: bool = False
@dataclass
class SQLiteConfig:
"""SQLite 配置(边缘侧本地存储)"""
db_path: str = "./data/security_events.db"
image_dir: str = "./data/captures"
retention_days: int = 7
wal_mode: bool = True
batch_size: int = 100
flush_interval: float = 5.0
@dataclass
class RedisConfig:
"""Redis配置类"""
@@ -61,7 +72,7 @@ class VideoStreamConfig:
@dataclass
class InferenceConfig:
"""推理配置类"""
model_path: str = "./models/yolov8s.engine"
model_path: str = "./models/yolo11n.engine"
input_width: int = 480
input_height: int = 480
batch_size: int = 1
@@ -111,7 +122,6 @@ class Settings:
def _load_env_vars(self):
"""从环境变量加载配置"""
# 数据库配置
self.database = DatabaseConfig(
host=os.getenv("DB_HOST", "localhost"),
port=int(os.getenv("DB_PORT", "3306")),
@@ -120,14 +130,19 @@ class Settings:
database=os.getenv("DB_DATABASE", "edge_inference"),
)
# Redis配置
self.sqlite = SQLiteConfig(
db_path=os.getenv("SQLITE_DB_PATH", "./data/security_events.db"),
image_dir=os.getenv("SQLITE_IMAGE_DIR", "./data/captures"),
retention_days=int(os.getenv("SQLITE_RETENTION_DAYS", "7")),
wal_mode=os.getenv("SQLITE_WAL_MODE", "1") == "1",
)
self.redis = RedisConfig(
host=os.getenv("REDIS_HOST", "localhost"),
port=int(os.getenv("REDIS_PORT", "6379")),
password=os.getenv("REDIS_PASSWORD"),
)
# MQTT配置
self.mqtt = MQTTConfig(
broker_host=os.getenv("MQTT_BROKER_HOST", "localhost"),
broker_port=int(os.getenv("MQTT_BROKER_PORT", "1883")),
@@ -136,13 +151,11 @@ class Settings:
password=os.getenv("MQTT_PASSWORD"),
)
# 视频流配置
self.video_stream = VideoStreamConfig(
default_fps=int(os.getenv("VIDEO_DEFAULT_FPS", "5")),
reconnect_max_attempts=int(os.getenv("VIDEO_RECONNECT_ATTEMPTS", "5")),
)
# 推理配置
self.inference = InferenceConfig(
model_path=os.getenv("MODEL_PATH", "./models/yolo11n.engine"),
input_width=int(os.getenv("INPUT_WIDTH", "480")),
@@ -153,13 +166,11 @@ class Settings:
nms_threshold=float(os.getenv("NMS_THRESHOLD", "0.45")),
)
# 日志配置
self.log_level = os.getenv("LOG_LEVEL", "INFO")
self.log_dir = os.getenv("LOG_DIR", "./logs")
self.log_file_max_size = int(os.getenv("LOG_FILE_MAX_SIZE", "10485760")) # 10MB
self.log_file_max_size = int(os.getenv("LOG_FILE_MAX_SIZE", "10485760"))
self.log_file_backup_count = int(os.getenv("LOG_FILE_BACKUP_COUNT", "5"))
# 工作时间配置
self.working_hours = self._parse_working_hours()
def _parse_working_hours(self) -> List[dict]:

File diff suppressed because it is too large Load Diff

281
core/storage_manager.py Normal file
View File

@@ -0,0 +1,281 @@
"""
存储管理器模块
负责图片保存、异步写入和断网容灾
特性:
- 异步保存抓拍图片
- 断网本地缓存
- 批量同步到云端
"""
import os
import cv2
import uuid
import logging
import threading
import queue
import time
from datetime import datetime
from pathlib import Path
from typing import Optional, Dict, Any, List
from dataclasses import dataclass, field
logger = logging.getLogger(__name__)
@dataclass
class CaptureConfig:
    """Settings for saving alert capture images."""
    image_dir: str = "./data/captures"  # root directory; files go into per-date subfolders
    quality: int = 85                   # JPEG quality passed to cv2.imwrite
    max_width: int = 1920               # images larger than this box are downscaled...
    max_height: int = 1080              # ...preserving aspect ratio
    save_format: str = ".jpg"           # file extension appended to the capture filename
@dataclass
class PendingCapture:
    """A capture waiting in the asynchronous save queue."""
    image: Any       # frame data (presumably a numpy-style array as cv2 expects — TODO confirm)
    camera_id: str   # used in the output filename
    alert_id: str    # used in the output filename
    timestamp: datetime = field(default_factory=datetime.now)  # selects the YYYYMMDD folder
class ImageStorageManager:
    """Asynchronous capture-image storage (process-wide singleton).

    Alert snapshots are queued via save_capture() and written to disk by a
    background daemon thread, under <image_dir>/<YYYYMMDD>/<camera_id>_<alert_id><ext>.
    """

    _instance = None
    _lock = threading.Lock()

    def __new__(cls, config: Optional[CaptureConfig] = None):
        # Double-checked locking: only one instance is ever created.
        if cls._instance is None:
            with cls._lock:
                if cls._instance is None:
                    cls._instance = super().__new__(cls)
                    cls._instance._initialized = False
        return cls._instance

    def __init__(self, config: Optional[CaptureConfig] = None):
        # Singleton: only the first construction configures anything;
        # a config passed on later calls is ignored.
        if self._initialized:
            return
        if config is None:
            config = CaptureConfig()
        self.config = config
        self._save_queue: queue.Queue = queue.Queue()
        self._running = False
        self._save_thread: Optional[threading.Thread] = None
        self._saved_count = 0
        self._failed_count = 0
        self._init_directories()
        self._start_save_thread()
        self._initialized = True
        logger.info(f"图片存储管理器初始化: {config.image_dir}")

    def _init_directories(self):
        """Create the root image directory if it does not exist."""
        Path(self.config.image_dir).mkdir(parents=True, exist_ok=True)

    def _start_save_thread(self):
        """Start the daemon thread that drains the save queue."""
        self._running = True
        self._save_thread = threading.Thread(
            target=self._save_worker,
            name="ImageSave",
            daemon=True
        )
        self._save_thread.start()

    def _save_worker(self):
        """Consume queued captures and write them to disk until stopped."""
        while self._running:
            try:
                try:
                    capture: PendingCapture = self._save_queue.get(timeout=1.0)
                    self._save_image(capture)
                except queue.Empty:
                    pass  # periodic wake-up so _running is re-checked
            except Exception as e:
                logger.error(f"图片保存异常: {e}")

    def _save_image(self, capture: PendingCapture) -> Optional[str]:
        """Write one capture under its date folder; return the path or None on failure."""
        try:
            image = capture.image
            if image is None:
                self._failed_count += 1
                return None
            # NOTE(review): cv2.imwrite expects BGR channel order; converting
            # BGR->RGB here will swap red/blue in the saved file if frames
            # arrive in OpenCV's native BGR — confirm the producer's format.
            if len(image.shape) == 3 and image.shape[2] == 3:
                image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
            if image.shape[1] > self.config.max_width or image.shape[0] > self.config.max_height:
                # Downscale to fit the configured box, preserving aspect ratio.
                scale = min(
                    self.config.max_width / image.shape[1],
                    self.config.max_height / image.shape[0]
                )
                new_size = (
                    int(image.shape[1] * scale),
                    int(image.shape[0] * scale)
                )
                image = cv2.resize(image, new_size, interpolation=cv2.INTER_AREA)
            date_dir = capture.timestamp.strftime("%Y%m%d")
            save_dir = Path(self.config.image_dir) / date_dir
            save_dir.mkdir(parents=True, exist_ok=True)
            filename = f"{capture.camera_id}_{capture.alert_id}{self.config.save_format}"
            filepath = save_dir / filename
            success = cv2.imwrite(
                str(filepath),
                image,
                [cv2.IMWRITE_JPEG_QUALITY, self.config.quality]
            )
            if success:
                self._saved_count += 1
                logger.debug(f"图片已保存: {filepath}")
                return str(filepath)
            else:
                self._failed_count += 1
                return None
        except Exception as e:
            logger.error(f"保存图片失败: {e}")
            self._failed_count += 1
            return None

    def save_capture(
        self,
        image: Any,
        camera_id: str,
        alert_id: str,
        timestamp: Optional[datetime] = None
    ) -> Optional[str]:
        """Queue a capture for asynchronous saving.

        Returns a "<queued: ...>" marker, not the final file path; use
        get_image_path() once the background thread has written the image.
        """
        capture = PendingCapture(
            image=image,
            camera_id=camera_id,
            alert_id=alert_id,
            timestamp=timestamp or datetime.now()
        )
        self._save_queue.put(capture)
        return f"<queued: {alert_id}>"

    def get_image_path(self, camera_id: str, alert_id: str,
                       timestamp: Optional[datetime] = None) -> Optional[str]:
        """Return the saved image path, or None if it does not exist.

        `timestamp` selects the dated subdirectory the image was saved
        under and defaults to today. (Previously only today's folder was
        ever checked, so images saved on earlier days could not be found.)
        """
        date_str = (timestamp or datetime.now()).strftime("%Y%m%d")
        filename = f"{camera_id}_{alert_id}{self.config.save_format}"
        filepath = Path(self.config.image_dir) / date_str / filename
        if filepath.exists():
            return str(filepath)
        return None

    def cleanup_old_images(self, days: int = 7):
        """Delete images older than `days` (by file mtime); return the count removed."""
        cutoff = datetime.now().timestamp() - (days * 24 * 60 * 60)
        cleaned = 0
        try:
            for filepath in Path(self.config.image_dir).rglob(f"*{self.config.save_format}"):
                if filepath.stat().st_mtime < cutoff:
                    filepath.unlink()
                    cleaned += 1
            logger.info(f"清理完成: 删除 {cleaned} 张过期图片")
            return cleaned
        except Exception as e:
            logger.error(f"清理图片失败: {e}")
            return 0

    def get_statistics(self) -> Dict[str, Any]:
        """Counters and queue depth for monitoring."""
        return {
            "saved_count": self._saved_count,
            "failed_count": self._failed_count,
            "queue_size": self._save_queue.qsize(),
            "image_dir": self.config.image_dir,
        }

    def close(self):
        """Stop the save thread, then flush anything still queued.

        Previously captures still sitting in the queue at shutdown were
        silently discarded; now they are written synchronously.
        """
        self._running = False
        if self._save_thread and self._save_thread.is_alive():
            self._save_thread.join(timeout=10)
        while True:
            try:
                capture = self._save_queue.get_nowait()
            except queue.Empty:
                break
            self._save_image(capture)  # increments counters / logs on failure itself
        logger.info(f"图片存储已关闭: 成功 {self._saved_count}, 失败 {self._failed_count}")
class LocalCacheManager:
    """Local disk cache of alert payloads for network-down resilience.

    Each alert is written as an individual JSON file under
    <cache_dir>/pending and removed once successfully synced upstream.
    """

    def __init__(self, cache_dir: str = "./data/cache"):
        self.cache_dir = Path(cache_dir)
        self.cache_dir.mkdir(parents=True, exist_ok=True)
        self._pending_dir = self.cache_dir / "pending"
        self._pending_dir.mkdir(exist_ok=True)
        self._sync_lock = threading.Lock()

    def cache_alert(self, alert_data: Dict[str, Any]) -> str:
        """Persist one alert payload; return its cache id, or "" on failure."""
        import json
        cache_id = str(uuid.uuid4())
        cache_path = self._pending_dir / f"{cache_id}.json"
        try:
            with open(cache_path, 'w', encoding='utf-8') as f:
                json.dump(alert_data, f, ensure_ascii=False, indent=2)
            return cache_id
        except Exception as e:
            logger.error(f"缓存告警失败: {e}")
            return ""

    def get_pending_count(self) -> int:
        """Number of alerts waiting to be synced."""
        return len(list(self._pending_dir.glob("*.json")))

    def get_pending_alerts(self) -> List[Dict[str, Any]]:
        """Load all pending alerts; each gains a '_cache_id' key for later removal.

        A corrupt or unreadable cache file is logged and skipped instead of
        aborting the whole scan (previously the first bad file hid every
        remaining pending alert).
        """
        import json
        alerts = []
        for cache_path in self._pending_dir.glob("*.json"):
            try:
                with open(cache_path, 'r', encoding='utf-8') as f:
                    alert = json.load(f)
                alert['_cache_id'] = cache_path.stem
                alerts.append(alert)
            except Exception as e:
                logger.error(f"读取缓存告警失败: {e}")
        return alerts

    def remove_cached(self, cache_id: str):
        """Delete a cache entry after it has been synced."""
        cache_path = self._pending_dir / f"{cache_id}.json"
        if cache_path.exists():
            cache_path.unlink()

    def clear_cache(self):
        """Delete every pending cache entry."""
        for cache_path in self._pending_dir.glob("*.json"):
            cache_path.unlink()
def get_image_storage() -> ImageStorageManager:
    """Return the ImageStorageManager singleton (created on first call)."""
    return ImageStorageManager()
def get_local_cache() -> LocalCacheManager:
    """Return a LocalCacheManager for the default cache directory.

    NOTE(review): unlike get_image_storage, this constructs a NEW instance
    on every call (LocalCacheManager is not a singleton); each call re-runs
    the mkdir setup — confirm whether a shared instance was intended.
    """
    return LocalCacheManager()

View File

@@ -158,3 +158,108 @@ ConnectionRefusedError: [WinError 10061] 由于目标计算机积极拒绝,无
2026-01-29 18:11:31 | INFO | main | MQTT客户端已关闭
2026-01-29 18:11:31 | INFO | main | Edge_Inference_Service 已停止
2026-01-29 18:11:31 | INFO | main | 运行统计: {'start_time': datetime.datetime(2026, 1, 29, 18, 7, 35, 334780), 'total_frames_processed': 0, 'total_alerts_generated': 0, 'uptime_seconds': 236.659473}
2026-01-30 09:17:29 | INFO | main | Edge_Inference_Service 初始化开始
2026-01-30 09:17:29 | INFO | main | ==================================================
2026-01-30 09:17:29 | INFO | main | Edge_Inference_Service 启动
2026-01-30 09:17:29 | INFO | main | ==================================================
2026-01-30 09:17:33 | WARNING | main | 数据库不可见,服务将在无数据库模式下运行
2026-01-30 09:17:37 | INFO | main | 配置管理器初始化成功
2026-01-30 09:17:37 | INFO | main | 流管理器初始化成功
2026-01-30 09:17:37 | INFO | main | 图像预处理器初始化完成: 输入尺寸 480x480, Batch大小 1-8, FP16模式 True
2026-01-30 09:17:37 | INFO | main | 预处理器初始化成功
2026-01-30 09:17:37 | INFO | main | TensorRT引擎初始化配置: 模型=./models/yolo11n.engine, 输入尺寸=480x480, Batch=1, FP16=True
2026-01-30 09:17:37 | ERROR | main | TensorRT引擎加载失败: negative dimensions are not allowed
Traceback (most recent call last):
File "c:\Users\16337\PycharmProjects\ai_edge\core\tensorrt_engine.py", line 102, in load_engine
self._allocate_memory_pool()
File "c:\Users\16337\PycharmProjects\ai_edge\core\tensorrt_engine.py", line 152, in _allocate_memory_pool
self._memory_pool["input"] = np.zeros(shape, dtype=dtype)
ValueError: negative dimensions are not allowed
2026-01-30 09:17:37 | INFO | main | 推理引擎加载成功: ./models/yolo11n.engine
2026-01-30 09:17:37 | INFO | main | 后处理器初始化完成: NMS阈值=0.45, 置信度阈值=0.5
2026-01-30 09:17:37 | INFO | main | 后处理器初始化成功
2026-01-30 09:17:37 | INFO | main | 结果上报器初始化成功
2026-01-30 09:17:37 | INFO | main | 所有组件初始化完成
2026-01-30 09:17:37 | INFO | main | 已启动 0 个视频流
2026-01-30 09:17:37 | INFO | main | Edge_Inference_Service 已启动
2026-01-30 09:17:41 | ERROR | main | MQTT连接失败: [WinError 10061] 由于目标计算机积极拒绝,无法连接。
Traceback (most recent call last):
File "c:\Users\16337\PycharmProjects\ai_edge\core\result_reporter.py", line 220, in connect
self._client.connect(
File "C:\Users\16337\miniconda3\envs\yolo\lib\site-packages\paho\mqtt\client.py", line 914, in connect
return self.reconnect()
File "C:\Users\16337\miniconda3\envs\yolo\lib\site-packages\paho\mqtt\client.py", line 1044, in reconnect
sock = self._create_socket_connection()
File "C:\Users\16337\miniconda3\envs\yolo\lib\site-packages\paho\mqtt\client.py", line 3685, in _create_socket_connection
return socket.create_connection(addr, timeout=self._connect_timeout, source_address=source)
File "C:\Users\16337\miniconda3\envs\yolo\lib\socket.py", line 857, in create_connection
raise err
File "C:\Users\16337\miniconda3\envs\yolo\lib\socket.py", line 845, in create_connection
sock.connect(sa)
ConnectionRefusedError: [WinError 10061] 由于目标计算机积极拒绝,无法连接。
2026-01-30 09:17:41 | WARNING | main | 消息已加入待发送队列: edge/heartbeat/edge_inference_device
2026-01-30 09:18:11 | WARNING | main | 消息已加入待发送队列: edge/heartbeat/edge_inference_device
2026-01-30 09:18:41 | WARNING | main | 消息已加入待发送队列: edge/heartbeat/edge_inference_device
2026-01-30 09:19:11 | WARNING | main | 消息已加入待发送队列: edge/heartbeat/edge_inference_device
2026-01-30 09:19:41 | WARNING | main | 消息已加入待发送队列: edge/heartbeat/edge_inference_device
2026-01-30 09:20:11 | WARNING | main | 消息已加入待发送队列: edge/heartbeat/edge_inference_device
2026-01-30 09:20:41 | WARNING | main | 消息已加入待发送队列: edge/heartbeat/edge_inference_device
2026-01-30 09:21:11 | WARNING | main | 消息已加入待发送队列: edge/heartbeat/edge_inference_device
2026-01-30 09:21:13 | INFO | main | 收到信号 2, 正在停止服务...
2026-01-30 09:21:13 | INFO | main | 已停止所有视频流
2026-01-30 09:21:13 | INFO | main | 已停止所有视频流
2026-01-30 09:21:13 | INFO | main | 所有引擎已释放
2026-01-30 09:21:13 | INFO | main | MQTT连接已断开
2026-01-30 09:21:13 | INFO | main | MQTT客户端已关闭
2026-01-30 09:21:13 | INFO | main | Edge_Inference_Service 已停止
2026-01-30 09:21:13 | INFO | main | 运行统计: {'start_time': datetime.datetime(2026, 1, 30, 9, 17, 37, 843486), 'total_frames_processed': 0, 'total_alerts_generated': 0, 'uptime_seconds': 215.781095}
2026-01-30 09:21:16 | INFO | main | Edge_Inference_Service 初始化开始
2026-01-30 09:21:16 | INFO | main | ==================================================
2026-01-30 09:21:16 | INFO | main | Edge_Inference_Service 启动
2026-01-30 09:21:16 | INFO | main | ==================================================
2026-01-30 09:21:20 | WARNING | main | 数据库不可见,服务将在无数据库模式下运行
2026-01-30 09:21:24 | INFO | main | 配置管理器初始化成功
2026-01-30 09:21:24 | INFO | main | 流管理器初始化成功
2026-01-30 09:21:24 | INFO | main | 图像预处理器初始化完成: 输入尺寸 480x480, Batch大小 1-8, FP16模式 True
2026-01-30 09:21:24 | INFO | main | 预处理器初始化成功
2026-01-30 09:21:24 | INFO | main | TensorRT引擎初始化配置: 模型=./models/yolo11n.engine, 输入尺寸=480x480, Batch=1, FP16=True
2026-01-30 09:21:24 | INFO | main | 连接事件: load - TensorRT -> ./models/yolo11n.engine
2026-01-30 09:21:24 | INFO | main | TensorRT引擎加载成功: ./models/yolo11n.engine
2026-01-30 09:21:24 | INFO | main | 引擎已加载: default
2026-01-30 09:21:24 | INFO | main | 推理引擎加载成功: ./models/yolo11n.engine
2026-01-30 09:21:24 | INFO | main | 后处理器初始化完成: NMS阈值=0.45, 置信度阈值=0.5
2026-01-30 09:21:24 | INFO | main | 后处理器初始化成功
2026-01-30 09:21:24 | INFO | main | 结果上报器初始化成功
2026-01-30 09:21:24 | INFO | main | 所有组件初始化完成
2026-01-30 09:21:24 | INFO | main | 已启动 0 个视频流
2026-01-30 09:21:24 | INFO | main | Edge_Inference_Service 已启动
2026-01-30 09:21:28 | ERROR | main | MQTT连接失败: [WinError 10061] 由于目标计算机积极拒绝,无法连接。
Traceback (most recent call last):
File "c:\Users\16337\PycharmProjects\ai_edge\core\result_reporter.py", line 220, in connect
self._client.connect(
File "C:\Users\16337\miniconda3\envs\yolo\lib\site-packages\paho\mqtt\client.py", line 914, in connect
return self.reconnect()
File "C:\Users\16337\miniconda3\envs\yolo\lib\site-packages\paho\mqtt\client.py", line 1044, in reconnect
sock = self._create_socket_connection()
File "C:\Users\16337\miniconda3\envs\yolo\lib\site-packages\paho\mqtt\client.py", line 3685, in _create_socket_connection
return socket.create_connection(addr, timeout=self._connect_timeout, source_address=source)
File "C:\Users\16337\miniconda3\envs\yolo\lib\socket.py", line 857, in create_connection
raise err
File "C:\Users\16337\miniconda3\envs\yolo\lib\socket.py", line 845, in create_connection
sock.connect(sa)
ConnectionRefusedError: [WinError 10061] 由于目标计算机积极拒绝,无法连接。
2026-01-30 09:21:28 | WARNING | main | 消息已加入待发送队列: edge/heartbeat/edge_inference_device
2026-01-30 09:21:58 | WARNING | main | 消息已加入待发送队列: edge/heartbeat/edge_inference_device
2026-01-30 09:22:28 | WARNING | main | 消息已加入待发送队列: edge/heartbeat/edge_inference_device
2026-01-30 09:22:58 | WARNING | main | 消息已加入待发送队列: edge/heartbeat/edge_inference_device
2026-01-30 09:23:28 | WARNING | main | 消息已加入待发送队列: edge/heartbeat/edge_inference_device
2026-01-30 09:23:58 | WARNING | main | 消息已加入待发送队列: edge/heartbeat/edge_inference_device
2026-01-30 09:24:28 | WARNING | main | 消息已加入待发送队列: edge/heartbeat/edge_inference_device
2026-01-30 09:24:58 | WARNING | main | 消息已加入待发送队列: edge/heartbeat/edge_inference_device
2026-01-30 09:25:28 | WARNING | main | 消息已加入待发送队列: edge/heartbeat/edge_inference_device
2026-01-30 09:25:58 | WARNING | main | 消息已加入待发送队列: edge/heartbeat/edge_inference_device
2026-01-30 09:26:28 | WARNING | main | 消息已加入待发送队列: edge/heartbeat/edge_inference_device
2026-01-30 09:26:58 | WARNING | main | 消息已加入待发送队列: edge/heartbeat/edge_inference_device
2026-01-30 09:27:08 | INFO | main | 收到信号 2, 正在停止服务...
2026-01-30 09:27:08 | INFO | main | 已停止所有视频流
2026-01-30 09:27:08 | INFO | main | 已停止所有视频流

View File

@@ -122,3 +122,40 @@ Traceback (most recent call last):
File "C:\Users\16337\miniconda3\envs\yolo\lib\socket.py", line 845, in create_connection
sock.connect(sa)
ConnectionRefusedError: [WinError 10061] 由于目标计算机积极拒绝,无法连接。
2026-01-30 09:17:37 | ERROR | main | TensorRT引擎加载失败: negative dimensions are not allowed
Traceback (most recent call last):
File "c:\Users\16337\PycharmProjects\ai_edge\core\tensorrt_engine.py", line 102, in load_engine
self._allocate_memory_pool()
File "c:\Users\16337\PycharmProjects\ai_edge\core\tensorrt_engine.py", line 152, in _allocate_memory_pool
self._memory_pool["input"] = np.zeros(shape, dtype=dtype)
ValueError: negative dimensions are not allowed
2026-01-30 09:17:41 | ERROR | main | MQTT连接失败: [WinError 10061] 由于目标计算机积极拒绝,无法连接。
Traceback (most recent call last):
File "c:\Users\16337\PycharmProjects\ai_edge\core\result_reporter.py", line 220, in connect
self._client.connect(
File "C:\Users\16337\miniconda3\envs\yolo\lib\site-packages\paho\mqtt\client.py", line 914, in connect
return self.reconnect()
File "C:\Users\16337\miniconda3\envs\yolo\lib\site-packages\paho\mqtt\client.py", line 1044, in reconnect
sock = self._create_socket_connection()
File "C:\Users\16337\miniconda3\envs\yolo\lib\site-packages\paho\mqtt\client.py", line 3685, in _create_socket_connection
return socket.create_connection(addr, timeout=self._connect_timeout, source_address=source)
File "C:\Users\16337\miniconda3\envs\yolo\lib\socket.py", line 857, in create_connection
raise err
File "C:\Users\16337\miniconda3\envs\yolo\lib\socket.py", line 845, in create_connection
sock.connect(sa)
ConnectionRefusedError: [WinError 10061] 由于目标计算机积极拒绝,无法连接。
2026-01-30 09:21:28 | ERROR | main | MQTT连接失败: [WinError 10061] 由于目标计算机积极拒绝,无法连接。
Traceback (most recent call last):
File "c:\Users\16337\PycharmProjects\ai_edge\core\result_reporter.py", line 220, in connect
self._client.connect(
File "C:\Users\16337\miniconda3\envs\yolo\lib\site-packages\paho\mqtt\client.py", line 914, in connect
return self.reconnect()
File "C:\Users\16337\miniconda3\envs\yolo\lib\site-packages\paho\mqtt\client.py", line 1044, in reconnect
sock = self._create_socket_connection()
File "C:\Users\16337\miniconda3\envs\yolo\lib\site-packages\paho\mqtt\client.py", line 3685, in _create_socket_connection
return socket.create_connection(addr, timeout=self._connect_timeout, source_address=source)
File "C:\Users\16337\miniconda3\envs\yolo\lib\socket.py", line 857, in create_connection
raise err
File "C:\Users\16337\miniconda3\envs\yolo\lib\socket.py", line 845, in create_connection
sock.connect(sa)
ConnectionRefusedError: [WinError 10061] 由于目标计算机积极拒绝,无法连接。

BIN
models/yolo11n.engine Normal file

Binary file not shown.

Binary file not shown.