Files
Security_AI_integrated/api/alarm.py
2026-01-20 17:42:18 +08:00

143 lines
4.6 KiB
Python

import os
from datetime import datetime
from typing import List, Optional

from fastapi import APIRouter, Depends, HTTPException, Query
from sqlalchemy.orm import Session

from db.crud import (
    create_alarm,
    get_alarm_stats,
    get_alarms,
    update_alarm,
)
from db.models import get_db
from inference.pipeline import get_pipeline
router = APIRouter(prefix="/api/alarms", tags=["告警管理"])
@router.get("", response_model=List[dict])
def list_alarms(
    camera_id: Optional[int] = None,
    event_type: Optional[str] = None,
    limit: int = Query(default=100, le=1000),
    offset: int = Query(default=0, ge=0),
    db: Session = Depends(get_db),
):
    """Return a paginated list of alarms, optionally filtered by camera and event type.

    Each alarm is serialized to a plain dict; `created_at` is emitted as an
    ISO-8601 string (or None when the column is unset).
    """
    records = get_alarms(
        db, camera_id=camera_id, event_type=event_type, limit=limit, offset=offset
    )
    payload = []
    for record in records:
        created = record.created_at
        payload.append(
            {
                "id": record.id,
                "camera_id": record.camera_id,
                "roi_id": record.roi_id,
                "event_type": record.event_type,
                "confidence": record.confidence,
                "snapshot_path": record.snapshot_path,
                "llm_checked": record.llm_checked,
                "llm_result": record.llm_result,
                "processed": record.processed,
                "created_at": created.isoformat() if created else None,
            }
        )
    return payload
@router.get("/stats")
def get_alarm_statistics(db: Session = Depends(get_db)):
    """Return aggregate alarm statistics as computed by the CRUD layer."""
    return get_alarm_stats(db)
@router.get("/{alarm_id}", response_model=dict)
def get_alarm(alarm_id: int, db: Session = Depends(get_db)):
    """Return a single alarm by id, or 404 when no such alarm exists.

    The CRUD layer exposes no lookup-by-id helper, so we page through
    ``get_alarms()`` until the id is found or the result set is exhausted.
    (The previous implementation fetched only one record — ``limit=1`` —
    so every id except the newest alarm's produced a spurious 404.)
    """
    alarm = None
    offset = 0
    page_size = 500  # batch size for the paged scan
    while alarm is None:
        page = get_alarms(db, limit=page_size, offset=offset)
        if not page:
            break  # exhausted all alarms without a match
        alarm = next((a for a in page if a.id == alarm_id), None)
        offset += page_size
    if alarm is None:
        raise HTTPException(status_code=404, detail="告警不存在")
    created = alarm.created_at
    return {
        "id": alarm.id,
        "camera_id": alarm.camera_id,
        "roi_id": alarm.roi_id,
        "event_type": alarm.event_type,
        "confidence": alarm.confidence,
        "snapshot_path": alarm.snapshot_path,
        "llm_checked": alarm.llm_checked,
        "llm_result": alarm.llm_result,
        "processed": alarm.processed,
        "created_at": created.isoformat() if created else None,
    }
@router.put("/{alarm_id}")
def update_alarm_status(
    alarm_id: int,
    llm_checked: Optional[bool] = None,
    llm_result: Optional[str] = None,
    processed: Optional[bool] = None,
    db: Session = Depends(get_db),
):
    """Partially update an alarm's review/processing flags.

    Fields left as None are not modified (delegated to the CRUD layer);
    raises 404 when the alarm does not exist.
    """
    updated = update_alarm(
        db,
        alarm_id,
        llm_checked=llm_checked,
        llm_result=llm_result,
        processed=processed,
    )
    if not updated:
        raise HTTPException(status_code=404, detail="告警不存在")
    return {"message": "更新成功"}
@router.post("/{alarm_id}/llm-check")
async def trigger_llm_check(alarm_id: int, db: Session = Depends(get_db)):
    """Run a multimodal-LLM review of an alarm's snapshot and persist the verdict.

    Fixes vs. the previous version:
    - the alarm is located by paging through ``get_alarms()`` instead of
      fetching a single record (``limit=1``), which 404'd on almost every id;
    - ``os`` is now imported at module level (``os.path.exists`` previously
      raised ``NameError``);
    - ``HTTPException``s raised inside the try block (e.g. "LLM disabled")
      are re-raised as-is instead of being wrapped into a generic 500.
    """
    # Locate the alarm; the CRUD layer has no lookup-by-id helper.
    alarm = None
    offset = 0
    page_size = 500
    while alarm is None:
        page = get_alarms(db, limit=page_size, offset=offset)
        if not page:
            break
        alarm = next((a for a in page if a.id == alarm_id), None)
        offset += page_size
    if alarm is None:
        raise HTTPException(status_code=404, detail="告警不存在")
    if not alarm.snapshot_path or not os.path.exists(alarm.snapshot_path):
        raise HTTPException(status_code=400, detail="截图不存在")
    try:
        from config import get_config

        config = get_config()
        if not config.llm.enabled:
            raise HTTPException(status_code=400, detail="大模型功能未启用")

        # Inline the snapshot as a base64 data URL for the vision model.
        import base64

        with open(alarm.snapshot_path, "rb") as f:
            img_base64 = base64.b64encode(f.read()).decode("utf-8")

        from openai import OpenAI

        client = OpenAI(
            api_key=config.llm.api_key,
            base_url=config.llm.base_url,
        )
        prompt = """分析这张监控截图,判断是否存在异常行为。请简要说明:
1. 画面中是否有人
2. 人员位置和行为
3. 是否存在异常"""
        response = client.chat.completions.create(
            model=config.llm.model,
            messages=[
                {
                    "role": "user",
                    "content": [
                        {"type": "image_url", "image_url": {"url": f"data:image/jpeg;base64,{img_base64}"}},
                        {"type": "text", "text": prompt},
                    ],
                }
            ],
        )
        result = response.choices[0].message.content
        # Record that this alarm has been LLM-reviewed along with the verdict.
        update_alarm(db, alarm_id, llm_checked=True, llm_result=result)
        return {"message": "大模型分析完成", "result": result}
    except HTTPException:
        # Preserve deliberate 4xx responses instead of masking them as 500.
        raise
    except Exception as e:
        raise HTTPException(status_code=500, detail=f"大模型调用失败: {str(e)}")
@router.get("/queue/size")
def get_event_queue_size():
    """Report the current and configured maximum size of the inference event queue."""
    pipeline = get_pipeline()
    current = len(pipeline.event_queue)
    capacity = pipeline.config.inference.event_queue_maxlen
    return {"size": current, "max_size": capacity}