Files
Test_AI/optimized_multi_camera_tensorrt.py
2026-01-20 11:14:10 +08:00

491 lines
17 KiB
Python
Raw Blame History

This file contains ambiguous Unicode characters

This file contains Unicode characters that might be confused with other characters. If you think that this is intentional, you can safely ignore this warning. Use the Escape button to reveal them.

#!/usr/bin/env python3
"""
Optimized multi-camera TensorRT inference script.

Supports:
1. Concurrent inference over multiple camera streams
2. Dynamic input sizes 320~640
3. Batched-inference optimization
4. Detailed performance statistics
5. High GPU utilization
"""
import cv2
import numpy as np
import yaml
import time
import datetime
import threading
import queue
import json
import os
from collections import defaultdict
from ultralytics import YOLO
class PerformanceStats:
    """Thread-safe per-camera performance statistics.

    Tracks frame count and per-frame inference latencies, and derives
    throughput (FPS) and latency percentiles on demand.
    """

    def __init__(self, cam_id):
        self.cam_id = cam_id
        self.frame_count = 0          # frames whose inference was recorded
        self.inference_times = []     # per-frame inference latency, milliseconds
        self.start_time = None        # wall-clock start, set by start()
        self.lock = threading.Lock()  # guards frame_count / inference_times

    def start(self):
        """Mark the beginning of the measurement window."""
        self.start_time = time.time()

    def record_inference(self, inference_time_ms):
        """Record one frame's inference time in milliseconds. Thread-safe."""
        with self.lock:
            self.inference_times.append(inference_time_ms)
            self.frame_count += 1

    def get_stats(self):
        """Return a summary dict, or None if start() was never called or no
        frames were recorded.

        Keys: cam_id, total_frames, elapsed_time, avg_fps, avg_inference_ms,
        p50_inference_ms, p95_inference_ms, p99_inference_ms.
        """
        with self.lock:
            if not self.start_time or self.frame_count == 0:
                return None
            elapsed = time.time() - self.start_time
            avg_fps = self.frame_count / elapsed if elapsed > 0 else 0
            if self.inference_times:
                avg_ms = float(np.mean(self.inference_times))
                # One percentile call instead of three separate passes;
                # float() keeps the values plain-JSON serializable.
                p50, p95, p99 = (
                    float(v)
                    for v in np.percentile(self.inference_times, [50, 95, 99])
                )
            else:
                avg_ms = p50 = p95 = p99 = 0
            return {
                'cam_id': self.cam_id,
                'total_frames': self.frame_count,
                'elapsed_time': elapsed,
                'avg_fps': avg_fps,
                'avg_inference_ms': avg_ms,
                'p50_inference_ms': p50,
                'p95_inference_ms': p95,
                'p99_inference_ms': p99,
            }
class CameraReader:
    """Camera frame reader: pulls frames from an RTSP stream on a background thread."""

    def __init__(self, cam_id, rtsp_url, target_size=640):
        self.cam_id = cam_id
        self.rtsp_url = rtsp_url
        self.target_size = target_size
        # Tiny queue: only the freshest frames are kept, stale ones get dropped.
        self.frame_queue = queue.Queue(maxsize=2)
        self.running = True
        self.cap = None
        self.thread = None
        # Per-camera performance statistics.
        self.stats = PerformanceStats(cam_id)

    def start(self):
        """Launch the background read loop and start the stats clock."""
        self.thread = threading.Thread(target=self._read_loop, daemon=True)
        self.thread.start()
        self.stats.start()

    def _read_loop(self):
        """Continuously read, resize and enqueue frames until stop() is called."""
        try:
            self.cap = cv2.VideoCapture(self.rtsp_url, cv2.CAP_FFMPEG)
            if not self.cap.isOpened():
                print(f"[{self.cam_id}] ⚠️ 无法打开视频流")
                return
            self.cap.set(cv2.CAP_PROP_BUFFERSIZE, 1)
            print(f"[{self.cam_id}] ✅ 视频流已连接")
            while self.running:
                ok, frame = self.cap.read()
                if not ok:
                    time.sleep(0.01)
                    continue
                # Resize to the square target resolution when needed.
                if frame.shape[:2] != (self.target_size, self.target_size):
                    frame = cv2.resize(frame, (self.target_size, self.target_size))
                # Queue full -> discard the oldest frame so the newest wins.
                if self.frame_queue.full():
                    try:
                        self.frame_queue.get_nowait()
                    except queue.Empty:
                        pass
                self.frame_queue.put(frame)
        except Exception as exc:
            print(f"[{self.cam_id}] ❌ 读取线程异常: {exc}")
        finally:
            if self.cap is not None:
                self.cap.release()

    def get_frame(self):
        """Return the newest queued frame without blocking, or None if empty."""
        try:
            return self.frame_queue.get_nowait()
        except queue.Empty:
            return None

    def stop(self):
        """Signal the read loop to exit and wait briefly for the thread."""
        self.running = False
        if self.thread is not None:
            self.thread.join(timeout=3.0)
class BatchInferenceEngine:
    """Batched inference engine.

    Accumulates frames from multiple cameras into a shared buffer and runs
    them through the YOLO/TensorRT model in batches to improve GPU utilization.
    """

    def __init__(self, model_path, batch_size=4, imgsz=640, conf_thresh=0.45):
        self.model_path = model_path
        self.batch_size = batch_size
        self.imgsz = imgsz
        self.conf_thresh = conf_thresh
        print(f"🚀 加载 TensorRT 引擎: {model_path}")
        # A TensorRT engine is already a GPU model; no .to('cuda') needed.
        self.model = YOLO(model_path, task='detect')
        print(f"✅ 引擎加载成功,批次大小: {batch_size}")
        self.batch_buffer = []        # pending frames awaiting inference
        self.batch_cam_ids = []       # camera id matching each pending frame
        self.lock = threading.Lock()  # guards the two buffers above

    def add_to_batch(self, cam_id, frame):
        """Queue one frame; return True when a full batch is ready."""
        with self.lock:
            self.batch_buffer.append(frame)
            self.batch_cam_ids.append(cam_id)
            return len(self.batch_buffer) >= self.batch_size

    def _run_inference(self, frames, cam_ids, fail_msg):
        """Run the model on `frames`.

        Returns [(cam_id, result, per_frame_ms), ...], or [] if inference
        raised (the error is printed with `fail_msg` as prefix).
        """
        start_time = time.time()
        try:
            results = self.model.predict(
                frames,
                imgsz=self.imgsz,
                conf=self.conf_thresh,
                verbose=False,
                device=0,     # GPU 0
                half=True,
                classes=[0],  # person only
            )
        except Exception as e:
            print(f"{fail_msg}: {e}")
            return []
        # Amortize the batch latency over its frames (milliseconds per frame).
        per_frame_time = (time.time() - start_time) * 1000 / len(frames)
        return [(cam_ids[i], results[i], per_frame_time) for i in range(len(frames))]

    def infer_batch(self):
        """Pop up to batch_size queued frames and infer them.

        Returns [] when nothing is queued. The lock is held only while the
        buffers are sliced, so producers can keep queueing frames during the
        (comparatively slow) GPU inference.
        """
        with self.lock:
            if not self.batch_buffer:
                return []
            frames = self.batch_buffer[:self.batch_size]
            cam_ids = self.batch_cam_ids[:self.batch_size]
            self.batch_buffer = self.batch_buffer[self.batch_size:]
            self.batch_cam_ids = self.batch_cam_ids[self.batch_size:]
        return self._run_inference(frames, cam_ids, "❌ 批量推理失败")

    def get_remaining_batch(self):
        """Drain and infer whatever frames are still queued (used at shutdown)."""
        with self.lock:
            if not self.batch_buffer:
                return []
            frames = self.batch_buffer
            cam_ids = self.batch_cam_ids
            self.batch_buffer = []
            self.batch_cam_ids = []
        return self._run_inference(frames, cam_ids, "❌ 剩余批次推理失败")
class MultiCameraInferenceSystem:
    """Multi-camera inference system.

    Wires one CameraReader per configured camera to a shared
    BatchInferenceEngine, runs a timed benchmark loop, and produces a
    console + JSON performance report.
    """

    def __init__(self, config_path, model_path, batch_size=4, target_size=640, max_cameras=None):
        """Load the YAML config and build readers plus the inference engine.

        Args:
            config_path: YAML file with 'cameras' (list of {id, rtsp_url})
                and 'model' ({conf_threshold}) sections.
            model_path: path to the TensorRT engine file.
            batch_size: frames per inference batch.
            target_size: square input resolution fed to the model.
            max_cameras: optional cap on how many cameras to use.
        """
        self.config_path = config_path
        self.model_path = model_path
        self.batch_size = batch_size
        self.target_size = target_size
        with open(config_path, 'r', encoding='utf-8') as f:
            cfg = yaml.safe_load(f)
        cameras = cfg['cameras']
        if max_cameras:
            cameras = cameras[:max_cameras]
        # One reader (and one background thread, once started) per camera.
        self.camera_readers = {}
        for cam_cfg in cameras:
            cam_id = cam_cfg['id']
            rtsp_url = cam_cfg['rtsp_url']
            self.camera_readers[cam_id] = CameraReader(cam_id, rtsp_url, target_size)
        print(f"✅ 初始化 {len(self.camera_readers)} 个摄像头")
        model_cfg = cfg['model']
        self.inference_engine = BatchInferenceEngine(
            model_path,
            batch_size=batch_size,
            imgsz=target_size,
            conf_thresh=model_cfg['conf_threshold'],
        )
        self.running = False

    def start(self):
        """Start all camera readers and wait briefly for streams to connect."""
        print(f"\n{'='*60}")
        print("启动多摄像头推理系统")
        print(f"{'='*60}")
        print(f"摄像头数量: {len(self.camera_readers)}")
        print(f"批次大小: {self.batch_size}")
        print(f"输入尺寸: {self.target_size}x{self.target_size}")
        print(f"{'='*60}\n")
        for reader in self.camera_readers.values():
            reader.start()
        print("⏳ 等待摄像头连接...")
        time.sleep(3)  # give the RTSP streams a moment to open
        self.running = True

    def run(self, test_duration=60):
        """Main benchmark loop.

        Collects frames from every camera, feeds them to the batch engine,
        records per-camera latency stats, and prints progress every 5 s.
        Runs for test_duration seconds (or until interrupted), then drains
        the remaining batch and generates the final report.
        """
        print(f"🚀 开始推理,测试时长: {test_duration}\n")
        start_time = time.time()
        last_print_time = start_time
        total_frames = 0
        try:
            while self.running and (time.time() - start_time) < test_duration:
                # Gather at most one frame per camera on each pass.
                frames_collected = 0
                for cam_id, reader in self.camera_readers.items():
                    frame = reader.get_frame()
                    if frame is None:
                        continue
                    frames_collected += 1
                    if self.inference_engine.add_to_batch(cam_id, frame):
                        # Full batch ready: infer and record per-camera stats.
                        # Distinct loop variable so the outer camera-loop
                        # `cam_id` is not shadowed.
                        for res_cam_id, result, inference_time in self.inference_engine.infer_batch():
                            self.camera_readers[res_cam_id].stats.record_inference(inference_time)
                            total_frames += 1
                if frames_collected == 0:
                    time.sleep(0.001)  # idle briefly when no camera produced a frame
                # Progress line every 5 seconds.
                current_time = time.time()
                if current_time - last_print_time >= 5.0:
                    elapsed = current_time - start_time
                    avg_fps = total_frames / elapsed if elapsed > 0 else 0
                    print(f"⏱️ {elapsed:.0f}s | 总帧数: {total_frames} | 平均FPS: {avg_fps:.1f}")
                    last_print_time = current_time
        except KeyboardInterrupt:
            print("\n⏹️ 测试被用户中断")
        finally:
            # Flush any frames still sitting in the batch buffer.
            for res_cam_id, result, inference_time in self.inference_engine.get_remaining_batch():
                self.camera_readers[res_cam_id].stats.record_inference(inference_time)
                total_frames += 1
            self.generate_report(total_frames, time.time() - start_time)

    def generate_report(self, total_frames, elapsed_time):
        """Print overall and per-camera performance, then save the data as JSON.

        Writes a timestamped JSON file under ./multi_camera_results/.
        """
        print(f"\n{'='*60}")
        print("性能测试报告")
        print(f"{'='*60}\n")
        # Per-camera summaries plus a flat list of every inference latency.
        all_stats = []
        all_inference_times = []
        for cam_id, reader in self.camera_readers.items():
            stats = reader.stats.get_stats()
            if stats:
                all_stats.append(stats)
                all_inference_times.extend(reader.stats.inference_times)
        avg_fps = total_frames / elapsed_time if elapsed_time > 0 else 0
        print(f"总体性能:")
        print(f" 总帧数: {total_frames}")
        print(f" 测试时长: {elapsed_time:.1f}")
        print(f" 平均FPS: {avg_fps:.1f}")
        if all_inference_times:
            print(f" 平均推理延迟: {np.mean(all_inference_times):.1f}ms")
            print(f" P50推理延迟: {np.percentile(all_inference_times, 50):.1f}ms")
            print(f" P95推理延迟: {np.percentile(all_inference_times, 95):.1f}ms")
            print(f" P99推理延迟: {np.percentile(all_inference_times, 99):.1f}ms")
        print(f"\n各摄像头性能:")
        print(f"{'摄像头ID':<15} {'帧数':<10} {'FPS':<10} {'平均延迟(ms)':<15} {'P95延迟(ms)':<15}")
        print(f"{'-'*70}")
        for stats in sorted(all_stats, key=lambda x: x['cam_id']):
            print(f"{stats['cam_id']:<15} {stats['total_frames']:<10} "
                  f"{stats['avg_fps']:<10.1f} {stats['avg_inference_ms']:<15.1f} "
                  f"{stats['p95_inference_ms']:<15.1f}")
        # Persist everything to a timestamped JSON file.
        output_dir = "multi_camera_results"
        os.makedirs(output_dir, exist_ok=True)
        results_data = {
            'total_frames': total_frames,
            'elapsed_time': elapsed_time,
            'avg_fps': avg_fps,
            'avg_inference_ms': np.mean(all_inference_times) if all_inference_times else 0,
            'p50_inference_ms': np.percentile(all_inference_times, 50) if all_inference_times else 0,
            'p95_inference_ms': np.percentile(all_inference_times, 95) if all_inference_times else 0,
            'p99_inference_ms': np.percentile(all_inference_times, 99) if all_inference_times else 0,
            'camera_stats': all_stats,
            'batch_size': self.batch_size,
            'target_size': self.target_size,
            'num_cameras': len(self.camera_readers),
            'timestamp': datetime.datetime.now().isoformat()
        }
        json_file = os.path.join(output_dir, f"results_{datetime.datetime.now().strftime('%Y%m%d_%H%M%S')}.json")
        with open(json_file, 'w', encoding='utf-8') as f:
            json.dump(results_data, f, indent=2, ensure_ascii=False)
        print(f"\n✅ 结果已保存: {json_file}")

    def stop(self):
        """Clear the run flag and shut down every camera reader."""
        print("\n正在停止系统...")
        self.running = False
        for reader in self.camera_readers.values():
            reader.stop()
        print("系统已停止")
def main():
    """CLI entry point: parse arguments, validate paths, then build and run the system."""
    import argparse
    parser = argparse.ArgumentParser(description='多摄像头TensorRT推理系统')
    parser.add_argument('--config', default='config.yaml', help='配置文件路径')
    parser.add_argument('--model', default='C:/Users/16337/PycharmProjects/Security/yolo11n.engine',
                        help='TensorRT引擎路径')
    parser.add_argument('--batch-size', type=int, default=4, help='批次大小')
    parser.add_argument('--target-size', type=int, default=640, help='输入尺寸')
    parser.add_argument('--duration', type=int, default=60, help='测试时长(秒)')
    parser.add_argument('--max-cameras', type=int, default=None, help='最大摄像头数量')
    args = parser.parse_args()

    print("多摄像头 TensorRT 推理系统")
    print("=" * 60)

    # Bail out early if either required file is missing.
    if not os.path.exists(args.config):
        print(f"❌ 配置文件不存在: {args.config}")
        return
    if not os.path.exists(args.model):
        print(f"❌ TensorRT引擎不存在: {args.model}")
        return

    try:
        system = MultiCameraInferenceSystem(
            config_path=args.config,
            model_path=args.model,
            batch_size=args.batch_size,
            target_size=args.target_size,
            max_cameras=args.max_cameras,
        )
        system.start()
        system.run(test_duration=args.duration)
        system.stop()
        print("\n🎉 测试完成!")
    except Exception as exc:
        print(f"\n❌ 系统异常: {exc}")
        import traceback
        traceback.print_exc()
if __name__ == "__main__":
    # Script entry point: run main() and surface any uncaught errors.
    try:
        main()
    except KeyboardInterrupt:
        print("\n\n⏹️ 程序被用户中断")
    except Exception as exc:
        print(f"\n❌ 程序异常: {exc}")
        import traceback
        traceback.print_exc()