fix(edge): 修复 batch 推理超过 MAX_BATCH_SIZE 导致缓冲区溢出
队列中 ROI 数量超过 8 时(多摄像头多 ROI 绑定场景),一次性送入 TensorRT 引擎导致 np.copyto 缓冲区溢出。改为按 max_batch_size 分块推理。 Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
This commit is contained in:
22
main.py
22
main.py
@@ -562,26 +562,30 @@ class EdgeInferenceService:
|
|||||||
self._logger.error(f"处理帧失败 {camera_id}: {e}")
|
self._logger.error(f"处理帧失败 {camera_id}: {e}")
|
||||||
|
|
||||||
def _batch_process_rois(self):
|
def _batch_process_rois(self):
|
||||||
"""批量处理 ROI - 真正的 batch 推理"""
|
"""批量处理 ROI - 真正的 batch 推理(按 max_batch_size 分块)"""
|
||||||
with self._batch_lock:
|
with self._batch_lock:
|
||||||
roi_items = self._batch_roi_queue
|
roi_items = self._batch_roi_queue
|
||||||
if not roi_items:
|
if not roi_items:
|
||||||
return
|
return
|
||||||
self._batch_roi_queue = []
|
self._batch_roi_queue = []
|
||||||
|
|
||||||
|
engine = self._engine_manager.get_engine("default")
|
||||||
|
if engine is None:
|
||||||
|
return
|
||||||
|
|
||||||
|
# 按 max_batch_size 分块处理
|
||||||
|
for chunk_start in range(0, len(roi_items), self._max_batch_size):
|
||||||
|
chunk = roi_items[chunk_start:chunk_start + self._max_batch_size]
|
||||||
|
|
||||||
try:
|
try:
|
||||||
images = [item[4] for item in roi_items]
|
images = [item[4] for item in chunk]
|
||||||
scale_infos = [item[5] for item in roi_items]
|
scale_infos = [item[5] for item in chunk]
|
||||||
|
|
||||||
# 真正的 batch: 将所有 ROI 裁剪图拼成 [N,3,H,W] 一次推理
|
# 真正的 batch: 将所有 ROI 裁剪图拼成 [N,3,H,W] 一次推理
|
||||||
batch_data, _ = self._preprocessor._batch_preprocessor.preprocess_batch(
|
batch_data, _ = self._preprocessor._batch_preprocessor.preprocess_batch(
|
||||||
images
|
images
|
||||||
)
|
)
|
||||||
|
|
||||||
engine = self._engine_manager.get_engine("default")
|
|
||||||
if engine is None:
|
|
||||||
return
|
|
||||||
|
|
||||||
# 一次性推理整个 batch
|
# 一次性推理整个 batch
|
||||||
outputs, inference_time_ms = engine.infer(batch_data)
|
outputs, inference_time_ms = engine.infer(batch_data)
|
||||||
|
|
||||||
@@ -593,7 +597,7 @@ class EdgeInferenceService:
|
|||||||
shapes = [o.shape if hasattr(o, 'shape') else type(o) for o in outputs]
|
shapes = [o.shape if hasattr(o, 'shape') else type(o) for o in outputs]
|
||||||
self._logger.debug(f"[推理诊断] batch_data shape={batch_data.shape}, outputs={shapes}, 耗时={inference_time_ms:.1f}ms")
|
self._logger.debug(f"[推理诊断] batch_data shape={batch_data.shape}, outputs={shapes}, 耗时={inference_time_ms:.1f}ms")
|
||||||
|
|
||||||
batch_size = len(roi_items)
|
batch_size = len(chunk)
|
||||||
batch_results = self._postprocessor.batch_process_detections(
|
batch_results = self._postprocessor.batch_process_detections(
|
||||||
outputs,
|
outputs,
|
||||||
batch_size,
|
batch_size,
|
||||||
@@ -603,7 +607,7 @@ class EdgeInferenceService:
|
|||||||
total_detections = sum(len(r[0]) for r in batch_results)
|
total_detections = sum(len(r[0]) for r in batch_results)
|
||||||
self._logger.debug(f"[推理] batch_size={batch_size}, 总检测数={total_detections}, conf_thresh={self._settings.inference.conf_threshold}")
|
self._logger.debug(f"[推理] batch_size={batch_size}, 总检测数={total_detections}, conf_thresh={self._settings.inference.conf_threshold}")
|
||||||
|
|
||||||
for idx, (camera_id, roi, bind, frame, _, scale_info) in enumerate(roi_items):
|
for idx, (camera_id, roi, bind, frame, _, scale_info) in enumerate(chunk):
|
||||||
boxes, scores, class_ids = batch_results[idx]
|
boxes, scores, class_ids = batch_results[idx]
|
||||||
|
|
||||||
# 无论是否检测到目标都要调用算法(离岗检测需要"无人"信号)
|
# 无论是否检测到目标都要调用算法(离岗检测需要"无人"信号)
|
||||||
|
|||||||
Reference in New Issue
Block a user