Add result viewer

This commit is contained in:
2025-12-10 16:55:28 +02:00
parent 5370d31dce
commit 833b222fad
7 changed files with 672 additions and 71 deletions

View File

@@ -42,6 +42,7 @@ class InferenceEngine:
relative_path: str,
conf: float = 0.25,
save_to_db: bool = True,
repository_root: Optional[str] = None,
) -> Dict:
"""
Detect objects in a single image.
@@ -51,11 +52,17 @@ class InferenceEngine:
relative_path: Relative path from repository root
conf: Confidence threshold
save_to_db: Whether to save results to database
repository_root: Base directory used to compute relative_path (if known)
Returns:
Dictionary with detection results
"""
try:
# Normalize storage path (fall back to absolute path when repo root is unknown)
stored_relative_path = relative_path
if not repository_root:
stored_relative_path = str(Path(image_path).resolve())
# Get image dimensions
img = Image.open(image_path)
width, height = img.size
@@ -66,34 +73,58 @@ class InferenceEngine:
# Add/get image in database
image_id = self.db_manager.get_or_create_image(
relative_path=relative_path,
relative_path=stored_relative_path,
filename=Path(image_path).name,
width=width,
height=height,
)
# Save detections to database
if save_to_db and detections:
detection_records = []
for det in detections:
# Use normalized bbox from detection
bbox_normalized = det[
"bbox_normalized"
] # [x_min, y_min, x_max, y_max]
inserted_count = 0
deleted_count = 0
record = {
"image_id": image_id,
"model_id": self.model_id,
"class_name": det["class_name"],
"bbox": tuple(bbox_normalized),
"confidence": det["confidence"],
"segmentation_mask": det.get("segmentation_mask"),
"metadata": {"class_id": det["class_id"]},
}
detection_records.append(record)
# Save detections to database, replacing any previous results for this image/model
if save_to_db:
deleted_count = self.db_manager.delete_detections_for_image(
image_id, self.model_id
)
if detections:
detection_records = []
for det in detections:
# Use normalized bbox from detection
bbox_normalized = det[
"bbox_normalized"
] # [x_min, y_min, x_max, y_max]
self.db_manager.add_detections_batch(detection_records)
logger.info(f"Saved {len(detection_records)} detections to database")
metadata = {
"class_id": det["class_id"],
"source_path": str(Path(image_path).resolve()),
}
if repository_root:
metadata["repository_root"] = str(
Path(repository_root).resolve()
)
record = {
"image_id": image_id,
"model_id": self.model_id,
"class_name": det["class_name"],
"bbox": tuple(bbox_normalized),
"confidence": det["confidence"],
"segmentation_mask": det.get("segmentation_mask"),
"metadata": metadata,
}
detection_records.append(record)
inserted_count = self.db_manager.add_detections_batch(
detection_records
)
logger.info(
f"Saved {inserted_count} detections to database (replaced {deleted_count})"
)
else:
logger.info(
f"Detection run removed {deleted_count} stale entries but produced no new detections"
)
return {
"success": True,
@@ -142,7 +173,12 @@ class InferenceEngine:
rel_path = get_relative_path(image_path, repository_root)
# Perform detection
result = self.detect_single(image_path, rel_path, conf)
result = self.detect_single(
image_path,
rel_path,
conf=conf,
repository_root=repository_root,
)
results.append(result)
# Update progress

View File

@@ -7,6 +7,9 @@ from ultralytics import YOLO
from pathlib import Path
from typing import Optional, List, Dict, Callable, Any
import torch
from PIL import Image
import tempfile
import os
from src.utils.logger import get_logger
@@ -162,10 +165,12 @@ class YOLOWrapper:
if self.model is None:
self.load_model()
prepared_source, cleanup_path = self._prepare_source(source)
try:
logger.info(f"Running inference on {source}")
results = self.model.predict(
source=source,
source=prepared_source,
conf=conf,
iou=iou,
save=save,
@@ -182,6 +187,14 @@ class YOLOWrapper:
except Exception as e:
logger.error(f"Error during inference: {e}")
raise
finally:
if cleanup_path:
try:
os.remove(cleanup_path)
except OSError as cleanup_error:
logger.warning(
f"Failed to delete temporary RGB image {cleanup_path}: {cleanup_error}"
)
def export(
self, format: str = "onnx", output_path: Optional[str] = None, **kwargs
@@ -210,6 +223,36 @@ class YOLOWrapper:
logger.error(f"Error exporting model: {e}")
raise
def _prepare_source(self, source):
    """Convert single-channel images to a temporary RGB copy for inference.

    YOLO expects 3-channel input; images whose PIL mode has a single band
    (e.g. grayscale "L") are converted to RGB and written to a temp file.

    Args:
        source: Inference source. Only a str/Path pointing at an existing
            file is eligible for conversion; anything else passes through.

    Returns:
        Tuple ``(prepared_source, cleanup_path)`` where ``cleanup_path`` is
        the temp file the caller must delete after inference, or ``None``
        when no conversion happened.
    """
    cleanup_path = None
    if isinstance(source, (str, Path)):
        source_path = Path(source)
        if source_path.is_file():
            tmp_path = None
            try:
                with Image.open(source_path) as img:
                    if len(img.getbands()) == 1:
                        rgb_img = img.convert("RGB")
                        # Keep the original suffix so the format is inferred
                        # the same way as for the source file.
                        suffix = source_path.suffix or ".png"
                        tmp = tempfile.NamedTemporaryFile(
                            suffix=suffix, delete=False
                        )
                        tmp_path = tmp.name
                        tmp.close()
                        rgb_img.save(tmp_path)
                        cleanup_path = tmp_path
                        logger.info(
                            f"Converted single-channel image {source_path} to RGB for inference"
                        )
                        return tmp_path, cleanup_path
            except Exception as convert_error:
                # Best-effort fallback to the original file — but do not
                # leak a temp file created before the failure (e.g. when
                # rgb_img.save() raised).
                if tmp_path and cleanup_path is None:
                    try:
                        os.remove(tmp_path)
                    except OSError:
                        pass
                logger.warning(
                    f"Failed to preprocess {source_path} as RGB, continuing with original file: {convert_error}"
                )
    return source, cleanup_path
def _format_training_results(self, results) -> Dict[str, Any]:
"""Format training results into dictionary."""
try: