From 6ae7481e2585815553e18b8d2569c3bf771fcdb5 Mon Sep 17 00:00:00 2001
From: Martin Laasmaa
Date: Fri, 19 Dec 2025 10:15:53 +0200
Subject: [PATCH] Adding debug messages

---
 src/model/yolo_wrapper.py            | 4 +++-
 src/utils/ultralytics_16bit_patch.py | 1 +
 2 files changed, 4 insertions(+), 1 deletion(-)

diff --git a/src/model/yolo_wrapper.py b/src/model/yolo_wrapper.py
index c02c78e..b2c90e5 100644
--- a/src/model/yolo_wrapper.py
+++ b/src/model/yolo_wrapper.py
@@ -192,7 +192,9 @@ class YOLOWrapper:
         prepared_source, cleanup_path = self._prepare_source(source)
 
         try:
-            logger.info(f"Running inference on {source}")
+            logger.info(
+                f"Running inference on {source} -> prepared_source {prepared_source}"
+            )
             results = self.model.predict(
                 source=prepared_source,
                 conf=conf,
diff --git a/src/utils/ultralytics_16bit_patch.py b/src/utils/ultralytics_16bit_patch.py
index c976bc1..6c25f58 100644
--- a/src/utils/ultralytics_16bit_patch.py
+++ b/src/utils/ultralytics_16bit_patch.py
@@ -106,6 +106,7 @@ def apply_ultralytics_16bit_tiff_patches(*, force: bool = False) -> None:
     def preprocess_batch_16bit(self, batch: dict) -> dict:  # type: ignore[override]
         # Start from upstream behavior to keep device placement + multiscale identical,
         # but replace the 255 division with dtype-aware scaling.
+        logger.info(f"Preprocessing batch with monkey-patched preprocess_batch")
         for k, v in batch.items():
             if isinstance(v, torch.Tensor):
                 batch[k] = v.to(self.device, non_blocking=self.device.type == "cuda")
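
Note: the comment in the second hunk refers to "dtype-aware scaling", but the
scaling code itself sits below the visible hunk context. A minimal sketch of
the idea, assuming (as in ultralytics trainers) that the batch dict carries
its images under the "img" key; the function name and structure below are
illustrative, not the project's actual implementation:

    import torch

    def preprocess_batch_16bit_sketch(batch: dict, device: torch.device) -> dict:
        # Mirror upstream behavior: move all tensors to the target device first.
        for k, v in batch.items():
            if isinstance(v, torch.Tensor):
                batch[k] = v.to(device, non_blocking=device.type == "cuda")
        img = batch["img"]
        # Dtype-aware scaling: divide by the dtype's full range instead of a
        # hard-coded 255, so 16-bit TIFF data (max 65535) normalizes to [0, 1]
        # the same way uint8 data does. Float inputs are assumed pre-scaled.
        if not img.dtype.is_floating_point:
            img = img.float() / float(torch.iinfo(img.dtype).max)
        batch["img"] = img
        return batch

On the added log line itself: preprocess_batch runs once per batch, so at INFO
level this message can flood training logs; downgrading it to logger.debug once
the patch is verified may be worth considering.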