diff --git a/src/model/yolo_wrapper.py b/src/model/yolo_wrapper.py
index c02c78e..b2c90e5 100644
--- a/src/model/yolo_wrapper.py
+++ b/src/model/yolo_wrapper.py
@@ -192,7 +192,9 @@ class YOLOWrapper:
         prepared_source, cleanup_path = self._prepare_source(source)
 
         try:
-            logger.info(f"Running inference on {source}")
+            logger.info(
+                f"Running inference on {source} -> prepared_source {prepared_source}"
+            )
             results = self.model.predict(
                 source=prepared_source,
                 conf=conf,
diff --git a/src/utils/ultralytics_16bit_patch.py b/src/utils/ultralytics_16bit_patch.py
index c976bc1..6c25f58 100644
--- a/src/utils/ultralytics_16bit_patch.py
+++ b/src/utils/ultralytics_16bit_patch.py
@@ -106,6 +106,7 @@ def apply_ultralytics_16bit_tiff_patches(*, force: bool = False) -> None:
     def preprocess_batch_16bit(self, batch: dict) -> dict:  # type: ignore[override]
         # Start from upstream behavior to keep device placement + multiscale identical,
         # but replace the 255 division with dtype-aware scaling.
+        logger.info("Preprocessing batch with monkey-patched preprocess_batch")
         for k, v in batch.items():
             if isinstance(v, torch.Tensor):
                 batch[k] = v.to(self.device, non_blocking=self.device.type == "cuda")