Grayscale RGB conversion modified
@@ -18,7 +18,7 @@ models:
 training:
   default_epochs: 100
   default_batch_size: 16
-  default_imgsz: 1024
+  default_imgsz: 640
   default_patience: 50
   default_lr0: 0.01
   two_stage:
@@ -32,8 +32,8 @@ training:
     epochs: 150
     lr0: 0.0003
     patience: 30
-  last_dataset_yaml: /home/martin/code/object_detection/data/datasets/data.yaml
-  last_dataset_dir: /home/martin/code/object_detection/data/datasets
+  last_dataset_yaml: /home/martin/code/object_detection/data/datasets-revert/data.yaml
+  last_dataset_dir: /home/martin/code/object_detection/data/datasets-revert
 detection:
   default_confidence: 0.25
   default_iou: 0.45
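For orientation only, a minimal sketch of how training defaults like these are typically consumed, assuming the YAML is read with yaml.safe_load and fed to an Ultralytics-style train() call. The config path, the load_training_config helper, and the placeholder weights file are assumptions, not part of this repository's wrapper code:

# Sketch only: the config path, helper name, and weights file are hypothetical.
import yaml
from ultralytics import YOLO

def load_training_config(path="config.yaml"):
    with open(path) as fh:
        return yaml.safe_load(fh)

cfg = load_training_config()
train_cfg = cfg["training"]
model = YOLO("yolov8n.pt")  # placeholder weights
model.train(
    data=train_cfg["last_dataset_yaml"],   # now points at datasets-revert/data.yaml
    epochs=train_cfg["default_epochs"],    # 100
    batch=train_cfg["default_batch_size"], # 16
    imgsz=train_cfg["default_imgsz"],      # 640 after this commit (was 1024)
    patience=train_cfg["default_patience"],# 50
    lr0=train_cfg["default_lr0"],          # 0.01
)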
@@ -10,6 +10,7 @@ import torch
 from PIL import Image
 import tempfile
 import os
+import numpy as np
 from src.utils.logger import get_logger
 
 
@@ -188,7 +189,7 @@ class YOLOWrapper:
             logger.error(f"Error during inference: {e}")
             raise
         finally:
-            if cleanup_path:
+            if 0: # cleanup_path:
                 try:
                     os.remove(cleanup_path)
                 except OSError as cleanup_error:
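The if 0: guard hard-disables removal of the temporary RGB file in the finally block, so every converted image now stays on disk (presumably so it can be inspected). If the intent is to keep the files only while debugging, an opt-in switch is a common pattern; a sketch under that assumption, where the KEEP_CONVERTED_RGB variable name and the warning message are hypothetical:

# Sketch only, not part of the commit: opt-in retention of converted temp files.
import os

def maybe_remove(cleanup_path, logger):
    # Remove the temp file unless the (hypothetical) KEEP_CONVERTED_RGB flag is set.
    if cleanup_path and os.environ.get("KEEP_CONVERTED_RGB") != "1":
        try:
            os.remove(cleanup_path)
        except OSError as cleanup_error:
            logger.warning(f"Could not remove temp file {cleanup_path}: {cleanup_error}")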
@@ -233,7 +234,26 @@ class YOLOWrapper:
         try:
             with Image.open(source_path) as img:
                 if len(img.getbands()) == 1:
-                    rgb_img = img.convert("RGB")
+                    grayscale = np.array(img)
+                    if grayscale.ndim == 3:
+                        grayscale = grayscale[:, :, 0]
+                    original_dtype = grayscale.dtype
+                    grayscale = grayscale.astype(np.float32)
+
+                    if np.issubdtype(original_dtype, np.integer):
+                        dtype_info = np.iinfo(original_dtype)
+                        denom = float(max(dtype_info.max, 1))
+                    else:
+                        max_val = (
+                            float(grayscale.max()) if grayscale.size else 0.0
+                        )
+                        denom = max(max_val, 1.0)
+
+                    grayscale = np.clip(grayscale / denom, 0.0, 1.0)
+                    grayscale_u8 = (grayscale * 255.0).round().astype(np.uint8)
+                    rgb_arr = np.repeat(grayscale_u8[:, :, None], 3, axis=2)
+                    rgb_img = Image.fromarray(rgb_arr, mode="RGB")
+
                     suffix = source_path.suffix or ".png"
                     tmp = tempfile.NamedTemporaryFile(
                         suffix=suffix, delete=False
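The added branch replaces PIL's img.convert("RGB") with an explicit rescale, which matters for high-bit-depth inputs (e.g. 16-bit grayscale), where a direct mode conversion can clip values above 255. As a standalone reference, the same technique reads roughly as follows; the helper name, signature, and docstring are illustrative, not part of the commit:

# Standalone sketch of the conversion added above.
import numpy as np
from PIL import Image

def grayscale_to_rgb(img: Image.Image) -> Image.Image:
    """Rescale a single-channel image to 0-255 and replicate it across 3 channels."""
    arr = np.array(img)
    if arr.ndim == 3:                      # e.g. an HxWx1 array
        arr = arr[:, :, 0]
    original_dtype = arr.dtype
    arr = arr.astype(np.float32)
    if np.issubdtype(original_dtype, np.integer):
        denom = float(max(np.iinfo(original_dtype).max, 1))      # full dtype range, e.g. 65535 for uint16
    else:
        denom = max(float(arr.max()) if arr.size else 0.0, 1.0)  # fall back to the observed maximum
    arr = np.clip(arr / denom, 0.0, 1.0)
    u8 = (arr * 255.0).round().astype(np.uint8)
    return Image.fromarray(np.repeat(u8[:, :, None], 3, axis=2), mode="RGB")

Note that for integer inputs the divisor is the dtype's full range rather than the image's own maximum, so a dim 16-bit image stays dim instead of being stretched to full contrast.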
@@ -243,7 +263,7 @@ class YOLOWrapper:
                     rgb_img.save(tmp_path)
                     cleanup_path = tmp_path
                     logger.info(
-                        f"Converted single-channel image {source_path} to RGB for inference"
+                        f"Converted single-channel image {source_path} to RGB for inference at {tmp_path}"
                     )
                     return tmp_path, cleanup_path
         except Exception as convert_error: