Compare commits
5 Commits: float32int ... main

| Author | SHA1 | Date |
|---|---|---|
|  | e5036c10cf |  |
|  | c7e388d9ae |  |
|  | 6b995e7325 |  |
|  | 0e0741d323 |  |
|  | dd99a0677c |  |
@@ -34,7 +34,7 @@ from PySide6.QtWidgets import (
 from src.database.db_manager import DatabaseManager
 from src.model.yolo_wrapper import YOLOWrapper
 from src.utils.config_manager import ConfigManager
-from src.utils.image import Image, convert_grayscale_to_rgb_preserve_range
+from src.utils.image import Image
 from src.utils.logger import get_logger


@@ -1368,7 +1368,7 @@ class TrainingTab(QWidget):
         img_obj = Image(src)
         pil_img = img_obj.pil_image
         if len(pil_img.getbands()) == 1:
-            rgb_img = convert_grayscale_to_rgb_preserve_range(pil_img)
+            rgb_img = img_obj.convert_grayscale_to_rgb_preserve_range()
         else:
             rgb_img = pil_img.convert("RGB")
         rgb_img.save(dst)
@@ -9,7 +9,7 @@ from typing import Optional, List, Dict, Callable, Any
 import torch
 import tempfile
 import os
-from src.utils.image import Image, convert_grayscale_to_rgb_preserve_range
+from src.utils.image import Image
 from src.utils.logger import get_logger


@@ -238,7 +238,7 @@ class YOLOWrapper:
         img_obj = Image(source_path)
         pil_img = img_obj.pil_image
         if len(pil_img.getbands()) == 1:
-            rgb_img = convert_grayscale_to_rgb_preserve_range(pil_img)
+            rgb_img = img_obj.convert_grayscale_to_rgb_preserve_range()
         else:
             rgb_img = pil_img.convert("RGB")

@@ -277,6 +277,38 @@ class Image:
         """
         return self._channels >= 3

+    def convert_grayscale_to_rgb_preserve_range(
+        self,
+    ) -> PILImage.Image:
+        """Convert a single-channel PIL image to RGB while preserving dynamic range.
+
+        Returns:
+            PIL Image in RGB mode with intensities normalized to 0-255.
+        """
+        if self._channels == 3:
+            return self.pil_image
+
+        grayscale = self.data
+        if grayscale.ndim == 3:
+            grayscale = grayscale[:, :, 0]
+
+        original_dtype = grayscale.dtype
+        grayscale = grayscale.astype(np.float32)
+
+        if grayscale.size == 0:
+            return PILImage.new("RGB", self.shape, color=(0, 0, 0))
+
+        if np.issubdtype(original_dtype, np.integer):
+            denom = float(max(np.iinfo(original_dtype).max, 1))
+        else:
+            max_val = float(grayscale.max())
+            denom = max(max_val, 1.0)
+
+        grayscale = np.clip(grayscale / denom, 0.0, 1.0)
+        grayscale_u8 = (grayscale * 255.0).round().astype(np.uint8)
+        rgb_arr = np.repeat(grayscale_u8[:, :, None], 3, axis=2)
+        return PILImage.fromarray(rgb_arr, mode="RGB")
+
     def __repr__(self) -> str:
         """String representation of the Image object."""
         return (
@@ -289,40 +321,3 @@ class Image:
     def __str__(self) -> str:
         """String representation of the Image object."""
         return self.__repr__()
-
-
-def convert_grayscale_to_rgb_preserve_range(
-    pil_image: PILImage.Image,
-) -> PILImage.Image:
-    """Convert a single-channel PIL image to RGB while preserving dynamic range.
-
-    Args:
-        pil_image: Single-channel PIL image (e.g., 16-bit grayscale).
-
-    Returns:
-        PIL Image in RGB mode with intensities normalized to 0-255.
-    """
-
-    if pil_image.mode == "RGB":
-        return pil_image
-
-    grayscale = np.array(pil_image)
-    if grayscale.ndim == 3:
-        grayscale = grayscale[:, :, 0]
-
-    original_dtype = grayscale.dtype
-    grayscale = grayscale.astype(np.float32)
-
-    if grayscale.size == 0:
-        return PILImage.new("RGB", pil_image.size, color=(0, 0, 0))
-
-    if np.issubdtype(original_dtype, np.integer):
-        denom = float(max(np.iinfo(original_dtype).max, 1))
-    else:
-        max_val = float(grayscale.max())
-        denom = max(max_val, 1.0)
-
-    grayscale = np.clip(grayscale / denom, 0.0, 1.0)
-    grayscale_u8 = (grayscale * 255.0).round().astype(np.uint8)
-    rgb_arr = np.repeat(grayscale_u8[:, :, None], 3, axis=2)
-    return PILImage.fromarray(rgb_arr, mode="RGB")
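Taken together, the two hunks above move the grayscale conversion from a module-level helper onto the Image class, which is why the TrainingTab and YOLOWrapper call sites now go through the Image instance. A minimal sketch of the new call pattern, mirroring those call sites (the file paths are placeholders, not taken from the diff):

    img_obj = Image("frame_16bit.tif")  # placeholder input path
    pil_img = img_obj.pil_image
    if len(pil_img.getbands()) == 1:
        # single-channel (e.g. 16-bit) data is rescaled into 0-255 RGB
        rgb_img = img_obj.convert_grayscale_to_rgb_preserve_range()
    else:
        rgb_img = pil_img.convert("RGB")
    rgb_img.save("frame_rgb.png")  # placeholder output path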
@@ -12,23 +12,32 @@ class UT:
     Operetta files along with rois drawn in ImageJ
     """

-    def __init__(self, roifile_fn: Path):
+    def __init__(self, roifile_fn: Path, no_labels: bool):
         self.roifile_fn = roifile_fn
         print("is file", self.roifile_fn.is_file())
-        self.rois = ImagejRoi.fromfile(self.roifile_fn)
-        self.stem = self.roifile_fn.stem.strip("-RoiSet")
+        self.rois = None
+        if no_labels:
+            self.rois = ImagejRoi.fromfile(self.roifile_fn)
+            self.stem = self.roifile_fn.stem.split("Roi-")[1]
+        else:
+            self.roifile_fn = roifile_fn / roifile_fn.parts[-1]
+            self.stem = self.roifile_fn.stem
+
+        print(self.roifile_fn)
         print(self.stem)
         self.image, self.image_props = self._load_images()

     def _load_images(self):
         """Loading sequence of tif files
         array sequence is CZYX
         """
-        print(self.roifile_fn.parent, self.stem)
-        fns = list(self.roifile_fn.parent.glob(f"{self.stem}*.tif*"))
+        print("Loading images:", self.roifile_fn.parent, self.stem)
+        fns = list(self.roifile_fn.parent.glob(f"{self.stem.lower()}*.tif*"))
         stems = [fn.stem.split(self.stem)[-1] for fn in fns]
         n_ch = len(set([stem.split("-ch")[-1].split("t")[0] for stem in stems]))
         n_p = len(set([stem.split("-")[0] for stem in stems]))
         n_t = len(set([stem.split("t")[1] for stem in stems]))
+        print(n_ch, n_p, n_t)

         with TiffFile(fns[0]) as tif:
             img = tif.asarray()
@@ -42,6 +51,7 @@ class UT:
             "height": h,
             "dtype": dtype,
         }
+        print("Image props", self.image_props)

         image_stack = np.zeros((n_ch, n_p, w, h), dtype=dtype)
         for fn in fns:
@@ -49,7 +59,7 @@ class UT:
                 img = tif.asarray()
                 stem = fn.stem.split(self.stem)[-1]
                 ch = int(stem.split("-ch")[-1].split("t")[0])
-                p = int(stem.split("-")[0].lstrip("p"))
+                p = int(stem.split("-")[0].split("p")[1])
                 t = int(stem.split("t")[1])
                 print(fn.stem, "ch", ch, "p", p, "t", t)
                 image_stack[ch - 1, p - 1] = img
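The per-file channel/plane/timepoint indices are recovered purely by string-splitting whatever follows the shared stem in each filename. A standalone sketch of that parsing, using a hypothetical suffix (real Operetta exports may differ):

    # hypothetical per-file suffix: plane 3, channel 2, timepoint 1
    stem = "p03-ch2t01"
    ch = int(stem.split("-ch")[-1].split("t")[0])  # "2t01" -> 2
    p = int(stem.split("-")[0].split("p")[1])      # "p03"  -> 3
    t = int(stem.split("t")[1])                    # "01"   -> 1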
@@ -82,10 +92,21 @@ class UT:
     ):
         """Export rois to a file"""
         with open(path / subfolder / f"{self.stem}.txt", "w") as f:
-            for roi in self.rois:
-                # TODO add image coordinates normalization
-                coords = ""
-                for x, y in roi.subpixel_coordinates:
+            for i, roi in enumerate(self.rois):
+                rc = roi.subpixel_coordinates
+                if rc is None:
+                    print(
+                        f"No coordinates: {self.roifile_fn}, element {i}, out of {len(self.rois)}"
+                    )
+                    continue
+                xmn, ymn = rc.min(axis=0)
+                xmx, ymx = rc.max(axis=0)
+                xc = (xmn + xmx) / 2
+                yc = (ymn + ymx) / 2
+                bw = xmx - xmn
+                bh = ymx - ymn
+                coords = f"{xc/self.width} {yc/self.height} {bw/self.width} {bh/self.height} "
+                for x, y in rc:
                     coords += f"{x/self.width} {y/self.height} "
                 f.write(f"{class_index} {coords}\n")

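Each exported line is therefore a class index, a normalized bounding box (centre x/y, width, height), and then the normalized polygon vertices; that is the layout the show_yolo_seg.py viewer added below assumes (the first four coordinates as the box, the rest as the polygon). An illustrative line with made-up values:

    0 0.512 0.431 0.120 0.098 0.455 0.382 0.470 0.379 0.568 0.480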
@@ -104,6 +125,7 @@ class UT:
             self.image = np.max(self.image[channel], axis=0)
             print(self.image.shape)

+        print(path / subfolder / f"{self.stem}.tif")
         with TiffWriter(path / subfolder / f"{self.stem}.tif") as tif:
             tif.write(self.image)

@@ -112,11 +134,27 @@ if __name__ == "__main__":
     import argparse

     parser = argparse.ArgumentParser()
-    parser.add_argument("input", type=Path)
-    parser.add_argument("output", type=Path)
+    parser.add_argument("-i", "--input", nargs="*", type=Path)
+    parser.add_argument("-o", "--output", type=Path)
+    parser.add_argument(
+        "--no-labels",
+        action="store_false",
+        help="Source does not have labels, export only images",
+    )
     args = parser.parse_args()

-    for rfn in args.input.glob("*.zip"):
-        ut = UT(rfn)
-        ut.export_rois(args.output, class_index=0)
-        ut.export_image(args.output, plane_mode="max projection", channel=0)
+    for path in args.input:
+        print("Path:", path)
+        if not args.no_labels:
+            print("No labels")
+            ut = UT(path, args.no_labels)
+            ut.export_image(args.output, plane_mode="max projection", channel=0)
+
+        else:
+            for rfn in Path(path).glob("*.zip"):
+                print("Roi FN:", rfn)
+                ut = UT(rfn, args.no_labels)
+                ut.export_rois(args.output, class_index=0)
+                ut.export_image(args.output, plane_mode="max projection", channel=0)
+
+        print()
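With these options, leaving the flag off keeps args.no_labels at its store_false default of True, so each input directory is globbed for *.zip ROI files and both labels and max-projection images are exported; passing --no-labels flips it to False and only the image is written. A hedged usage sketch (the script name ut_export.py is a guess; only the flags come from the diff):

    python ut_export.py -i /data/operetta/run1 /data/operetta/run2 -o /data/yolo_dataset
    python ut_export.py -i /data/operetta/run3 -o /data/yolo_dataset --no-labels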
tests/show_yolo_seg.py (new file, 184 lines)
@@ -0,0 +1,184 @@
#!/usr/bin/env python3
"""
show_yolo_seg.py

Usage:
    python show_yolo_seg.py /path/to/image.jpg /path/to/labels.txt

Supports:
- Segmentation polygons: "class x1 y1 x2 y2 ... xn yn"
- YOLO bbox lines as fallback: "class x_center y_center width height"
Coordinates can be normalized [0..1] or absolute pixels (auto-detected).
"""
import sys
import cv2
import numpy as np
import matplotlib.pyplot as plt
import argparse
from pathlib import Path
import random


def parse_label_line(line):
    parts = line.strip().split()
    if not parts:
        return None
    cls = int(float(parts[0]))
    coords = [float(x) for x in parts[1:]]
    return cls, coords


def coords_are_normalized(coords):
    # If every coordinate is between 0 and 1 (inclusive-ish), assume normalized
    if not coords:
        return False
    return max(coords) <= 1.001


def yolo_bbox_to_xyxy(coords, img_w, img_h):
    # coords: [xc, yc, w, h] normalized or absolute
    xc, yc, w, h = coords[:4]
    if max(coords) <= 1.001:
        xc *= img_w
        yc *= img_h
        w *= img_w
        h *= img_h
    x1 = int(round(xc - w / 2))
    y1 = int(round(yc - h / 2))
    x2 = int(round(xc + w / 2))
    y2 = int(round(yc + h / 2))
    return x1, y1, x2, y2


def poly_to_pts(coords, img_w, img_h):
    # coords: [x1 y1 x2 y2 ...] either normalized or absolute
    if coords_are_normalized(coords[4:]):
        coords = [
            coords[i] * (img_w if i % 2 == 0 else img_h) for i in range(len(coords))
        ]
    pts = np.array(coords, dtype=np.int32).reshape(-1, 2)
    return pts


def random_color_for_class(cls):
    random.seed(cls)  # deterministic per class
    return tuple(int(x) for x in np.array([random.randint(0, 255) for _ in range(3)]))


def draw_annotations(img, labels, alpha=0.4, draw_bbox_for_poly=True):
    # img: BGR numpy array
    overlay = img.copy()
    h, w = img.shape[:2]
    for cls, coords in labels:
        if not coords:
            continue
        # polygon case (>=6 coordinates)
        if len(coords) >= 6:
            color = random_color_for_class(cls)

            x1, y1, x2, y2 = yolo_bbox_to_xyxy(coords[:4], w, h)
            cv2.rectangle(img, (x1, y1), (x2, y2), color, 2)

            pts = poly_to_pts(coords[4:], w, h)
            # fill on overlay
            cv2.fillPoly(overlay, [pts], color)
            # outline on base image
            cv2.polylines(img, [pts], isClosed=True, color=color, thickness=2)
            # put class text at first point
            x, y = int(pts[0, 0]), int(pts[0, 1]) - 6
            cv2.putText(
                img,
                str(cls),
                (x, max(6, y)),
                cv2.FONT_HERSHEY_SIMPLEX,
                0.6,
                (255, 255, 255),
                2,
                cv2.LINE_AA,
            )

        # YOLO bbox case (4 coords)
        elif len(coords) == 4:
            x1, y1, x2, y2 = yolo_bbox_to_xyxy(coords, w, h)
            color = random_color_for_class(cls)
            cv2.rectangle(img, (x1, y1), (x2, y2), color, 2)
            cv2.putText(
                img,
                str(cls),
                (x1, max(6, y1 - 4)),
                cv2.FONT_HERSHEY_SIMPLEX,
                0.6,
                (255, 255, 255),
                2,
                cv2.LINE_AA,
            )
        else:
            # Unknown / invalid format, skip
            continue

    # blend overlay for filled polygons
    cv2.addWeighted(overlay, alpha, img, 1 - alpha, 0, img)
    return img


def load_labels_file(label_path):
    labels = []
    with open(label_path, "r") as f:
        for raw in f:
            line = raw.strip()
            if not line:
                continue
            parsed = parse_label_line(line)
            if parsed:
                labels.append(parsed)
    return labels


def main():
    parser = argparse.ArgumentParser(
        description="Show YOLO segmentation / polygon annotations"
    )
    parser.add_argument("image", type=str, help="Path to image file")
    parser.add_argument("labels", type=str, help="Path to YOLO label file (polygons)")
    parser.add_argument(
        "--alpha", type=float, default=0.4, help="Polygon fill alpha (0..1)"
    )
    parser.add_argument(
        "--no-bbox", action="store_true", help="Don't draw bounding boxes for polygons"
    )
    args = parser.parse_args()

    img_path = Path(args.image)
    lbl_path = Path(args.labels)

    if not img_path.exists():
        print("Image not found:", img_path)
        sys.exit(1)
    if not lbl_path.exists():
        print("Label file not found:", lbl_path)
        sys.exit(1)

    img = cv2.imread(str(img_path), cv2.IMREAD_COLOR)
    if img is None:
        print("Could not load image:", img_path)
        sys.exit(1)

    labels = load_labels_file(str(lbl_path))
    if not labels:
        print("No labels parsed from", lbl_path)
        # continue and just show image

    out = draw_annotations(
        img.copy(), labels, alpha=args.alpha, draw_bbox_for_poly=(not args.no_bbox)
    )

    # Convert BGR -> RGB for matplotlib display
    out_rgb = cv2.cvtColor(out, cv2.COLOR_BGR2RGB)
    plt.figure(figsize=(10, 10 * out.shape[0] / out.shape[1]))
    plt.imshow(out_rgb)
    plt.axis("off")
    plt.title(f"{img_path.name} ({lbl_path.name})")
    plt.show()


if __name__ == "__main__":
    main()
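A quick end-to-end check is to point this viewer at one exported image/label pair from the exporter above; the paths below are illustrative only, and --no-bbox suppresses the box drawn around each polygon:

    python tests/show_yolo_seg.py /data/yolo_dataset/images/p01.tif /data/yolo_dataset/labels/p01.txt --alpha 0.3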