Add a script and a feature to delete all detections from the database

2026-01-16 13:43:05 +02:00
parent 2c494dac49
commit 20578c1fdf
5 changed files with 191 additions and 28 deletions

View File

@@ -462,6 +462,22 @@ class DatabaseManager:
        finally:
            conn.close()

    def delete_all_detections(self) -> int:
        """Delete all detections from the database.

        Returns:
            Number of rows deleted.
        """
        conn = self.get_connection()
        try:
            cursor = conn.cursor()
            cursor.execute("DELETE FROM detections")
            conn.commit()
            return cursor.rowcount
        finally:
            conn.close()

    # ==================== Statistics Operations ====================

    def get_detection_statistics(
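For context, a minimal sketch of how the new method might be exercised outside the GUI. The no-argument DatabaseManager() constructor is taken from the export script later in this commit; any other setup is an assumption.

from src.database.db_manager import DatabaseManager

# Hypothetical usage sketch; constructor arguments are an assumption.
db = DatabaseManager()
deleted = db.delete_all_detections()
print(f"Removed {deleted} detection(s)")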

View File

@@ -55,10 +55,7 @@ CREATE TABLE IF NOT EXISTS object_classes (
-- Insert default object classes
INSERT OR IGNORE INTO object_classes (class_name, color, description) VALUES
('cell', '#FF0000', 'Cell object'),
('nucleus', '#00FF00', 'Cell nucleus'),
('mitochondria', '#0000FF', 'Mitochondria'),
('vesicle', '#FFFF00', 'Vesicle');
('terminal', '#FFFF00', 'Axon terminal');
-- Annotations table: stores manual annotations
CREATE TABLE IF NOT EXISTS annotations (
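Because the seed rows use INSERT OR IGNORE, re-applying the schema to an existing database should leave previously seeded classes untouched (assuming class_name carries a UNIQUE constraint). A quick check sketch, assuming the schema lives in a SQLite file such as detections.db (the filename is illustrative only):

import sqlite3

# "detections.db" is an assumed filename for illustration only.
conn = sqlite3.connect("detections.db")
rows = conn.execute("SELECT class_name, color, description FROM object_classes").fetchall()
print(rows)
conn.close()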

View File

@@ -66,6 +66,13 @@ class ResultsTab(QWidget):
        self.refresh_btn.clicked.connect(self.refresh)
        controls_layout.addWidget(self.refresh_btn)

        self.delete_all_btn = QPushButton("Delete All Detections")
        self.delete_all_btn.setToolTip(
            "Permanently delete ALL detections from the database.\n"
            "This cannot be undone."
        )
        self.delete_all_btn.clicked.connect(self._delete_all_detections)
        controls_layout.addWidget(self.delete_all_btn)

        self.export_labels_btn = QPushButton("Export Labels")
        self.export_labels_btn.setToolTip(
            "Export YOLO .txt labels for the selected image/model run.\n"
@@ -139,6 +146,41 @@ class ResultsTab(QWidget):
        layout.addWidget(splitter)
        self.setLayout(layout)

    def _delete_all_detections(self):
        """Delete all detections from the database after user confirmation."""
        confirm = QMessageBox.warning(
            self,
            "Delete All Detections",
            "This will permanently delete ALL detections from the database.\n\n"
            "This action cannot be undone.\n\n"
            "Do you want to continue?",
            QMessageBox.Yes | QMessageBox.No,
            QMessageBox.No,
        )
        if confirm != QMessageBox.Yes:
            return

        try:
            deleted = self.db_manager.delete_all_detections()
        except Exception as exc:
            logger.error(f"Failed to delete all detections: {exc}")
            QMessageBox.critical(
                self,
                "Error",
                f"Failed to delete detections:\n{exc}",
            )
            return

        QMessageBox.information(
            self,
            "Delete All Detections",
            f"Deleted {deleted} detection(s) from the database.",
        )

        # Reset UI state.
        self.refresh()

    def refresh(self):
        """Refresh the detection list and preview."""
        self._load_detection_summary()
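The confirm-then-delete flow above can be tried in isolation; below is a minimal standalone sketch of the same QMessageBox pattern, assuming PyQt5 (the project may import the equivalent names from PySide).

import sys
from PyQt5.QtWidgets import QApplication, QMessageBox, QPushButton

app = QApplication(sys.argv)
btn = QPushButton("Delete All Detections")

def on_click():
    # Same two-button warning pattern, with "No" as the safe default.
    answer = QMessageBox.warning(
        btn,
        "Delete All Detections",
        "This will permanently delete ALL detections.\n\nContinue?",
        QMessageBox.Yes | QMessageBox.No,
        QMessageBox.No,
    )
    print("confirmed" if answer == QMessageBox.Yes else "cancelled")

btn.clicked.connect(on_click)
btn.show()
sys.exit(app.exec_())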

View File

@@ -0,0 +1,103 @@
import numpy as np
from pathlib import Path
from skimage.draw import polygon
from tifffile import TiffFile

from src.database.db_manager import DatabaseManager


def read_image(image_path: Path) -> tuple[np.ndarray, dict]:
    """Read a TIFF image and its ImageJ metadata."""
    with TiffFile(image_path) as tif:
        image = tif.asarray()
        metadata = tif.imagej_metadata
    return image, metadata


def main():
    polygon_vertices = np.array([[10, 10], [50, 10], [50, 50], [10, 50]])
    image = np.zeros((100, 100), dtype=np.uint8)
    rr, cc = polygon(polygon_vertices[:, 0], polygon_vertices[:, 1])
    image[rr, cc] = 255


if __name__ == "__main__":
    db = DatabaseManager()
    model_name = "c17"
    model_id = db.get_models(filters={"model_name": model_name})[0]["id"]
    print(f"Model name {model_name}, id {model_id}")
    detections = db.get_detections(filters={"model_id": model_id})

    file_stems = set()
    for detection in detections:
        file_stems.add(detection["image_filename"].split("_")[0])
    print("Files:", file_stems)

    for stem in file_stems:
        print(stem)
        detections = db.get_detections(filters={"model_id": model_id, "i.filename": f"LIKE %{stem}%"})
        annotations = []
        for detection in detections:
            source_path = Path(detection["metadata"]["source_path"])
            image, metadata = read_image(source_path)
            offset = np.array(list(map(int, metadata["tile_section"].split(","))))[::-1]
            scale = np.array(list(map(int, metadata["patch_size"].split(","))))[::-1]
            # tile_size = np.array(list(map(int, metadata["tile_size"].split(","))))
            segmentation = np.array(detection["segmentation_mask"])  # * tile_size
            # print(source_path, image, metadata, segmentation.shape)
            # print(offset)
            # print(scale)
            # print(segmentation)
            # segmentation = (segmentation + offset * tile_size) / (tile_size * scale)
            segmentation = (segmentation + offset) / scale
            yolo_annotation = f"{detection['metadata']['class_id']} " + " ".join(
                [f"{x:.6f} {y:.6f}" for x, y in segmentation]
            )
            annotations.append(yolo_annotation)
            # print(segmentation)
            # print(yolo_annotation)
            print(
                " ",
                detection["model_name"],
                detection["image_id"],
                detection["image_filename"],
                source_path,
                metadata["label_path"],
            )
            # section_i_section_j = detection["image_filename"].split("_")[1].split(".")[0]
            # print(" ", section_i_section_j)

        label_path = metadata["label_path"]
        print(" ", label_path)
        with open(label_path, "w") as f:
            f.write("\n".join(annotations))
        exit()

    for detection in detections:
        print(detection["model_name"], detection["image_id"], detection["image_filename"])
    print(detections[0])

    # polygon_vertices = np.array([[10, 10], [50, 10], [50, 50], [10, 50]])
    # image = np.zeros((100, 100), dtype=np.uint8)
    # rr, cc = polygon(polygon_vertices[:, 0], polygon_vertices[:, 1])
    # image[rr, cc] = 255
    # import matplotlib.pyplot as plt
    # plt.imshow(image, cmap='gray')
    # plt.show()
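The core of the export script is the transform (segmentation + offset) / scale: it shifts tile-local pixel coordinates by the tile's offset inside the source image and normalizes by the patch size, yielding the 0..1 coordinates that YOLO labels expect. A small worked sketch with illustrative numbers (the metadata semantics are taken from the script above; the values are made up):

import numpy as np

offset = np.array([256, 128])        # assumed tile offset in pixels (x, y)
scale = np.array([1024, 1024])       # assumed patch size in pixels (x, y)
segmentation = np.array([[10.0, 20.0], [40.0, 60.0]])  # tile-local polygon points

normalized = (segmentation + offset) / scale
print(normalized)  # approx [[0.2598, 0.1445], [0.2891, 0.1836]]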

View File

@@ -189,33 +189,38 @@ def main():
            # continue and just show image

        out = draw_annotations(img.copy(), labels, alpha=args.alpha, draw_bbox_for_poly=(not args.no_bbox))

        lclass, coords = labels[0]
        print(lclass, coords)
        bbox = coords[:4]
        print("bbox", bbox)
        bbox = np.array(bbox) * np.array([img.shape[1], img.shape[0], img.shape[1], img.shape[0]])
        yc, xc, h, w = bbox
        print("bbox", bbox)
        # polyline = np.array(coords[4:]).reshape(-1, 2) * np.array([img.shape[1], img.shape[0]])
        polyline = np.array(coords).reshape(-1, 2) * np.array([img.shape[1], img.shape[0]])
        print("pl", coords[4:])
        print("pl", polyline)

        # Convert BGR -> RGB for matplotlib display
        # out_rgb = cv2.cvtColor(out, cv2.COLOR_BGR2RGB)
        out_rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
        # out_rgb = Image()

        plt.figure(figsize=(10, 10 * out.shape[0] / out.shape[1]))
        plt.imshow(out_rgb)
        plt.plot(polyline[:, 0], polyline[:, 1], "y", linewidth=2)
        if 0:
            plt.plot(
                [yc - h / 2, yc - h / 2, yc + h / 2, yc + h / 2, yc - h / 2],
                [xc - w / 2, xc + w / 2, xc + w / 2, xc - w / 2, xc - w / 2],
                "r",
                linewidth=2,
            )
            plt.imshow(out_rgb.transpose(1, 0, 2))
        else:
            plt.imshow(out_rgb)

        for label in labels:
            lclass, coords = label
            # print(lclass, coords)
            bbox = coords[:4]
            # print("bbox", bbox)
            bbox = np.array(bbox) * np.array([img.shape[1], img.shape[0], img.shape[1], img.shape[0]])
            yc, xc, h, w = bbox
            # print("bbox", bbox)
            # polyline = np.array(coords[4:]).reshape(-1, 2) * np.array([img.shape[1], img.shape[0]])
            polyline = np.array(coords).reshape(-1, 2) * np.array([img.shape[1], img.shape[0]])
            # print("pl", coords[4:])
            # print("pl", polyline)
            # Convert BGR -> RGB for matplotlib display
            # out_rgb = cv2.cvtColor(out, cv2.COLOR_BGR2RGB)
            # out_rgb = Image()
            plt.plot(polyline[:, 0], polyline[:, 1], "y", linewidth=2)
            if 0:
                plt.plot(
                    [yc - h / 2, yc - h / 2, yc + h / 2, yc + h / 2, yc - h / 2],
                    [xc - w / 2, xc + w / 2, xc + w / 2, xc - w / 2, xc - w / 2],
                    "r",
                    linewidth=2,
                )

        # plt.axis("off")
        plt.title(f"{img_path.name} ({lbl_path.name})")
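The plotting loop relies on the inverse transform: normalized label coordinates are scaled back to pixels by multiplying with (width, height), i.e. np.array([img.shape[1], img.shape[0]]), before being passed to plt.plot. A minimal sketch with illustrative values:

import numpy as np

img_w, img_h = 640, 480  # illustrative image size (width, height)
coords = [0.25, 0.50, 0.75, 0.50, 0.50, 0.90]  # normalized x, y pairs from a label line

polyline = np.array(coords).reshape(-1, 2) * np.array([img_w, img_h])
print(polyline)  # [[160. 240.] [480. 240.] [320. 432.]] -> pixel coordinates for plt.plot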