Update doc strings (#109)
Update all doc strings
constantinpape authored Dec 31, 2024
1 parent b29b464 commit 3b514cf
Showing 69 changed files with 3,435 additions and 2,095 deletions.
53 changes: 35 additions & 18 deletions elf/color/palette.py
@@ -1,5 +1,6 @@
import os
import glob
from typing import Optional, Tuple

import numpy as np
try:
@@ -9,28 +10,37 @@
glasey_impl = None


def glasbey(n_ids, base_palette_name, overwrite_base_palette=False,
no_black=True, lightness_range=None, chroma_range=None,
hue_range=None):
""" Compute glasbey palette for maximally distant colors.
def glasbey(
n_ids: int,
base_palette_name: str,
overwrite_base_palette: bool = False,
no_black: bool = True,
lightness_range: Optional[Tuple] = None,
chroma_range: Optional[Tuple] = None,
hue_range: Optional[Tuple] = None,
) -> np.ndarray:
"""Compute glasbey palette for maximally distant colors.
Wrapper around https://github.com/taketwo/glasbey, based on
"Glasbey et al. Colour Displays for Categorical Images."
Arguments:
n_ids [int] - number of ids, corresponding to entries in the palette.
base_palette_name [str] - name of the base palette.
overwrite_base_palette [bool] -
no_black [bool] -
lightness_range [tuple] -
chroma_range [tuple] -
hue_range [tuple] -
Args:
n_ids: Number of ids, corresponding to entries in the palette.
base_palette_name: Name of the base palette.
overwrite_base_palette: Argument for glasbey functionality.
no_black: Argument for glasbey functionality.
lightness_range: Argument for glasbey functionality.
chroma_range: Argument for glasbey functionality.
hue_range: Argument for glasbey functionality.
Returns:
The colortable.
"""
if glasey_impl is None:
raise ImportError("Glasbey module is not available")

palette_folder = os.path.join(GLASBEY_FOLDER, 'palettes')
palettes = glob.glob(os.path.join(palette_folder, '*.txt'))
palette_folder = os.path.join(GLASBEY_FOLDER, "palettes")
palettes = glob.glob(os.path.join(palette_folder, "*.txt"))
palettes = {os.path.splitext(os.path.split(name)[1])[0]: name for name in palettes}
if base_palette_name not in palettes:
palette_names = list(palettes.keys())
@@ -44,10 +54,17 @@ def glasbey(n_ids, base_palette_name, overwrite_base_palette=False,
hue_range=hue_range)
new_palette = gb.generate_palette(size=n_ids)
new_palette = gb.convert_palette_to_rgb(new_palette)
return np.array(new_palette, dtype='uint8')
return np.array(new_palette, dtype="uint8")


def random_colors(n_ids):
    """ Get random colortable."""
def random_colors(n_ids: int) -> np.ndarray:
    """Get random colortable.
    Args:
        n_ids: Number of ids, corresponding to entries in the palette.
    Returns:
        The colortable.
    """
shape = (n_ids, 3)
return np.random.randint(0, 255, size=shape, dtype='uint8')
return np.random.randint(0, 255, size=shape, dtype="uint8")
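For orientation, a short usage sketch of the two documented functions. This is not part of the commit; it assumes the functions are importable from elf.color.palette as the file path above suggests, and that the optional glasbey dependency (plus one of its bundled base palettes) is available for glasbey(). The base palette name used below is a placeholder.

import numpy as np
from elf.color.palette import glasbey, random_colors

# Random colortable: one uint8 RGB triplet per id.
colors = random_colors(n_ids=100)
assert colors.shape == (100, 3) and colors.dtype == np.dtype("uint8")

# Glasbey palette for maximally distinct colors. The base palette name must match
# one of the *.txt files in the glasbey package's "palettes" folder.
# palette = glasbey(n_ids=100, base_palette_name="<name-of-a-base-palette>")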
49 changes: 29 additions & 20 deletions elf/evaluation/cremi_score.py
@@ -1,46 +1,55 @@
from typing import Optional, Sequence, Tuple

import numpy as np

from .util import compute_ignore_mask, contigency_table
from .rand_index import compute_rand_scores
from .variation_of_information import compute_vi_scores


def cremi_score(segmentation, groundtruth, ignore_seg=None, ignore_gt=None):
""" Computes cremi scores between two segmentations
def cremi_score(
segmentation: np.ndarray,
groundtruth: np.ndarray,
ignore_seg: Optional[Sequence[int]] = None,
ignore_gt: Optional[Sequence[int]] = None,
) -> Tuple[float, float, float, float]:
"""Compute cremi score of two segmentations.
This score was used as the evaluation metric for the CREMI challenge.
It is defined as the geometric mean of the variation of information and the adapted rand score.
Args:
segmentation: Candidate segmentation to evaluate.
groundtruth: Groundtruth segmentation.
ignore_seg: Ignore ids for the segmentation.
ignore_gt: Ignore ids for the groundtruth.
Arguments:
segmentation [np.ndarray] - candidate segmentation to evaluate
groundtruth [np.ndarray] - groundtruth
ignore_seg [listlike] - ignore ids for segmentation (default: None)
ignore_gt [listlike] - ignore ids for groundtruth (default: None)
Retuns:
float - vi-split
float - vi-merge
float - adapted rand error
float - cremi score
The variation of information split score.
The variation of information merge score.
The adapted rand error.
The cremi score.
"""

ignore_mask = compute_ignore_mask(segmentation, groundtruth,
ignore_seg, ignore_gt)
ignore_mask = compute_ignore_mask(segmentation, groundtruth, ignore_seg, ignore_gt)
if ignore_mask is not None:
segmentation = segmentation[ignore_mask]
groundtruth = groundtruth[ignore_mask]
else:
# if we don't have a mask, we need to make sure the segmentations are
segmentation = segmentation.ravel()
groundtruth = groundtruth.ravel()

# compute ids, counts and overlaps making up the contigency table
# Compute ids, counts and overlaps making up the contigency table.
a_dict, b_dict, p_ids, p_counts = contigency_table(groundtruth, segmentation)
n_points = segmentation.size

# compute vi scores
vis, vim = compute_vi_scores(a_dict, b_dict, p_ids, p_counts, n_points,
use_log2=True)
# Compute VI scores.
vis, vim = compute_vi_scores(a_dict, b_dict, p_ids, p_counts, n_points, use_log2=True)

# compute and rand scores
# Compute rand score.
ari, _ = compute_rand_scores(a_dict, b_dict, p_counts, n_points)

# compute the cremi score = geometric mean of voi and ari
# Compute the cremi score = geometric mean of voi and ari.
cs = np.sqrt(ari * (vis + vim))

return vis, vim, ari, cs
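A minimal usage sketch for the updated signature. This is not part of the commit; it assumes cremi_score is importable from elf.evaluation.cremi_score and uses small random label arrays as stand-in data.

import numpy as np
from elf.evaluation.cremi_score import cremi_score

seg = np.random.randint(1, 6, size=(64, 64))  # candidate segmentation
gt = np.random.randint(1, 6, size=(64, 64))   # groundtruth labels

# Returns vi-split, vi-merge, the adapted rand error, and the cremi score
# (the geometric mean of the variation of information and the adapted rand error).
vi_split, vi_merge, adapted_rand, score = cremi_score(seg, gt)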
45 changes: 27 additions & 18 deletions elf/evaluation/dice.py
@@ -1,26 +1,31 @@
from typing import Optional

import numpy as np
import nifty.ground_truth as ngt

# implementations based on:
# https://github.com/kreshuklab/sparse-object-embeddings/blob/master/pytorch3dunet/clustering/sbd.py


def dice_score(segmentation, groundtruth, threshold_seg=0, threshold_gt=0):
""" Compute the dice score between binarized segmentation and ground-truth.
def dice_score(
segmentation: np.ndarray,
groundtruth: np.ndarray,
threshold_seg: Optional[float] = 0,
threshold_gt: Optional[float] = 0,
) -> float:
"""Compute the dice score between binarized segmentation and ground-truth.
Note: for comparing probaility maps (i.e. predictions in range [0, 1]) with this function
For comparing probaility maps (i.e. predictions in range [0, 1]) with this function
you need to set the thresholds to None. Otherwise the results will be wrong.
Arguments:
segmentation [np.ndarray] - candidate segmentation to evaluate
groundtruth [np.ndarray] - groundtruth
threshold_seg [float] - the threshold applied to the segmentation.
If None the segmentation is not thresholded.
threshold_gt [float] - the threshold applied to the ground-truth.
If None the ground-truth is not thresholded.
Args:
segmentation: Candidate segmentation to evaluate.
groundtruth: Groundtruth segmentation.
threshold_seg: The threshold applied to the segmentation. If None, the segmentation is not thresholded.
threshold_gt: The threshold applied to the ground-truth. If None, the groundtruth is not thresholded.
Returns:
float - the dice score
The dice score.
"""
assert segmentation.shape == groundtruth.shape, f"{segmentation.shape}, {groundtruth.shape}"
if threshold_seg is None:
@@ -93,18 +98,22 @@ def _best_dice_nifty(gt, seg, average_scores=True):
return dice_scores


def symmetric_best_dice_score(segmentation, groundtruth, impl="nifty"):
""" Compute the best symmetric dice score between the objects in the groundtruth and segmentation.
def symmetric_best_dice_score(
segmentation: np.ndarray,
groundtruth: np.ndarray,
impl: str = "nifty",
) -> float:
"""Compute the best symmetric dice score between the objects in the groundtruth and segmentation.
This metric is used in the CVPPP instance segmentation challenge.
Arguments:
segmentation [np.ndarray] - candidate segmentation to evaluate
groundtruth [np.ndarray] - groundtruth
impl [str] - implementation used to compute the best dice score (default: "nifty")
Args:
segmentation: Candidate segmentation to evaluate.
groundtruth: Groundtruth segmentation.
impl: Implementation used to compute the best dice score. The available implementations are 'nifty' and 'numpy'.
Returns:
float - the best symmetric dice score
The best symmetric dice score.
"""
assert impl in ("nifty", "numpy")
best_dice = _best_dice_nifty if impl == "nifty" else _best_dice_numpy
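As a quick illustration of the two documented functions. This is not part of the commit; it assumes they are importable from elf.evaluation.dice and uses random arrays as stand-in data.

import numpy as np
from elf.evaluation.dice import dice_score, symmetric_best_dice_score

# Binary foreground comparison; with the default thresholds (0) both inputs are binarized.
pred = (np.random.rand(64, 64) > 0.5).astype("uint8")
gt = (np.random.rand(64, 64) > 0.5).astype("uint8")
score = dice_score(pred, gt)

# Instance-level comparison with the symmetric best dice score.
seg_instances = np.random.randint(0, 5, size=(64, 64))
gt_instances = np.random.randint(0, 5, size=(64, 64))
sbd = symmetric_best_dice_score(seg_instances, gt_instances, impl="nifty")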
82 changes: 59 additions & 23 deletions elf/evaluation/matching.py
@@ -1,11 +1,13 @@
from typing import Optional, Tuple
from typing import Dict, List, Optional, Tuple, Union

import numpy as np
from scipy.optimize import linear_sum_assignment
from .util import contigency_table


def intersection_over_union(overlap):
"""@private
"""
if np.sum(overlap) == 0:
return overlap
n_pixels_pred = np.sum(overlap, axis=0, keepdims=True)
@@ -15,13 +17,17 @@ def intersection_over_union(overlap):


def intersection_over_true(overlap):
"""@private
"""
if np.sum(overlap) == 0:
return overlap
n_pixels_true = np.sum(overlap, axis=1, keepdims=True)
return overlap / n_pixels_true


def intersection_over_pred(overlap):
"""@private
"""
if np.sum(overlap) == 0:
return overlap
n_pixels_pred = np.sum(overlap, axis=0, keepdims=True)
@@ -31,22 +37,32 @@ def intersection_over_pred(overlap):
MATCHING_CRITERIA = {"iou": intersection_over_union,
"iot": intersection_over_true,
"iop": intersection_over_pred}
"""@private
"""


def precision(tp, fp, fn):
"""@private
"""
return tp/(tp+fp) if tp > 0 else 0


def recall(tp, fp, fn):
"""@private
"""
return tp/(tp+fn) if tp > 0 else 0


def segmentation_accuracy(tp, fp, fn):
"""@private
"""
# -> https://www.kaggle.com/c/data-science-bowl-2018#evaluation
return tp/(tp+fp+fn) if tp > 0 else 0


def f1(tp, fp, fn):
"""@private
"""
return (2*tp)/(2*tp+fp+fn) if tp > 0 else 0


@@ -58,9 +74,9 @@ def label_overlap(
"""Compute the number of overlapping elements for objects in two label images.
Args:
seg_a: candidate segmentation to evaluate
seg_b: candidate segmentation to compare to
ignore_label: overlap of any objects with this label are not
seg_a: Candidate segmentation to evaluate.
seg_b: Segmentation to compare to.
ignore_label: Overlap of any objects with this label are not
taken into account in the output. `None` indicates that no label
should be ignored. It is assumed that the `ignore_label` has the
same meaning in both segmentations.
@@ -132,21 +148,30 @@ def _compute_tps(scores, n_matched, threshold):
return tp


def matching(segmentation, groundtruth, threshold=0.5, criterion="iou", ignore_label=0):
""" Scores from matching objects in segmentation and groundtruth.
def matching(
segmentation: np.ndarray,
groundtruth: np.ndarray,
threshold: float = 0.5,
criterion: str = "iou",
ignore_label: int = 0,
) -> Dict[str, float]:
"""Compute scores from matching objects in segmentation and groundtruth.
Implementation based on:
https://github.com/mpicbg-csbd/stardist/blob/master/stardist/matching.py
Arguments:
segmentation [np.ndarray] - candidate segmentation to evaluate
groundtruth [np.ndarray] - groundtruth segmentation
threshold [float] - overlap threshold (default: 0.5)
criterion [str] - matching criterion. Can be one of "iou", "iop", "iot". (default: "iou")
ignore_label [int] - overlap of any objects with this label are not
Args:
segmentation: Candidate segmentation to evaluate.
groundtruth: Groundtruth segmentation.
threshold: Overlap threshold.
criterion: Matching criterion. Can be one of "iou", "iop", "iot".
ignore_label: Overlap of any objects with this label are not
taken into account in the output. `None` indicates that no label
should be ignored. It is assumed that the `ignore_label` has the
same meaning in both segmentations.
Returns:
Mapping of the names for different metrics to their respective scores.
"""

n_true, n_matched, n_pred, scores = _compute_scores(segmentation, groundtruth, criterion, ignore_label)
@@ -160,22 +185,33 @@ def matching(segmentation, groundtruth, threshold=0.5, criterion="iou", ignore_l
return stats


def mean_segmentation_accuracy(segmentation, groundtruth, thresholds=None, return_accuracies=False, ignore_label=0):
"""This implements the segmentation accuracy metrics from PascalVoc.
See https://link.springer.com/article/10.1007/s11263-009-0275-4
def mean_segmentation_accuracy(
segmentation: np.ndarray,
groundtruth: np.ndarray,
thresholds: Optional[List[float]] = None,
return_accuracies: bool = False,
ignore_label: int = 0,
) -> Union[float, Tuple[float, List[float]]]:
"""Compute the mean segmentation accuracy metrics for comparing two segmentation results.
The implementation follows the DSB 2018 Nucelus Segmentation Challenge.
This metric was introduced in the PascalVoc Challenge:
https://link.springer.com/article/10.1007/s11263-009-0275-4
The implementation used here follows the DSB 2018 Nucelus Segmentation Challenge.
Arguments:
segmentation [np.ndarray] - candidate segmentation to evaluate
groundtruth [np.ndarray] - groundtruth segmentation
thresholds [sequence of floats] - overlap thresholds,
by default np.arange(0.5, 1., 0.05) is used (default: None)
return_aps [bool] - whether to return intermediate aps (default: false)
ignore_label [int] - overlap of any objects with this label are not
Args:
segmentation: Candidate segmentation to evaluate.
groundtruth: Groundtruth segmentation.
thresholds: Overlap thresholds, by default np.arange(0.5, 1., 0.05) is used.
return_accuracies: Whether to return intermediate scores.
ignore_label: Overlap of any objects with this label are not
taken into account in the output. `None` indicates that no label
should be ignored. It is assumed that the `ignore_label` has the
same meaning in both segmentations.
Returns:
The mean segmentation accuracy score.
The segmentation accuracies for the individual overlap thresholds.
Only returned if return_accuracies is set to True.
"""
n_true, n_matched, n_pred, scores = _compute_scores(
segmentation, groundtruth, criterion="iou", ignore_label=ignore_label
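A brief usage sketch for the two public functions in this file. This is not part of the commit; it assumes they are importable from elf.evaluation.matching and uses random label images as stand-in data.

import numpy as np
from elf.evaluation.matching import matching, mean_segmentation_accuracy

seg = np.random.randint(0, 10, size=(128, 128))
gt = np.random.randint(0, 10, size=(128, 128))

# Matching-based scores (precision, recall, f1, ...) at a single overlap threshold.
stats = matching(seg, gt, threshold=0.5, criterion="iou", ignore_label=0)

# Mean segmentation accuracy over the default thresholds 0.5, 0.55, ..., 0.95;
# with return_accuracies=True the per-threshold scores are returned as well.
msa, accuracies = mean_segmentation_accuracy(seg, gt, return_accuracies=True)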