From 035b2cd6c3c7e8fbef37a45e8a0ae5c410b18cdf Mon Sep 17 00:00:00 2001
From: Matt Einhorn
Date: Tue, 17 Jun 2025 15:04:02 -0400
Subject: [PATCH 1/2] Update args and docs from cellfinder.

---
 benchmarks/benchmarks/cellfinder.py           | 102 +++++++------
 brainglobe_workflows/brainmapper/main.py      |  58 ++++----
 brainglobe_workflows/brainmapper/parser.py    | 137 ++++++++++++++----
 brainglobe_workflows/cellfinder/cellfinder.py |  70 +++++----
 brainglobe_workflows/configs/cellfinder.json  |   9 +-
 5 files changed, 242 insertions(+), 134 deletions(-)

diff --git a/benchmarks/benchmarks/cellfinder.py b/benchmarks/benchmarks/cellfinder.py
index b392fd3e..ee4bc99d 100644
--- a/benchmarks/benchmarks/cellfinder.py
+++ b/benchmarks/benchmarks/cellfinder.py
@@ -204,29 +204,34 @@ def setup(self):

     def time_cellfinder_run(self):
         cellfinder_run(
-            self.signal_array,
-            self.background_array,
-            self.cfg.voxel_sizes,
-            self.cfg.start_plane,
-            self.cfg.end_plane,
-            self.cfg.trained_model,
-            self.cfg.model_weights,
-            self.cfg.model,
-            self.cfg.batch_size,
-            self.cfg.n_free_cpus,
-            self.cfg.network_voxel_sizes,
-            self.cfg.soma_diameter,
-            self.cfg.ball_xy_size,
-            self.cfg.ball_z_size,
-            self.cfg.ball_overlap_fraction,
-            self.cfg.log_sigma_size,
-            self.cfg.n_sds_above_mean_thresh,
-            self.cfg.soma_spread_factor,
-            self.cfg.max_cluster_size,
-            self.cfg.cube_width,
-            self.cfg.cube_height,
-            self.cfg.cube_depth,
-            self.cfg.network_depth,
+            signal_array=self.signal_array,
+            background_array=self.background_array,
+            voxel_sizes=self.cfg.voxel_sizes,
+            start_plane=self.cfg.start_plane,
+            end_plane=self.cfg.end_plane,
+            trained_model=self.cfg.trained_model,
+            model_weights=self.cfg.model_weights,
+            model=self.cfg.model,
+            classification_batch_size=self.cfg.classification_batch_size,
+            n_free_cpus=self.cfg.n_free_cpus,
+            network_voxel_sizes=self.cfg.network_voxel_sizes,
+            soma_diameter=self.cfg.soma_diameter,
+            ball_xy_size=self.cfg.ball_xy_size,
+            ball_z_size=self.cfg.ball_z_size,
+            ball_overlap_fraction=self.cfg.ball_overlap_fraction,
+            log_sigma_size=self.cfg.log_sigma_size,
+            n_sds_above_mean_thresh=self.cfg.n_sds_above_mean_thresh,
+            n_sds_above_mean_tiled_thresh=self.cfg.n_sds_above_mean_tiled_thresh,
+            tiled_thresh_tile_size=self.cfg.tiled_thresh_tile_size,
+            soma_spread_factor=self.cfg.soma_spread_factor,
+            max_cluster_size=self.cfg.max_cluster_size,
+            cube_width=self.cfg.cube_width,
+            cube_height=self.cfg.cube_height,
+            cube_depth=self.cfg.cube_depth,
+            network_depth=self.cfg.network_depth,
+            detection_batch_size=self.cfg.detection_batch_size,
+            torch_device=self.cfg.torch_device,
+            pin_memory=self.cfg.pin_memory,
         )


@@ -244,29 +249,34 @@ def setup(self):

         # detect cells
         self.detected_cells = cellfinder_run(
-            self.signal_array,
-            self.background_array,
-            self.cfg.voxel_sizes,
-            self.cfg.start_plane,
-            self.cfg.end_plane,
-            self.cfg.trained_model,
-            self.cfg.model_weights,
-            self.cfg.model,
-            self.cfg.batch_size,
-            self.cfg.n_free_cpus,
-            self.cfg.network_voxel_sizes,
-            self.cfg.soma_diameter,
-            self.cfg.ball_xy_size,
-            self.cfg.ball_z_size,
-            self.cfg.ball_overlap_fraction,
-            self.cfg.log_sigma_size,
-            self.cfg.n_sds_above_mean_thresh,
-            self.cfg.soma_spread_factor,
-            self.cfg.max_cluster_size,
-            self.cfg.cube_width,
-            self.cfg.cube_height,
-            self.cfg.cube_depth,
-            self.cfg.network_depth,
+            signal_array=self.signal_array,
+            background_array=self.background_array,
+            voxel_sizes=self.cfg.voxel_sizes,
+            start_plane=self.cfg.start_plane,
+            end_plane=self.cfg.end_plane,
+            trained_model=self.cfg.trained_model,
+            model_weights=self.cfg.model_weights,
+            model=self.cfg.model,
+            classification_batch_size=self.cfg.classification_batch_size,
+            n_free_cpus=self.cfg.n_free_cpus,
+            network_voxel_sizes=self.cfg.network_voxel_sizes,
+            soma_diameter=self.cfg.soma_diameter,
+            ball_xy_size=self.cfg.ball_xy_size,
+            ball_z_size=self.cfg.ball_z_size,
+            ball_overlap_fraction=self.cfg.ball_overlap_fraction,
+            log_sigma_size=self.cfg.log_sigma_size,
+            n_sds_above_mean_thresh=self.cfg.n_sds_above_mean_thresh,
+            n_sds_above_mean_tiled_thresh=self.cfg.n_sds_above_mean_tiled_thresh,
+            tiled_thresh_tile_size=self.cfg.tiled_thresh_tile_size,
+            soma_spread_factor=self.cfg.soma_spread_factor,
+            max_cluster_size=self.cfg.max_cluster_size,
+            cube_width=self.cfg.cube_width,
+            cube_height=self.cfg.cube_height,
+            cube_depth=self.cfg.cube_depth,
+            network_depth=self.cfg.network_depth,
+            detection_batch_size=self.cfg.detection_batch_size,
+            torch_device=self.cfg.torch_device,
+            pin_memory=self.cfg.pin_memory,
         )

     def time_save_cells(self):
diff --git a/brainglobe_workflows/brainmapper/main.py b/brainglobe_workflows/brainmapper/main.py
index 89b600f9..fcd868fa 100644
--- a/brainglobe_workflows/brainmapper/main.py
+++ b/brainglobe_workflows/brainmapper/main.py
@@ -120,21 +120,26 @@ def run_all(args, what_to_run, atlas):
     )

     points = detect.main(
-        signal_array,
-        args.start_plane,
-        args.end_plane,
-        args.voxel_sizes,
-        args.soma_diameter,
-        args.max_cluster_size,
-        args.ball_xy_size,
-        args.ball_z_size,
-        args.ball_overlap_fraction,
-        args.soma_spread_factor,
-        args.n_free_cpus,
-        args.log_sigma_size,
-        args.n_sds_above_mean_thresh,
+        signal_array=signal_array,
+        start_plane=args.start_plane,
+        end_plane=args.end_plane,
+        voxel_sizes=args.voxel_sizes,
+        soma_diameter=args.soma_diameter,
+        max_cluster_size=args.max_cluster_size,
+        ball_xy_size=args.ball_xy_size,
+        ball_z_size=args.ball_z_size,
+        ball_overlap_fraction=args.ball_overlap_fraction,
+        soma_spread_factor=args.soma_spread_factor,
+        n_free_cpus=args.n_free_cpus,
+        log_sigma_size=args.log_sigma_size,
+        n_sds_above_mean_thresh=args.n_sds_above_mean_thresh,
+        n_sds_above_mean_tiled_thresh=args.n_sds_above_mean_tiled_thresh,
+        tiled_thresh_tile_size=args.tiled_thresh_tile_size,
         save_planes=args.save_planes,
         plane_directory=args.plane_directory,
+        batch_size=args.detection_batch_size,
+        torch_device=args.torch_device,
+        pin_memory=args.pin_memory,
     )

     ensure_directory_exists(args.paths.points_directory)
@@ -166,19 +171,20 @@ def run_all(args, what_to_run, atlas):
     background_array = read_z_stack(args.background_planes_path[0])

     points = classify.main(
-        points,
-        signal_array,
-        background_array,
-        args.n_free_cpus,
-        args.voxel_sizes,
-        args.network_voxel_sizes,
-        args.batch_size,
-        args.cube_height,
-        args.cube_width,
-        args.cube_depth,
-        args.trained_model,
-        model_weights,
-        args.network_depth,
+        points=points,
+        signal_array=signal_array,
+        background_array=background_array,
+        n_free_cpus=args.n_free_cpus,
+        voxel_sizes=args.voxel_sizes,
+        network_voxel_sizes=args.network_voxel_sizes,
+        batch_size=args.classification_batch_size,
+        cube_height=args.cube_height,
+        cube_width=args.cube_width,
+        cube_depth=args.cube_depth,
+        trained_model=args.trained_model,
+        model_weights=model_weights,
+        network_depth=args.network_depth,
+        pin_memory=args.pin_memory,
     )
     save_cells(
         points,
diff --git a/brainglobe_workflows/brainmapper/parser.py b/brainglobe_workflows/brainmapper/parser.py
index 68154b77..be75bf4f 100644
--- a/brainglobe_workflows/brainmapper/parser.py
+++ b/brainglobe_workflows/brainmapper/parser.py
@@ -10,11 +10,13 @@
     ArgumentParser,
     ArgumentTypeError,
 )
+from functools import partial

 from brainglobe_utils.general.numerical import (
     check_positive_float,
     check_positive_int,
 )
+from brainglobe_utils.general.string import check_str
 from brainreg.core.cli import atlas_parse, geometry_parser, niftyreg_parse
 from brainreg.core.cli import backend_parse as brainreg_backend_parse
 from cellfinder.core.download.cli import download_parser
@@ -85,7 +87,7 @@ def main_parse(parser):
         nargs="+",
         required=True,
         help="Path to the directory of the signal files. Can also be a text"
-        "file pointing to the files.",
+        "file pointing to the files. For a 3d tiff, data is in z, y, x order.",
     )
     main_parser.add_argument(
         "-b",
@@ -95,7 +97,8 @@ def main_parse(parser):
         nargs=1,
         required=True,
         help="Path to the directory of the background files. Can also be a "
-        "text file pointing to the files.",
+        "text file pointing to the files. For a 3d tiff, data is in z, y, x "
+        "order.",
     )
     main_parser.add_argument(
         "-o",
@@ -138,14 +141,16 @@ def pixel_parser(parser):
         "--voxel-sizes",
         dest="voxel_sizes",
         required=True,
-        nargs="+",
-        help="Voxel sizes in microns, in the order of data orientation. "
-        "e.g. '5 2 2'",
+        nargs=3,
+        type=partial(check_positive_float, none_allowed=False),
+        help="Voxel sizes in microns, in the order of data orientation "
+        "(z, y, x). E.g. '5 2 2'.",
     )
     pixel_opt_parser.add_argument(
         "--network-voxel-sizes",
         dest="network_voxel_sizes",
-        nargs="+",
+        nargs=3,
+        type=partial(check_positive_float, none_allowed=False),
         default=[5, 1, 1],
         help="Voxel sizes in microns that the machine learning network was "
         "trained on, in the order of data orientation. e.g. '5 2 2'."
@@ -200,14 +205,16 @@ def io_parse(parser):
         dest="start_plane",
         type=check_positive_int,
         default=0,
-        help="The first plane to process in the Z dimension.",
+        help="The index of the first plane to process in the Z dimension "
+        "(inclusive), for processing a subset of the data.",
     )
     io_parser.add_argument(
         "--end-plane",
         dest="end_plane",
         type=int,
         default=-1,
-        help="The last plane to process in the Z dimension.",
+        help="The index of the last plane to process in the Z dimension "
+        "(exclusive), for processing a subset of the data.",
     )

     return parser
@@ -244,8 +251,9 @@ def cell_detect_parse(parser):
         dest="max_cluster_size",
         type=check_positive_int,
         default=100000,
-        help="Largest putative cell cluster (in cubic um) where "
-        "splitting should be attempted",
+        help="Largest detected cell cluster (in cubic um) where splitting "
+        "should be attempted. Clusters above this size will be labeled as "
+        "artifacts.",
     )

     cell_detect_parser.add_argument(
@@ -253,7 +261,7 @@ def cell_detect_parse(parser):
         dest="soma_diameter",
         type=check_positive_float,
         default=16,
-        help="The expected soma size in um in the x/y dimensions",
+        help="The expected in-plane (xy) soma diameter (microns).",
     )

     cell_detect_parser.add_argument(
@@ -261,24 +269,22 @@ def cell_detect_parse(parser):
         dest="ball_xy_size",
         type=check_positive_int,
         default=6,
-        help="The size in um of the ball used "
-        "for the morphological filter in the x/y dimensions",
+        help="The 3d filter's in-plane (xy) ball size (microns).",
     )
     cell_detect_parser.add_argument(
         "--ball-z-size",
         dest="ball_z_size",
         type=check_positive_int,
         default=15,
-        help="The size in um of the ball used "
-        "for the morphological filter in the z dimension",
+        help="The 3d filter's axial (z) ball size (microns).",
     )
     cell_detect_parser.add_argument(
         "--ball-overlap-fraction",
         dest="ball_overlap_fraction",
         type=check_positive_float,
         default=0.6,
-        help="The fraction of the ball that has to cover thresholded pixels "
-        "for the centre pixel to be considered a nucleus pixel",
+        help="The fraction of the 3d ball filter, centered on a voxel, that "
+        "must be filled by foreground voxels for the voxel to be retained.",
     )

     cell_detect_parser.add_argument(
@@ -286,24 +292,63 @@ def cell_detect_parse(parser):
         dest="log_sigma_size",
         type=check_positive_float,
         default=0.2,
-        help="The filter size used in the Laplacian of Gaussian filter to "
-        "enhance the cell intensities. Given as a fraction of the "
-        "soma-diameter.",
+        help="Gaussian filter width (as a fraction of soma diameter) used "
+        "during 2d in-plane Laplacian of Gaussian filtering.",
     )
     cell_detect_parser.add_argument(
         "--threshold",
         dest="n_sds_above_mean_thresh",
-        type=check_positive_float,
+        type=float,
+        default=10,
+        help="Per-plane intensity threshold (the number of standard "
+        "deviations above the mean) of the filtered 2d planes used to mark "
+        "pixels as foreground or background.",
+    )
+    cell_detect_parser.add_argument(
+        "--tiled-threshold",
+        dest="n_sds_above_mean_tiled_thresh",
+        type=float,
         default=10,
-        help="The cell threshold, in multiples of the standard deviation"
-        "above the mean",
+        help="Per-plane, per-tile intensity threshold (the number of "
+        "standard deviations above the mean) of the filtered 2d planes used "
+        "to mark pixels as foreground or background. When enabled (tile "
+        "size is non-zero), a pixel is marked as foreground only if its "
+        "intensity is above both the per-plane and the per-tile threshold, "
+        "i.e. above the set number of standard deviations of both the "
+        "per-plane average and the average of the tile that contains it.",
+    )
+    cell_detect_parser.add_argument(
+        "--tiled-threshold-tile-size",
+        dest="tiled_thresh_tile_size",
+        type=check_positive_float,
+        default=None,
+        help="The tile size used to tile the x, y plane to calculate the "
+        "local average intensity for the tiled threshold, as a multiple of "
+        "the soma diameter (1 means one soma diameter). If zero or None, "
+        "the tiled threshold is disabled and only the per-plane threshold "
+        "is used. Tiling is done with 50%% overlap when striding.",
     )
     cell_detect_parser.add_argument(
         "--soma-spread-factor",
         dest="soma_spread_factor",
         type=check_positive_float,
         default=1.4,
-        help="Soma size spread factor (for splitting up cell clusters)",
+        help="Cell spread factor for determining the largest cell volume "
+        "before splitting up cell clusters. Structures with a spherical "
+        "volume of diameter `soma_spread_factor * soma_diameter` or less "
+        "will not be split.",
+    )
+    cell_detect_parser.add_argument(
+        "--detection-batch-size",
+        dest="detection_batch_size",
+        type=check_positive_int,
+        default=None,
+        help="The number of planes of the original data volume to process at "
+        "once. When None (the default), it is 1 for the GPU and 4 for the "
+        "CPU. The GPU/CPU memory must be able to contain this many planes "
+        "for all the filters. For performance-critical applications, tune to "
+        "maximize memory usage without running out. Check your GPU/CPU memory"
+        " to verify it's not full.",
     )

     return parser
@@ -334,11 +379,21 @@ def classification_parse(parser):
     )
     classification_parser.add_argument(
         "--batch-size",
-        dest="batch_size",
+        dest="classification_batch_size",
+        type=check_positive_int,
+        default=64,
+        help="Deprecated. Use --classification-batch-size instead.",
+    )
+    classification_parser.add_argument(
+        "--classification-batch-size",
+        dest="classification_batch_size",
         type=check_positive_int,
         default=64,
-        help="Batch size for classification. Can be adjusted depending on "
-        "GPU memory.",
+        help="How many potential cells to classify at one time. The GPU/CPU "
+        "memory must be able to contain this many data cubes at once for "
+        "the models. For performance-critical applications, tune to maximize "
+        "memory usage without running out. Check your GPU/CPU memory to "
+        "verify it's not full.",
     )

     return parser
@@ -350,21 +405,24 @@ def cube_extract_parse(parser):
         dest="cube_width",
         type=check_positive_int,
         default=50,
-        help="The width of the cubes to extract (must be even)",
+        help="The width of the data cube centered on the cell, used for "
+        "classification.",
     )
     cube_extract_parser.add_argument(
         "--cube-height",
         dest="cube_height",
         type=check_positive_int,
         default=50,
-        help="The height of the cubes to extract (must be even)",
+        help="The height of the data cube centered on the cell, used "
+        "for classification.",
     )
     cube_extract_parser.add_argument(
         "--cube-depth",
         dest="cube_depth",
         type=check_positive_int,
         default=20,
-        help="The depth (z) of the cubes to extract",
+        help="The depth of the data cube centered on the cell, used for "
+        "classification.",
     )
     cube_extract_parser.add_argument(
         "--save-empty-cubes",
@@ -421,6 +479,25 @@ def misc_parse(parser):
         help="The number of CPU cores on the machine to leave "
         "unused by the program to spare resources.",
     )
+    misc_parser.add_argument(
+        "--torch-device",
+        dest="torch_device",
+        type=check_str,
+        default=None,
+        help="The device on which to run the computation. If not specified "
+        "(None), 'cuda' is used if a GPU is available, otherwise 'cpu'. "
+        "You can also specify 'cuda' or 'cpu' manually.",
+    )
+    misc_parser.add_argument(
+        "--pin-memory",
+        dest="pin_memory",
+        action="store_true",
+        help="Pin the data to be sent to the GPU in page-locked CPU "
+        "memory, allowing faster transfers to the GPU. Only use it if the "
+        "data the GPU works on can also stay resident in CPU RAM while "
+        "the GPU uses it, i.e. there's enough RAM. If the RAM risks being "
+        "paged out, it shouldn't be used. Defaults to False.",
+    )
     misc_parser.add_argument(
         "--max-ram",
         dest="max_ram",
diff --git a/brainglobe_workflows/cellfinder/cellfinder.py b/brainglobe_workflows/cellfinder/cellfinder.py
index 14dbe7d3..f1c6af7b 100644
--- a/brainglobe_workflows/cellfinder/cellfinder.py
+++ b/brainglobe_workflows/cellfinder/cellfinder.py
@@ -56,21 +56,26 @@ class CellfinderConfig:
     trained_model: Optional[os.PathLike]
     model_weights: Optional[os.PathLike]
     model: str
-    batch_size: int
+    classification_batch_size: int
     n_free_cpus: int
-    network_voxel_sizes: tuple[int, int, int]
-    soma_diameter: int
-    ball_xy_size: int
-    ball_z_size: int
+    network_voxel_sizes: tuple[float, float, float]
+    soma_diameter: float
+    ball_xy_size: float
+    ball_z_size: float
     ball_overlap_fraction: float
     log_sigma_size: float
-    n_sds_above_mean_thresh: int
+    n_sds_above_mean_thresh: float
+    n_sds_above_mean_tiled_thresh: float
+    tiled_thresh_tile_size: float | None
     soma_spread_factor: float
-    max_cluster_size: int
+    max_cluster_size: float
     cube_width: int
     cube_height: int
     cube_depth: int
     network_depth: depth_type
+    detection_batch_size: Optional[int]
+    torch_device: Optional[str]
+    pin_memory: bool

     # Optional parameters

@@ -369,29 +374,34 @@ def run_workflow_from_cellfinder_run(cfg: CellfinderConfig):

     # Run main analysis using `cellfinder_run`
     detected_cells = cellfinder_run(
-        signal_array,
-        background_array,
-        cfg.voxel_sizes,
-        cfg.start_plane,
-        cfg.end_plane,
-        cfg.trained_model,
-        cfg.model_weights,
-        cfg.model,
-        cfg.batch_size,
-        cfg.n_free_cpus,
-        cfg.network_voxel_sizes,
-        cfg.soma_diameter,
-        cfg.ball_xy_size,
-        cfg.ball_z_size,
-        cfg.ball_overlap_fraction,
-        cfg.log_sigma_size,
-        cfg.n_sds_above_mean_thresh,
-        cfg.soma_spread_factor,
-        cfg.max_cluster_size,
-        cfg.cube_width,
-        cfg.cube_height,
-        cfg.cube_depth,
-        cfg.network_depth,
+        signal_array=signal_array,
+        background_array=background_array,
+        voxel_sizes=cfg.voxel_sizes,
+        start_plane=cfg.start_plane,
+        end_plane=cfg.end_plane,
+        trained_model=cfg.trained_model,
+        model_weights=cfg.model_weights,
+        model=cfg.model,
+        classification_batch_size=cfg.classification_batch_size,
+        n_free_cpus=cfg.n_free_cpus,
+        network_voxel_sizes=cfg.network_voxel_sizes,
+        soma_diameter=cfg.soma_diameter,
+        ball_xy_size=cfg.ball_xy_size,
+        ball_z_size=cfg.ball_z_size,
+        ball_overlap_fraction=cfg.ball_overlap_fraction,
+        log_sigma_size=cfg.log_sigma_size,
+        n_sds_above_mean_thresh=cfg.n_sds_above_mean_thresh,
+        n_sds_above_mean_tiled_thresh=cfg.n_sds_above_mean_tiled_thresh,
+        tiled_thresh_tile_size=cfg.tiled_thresh_tile_size,
+        soma_spread_factor=cfg.soma_spread_factor,
+        max_cluster_size=cfg.max_cluster_size,
+        cube_width=cfg.cube_width,
+        cube_height=cfg.cube_height,
+        cube_depth=cfg.cube_depth,
+        network_depth=cfg.network_depth,
+        detection_batch_size=cfg.detection_batch_size,
+        torch_device=cfg.torch_device,
+        pin_memory=cfg.pin_memory,
     )

     # Save results to xml file
diff --git a/brainglobe_workflows/configs/cellfinder.json b/brainglobe_workflows/configs/cellfinder.json
index e977271c..37dfc8e6 100644
--- a/brainglobe_workflows/configs/cellfinder.json
+++ b/brainglobe_workflows/configs/cellfinder.json
@@ -11,7 +11,7 @@
     "trained_model": null,
     "model_weights": null,
     "model": "resnet50_tv",
-    "batch_size": 32,
+    "classification_batch_size": 32,
     "n_free_cpus": 2,
     "network_voxel_sizes": [
         5,
@@ -24,10 +24,15 @@
     "ball_overlap_fraction": 0.6,
     "log_sigma_size": 0.2,
     "n_sds_above_mean_thresh": 10,
+    "n_sds_above_mean_tiled_thresh": 10,
+    "tiled_thresh_tile_size": null,
     "soma_spread_factor": 1.4,
"max_cluster_size": 100000, "cube_width": 50, "cube_height": 50, "cube_depth": 20, - "network_depth": "50" + "network_depth": "50", + "detection_batch_size": null, + "torch_device": null, + "pin_memory": false } From 035e85b7be63128f0e322e314d9eda8f255ee562 Mon Sep 17 00:00:00 2001 From: Matt Einhorn Date: Fri, 26 Sep 2025 21:50:19 -0400 Subject: [PATCH 2/2] Add new args to cli. --- benchmarks/benchmarks/cellfinder.py | 8 +++++ brainglobe_workflows/brainmapper/main.py | 4 +++ brainglobe_workflows/brainmapper/parser.py | 36 +++++++++++++++++++ brainglobe_workflows/cellfinder/cellfinder.py | 8 +++++ brainglobe_workflows/configs/cellfinder.json | 6 +++- 5 files changed, 61 insertions(+), 1 deletion(-) diff --git a/benchmarks/benchmarks/cellfinder.py b/benchmarks/benchmarks/cellfinder.py index ee4bc99d..54d7beb5 100644 --- a/benchmarks/benchmarks/cellfinder.py +++ b/benchmarks/benchmarks/cellfinder.py @@ -232,6 +232,10 @@ def time_cellfinder_run(self): detection_batch_size=self.cfg.detection_batch_size, torch_device=self.cfg.torch_device, pin_memory=self.cfg.pin_memory, + normalize_channels=self.cfg.normalize_channels, + normalization_down_sampling=self.cfg.normalization_down_sampling, + detect_centre_of_intensity=self.cfg.detect_centre_of_intensity, + classification_max_workers=self.cfg.classification_max_workers, ) @@ -277,6 +281,10 @@ def setup(self): detection_batch_size=self.cfg.detection_batch_size, torch_device=self.cfg.torch_device, pin_memory=self.cfg.pin_memory, + normalize_channels=self.cfg.normalize_channels, + normalization_down_sampling=self.cfg.normalization_down_sampling, + detect_centre_of_intensity=self.cfg.detect_centre_of_intensity, + classification_max_workers=self.cfg.classification_max_workers, ) def time_save_cells(self): diff --git a/brainglobe_workflows/brainmapper/main.py b/brainglobe_workflows/brainmapper/main.py index fcd868fa..b62921b4 100644 --- a/brainglobe_workflows/brainmapper/main.py +++ b/brainglobe_workflows/brainmapper/main.py @@ -140,6 +140,7 @@ def run_all(args, what_to_run, atlas): batch_size=args.detection_batch_size, torch_device=args.torch_device, pin_memory=args.pin_memory, + detect_centre_of_intensity=args.detect_centre_of_intensity, ) ensure_directory_exists(args.paths.points_directory) @@ -185,6 +186,9 @@ def run_all(args, what_to_run, atlas): model_weights=model_weights, network_depth=args.network_depth, pin_memory=args.pin_memory, + normalize_channels=args.normalize_channels, + normalization_down_sampling=args.normalization_down_sampling, + max_workers=args.classification_max_workers, ) save_cells( points, diff --git a/brainglobe_workflows/brainmapper/parser.py b/brainglobe_workflows/brainmapper/parser.py index be75bf4f..6c6a7882 100644 --- a/brainglobe_workflows/brainmapper/parser.py +++ b/brainglobe_workflows/brainmapper/parser.py @@ -350,6 +350,17 @@ def cell_detect_parse(parser): "maximize memory usage without running out. Check your GPU/CPU memory" " to verify it's not full.", ) + cell_detect_parser.add_argument( + "--detect-coi", + dest="detect_centre_of_intensity", + action="store_true", + help="If False, a candidate cell's center is just the mean of the " + "positions of all voxels marked as above background, or bright, in " + "that candidate. The voxel intensity is not taken into account. If " + "True, the center is calculated similar to the center of mass, but " + "using the intensity. 
From 035e85b7be63128f0e322e314d9eda8f255ee562 Mon Sep 17 00:00:00 2001
From: Matt Einhorn
Date: Fri, 26 Sep 2025 21:50:19 -0400
Subject: [PATCH 2/2] Add new args to cli.

---
 benchmarks/benchmarks/cellfinder.py           |  8 +++++
 brainglobe_workflows/brainmapper/main.py      |  4 +++
 brainglobe_workflows/brainmapper/parser.py    | 36 +++++++++++++++++++
 brainglobe_workflows/cellfinder/cellfinder.py |  8 +++++
 brainglobe_workflows/configs/cellfinder.json  |  6 +++-
 5 files changed, 61 insertions(+), 1 deletion(-)

diff --git a/benchmarks/benchmarks/cellfinder.py b/benchmarks/benchmarks/cellfinder.py
index ee4bc99d..54d7beb5 100644
--- a/benchmarks/benchmarks/cellfinder.py
+++ b/benchmarks/benchmarks/cellfinder.py
@@ -232,6 +232,10 @@ def time_cellfinder_run(self):
             detection_batch_size=self.cfg.detection_batch_size,
             torch_device=self.cfg.torch_device,
             pin_memory=self.cfg.pin_memory,
+            normalize_channels=self.cfg.normalize_channels,
+            normalization_down_sampling=self.cfg.normalization_down_sampling,
+            detect_centre_of_intensity=self.cfg.detect_centre_of_intensity,
+            classification_max_workers=self.cfg.classification_max_workers,
         )


@@ -277,6 +281,10 @@ def setup(self):
             detection_batch_size=self.cfg.detection_batch_size,
             torch_device=self.cfg.torch_device,
             pin_memory=self.cfg.pin_memory,
+            normalize_channels=self.cfg.normalize_channels,
+            normalization_down_sampling=self.cfg.normalization_down_sampling,
+            detect_centre_of_intensity=self.cfg.detect_centre_of_intensity,
+            classification_max_workers=self.cfg.classification_max_workers,
         )

     def time_save_cells(self):
diff --git a/brainglobe_workflows/brainmapper/main.py b/brainglobe_workflows/brainmapper/main.py
index fcd868fa..b62921b4 100644
--- a/brainglobe_workflows/brainmapper/main.py
+++ b/brainglobe_workflows/brainmapper/main.py
@@ -140,6 +140,7 @@ def run_all(args, what_to_run, atlas):
         batch_size=args.detection_batch_size,
         torch_device=args.torch_device,
         pin_memory=args.pin_memory,
+        detect_centre_of_intensity=args.detect_centre_of_intensity,
     )

     ensure_directory_exists(args.paths.points_directory)
@@ -185,6 +186,9 @@ def run_all(args, what_to_run, atlas):
         model_weights=model_weights,
         network_depth=args.network_depth,
         pin_memory=args.pin_memory,
+        normalize_channels=args.normalize_channels,
+        normalization_down_sampling=args.normalization_down_sampling,
+        max_workers=args.classification_max_workers,
     )
     save_cells(
         points,
diff --git a/brainglobe_workflows/brainmapper/parser.py b/brainglobe_workflows/brainmapper/parser.py
index be75bf4f..6c6a7882 100644
--- a/brainglobe_workflows/brainmapper/parser.py
+++ b/brainglobe_workflows/brainmapper/parser.py
@@ -350,6 +350,17 @@ def cell_detect_parse(parser):
         "maximize memory usage without running out. Check your GPU/CPU memory"
         " to verify it's not full.",
     )
+    cell_detect_parser.add_argument(
+        "--detect-coi",
+        dest="detect_centre_of_intensity",
+        action="store_true",
+        help="If not passed, a candidate cell's centre is the plain mean of "
+        "the positions of all voxels marked as above background (bright) in "
+        "that candidate; voxel intensity is not taken into account. If "
+        "passed, the centre is computed like a centre of mass, weighted by "
+        "intensity, so the centre gets pulled towards the brighter voxels "
+        "in the volume.",
+    )

     return parser

@@ -395,6 +406,32 @@ def classification_parse(parser):
         "memory usage without running out. Check your GPU/CPU memory to "
         "verify it's not full.",
     )
+    classification_parser.add_argument(
+        "--norm-channels",
+        dest="normalize_channels",
+        action="store_true",
+        help="For classification only - whether to normalize the cubes to "
+        "the mean/std of the image channels before classification. If the "
+        "model used for classification was trained on normalized data, this "
+        "should be enabled.",
+    )
+    classification_parser.add_argument(
+        "--norm-sampling",
+        dest="normalization_down_sampling",
+        type=check_positive_int,
+        default=32,
+        help="If normalizing the cubes is enabled, the input channels will "
+        "be down-sampled in z by this value before calculating their "
+        "mean/std. E.g. a value of 2 means every second z plane is used.",
+    )
+    classification_parser.add_argument(
+        "--classification-max-workers",
+        dest="classification_max_workers",
+        type=check_positive_int,
+        default=6,
+        help="The max number of sub-processes to use for data loading and "
+        "processing during classification.",
+    )

     return parser

diff --git a/brainglobe_workflows/cellfinder/cellfinder.py b/brainglobe_workflows/cellfinder/cellfinder.py
index f1c6af7b..3b74e4aa 100644
--- a/brainglobe_workflows/cellfinder/cellfinder.py
+++ b/brainglobe_workflows/cellfinder/cellfinder.py
@@ -76,6 +76,10 @@ class CellfinderConfig:
     detection_batch_size: Optional[int]
     torch_device: Optional[str]
     pin_memory: bool
+    normalize_channels: bool = False
+    normalization_down_sampling: int = 32
+    detect_centre_of_intensity: bool = False
+    classification_max_workers: int = 6

     # Optional parameters

@@ -402,6 +406,10 @@ def run_workflow_from_cellfinder_run(cfg: CellfinderConfig):
         detection_batch_size=cfg.detection_batch_size,
         torch_device=cfg.torch_device,
         pin_memory=cfg.pin_memory,
+        normalize_channels=cfg.normalize_channels,
+        normalization_down_sampling=cfg.normalization_down_sampling,
+        detect_centre_of_intensity=cfg.detect_centre_of_intensity,
+        classification_max_workers=cfg.classification_max_workers,
     )

     # Save results to xml file
diff --git a/brainglobe_workflows/configs/cellfinder.json b/brainglobe_workflows/configs/cellfinder.json
index 37dfc8e6..0cd2ff05 100644
--- a/brainglobe_workflows/configs/cellfinder.json
+++ b/brainglobe_workflows/configs/cellfinder.json
@@ -34,5 +34,9 @@
     "network_depth": "50",
     "detection_batch_size": null,
     "torch_device": null,
-    "pin_memory": false
+    "pin_memory": false,
+    "normalize_channels": false,
+    "normalization_down_sampling": 32,
+    "detect_centre_of_intensity": false,
+    "classification_max_workers": 6
 }
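
[ Reviewer note, not part of the patch: with both patches applied, a script
  could exercise the new arguments roughly as below. A minimal sketch under
  stated assumptions: the import path is assumed (not shown in this series),
  the array shapes are made up, and the values are simply the defaults these
  patches document.

    import numpy as np
    from cellfinder.core.main import main as cellfinder_run  # path assumed

    signal = np.random.random((30, 510, 667))  # z, y, x order
    background = np.random.random((30, 510, 667))

    cells = cellfinder_run(
        signal_array=signal,
        background_array=background,
        voxel_sizes=(5, 2, 2),  # z, y, x, in microns
        n_sds_above_mean_thresh=10,
        n_sds_above_mean_tiled_thresh=10,
        tiled_thresh_tile_size=None,  # None/0 disables the tiled threshold
        detection_batch_size=None,  # None: 1 plane on GPU, 4 on CPU
        torch_device=None,  # None: "cuda" if available, else "cpu"
        pin_memory=False,
        normalize_channels=False,
        normalization_down_sampling=32,
        detect_centre_of_intensity=False,
        classification_max_workers=6,
    )
]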