Start centralizing docstring entries #318

Open · wants to merge 1 commit into main
194 changes: 194 additions & 0 deletions aslprep/utils/doc.py
@@ -0,0 +1,194 @@
# -*- coding: utf-8 -*-
"""Functions related to the documentation.

docdict contains the standard documentation entries used across ASLPrep.

Source: Eric Larson and the MNE-Python team.
https://github.com/mne-tools/mne-python/blob/main/mne/utils/docs.py
"""
import sys

###################################
# Standard documentation entries
#
docdict = dict()

docdict[
"omp_nthreads"
] = """
omp_nthreads : :obj:`int`
Maximum number of threads an individual process may use.
"""

docdict[
"mem_gb"
] = """
mem_gb : :obj:`float`
Memory limit, in gigabytes.
"""

docdict[
"output_dir"
] = """
output_dir : :obj:`str`
Path to the output directory for ``aslprep`` derivatives.
This should not include the ``aslprep`` folder.
For example, "/path/to/dset/derivatives/".
"""

docdict[
"work_dir"
] = """
work_dir : :obj:`str`
Directory in which to store workflow execution state and temporary files.
"""

docdict[
"analysis_level"
] = """
analysis_level : {"participant"}
The analysis level for ``aslprep``. Must be specified as "participant" since ASLPrep
performs analyses at the participant level.
"""

docdict[
"basil"
] = """
basil : :obj:`bool`
    Run BASIL, the FSL tool for computing CBF with spatial regularization and partial volume correction.
BASIL will not be run if the ASL file only contains pre-calculated CBF images.
"""

docdict[
"scorescrub"
] = """
scorescrub : :obj:`bool`
Run SCORE and SCRUB, Sudipto Dolui's algorithms for denoising CBF.
SCORE and SCRUB will not be run if the ASL file is short (i.e., if the GE workflow is used).
"""

docdict[
"m0_scale"
] = """
m0_scale : :obj:`float`, optional
Relative scale between ASL (delta-M) and M0 volumes.
The M0 volumes will be multiplied by ``m0_scale`` when CBF is calculated.
The default is 1 (no scaling).
"""

docdict[
"smooth_kernel"
] = """
smooth_kernel : :obj:`float`
Kernel size for smoothing M0.
"""

docdict[
"processing_target"
] = """
processing_target : {"controllabel", "deltam", "cbf"}
    The target image type from the ASL file to process.
"""

docdict[
"dummy_vols"
] = """
dummy_vols : :obj:`int`
Number of label-control volume pairs to delete before CBF computation.
"""

docdict[
"name"
] = """
name : :obj:`str`, optional
Name of the workflow. This is used for working directories and workflow graphs.
"""

docdict[
"aslcontext"
] = """
aslcontext : :obj:`str`
Path to the ASL context file.
"""

docdict[
"name_source"
] = """
name_source : :obj:`str`
Path to the raw ASL file. Used as the base name for derivatives.
"""

docdict_indented = {}


def _indentcount_lines(lines):
"""Minimum indent for all lines in line list.

>>> lines = [' one', ' two', ' three']
>>> _indentcount_lines(lines)
1
>>> lines = []
>>> _indentcount_lines(lines)
0
>>> lines = [' one']
>>> _indentcount_lines(lines)
1
>>> _indentcount_lines([' '])
0

"""
indentno = sys.maxsize
for line in lines:
stripped = line.lstrip()
if stripped:
indentno = min(indentno, len(line) - len(stripped))
if indentno == sys.maxsize:
return 0
return indentno


def fill_doc(f):
"""Fill a docstring with docdict entries.

Parameters
----------
f : callable
The function to fill the docstring of. Will be modified in place.

Returns
-------
f : callable
The function, potentially with an updated ``__doc__``.

"""
docstring = f.__doc__
if not docstring:
return f
lines = docstring.splitlines()
# Find the minimum indent of the main docstring, after first line
if len(lines) < 2:
icount = 0
else:
icount = _indentcount_lines(lines[1:])
# Insert this indent to dictionary docstrings
try:
indented = docdict_indented[icount]
except KeyError:
indent = " " * icount
docdict_indented[icount] = indented = {}
for name, dstr in docdict.items():
lines = dstr.splitlines()
try:
newlines = [lines[0]]
for line in lines[1:]:
newlines.append(indent + line)
indented[name] = "\n".join(newlines)
except IndexError:
indented[name] = dstr
try:
f.__doc__ = docstring % indented
except (TypeError, ValueError, KeyError) as exp:
funcname = f.__name__
funcname = docstring.split("\n")[0] if funcname is None else funcname
        raise RuntimeError(f"Error documenting {funcname}:\n{exp}") from exp
return f
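For context, a minimal usage sketch of the mechanism above (not part of this diff): a function embeds %(key)s placeholders in its docstring and is decorated with fill_doc, which splices in the matching docdict entries at the docstring's own indentation. The init_example_wf function below is hypothetical.

from aslprep.utils.doc import fill_doc


@fill_doc
def init_example_wf(omp_nthreads, mem_gb, name="example_wf"):
    """Build an example workflow (hypothetical).

    Parameters
    ----------
    %(omp_nthreads)s
    %(mem_gb)s
    %(name)s
    """


# After decoration, the placeholders are replaced with the docdict text:
print(init_example_wf.__doc__)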
12 changes: 12 additions & 0 deletions aslprep/utils/misc.py
@@ -137,6 +137,18 @@ def _select_last_in_list(lst):
return lst[-1]


def _pick_gm(files):
    """Select the gray-matter probability map from a [GM, WM, CSF] TPM list."""
    return files[0]


def _pick_wm(files):
    """Select the white-matter probability map from a [GM, WM, CSF] TPM list."""
    return files[1]


def _pick_csf(files):
    """Select the CSF probability map from a [GM, WM, CSF] TPM list."""
    return files[2]
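# Illustrative note (not part of this diff): the three helpers above assume
# tissue-probability maps arrive ordered as [GM, WM, CSF]. The file names in
# this sketch are hypothetical:
#
#     tpms = [
#         "sub-01_label-GM_probseg.nii.gz",
#         "sub-01_label-WM_probseg.nii.gz",
#         "sub-01_label-CSF_probseg.nii.gz",
#     ]
#     _pick_wm(tpms)  # -> "sub-01_label-WM_probseg.nii.gz"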


def _conditional_downsampling(in_file, in_mask, zoom_th=4.0):
"""Downsample the input dataset for sloppy mode."""
from pathlib import Path
20 changes: 7 additions & 13 deletions aslprep/workflows/asl/base.py
@@ -96,7 +96,7 @@ def init_asl_preproc_wf(asl_file):
mean_cbf_score_t1
mean score cbf in T1w space
mean_cbf_scrub_t1, mean_cbf_gm_basil_t1, mean_cbf_basil_t1
-        scrub, parital volume corrected and basil cbf in T1w space
+        scrub, partial volume corrected, and basil cbf in T1w space
cbf_ts_std
        cbf time series in template space
mean_cbf_std
@@ -607,9 +607,7 @@ def init_asl_preproc_wf(asl_file):
(asl_reg_wf, syn_unwarp_report_wf, [
("outputnode.anat_to_aslref_xfm", "inputnode.in_xfm"),
]),
-        (asl_sdc_wf, syn_unwarp_report_wf, [
-            ("outputnode.syn_ref", "inputnode.in_post"),
-        ]),
+        (asl_sdc_wf, syn_unwarp_report_wf, [("outputnode.syn_ref", "inputnode.in_post")]),
])
# fmt:on

@@ -707,7 +705,9 @@ def init_asl_preproc_wf(asl_file):

# NOTE: Can this be bundled into the ASL-T1w transform workflow?
aslmask_to_t1w = pe.Node(
-        ApplyTransforms(interpolation="MultiLabel"), name="aslmask_to_t1w", mem_gb=0.1
+        ApplyTransforms(interpolation="MultiLabel"),
+        name="aslmask_to_t1w",
+        mem_gb=0.1,
)

# fmt:off
@@ -763,11 +763,7 @@ def init_asl_preproc_wf(asl_file):
# fmt:on

# For GE data, asl-asl, asl-T1, and asl-std should all have "identity" for HMC/SDC.
-    # fmt:off
-    workflow.connect([
-        (asl_split, asl_std_trans_wf, [("out_files", "inputnode.asl_split")]),
-    ])
-    # fmt:on
+    workflow.connect([(asl_split, asl_std_trans_wf, [("out_files", "inputnode.asl_split")])])

# asl_derivatives_wf internally parametrizes over snapshotted spaces.
for cbf_deriv in cbf_derivs:
@@ -821,9 +817,7 @@ def init_asl_preproc_wf(asl_file):
for cbf_deriv in cbf_derivs:
# fmt:off
workflow.connect([
-            (compute_cbf_wf, plot_cbf_wf, [
-                (f"outputnode.{cbf_deriv}", f"inputnode.{cbf_deriv}"),
-            ]),
+            (compute_cbf_wf, plot_cbf_wf, [(f"outputnode.{cbf_deriv}", f"inputnode.{cbf_deriv}")]),
])
# fmt:on
