diff --git a/Dockerfile b/Dockerfile index 3ffd263..69ec60a 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,32 +1,37 @@ # Use an image with pre-built ANTs included -FROM gdevenyi/magetbrain-bids-ants:21d7c12ee1e332827b04848eb5f70f55d14cac23 +FROM gdevenyi/magetbrain-bids-ants:82dcdd647211004f3220e4073ea4daf06fdf89f9 RUN apt-get update \ - && apt-get install --auto-remove --no-install-recommends -y parallel \ + && apt-get install --auto-remove --no-install-recommends -y parallel git curl gzip bzip2 gnupg2 unzip coreutils ca-certificates \ && rm -rf /var/lib/apt/lists/* -RUN apt-get update \ - && apt-get install -y --no-install-recommends --auto-remove git curl unzip bzip2 \ - && curl -o anaconda.sh https://repo.continuum.io/miniconda/Miniconda3-latest-Linux-x86_64.sh \ - && bash anaconda.sh -b -p /opt/anaconda && rm -f anaconda.sh \ - && git clone https://github.com/CobraLab/antsRegistration-MAGeT.git /opt/antsRegistration-MAGeT \ - && (cd /opt/antsRegistration-MAGeT && git checkout tags/v0.2.2.1) \ - && curl -o /opt/atlases-nifti.zip -sL http://cobralab.net/files/atlases-nifti.zip \ +RUN curl --insecure -o anaconda.sh https://repo.continuum.io/miniconda/Miniconda3-latest-Linux-x86_64.sh \ + && bash anaconda.sh -b -p /opt/anaconda && rm -f anaconda.sh + +RUN curl -o /opt/atlases-nifti.zip -sL http://cobralab.net/files/atlases-nifti.zip \ && mkdir /opt/atlases-nifti \ && unzip /opt/atlases-nifti.zip -d /opt \ && curl -sL http://cobralab.net/files/brains_t1_nifti.tar.bz2 | tar xvj -C /opt/atlases-nifti \ && curl -o /opt/atlases-nifti/colin.zip -sL http://packages.bic.mni.mcgill.ca/mni-models/colin27/mni_colin27_1998_nifti.zip \ && mkdir /opt/atlases-nifti/colin && unzip /opt/atlases-nifti/colin.zip -d /opt/atlases-nifti/colin && rm -f /opt/atlases-nifti/colin.zip \ - && curl -sL https://deb.nodesource.com/setup_4.x | bash - \ - && apt-get install -y nodejs \ - && apt-get purge --auto-remove -y curl unzip bzip2 \ + && gzip /opt/atlases-nifti/colin/colin27_t1_tal_lin.nii 
+ +RUN curl --insecure -sL https://deb.nodesource.com/setup_10.x | bash - \ + && apt-get install -y --no-install-recommends --auto-remove nodejs \ && rm -rf /var/lib/apt/lists/* ENV CONDA_PATH "/opt/anaconda" -RUN /opt/anaconda/bin/pip install git+https://github.com/pipitone/qbatch.git@aade5b9a17c5a5a2fe6b28267b3bca10b05a5936 +RUN /opt/anaconda/bin/conda config --append channels conda-forge +RUN /opt/anaconda/bin/conda install -y numpy scipy nibabel pandas +RUN /opt/anaconda/bin/pip install future six +RUN /opt/anaconda/bin/pip install duecredit +RUN /opt/anaconda/bin/pip install pybids +RUN npm install -g bids-validator@0.26.18 --unsafe-perm -RUN npm install -g bids-validator@0.26.4 +RUN git clone https://github.com/CobraLab/antsRegistration-MAGeT.git /opt/antsRegistration-MAGeT && \ + (cd /opt/antsRegistration-MAGeT && git checkout tags/v0.3.1) +RUN /opt/anaconda/bin/pip install git+https://github.com/pipitone/qbatch.git@951dd1bdfdcbb5fd3f27ee6a3e261eaecac1ef70 ENV PATH /opt/ANTs/bin:/opt/anaconda/bin:/opt/antsRegistration-MAGeT/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin ENV QBATCH_SYSTEM local diff --git a/README.md b/README.md index 94987de..a1b92c3 100644 --- a/README.md +++ b/README.md @@ -1,14 +1,14 @@ ## MAGeTbrain segmentation pipeline ### Description -This pipeline takes in native-space T1 or T2 (or multiple co-registered modalities) brain images and volumetrically segments -them using the MAGeTbrain algorithm. +This pipeline takes in native-space T1 brain images and volumetrically segments +them using the MAGeTbrain algorithm using a variety of input atlases. ### Documentation -Provide a link to the documention of your pipeline. +https://github.com/cobralab/antsRegistration-MAGet. ### How to report errors -Provide instructions for users on how to get help and report errors. 
+Please open an issue at https://github.com/BIDS-Apps/MAGeTbrain/issues ### Acknowledgements Describe how would you would like users to acknowledge use of your App in their papers (citation, a paragraph that can be copy pasted, etc.) @@ -21,7 +21,7 @@ usage: run.py [-h] [--participant_label PARTICIPANT_LABEL [PARTICIPANT_LABEL ...]] [--segmentation_type {amygdala,cerebellum,hippocampus-whitematter,colin27-subcortical,all}] [-v] [--n_cpus N_CPUS] [--fast] [--label-masking] [--no-cleanup] - bids_dir output_dir {participant1,participant2,group} + bids_dir output_dir {participant1,participant2} MAGeTbrain BIDS App entrypoint script. @@ -29,16 +29,16 @@ positional arguments: bids_dir The directory with the input dataset formatted according to the BIDS standard. output_dir The directory where the output files should be stored. - When you are running group level analysis this folder + When you are running participant2 level analysis this folder must be prepopulated with the results of - theparticipant level analysis. - {participant1,participant2,group} + the participant1 level analysis. + {participant1,participant2} Level of the analysis that will be performed. Multiple - participant level analyses can be run independently - (in parallel) using the same output_dir. In MAGeTbrain - parlance, participant1 = template stage, partipant2 = - subject stage group = resample + vote + qc stage. The - proper order is participant1, participant2, group + participant{1,2} level analyses can be run + independently (in parallel) using the same output_dir. + In MAGeTbrain parlance, participant1 = template stage, + participant2 = subject + resample + vote + qc stage. The + proper order is participant1, participant2 optional arguments: -h, --help show this help message and exit @@ -53,35 +53,39 @@ optional arguments: The segmentation label type to be used. 
colin27-subcortical, since it is on a different atlas, is not included in the all setting and must be run - seperately + separately -v, --version show program's version number and exit --n_cpus N_CPUS Number of CPUs/cores available to use. --fast Use faster (less accurate) registration calls --label-masking Use the input labels as registration masks to reduce - computation and (possibily) improve registration + computation and (possibly) improve registration --no-cleanup Do no cleanup intermediate files after group phase ``` -To run it in participant level mode (for one participant): +To construct the template library, run the participant1 stage: ```sh docker run -i --rm \ -v /Users/filo/data/ds005:/bids_dataset:ro \ -v /Users/filo/outputs:/outputs \ bids/example \ - /bids_dataset /outputs participant --participant_label 01 + /bids_dataset /outputs participant1 --participant_label 01 ``` -After doing this for all subjects (potentially in parallel), the group level analysis + +After doing this for approximately 21 representative subjects (potentially in parallel), +the subject level labeling can be done: can be run: ```sh docker run -i --rm \ -v /Users/filo/data/ds005:/bids_dataset:ro \ -v /Users/filo/outputs:/outputs \ - bids/example \ - /bids_dataset /outputs group + bids/example /bids_dataset /outputs participant2 --participant_label 01 ``` -### Special considerations -Describe whether your app has any special requirements. For example: +This can also happen in parallel on a per-subject basis -- Multiple map reduce steps (participant, group, participant2, group2 etc.) -- Unusual memory requirements -- etc. 
+### Special considerations +- segmentation_types output directories must be kept separate for each type +- participant1 stages can be run in parallel per subject, approximately 21 +subjects should be selected which are a representative subset of the population +under study +- participant2 stages can also be run in parallel, but must be started after +participant1 stages are complete diff --git a/ants-build/Dockerfile b/ants-build/Dockerfile index 1d7406c..2f28da7 100644 --- a/ants-build/Dockerfile +++ b/ants-build/Dockerfile @@ -1,17 +1,15 @@ # Use phusion/baseimage as base image -FROM phusion/baseimage:0.10.1 - -# Use baseimage-docker's init system. -CMD ["/sbin/my_init"] +FROM ubuntu:latest ENV DEBIAN_FRONTEND noninteractive RUN buildDeps='cmake build-essential git zlib1g-dev' \ && apt-get update \ && apt-get install -y $buildDeps --no-install-recommends \ + && apt-get install -y ca-certificates \ && rm -rf /var/lib/apt/lists/* \ && git clone https://github.com/stnava/ANTs.git /opt/ANTs-src \ - && cd /opt/ANTs-src && git checkout 21d7c12ee1e332827b04848eb5f70f55d14cac23 \ + && cd /opt/ANTs-src && git checkout 82dcdd647211004f3220e4073ea4daf06fdf89f9 \ && mkdir /opt/ANTs-src/build && cd /opt/ANTs-src/build \ && cmake -DCMAKE_LINKER=/usr/bin/gold -DITK_BUILD_MINC_SUPPORT:BOOL=ON \ -DBUILD_TESTING:BOOL=OFF -DRUN_LONG_TESTS:BOOL=OFF -DRUN_SHORT_TESTS:BOOL=OFF \ diff --git a/circle.yml b/circle.yml index d2a8d28..227d9ed 100644 --- a/circle.yml +++ b/circle.yml @@ -1,6 +1,7 @@ general: artifacts: - - "~/outputs" + - "~/outputs-colin" + - "~/outputs-colin-labelmask" machine: services: @@ -20,23 +21,32 @@ dependencies: timeout: 21600 - mkdir -p ~/docker; docker save "bids/${CIRCLE_PROJECT_REPONAME,,}" > ~/docker/image.tar : timeout: 21600 - - mkdir -p ${HOME}/outputs + - mkdir -p ${HOME}/outputs-colin + - mkdir -p ${HOME}/outputs-colin-labelmask test: override: # print version - docker run -ti --rm --read-only -v /tmp:/tmp -v /var/tmp:/var/tmp -v 
${HOME}/data/ds003_downsampled:/bids_dataset bids/${CIRCLE_PROJECT_REPONAME,,} --version - # template level run for downsampled dataset - - docker run -ti --rm --read-only -v /tmp:/tmp -v /var/tmp:/var/tmp -v ${HOME}/data/ds003_downsampled:/bids_dataset -v ${HOME}/outputs:/outputs bids/${CIRCLE_PROJECT_REPONAME,,} --fast --n_cpus 2 --segmentation_type colin27-subcortical /bids_dataset /outputs participant1 --participant_label 01 : + # template level run for downsampled dataset no masking + - docker run -ti --rm --read-only -v /tmp:/tmp -v /var/tmp:/var/tmp -v ${HOME}/data/ds003_downsampled:/bids_dataset -v ${HOME}/outputs-colin:/outputs bids/${CIRCLE_PROJECT_REPONAME,,} --n_cpus 2 --segmentation_type colin27-subcortical /bids_dataset /outputs participant1 --participant_label 01 : timeout: 21600 - - docker run -ti --rm --read-only -v /tmp:/tmp -v /var/tmp:/var/tmp -v ${HOME}/data/ds003_downsampled:/bids_dataset -v ${HOME}/outputs:/outputs bids/${CIRCLE_PROJECT_REPONAME,,} --fast --n_cpus 2 --segmentation_type colin27-subcortical /bids_dataset /outputs participant1 --participant_label 02 : + - docker run -ti --rm --read-only -v /tmp:/tmp -v /var/tmp:/var/tmp -v ${HOME}/data/ds003_downsampled:/bids_dataset -v ${HOME}/outputs-colin:/outputs bids/${CIRCLE_PROJECT_REPONAME,,} --n_cpus 2 --segmentation_type colin27-subcortical /bids_dataset /outputs participant1 --participant_label 02 : timeout: 2160 - # participant level tests for a longitudinal dataset - - docker run -ti --rm --read-only -v /tmp:/tmp -v /var/tmp:/var/tmp -v ${HOME}/data/ds003_downsampled:/bids_dataset -v ${HOME}/outputs:/outputs bids/${CIRCLE_PROJECT_REPONAME,,} --fast --n_cpus 2 --segmentation_type colin27-subcortical /bids_dataset /outputs participant2 --participant_label 01 : + # participant level tests for a longitudinal dataset no masking + - docker run -ti --rm --read-only -v /tmp:/tmp -v /var/tmp:/var/tmp -v ${HOME}/data/ds003_downsampled:/bids_dataset -v ${HOME}/outputs-colin:/outputs 
bids/${CIRCLE_PROJECT_REPONAME,,} --n_cpus 2 --segmentation_type colin27-subcortical /bids_dataset /outputs participant2 --participant_label 01 : timeout: 21600 - - docker run -ti --rm --read-only -v /tmp:/tmp -v /var/tmp:/var/tmp -v ${HOME}/data/ds003_downsampled:/bids_dataset -v ${HOME}/outputs:/outputs bids/${CIRCLE_PROJECT_REPONAME,,} --fast --n_cpus 2 --segmentation_type colin27-subcortical /bids_dataset /outputs participant2 --participant_label 02 : + - docker run -ti --rm --read-only -v /tmp:/tmp -v /var/tmp:/var/tmp -v ${HOME}/data/ds003_downsampled:/bids_dataset -v ${HOME}/outputs-colin:/outputs bids/${CIRCLE_PROJECT_REPONAME,,} --n_cpus 2 --segmentation_type colin27-subcortical /bids_dataset /outputs participant2 --participant_label 02 : timeout: 21600 - - docker run -ti --rm --read-only -v /tmp:/tmp -v /var/tmp:/var/tmp -v ${HOME}/data/ds003_downsampled:/bids_dataset -v ${HOME}/outputs:/outputs bids/${CIRCLE_PROJECT_REPONAME,,} --fast --n_cpus 2 --segmentation_type colin27-subcortical /bids_dataset /outputs group : + # template level run for downsampled dataset with masking + - docker run -ti --rm --read-only -v /tmp:/tmp -v /var/tmp:/var/tmp -v ${HOME}/data/ds003_downsampled:/bids_dataset -v ${HOME}/outputs-colin-labelmask:/outputs bids/${CIRCLE_PROJECT_REPONAME,,} --n_cpus 2 --label-masking --segmentation_type colin27-subcortical /bids_dataset /outputs participant1 --participant_label 01 : + timeout: 21600 + - docker run -ti --rm --read-only -v /tmp:/tmp -v /var/tmp:/var/tmp -v ${HOME}/data/ds003_downsampled:/bids_dataset -v ${HOME}/outputs-colin-labelmask:/outputs bids/${CIRCLE_PROJECT_REPONAME,,} --n_cpus 2 --label-masking --segmentation_type colin27-subcortical /bids_dataset /outputs participant1 --participant_label 02 : + timeout: 2160 + # participant level tests for a longitudinal dataset with masking + - docker run -ti --rm --read-only -v /tmp:/tmp -v /var/tmp:/var/tmp -v ${HOME}/data/ds003_downsampled:/bids_dataset -v 
${HOME}/outputs-colin-labelmask:/outputs bids/${CIRCLE_PROJECT_REPONAME,,} --n_cpus 2 --label-masking --segmentation_type colin27-subcortical /bids_dataset /outputs participant2 --participant_label 01 : + timeout: 21600 + - docker run -ti --rm --read-only -v /tmp:/tmp -v /var/tmp:/var/tmp -v ${HOME}/data/ds003_downsampled:/bids_dataset -v ${HOME}/outputs-colin-labelmask:/outputs bids/${CIRCLE_PROJECT_REPONAME,,} --n_cpus 2 --label-masking --segmentation_type colin27-subcortical /bids_dataset /outputs participant2 --participant_label 02 : timeout: 21600 deployment: diff --git a/run.py b/run.py index fdcd126..0ca8760 100755 --- a/run.py +++ b/run.py @@ -1,10 +1,13 @@ #!/usr/bin/env python import argparse +import errno import os +import shlex import shutil import subprocess from glob import glob -import errno + +from bids.grabbids import BIDSLayout __version__ = open(os.path.join('/version')).read() @@ -23,8 +26,8 @@ def symlink_force(target, link_name): def run(command, env={}): merged_env = os.environ merged_env.update(env) - process = subprocess.Popen(command, stdout=subprocess.PIPE, - stderr=subprocess.STDOUT, shell=True, + process = subprocess.Popen(shlex.split(command), stdout=subprocess.PIPE, + stderr=subprocess.STDOUT, env=merged_env) while True: line = process.stdout.readline() @@ -45,56 +48,88 @@ def run(command, env={}): 'this folder must be prepopulated with the results of the' 'participant level analysis.') parser.add_argument('analysis_level', help='Level of the analysis that will be performed. ' - 'Multiple participant level analyses can be run independently ' + 'Multiple participant{1,2} level analyses can be run independently ' '(in parallel) using the same output_dir. ' - 'In MAGeTbrain parlance, participant1 = template stage, partipant2 = subject stage ' - 'group = resample + vote + qc stage. 
' - 'The proper order is participant1, participant2, group', - choices=['participant1', 'participant2', 'group']) + 'In MAGeTbrain parlance, participant1 = template stage, ' + 'partipant2 = subject + resample + vote + qc stage. ' + 'The proper order is participant1, participant2', + choices=['participant1', 'participant2']) parser.add_argument('--participant_label', help='The label(s) of the participant(s) that should be analyzed. The label ' - 'corresponds to sub- from the BIDS spec ' - '(so it does not include "sub-"). If this parameter is not ' - 'provided all subjects should be analyzed. Multiple ' - 'participants can be specified with a space separated list.', - nargs="+") + 'corresponds to sub- from the BIDS spec ' + '(so it does not include "sub-"). If this parameter is not ' + 'provided all subjects should be analyzed. Multiple ' + 'participants can be specified with a space separated list.', + nargs="+") parser.add_argument('--segmentation_type', help='The segmentation label type to be used.' 
' colin27-subcortical, since it is on a different atlas, is not included ' - 'in the all setting and must be run seperately', + 'in the all setting and must be run separately', choices=['amygdala', 'cerebellum', - 'hippocampus-whitematter', 'colin27-subcortical', 'all'], + 'hippocampus-whitematter', 'colin27-subcortical', 'all'], default='all') parser.add_argument('-v', '--version', action='version', version='MAGeTbrain version {}'.format(__version__)) parser.add_argument('--n_cpus', help='Number of CPUs/cores available to use.', - default=1, type=int) -parser.add_argument('--fast', help='Use faster (less accurate) registration calls', - action='store_true') + default=1, type=int) +parser.add_argument('--fast', help='Use faster (less accurate) registration calls and float' + ' for numerics', + action='store_true') parser.add_argument('--label-masking', help='Use the input labels as registration masks to reduce computation ' - 'and (possibily) improve registration', - action='store_true') -parser.add_argument('--no-cleanup', help='Do no cleanup intermediate files after group phase', - action='store_true') + 'and (possibly) improve registration', + action='store_true') +parser.add_argument('--no-cleanup', help='Do no cleanup intermediate files after participant2 phase', + action='store_true') args = parser.parse_args() # Check validity of bids dataset run('bids-validator {0}'.format(args.bids_dir)) +layout = BIDSLayout(args.bids_dir) + +if args.analysis_level == "participant1" and not args.participant_label: + raise Exception( + "For template level processing subjects must be explicitly specified") + +# Select subjects +subjects_to_analyze = [] +T1w_files = [] + +# only for a subset of subjects +if args.participant_label: + subjects_to_analyze = args.participant_label +# for all subjects +else: + subjects_to_analyze = layout.get_subjects() + +# Convert subjects to T1W files +for subject_label in subjects_to_analyze: + subject_T1w_files = 
layout.get(subject=subject_label, type='T1w', + extensions=['.nii', '.nii.gz'], + return_type='file') + if len(subject_T1w_files) == 0: + raise Exception( + "No T1w files found for participant %s" % subject_label) + else: + # If template phase, limit templates to first timepoint for subjects + if args.analysis_level == "participant1": + T1w_files.append(subject_T1w_files[0]) + else: + T1w_files.extend(subject_T1w_files) # Setup magetbrain inputs os.chdir(args.output_dir) run('mb.sh -- init') -#Link in either colin or the big 5 atlases +# Copy in either colin or the big 5 atlases if args.segmentation_type != 'colin27-subcortical': atlases = glob("/opt/atlases-nifti/brains_t1_nifti/*nii.gz") for atlas in atlases: shutil.copy( atlas, '{0}/input/atlas/{1}'.format(args.output_dir, os.path.basename(atlas))) else: - shutil.copy('/opt/atlases-nifti/colin/colin27_t1_tal_lin.nii', - '{0}/input/atlas/colin27_t1.nii'.format(args.output_dir)) + shutil.copy('/opt/atlases-nifti/colin/colin27_t1_tal_lin.nii.gz', + '{0}/input/atlas/colin27_t1.nii.gz'.format(args.output_dir)) -#Link in the labels selected +# Copy in the labels selected if args.segmentation_type == 'amygdala': labels = glob('/opt/atlases-nifti/amygdala/labels/*.nii.gz') for label in labels: @@ -127,57 +162,43 @@ def run(command, env={}): args.output_dir, os.path.splitext(os.path.splitext(os.path.basename(label))[0])[0][0:-1])) elif args.segmentation_type == 'colin27-subcortical': shutil.copy('/opt/atlases-nifti/colin27-subcortical/labels/thalamus-globus_pallidus-striatum.nii.gz', - '{0}/input/atlas/colin27_label_subcortical.nii.gz'.format(args.output_dir)) + '{0}/input/atlas/colin27_label_subcortical.nii.gz'.format(args.output_dir)) -#Select subjects -subjects_to_analyze = [] -# only for a subset of subjects -if args.participant_label: - subjects_to_analyze = args.participant_label -# for all subjects -else: - subject_dirs = glob(os.path.join(args.bids_dir, "sub-*")) - subjects_to_analyze = [subject_dir.split( - 
"-")[-1] for subject_dir in subject_dirs] - -# running participant level (must be done after template) if args.analysis_level == "participant2": - T1_files = [] - for subject in subjects_to_analyze: - T1_files.append(glob(os.path.join(args.bids_dir, "sub-{0}".format(subject), - "anat", "*_T1w.nii*")) + glob(os.path.join(args.bids_dir, "sub-{0}".format(subject), "ses-*", "anat", "*_T1w.nii*"))) subject_T1_list = [] - for subject_T1s in T1_files: - for session in subject_T1s: - subject_T1_list.append('/{0}/input/subject/{1}'.format(args.output_dir, os.path.basename(session))) - shutil.copy(session, '/{0}/input/subject/{1}'.format(args.output_dir, os.path.basename(session))) - cmd = "QBATCH_PPJ={0} QBATCH_CHUNKSIZE=1 QBATCH_CORES=1 mb.sh {1} {2} -s ".format(args.n_cpus, args.fast and "--reg-command mb_register_fast.sh" or '',args.label_masking and '--label-masking' or '') + " ".join(subject_T1_list) + " -- subject" - run(cmd) + for file in T1w_files: + subject_T1_list.append( + '/{0}/input/subject/{1}'.format(args.output_dir, os.path.basename(file))) + shutil.copy( + file, '/{0}/input/subject/{1}'.format(args.output_dir, os.path.basename(file))) + cmd = 'mb.sh {0} {1} -s '.format( + args.fast and '--fast' or '', args.label_masking and '--label-masking' or '') + cmd += '"' + ' '.join(subject_T1_list) + '"' + \ + ' -- subject resample vote qc' + run(cmd, + env={'QBATCH_PPJ': str(args.n_cpus), + 'QBATCH_CHUNKSIZE': str(1), + 'QBATCH_CORES': str(1)}) + if (not args.no_cleanup): + for file in subject_T1_list: + os.remove(file) + shutil.rmtree("output/transforms/template-subject/" + + os.path.basename(file)) + shutil.rmtree("output/labels/candidates/" + os.path.basename(file)) # running template level preprocessing elif args.analysis_level == "participant1": - template_T1_files = [] - for subject in subjects_to_analyze: - template_T1_files.append(glob(os.path.join(args.bids_dir, "sub-{0}".format(subject), - "anat", "*_T1w.nii*")) + 
glob(os.path.join(args.bids_dir,"sub-{0}".format(subject),"ses-*","anat", "*_T1w.nii*"))) - # Only choose first item for each list, in case of longitudinal data - # limit list to 21 subjects which is the standard max for MAGeTbrain templates template_T1_list = [] - if args.participant_label: - for subject_file in template_T1_files: - shutil.copy(subject_file[0], '/{0}/input/template/{1}'.format(args.output_dir, os.path.basename(subject_file[0]))) - template_T1_list.append('/{0}/input/template/{1}'.format(args.output_dir, os.path.basename(subject_file[0]))) - else: - for subject_file in template_T1_files[0:20]: - shutil.copy(subject_file[0], '/{0}/input/template/{1}'.format(args.output_dir, os.path.basename(subject_file[0]))) - template_T1_list.append('/{0}/input/template/{1}'.format(args.output_dir, os.path.basename(subject_file[0]))) - cmd = "QBATCH_PPJ={0} QBATCH_CHUNKSIZE=1 QBATCH_CORES=1 mb.sh {1} {2} -t ".format(args.n_cpus, args.fast and '--reg-command mb_register_fast.sh' or '',args.label_masking and '--label-masking' or '') + " ".join(template_T1_list) + " -- template" - run(cmd) - -elif args.analysis_level == "group": - cmd = "QBATCH_PPJ={0} QBATCH_CHUNKSIZE=1 QBATCH_CORES=1 mb.sh".format(args.n_cpus) + " -- resample vote qc" - run(cmd) - if (not args.no_cleanup): - run("rm -rf input") - run("rm -rf output/transforms") - run("rm -rf output/labels/candidates") + + for file in T1w_files: + shutil.copy(file, '/{0}/input/template/{1}'.format( + args.output_dir, os.path.basename(file))) + template_T1_list.append( + '/{0}/input/template/{1}'.format(args.output_dir, os.path.basename(file))) + cmd = 'mb.sh {0} {1} -t '.format( + args.fast and '--fast' or '', args.label_masking and '--label-masking' or '') + cmd += r'"' + ' '.join(template_T1_list) + r'"' + ' -- template' + run(cmd, + env={'QBATCH_PPJ': str(args.n_cpus), + 'QBATCH_CHUNKSIZE': str(1), + 'QBATCH_CORES': str(1)}) diff --git a/version b/version index 2af11b0..93270c7 100644 --- a/version +++ b/version 
@@ -1 +1 @@ -2.0+beta2 +3.0+beta1