diff --git a/.gitignore b/.gitignore
index c819473283..9bd05053fd 100644
--- a/.gitignore
+++ b/.gitignore
@@ -22,6 +22,7 @@ build.*
/configure.log
/build.log
/core/version.cpp
+/src/exec_version.cpp
/src/project_version.h
/lib/mrtrix3/_version.py
/test/src/project_version.h
diff --git a/.travis.yml b/.travis.yml
index 91f181522e..5d7daa3818 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -1,30 +1,133 @@
-sudo: false
language: cpp
+cache:
+ apt: true
+ packages: true
+ directories:
+ - $HOME/.cache/pip
addons:
apt:
sources:
- ubuntu-toolchain-r-test
+ - llvm-toolchain-trusty-6.0
packages:
+ - g++-7
+ - clang-6.0
- zlib1g-dev
- libqt4-opengl-dev
- - g++-4.8
- python3
-compiler:
- - clang
-env:
- - py=python2
- - py=python3
-install:
- - export NUMBER_OF_PROCESSORS=4
- - export PATH=`pwd`/release/bin:`pwd`/scripts:${PATH}
- - export EIGEN_CFLAGS=-I`pwd`/../eigen
- - (cd ..; hg clone --insecure https://bitbucket.org/eigen/eigen/; cd eigen; hg update 3.3)
+ - python-pip
+ - python3-pip
+matrix:
+ include:
+ #################################################
+ # Run a single OSX job to test compilation only #
+ #################################################
+ - os: osx
+ env: CFLAGS="-Werror" TRAVIS_CXX=clang++ py=python test=build
+ ###############################################
+ # All subsequent tests are performed on Linux #
+ ###############################################
+ #####################################################################
+ # Build binaries, run tests and check documentation: Clang, Python3 #
+ # (Note: Run this one first, since it'll be the longest job) #
+ #####################################################################
+ - os: linux
+ env: CFLAGS="-Werror" TRAVIS_CXX=clang++-6.0 py=python3 test=run
+ #######################################################################
+ # Build binaries (without optimisation): GCC, Python2 #
+ # (Also ensures both Python 2 and 3 are tested for configure & build) #
+ #######################################################################
+ - os: linux
+ env: CFLAGS="-Werror" TRAVIS_CXX=g++-7 py=python2 test=build
+ #######################################################################
+ # Generate documentation through Sphinx; Use both Python2 and Python3 #
+ #######################################################################
+ - os: linux
+ env: py=python2 test=sphinx
+ - os: linux
+ env: py=python3 test=sphinx
+ ###########################################################
+ # Run the check_syntax script; only needs to be done once #
+ ###########################################################
+ - os: linux
+ env: test=syntax
+ ##############################################
+ # Run PyLint tests; both Python2 and Python3 #
+ ##############################################
+ - os: linux
+ env: py=python2 test=pylint
+ - os: linux
+ env: py=python3 test=pylint
+install:
+ ##################################################
+ # OSX build requires explicit installation of QT #
+ ##################################################
+ - |
+ if [[ "${TRAVIS_OS_NAME}" == "osx" ]]; then
+ brew install qt5
+ brew link --force qt5
+ fi
+ #########################################
+ # Installation requirements for MRtrix3 #
+ #########################################
+ - export NUMBER_OF_PROCESSORS=2
+ - export PATH=`pwd`/bin:${PATH}
+ - export PYTHONPATH=`pwd`/lib:${PYTHONPATH}
+ - |
+ if [[ "${test}" == "sphinx" ]]; then
+ if [[ "${py}" == "python2" ]]; then
+ pip install urllib3[secure];
+ pip install --user recommonmark sphinx sphinx-rtd-theme;
+ else
+ pip3 install --user recommonmark sphinx sphinx-rtd-theme;
+ fi
+ fi
+ - |
+ if [[ "${test}" == "pylint" ]]; then
+ if [[ "${py}" == "python2" ]]; then
+ pip install --user pylint;
+ else
+ pip3 install --user pylint;
+ fi
+ fi
+ - |
+ if [[ "${test}" == "build" || "${test}" == "run" ]]; then
+ export EIGEN_CFLAGS=-I`pwd`/../eigen;
+ (cd ..; hg clone https://bitbucket.org/eigen/eigen/; cd eigen; hg update 3.3);
+ fi
+before_script:
+ #######################################################################################################################
+ # TravisCI sets CXX based on what it thinks the compiler should be; therefore we need to set it as late as possible #
+ # Since this is not needed for the installation section, only for the travis.sh script, moved this to "before_script" #
+ #######################################################################################################################
+ - |
+ if [[ -n "${TRAVIS_CXX}" ]]; then
+ export CXX=${TRAVIS_CXX}
+ fi
script:
- - ./check_memalign && $py ./configure -assert && $py ./build -nowarnings && ./run_tests
-
-after_failure:
- - cat memalign.log
- - cat configure.log
- - cat build.log
- - cat testing.log
+ - ./travis.sh
+before_cache:
+ - rm -f $HOME/.cache/pip/log/debug.log
+after_failure:
+ - |
+ case "${test}" in # CLICK HERE TO FIND REPORTED ERRORS
+ "sphinx")
+ cat sphinx.log
+ ;;
+ "syntax")
+ cat syntax.log
+ ;;
+ "pylint")
+ cat pylint.log
+ ;;
+ "build")
+ cat configure.log
+ cat build.log
+ ;;
+ *)
+ cat configure.log
+ cat build.log
+ cat testing.log
+ cat gitdiff.log
+ esac
- sleep 10
diff --git a/bin/5ttgen b/bin/5ttgen
index 2cf251d5f0..3414b1c889 100755
--- a/bin/5ttgen
+++ b/bin/5ttgen
@@ -11,7 +11,7 @@
# Make the corresponding MRtrix3 Python libraries available
import inspect, os, sys
-lib_folder = os.path.realpath(os.path.abspath(os.path.join(os.path.split(inspect.getfile(inspect.currentframe()))[0], os.pardir, 'lib')))
+lib_folder = os.path.realpath(os.path.join(os.path.dirname(os.path.realpath(inspect.getfile(inspect.currentframe()))), os.pardir, 'lib'))
if not os.path.isdir(lib_folder):
sys.stderr.write('Unable to locate MRtrix3 Python libraries')
sys.exit(1)
@@ -46,10 +46,11 @@ app.gotoTempDir()
alg.execute()
-(stdout,stderr) = run.command('5ttcheck result.mif', False)
-if len(stderr) and 'ERROR' in stderr:
- app.warn('Generated image does not perfectly conform to 5TT format')
+stderr = run.command('5ttcheck result.mif')[1]
+if stderr:
+ app.warn('Generated image does not perfectly conform to 5TT format:')
+  for line in stderr.splitlines():
+ app.warn(line)
-run.command('mrconvert result.mif ' + path.fromUser(app.args.output, True) + (' -force' if app.force else ''))
+run.command('mrconvert result.mif ' + path.fromUser(app.args.output, True) + (' -force' if app.forceOverwrite else ''))
app.complete()
-
diff --git a/bin/average_response b/bin/average_response
index bc1b12875c..e2f1a4504e 100755
--- a/bin/average_response
+++ b/bin/average_response
@@ -1,19 +1,18 @@
#!/usr/bin/env python
import sys
-import string
-def errorMessage (msg):
- print (msg)
+def errorMessage(msg):
+ sys.stderr.write(msg + '\n')
exit(1)
if len(sys.argv) < 3 :
- print("example usage: average_response input_response1.txt input_response2.txt input_response3.txt ... output_average_response.txt")
+ sys.stderr.write("example usage: average_response input_response1.txt input_response2.txt input_response3.txt ... output_average_response.txt\n")
exit(1)
num_bvals = 0
num_coeff = 0
num_subjects = len(sys.argv) - 2
-print ("Number of subjects: " + str(num_subjects))
+sys.stdout.write("Number of subjects: " + str(num_subjects) + '\n')
for i in range(1, num_subjects + 1):
with open(sys.argv[i], 'r') as f:
@@ -23,17 +22,18 @@ for i in range(1, num_subjects + 1):
num_coeff = len(lines[0].split())
for l in lines:
if len(l.split()) != num_coeff:
- errorMessage ("Error in file " + sys.argv[i] + ": multi-shell response functions must have the same number of coefficients per b-value (line)")
+ errorMessage("Error in file " + sys.argv[i] + ": multi-shell response functions must have the same number of coefficients per b-value (line)")
else:
if len(lines) != num_bvals:
- errorMessage ("Error in file " + sys.argv[i] + ": Input files do not contain the same number of b-values")
+ errorMessage("Error in file " + sys.argv[i] + ": Input files do not contain the same number of b-values")
for l in lines:
if len(l.split()) != num_coeff:
- errorMessage ("Error in file " + sys.argv[i] + ": multi-shell response functions must have the same number of coefficients per b-value (line)")
+ errorMessage("Error in file " + sys.argv[i] + ": multi-shell response functions must have the same number of coefficients per b-value (line)")
-print ("Number of b-values: " + str(num_bvals))
+sys.stdout.write("Number of b-values: " + str(num_bvals) + '\n')
-average_response = [[0 for x in range(num_coeff)] for y in range(num_bvals)]
+# Can't use multiplication operator in outer loop since it duplicates by reference rather than value
+average_response = [[0] * num_coeff for _ in range(num_bvals)] #pylint: disable=unused-variable
for i in range(1, num_subjects + 1):
with open(sys.argv[i], 'r') as f:
lines = f.readlines()
@@ -44,7 +44,7 @@ for i in range(1, num_subjects + 1):
f = open(sys.argv[len(sys.argv)-1], 'w')
line_out = ''
-for l in range(num_bvals):
+for l in range(num_bvals):
line_out = ''
for c in range(0, num_coeff):
line_out = line_out + "{0:.5f}".format(average_response[l][c]) + ' '
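A standalone illustration, separate from the patch itself, of the by-reference pitfall noted in the comment added above; this is why average_response now builds the outer list with a comprehension rather than the multiplication operator:

```python
# Multiplying the outer list makes every row an alias of the same inner list,
# whereas the comprehension creates an independent inner list per b-value.
num_bvals, num_coeff = 3, 4

aliased = [[0] * num_coeff] * num_bvals                    # rows share one list object
independent = [[0] * num_coeff for _ in range(num_bvals)]  # rows are distinct lists

aliased[0][0] = 1.0
independent[0][0] = 1.0

print(aliased)      # [[1.0, 0, 0, 0], [1.0, 0, 0, 0], [1.0, 0, 0, 0]]
print(independent)  # [[1.0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]]
```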
diff --git a/bin/blend b/bin/blend
index c0a9dab3c3..5e4b41bcc9 100755
--- a/bin/blend
+++ b/bin/blend
@@ -1,14 +1,12 @@
#!/usr/bin/env python2
import os
import sys
-import string
-import math
if len(sys.argv) <= 1:
- print 'A script to blend two sets of movie frames together with a desired overlap.'
- print 'The input arguments are two folders containing the movie frames (eg. output from the MRview screenshot tool), and the desired number of overlapping frames.'
- print 'eg: blend folder1 folder2 20 output_folder'
- sys.exit(1)
+ sys.stderr.write('A script to blend two sets of movie frames together with a desired overlap.\n')
+ sys.stderr.write('The input arguments are two folders containing the movie frames (eg. output from the MRview screenshot tool), and the desired number of overlapping frames.\n')
+ sys.stderr.write('eg: blend folder1 folder2 20 output_folder\n')
+ sys.exit(1)
input_folder1 = sys.argv[1]
input_folder2 = sys.argv[2]
@@ -18,19 +16,17 @@ num_overlap = int(sys.argv[3])
output_folder = sys.argv[4]
if not os.path.exists(output_folder):
- os.mkdir(output_folder)
-
+ os.mkdir(output_folder)
+
total_num_output_frames = len(file_list1) + len(file_list2) - num_overlap
for i in range(total_num_output_frames):
- file_name = 'frame' + '%0*d' % (5, i) + '.png'
- if (i <= (len(file_list1) - num_overlap)):
- os.system('cp -L ' + input_folder1 + '/' + file_list1[i] + ' ' + output_folder + '/' + file_name)
- if (i > (len(file_list1) - num_overlap)) and (i < (len(file_list1))):
- i2 = i - (len(file_list1) - num_overlap) - 1
- blend_amount = 100 * float(i2 + 1) / float(num_overlap)
- os.system('convert ' + input_folder1 + '/' + file_list1[i] + ' ' + input_folder2 + '/' + file_list2[i2] + ' -alpha on -compose blend -define compose:args=' + str(blend_amount) + ' -gravity South -composite ' + output_folder + '/' + file_name)
- if (i >= (len(file_list1))):
- i2 = i - (len(file_list1) - num_overlap) - 1
- os.system('cp -L ' + input_folder2 + '/' + file_list2[i2] + ' ' + output_folder + '/' + file_name)
-
-
+ file_name = 'frame' + '%0*d' % (5, i) + '.png'
+ if i <= (len(file_list1) - num_overlap):
+ os.system('cp -L ' + input_folder1 + '/' + file_list1[i] + ' ' + output_folder + '/' + file_name)
+ if i > (len(file_list1) - num_overlap) and i < (len(file_list1)):
+ i2 = i - (len(file_list1) - num_overlap) - 1
+ blend_amount = 100 * float(i2 + 1) / float(num_overlap)
+ os.system('convert ' + input_folder1 + '/' + file_list1[i] + ' ' + input_folder2 + '/' + file_list2[i2] + ' -alpha on -compose blend -define compose:args=' + str(blend_amount) + ' -gravity South -composite ' + output_folder + '/' + file_name)
+ if i >= (len(file_list1)):
+ i2 = i - (len(file_list1) - num_overlap) - 1
+ os.system('cp -L ' + input_folder2 + '/' + file_list2[i2] + ' ' + output_folder + '/' + file_name)
diff --git a/bin/convert_bruker b/bin/convert_bruker
index 043890b420..ecd39f9253 100755
--- a/bin/convert_bruker
+++ b/bin/convert_bruker
@@ -3,7 +3,7 @@
import sys, os.path
if len (sys.argv) != 3:
- print ("usage: convert_bruker 2dseq header.mih")
+ sys.stderr.write("usage: convert_bruker 2dseq header.mih\n")
sys.exit (0)
@@ -12,21 +12,21 @@ if len (sys.argv) != 3:
#sys.exit (1)
if not sys.argv[2].endswith ('.mih'):
- print ("expected .mih suffix as the second argument")
+ sys.stderr.write("expected .mih suffix as the second argument\n")
sys.exit (1)
with open (os.path.join (os.path.dirname (sys.argv[1]), 'reco')) as f:
- lines = f.read().split ('##$');
+ lines = f.read().split ('##$')
with open (os.path.join (os.path.dirname (sys.argv[1]), '../../acqp')) as f:
- lines += f.read().split ('##$');
+ lines += f.read().split ('##$')
with open (os.path.join (os.path.dirname (sys.argv[1]), '../../method')) as f:
- lines += f.read().split ('##$');
+ lines += f.read().split ('##$')
for line in lines:
- line = line.lower();
+ line = line.lower()
if line.startswith ('reco_size='):
mat_size = line.splitlines()[1].split()
print ('mat_size', mat_size)
@@ -62,28 +62,28 @@ with open (sys.argv[2], 'w') as f:
f.write (',' + str(mat_size[2]))
else:
try:
- nslices
+ nslices #pylint: disable=pointless-statement
f.write (',' + str(nslices))
except:
pass
-
+
try:
- nacq
+ nacq #pylint: disable=pointless-statement
f.write (',' + str(nacq))
except:
pass
- f.write ('\nvox: ' + str(res[0]) + ',' + str(res[1]));
- if len(res) > 2:
- f.write (',' + str(res[2]));
+ f.write ('\nvox: ' + str(res[0]) + ',' + str(res[1]))
+ if len(res) > 2:
+ f.write (',' + str(res[2]))
else:
try:
- slicethick
+ slicethick #pylint: disable=pointless-statement
f.write (',' + str(slicethick))
except:
pass
try:
- nacq
+ nacq #pylint: disable=pointless-statement
f.write (',')
except:
pass
@@ -101,24 +101,22 @@ with open (sys.argv[2], 'w') as f:
f.write ('\nlayout: +0,+1')
try:
- nslices
+ nslices #pylint: disable=pointless-statement
f.write (',+2')
except:
pass
try:
- nacq
+ nacq #pylint: disable=pointless-statement
f.write (',+3')
except:
pass
- f.write ('\nfile: ' + sys.argv[1] + '\n')
+ f.write ('\nfile: ' + sys.argv[1] + '\n')
try:
- bvec
- bval
- for n in range (0, len (bval)):
- f.write ('dw_scheme: ' + bvec[3*n] + ',' + bvec[3*n+1] + ',' + str(-float(bvec[3*n+2])) + ',' + bval[n] + '\n')
+ assert len(bvec) == 3*len(bval)
+    bvec = [ bvec[n:n+3] for n in range(0,len(bvec),3) ]
+ for direction, value in zip(bvec, bval):
+ f.write ('dw_scheme: ' + direction[0] + ',' + direction[1] + ',' + str(-float(direction[2])) + ',' + value + '\n')
except:
pass
-
-
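A standalone sketch, separate from the patch and using made-up values, of how the rewritten convert_bruker loop regroups the flat list of gradient components into per-volume triplets before pairing each with its b-value:

```python
# Regroup a flat list of gradient components (3 per volume) into triplets and
# pair each triplet with its b-value, as the rewritten dw_scheme loop does.
bvec = ['0', '0', '1', '1', '0', '0']   # hypothetical flat list, 3 entries per volume
bval = ['1000', '1000']

assert len(bvec) == 3 * len(bval)
triplets = [bvec[n:n + 3] for n in range(0, len(bvec), 3)]

for direction, value in zip(triplets, bval):
    # the script negates the third component when writing each dw_scheme entry
    print('dw_scheme: %s,%s,%s,%s' % (direction[0], direction[1], str(-float(direction[2])), value))
```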
diff --git a/bin/dwi2response b/bin/dwi2response
index 5e5a490d58..6ef8e3fd3d 100755
--- a/bin/dwi2response
+++ b/bin/dwi2response
@@ -3,16 +3,14 @@
# Script for estimating response functions for spherical deconvolution
# A number of different approaches are available within this script for performing response function estimation.
-
# Make the corresponding MRtrix3 Python libraries available
import inspect, os, sys
-lib_folder = os.path.realpath(os.path.abspath(os.path.join(os.path.split(inspect.getfile(inspect.currentframe()))[0], os.pardir, 'lib')))
+lib_folder = os.path.realpath(os.path.join(os.path.dirname(os.path.realpath(inspect.getfile(inspect.currentframe()))), os.pardir, 'lib'))
if not os.path.isdir(lib_folder):
sys.stderr.write('Unable to locate MRtrix3 Python libraries')
sys.exit(1)
sys.path.insert(0, lib_folder)
-
from mrtrix3 import algorithm, app, image, path, run
@@ -23,7 +21,7 @@ app.cmdline.addDescription('Each algorithm available also has its own help page,
# General options
common_options = app.cmdline.add_argument_group('Options common to all dwi2response algorithms')
-common_options.add_argument('-shell', help='The b-value shell(s) to use in response function estimation (single value for single-shell response, comma-separated list for multi-shell response)')
+common_options.add_argument('-shells', help='The b-value shell(s) to use in response function estimation (single value for single-shell response, comma-separated list for multi-shell response)')
common_options.add_argument('-lmax', help='The maximum harmonic degree(s) of response function estimation (single value for single-shell response, comma-separated list for multi-shell response)')
common_options.add_argument('-mask', help='Provide an initial mask for response voxel selection')
common_options.add_argument('-voxels', help='Output an image showing the final voxel selection(s)')
@@ -58,15 +56,15 @@ if app.args.lmax:
app.error('Parameter lmax must be a number')
if alg.needsSingleShell() and not len(lmax) == 1:
app.error('Can only specify a single lmax value for single-shell algorithms')
-shell_option = ''
-if app.args.shell:
+shells_option = ''
+if app.args.shells:
try:
- shell_values = [ int(x) for x in app.args.shell.split(',') ]
+ shells_values = [ int(x) for x in app.args.shells.split(',') ]
except:
- app.error('-shell option should provide a comma-separated list of b-values')
- if alg.needsSingleShell() and not len(shell_values) == 1:
+ app.error('-shells option should provide a comma-separated list of b-values')
+ if alg.needsSingleShell() and not len(shells_values) == 1:
app.error('Can only specify a single b-value shell for single-shell algorithms')
- shell_option = ' -shell ' + app.args.shell
+ shells_option = ' -shells ' + app.args.shells
singleshell_option = ''
if alg.needsSingleShell():
singleshell_option = ' -singleshell -no_bzero'
@@ -76,35 +74,31 @@ if app.args.grad:
grad_import_option = ' -grad ' + path.fromUser(app.args.grad, True)
elif app.args.fslgrad:
grad_import_option = ' -fslgrad ' + path.fromUser(app.args.fslgrad[0], True) + ' ' + path.fromUser(app.args.fslgrad[1], True)
-elif not image.headerField(path.fromUser(app.args.input, False), 'dwgrad'):
+elif 'dw_scheme' not in image.Header(path.fromUser(app.args.input, False)).keyval():
app.error('Script requires diffusion gradient table: either in image header, or using -grad / -fslgrad option')
app.makeTempDir()
# Get standard input data into the temporary directory
-if alg.needsSingleShell() or shell_option:
- run.command('mrconvert ' + path.fromUser(app.args.input, True) + ' - -stride 0,0,0,1' + grad_import_option + ' | dwiextract - ' + path.toTemp('dwi.mif', True) + shell_option + singleshell_option)
+if alg.needsSingleShell() or shells_option:
+ run.command('mrconvert ' + path.fromUser(app.args.input, True) + ' - -strides 0,0,0,1' + grad_import_option + ' | dwiextract - ' + path.toTemp('dwi.mif', True) + shells_option + singleshell_option)
else: # Don't discard b=0 in multi-shell algorithms
- run.command('mrconvert ' + path.fromUser(app.args.input, True) + ' ' + path.toTemp('dwi.mif', True) + ' -stride 0,0,0,1' + grad_import_option)
+ run.command('mrconvert ' + path.fromUser(app.args.input, True) + ' ' + path.toTemp('dwi.mif', True) + ' -strides 0,0,0,1' + grad_import_option)
if app.args.mask:
run.command('mrconvert ' + path.fromUser(app.args.mask, True) + ' ' + path.toTemp('mask.mif', True) + ' -datatype bit')
+else:
+ run.command('dwi2mask ' + path.fromUser(app.args.input, True) + ' ' + path.toTemp('mask.mif', True))
alg.getInputs()
app.gotoTempDir()
-# Generate a brain mask (if necessary)
-# Otherwise, check that the mask provided is appropriate
-if os.path.exists('mask.mif'):
- dwi_size = [ int(x) for x in image.headerField('dwi.mif', 'size').split() ]
- mask_size = [ int(x) for x in image.headerField('mask.mif', 'size').split() ]
- if not mask_size[:3] == dwi_size[:3]:
- app.error('Dimensions of provided mask image do not match DWI')
- if int(image.statistic('mask.mif', 'count', 'mask.mif')) == 0:
- app.error('Input mask does not contain any voxels')
-else:
- run.command('dwi2mask dwi.mif mask.mif')
+# Check that the brain mask is appropriate
+if image.Header('mask.mif').size()[:3] != image.Header('dwi.mif').size()[:3]:
+ app.error('Dimensions of provided mask image do not match DWI')
+if int(image.statistic('mask.mif', 'count', '-mask mask.mif')) == 0:
+ app.error('Input mask does not contain any voxels')
# From here, the script splits depending on what estimation algorithm is being used
@@ -113,6 +107,5 @@ alg.execute()
# Finalize for all algorithms
if app.args.voxels:
- run.command('mrconvert voxels.mif ' + path.fromUser(app.args.voxels, True) + (' -force' if app.force else ''))
+ run.command('mrconvert voxels.mif ' + path.fromUser(app.args.voxels, True) + (' -force' if app.forceOverwrite else ''))
app.complete()
-
diff --git a/bin/dwibiascorrect b/bin/dwibiascorrect
index be497005a6..b8ca354ffe 100755
--- a/bin/dwibiascorrect
+++ b/bin/dwibiascorrect
@@ -3,25 +3,31 @@
# Script that performs B1 field inhomogeneity correction for a DWI volume series
# Bias field is estimated using the mean b=0 image, and subsequently used to correct all volumes
-
# Make the corresponding MRtrix3 Python libraries available
import inspect, os, sys
-lib_folder = os.path.realpath(os.path.abspath(os.path.join(os.path.split(inspect.getfile(inspect.currentframe()))[0], os.pardir, 'lib')))
+lib_folder = os.path.realpath(os.path.join(os.path.dirname(os.path.realpath(inspect.getfile(inspect.currentframe()))), os.pardir, 'lib'))
if not os.path.isdir(lib_folder):
sys.stderr.write('Unable to locate MRtrix3 Python libraries')
sys.exit(1)
sys.path.insert(0, lib_folder)
-
from distutils.spawn import find_executable
from mrtrix3 import app, fsl, image, path, run
+opt_N4BiasFieldCorrection = {
+  's': ('4', 'shrink-factor applied to spatial dimensions'),
+  'b': ('[100,3]', '[initial mesh resolution in mm, spline order] This value is optimised for human adult data and needs to be adjusted for rodent data.'),
+  'c': ('[1000,0.0]', '[numberOfIterations,convergenceThreshold]')}
app.init('Robert E. Smith (robert.smith@florey.edu.au)',
'Perform B1 field inhomogeneity correction for a DWI volume series')
app.cmdline.addCitation('If using -fast option', 'Zhang, Y.; Brady, M. & Smith, S. Segmentation of brain MR images through a hidden Markov random field model and the expectation-maximization algorithm. IEEE Transactions on Medical Imaging, 2001, 20, 45-57', True)
app.cmdline.addCitation('If using -fast option', 'Smith, S. M.; Jenkinson, M.; Woolrich, M. W.; Beckmann, C. F.; Behrens, T. E.; Johansen-Berg, H.; Bannister, P. R.; De Luca, M.; Drobnjak, I.; Flitney, D. E.; Niazy, R. K.; Saunders, J.; Vickers, J.; Zhang, Y.; De Stefano, N.; Brady, J. M. & Matthews, P. M. Advances in functional and structural MR image analysis and implementation as FSL. NeuroImage, 2004, 23, S208-S219', True)
app.cmdline.addCitation('If using -ants option', 'Tustison, N.; Avants, B.; Cook, P.; Zheng, Y.; Egan, A.; Yushkevich, P. & Gee, J. N4ITK: Improved N3 Bias Correction. IEEE Transactions on Medical Imaging, 2010, 29, 1310-1320', True)
+antsoptions = app.cmdline.add_argument_group('Options for ANTS N4BiasFieldCorrection')
+for key in sorted(opt_N4BiasFieldCorrection):
+ antsoptions.add_argument('-ants.'+key, metavar=opt_N4BiasFieldCorrection[key][0], help='N4BiasFieldCorrection option -%s. %s' % (key,opt_N4BiasFieldCorrection[key][1]))
+ app.cmdline.flagMutuallyExclusiveOptions( [ '-ants.'+key, 'fsl' ] )
app.cmdline.add_argument('input', help='The input image series to be corrected')
app.cmdline.add_argument('output', help='The output corrected image series')
options = app.cmdline.add_argument_group('Options for the dwibiascorrect script')
@@ -44,19 +50,23 @@ if app.args.fsl:
if not fsl_path:
app.error('Environment variable FSLDIR is not set; please run appropriate FSL configuration script')
- fast_cmd = 'fast'
- if not find_executable(fast_cmd):
- fast_cmd = 'fsl5.0-fast'
- if not find_executable(fast_cmd):
- app.error('Could not find FSL program fast; please verify FSL install')
-
- fsl_suffix = fsl.suffix()
+ fast_cmd = fsl.exeName('fast')
+
+  app.warn('Use of -fsl option in dwibiascorrect script is discouraged due to its strong dependence ' + \
+           'on initial brain masking, and its inability to correct voxels outside of this mask. ' + \
+           'Use of the -ants option is recommended for quantitative DWI analyses.')
elif app.args.ants:
if not find_executable('N4BiasFieldCorrection'):
app.error('Could not find ANTS program N4BiasFieldCorrection; please check installation')
+ for key in sorted(opt_N4BiasFieldCorrection):
+ if hasattr(app.args, 'ants.'+key):
+ val = getattr(app.args, 'ants.'+key)
+ if val is not None:
+ opt_N4BiasFieldCorrection[key] = (val, 'user defined')
+ ants_options = ' '.join(['-%s %s' %(k, v[0]) for k, v in opt_N4BiasFieldCorrection.items()])
else:
app.error('No bias field estimation algorithm specified')
@@ -78,17 +88,17 @@ if app.args.mask:
app.gotoTempDir()
# Make sure it's actually a DWI that's been passed
-dwi_sizes = image.headerField('in.mif', 'size').split()
-if len(dwi_sizes) != 4:
+dwi_header = image.Header('in.mif')
+if len(dwi_header.size()) != 4:
app.error('Input image must be a 4D image')
-DW_scheme = image.headerField('in.mif', 'dwgrad').split('\n')
-if len(DW_scheme) != int(dwi_sizes[3]):
- app.error('Input image does not contain valid DW gradient scheme')
+if 'dw_scheme' not in dwi_header.keyval():
+ app.error('No valid DW gradient scheme provided or present in image header')
+if len(dwi_header.keyval()['dw_scheme']) != dwi_header.size()[3]:
+  app.error('DW gradient scheme contains different number of entries (' + str(len(dwi_header.keyval()['dw_scheme'])) + ') to number of volumes in DWIs (' + str(dwi_header.size()[3]) + ')')
# Generate a brain mask if required, or check the mask if provided
if app.args.mask:
- mask_sizes = image.headerField('mask.mif', 'size').split()
- if not mask_sizes[:3] == dwi_sizes[:3]:
+ if image.Header('mask.mif').size()[:3] != dwi_header.size()[:3]:
app.error('Provided mask image does not match input DWI')
else:
run.command('dwi2mask in.mif mask.mif')
@@ -99,7 +109,7 @@ run.command('dwiextract in.mif - -bzero | mrmath - mean mean_bzero.mif -axis 3')
if app.args.fsl:
# FAST doesn't accept a mask input; therefore need to explicitly mask the input image
- run.command('mrcalc mean_bzero.mif mask.mif -mult - | mrconvert - mean_bzero_masked.nii -stride -1,+2,+3')
+ run.command('mrcalc mean_bzero.mif mask.mif -mult - | mrconvert - mean_bzero_masked.nii -strides -1,+2,+3')
run.command(fast_cmd + ' -t 2 -o fast -n 3 -b mean_bzero_masked.nii')
bias_path = fsl.findImage('fast_bias')
@@ -113,14 +123,24 @@ elif app.args.ants:
# Use the brain mask as a weights image rather than a mask; means that voxels at the edge of the mask
# will have a smoothly-varying bias field correction applied, rather than multiplying by 1.0 outside the mask
- run.command('mrconvert mean_bzero.mif mean_bzero.nii -stride +1,+2,+3')
- run.command('mrconvert mask.mif mask.nii -stride +1,+2,+3')
- bias_path = 'bias.nii'
- run.command('N4BiasFieldCorrection -d 3 -i mean_bzero.nii -w mask.nii -o [corrected.nii,' + bias_path + '] -b [150,3] -c [1000x1000,0.0]')
-
+ run.command('mrconvert mean_bzero.mif mean_bzero.nii -strides +1,+2,+3')
+ run.command('mrconvert mask.mif mask.nii -strides +1,+2,+3')
+ init_bias_path = 'init_bias.nii'
+ corrected_path = 'corrected.nii'
+ run.command('N4BiasFieldCorrection -d 3 -i mean_bzero.nii -w mask.nii -o [' + corrected_path + ',' + init_bias_path + '] ' + ants_options)
+
+ # N4 can introduce large differences between subjects via a global scaling of the bias field
+ # Estimate this scaling based on the total integral of the pre- and post-correction images within the brain mask
+ input_integral = float(run.command('mrcalc mean_bzero.mif mask.mif -mult - | mrmath - sum - -axis 0 | mrmath - sum - -axis 1 | mrmath - sum - -axis 2 | mrdump -')[0])
+ output_integral = float(run.command('mrcalc ' + corrected_path + ' mask.mif -mult - | mrmath - sum - -axis 0 | mrmath - sum - -axis 1 | mrmath - sum - -axis 2 | mrdump -')[0])
+ app.var(input_integral, output_integral)
+ bias_path = 'bias.mif'
+ run.command('mrcalc ' + init_bias_path + ' ' + str(output_integral / input_integral) + ' -mult ' + bias_path)
+
+
+
run.command('mrcalc in.mif ' + bias_path + ' -div result.mif')
-run.command('mrconvert result.mif ' + path.fromUser(app.args.output, True) + (' -force' if app.force else ''))
+run.command('mrconvert result.mif ' + path.fromUser(app.args.output, True) + (' -force' if app.forceOverwrite else ''))
if app.args.bias:
- run.command('mrconvert ' + bias_path + ' ' + path.fromUser(app.args.bias, True) + (' -force' if app.force else ''))
+ run.command('mrconvert ' + bias_path + ' ' + path.fromUser(app.args.bias, True) + (' -force' if app.forceOverwrite else ''))
app.complete()
-
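A standalone NumPy sketch, separate from the patch and using synthetic arrays, of the global scaling step that the dwibiascorrect hunk above performs with mrcalc/mrmath/mrdump: the N4 bias field is rescaled by the ratio of within-mask integrals so that dividing by it does not globally rescale the data:

```python
# Rescale an N4 bias field so that dividing by it preserves the total
# within-mask intensity of the mean b=0 image.
import numpy as np

mean_bzero = np.random.rand(4, 4, 4) + 1.0   # synthetic stand-in for the mean b=0 image
init_bias = np.random.rand(4, 4, 4) + 0.5    # synthetic stand-in for the N4 field estimate
mask = np.ones((4, 4, 4), dtype=bool)        # synthetic brain mask

corrected = mean_bzero / init_bias
input_integral = mean_bzero[mask].sum()
output_integral = corrected[mask].sum()

# Global scaling of the field, analogous to: mrcalc init_bias.nii <ratio> -mult bias.mif
bias = init_bias * (output_integral / input_integral)

# Dividing by the rescaled field leaves the within-mask integral unchanged
assert np.isclose((mean_bzero / bias)[mask].sum(), input_integral)
```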
diff --git a/bin/dwigradcheck b/bin/dwigradcheck
index 63f6646fb5..441699dbe2 100755
--- a/bin/dwigradcheck
+++ b/bin/dwigradcheck
@@ -4,13 +4,13 @@
# Make the corresponding MRtrix3 Python libraries available
import inspect, os, sys
-lib_folder = os.path.realpath(os.path.abspath(os.path.join(os.path.split(inspect.getfile(inspect.currentframe()))[0], os.pardir, 'lib')))
+lib_folder = os.path.realpath(os.path.join(os.path.dirname(os.path.realpath(inspect.getfile(inspect.currentframe()))), os.pardir, 'lib'))
if not os.path.isdir(lib_folder):
sys.stderr.write('Unable to locate MRtrix3 Python libraries')
sys.exit(1)
sys.path.insert(0, lib_folder)
-import copy, decimal, numbers, os, sys, shutil
+import copy, numbers, shutil
from mrtrix3 import app, image, run, path
app.init('Robert E. Smith (robert.smith@florey.edu.au)', 'Check the orientation of the diffusion gradient table')
@@ -33,17 +33,17 @@ app.cmdline.flagMutuallyExclusiveOptions( ['grad', 'fslgrad' ])
app.cmdline.flagMutuallyExclusiveOptions( ['export_grad_mrtrix', 'export_grad_fsl' ])
app.parse()
-image_dimensions = [ int(i) for i in image.headerField(app.args.input, 'size').split() ]
+image_dimensions = image.Header(path.fromUser(app.args.input, False)).size()
if len(image_dimensions) != 4:
app.error('Input image must be a 4D image')
if min(image_dimensions) == 1:
- app.error('Cannot perform tractography on an image with a unity spatial dimension')
+ app.error('Cannot perform tractography on an image with a unity dimension')
num_volumes = image_dimensions[3]
app.makeTempDir()
# Make sure the image data can be memory-mapped
-run.command('mrconvert ' + app.args.input + ' ' + path.toTemp('data.mif', True) + ' -stride 0,0,0,1 -datatype float32')
+run.command('mrconvert ' + app.args.input + ' ' + path.toTemp('data.mif', True) + ' -strides 0,0,0,1 -datatype float32')
if app.args.grad:
shutil.copy(path.fromUser(app.args.grad, False), path.toTemp('grad.b', False))
@@ -100,8 +100,7 @@ if not os.path.exists('mask.mif'):
number_option = ' -select ' + str(app.args.number)
-#
-# TODO What variations of gradient errors can we conceive?
+# What variations of gradient errors can we conceive?
# Done:
# * Has an axis been flipped? (none, 0, 1, 2)
@@ -117,11 +116,14 @@ number_option = ' -select ' + str(app.args.number)
axis_flips = [ 'none', 0, 1, 2 ]
axis_permutations = [ ( 0, 1, 2 ), (0, 2, 1), (1, 0, 2), (1, 2, 0), (2, 0, 1), (2, 1, 0) ]
grad_basis = [ 'scanner', 'image' ]
+total_tests = len(axis_flips) * len(axis_permutations) * len(grad_basis)
# List where the first element is the mean length
lengths = [ ]
+progress = app.progressBar('Testing gradient table alterations (0 of ' + str(total_tests) + ')', total_tests)
+
for flip in axis_flips:
for permutation in axis_permutations:
for basis in grad_basis:
@@ -173,6 +175,10 @@ for flip in axis_flips:
# Add to the database
lengths.append([meanlength,flip,permutation,basis])
+      # Increment the progress bar
+ progress.increment('Testing gradient table alterations (' + str(len(lengths)) + ' of ' + str(total_tests) + ')')
+
+progress.done()
# Sort the list to find the best gradient configuration(s)
lengths.sort()
@@ -180,13 +186,13 @@ lengths.reverse()
# Provide a printout of the mean streamline length of each gradient table manipulation
-print ('Mean length Axis flipped Axis permutations Axis basis')
+sys.stderr.write('Mean length Axis flipped Axis permutations Axis basis\n')
for line in lengths:
if isinstance(line[1], numbers.Number):
flip_str = "{:4d}".format(line[1])
else:
flip_str = line[1]
- print ("{:5.2f}".format(line[0]) + ' ' + flip_str + ' ' + str(line[2]) + ' ' + line[3])
+ sys.stderr.write("{:5.2f}".format(line[0]) + ' ' + flip_str + ' ' + str(line[2]) + ' ' + line[3] + '\n')
# If requested, extract what has been detected as the best gradient table, and
@@ -207,4 +213,3 @@ if grad_export_option:
app.complete()
-
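A standalone sketch, separate from the patch, of the enumeration that the new dwigradcheck progress bar counts over: every combination of axis flip, axis permutation and gradient basis (4 x 6 x 2 = 48 tests), with the per-candidate tractography scoring left as a placeholder:

```python
# Enumerate the candidate gradient table manipulations tested by dwigradcheck.
import itertools

axis_flips = ['none', 0, 1, 2]
axis_permutations = [(0, 1, 2), (0, 2, 1), (1, 0, 2), (1, 2, 0), (2, 0, 1), (2, 1, 0)]
grad_basis = ['scanner', 'image']

candidates = list(itertools.product(axis_flips, axis_permutations, grad_basis))
total_tests = len(candidates)   # 4 * 6 * 2 = 48

for count, (flip, permutation, basis) in enumerate(candidates, start=1):
    # ... alter the gradient table, run tractography, record the mean streamline length ...
    print('Testing gradient table alterations (%d of %d): flip=%s permutation=%s basis=%s'
          % (count, total_tests, flip, permutation, basis))
```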
diff --git a/bin/dwiintensitynorm b/bin/dwiintensitynorm
index 0c0ab4db0e..046d2d596f 100755
--- a/bin/dwiintensitynorm
+++ b/bin/dwiintensitynorm
@@ -3,26 +3,26 @@
# Make the corresponding MRtrix3 Python libraries available
import inspect, os, sys
-lib_folder = os.path.realpath(os.path.abspath(os.path.join(os.path.split(inspect.getfile(inspect.currentframe()))[0], os.pardir, 'lib')))
+lib_folder = os.path.realpath(os.path.join(os.path.dirname(os.path.realpath(inspect.getfile(inspect.currentframe()))), os.pardir, 'lib'))
if not os.path.isdir(lib_folder):
sys.stderr.write('Unable to locate MRtrix3 Python libraries')
sys.exit(1)
sys.path.insert(0, lib_folder)
-import math
-from mrtrix3 import app, file, image, path, run
+from mrtrix3 import app, file, image, path, run #pylint: disable=redefined-builtin
+from mrtrix3.path import allindir
def abspath(*arg):
return os.path.abspath(os.path.join(*arg))
def relpath(*arg):
- return os.path.relpath(os.path.join(*arg), app._workingDir)
+ return os.path.relpath(os.path.join(*arg), app.workingDir)
-class Input:
+class Input(object):
def __init__(self, filename, prefix, directory, mask_filename = '', mask_directory = ''):
self.filename = filename
self.prefix = prefix
@@ -39,24 +39,24 @@ app.cmdline.add_argument('output_dir', help='The output directory containing all
app.cmdline.add_argument('fa_template', help='The output population specific FA template, which is threshold to estimate a white matter mask')
app.cmdline.add_argument('wm_mask', help='The output white matter mask (in template space), used to estimate the median b=0 white matter value for normalisation')
options = app.cmdline.add_argument_group('Options for the dwiintensitynorm script')
-options.add_argument('-fa_threshold', default='0.4', help='The threshold applied to the Fractional Anisotropy group template used to derive an approximate white matter mask')
+options.add_argument('-fa_threshold', default='0.4', help='The threshold applied to the Fractional Anisotropy group template used to derive an approximate white matter mask (default: 0.4)')
app.parse()
app.args.input_dir = relpath(app.args.input_dir)
inputDir = app.args.input_dir
if not os.path.exists(inputDir):
- app.error('input directory not found');
-inFiles = os.listdir(inputDir)
+ app.error('input directory not found')
+inFiles = allindir(inputDir, dir_path=False)
if len(inFiles) <= 1:
app.console('not enough images found in input directory. More than one image is needed to perform a group-wise intensity normalisation')
else:
app.console('performing global intensity normalisation on ' + str(len(inFiles)) + ' input images')
-app.args.mask_dir = relpath(app.args.mask_dir);
+app.args.mask_dir = relpath(app.args.mask_dir)
maskDir = app.args.mask_dir
if not os.path.exists(maskDir):
app.error('mask directory not found')
-maskFiles = os.listdir(maskDir)
+maskFiles = allindir(maskDir, dir_path=False)
if len(maskFiles) != len(inFiles):
app.error('the number of images in the mask directory does not equal the number of images in the input directory')
maskCommonPostfix = path.commonPostfix(maskFiles)
@@ -65,14 +65,14 @@ for m in maskFiles:
maskPrefixes.append(m.split(maskCommonPostfix)[0])
commonPostfix = path.commonPostfix(inFiles)
-input = []
+input_list = []
for i in inFiles:
- prefix = i.split(commonPostfix)[0]
- if prefix not in maskPrefixes:
+ subj_prefix = i.split(commonPostfix)[0]
+ if subj_prefix not in maskPrefixes:
app.error ('no matching mask image was found for input image ' + i)
image.check3DNonunity(os.path.join(path.fromUser(inputDir, False), i))
- index = maskPrefixes.index(prefix)
- input.append(Input(i, prefix, path.fromUser(inputDir, False), maskFiles[index], path.fromUser(maskDir, False)))
+ index = maskPrefixes.index(subj_prefix)
+ input_list.append(Input(i, subj_prefix, path.fromUser(inputDir, False), maskFiles[index], path.fromUser(maskDir, False)))
app.checkOutputPath(app.args.fa_template)
app.checkOutputPath(app.args.wm_mask)
@@ -81,28 +81,34 @@ file.makeDir(app.args.output_dir)
app.makeTempDir()
-maskTempDir = os.path.join(app._tempDir, os.path.basename(os.path.normpath(maskDir)))
-run.command ('cp -R -L ' + maskDir + ' ' + maskTempDir)
+maskTempDir = os.path.join(app.tempDir, os.path.basename(os.path.normpath(maskDir)))
+run.command('cp -R -L ' + maskDir + ' ' + maskTempDir)
app.gotoTempDir()
file.makeDir('fa')
-app.console('Computing FA images')
-input_filenames = []
-for i in input:
+progress = app.progressBar('Computing FA images', len(input_list))
+for i in input_list:
run.command('dwi2tensor ' + abspath(i.directory, i.filename) + ' -mask ' + abspath(i.mask_directory, i.mask_filename) + ' - | tensor2metric - -fa ' + os.path.join('fa', i.prefix + '.mif'))
+ progress.increment()
+progress.done()
app.console('Generating FA population template')
run.command('population_template fa -mask_dir ' + maskTempDir + ' fa_template.mif -type rigid_affine_nonlinear -rigid_scale 0.25,0.5,0.8,1.0 -affine_scale 0.7,0.8,1.0,1.0 -nl_scale 0.5,0.75,1.0,1.0,1.0 -nl_niter 5,5,5,5,5 -tempdir population_template -linear_no_pause -nocleanup')
+app.console('Generating WM mask in template space')
run.command('mrthreshold fa_template.mif -abs ' + app.args.fa_threshold + ' template_wm_mask.mif')
+
+progress = app.progressBar('Intensity normalising subject images', len(input_list))
file.makeDir('wm_mask_warped')
-for i in input:
+for i in input_list:
run.command('mrtransform template_wm_mask.mif -interp nearest -warp_full ' + os.path.join('population_template', 'warps', i.prefix + '.mif') + ' ' + os.path.join('wm_mask_warped', i.prefix + '.mif') + ' -from 2 -template ' + os.path.join('fa', i.prefix + '.mif'))
- run.command('dwinormalise ' + abspath(i.directory, i.filename) + ' ' + os.path.join('wm_mask_warped', i.prefix + '.mif') + ' ' + path.fromUser(os.path.join(app.args.output_dir, i.filename), True) + (' -force' if app.force else ''))
+ run.command('dwinormalise ' + abspath(i.directory, i.filename) + ' ' + os.path.join('wm_mask_warped', i.prefix + '.mif') + ' ' + path.fromUser(os.path.join(app.args.output_dir, i.filename), True) + (' -force' if app.forceOverwrite else ''))
+ progress.increment()
+progress.done()
-run.command('mrconvert template_wm_mask.mif ' + path.fromUser(app.args.wm_mask, True) + (' -force' if app.force else ''))
-run.command('mrconvert fa_template.mif ' + path.fromUser(app.args.fa_template, True) + (' -force' if app.force else ''))
+app.console('Exporting template images to user locations')
+run.command('mrconvert template_wm_mask.mif ' + path.fromUser(app.args.wm_mask, True) + (' -force' if app.forceOverwrite else ''))
+run.command('mrconvert fa_template.mif ' + path.fromUser(app.args.fa_template, True) + (' -force' if app.forceOverwrite else ''))
app.complete()
-
diff --git a/bin/dwipreproc b/bin/dwipreproc
index e0003d7244..f0840ed934 100755
--- a/bin/dwipreproc
+++ b/bin/dwipreproc
@@ -14,19 +14,16 @@
# * The (primary) direction of phase encoding. In cases where opposing phase encoding is part of the acquisition protocol (i.e. the reversed phase-encode pair in case 2 above, and all of the DWIs in case 3 above), the -pe_dir option specifies the phase encode direction of the _first_ volume in the relevant volume pair; the second is assumed to be the exact opposite.
# * The total readout time of the EPI acquisition. This affects the magnitude of the image distortion for a given field inhomogeneity. If this information is not provided via the -readout_time option, then a 'sane' default of 0.1s will be assumed. Note that this is not actually expected to influence the estimation of the field; it will result in the field inhomogeneity estimation being scaled by some factor, but as long as it uses the same sane default for the DWIs, the distortion correction should operate as expected.
-
# Make the corresponding MRtrix3 Python libraries available
import inspect, os, sys
-lib_folder = os.path.realpath(os.path.abspath(os.path.join(os.path.split(inspect.getfile(inspect.currentframe()))[0], os.pardir, 'lib')))
+lib_folder = os.path.realpath(os.path.join(os.path.dirname(os.path.realpath(inspect.getfile(inspect.currentframe()))), os.pardir, 'lib'))
if not os.path.isdir(lib_folder):
sys.stderr.write('Unable to locate MRtrix3 Python libraries')
sys.exit(1)
sys.path.insert(0, lib_folder)
-
-import math, shutil
-from distutils.spawn import find_executable
-from mrtrix3 import app, file, fsl, image, path, phaseEncoding, run
+import math, itertools, shutil
+from mrtrix3 import app, file, fsl, image, path, phaseEncoding, run #pylint: disable=redefined-builtin
app.init ('Robert E. Smith (robert.smith@florey.edu.au)',
@@ -36,6 +33,8 @@ app.cmdline.addCitation('', 'Andersson, J. L. & Sotiropoulos, S. N. An integrate
app.cmdline.addCitation('', 'Smith, S. M.; Jenkinson, M.; Woolrich, M. W.; Beckmann, C. F.; Behrens, T. E.; Johansen-Berg, H.; Bannister, P. R.; De Luca, M.; Drobnjak, I.; Flitney, D. E.; Niazy, R. K.; Saunders, J.; Vickers, J.; Zhang, Y.; De Stefano, N.; Brady, J. M. & Matthews, P. M. Advances in functional and structural MR image analysis and implementation as FSL. NeuroImage, 2004, 23, S208-S219', True)
app.cmdline.addCitation('If performing recombination of diffusion-weighted volume pairs with opposing phase encoding directions', 'Skare, S. & Bammer, R. Jacobian weighting of distortion corrected EPI data. Proceedings of the International Society for Magnetic Resonance in Medicine, 2010, 5063', True)
app.cmdline.addCitation('If performing EPI susceptibility distortion correction', 'Andersson, J. L.; Skare, S. & Ashburner, J. How to correct susceptibility distortions in spin-echo echo-planar images: application to diffusion tensor imaging. NeuroImage, 2003, 20, 870-888', True)
+app.cmdline.addCitation('If including "--repol" in -eddy_options input', 'Andersson, J. L. R.; Graham, M. S.; Zsoldos, E. & Sotiropoulos, S. N. Incorporating outlier detection and replacement into a non-parametric framework for movement and distortion correction of diffusion MR images. NeuroImage, 2016, 141, 556-572', True)
+app.cmdline.addCitation('If including "--mporder" in -eddy_options input', 'Andersson, J. L. R.; Graham, M. S.; Drobnjak, I.; Zhang, H.; Filippini, N. & Bastiani, M. Towards a comprehensive framework for movement and distortion correction of diffusion MR images: Within volume movement. NeuroImage, 2017, 152, 450-466', True)
app.cmdline.add_argument('input', help='The input DWI series to be corrected')
app.cmdline.add_argument('output', help='The output corrected image series')
grad_export_options = app.cmdline.add_argument_group('Options for exporting the diffusion gradient table')
@@ -49,11 +48,13 @@ app.cmdline.flagMutuallyExclusiveOptions( [ 'grad', 'fslgrad' ] )
options = app.cmdline.add_argument_group('Other options for the dwipreproc script')
options.add_argument('-pe_dir', metavar=('PE'), help='Manually specify the phase encoding direction of the input series; can be a signed axis number (e.g. -0, 1, +2), an axis designator (e.g. RL, PA, IS), or NIfTI axis codes (e.g. i-, j, k)')
options.add_argument('-readout_time', metavar=('time'), type=float, help='Manually specify the total readout time of the input series (in seconds)')
-options.add_argument('-se_epi', metavar=('file'), help='Provide an additional image series consisting of spin-echo EPI images, which is to be used exclusively by topup for estimating the inhomogeneity field (i.e. it will not form part of the output image series)')
-options.add_argument('-json_import', metavar=('JSON_file'), help='Import image header information from an associated JSON file (may be necessary to determine phase encoding information)')
+options.add_argument('-se_epi', metavar=('image'), help='Provide an additional image series consisting of spin-echo EPI images, which is to be used exclusively by topup for estimating the inhomogeneity field (i.e. it will not form part of the output image series)')
+options.add_argument('-align_seepi', action='store_true', help='Achieve alignment between the SE-EPI images used for inhomogeneity field estimation, and the DWIs, by inserting the first DWI b=0 volume into the SE-EPI series. Only use this option if the input SE-EPI images have identical image contrast to the b=0 images present in the DWI series.')
+options.add_argument('-json_import', metavar=('file'), help='Import image header information from an associated JSON file (may be necessary to determine phase encoding information)')
options.add_argument('-topup_options', metavar=('TopupOptions'), help='Manually provide additional command-line options to the topup command (provide a string within quotation marks that contains at least one space, even if only passing a single command-line option to topup)')
options.add_argument('-eddy_options', metavar=('EddyOptions'), help='Manually provide additional command-line options to the eddy command (provide a string within quotation marks that contains at least one space, even if only passing a single command-line option to eddy)')
-options.add_argument('-cuda', help='Use the CUDA version of eddy (if available)', action='store_true', default=False)
+options.add_argument('-eddyqc_text', metavar=('directory'), help='Copy the various text-based statistical outputs generated by eddy into an output directory')
+options.add_argument('-eddyqc_all', metavar=('directory'), help='Copy ALL outputs generated by eddy (including images) into an output directory')
rpe_options = app.cmdline.add_argument_group('Options for specifying the acquisition phase-encoding design; note that one of the -rpe_* options MUST be provided')
rpe_options.add_argument('-rpe_none', action='store_true', help='Specify that no reversed phase-encoding image data is being provided; eddy will perform eddy current and motion correction only')
rpe_options.add_argument('-rpe_pair', action='store_true', help='Specify that a set of images (typically b=0 volumes) will be provided for use in inhomogeneity field estimation only (using the -se_epi option). It is assumed that the FIRST volume(s) of this image has the SAME phase-encoding direction as the input DWIs, and the LAST volume(s) has precisely the OPPOSITE phase encoding')
@@ -63,6 +64,7 @@ app.cmdline.flagMutuallyExclusiveOptions( [ 'rpe_none', 'rpe_pair', 'rpe_all', '
app.cmdline.flagMutuallyExclusiveOptions( [ 'rpe_none', 'se_epi' ], False ) # May still technically provide -se_epi even with -rpe_all
app.cmdline.flagMutuallyExclusiveOptions( [ 'rpe_header', 'pe_dir' ], False ) # Can't manually provide phase-encoding direction if expecting it to be in the header
app.cmdline.flagMutuallyExclusiveOptions( [ 'rpe_header', 'readout_time' ], False ) # Can't manually provide readout time if expecting it to be in the header
+app.cmdline.flagMutuallyExclusiveOptions( [ 'eddyqc_text', 'eddyqc_all' ], False )
app.parse()
@@ -85,6 +87,9 @@ elif app.args.rpe_header:
else:
app.error('Must explicitly specify phase-encoding acquisition design (even if none)')
+if app.args.align_seepi and not app.args.se_epi:
+ app.error('-align_seepi option is only applicable when the -se_epi option is also used')
+
fsl_path = os.environ.get('FSLDIR', '')
if not fsl_path:
app.error('Environment variable FSLDIR is not set; please run appropriate FSL configuration script')
@@ -92,21 +97,12 @@ if not fsl_path:
if not PE_design == 'None':
topup_config_path = os.path.join(fsl_path, 'etc', 'flirtsch', 'b02b0.cnf')
if not os.path.isfile(topup_config_path):
- app.error('Could not find necessary default config file for FSL\'s topup program\n(expected location: ' + topup_config_path + ')')
-
- topup_cmd = 'topup'
- if not find_executable(topup_cmd):
- topup_cmd = 'fsl5.0-topup'
- if not find_executable(topup_cmd):
- app.error('Could not find FSL program topup; please verify FSL install')
+ app.error('Could not find necessary default config file for FSL topup command\n(expected location: ' + topup_config_path + ')')
+ topup_cmd = fsl.exeName('topup')
+ applytopup_cmd = fsl.exeName('applytopup')
- applytopup_cmd = 'applytopup'
- if not find_executable(applytopup_cmd):
- applytopup_cmd = 'fsl5.0-applytopup'
- if not find_executable(applytopup_cmd):
- app.error('Could not find FSL program applytopup; please verify FSL install')
-
-eddy_cmd = fsl.eddyBinary(app.args.cuda)
+if not fsl.eddyBinary(True) and not fsl.eddyBinary(False):
+ app.error('Could not find any version of FSL eddy command')
fsl_suffix = fsl.suffix()
app.checkOutputPath(app.args.output)
@@ -121,8 +117,38 @@ elif app.args.export_grad_fsl:
app.checkOutputPath(path.fromUser(app.args.export_grad_fsl[1], False))
-# Convert all input images into MRtrix format and store in temprary directory first;
-# that way image.headerField() can be run multiple times without having to repeatedly parse e.g. DICOM data
+eddyqc_path = None
+eddyqc_files = [ 'eddy_parameters', 'eddy_movement_rms', 'eddy_restricted_movement_rms', \
+ 'eddy_post_eddy_shell_alignment_parameters', 'eddy_post_eddy_shell_PE_translation_parameters', \
+ 'eddy_outlier_report', 'eddy_outlier_map', 'eddy_outlier_n_stdev_map', 'eddy_outlier_n_sqr_stdev_map', \
+ 'eddy_movement_over_time' ]
+if app.args.eddyqc_text:
+ eddyqc_path = path.fromUser(app.args.eddyqc_text, False)
+elif app.args.eddyqc_all:
+ eddyqc_path = path.fromUser(app.args.eddyqc_all, False)
+ eddyqc_files.extend([ 'eddy_outlier_free_data.nii.gz', 'eddy_cnr_maps.nii.gz', 'eddy_residuals.nii.gz' ])
+if eddyqc_path:
+ if os.path.exists(eddyqc_path):
+ if os.path.isdir(eddyqc_path):
+ if any([ os.path.exists(os.path.join(eddyqc_path, filename)) for filename in eddyqc_files ]):
+ if app.forceOverwrite:
+ app.warn('Output eddy QC directory already contains relevant files; these will be overwritten on completion')
+ else:
+ app.error('Output eddy QC directory already contains relevant files (use -force to override)')
+ else:
+ if app.forceOverwrite:
+ app.warn('Target for eddy QC output is not a directory; it will be overwritten on completion')
+ else:
+ app.error('Target for eddy QC output exists, and is not a directory (use -force to override)')
+
+
+eddy_manual_options = ''
+if app.args.eddy_options:
+ # Initially process as a list; we'll convert back to a string later
+ eddy_manual_options = app.args.eddy_options.strip().split()
+
+
+# Convert all input images into MRtrix format and store in temporary directory first
app.makeTempDir()
grad_option = ''
@@ -136,47 +162,105 @@ if app.args.json_import:
run.command('mrconvert ' + path.fromUser(app.args.input, True) + ' ' + path.toTemp('dwi.mif', True) + grad_option + json_option)
if app.args.se_epi:
image.check3DNonunity(path.fromUser(app.args.se_epi, False))
- run.command('mrconvert ' + path.fromUser(app.args.se_epi, True) + ' ' + path.toTemp('topup_in.mif', True))
+ run.command('mrconvert ' + path.fromUser(app.args.se_epi, True) + ' ' + path.toTemp('se_epi.mif', True))
app.gotoTempDir()
-
-# Get information on the input images, particularly so that their validity can be checked
-dwi_size = [ int(s) for s in image.headerField('dwi.mif', 'size').split() ]
-dwi_pe_scheme = phaseEncoding.getScheme('dwi.mif')
+# Get information on the input images, and check their validity
+dwi_header = image.Header('dwi.mif')
+if not len(dwi_header.size()) == 4:
+ app.error('Input DWI must be a 4D image')
+dwi_num_volumes = dwi_header.size()[3]
+app.var(dwi_num_volumes)
+dwi_num_slices = dwi_header.size()[2]
+app.var(dwi_num_slices)
+dwi_pe_scheme = phaseEncoding.getScheme(dwi_header)
if app.args.se_epi:
- topup_size = [ int(s) for s in image.headerField('topup_in.mif', 'size').split() ]
- if not len(topup_size) == 4:
- app.error('File provided using -se_epi option must contain more than one image volume')
- topup_pe_scheme = phaseEncoding.getScheme('topup_in.mif')
-grad = image.headerField('dwi.mif', 'dwgrad').split('\n')
-grad = [ line.split() for line in grad ]
-grad = [ [ float(f) for f in line ] for line in grad ]
-stride = image.headerField('dwi.mif', 'stride')
-num_volumes = 1
-if len(dwi_size) == 4:
- num_volumes = dwi_size[3]
-
+ se_epi_header = image.Header('se_epi.mif')
+ # This doesn't necessarily apply any more: May be able to combine e.g. a P>>A from -se_epi with an A>>P b=0 image from the DWIs
+# if not len(se_epi_header.size()) == 4:
+# app.error('File provided using -se_epi option must contain more than one image volume')
+ se_epi_pe_scheme = phaseEncoding.getScheme(se_epi_header)
+if 'dw_scheme' not in dwi_header.keyval():
+ app.error('No diffusion gradient table found')
+grad = dwi_header.keyval()['dw_scheme']
+if not len(grad) == dwi_num_volumes:
+ app.error('Number of lines in gradient table (' + str(len(grad)) + ') does not match input image (' + str(dwi_num_volumes) + ' volumes); check your input data')
+
+# Check the manual options being passed to eddy, ensure they make sense
+eddy_mporder = any(s.startswith('--mporder') for s in eddy_manual_options)
+if eddy_mporder:
+ if 'SliceEncodingDirection' in dwi_header.keyval():
+ slice_encoding_direction = dwi_header.keyval()['SliceEncodingDirection']
+ app.var(slice_encoding_direction)
+ if not slice_encoding_direction.startswith('k'):
+ app.error('DWI header indicates that 3rd spatial axis is not the slice axis; this is not yet compatible with --mporder option in eddy, nor supported in dwipreproc')
+ slice_encoding_direction = image.axis2dir(slice_encoding_direction)
+ else:
+ app.console('No slice encoding direction information present; assuming third axis corresponds to slices')
+ slice_encoding_direction = [0,0,1]
+if '--resamp=lsr' in eddy_manual_options:
+ app.error('dwipreproc does not currently support least-squares reconstruction; this cannot be simply passed via -eddy_options')
+if eddy_mporder:
+ slspec_option = [ s for s in eddy_manual_options if s.startswith('--slspec') ]
+ slice_groups = [ ]
+ slice_timing = [ ]
+ if len(slspec_option) > 1:
+ app.error('--slspec option appears more than once in -eddy_options input; cannot import slice timing')
+ elif len(slspec_option) == 1:
+ slspec_file_path = path.fromUser(slspec_option[0][9:], False)
+ if os.path.isfile(slspec_file_path):
+ # Since there's a chance that we may need to pad this info, we can't just copy this file
+ # to the temporary directory...
+ with open(slspec_file_path, 'r') as f:
+ for line in f:
+ line = line.strip()
+ if line:
+ slice_groups.append([int(value) for value in line.split()])
+ app.var(slice_groups)
+ # Remove this entry from eddy_manual_options; it'll be inserted later, with the
+ # path to the new slspec file
+ eddy_manual_options = [ s for s in eddy_manual_options if not s.startswith('--slspec') ]
+ else:
+ app.error('Unable to find \'slspec\' file provided via -eddy_options \" ... --slspec=/path/to/file ... \" (expected location: ' + slspec_file_path + ')')
+ else:
+ if 'SliceTiming' not in dwi_header.keyval():
+ app.error('Cannot perform slice-to-volume correction in eddy: No slspec file provided, and no slice timing information present in header')
+ slice_timing = dwi_header.keyval()['SliceTiming'][0]
+ app.var(slice_timing)
+ if len(slice_timing) != dwi_num_slices:
+      app.error('Cannot use slice timing information in image header for slice-to-volume correction: Number of entries (' + str(len(slice_timing)) + ') does not match number of slices (' + str(dwi_num_slices) + ')')
+
+# Use new features of dirstat to query the quality of the diffusion acquisition scheme
+# Need to know the mean b-value in each shell, and the asymmetry value of each shell
+# But don't bother testing / warning the user if they're already controlling for this
+if not app.args.eddy_options or not any(s.startswith('--slm=') for s in app.args.eddy_options.split()):
+ shell_bvalues = [ int(round(float(value))) for value in image.mrinfo('dwi.mif', 'shell_bvalues').split() ]
+ shell_asymmetry = [ float(value) for value in run.command('dirstat dwi.mif -output asym')[0].splitlines() ]
+ # dirstat will skip any b=0 shell by default; therefore for correspondence between
+  # shell_bvalues and shell_asymmetry, need to remove any b=0 from the former
+ if len(shell_bvalues) == len(shell_asymmetry) + 1:
+ shell_bvalues = shell_bvalues[1:]
+ elif len(shell_bvalues) != len(shell_asymmetry):
+ app.error('Number of b-values reported by mrinfo (' + str(len(shell_bvalues)) + ') does not match number of outputs provided by dirstat (' + str(len(shell_asymmetry)) + ')')
+ for b, s in zip(shell_bvalues, shell_asymmetry):
+ if s >= 0.1:
+ app.warn('sampling of b=' + str(b) + ' shell is ' + ('strongly' if s >= 0.4 else 'moderately') + \
+ ' asymmetric; distortion correction may benefit from use of: ' + \
+ '-eddy_options " ... --slm=linear ... "')
# Since we want to access user-defined phase encoding information regardless of whether or not
# such information is present in the header, let's grab it here
manual_pe_dir = None
if app.args.pe_dir:
- manual_pe_dir = [ float(i) for i in phaseEncoding.dir(app.args.pe_dir) ]
+ manual_pe_dir = [ float(i) for i in phaseEncoding.direction(app.args.pe_dir) ]
+app.var(manual_pe_dir)
manual_trt = None
if app.args.readout_time:
manual_trt = float(app.args.readout_time)
-
-
-
-# Perform initial checks on input images
-if not grad:
- app.error('No diffusion gradient table found')
-if not len(grad) == num_volumes:
- app.error('Number of lines in gradient table (' + str(len(grad)) + ') does not match input image (' + str(num_volumes) + ' volumes); check your input data')
-
+app.var(manual_trt)
do_topup = (not PE_design == 'None')
@@ -202,10 +286,9 @@ def grads_match(one, two):
return True
-
# Manually generate a phase-encoding table for the input DWI based on user input
dwi_manual_pe_scheme = None
-topup_manual_pe_scheme = None
+se_epi_manual_pe_scheme = None
auto_trt = 0.1
dwi_auto_trt_warning = False
if manual_pe_dir:
@@ -221,22 +304,25 @@ if manual_pe_dir:
if PE_design == 'None':
line = list(manual_pe_dir)
line.append(trt)
- dwi_manual_pe_scheme = [ line ] * num_volumes
+ dwi_manual_pe_scheme = [ line ] * dwi_num_volumes
+ app.var(dwi_manual_pe_scheme)
# With 'Pair', also need to construct the manual scheme for SE EPIs
elif PE_design == 'Pair':
line = list(manual_pe_dir)
line.append(trt)
- dwi_manual_pe_scheme = [ line ] * num_volumes
- num_topup_volumes = topup_size[3]
- if num_topup_volumes%2:
+ dwi_manual_pe_scheme = [ line ] * dwi_num_volumes
+ app.var(dwi_manual_pe_scheme)
+ se_epi_num_volumes = se_epi_header.size()[3]
+ if se_epi_num_volumes%2:
app.error('If using -rpe_pair option, image provided using -se_epi must contain an even number of volumes')
# Assume that first half of volumes have same direction as series;
# second half have the opposite direction
- topup_manual_pe_scheme = [ line ] * int(num_topup_volumes/2)
+ se_epi_manual_pe_scheme = [ line ] * int(se_epi_num_volumes/2)
line = [ (-i if i else 0.0) for i in manual_pe_dir ]
line.append(trt)
- topup_manual_pe_scheme.extend( [ line ] * int(num_topup_volumes/2) )
+ se_epi_manual_pe_scheme.extend( [ line ] * int(se_epi_num_volumes/2) )
+ app.var(se_epi_manual_pe_scheme)
# If -rpe_all, need to scan through grad and figure out the pairings
# This will be required if relying on user-specified phase encode direction
@@ -248,35 +334,36 @@ if manual_pe_dir:
# despite having the same phase-encoding direction. Instead, explicitly enforce
# that volumes must be matched between the first and second halves of the DWI data.
elif PE_design == 'All':
- if num_volumes%2:
+ if dwi_num_volumes%2:
app.error('If using -rpe_all option, input image must contain an even number of volumes')
- grads_matched = [ num_volumes ] * num_volumes
+ grads_matched = [ dwi_num_volumes ] * dwi_num_volumes
grad_pairs = [ ]
- app.debug('Commencing gradient direction matching; ' + str(num_volumes) + ' volumes')
- for index1 in range(int(num_volumes/2)):
- if grads_matched[index1] == num_volumes: # As yet unpaired
- for index2 in range(int(num_volumes/2), num_volumes):
- if grads_matched[index2] == num_volumes: # Also as yet unpaired
+ app.debug('Commencing gradient direction matching; ' + str(dwi_num_volumes) + ' volumes')
+ for index1 in range(int(dwi_num_volumes/2)):
+ if grads_matched[index1] == dwi_num_volumes: # As yet unpaired
+ for index2 in range(int(dwi_num_volumes/2), dwi_num_volumes):
+ if grads_matched[index2] == dwi_num_volumes: # Also as yet unpaired
if grads_match(grad[index1], grad[index2]):
- grads_matched[index1] = index2;
- grads_matched[index2] = index1;
+ grads_matched[index1] = index2
+ grads_matched[index2] = index1
grad_pairs.append([index1, index2])
app.debug('Matched volume ' + str(index1) + ' with ' + str(index2) + ': ' + str(grad[index1]) + ' ' + str(grad[index2]))
break
else:
app.error('Unable to determine matching reversed phase-encode direction volume for DWI volume ' + str(index1))
- if not len(grad_pairs) == num_volumes/2:
+ if not len(grad_pairs) == dwi_num_volumes/2:
app.error('Unable to determine complete matching DWI volume pairs for reversed phase-encode combination')
# Construct manual PE scheme here:
# Regardless of whether or not there's a scheme in the header, need to have it:
# if there's one in the header, want to compare to the manually-generated one
dwi_manual_pe_scheme = [ ]
- for index in range(0, num_volumes):
+ for index in range(0, dwi_num_volumes):
line = list(manual_pe_dir)
- if index >= int(num_volumes/2):
+ if index >= int(dwi_num_volumes/2):
line = [ (-i if i else 0.0) for i in line ]
line.append(trt)
dwi_manual_pe_scheme.append(line)
+ app.var(dwi_manual_pe_scheme)
else: # No manual phase encode direction defined
@@ -328,7 +415,7 @@ else:
if not manual_pe_dir:
app.error('No phase encoding information provided either in header or at command-line')
if dwi_auto_trt_warning:
- app.warn('Total readout time not provided at command-line; assuming sane default of ' + str(auto_trt))
+ app.console('Total readout time not provided at command-line; assuming sane default of ' + str(auto_trt))
dwi_pe_scheme = dwi_manual_pe_scheme # May be needed later for triggering volume recombination
# This may be required by -rpe_all for extracting b=0 volumes while retaining phase-encoding information
@@ -340,16 +427,42 @@ if dwi_manual_pe_scheme:
import_dwi_pe_table_option = ' -import_pe_table dwi_manual_pe_scheme.txt'
+# Find the index of the first DWI volume that is a b=0 volume
+# This needs to occur at the outermost level, as it is pertinent information
+# not only for the -align_seepi option, but also for when the -se_epi option
+# is not provided at all and the input to topup is extracted solely from the DWIs
+bzero_threshold = 10.0
+if 'BZeroThreshold' in app.config:
+ bzero_threshold = float(app.config['BZeroThreshold'])
+dwi_first_bzero_index = 0
+for line in grad:
+ if line[3] <= bzero_threshold:
+ break
+ dwi_first_bzero_index += 1
+app.var(dwi_first_bzero_index)
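+# (e.g. with gradient table b-values [ 1000 1000 0 1000 ... ] and the default BZeroThreshold
+# of 10.0, dwi_first_bzero_index evaluates to 2; if no b=0 volume exists, it equals len(grad))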
+
# Deal with the phase-encoding of the images to be fed to topup (if applicable)
-overwrite_topup_pe_scheme = False
+overwrite_se_epi_pe_scheme = False
+se_epi_path = 'se_epi.mif'
+dwi_permute_volumes_pre_eddy_option = ''
+dwi_permute_volumes_post_eddy_option = ''
+dwi_bzero_added_to_se_epi = False
if app.args.se_epi:
+ # Newest version of eddy requires that topup field be on the same grid as the eddy input DWI
+ if not image.match(dwi_header, se_epi_header, 3):
+ app.console('DWIs and SE-EPI images used for inhomogeneity field estimation are defined on different image grids; '
+ 'the latter will be automatically re-gridded to match the former')
+ new_se_epi_path = 'se_epi_regrid.mif'
+ run.command('mrtransform ' + se_epi_path + ' - -interp sinc -template dwi.mif | mrcalc - 0.0 -max ' + new_se_epi_path)
+ file.delTemporary(se_epi_path)
+ se_epi_path = new_se_epi_path
+ se_epi_header = image.Header(se_epi_path)
+
# 3 possible sources of PE information: DWI header, topup image header, command-line
# Any pair of these may conflict, and any one could be absent
- topup_auto_trt_warning = False
-
# Have to switch here based on phase-encoding acquisition design
if PE_design == 'Pair':
# Criteria:
@@ -359,22 +472,22 @@ if app.args.se_epi:
# * If _not_ present in own header:
# - If provided at command-line, infer appropriately
# - If not provided at command-line, but the DWI header has that information, infer appropriately
- if topup_pe_scheme:
+ if se_epi_pe_scheme:
if manual_pe_dir:
- if not scheme_dirs_match(topup_pe_scheme, topup_manual_pe_scheme):
+ if not scheme_dirs_match(se_epi_pe_scheme, se_epi_manual_pe_scheme):
app.warn('User-defined phase-encoding direction design does not match what is stored in SE EPI image header; proceeding with user specification')
- overwrite_topup_pe_scheme = True
+ overwrite_se_epi_pe_scheme = True
if manual_trt:
- if not scheme_times_match(topup_pe_scheme, topup_manual_pe_scheme):
+ if not scheme_times_match(se_epi_pe_scheme, se_epi_manual_pe_scheme):
app.warn('User-defined total readout time does not match what is stored in SE EPI image header; proceeding with user specification')
- overwrite_topup_pe_scheme = True
- if overwrite_topup_pe_scheme:
- topup_pe_scheme = topup_manual_pe_scheme
+ overwrite_se_epi_pe_scheme = True
+ if overwrite_se_epi_pe_scheme:
+ se_epi_pe_scheme = se_epi_manual_pe_scheme
else:
- topup_manual_pe_scheme = None # To guarantee that these data are never used
+ se_epi_manual_pe_scheme = None # To guarantee that these data are never used
else:
- overwrite_topup_pe_scheme = True
- topup_pe_scheme = topup_manual_pe_scheme
+ overwrite_se_epi_pe_scheme = True
+ se_epi_pe_scheme = se_epi_manual_pe_scheme
elif PE_design == 'All':
# Criteria:
@@ -383,8 +496,8 @@ if app.args.se_epi:
# * If _not_ present in own header:
# - Don't have enough information to proceed
# - Is this too harsh? (e.g. Have rules by which it may be inferred from the DWI header / command-line)
- if not topup_pe_scheme:
- app.error('If explicitly including SE EPI images when using -rpe_all option, they must come with their own associated phase-encoding information')
+ if not se_epi_pe_scheme:
+ app.error('If explicitly including SE EPI images when using -rpe_all option, they must come with their own associated phase-encoding information in the image header')
elif PE_design == 'Header':
# Criteria:
@@ -392,8 +505,120 @@ if app.args.se_epi:
# Nothing to do (-pe_dir option is mutually exclusive)
# * If _not_ present in own header:
# Cannot proceed
- if not topup_pe_scheme:
- app.error('No phase-encoding information present in SE EPI image header');
+ if not se_epi_pe_scheme:
+ app.error('No phase-encoding information present in SE-EPI image header')
+ # If there is no phase encoding contrast within the SE-EPI series,
+ # try combining it with the DWI b=0 volumes, see if that produces some contrast
+ # However, this should probably only be permitted if the -align_seepi option is defined
+ se_epi_pe_scheme_has_contrast = 'pe_scheme' in se_epi_header.keyval()
+ if not se_epi_pe_scheme_has_contrast:
+ if app.args.align_seepi:
+ app.console('No phase-encoding contrast present in SE-EPI images; will examine again after combining with DWI b=0 images')
+ new_se_epi_path = os.path.splitext(se_epi_path)[0] + '_dwibzeros.mif'
+ # Don't worry about trying to produce a balanced scheme here
+ run.command('dwiextract dwi.mif - -bzero | mrcat - ' + se_epi_path + ' ' + new_se_epi_path + ' -axis 3')
+ se_epi_header = image.Header(new_se_epi_path)
+ se_epi_pe_scheme_has_contrast = 'pe_scheme' in se_epi_header.keyval()
+ if se_epi_pe_scheme_has_contrast:
+ file.delTemporary(se_epi_path)
+ se_epi_path = new_se_epi_path
+ se_epi_pe_scheme = phaseEncoding.getScheme(se_epi_header)
+ dwi_bzero_added_to_se_epi = True
+ # Delay testing appropriateness of the concatenation of these images
+ # (i.e. differences in contrast) to later
+ else:
+ app.error('No phase-encoding contrast present in SE-EPI images, even after concatenating with b=0 images due to -align_seepi option; '
+ 'cannot perform inhomogeneity field estimation')
+ else:
+ app.error('No phase-encoding contrast present in SE-EPI images; cannot perform inhomogeneity field estimation')
+
+ if app.args.align_seepi:
+
+ dwi_te = dwi_header.keyval().get('EchoTime')
+ se_epi_te = se_epi_header.keyval().get('EchoTime')
+ if dwi_te and se_epi_te and dwi_te != se_epi_te:
+ app.warn('It appears that the spin-echo EPI images used for inhomogeneity field estimation have a different echo time to the DWIs being corrected. '
+ 'This may cause issues in estimation of the field, as the first DWI b=0 volume will be added to the input series to topup '
+ 'due to use of the -align_seepi option.')
+
+ dwi_tr = dwi_header.keyval().get('RepetitionTime')
+ se_epi_tr = se_epi_header.keyval().get('RepetitionTime')
+ if dwi_tr and se_epi_tr and dwi_tr != se_epi_tr:
+ app.warn('It appears that the spin-echo EPI images used for inhomogeneity field estimation have a different repetition time to the DWIs being corrected. '
+ 'This may cause issues in estimation of the field, as the first DWI b=0 volume will be added to the input series to topup '
+ 'due to use of the -align_seepi option.')
+
+ dwi_flip = dwi_header.keyval().get('FlipAngle')
+ se_epi_flip = se_epi_header.keyval().get('FlipAngle')
+ if dwi_flip and se_epi_flip and dwi_flip != se_epi_flip:
+ app.warn('It appears that the spin-echo EPI images used for inhomogeneity field estimation have a different flip angle to the DWIs being corrected. '
+ 'This may cause issues in estimation of the field, as the first DWI b=0 volume will be added to the input series to topup '
+ 'due to use of the -align_seepi option.')
+
+ # If we are using the -se_epi option, and hence the input images to topup have not come from the DWIs themselves,
+ # we need to insert the first b=0 DWI volume to the start of the topup input image. Otherwise, the field estimated
+ # by topup will not be correctly aligned with the volumes as they are processed by eddy.
+ #
+ # However, there's also a code path by which we may have already performed this addition.
+ # If we have already spliced the b=0 volumes from the DWI input with the SE-EPI image
+ # (due to the absence of phase-encoding contrast in the SE-EPI series), we don't want to
+ # re-attempt such a concatenation; the fact that the DWI b=0 images were inserted ahead of
+ # the SE-EPI images means the alignment issue should be dealt with.
+
+ if dwi_first_bzero_index == len(grad) and not dwi_bzero_added_to_se_epi:
+
+ app.warn('Unable to find b=0 volume in input DWIs to provide alignment between topup and eddy; script will proceed as though the -align_seepi option were not provided')
+
+ # If b=0 volumes from the DWIs have already been added to the SE-EPI image due to an
+ # absence of phase-encoding contrast in the latter, we don't need to perform the following
+ elif not dwi_bzero_added_to_se_epi:
+
+ run.command('mrconvert dwi.mif dwi_first_bzero.mif -coord 3 ' + str(dwi_first_bzero_index) + ' -axes 0,1,2')
+ dwi_first_bzero_pe = dwi_manual_pe_scheme[dwi_first_bzero_index] if overwrite_dwi_pe_scheme else dwi_pe_scheme[dwi_first_bzero_index]
+
+ se_epi_pe_sum = [ 0, 0, 0 ]
+ se_epi_volume_to_remove = len(se_epi_pe_scheme)
+ for index, line in enumerate(se_epi_pe_scheme):
+ se_epi_pe_sum = [ i + j for i, j in zip(se_epi_pe_sum, line[0:3]) ]
+ if se_epi_volume_to_remove == len(se_epi_pe_scheme) and line[0:3] == dwi_first_bzero_pe[0:3]:
+ se_epi_volume_to_remove = index
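+ # (a 'balanced' scheme, e.g. equal numbers of AP and PA volumes, sums to [ 0, 0, 0 ]; in that
+ # case the SE-EPI volume whose phase-encoding matches the first DWI b=0 can be swapped out for
+ # that b=0 volume without unbalancing the scheme)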
+ new_se_epi_path = os.path.splitext(se_epi_path)[0] + '_firstdwibzero.mif'
+ if (se_epi_pe_sum == [ 0, 0, 0 ]) and (se_epi_volume_to_remove < len(se_epi_pe_scheme)):
+ app.console('Balanced phase-encoding scheme detected in SE-EPI series; volume ' + str(se_epi_volume_to_remove) + ' will be removed and replaced with first b=0 from DWIs')
+ run.command('mrconvert ' + se_epi_path + ' - -coord 3 ' + ','.join([str(index) for index in range(len(se_epi_pe_scheme)) if not index == se_epi_volume_to_remove]) + ' | mrcat dwi_first_bzero.mif - ' + new_se_epi_path + ' -axis 3')
+ # Also need to update the phase-encoding scheme appropriately if it's being set manually
+ # (if embedded within the image headers, should be updated through the command calls)
+ if se_epi_manual_pe_scheme:
+ first_line = list(manual_pe_dir)
+ first_line.append(trt)
+ new_se_epi_manual_pe_scheme = [ ]
+ new_se_epi_manual_pe_scheme.append(first_line)
+ for index, entry in enumerate(se_epi_manual_pe_scheme):
+ if not index == se_epi_volume_to_remove:
+ new_se_epi_manual_pe_scheme.append(entry)
+ se_epi_manual_pe_scheme = new_se_epi_manual_pe_scheme
+ else:
+ if se_epi_pe_sum == [ 0, 0, 0 ] and se_epi_volume_to_remove == len(se_epi_pe_scheme):
+ app.console('Phase-encoding scheme of -se_epi image is balanced, but could not find appropriate volume with which to substitute first b=0 volume from DWIs; first b=0 DWI volume will be inserted to start of series, resulting in an unbalanced scheme')
+ else:
+ app.console('Unbalanced phase-encoding scheme detected in series provided via -se_epi option; first DWI b=0 volume will be inserted to start of series')
+ run.command('mrcat dwi_first_bzero.mif ' + se_epi_path + ' ' + new_se_epi_path + ' -axis 3')
+ # Also need to update the phase-encoding scheme appropriately
+ if se_epi_manual_pe_scheme:
+ first_line = list(manual_pe_dir)
+ first_line.append(trt)
+ se_epi_manual_pe_scheme = [ first_line ] + se_epi_manual_pe_scheme
+
+ # Ended branching based on balanced-ness of PE acquisition scheme within SE-EPI volumes
+ file.delTemporary(se_epi_path)
+ file.delTemporary('dwi_first_bzero.mif')
+ se_epi_path = new_se_epi_path
+
+ # Ended branching based on:
+ # - Detection of first b=0 volume in DWIs; or
+ # - Prior merge of SE-EPI and DWI b=0 volumes due to no phase-encoding contrast in SE-EPI
+
+ # Completed checking for presence of -se_epi option
elif not PE_design == 'None': # No SE EPI images explicitly provided: In some cases, can extract appropriate b=0 images from DWI
@@ -402,72 +627,133 @@ elif not PE_design == 'None': # No SE EPI images explicitly provided: In some ca
# Preferably also make sure that there's some phase-encoding contrast in there...
# With -rpe_all, need to write inferred phase-encoding to file and import before using dwiextract so that the phase-encoding
# of the extracted b=0's is propagated to the generated b=0 series
- run.command('mrconvert dwi.mif' + import_dwi_pe_table_option + ' - | dwiextract - bzeros.mif -bzero')
+ run.command('mrconvert dwi.mif' + import_dwi_pe_table_option + ' - | dwiextract - ' + se_epi_path + ' -bzero')
+ se_epi_header = image.Header(se_epi_path)
# If there's no contrast remaining in the phase-encoding scheme, it'll be written to
# PhaseEncodingDirection and TotalReadoutTime rather than pe_scheme
# In this scenario, we will be unable to run topup, or volume recombination
- if not image.headerKeyValue('bzeros.mif', 'pe_scheme'):
+ if 'pe_scheme' not in se_epi_header.keyval():
if PE_design == 'All':
app.error('DWI header indicates no phase encoding contrast between b=0 images; cannot proceed with volume recombination-based pre-processing')
else:
app.warn('DWI header indicates no phase encoding contrast between b=0 images; proceeding without inhomogeneity field estimation')
do_topup = False
- if do_topup:
- run.function(shutil.move, 'bzeros.mif', 'topup_in.mif')
- topup_size = [int(s) for s in image.headerField('topup_in.mif', 'size').split()]
- file.delTempFile('bzeros.mif')
+ run.function(os.remove, se_epi_path)
+ se_epi_path = None
+ se_epi_header = None
+
+
+# If the first b=0 volume in the DWIs is in fact not the first volume (i.e. index zero), we're going to
+# manually place it at the start of the DWI volumes when they are input to eddy, so that the
+# first input volume to topup and the first input volume to eddy are one and the same.
+# Note: If at a later date, the statistical outputs from eddy are considered (e.g. motion, outliers),
+# then this volume permutation will need to be taken into account
+if dwi_first_bzero_index:
+ app.console('First b=0 volume in input DWIs is volume index ' + str(dwi_first_bzero_index) + '; '
+ 'this will be permuted to be the first volume (index 0) when eddy is run')
+ dwi_permute_volumes_pre_eddy_option = ' -coord 3 ' + \
+ str(dwi_first_bzero_index) + \
+ ',0' + \
+ (':' + str(dwi_first_bzero_index-1) if dwi_first_bzero_index > 1 else '') + \
+ (',' + str(dwi_first_bzero_index+1) if dwi_first_bzero_index < dwi_num_volumes-1 else '') + \
+ (':' + str(dwi_num_volumes-1) if dwi_first_bzero_index < dwi_num_volumes-2 else '')
+ dwi_permute_volumes_post_eddy_option = ' -coord 3 1' + \
+ (':' + str(dwi_first_bzero_index) if dwi_first_bzero_index > 1 else '') + \
+ ',0' + \
+ (',' + str(dwi_first_bzero_index+1) if dwi_first_bzero_index < dwi_num_volumes-1 else '') + \
+ (':' + str(dwi_num_volumes-1) if dwi_first_bzero_index < dwi_num_volumes-2 else '')
+ app.var(dwi_permute_volumes_pre_eddy_option, dwi_permute_volumes_post_eddy_option)
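+ # (illustrative example: with 60 volumes and the first b=0 at index 5, the pre-eddy option is
+ # ' -coord 3 5,0:4,6:59' and the post-eddy option is ' -coord 3 1:5,0,6:59', which restores
+ # the original volume ordering after eddy)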
+
# This may be required when setting up the topup call
-import_topup_pe_table_option = ''
-if topup_manual_pe_scheme:
- with open('topup_manual_pe_scheme.txt', 'w') as f:
- for line in topup_manual_pe_scheme:
+import_se_epi_manual_pe_table_option = ''
+if se_epi_manual_pe_scheme:
+ with open('se_epi_manual_pe_scheme.txt', 'w') as f:
+ for line in se_epi_manual_pe_scheme:
f.write(' '.join([str(value) for value in line]) + '\n')
- import_topup_pe_table_option = ' -import_pe_table topup_manual_pe_scheme.txt'
+ import_se_epi_manual_pe_table_option = ' -import_pe_table se_epi_manual_pe_scheme.txt'
# Need gradient table if running dwi2mask after applytopup to derive a brain mask for eddy
run.command('mrinfo dwi.mif -export_grad_mrtrix grad.b')
-
eddy_in_topup_option = ''
+dwi_post_eddy_crop_option = ''
+dwi_path = 'dwi.mif'
if do_topup:
- # If no axes need to be cropped, use the original topup input volumes
- # Otherwise, need to call mrcrop with the appropriate options, and pass those to topup
- topup_in_path = 'topup_in.mif'
- # For any non-even axis sizes, crop the first voxel along that dimension
- # TODO This primarily applies to topup - don't recall if eddy bugs out or not
- crop_option = ''
- for axis, axis_size in enumerate(topup_size[:3]):
- if int(axis_size)%2:
- crop_option += ' -axis ' + str(axis) + ' 1 ' + str(int(axis_size)-1)
- if crop_option:
- app.warn('Topup images contain at least one non-even dimension; cropping images for topup compatibility')
- run.command('mrcrop topup_in.mif topup_in_crop.mif' + crop_option)
- file.delTempFile('topup_in.mif')
- topup_in_path = 'topup_in_crop.mif'
+ # topup will crash if its input image has a spatial dimension with a non-even size;
+ # presumably due to a downsampling by a factor of 2 in a multi-resolution scheme
+ # The newest eddy also requires the output from topup and the input DWIs to have the same size;
+ # therefore this restriction applies to the DWIs as well
+ # Rather than crop in this case (which would result in a cropped output image),
+ # duplicate the last slice on any problematic axis, and then crop that extra
+ # slice at the output step
+ # By this point, if the input SE-EPI images and DWIs are not on the same image grid, the
+ # SE-EPI images have already been re-gridded to DWI image space;
+ odd_axis_count = 0
+ for axis_size in dwi_header.size()[:3]:
+ if int(axis_size%2):
+ odd_axis_count += 1
+ if odd_axis_count:
+ app.console(str(odd_axis_count) + ' spatial ' + ('axes of DWIs have' if odd_axis_count > 1 else 'axis of DWIs has') + ' non-even size; '
+ 'this will be automatically padded for compatibility with topup, and the extra slice' + ('s' if odd_axis_count > 1 else '') + ' erased afterwards')
+ for axis, axis_size in enumerate(dwi_header.size()[:3]):
+ if int(axis_size%2):
+ new_se_epi_path = os.path.splitext(se_epi_path)[0] + '_pad' + str(axis) + '.mif'
+ run.command('mrconvert ' + se_epi_path + ' -coord ' + str(axis) + ' ' + str(axis_size-1) + ' - | mrcat ' + se_epi_path + ' - ' + new_se_epi_path + ' -axis ' + str(axis))
+ file.delTemporary(se_epi_path)
+ se_epi_path = new_se_epi_path
+ new_dwi_path = os.path.splitext(dwi_path)[0] + '_pad' + str(axis) + '.mif'
+ run.command('mrconvert ' + dwi_path + ' -coord ' + str(axis) + ' ' + str(axis_size-1) + ' - | mrcat ' + dwi_path + ' - ' + new_dwi_path + ' -axis ' + str(axis))
+ file.delTemporary(dwi_path)
+ dwi_path = new_dwi_path
+ dwi_post_eddy_crop_option += ' -coord ' + str(axis) + ' 0:' + str(axis_size-1)
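+ # (e.g. an axis of size 45 is padded to 46 by duplicating its last slice; the option
+ # ' -coord <axis> 0:44' then removes that extra slice from the final output)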
+ # If we are padding the slice axis, and performing slice-to-volume correction,
+ # then we need to perform the corresponding padding to the slice timing
+ if eddy_mporder and slice_encoding_direction[axis]:
+ dwi_num_slices += 1
+ # At this point in the script, this information may be encoded either within
+ # the slice timing vector (as imported from the image header), or as
+ # slice groups (i.e. in the format expected by eddy). How these data are
+ # stored affects how the padding is performed.
+ if slice_timing:
+ slice_timing.append(slice_timing[-1])
+ elif slice_groups:
+ # Can't edit in place when looping through the list
+ new_slice_groups = [ ]
+ for group in slice_groups:
+ if axis_size-1 in group:
+ group.append(axis_size)
+ new_slice_groups.append(group)
+ slice_groups = new_slice_groups
+
# Do the conversion in preparation for topup
- run.command('mrconvert ' + topup_in_path + ' topup_in.nii' + import_topup_pe_table_option + ' -stride -1,+2,+3,+4 -export_pe_table topup_datain.txt')
- file.delTempFile(topup_in_path)
+ run.command('mrconvert ' + se_epi_path + ' topup_in.nii' + import_se_epi_manual_pe_table_option + ' -strides -1,+2,+3,+4 -export_pe_table topup_datain.txt')
+ file.delTemporary(se_epi_path)
# Run topup
topup_manual_options = ''
if app.args.topup_options:
topup_manual_options = ' ' + app.args.topup_options.strip()
- run.command(topup_cmd + ' --imain=topup_in.nii --datain=topup_datain.txt --out=field --fout=field_map' + fsl_suffix + ' --config=' + topup_config_path + topup_manual_options)
+ (topup_stdout, topup_stderr) = run.command(topup_cmd + ' --imain=topup_in.nii --datain=topup_datain.txt --out=field --fout=field_map' + fsl_suffix + ' --config=' + topup_config_path + topup_manual_options)
+ with open('topup_output.txt', 'w') as f:
+ f.write(topup_stdout + '\n' + topup_stderr)
+ if app.verbosity > 2:
+ app.console('Output of topup command:\n' + topup_stdout + '\n' + topup_stderr)
# Apply the warp field to the input image series to get an initial corrected volume estimate
# applytopup can't receive the complete DWI input and correct it as a whole, because the phase-encoding
# details may vary between volumes
if dwi_manual_pe_scheme:
- run.command('mrconvert dwi.mif' + import_dwi_pe_table_option + ' - | mrinfo - -export_pe_eddy applytopup_config.txt applytopup_indices.txt')
+ run.command('mrconvert ' + dwi_path + import_dwi_pe_table_option + ' - | mrinfo - -export_pe_eddy applytopup_config.txt applytopup_indices.txt')
else:
- run.command('mrinfo dwi.mif -export_pe_eddy applytopup_config.txt applytopup_indices.txt')
+ run.command('mrinfo ' + dwi_path + ' -export_pe_eddy applytopup_config.txt applytopup_indices.txt')
+
# Update: Call applytopup separately for each unique phase-encoding
# This should be the most compatible option with more complex phase-encoding acquisition designs,
@@ -477,51 +763,107 @@ if do_topup:
index = 1
with open('applytopup_config.txt', 'r') as f:
for line in f:
- input_path = 'dwi_pe_' + str(index) + '.nii'
- json_path = 'dwi_pe_' + str(index) + '.json'
- temp_path = 'dwi_pe_' + str(index) + '_topup' + fsl_suffix
- output_path = 'dwi_pe_' + str(index) + '_topup.mif'
- run.command('dwiextract dwi.mif' + import_dwi_pe_table_option + ' -pe ' + ','.join(line.split()) + ' - | mrconvert - ' + input_path + ' -json_export ' + json_path)
+ prefix = os.path.splitext(dwi_path)[0] + '_pe_' + str(index)
+ input_path = prefix + '.nii'
+ json_path = prefix + '.json'
+ temp_path = prefix + '_applytopup.nii'
+ output_path = prefix + '_applytopup.mif'
+ run.command('dwiextract ' + dwi_path + import_dwi_pe_table_option + ' -pe ' + ','.join(line.split()) + ' - | mrconvert - ' + input_path + ' -json_export ' + json_path)
run.command(applytopup_cmd + ' --imain=' + input_path + ' --datain=applytopup_config.txt --inindex=' + str(index) + ' --topup=field --out=' + temp_path + ' --method=jac')
- file.delTempFile(input_path)
+ file.delTemporary(input_path)
temp_path = fsl.findImage(temp_path)
run.command('mrconvert ' + temp_path + ' ' + output_path + ' -json_import ' + json_path)
- file.delTempFile(json_path)
- file.delTempFile(temp_path)
+ file.delTemporary(json_path)
+ file.delTemporary(temp_path)
applytopup_image_list.append(output_path)
index += 1
# Use the initial corrected volumes to derive a brain mask for eddy
if len(applytopup_image_list) == 1:
- run.command('dwi2mask ' + applytopup_image_list[0] + ' - | maskfilter - dilate - | mrconvert - mask.nii -datatype float32 -stride -1,+2,+3')
+ run.command('dwi2mask ' + applytopup_image_list[0] + ' - | maskfilter - dilate - | mrconvert - eddy_mask.nii -datatype float32 -strides -1,+2,+3')
else:
- run.command('mrcat ' + ' '.join(applytopup_image_list) + ' - | dwi2mask - - | maskfilter - dilate - | mrconvert - mask.nii -datatype float32 -stride -1,+2,+3')
+ run.command('mrcat ' + ' '.join(applytopup_image_list) + ' - -axis 3 | dwi2mask - - | maskfilter - dilate - | mrconvert - eddy_mask.nii -datatype float32 -strides -1,+2,+3')
for entry in applytopup_image_list:
- file.delTempFile(entry)
+ file.delTemporary(entry)
eddy_in_topup_option = ' --topup=field'
else:
# Generate a processing mask for eddy based on the uncorrected input DWIs
- run.command('dwi2mask dwi.mif - | maskfilter - dilate - | mrconvert - mask.nii -datatype float32 -stride -1,+2,+3')
-
-
+ run.command('dwi2mask ' + dwi_path + ' - | maskfilter - dilate - | mrconvert - eddy_mask.nii -datatype float32 -strides -1,+2,+3')
+
+
+# Generate the text file containing slice timing / grouping information if necessary
+if eddy_mporder:
+ if slice_timing:
+ # This list contains, for each slice, the timing offset between acquisition of the
+ # first slice in the volume, and acquisition of that slice
+ # Eddy however requires a text file where each row contains those slices that were
+ # acquired with a single readout, in ordered rows from first slice (group)
+ # acquired to last slice (group) acquired
+ if sum(slice_encoding_direction) < 0:
+ slice_timing = list(reversed(slice_timing))
+ slice_groups = [ [ x[0] for x in g ] for _, g in itertools.groupby(sorted(enumerate(slice_timing), key=lambda x:x[1]), key=lambda x:x[1]) ] #pylint: disable=unused-variable
+ app.var(slice_timing, slice_groups)
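+ # (illustrative example: SliceTiming = [ 0.0, 0.5, 0.0, 0.5 ] (multiband factor 2) yields
+ # slice groups [ [0, 2], [1, 3] ]: slices acquired simultaneously share a row, with rows
+ # ordered by acquisition time)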
+ # Variable slice_groups may have already been defined in the correct format.
+ # In that instance, there's nothing to do other than write it to file;
+ # UNLESS the slice encoding direction is known to be reversed, in which case
+ # we need to reverse the slice indices. This is however expected to be
+ # rare, given it requires that the slspec text file be provided manually while
+ # SliceEncodingDirection is also present in the header.
+ elif slice_groups and sum(slice_encoding_direction) < 0:
+ new_slice_groups = [ ]
+ for group in slice_groups:
+ new_slice_groups.append([ dwi_num_slices-1-index for index in group ])
+ app.var(slice_groups, new_slice_groups)
+ slice_groups = new_slice_groups
+
+ with open('slspec.txt', 'w') as f:
+ for line in slice_groups:
+ f.write(' '.join(str(value) for value in line) + '\n')
+ eddy_manual_options.append('--slspec=slspec.txt')
+
+
+# Revert eddy_manual_options from a list back to a single string
+eddy_manual_options = (' ' + ' '.join(eddy_manual_options)) if eddy_manual_options else ''
+
+
+# Prepare input data for eddy
+run.command('mrconvert ' + dwi_path + import_dwi_pe_table_option + dwi_permute_volumes_pre_eddy_option + ' eddy_in.nii -strides -1,+2,+3,+4 -export_grad_fsl bvecs bvals -export_pe_eddy eddy_config.txt eddy_indices.txt')
+file.delTemporary(dwi_path)
# Run eddy
-run.command('mrconvert dwi.mif' + import_dwi_pe_table_option + ' dwi.nii -stride -1,+2,+3,+4 -export_grad_fsl bvecs bvals -export_pe_eddy eddy_config.txt eddy_indices.txt')
-file.delTempFile('dwi.mif')
-eddy_manual_options = ''
-if app.args.eddy_options:
- eddy_manual_options = ' ' + app.args.eddy_options.strip()
-run.command(eddy_cmd + ' --imain=dwi.nii --mask=mask.nii --acqp=eddy_config.txt --index=eddy_indices.txt --bvecs=bvecs --bvals=bvals' + eddy_in_topup_option + eddy_manual_options + ' --out=dwi_post_eddy')
-
+# If a CUDA version is in PATH, run that first; if it fails, re-try using the non-CUDA version
+eddy_all_options = '--imain=eddy_in.nii --mask=eddy_mask.nii --acqp=eddy_config.txt --index=eddy_indices.txt --bvecs=bvecs --bvals=bvals' + eddy_in_topup_option + eddy_manual_options + ' --out=dwi_post_eddy'
+eddy_cuda_cmd = fsl.eddyBinary(True)
+eddy_openmp_cmd = fsl.eddyBinary(False)
+if eddy_cuda_cmd:
+ # If running CUDA version, but OpenMP version is also available, don't stop the script if the CUDA version fails
+ (eddy_stdout, eddy_stderr) = run.command(eddy_cuda_cmd + ' ' + eddy_all_options, not eddy_openmp_cmd)
+ if app.verbosity > 2:
+ app.console('Output of CUDA eddy command:\n' + eddy_stdout + '\n' + eddy_stderr)
+ if os.path.isfile('dwi_post_eddy.eddy_parameters'):
+ # Flag that the OpenMP version won't be attempted
+ eddy_openmp_cmd = ''
+ else:
+ app.warn('CUDA version of eddy appears to have failed; trying OpenMP version')
+if eddy_openmp_cmd:
+ (eddy_stdout, eddy_stderr) = run.command(eddy_openmp_cmd + ' ' + eddy_all_options)
+ if app.verbosity > 2:
+ app.console('Output of OpenMP eddy command:\n' + eddy_stdout + '\n' + eddy_stderr)
+file.delTemporary('eddy_in.nii')
+file.delTemporary('eddy_mask.nii')
+if do_topup:
+ file.delTemporary(fsl.findImage('field_fieldcoef'))
+with open('eddy_output.txt', 'w') as f:
+ f.write(eddy_stdout + '\n' + eddy_stderr)
+eddy_output_image_path = fsl.findImage('dwi_post_eddy')
# Get the axis strides from the input series, so the output image can be modified to match
-stride_option = ' -stride ' + stride.replace(' ', ',')
-
+stride_option = ' -strides ' + ','.join([str(i) for i in dwi_header.strides()])
# Check to see whether or not eddy has provided a rotated bvecs file;
@@ -532,22 +874,21 @@ if not os.path.isfile(bvecs_path):
bvecs_path = 'bvecs'
-
# Determine whether or not volume recombination should be performed
# This could be either due to use of -rpe_all option, or just due to the data provided with -rpe_header
# Rather than trying to re-use the code that was used in the case of -rpe_all, run fresh code
# The phase-encoding scheme needs to be checked also
-volume_matchings = [ num_volumes ] * num_volumes
+volume_matchings = [ dwi_num_volumes ] * dwi_num_volumes
volume_pairs = [ ]
-app.debug('Commencing gradient direction matching; ' + str(num_volumes) + ' volumes')
-for index1 in range(num_volumes):
- if volume_matchings[index1] == num_volumes: # As yet unpaired
- for index2 in range(index1+1, num_volumes):
- if volume_matchings[index2] == num_volumes: # Also as yet unpaired
+app.debug('Commencing gradient direction matching; ' + str(dwi_num_volumes) + ' volumes')
+for index1 in range(dwi_num_volumes):
+ if volume_matchings[index1] == dwi_num_volumes: # As yet unpaired
+ for index2 in range(index1+1, dwi_num_volumes):
+ if volume_matchings[index2] == dwi_num_volumes: # Also as yet unpaired
# Here, need to check both gradient matching and reversed phase-encode direction
if not any(dwi_pe_scheme[index1][i] + dwi_pe_scheme[index2][i] for i in range(0,3)) and grads_match(grad[index1], grad[index2]):
- volume_matchings[index1] = index2;
- volume_matchings[index2] = index1;
+ volume_matchings[index1] = index2
+ volume_matchings[index2] = index1
volume_pairs.append([index1, index2])
app.debug('Matched volume ' + str(index1) + ' with ' + str(index2) + '\n' +
'Phase encoding: ' + str(dwi_pe_scheme[index1]) + ' ' + str(dwi_pe_scheme[index2]) + '\n' +
@@ -555,10 +896,15 @@ for index1 in range(num_volumes):
break
-if not len(volume_pairs) == int(num_volumes/2):
+if not len(volume_pairs) == int(dwi_num_volumes/2):
+
+ if do_topup:
+ file.delTemporary('topup_in.nii')
+ file.delTemporary(fsl.findImage('field_map'))
# Convert the resulting volume to the output image, and re-insert the diffusion encoding
- run.command('mrconvert ' + fsl.findImage('dwi_post_eddy') + ' result.mif' + stride_option + ' -fslgrad ' + bvecs_path + ' bvals')
+ run.command('mrconvert ' + eddy_output_image_path + ' result.mif' + dwi_permute_volumes_post_eddy_option + dwi_post_eddy_crop_option + stride_option + ' -fslgrad ' + bvecs_path + ' bvals')
+ file.delTemporary(eddy_output_image_path)
else:
app.console('Detected matching DWI volumes with opposing phase encoding; performing explicit volume recombination')
@@ -570,7 +916,6 @@ else:
# potentially have subject rotation between them (and therefore the sensitisation direction is
# not precisely equivalent), the best we can do is take the mean of the two vectors.
# Manual recombination of volumes needs to take into account the explicit volume matching
- # TODO Re-test eddy LSR
bvecs = [ [] for axis in range(3) ]
with open(bvecs_path, 'r') as f:
@@ -605,7 +950,7 @@ else:
with open('bvecs_combined', 'w') as f:
for axis in range(0, 3):
axis_data = [ ]
- for volume in range(0, int(num_volumes/2)):
+ for volume in range(0, int(dwi_num_volumes/2)):
axis_data.append(str(bvecs_combined_transpose[volume][axis]))
f.write(' '.join(axis_data) + '\n')
@@ -613,7 +958,6 @@ else:
f.write(' '.join( [ str(b) for b in bvals_combined ] ))
-
# Prior to 5.0.8, a bug resulted in the output field map image from topup having an identity transform,
# regardless of the transform of the input image
# Detect this, and manually replace the transform if necessary
@@ -622,10 +966,11 @@ else:
field_map_image = fsl.findImage('field_map')
if not image.match('topup_in.nii', field_map_image):
app.warn('topup output field image has erroneous header; recommend updating FSL to version 5.0.8 or later')
- run.command('mrtransform ' + field_map_image + ' -replace topup_in.nii field_map_fix.mif')
- file.delTempFile(field_map_image)
- field_map_image = 'field_map_fix.mif'
-
+ new_field_map_image = 'field_map_fix.mif'
+ run.command('mrtransform ' + field_map_image + ' -replace topup_in.nii ' + new_field_map_image)
+ file.delTemporary(field_map_image)
+ field_map_image = new_field_map_image
+ file.delTemporary('topup_in.nii')
# Derive the weight images
@@ -639,6 +984,7 @@ else:
eddy_config = [ [ float(f) for f in line.split() ] for line in open('eddy_config.txt', 'r').read().split('\n')[:-1] ]
eddy_indices = [ int(i) for i in open('eddy_indices.txt', 'r').read().split() ]
+ app.var(eddy_config, eddy_indices)
# This section derives, for each phase encoding configuration present, the 'weight' to be applied
# to the image during volume recombination, which is based on the Jacobian of the field in the
@@ -650,46 +996,79 @@ else:
run.command('mrcalc ' + field_map_image + ' ' + str(config[3]) + ' -mult' + sign_multiplier + ' - | mrfilter - gradient - | mrconvert - ' + field_derivative_path + ' -coord 3 ' + str(pe_axis) + ' -axes 0,1,2')
jacobian_path = 'jacobian_' + str(index+1) + '.mif'
run.command('mrcalc 1.0 ' + field_derivative_path + ' -add 0.0 -max ' + jacobian_path)
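+ # (i.e. Jacobian = max(1 + d(field x readout_time)/d(PE axis), 0), with the field map scaled
+ # by the readout time taken from the eddy config file; the recombination weight computed
+ # below is the square of this Jacobian)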
- file.delTempFile(field_derivative_path)
+ file.delTemporary(field_derivative_path)
run.command('mrcalc ' + jacobian_path + ' ' + jacobian_path + ' -mult weight' + str(index+1) + '.mif')
- file.delTempFile(jacobian_path)
+ file.delTemporary(jacobian_path)
+ file.delTemporary(field_map_image)
# If eddy provides its main image output in a compressed format, the code block below will need to
# uncompress that image independently for every volume pair. Instead, if this is the case, let's
# convert it to an uncompressed format before we do anything with it.
- eddy_output = fsl.findImage('dwi_post_eddy')
- if eddy_output.endswith('.gz'):
- run.command('mrconvert ' + eddy_output + ' dwi_post_eddy.nii')
- file.delTempFile(eddy_output)
- eddy_output = 'dwi_post_eddy.nii'
+ if eddy_output_image_path.endswith('.gz'):
+ new_eddy_output_image_path = 'dwi_post_eddy_uncompressed.mif'
+ run.command('mrconvert ' + eddy_output_image_path + ' ' + new_eddy_output_image_path)
+ file.delTemporary(eddy_output_image_path)
+ eddy_output_image_path = new_eddy_output_image_path
+
+ # If the DWI volumes were permuted prior to running eddy, then the simplest approach is to permute them
+ # back to their original positions; otherwise, the stored gradient vector directions / phase encode
+ # directions / matched volume pairs are no longer appropriate
+ if dwi_permute_volumes_post_eddy_option:
+ new_eddy_output_image_path = os.path.splitext(eddy_output_image_path)[0] + '_volpermuteundo.mif'
+ run.command('mrconvert ' + eddy_output_image_path + dwi_permute_volumes_post_eddy_option + ' ' + new_eddy_output_image_path)
+ file.delTemporary(eddy_output_image_path)
+ eddy_output_image_path = new_eddy_output_image_path
# This section extracts the two volumes corresponding to each reversed phase-encoded volume pair, and
# derives a single image volume based on the recombination equation
combined_image_list = [ ]
+ progress = app.progressBar('Performing explicit volume recombination', len(volume_pairs))
for index, volumes in enumerate(volume_pairs):
pe_indices = [ eddy_indices[i] for i in volumes ]
- run.command('mrconvert ' + eddy_output + ' volume0.mif -coord 3 ' + str(volumes[0]))
- run.command('mrconvert ' + eddy_output + ' volume1.mif -coord 3 ' + str(volumes[1]))
+ run.command('mrconvert ' + eddy_output_image_path + ' volume0.mif -coord 3 ' + str(volumes[0]))
+ run.command('mrconvert ' + eddy_output_image_path + ' volume1.mif -coord 3 ' + str(volumes[1]))
# Volume recombination equation described in Skare and Bammer 2010
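+ # (i.e. combined = (volume0*weight0 + volume1*weight1) / (weight0 + weight1), clamped to be
+ # non-negative, where each weight is the squared Jacobian for that volume's phase-encoding)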
- run.command('mrcalc volume0.mif weight' + str(pe_indices[0]) + '.mif -mult volume1.mif weight' + str(pe_indices[1]) + '.mif -mult -add weight' + str(pe_indices[0]) + '.mif weight' + str(pe_indices[1]) + '.mif -add -divide 0.0 -max combined' + str(index) + '.mif')
- combined_image_list.append('combined' + str(index) + '.mif')
- os.remove('volume0.mif')
- os.remove('volume1.mif')
- file.delTempFile(eddy_output)
-
+ combined_image_path = 'combined' + str(index) + '.mif'
+ run.command('mrcalc volume0.mif weight' + str(pe_indices[0]) + '.mif -mult volume1.mif weight' + str(pe_indices[1]) + '.mif -mult -add weight' + str(pe_indices[0]) + '.mif weight' + str(pe_indices[1]) + '.mif -add -divide 0.0 -max ' + combined_image_path)
+ combined_image_list.append(combined_image_path)
+ run.function(os.remove, 'volume0.mif')
+ run.function(os.remove, 'volume1.mif')
+ progress.increment()
+ progress.done()
+
+ file.delTemporary(eddy_output_image_path)
for index in range(0, len(eddy_config)):
- file.delTempFile('weight' + str(index+1) + '.mif')
+ file.delTemporary('weight' + str(index+1) + '.mif')
# Finally the recombined volumes must be concatenated to produce the resulting image series
- run.command('mrcat ' + ' '.join(combined_image_list) + ' - -axis 3 | mrconvert - result.mif -fslgrad bvecs_combined bvals_combined' + stride_option)
-
+ run.command('mrcat ' + ' '.join(combined_image_list) + ' - -axis 3 | mrconvert - result.mif' + dwi_post_eddy_crop_option + ' -fslgrad bvecs_combined bvals_combined' + stride_option)
for entry in combined_image_list:
- file.delTempFile(entry)
+ file.delTemporary(entry)
+# Grab any relevant files that eddy has created, and copy them to the requested directory
+if eddyqc_path:
+ if os.path.exists(eddyqc_path) and not os.path.isdir(eddyqc_path):
+ run.function(os.remove, eddyqc_path)
+ if not os.path.exists(eddyqc_path):
+ run.function(os.makedirs, eddyqc_path)
+ for filename in eddyqc_files:
+ if os.path.exists('dwi_post_eddy.' + filename):
+ run.function(shutil.copy, 'dwi_post_eddy.' + filename, os.path.join(eddyqc_path, filename))
+
+
+# Build a list of header key-value entries that we want to _remove_ from the
+# output image, as they may have been useful for controlling pre-processing
+# but are no longer required, and will just bloat the key-value listings of
+# all subsequent derived images
+# Disabled this for now: The output from eddy is a NIfTI, so all these fields
+# have been lost. For now just neglect to re-introduce them; in the future,
+# this may be combined with GitHub Issue #1188 (proper behaviour of
+# command_history header key-value entry when running a Python script)
+#keys_to_remove = [ 'EchoTime', 'FlipAngle', 'MultibandAccelerationFactor', 'PhaseEncodingDirection', 'RepetitionTime', 'SliceEncodingDirection', 'SliceTiming', 'TotalReadoutTime', 'pe_scheme' ]
+#clear_property_options = ' ' + ' '.join(['-clear_property '+key for key in keys_to_remove if key in dwi_header.keyval() ])
# Finish!
-run.command('mrconvert result.mif ' + path.fromUser(app.args.output, True) + grad_export_option + (' -force' if app.force else ''))
+run.command('mrconvert result.mif ' + path.fromUser(app.args.output, True) + grad_export_option + (' -force' if app.forceOverwrite else ''))
app.complete()
-
diff --git a/bin/dwishellmath b/bin/dwishellmath
index 372cb50d2b..45cb6f216b 100755
--- a/bin/dwishellmath
+++ b/bin/dwishellmath
@@ -22,10 +22,10 @@ HELP_PAGE
fi
tmpfiles=""
-for b in $(mrinfo "$1" -shells);
+for b in $(mrinfo "$1" -shell_bvalues);
do
echo "Extracting $2 of the b=$b shell."
- tmpfiles="$tmpfiles "$(dwiextract -shell $b "$1" - -quiet | mrmath -axis 3 - $2 - -quiet)
+ tmpfiles="$tmpfiles "$(dwiextract -shells $b "$1" - -quiet | mrmath -axis 3 - $2 - -quiet)
done
mrcat -axis 3 $tmpfiles "$3" -quiet
diff --git a/bin/labelsgmfix b/bin/labelsgmfix
index eab97d14a6..92594b8312 100755
--- a/bin/labelsgmfix
+++ b/bin/labelsgmfix
@@ -8,20 +8,20 @@
# node indices that correspond to these structures, and replaces them with estimates
# derived from FIRST.
-
# Make the corresponding MRtrix3 Python libraries available
-import inspect, os, sys
-lib_folder = os.path.realpath(os.path.abspath(os.path.join(os.path.split(inspect.getfile(inspect.currentframe()))[0], os.pardir, 'lib')))
+import inspect, math, os, sys
+lib_folder = os.path.realpath(os.path.join(os.path.dirname(os.path.realpath(inspect.getfile(inspect.currentframe()))), os.pardir, 'lib'))
if not os.path.isdir(lib_folder):
sys.stderr.write('Unable to locate MRtrix3 Python libraries')
sys.exit(1)
sys.path.insert(0, lib_folder)
-from distutils.spawn import find_executable
+
from mrtrix3 import app, fsl, image, path, run
+
app.init('Robert E. Smith (robert.smith@florey.edu.au)',
'In a FreeSurfer parcellation image, replace the sub-cortical grey matter structure delineations using FSL FIRST')
app.cmdline.addCitation('', 'Patenaude, B.; Smith, S. M.; Kennedy, D. N. & Jenkinson, M. A Bayesian model of shape and appearance for subcortical brain segmentation. NeuroImage, 2011, 56, 907-922', True)
@@ -46,14 +46,9 @@ fsl_path = os.environ.get('FSLDIR', '')
if not fsl_path:
app.error('Environment variable FSLDIR is not set; please run appropriate FSL configuration script')
-first_cmd = 'run_first_all'
-if not find_executable(first_cmd):
- first_cmd = "fsl5.0-run_first_all"
- if not find_executable(first_cmd):
- app.error('Could not find FSL program run_first_all; please verify FSL install')
+first_cmd = fsl.exeName('run_first_all')
first_atlas_path = os.path.join(fsl_path, 'data', 'first', 'models_336_bin')
-
if not os.path.isdir(first_atlas_path):
app.error('Atlases required for FSL\'s FIRST program not installed;\nPlease install fsl-first-data using your relevant package manager')
@@ -68,11 +63,23 @@ if app.args.sgm_amyg_hipp:
structure_map.update({ 'L_Amyg':'Left-Amygdala', 'R_Amyg':'Right-Amygdala',
'L_Hipp':'Left-Hippocampus', 'R_Hipp':'Right-Hippocampus' })
+t1_spacing = image.Header(path.fromUser(app.args.t1, False)).spacing()
+upsample_for_first = False
+# If the geometric mean voxel size is appreciably larger than expected for a T1-weighted image (threshold ~1.2mm), make a guess that the user has erroneously re-gridded their data
+if math.pow(t1_spacing[0] * t1_spacing[1] * t1_spacing[2], 1.0/3.0) > 1.225:
+ app.warn('Voxel size of input T1 image larger than expected for T1-weighted images (' + str(t1_spacing) + '); '
+ 'image will be resampled to 1mm isotropic in order to maximise chance of '
+ 'FSL FIRST script succeeding')
+ upsample_for_first = True
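+# (e.g. a T1 that has been re-gridded to 1.5mm isotropic has a geometric mean voxel size of
+# 1.5mm > 1.225mm and would be resampled; a native ~1mm acquisition is left untouched)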
+
app.makeTempDir()
# Get the parcellation and T1 images into the temporary directory, with conversion of the T1 into the correct format for FSL
run.command('mrconvert ' + path.fromUser(app.args.parc, True) + ' ' + path.toTemp('parc.mif', True))
-run.command('mrconvert ' + path.fromUser(app.args.t1, True) + ' ' + path.toTemp('T1.nii', True) + ' -stride -1,+2,+3')
+if upsample_for_first:
+ run.command('mrresize ' + path.fromUser(app.args.t1, True) + ' - -voxel 1.0 -interp sinc | mrcalc - 0.0 -max - | mrconvert - ' + path.toTemp('T1.nii', True) + ' -strides -1,+2,+3')
+else:
+ run.command('mrconvert ' + path.fromUser(app.args.t1, True) + ' ' + path.toTemp('T1.nii', True) + ' -strides -1,+2,+3')
app.gotoTempDir()
@@ -80,7 +87,7 @@ app.gotoTempDir()
first_input_is_brain_extracted = ''
if app.args.premasked:
first_input_is_brain_extracted = ' -b'
-run.command(first_cmd + ' -s ' + ','.join(structure_map.keys()) + ' -i T1.nii' + first_input_is_brain_extracted + ' -o first')
+run.command(first_cmd + ' -m none -s ' + ','.join(structure_map.keys()) + ' -i T1.nii' + first_input_is_brain_extracted + ' -o first')
fsl.checkFirst('first', structure_map.keys())
# Generate an empty image that will be used to construct the new SGM nodes
@@ -101,18 +108,21 @@ with open(sgm_lut_file_path) as f:
# Convert FIRST meshes to node masks
# In this use case, don't want the PVE images; want to threshold at 0.5
mask_list = [ ]
+progress = app.progressBar('Generating mask images for SGM structures', len(structure_map))
for key, value in structure_map.items():
image_path = key + '_mask.mif'
mask_list.append(image_path)
vtk_in_path = 'first-' + key + '_first.vtk'
run.command('meshconvert ' + vtk_in_path + ' first-' + key + '_transformed.vtk -transform first2real T1.nii')
- run.command('mesh2pve first-' + key + '_transformed.vtk parc.mif - | mrthreshold - ' + image_path + ' -abs 0.5')
+ run.command('mesh2voxel first-' + key + '_transformed.vtk parc.mif - | mrthreshold - ' + image_path + ' -abs 0.5')
# Add to the SGM image; don't worry about overlap for now
node_index = sgm_lut[value]
run.command('mrcalc ' + image_path + ' ' + node_index + ' sgm.mif -if sgm_new.mif')
- if not app._lastFile:
+ if not app.continueOption:
run.function(os.remove, 'sgm.mif')
run.function(os.rename, 'sgm_new.mif', 'sgm.mif')
+ progress.increment()
+progress.done()
# Detect any overlapping voxels between the SGM masks, and set to zero
run.command('mrmath ' + ' '.join(mask_list) + ' sum - | mrcalc - 1 -gt sgm_overlap_mask.mif')
@@ -125,17 +135,18 @@ run.command('labelconvert sgm_masked.mif ' + sgm_lut_file_path + ' ' + path.from
# * Figure out what index the structure has been mapped to; this can only be done using mrstats
# * Strip that index from the parcellation image
# * Insert the new delineation of that structure
-for struct in structure_map.keys():
+progress = app.progressBar('Replacing SGM parcellations', len(structure_map))
+for struct in structure_map:
image_path = struct + '_mask.mif'
- index = image.statistic('sgm_new_labels.mif', 'median', image_path)
+ index = image.statistic('sgm_new_labels.mif', 'median', '-mask ' + image_path)
run.command('mrcalc parc.mif ' + index + ' -eq 0 parc.mif -if parc_removed.mif')
- if not app._lastFile:
- run.function(os.remove, 'parc.mif')
- run.function(os.rename, 'parc_removed.mif', 'parc.mif')
+ run.function(os.remove, 'parc.mif')
+ run.function(os.rename, 'parc_removed.mif', 'parc.mif')
+ progress.increment()
+progress.done()
# Insert the new delineations of all SGM structures in a single call
# Enforce unsigned integer datatype of output image
run.command('mrcalc sgm_new_labels.mif 0.5 -gt sgm_new_labels.mif parc.mif -if result.mif -datatype uint32')
-run.command('mrconvert result.mif ' + path.fromUser(app.args.output, True) + (' -force' if app.force else ''))
+run.command('mrconvert result.mif ' + path.fromUser(app.args.output, True) + (' -force' if app.forceOverwrite else ''))
app.complete()
-
diff --git a/bin/population_template b/bin/population_template
index 1e6a91a15f..d02425fbb4 100755
--- a/bin/population_template
+++ b/bin/population_template
@@ -2,75 +2,74 @@
# Generates an unbiased group-average template via image registration of images to a midway space.
-
# Make the corresponding MRtrix3 Python libraries available
import inspect, os, sys
-lib_folder = os.path.realpath(os.path.abspath(os.path.join(os.path.split(inspect.getfile(inspect.currentframe()))[0], os.pardir, 'lib')))
+lib_folder = os.path.realpath(os.path.join(os.path.dirname(os.path.realpath(inspect.getfile(inspect.currentframe()))), os.pardir, 'lib'))
if not os.path.isdir(lib_folder):
sys.stderr.write('Unable to locate MRtrix3 Python libraries')
sys.exit(1)
sys.path.insert(0, lib_folder)
-
import shutil
move = shutil.move
-copy = shutil.copy2
+copy = shutil.copy
copytree = shutil.copytree
rmtree = shutil.rmtree
remove = os.remove
+# Binds raw_input() to input() in Python2, so that input() can be used
+# and the code will work on both Python 2 and 3
+try:
+ input = raw_input #pylint: disable=redefined-builtin
+except NameError:
+ pass
+
-import math, shutil
-from mrtrix3 import app, file, image, path, run
+import math
+from mrtrix3 import app, file, image, path, run #pylint: disable=redefined-builtin
+from mrtrix3.path import allindir
def abspath(*arg):
return os.path.abspath(os.path.join(*arg))
def relpath(*arg):
- return os.path.relpath(os.path.join(*arg), app._workingDir)
-
-def allindir(dir):
- return ' '.join( [ os.path.join(dir, file) for file in os.listdir(dir) ] )
-
-
+ return os.path.relpath(os.path.join(*arg), app.workingDir)
try:
from numpy import loadtxt, savetxt, dot
except ImportError:
- app.console("numpy not found. using replacement functions")
+ app.console("numpy not found; using replacement functions")
def loadtxt(fname, delimiter=" ", dtype=float):
with open(fname, "rb") as f:
- return [[dtype(x) for x in l.decode(errors='ignore').rstrip().split(delimiter)] for l in f.readlines()]
+ return [[dtype(a) for a in l.decode(errors='ignore').rstrip().split(delimiter)] for l in f.readlines()]
def savetxt(fname, X, delimiter=" ", fmt="%.14e"):
- import sys
try:
fh = open(fname, 'wb')
if len(X) == 1:
- ncol = 1
+ ncol = 1
else:
ncol = len(X[0])
fmt = [fmt, ] * ncol
- format = delimiter.join(fmt)
+ row_format = delimiter.join(fmt)
for row in X:
- fh.write(((format % tuple(row) + '\n').encode(errors='ignore')))
+ fh.write(((row_format % tuple(row) + '\n').encode(errors='ignore')))
finally:
- fh.close()
+ fh.close()
def dot(a, b):
# matrix dot product
# does not check input dimensions
return [[sum(x*y for x,y in zip(a_row,b_col)) for b_col in zip(*b)] for a_row in a]
-def check_linear_transformation (transformation, command, max_scaling = 0.5, max_shear = 0.2, max_rot = 2 * math.pi, pause_on_warn = True):
- import os
+def check_linear_transformation (transformation, cmd, max_scaling = 0.5, max_shear = 0.2, max_rot = 2 * math.pi, pause_on_warn = True):
- def load_key_value(file):
+ def load_key_value(file_path):
res = {}
- with open (file, "r") as f:
+ with open (file_path, "r") as f:
for line in f.readlines():
if len(line)==1 or line.startswith("#"):
continue
@@ -88,12 +87,12 @@ def check_linear_transformation (transformation, command, max_scaling = 0.5, max
return True
data = load_key_value(transformation + 'decomp')
run.function(remove, transformation + 'decomp')
- scaling = [float(x) for x in data['scaling']]
- if any([x < 0 for x in scaling]) or any([x > (1 + max_scaling) for x in scaling]) or any([x < (1 - max_scaling) for x in scaling]):
+ scaling = [float(value) for value in data['scaling']]
+ if any([a < 0 for a in scaling]) or any([a > (1 + max_scaling) for a in scaling]) or any([a < (1 - max_scaling) for a in scaling]):
app.warn ("large scaling (" + str(scaling) + ") in " + transformation)
bGood = False
- shear = [float(x) for x in data['shear']]
- if any([abs(x) > max_shear for x in shear]):
+ shear = [float(value) for value in data['shear']]
+ if any([abs(a) > max_shear for a in shear]):
app.warn ("large shear (" + str(shear) + ") in " + transformation)
bGood = False
rot_angle = float(data['angle_axis'][0])
@@ -102,11 +101,11 @@ def check_linear_transformation (transformation, command, max_scaling = 0.5, max
bGood = False
if not bGood:
- newcommand = []
+ newcmd = []
what = ''
init_rotation_found = False
skip = 0
- for e in command.split():
+ for e in cmd.split():
if skip:
skip -= 1
continue
@@ -116,30 +115,30 @@ def check_linear_transformation (transformation, command, max_scaling = 0.5, max
skip = 1
continue
if 'affine_scale' in e:
- assert (what != 'rigid')
+ assert what != 'rigid'
what = 'affine'
elif 'rigid_scale' in e:
- assert (what != 'affine')
+ assert what != 'affine'
what = 'rigid'
- newcommand.append(e)
- newcommand=" ".join(newcommand)
+ newcmd.append(e)
+ newcmd=" ".join(newcmd)
if not init_rotation_found:
app.console("replacing the transformation obtained with:")
- app.console(command)
+ app.console(cmd)
if what:
- newcommand += ' -'+what+'_init_translation mass -'+what+'_init_rotation search'
+ newcmd += ' -'+what+'_init_translation mass -'+what+'_init_rotation search'
app.console("by the one obtained with:")
- app.console(newcommand)
- run.command(newcommand)
- return check_linear_transformation (transformation, newcommand, max_scaling, max_shear, max_rot, pause_on_warn = pause_on_warn)
+ app.console(newcmd)
+ run.command(newcmd)
+ return check_linear_transformation (transformation, newcmd, max_scaling, max_shear, max_rot, pause_on_warn = pause_on_warn)
elif pause_on_warn:
app.warn("you might want to manually repeat mrregister with different parameters and overwrite the transformation file: \n%s" % transformation)
- app.console('The command that failed the test was: \n' + command)
+ app.console('The command that failed the test was: \n' + cmd)
app.console('Working directory: \n' + os.getcwd())
- raw_input("press enter to continue population_template")
+ input("press enter to continue population_template")
return bGood
-class Input:
+class Input(object):
def __init__(self, filename, prefix, directory, mask_filename = '', mask_directory = ''):
self.filename = filename
self.prefix = prefix
@@ -207,8 +206,8 @@ assert (dorigid + doaffine + dononlinear >= 1), "FIXME: registration type not va
app.args.input_dir = relpath(app.args.input_dir)
inputDir = app.args.input_dir
if not os.path.exists(inputDir):
- app.error('input directory not found');
-inFiles = sorted(os.listdir(inputDir))
+ app.error('input directory not found')
+inFiles = sorted(allindir(inputDir, dir_path=False))
if len(inFiles) <= 1:
app.error('Not enough images found in input directory. More than one image is needed to generate a population template')
else:
@@ -220,31 +219,30 @@ if app.args.voxel_size:
if len(voxel_size) == 1:
voxel_size = voxel_size * 3
try:
- if len(voxel_size) != 3:
- raise
- [float(v) for v in voxel_size]
+ assert len(voxel_size) == 3
+ [float(v) for v in voxel_size] #pylint: disable=expression-not-assigned
except:
- app.error('voxel size needs to be a single or three comma separated floating point numbers, received: '+str(app.args.voxel_size))
+ app.error('voxel size needs to be a single or three comma-separated floating point numbers; received: ' + str(app.args.voxel_size))
initial_alignment = app.args.initial_alignment
if initial_alignment not in ["mass", "geometric", "none"]:
- message.error('initial_alignment must be one of ' + " ".join(["mass", "geometric", "none"]));
+ app.error('initial_alignment must be one of ' + " ".join(["mass", "geometric", "none"]))
linear_estimator = app.args.linear_estimator
if linear_estimator:
if not dononlinear:
- app.error('linear_estimator specified when no linear registration is requested');
+ app.error('linear_estimator specified when no linear registration is requested')
if linear_estimator not in ["l1", "l2", "lp"]:
- app.error('linear_estimator must be one of ' + " ".join(["l1", "l2", "lp"]));
+ app.error('linear_estimator must be one of ' + " ".join(["l1", "l2", "lp"]))
useMasks = False
if app.args.mask_dir:
useMasks = True
- app.args.mask_dir = relpath(app.args.mask_dir);
+ app.args.mask_dir = relpath(app.args.mask_dir)
maskDir = app.args.mask_dir
if not os.path.exists(maskDir):
app.error('mask directory not found')
- maskFiles = os.listdir(maskDir)
+ maskFiles = allindir(maskDir, dir_path=False)
if len(maskFiles) < len(inFiles):
app.error('there are not enough mask images for the number of images in the input directory')
maskCommonPostfix = path.commonPostfix(maskFiles)
@@ -259,17 +257,17 @@ if app.args.template_mask and not useMasks:
app.error('you cannot output a template mask because no subject masks were input using -mask_dir')
commonPostfix = path.commonPostfix(inFiles)
-input = []
+inputs = []
for i in inFiles:
image.check3DNonunity(os.path.join(path.fromUser(inputDir, False), i))
- prefix = i.split(commonPostfix)[0]
+ subj_prefix = i.split(commonPostfix)[0]
if useMasks:
- if prefix not in maskPrefixes:
+ if subj_prefix not in maskPrefixes:
app.error ('no matching mask image was found for input image ' + i)
- index = maskPrefixes.index(prefix)
- input.append(Input(i, prefix, path.fromUser(inputDir, False), maskFiles[index], path.fromUser(maskDir, False)))
+ index = maskPrefixes.index(subj_prefix)
+ inputs.append(Input(i, subj_prefix, path.fromUser(inputDir, False), maskFiles[index], path.fromUser(maskDir, False)))
else:
- input.append(Input(i, prefix, path.fromUser(inputDir, False)))
+ inputs.append(Input(i, subj_prefix, path.fromUser(inputDir, False)))
noreorientation = app.args.noreorientation
@@ -300,13 +298,13 @@ if app.args.linear_transformations_dir:
# automatically detect SH series
do_fod_registration = False
-image_size = [int(x) for x in image.headerField(relpath(input[0].directory, input[0].filename), 'size').split(' ')]
+image_size = image.Header(relpath(inputs[0].directory, inputs[0].filename)).size()
if len(image_size) < 3 or len(image_size) > 4:
app.error('only 3 and 4 dimensional images can be used to build a template')
if len(image_size) == 4:
- val = (math.sqrt (1 + 8 * image_size[3]) - 3.0) / 4.0;
+ val = (math.sqrt (1 + 8 * image_size[3]) - 3.0) / 4.0
if not (val - int(val)) and not noreorientation:
- app.console("SH Series detected, performing FOD registration")
+ app.console("SH series detected, performing FOD registration")
do_fod_registration = True
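The square-root test above simply inverts the even-order SH coefficient count n = (lmax+1)(lmax+2)/2; dividing by 4 rather than 2 yields lmax/2, which is a whole number exactly when the volume count corresponds to a valid even harmonic order. A minimal standalone sketch of the same check (function name and example counts are illustrative only):

    import math

    def sh_lmax_from_volumes(num_volumes):
        # an even-order SH series has (lmax+1)*(lmax+2)/2 coefficients
        half_lmax = (math.sqrt(1 + 8 * num_volumes) - 3.0) / 4.0
        return int(2 * half_lmax) if half_lmax == int(half_lmax) else None

    assert sh_lmax_from_volumes(45) == 8     # lmax=8 FOD image
    assert sh_lmax_from_volumes(28) == 6     # lmax=6
    assert sh_lmax_from_volumes(32) is None  # not an SH series
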
#rigid options
@@ -369,10 +367,10 @@ if doaffine:
linear_niter += affine_niter
linear_type += ['affine'] * len(affine_scales)
-assert (len(linear_type) == len(linear_scales))
-assert (len(linear_scales) == len(linear_niter))
+assert len(linear_type) == len(linear_scales)
+assert len(linear_scales) == len(linear_niter)
if do_fod_registration:
- assert (len(linear_lmax) == len(linear_niter))
+ assert len(linear_lmax) == len(linear_niter)
app.console('initial alignment of images: %s' % initial_alignment)
@@ -385,7 +383,7 @@ if dolinear:
for istage, [tpe, scale, niter] in enumerate(zip (linear_type, linear_scales, linear_niter)):
app.console('(%02i) %s scale: %.4f, niter: %i, no reorientation' %(istage, tpe.ljust(9), scale, niter))
-datatype = ' -datatype float32'
+datatype_option = ' -datatype float32'
if not dononlinear:
nl_scales = []
@@ -419,7 +417,7 @@ else:
app.makeTempDir()
app.gotoTempDir()
-file.makeDir('input_transformed')
+file.makeDir('inputs_transformed')
file.makeDir('linear_transforms_initial')
file.makeDir('linear_transforms')
for level in range(0, len(linear_scales)):
@@ -429,15 +427,15 @@ for level in range(0, len(nl_scales)):
if useMasks:
file.makeDir('masks_transformed')
-write_log = (app._verbosity >= 2)
+write_log = (app.verbosity >= 2)
if write_log:
file.makeDir('log')
# Make initial template in average space
app.console('Generating initial template')
input_filenames = []
-for i in input:
- input_filenames.append (abspath(i.directory, i.filename));
+for i in inputs:
+ input_filenames.append(abspath(i.directory, i.filename))
if voxel_size is None:
run.command('mraverageheader ' + ' '.join(input_filenames) + ' average_header.mif -fill')
else:
@@ -445,54 +443,67 @@ else:
# crop average space to extent defined by original masks
if useMasks:
+ progress = app.progressBar('Importing input masks to average space for template cropping', len(inputs))
mask_filenames = []
- for i in input:
- run.command('mrtransform ' + abspath(i.mask_directory, i.mask_filename) +
- ' -interp nearest -template average_header.mif ' + os.path.join('masks_transformed', i.mask_filename))
+ for i in inputs:
+ run.command('mrtransform ' + abspath(i.mask_directory, i.mask_filename) + \
+ ' -interp nearest -template average_header.mif ' + os.path.join('masks_transformed', i.mask_filename))
mask_filenames.append(os.path.join('masks_transformed', i.mask_filename))
+ progress.increment()
+ progress.done()
run.command('mrmath ' + ' '.join(mask_filenames) + ' max mask_initial.mif' )
run.command('mrcrop ' + 'average_header.mif -mask mask_initial.mif average_header_cropped.mif')
run.function(remove,'mask_initial.mif')
run.function(remove, 'average_header.mif')
run.function(move, 'average_header_cropped.mif', 'average_header.mif')
+ progress = app.progressBar('Erasing temporary mask images', len(mask_filenames))
for mask in mask_filenames:
run.function(remove, mask)
+ progress.increment()
+ progress.done()
+
if initial_alignment == 'none':
- for i in input:
- run.command('mrtransform ' + abspath(i.directory, i.filename) + ' -interp linear -template average_header.mif ' + os.path.join('input_transformed', i.prefix + '.mif') + datatype)
+ progress = app.progressBar('Resampling input images to template space with no initial alignment', len(inputs))
+ for i in inputs:
+ run.command('mrtransform ' + abspath(i.directory, i.filename) + ' -interp linear -template average_header.mif ' + os.path.join('inputs_transformed', i.prefix + '.mif') + datatype_option)
+ progress.increment()
+ progress.done()
if not dolinear:
- for i in input:
+ for i in inputs:
with open(os.path.join('linear_transforms_initial','%s.txt' % (i.prefix)),'w') as fout:
fout.write('1 0 0 0\n0 1 0 0\n0 0 1 0\n0 0 0 1\n')
else:
+ progress = app.progressBar('Performing initial rigid registration to template', len(inputs))
mask = ''
- for i in input:
+ for i in inputs:
if useMasks:
mask = ' -mask1 ' + abspath(i.mask_directory, i.mask_filename)
output = ' -rigid ' + os.path.join('linear_transforms_initial', i.prefix + '.txt')
- run.command('mrregister ' + abspath(i.directory, i.filename) + ' average_header.mif' +
- mask +
- ' -rigid_scale 1 ' +
- ' -rigid_niter 0 ' +
- ' -type rigid ' +
- ' -noreorientation ' +
- ' -rigid_init_translation ' + initial_alignment + ' ' +
- datatype +
+ run.command('mrregister ' + abspath(i.directory, i.filename) + ' average_header.mif' + \
+ mask + \
+ ' -rigid_scale 1 ' + \
+ ' -rigid_niter 0 ' + \
+ ' -type rigid ' + \
+ ' -noreorientation ' + \
+ ' -rigid_init_translation ' + initial_alignment + ' ' + \
+ datatype_option + \
output)
# translate input images to centre of mass without interpolation
- run.command('mrtransform ' + abspath(i.directory, i.filename) +
- ' -linear ' + os.path.join('linear_transforms_initial', i.prefix + '.txt') +
- datatype +
- ' ' + os.path.join('input_transformed', i.prefix + '_translated.mif'))
+ run.command('mrtransform ' + abspath(i.directory, i.filename) + \
+ ' -linear ' + os.path.join('linear_transforms_initial', i.prefix + '.txt') + \
+ datatype_option + \
+ ' ' + os.path.join('inputs_transformed', i.prefix + '_translated.mif'))
if useMasks:
- run.command('mrtransform ' + abspath(i.mask_directory, i.mask_filename) +
- ' -linear ' + os.path.join('linear_transforms_initial', i.prefix + '.txt') +
- datatype +
- ' ' + os.path.join('masks_transformed', i.prefix + '_translated.mif'))
+ run.command('mrtransform ' + abspath(i.mask_directory, i.mask_filename) + \
+ ' -linear ' + os.path.join('linear_transforms_initial', i.prefix + '.txt') + \
+ datatype_option + ' ' + \
+ os.path.join('masks_transformed', i.prefix + '_translated.mif'))
+ progress.increment()
+ progress.done()
# update average space to new extent
- run.command('mraverageheader ' + ' '.join([os.path.join('input_transformed', i.prefix + '_translated.mif') for i in input]) + ' average_header_tight.mif')
+ run.command('mraverageheader ' + ' '.join([os.path.join('inputs_transformed', i.prefix + '_translated.mif') for i in inputs]) + ' average_header_tight.mif')
if voxel_size is None:
run.command('mrpad -uniform 10 average_header_tight.mif average_header.mif -force')
else:
@@ -500,214 +511,233 @@ else:
run.function(remove, 'average_header_tight.mif')
if useMasks:
# reslice masks
- for i in input:
- run.command('mrtransform ' +
- ' ' + os.path.join('masks_transformed', i.prefix + '_translated.mif') +
- ' ' + os.path.join('masks_transformed', i.prefix + '.mif') +
- ' -interp nearest -template average_header.mif' +
- datatype)
+ progress = app.progressBar('Reslicing input masks to average header', len(inputs))
+ for i in inputs:
+ run.command('mrtransform ' + \
+ os.path.join('masks_transformed', i.prefix + '_translated.mif') + ' ' + \
+ os.path.join('masks_transformed', i.prefix + '.mif') + ' ' + \
+ '-interp nearest -template average_header.mif' + \
+ datatype_option)
+ progress.increment()
+ progress.done()
# crop average space to extent defined by translated masks
mask_filenames = []
- for i in input:
+ for i in inputs:
mask_filenames.append(os.path.join('masks_transformed', i.prefix + '.mif'))
run.command('mrmath ' + ' '.join(mask_filenames) + ' max mask_translated.mif' )
run.command('mrcrop ' + 'average_header.mif -mask mask_translated.mif average_header_cropped.mif')
# pad average space to allow for deviation from initial alignment
run.command('mrpad -uniform 10 average_header_cropped.mif -force average_header.mif')
run.function(remove, 'average_header_cropped.mif')
- for i in input:
+ progress = app.progressBar('Reslicing mask images to new padded average header', len(inputs))
+ for i in inputs:
run.command('mrtransform ' +
- ' ' + os.path.join('masks_transformed', i.prefix + '_translated.mif') +
- ' ' + os.path.join('masks_transformed', i.prefix + '.mif') +
- ' -interp nearest -template average_header.mif' +
- datatype +
- ' -force')
+ os.path.join('masks_transformed', i.prefix + '_translated.mif') + ' ' + \
+ os.path.join('masks_transformed', i.prefix + '.mif') + ' ' + \
+ '-interp nearest -template average_header.mif' + \
+ datatype_option + ' ' + \
+ '-force')
run.function(remove, os.path.join('masks_transformed', i.prefix + '_translated.mif'))
+ progress.increment()
+ progress.done()
run.function(remove, 'mask_translated.mif')
# reslice input images
- for i in input:
- run.command('mrtransform ' +
- ' ' + os.path.join('input_transformed', i.prefix + '_translated.mif') +
- ' ' + os.path.join('input_transformed', i.prefix + '.mif') +
- ' -interp linear -template average_header.mif' +
- datatype)
- run.function(remove, os.path.join('input_transformed', i.prefix + '_translated.mif'))
-
-
-run.command('mrmath ' + allindir('input_transformed') + ' mean initial_template.mif')
+ progress = app.progressBar('Reslicing input images to average header', len(inputs))
+ for i in inputs:
+ run.command('mrtransform ' + \
+ os.path.join('inputs_transformed', i.prefix + '_translated.mif') + ' ' + \
+ os.path.join('inputs_transformed', i.prefix + '.mif') + ' ' + \
+ '-interp linear -template average_header.mif' + \
+ datatype_option)
+ run.function(remove, os.path.join('inputs_transformed', i.prefix + '_translated.mif'))
+ progress.increment()
+ progress.done()
+
+
+run.command('mrmath ' + ' '.join(allindir('inputs_transformed')) + ' mean initial_template.mif')
current_template = 'initial_template.mif'
# Optimise template with linear registration
if not dolinear:
- for i in input:
- run.function(copy, os.path.join('linear_transforms_initial','%s.txt' % (i.prefix)), 'linear_transforms')
+ for i in inputs:
+ run.function(copy, os.path.join('linear_transforms_initial','%s.txt' % (i.prefix)), os.path.join('linear_transforms','%s.txt' % (i.prefix)))
else:
- app.console('Optimising template with linear registration')
- for level in range(0, len(linear_scales)):
- for i in input:
- initialise = ''
+ for level, (regtype, scale, niter, lmax) in enumerate(zip(linear_type, linear_scales, linear_niter, linear_lmax)):
+ progress = app.progressBar('Optimising template with linear registration (stage {0} of {1})'.format(level+1, len(linear_scales)), len(inputs))
+ for i in inputs:
+ initialise_option = ''
if useMasks:
- mask = ' -mask1 ' + abspath(i.mask_directory, i.mask_filename)
+ mask_option = ' -mask1 ' + abspath(i.mask_directory, i.mask_filename)
else:
- mask = ''
- lmax = ''
- metric = ''
- mrregister_log = ''
- if linear_type[level] == 'rigid':
- scale = ' -rigid_scale ' + str(linear_scales[level])
- niter = ' -rigid_niter ' + str(linear_niter[level])
- regtype = ' -type rigid'
- output = ' -rigid ' + os.path.join('linear_transforms_%i' % level, '%s.txt' % i.prefix)
+ mask_option = ''
+ lmax_option = ''
+ metric_option = ''
+ mrregister_log_option = ''
+ if regtype == 'rigid':
+ scale_option = ' -rigid_scale ' + str(scale)
+ niter_option = ' -rigid_niter ' + str(niter)
+ regtype_option = ' -type rigid'
+ output_option = ' -rigid ' + os.path.join('linear_transforms_%i' % level, '%s.txt' % i.prefix)
if level > 0:
- initialise = ' -rigid_init_matrix ' + os.path.join('linear_transforms_%i' % (level - 1), '%s.txt' % i.prefix)
+ initialise_option = ' -rigid_init_matrix ' + os.path.join('linear_transforms_%i' % (level - 1), '%s.txt' % i.prefix)
if do_fod_registration:
- lmax = ' -rigid_lmax ' + str(linear_lmax[level])
+ lmax_option = ' -rigid_lmax ' + str(lmax)
else:
- lmax = ' -noreorientation'
+ lmax_option = ' -noreorientation'
if linear_estimator:
- metric = ' -rigid_metric.diff.estimator ' + linear_estimator
- if app._verbosity >= 2:
- mrregister_log = ' -info -rigid_log ' + os.path.join('log', i.filename + "_" + str(level) + '.log')
+ metric_option = ' -rigid_metric.diff.estimator ' + linear_estimator
+ if app.verbosity >= 2:
+ mrregister_log_option = ' -info -rigid_log ' + os.path.join('log', i.filename + "_" + str(level) + '.log')
else:
- scale = ' -affine_scale ' + str(linear_scales[level])
- niter = ' -affine_niter ' + str(linear_niter[level])
- regtype = ' -type affine'
- output = ' -affine ' + os.path.join('linear_transforms_%i' % level, '%s.txt' % i.prefix)
+ scale_option = ' -affine_scale ' + str(scale)
+ niter_option = ' -affine_niter ' + str(niter)
+ regtype_option = ' -type affine'
+ output_option = ' -affine ' + os.path.join('linear_transforms_%i' % level, '%s.txt' % i.prefix)
if level > 0:
- initialise = ' -affine_init_matrix ' + os.path.join('linear_transforms_%i' % (level - 1), '%s.txt' % i.prefix)
+ initialise_option = ' -affine_init_matrix ' + os.path.join('linear_transforms_%i' % (level - 1), '%s.txt' % i.prefix)
if do_fod_registration:
- lmax = ' -affine_lmax ' + str(linear_lmax[level])
+ lmax_option = ' -affine_lmax ' + str(lmax)
else:
- lmax = ' -noreorientation'
+ lmax_option = ' -noreorientation'
if linear_estimator:
- metric = ' -affine_metric.diff.estimator ' + linear_estimator
+ metric_option = ' -affine_metric.diff.estimator ' + linear_estimator
if write_log:
- mrregister_log = ' -info -affine_log ' + os.path.join('log', i.filename + '_' + str(level) + '.log')
+ mrregister_log_option = ' -info -affine_log ' + os.path.join('log', i.filename + '_' + str(level) + '.log')
command = 'mrregister ' + abspath(i.directory, i.filename) + ' ' + current_template + \
- ' -force' + \
- initialise + \
- mask + \
- scale + \
- niter + \
- lmax + \
- regtype + \
- metric + \
- datatype + \
- output + \
- mrregister_log
+ ' -force' + \
+ initialise_option + \
+ mask_option + \
+ scale_option + \
+ niter_option + \
+ lmax_option + \
+ regtype_option + \
+ metric_option + \
+ datatype_option + \
+ output_option + \
+ mrregister_log_option
run.command(command)
check_linear_transformation(os.path.join('linear_transforms_%i' % level, '%s.txt' % i.prefix), command, pause_on_warn=do_pause_on_warn)
+ progress.increment()
+ progress.done()
# Here we ensure the template doesn't drift or scale
- run.command('transformcalc ' + allindir('linear_transforms_%i' % level) + ' average linear_transform_average.txt -force -quiet')
+ run.command('transformcalc ' + ' '.join(allindir('linear_transforms_%i' % level)) + ' average linear_transform_average.txt -force -quiet')
if linear_type[level] == 'rigid':
run.command('transformcalc linear_transform_average.txt rigid linear_transform_average.txt -force -quiet')
run.command('transformcalc linear_transform_average.txt invert linear_transform_average_inv.txt -force -quiet')
average_inv = run.function(loadtxt, 'linear_transform_average_inv.txt')
if average_inv is not None:
- for i in input:
+ for i in inputs:
transform = dot(loadtxt(os.path.join('linear_transforms_%i' % level, '%s.txt' % i.prefix)), average_inv)
savetxt(os.path.join('linear_transforms_%i' % level, '%s.txt' % i.prefix), transform)
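The averaging step above works because composing every subject transform with the inverse of their average pulls the ensemble back towards the identity, so the template cannot drift or rescale between iterations. A toy sketch using the fallback dot() from earlier (pure-translation 4x4 matrices invented for illustration; the real pipeline obtains the inverse average via transformcalc):

    # two transforms translating by +2 and -1 along x (average translation: +0.5)
    t_a = [[1.0, 0.0, 0.0,  2.0], [0.0, 1.0, 0.0, 0.0], [0.0, 0.0, 1.0, 0.0], [0.0, 0.0, 0.0, 1.0]]
    t_b = [[1.0, 0.0, 0.0, -1.0], [0.0, 1.0, 0.0, 0.0], [0.0, 0.0, 1.0, 0.0], [0.0, 0.0, 0.0, 1.0]]
    avg_inv = [[1.0, 0.0, 0.0, -0.5], [0.0, 1.0, 0.0, 0.0], [0.0, 0.0, 1.0, 0.0], [0.0, 0.0, 0.0, 1.0]]
    centred = [dot(t, avg_inv) for t in (t_a, t_b)]
    # corrected translations are +1.5 and -1.5: their mean is zero, i.e. no net drift
    assert [m[0][3] for m in centred] == [1.5, -1.5]
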
-
- for i in input:
- run.command('mrtransform ' + abspath(i.directory, i.filename) +
- ' -template ' + current_template +
- ' -linear ' + os.path.join('linear_transforms_%i' % level, '%s.txt' % i.prefix) +
- ' ' + os.path.join('input_transformed', '%s.mif' % i.prefix) +
- datatype +
- ' -force')
-
- run.command('mrmath ' + allindir('input_transformed') + ' mean linear_template' + str(level) + '.mif -force')
+ progress = app.progressBar('Transforming all subjects to revised template', len(inputs))
+ for i in inputs:
+ run.command('mrtransform ' + abspath(i.directory, i.filename) + ' ' + \
+ '-template ' + current_template + ' ' + \
+ '-linear ' + os.path.join('linear_transforms_%i' % level, '%s.txt' % i.prefix) + ' ' + \
+ os.path.join('inputs_transformed', '%s.mif' % i.prefix) + \
+ datatype_option + ' ' + \
+ '-force')
+ progress.increment()
+ progress.done()
+
+ run.command('mrmath ' + ' '.join(allindir('inputs_transformed')) + ' mean linear_template' + str(level) + '.mif -force')
current_template = 'linear_template' + str(level) + '.mif'
- for filename in os.listdir('linear_transforms_%i' % level):
- run.function(copy, os.path.join('linear_transforms_%i' % level, filename), 'linear_transforms')
+ for entry in os.listdir('linear_transforms_%i' % level):
+ run.function(copy, os.path.join('linear_transforms_%i' % level, entry), os.path.join('linear_transforms', entry))
# Create a template mask for nl registration by taking the intersection of all transformed input masks and dilating
if useMasks and (dononlinear or app.args.template_mask):
- for i in input:
- run.command('mrtransform ' + abspath(i.mask_directory, i.mask_filename) +
- ' -template ' + current_template +
- ' -interp nearest' +
- ' -linear ' + os.path.join('linear_transforms', '%s.txt' % i.prefix) +
- ' ' + os.path.join('masks_transformed', '%s.mif' % i.prefix) +
- ' -force')
- run.command ('mrmath ' + allindir('masks_transformed') + ' min - | maskfilter - median - | maskfilter - dilate -npass 5 init_nl_template_mask.mif -force')
+ progress = app.progressBar('Generating template mask for non-linear registration', len(inputs))
+ for i in inputs:
+ run.command('mrtransform ' + abspath(i.mask_directory, i.mask_filename) + ' ' + \
+ '-template ' + current_template + ' ' + \
+ '-interp nearest ' + \
+ '-linear ' + os.path.join('linear_transforms', '%s.txt' % i.prefix) + ' ' + \
+ os.path.join('masks_transformed', '%s.mif' % i.prefix) + ' ' + \
+ '-force')
+ progress.increment()
+ progress.done()
+ run.command ('mrmath ' + ' '.join(allindir('masks_transformed')) + ' min - | maskfilter - median - | maskfilter - dilate -npass 5 init_nl_template_mask.mif -force')
current_template_mask = 'init_nl_template_mask.mif'
if dononlinear:
# Optimise the template with non-linear registration
- app.console('Optimising template with non-linear registration')
file.makeDir('warps')
- for level in range(0, len(nl_scales)):
- for i in input:
+ for level, (scale, niter, lmax) in enumerate(zip(nl_scales, nl_niter, nl_lmax)):
+ progress = app.progressBar('Optimising template with non-linear registration (stage {0} of {1})'.format(level+1, len(nl_scales)), len(inputs))
+ for i in inputs:
if level > 0:
- initialise = ' -nl_init ' + os.path.join('warps_%i' % (level-1), '%s.mif' % i.prefix)
- scale = ''
+ initialise_option = ' -nl_init ' + os.path.join('warps_%i' % (level-1), '%s.mif' % i.prefix)
+ scale_option = ''
else:
- scale = ' -nl_scale ' + str(nl_scales[level])
+ scale_option = ' -nl_scale ' + str(scale)
if not doaffine: # rigid or no previous linear stage
- initialise = ' -rigid_init_matrix ' + os.path.join('linear_transforms', '%s.txt' % i.prefix)
+ initialise_option = ' -rigid_init_matrix ' + os.path.join('linear_transforms', '%s.txt' % i.prefix)
else:
- initialise = ' -affine_init_matrix ' + os.path.join('linear_transforms', '%s.txt' % i.prefix)
+ initialise_option = ' -affine_init_matrix ' + os.path.join('linear_transforms', '%s.txt' % i.prefix)
if useMasks:
- mask = ' -mask1 ' + abspath(i.mask_directory, i.mask_filename) + ' -mask2 ' + current_template_mask
+ mask_option = ' -mask1 ' + abspath(i.mask_directory, i.mask_filename) + ' -mask2 ' + current_template_mask
else:
- mask = ''
+ mask_option = ''
if do_fod_registration:
- lmax = ' -nl_lmax ' + str(nl_lmax[level])
+ lmax_option = ' -nl_lmax ' + str(lmax)
else:
- lmax = ' -noreorientation'
-
- run.command('mrregister ' + abspath(i.directory, i.filename) + ' ' + current_template +
- ' -type nonlinear' +
- ' -nl_niter ' + str(nl_niter[level]) +
- ' -nl_warp_full ' + os.path.join('warps_%i' % level, '%s.mif' % i.prefix) +
- ' -transformed ' + os.path.join('input_transformed', '%s.mif' % i.prefix) +
- ' -nl_update_smooth ' + app.args.nl_update_smooth +
- ' -nl_disp_smooth ' + app.args.nl_disp_smooth +
- ' -nl_grad_step ' + app.args.nl_grad_step +
- ' -force ' +
- initialise +
- scale +
- mask +
- datatype +
- lmax)
+ lmax_option = ' -noreorientation'
+
+ run.command('mrregister ' + abspath(i.directory, i.filename) + ' ' + current_template + ' ' + \
+ '-type nonlinear ' + \
+ '-nl_niter ' + str(niter) + ' ' + \
+ '-nl_warp_full ' + os.path.join('warps_%i' % level, '%s.mif' % i.prefix) + ' ' + \
+ '-transformed ' + os.path.join('inputs_transformed', '%s.mif' % i.prefix) + ' ' + \
+ '-nl_update_smooth ' + app.args.nl_update_smooth + ' ' + \
+ '-nl_disp_smooth ' + app.args.nl_disp_smooth + ' ' + \
+ '-nl_grad_step ' + app.args.nl_grad_step + ' ' + \
+ '-force' + \
+ initialise_option + \
+ scale_option + \
+ mask_option + \
+ datatype_option + \
+ lmax_option)
+
if level > 0:
run.function(remove, os.path.join('warps_%i'%(level-1), '%s.mif' % i.prefix))
if useMasks:
- run.command('mrtransform ' + abspath(i.mask_directory, i.mask_filename) +
- ' -template ' + current_template +
- ' -warp_full ' + os.path.join('warps_%i' % level, '%s.mif' % i.prefix) +
- ' ' + os.path.join('masks_transformed', i.prefix + '.mif') +
- ' -interp nearest ' +
- ' -force')
-
- run.command ('mrmath ' + allindir('input_transformed') + ' mean nl_template' + str(level) + '.mif')
+ run.command('mrtransform ' + abspath(i.mask_directory, i.mask_filename) + ' ' + \
+ '-template ' + current_template + ' ' + \
+ '-warp_full ' + os.path.join('warps_%i' % level, '%s.mif' % i.prefix) + ' ' + \
+ os.path.join('masks_transformed', i.prefix + '.mif') + ' ' + \
+                  '-interp nearest ' + \
+ '-force')
+ progress.increment()
+ progress.done()
+
+ run.command('mrmath ' + ' '.join(allindir('inputs_transformed')) + ' mean nl_template' + str(level) + '.mif')
current_template = 'nl_template' + str(level) + '.mif'
if useMasks:
- run.command ('mrmath ' + allindir('masks_transformed') + ' min - | maskfilter - median - | maskfilter - dilate -npass 5 nl_template_mask' + str(level) + '.mif')
+ run.command('mrmath ' + ' '.join(allindir('masks_transformed')) + ' min - | maskfilter - median - | maskfilter - dilate -npass 5 nl_template_mask' + str(level) + '.mif')
current_template_mask = 'nl_template_mask' + str(level) + '.mif'
if level < len(nl_scales) - 1:
- if (nl_scales[level] < nl_scales[level + 1]):
- upsample_factor = nl_scales[level + 1] / nl_scales[level]
- for i in input:
+ if scale < nl_scales[level + 1]:
+ upsample_factor = nl_scales[level + 1] / scale
+ for i in inputs:
run.command('mrresize ' + os.path.join('warps_%i' % level, '%s.mif' % i.prefix) + ' -scale %f tmp.mif' % upsample_factor)
run.function(move, 'tmp.mif', os.path.join('warps_' + str(level), '%s.mif' % i.prefix))
else:
- for i in input:
+ for i in inputs:
run.function(move, os.path.join('warps_' + str(level), '%s.mif' % i.prefix), 'warps')
-run.command('mrconvert ' + current_template + ' ' + path.fromUser(app.args.template, True) + (' -force' if app.force else ''))
+run.command('mrconvert ' + current_template + ' ' + path.fromUser(app.args.template, True) + (' -force' if app.forceOverwrite else ''))
if app.args.warp_dir:
warp_path = path.fromUser(app.args.warp_dir, False)
@@ -725,10 +755,9 @@ if app.args.transformed_dir:
transformed_path = path.fromUser(app.args.transformed_dir, False)
if os.path.exists(transformed_path):
run.function(rmtree, transformed_path)
- run.function(copytree, 'input_transformed', transformed_path)
+ run.function(copytree, 'inputs_transformed', transformed_path)
if app.args.template_mask:
- run.command('mrconvert ' + current_template_mask + ' ' + path.fromUser(app.args.template_mask, True) + (' -force' if app.force else ''))
+ run.command('mrconvert ' + current_template_mask + ' ' + path.fromUser(app.args.template_mask, True) + (' -force' if app.forceOverwrite else ''))
app.complete()
-
diff --git a/build b/build
index bb27dc17dd..f58f1aa35d 100755
--- a/build
+++ b/build
@@ -16,7 +16,7 @@ DESCRIPTION
$ ./build
If no targets are provided, the command will default to building all
- applications by scanning through the cmd/ folder.
+ applications by scanning through the cmd/ folder.
The target executables will be located in the bin/ folder, and the shared
library (if requested - the default) will be located in the lib/ folder (or
@@ -25,20 +25,20 @@ DESCRIPTION
SPECIAL TARGETS
- clean
+ clean
used to remove all compiler-generated files, including objects,
- executables, and shared libraries.
-
+ executables, and shared libraries.
+
bash
used to update the bash completion script. Note that automatic updating
of this script can be enabled at the configure stage, by running
'./configure -dev' prior to invoking './build'.
-
+
doc
used to update the command documentation, so that any modifications to
the inline documentation in commands can be propagated through to the
user documentation site.
-
+
select name
used to switch between configs / builds. This stores the current config
and all compiler-generated files in a folder (called "build.oldname"),
@@ -55,7 +55,7 @@ PARALLELISED BUILD
$ NUMBER_OF_PROCESSORS=1 ./build
-OPTIONS
+OPTIONS
-verbose
print each command as it is being invoked
@@ -69,6 +69,13 @@ OPTIONS
-tree
[only used with -showdep] print full dependency tree for each file
+
+ -persistent
+    keep trying to build despite failures, stopping only once none of the
+    remaining jobs succeed.
+
+ -nopaginate
+    do not feed the error log to the pager, even if running in a TTY
'''
@@ -106,7 +113,9 @@ system = None
dependencies = 0
dep_recursive = False
verbose = False
+persistent = False
nowarnings = False
+paginate = True
targets = []
use_multiple_cores = True
@@ -180,10 +189,6 @@ def pipe_errors_to_less_handler():
-if sys.stderr.isatty():
- error_stream = ''
- atexit.register (pipe_errors_to_less_handler)
-
@@ -192,7 +197,7 @@ def disp (msg):
logfile.write (msg.encode (errors='ignore'))
sys.stdout.write (msg)
print_lock.release()
-
+
def log (msg):
print_lock.acquire()
logfile.write (msg.encode (errors='ignore'))
@@ -209,31 +214,13 @@ def error (msg):
else:
sys.stderr.write (msg)
print_lock.release()
-
+
def split_path (filename):
return filename.replace ('\\', '/').split ('/')
-
-def get_git_lib_version (folder):
- log ('''
-getting short git version in folder "''' + folder + '"... ')
-
- try:
- process = subprocess.Popen ([ 'git', 'describe', '--abbrev=0', '--always' ], cwd=folder, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
- ( git_version, stderr ) = process.communicate()
- if process.returncode == 0:
- git_version = git_version.decode('utf-8', 'ignore').strip()
- log (git_version + '\n')
- return git_version
- except:
- pass
- log ('not found\n')
- return None
-
-
def get_real_name (path):
if os.path.islink (path): return os.readlink (path)
else: return (path)
@@ -258,15 +245,15 @@ def modify_path (name, tmp=False, in_main=False, strip=None, add=None):
name = name[:-len(strip)]
if add is not None:
name = name + add
- for module_dir in mrtrix_dir:
- relname = os.path.normpath (os.path.relpath (name, module_dir))
+ for project_dir in mrtrix_dir:
+ relname = os.path.normpath (os.path.relpath (name, project_dir))
if relname.startswith ('.'):
continue
if tmp:
relname = os.path.join (tmp_dir, relname)
elif not os.path.relpath (relname, tmp_dir).startswith ('.'):
relname = os.sep.join (split_path (relname)[1:])
- name = os.path.normpath (os.path.join (module_dir, relname))
+ name = os.path.normpath (os.path.join (project_dir, relname))
return name
@@ -283,20 +270,24 @@ for arg in sys.argv[1:]:
if '-help'.startswith (arg):
print (usage_string)
exit (0)
- elif '-verbose'.startswith(arg):
+ elif '-verbose'.startswith(arg):
verbose = True
+ elif '-persistent'.startswith(arg):
+ persistent = True
elif '-nowarnings'.startswith(arg):
nowarnings = True
- elif '-showdep'.startswith(arg):
+ elif '-showdep'.startswith(arg):
dependencies = 1
elif arg.startswith ('-showdep='):
if arg[9:] == 'target': dependencies = 1
elif arg[9:] == 'all': dependencies = 2
- else:
+ else:
       sys.stderr.write ('invalid value specified for option "-showdep" (expected target, all)\n')
sys.exit (1)
elif '-tree'.startswith(arg):
dep_recursive = True
+ elif '-nopaginate'.startswith(arg):
+ paginate = False
elif arg[0] == '-':
sys.stderr.write ('unknown command-line option "' + arg + '"\n')
sys.exit (1)
@@ -306,12 +297,16 @@ for arg in sys.argv[1:]:
command_doc = True
elif arg == 'clean':
targets = [ 'clean' ]
- else:
+ else:
targets.append(arg)
+if paginate and sys.stderr.isatty():
+ error_stream = ''
+ atexit.register (pipe_errors_to_less_handler)
+
@@ -328,10 +323,10 @@ while os.path.abspath (os.path.dirname (get_real_name (build_script))) != os.pat
log ('compiling separate project against:' + os.linesep)
separate_project = True
build_script = os.path.normpath (os.path.join (mrtrix_dir[-1], get_real_name (build_script)))
- module_dir = os.path.dirname (build_script)
- mrtrix_dir += [ module_dir ]
- include_paths += [ os.path.join (module_dir, misc_dir) ]
- log (' ' + module_dir + os.linesep + os.linesep)
+ project_dir = os.path.dirname (build_script)
+ mrtrix_dir += [ project_dir ]
+ include_paths += [ os.path.join (project_dir, misc_dir) ]
+ log (' ' + project_dir + os.linesep + os.linesep)
@@ -341,7 +336,7 @@ while os.path.abspath (os.path.dirname (get_real_name (build_script))) != os.pat
# CONFIG HANDLING #
############################################################################
-def get_active_build (folder = '.'):
+def get_active_build (folder):
active_config = glob.glob (os.path.join (folder, 'build.*.active'))
if len (active_config) > 1:
sys.stderr.write ('ERROR: more than one config is currently marked as active!\n')
@@ -362,7 +357,7 @@ def get_active_build (folder = '.'):
def store_current_build (folder = '.'):
stored_config = get_active_build (folder)
os.rename (stored_config + '.active', stored_config)
- print ('storing "' + stored_config + '"...')
+ print ('in "' + folder + '": storing "' + stored_config + '"...')
for f in [ tmp_dir, 'config' ] + list_untracked_bin_files (folder) + glob.glob ('lib/libmrtrix*'):
if os.path.exists (os.path.join (folder, f)):
os.renames (os.path.join (folder, f), os.path.join (stored_config, f))
@@ -373,7 +368,7 @@ def restore_build (name, folder = '.'):
stored_config = os.path.join (folder, 'build.' + name)
active_config = stored_config + '.active'
if os.path.isdir (stored_config):
- print ('restoring "' + stored_config + '"...')
+ print ('in "' + folder + '": restoring "' + stored_config + '"...')
os.rename (stored_config, active_config)
for root, dirs, files in os.walk(active_config, topdown=False):
for name in files:
@@ -384,7 +379,7 @@ def restore_build (name, folder = '.'):
else:
if os.path.exists (stored_config):
raise Exception ('ERROR config to be restored (' + stored_config + ') is not a folder')
- print ('creating empty "' + stored_config + '"...')
+ print ('in "' + folder + '": creating empty "' + stored_config + '"...')
if not os.path.isdir (active_config):
os.mkdir (active_config)
@@ -410,7 +405,7 @@ def activate_build (name, folders):
if len (targets) and targets[0] == 'select':
if len(targets) == 1:
- active_config = os.path.basename (get_active_build ())
+ active_config = os.path.basename (get_active_build ('.'))
print ('current config is "' + active_config + '"')
for folder in mrtrix_dir[1:]:
other_config = os.path.basename (get_active_build (folder))
@@ -427,14 +422,17 @@ if len (targets) and targets[0] == 'select':
sys.exit (0)
-
-active_config = os.path.basename (get_active_build())[6:]
+
+active_config = os.path.basename (get_active_build ('.'))[6:]
log ('active config is ' + active_config + '\n\n')
-activate_build (active_config, mrtrix_dir)
+active_config_core = os.path.basename (get_active_build (mrtrix_dir[-1]))[6:]
+if active_config_core != active_config:
+ print ('active config differs from core - switching to core active config')
+activate_build (active_config_core, mrtrix_dir)
@@ -458,7 +456,7 @@ if 'clean' in targets:
print ('delete folder:', root)
os.rmdir (root)
- try:
+ try:
for fname in list_untracked_bin_files():
print ('delete file:', fname)
os.remove (fname)
@@ -498,6 +496,8 @@ please run "./configure" prior to invoking this script
''')
sys.exit (1)
+if separate_project:
+ cpp_flags += [ '-DMRTRIX_MODULE' ]
environ = os.environ.copy()
environ.update ({ 'PATH': PATH })
@@ -522,12 +522,6 @@ if ld_enabled and len(runpath):
# GET VERSION INFORMATION #
############################################################################
-lib_version = get_git_lib_version (mrtrix_dir[-1])
-if lib_version is not None:
- if lib_version.find('-') > 0:
- lib_version = lib_version[0:lib_version.find('-')]
- libname += '-' + lib_version
-
if ld_enabled:
ld_flags.insert(0, '-l' + libname)
libname = lib_prefix + libname + lib_suffix
@@ -547,9 +541,13 @@ moc_obj_suffix = '_moc' + obj_suffix
# remove any files that might have been left over from older installations in
# different locations:
if os.path.isdir ('release'):
- disp ("WARNING: removing 'release/' folder - most likely left over from a previous installation\n\n")
+ disp ('WARNING: removing \'release/\' folder - most likely left over from a previous installation\n')
shutil.rmtree ('release')
+for entry in glob.glob (os.path.normpath (os.path.join (target_lib_dir, '*' + lib_suffix))):
+ if os.path.basename (entry) != libname:
+ disp ('WARNING: removing "' + entry + '" - most likely left over from a previous installation\n')
+ os.remove (entry)
###########################################################################
@@ -577,17 +575,17 @@ class Entry:
elif is_object (self.name): self.set_object()
elif is_library (self.name): self.set_library()
elif is_moc (self.name): self.set_moc()
- elif not os.path.exists (self.name):
+ elif not os.path.exists (self.name):
raise Exception ('unknown target "' + self.name + '"')
[ Entry(item) for item in self.deps ]
dep_timestamp = [ todo[item].timestamp for item in todo.keys() if item in self.deps and not is_library(item) ]
dep_timestamp += [ todo[item].dep_timestamp for item in todo.keys() if item in self.deps and not is_library(item) ]
- if len(dep_timestamp):
+ if len(dep_timestamp):
self.dep_timestamp = max(dep_timestamp)
-
+
def execute (self, cindex, formatstr):
folder = os.path.dirname (self.name)
@@ -597,7 +595,7 @@ class Entry:
if not os.path.isdir (folder):
       error ('ERROR: can\'t create target folder "' + folder + '": ' + os.strerror (e.errno))
exit (1)
-
+
if self.action == 'RCC':
with codecs.open (self.cmd[1], mode='w', encoding='utf-8') as fd:
fd.write ('\n\n')
@@ -606,9 +604,9 @@ class Entry:
if not entry.startswith ('config'):
fd.write ('' + entry + '\n')
fd.write ('\n\n')
- if len(self.cmd) > 0:
+ if len(self.cmd) > 0:
return execute (formatstr.format (cindex, self.action, self.name), self.cmd)
- else:
+ else:
return None
@@ -618,16 +616,18 @@ class Entry:
else: cc_file = self.name
cc_file = modify_path (os.path.join (cmd_dir, os.sep.join (split_path(cc_file)[1:])), add=cpp_suffix)
self.deps = list_cmd_deps(cc_file)
+ if separate_project:
+ self.deps = self.deps.union ([ os.path.join (tmp_dir, misc_dir, 'project_version' + obj_suffix) ])
skip = False
flags = []
if 'Q' in file_flags[cc_file]: flags += qt_ldflags
- if not skip:
- if not ld_enabled:
+ if not skip:
+ if not ld_enabled:
self.deps = self.deps.union (list_lib_deps())
- self.cmd = fillin (ld, {
+ self.cmd = fillin (ld, {
'LDFLAGS': [ s.replace ('LIBNAME', os.path.basename (self.name)) for s in ld_flags ] + flags,
'OBJECTS': self.deps,
'EXECUTABLE': [ self.name ] })
@@ -665,8 +665,8 @@ class Entry:
skip = False
if 'Q' in file_flags[cc_file]: flags += qt_cflags
- if not skip:
- self.cmd = fillin (cpp, {
+ if not skip:
+ self.cmd = fillin (cpp, {
'CFLAGS': cpp_flags + flags,
'OBJECT': [ self.name ],
'SRC': [ cc_file ] })
@@ -681,7 +681,7 @@ class Entry:
self.cmd = [ moc ]
self.cmd += [ src_file, '-o', self.name ]
-
+
def set_library (self):
if not ld_enabled:
error ('ERROR: shared library generation is disabled in this configuration')
@@ -716,7 +716,7 @@ class Entry:
return self.timestamp == float("inf") or self.timestamp < self.dep_timestamp
def display (self, indent=''):
- show_rebuild = lambda x: x+' [REBUILD]' if todo[x].need_rebuild() else x
+ show_rebuild = lambda x: x+' [REBUILD]' if todo[x].need_rebuild() else x
sys.stdout.write (indent + '[' + self.action + '] ' + show_rebuild (self.name) + ':\n')
sys.stdout.write (indent + ' timestamp: ' + str(self.timestamp))
if len(self.deps):
@@ -744,7 +744,7 @@ class Entry:
def default_targets():
- if not os.path.isdir (cmd_dir):
+ if not os.path.isdir (cmd_dir):
sys.stderr.write ('ERROR: no "cmd" folder - unable to determine default targets' + os.linesep)
sys.exit (1)
for entry in os.listdir (cmd_dir):
@@ -775,9 +775,9 @@ def mtime (target):
def fillin (template, keyvalue):
cmd = []
for item in template:
- if item in keyvalue:
+ if item in keyvalue:
cmd += keyvalue[item]
- else:
+ else:
cmd += [ item ]
return cmd
@@ -820,8 +820,8 @@ def execute (message, cmd, working_dir=None):
def print_deps (current_file, indent=''):
current_file = os.path.normpath (current_file)
sys.stdout.write (indent + current_file)
- if current_file in file_flags:
- if len(file_flags[current_file]):
+ if current_file in file_flags:
+ if len(file_flags[current_file]):
sys.stdout.write (' [' + file_flags[current_file] + ']')
sys.stdout.write (os.linesep)
if len(todo[current_file].deps):
@@ -834,8 +834,8 @@ def print_deps (current_file, indent=''):
def is_GUI_target (current_file):
if 'gui' in split_path (current_file):
return True
- if current_file in file_flags:
- if 'Q' in file_flags[current_file]:
+ if current_file in file_flags:
+ if 'Q' in file_flags[current_file]:
return True
if len(todo[current_file].deps):
for entry in todo[current_file].deps:
@@ -849,17 +849,17 @@ def list_headers (current_file):
global headers, file_flags
current_file = os.path.normpath (current_file)
- if current_file not in headers.keys():
+ if current_file not in headers.keys():
headers[current_file] = set()
- if current_file not in file_flags:
+ if current_file not in file_flags:
file_flags[current_file] = ''
if 'gui' in split_path (current_file):
if 'Q' not in file_flags[current_file]:
file_flags[current_file] += 'Q'
if not os.path.exists (current_file):
- if os.path.basename(current_file) == 'icons'+cpp_suffix:
+ if os.path.basename(current_file) == 'icons'+cpp_suffix:
return headers[current_file]
sys.stderr.write ('ERROR: cannot find file "' + current_file + '"' + os.linesep)
sys.exit(1)
@@ -903,7 +903,7 @@ def list_cmd_deps (file_cc):
object_deps[file_cc] = set([ modify_path (file_cc, tmp=True, strip=cpp_suffix, add=obj_suffix) ])
for entry in list_headers (file_cc):
if os.path.abspath(entry).startswith(os.path.abspath(lib_dir)): continue
- if 'M' in file_flags[entry]:
+ if 'M' in file_flags[entry]:
object_deps[file_cc] = object_deps[file_cc].union ([ modify_path (entry, tmp=True, strip=h_suffix, add=moc_obj_suffix) ])
entry_cc = entry[:-len(h_suffix)] + cpp_suffix
if os.path.exists (entry_cc):
@@ -949,19 +949,20 @@ def build_next (id):
if not len(unsatisfied_deps):
todo[item].currently_being_processed = True
current = item
- main_cindex+=1
+ main_cindex+=1
cindex = main_cindex
break
else: stop = max (stop, 1)
lock.release()
-
+
if stop: return
- if current == None:
+ if current == None:
time.sleep (0.01)
continue
-
+
target = todo[current]
if target.execute(cindex, formatstr):
+ todo[item].currently_being_processed = False
stop = 2
return
@@ -972,7 +973,7 @@ def build_next (id):
except:
stop = 2
return
-
+
stop = max(stop, 1)
@@ -994,7 +995,7 @@ def get_git_version (folder):
log ('''
getting git version in folder "''' + folder + '"... ')
- try:
+ try:
process = subprocess.Popen ([ 'git', 'describe', '--abbrev=8', '--dirty', '--always' ], cwd=folder, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
( git_version, stderr ) = process.communicate()
if process.returncode == 0:
@@ -1018,7 +1019,7 @@ def update_git_version (folder, git_version_file, contents):
current_version_file_contents = fd.read()
except:
pass
-
+
if not current_version_file_contents or (version_file_contents != current_version_file_contents and git_version != 'unknown'):
log ('version file "' + git_version_file + '" is out of date - updating\n')
with open (git_version_file, 'w') as fd:
@@ -1028,21 +1029,21 @@ def update_git_version (folder, git_version_file, contents):
def update_bash_completion ():
# Only attempt to generate completion file if POSIX compliant
if os.name != 'posix': return
-
+
# Check whether completion generation script exists
script_path = os.path.join (mrtrix_dir[-1], 'generate_bash_completion.py')
completion_path = os.path.join (mrtrix_dir[-1], 'mrtrix_bash_completion')
if not os.path.isfile (script_path):
disp (colorize('WARNING: Skipping bash completion generation. Could not find script at ' + script_path + '\n')[0])
return
-
+
# Check whether both command files and completion file exist and completion file is newer than commands
if not os.path.isdir (target_bin_dir):
return
else:
# Only look at relevant executables in bin file
commands = [comm for comm in os.listdir (target_bin_dir) if os.access( os.path.join( target_bin_dir, comm ), os.X_OK)]
- if not commands:
+ if not commands:
return
else:
command_freshness_time = max (commands, key = lambda x: time.ctime ( os.path.getmtime( os.path.join( target_bin_dir, x ) ) ) )
@@ -1059,7 +1060,7 @@ def update_user_docs ():
if os.name != 'posix': return
scripts_dir = os.path.abspath(os.path.join (mrtrix_dir[-1], 'docs' ))
- script_path = os.path.join (scripts_dir, 'generate_user_docs.sh')
+ script_path = os.path.join (scripts_dir, 'generate_user_docs.sh')
# Check whether generate docs script exists
if not os.path.isfile (script_path):
@@ -1069,15 +1070,15 @@ def update_user_docs ():
# Check whether commands dir exists
if not os.path.isdir (target_bin_dir):
return
-
+
import re
# Fetch relevant executables in bin file
commands = [comm for comm in os.listdir (target_bin_dir) if os.access( os.path.join( target_bin_dir, comm ), os.X_OK) and not re.match (r'^\w+__debug', comm)]
-
+
# Fetch relevant user docs
rst_files = []
-
+
for root, subfolders, files in os.walk (scripts_dir):
for rst_file in files:
if rst_file.endswith('.rst'):
@@ -1094,7 +1095,7 @@ def update_user_docs ():
if execute ('[DOC] generating user documentation (./docs)', [ script_path ], scripts_dir):
sys.exit (1)
-
+
###########################################################################
# SCRIPT VERSION #
@@ -1109,31 +1110,53 @@ with open (os.path.join(mrtrix_dir[-1], script_dir, '_version.py'),'w') as vfile
###########################################################################
-if len(targets) == 0:
+if len(targets) == 0:
targets = default_targets()
# get git version info:
update_git_version (mrtrix_dir[-1], os.path.join (lib_dir, 'version.cpp'), '''
-namespace MR {
- namespace App {
+namespace MR {
+ namespace App {
const char* mrtrix_version = "%%%";
- }
+ const char* build_date = __DATE__;
+ }
+}
+''')
+
+update_git_version (mrtrix_dir[-1], os.path.join (mrtrix_dir[-1], misc_dir, 'exec_version.cpp'), '''
+namespace MR {
+ namespace App {
+ extern const char* executable_uses_mrtrix_version;
+ void set_executable_uses_mrtrix_version () { executable_uses_mrtrix_version = "%%%"; }
+ }
}
''')
if separate_project:
if not os.path.exists (misc_dir):
os.mkdir (misc_dir)
- git_version_file = os.path.join (misc_dir, 'project_version.h')
- update_git_version ('.', git_version_file, '#define MRTRIX_PROJECT_VERSION "%%%"\n');
+ git_version_file = os.path.join (misc_dir, 'project_version.cpp')
+ update_git_version ('.', git_version_file, '''
+namespace MR {
+ namespace App {
+ extern const char* project_version;
+ extern const char* project_build_date;
+ void set_project_version () {
+ project_version = "%%%";
+ project_build_date = __DATE__;
+ }
+ }
+}
+''')
+
if not os.path.exists (git_version_file):
with open (git_version_file, 'w'):
pass
-
+
@@ -1154,7 +1177,7 @@ if nogui:
if not is_GUI_target (entry):
nogui_targets.append (entry)
targets = nogui_targets
-
+
nogui_todo = {}
for item in todo.keys():
if not is_GUI_target (todo[item].name):
@@ -1197,18 +1220,21 @@ log ('TODO list contains ' + str(len(todo)) + ''' items
#for entry in todo.values(): entry.display()
+try: num_processors = int(os.environ['NUMBER_OF_PROCESSORS'])
+except:
+ try: num_processors = os.sysconf('SC_NPROCESSORS_ONLN')
+ except: num_processors = 1
-if len(todo):
-
- try: num_processors = int(os.environ['NUMBER_OF_PROCESSORS'])
- except:
- try: num_processors = os.sysconf('SC_NPROCESSORS_ONLN')
- except: num_processors = 1
+while len(todo):
+
+ stop = False
+ main_cindex = 0
+ num_todo_previous = len(todo)
log ('''
-
+
launching ''' + str(num_processors) + ''' threads
-
+
''')
threads = []
@@ -1216,11 +1242,29 @@ if len(todo):
t = threading.Thread (target=build_next, args=(i,));
t.start()
threads.append (t)
-
+
build_next(0)
-
+
for t in threads: t.join()
+ if not persistent:
+ break
+
+ if len(todo) == num_todo_previous:
+ disp ('''
+stopping despite errors, as no jobs completed successfully
+
+''')
+ break
+
+ if len(todo):
+ disp ('''
+retrying, as running in persistent mode
+
+''')
+ if error_stream is not None:
+ error_stream = ''
+
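In essence, -persistent re-runs the scheduler over whatever remains in the TODO list until a full pass completes without any job succeeding. A stripped-down sketch of that control flow (function and parameter names are invented for illustration, not the build script's actual API):

    # retry-until-stalled pattern: keep only failed jobs and loop while progress is made
    def build_all(jobs, run_job, persistent=False):
        while jobs:
            remaining = [job for job in jobs if not run_job(job)]  # run_job returns True on success
            if not persistent or len(remaining) == len(jobs):
                return remaining  # stop: single-pass mode, or no job succeeded this pass
            jobs = remaining      # at least one job succeeded; retry the failures
        return []
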
# generate development-specific files (if needed)
# i.e. bash completion and user documentation
diff --git a/check_memalign b/check_syntax
similarity index 56%
rename from check_memalign
rename to check_syntax
index ee0de9ac22..2cd647f7af 100755
--- a/check_memalign
+++ b/check_syntax
@@ -1,28 +1,28 @@
#!/bin/bash
-LOG=memalign.log
-echo -n "Checking memory alignment for Eigen 3.3 compatibility... "
-echo "Checking memory alignment for Eigen 3.3 compatibility..." > $LOG
+LOG=syntax.log
+echo -n "Checking syntax and memory alignment for Eigen 3.3 compatibility... "
+echo "Checking syntax and memory alignment for Eigen 3.3 compatibility..." > $LOG
echo "" >> $LOG
retval=0
-for f in $(find cmd core src -type f -name '*.h' -o -name '*.cpp' | grep -v '_moc.cpp'); do
+for f in $(find cmd core src -type f -name '*.h' -o -name '*.cpp' | grep -v '_moc.cpp'); do
# files to ignore:
- [[ "$f" -ef "src/gui/shview/icons.h" ||
- "$f" -ef "src/gui/shview/icons.cpp" ||
- "$f" -ef "src/gui/mrview/icons.h" ||
- "$f" -ef "src/gui/mrview/icons.cpp" ||
- "$f" -ef "core/file/mgh.h" ||
- "$f" -ef "core/file/json.h" ||
- "$f" -ef "core/file/nifti2.h" ||
+ [[ "$f" -ef "src/gui/shview/icons.h" ||
+ "$f" -ef "src/gui/shview/icons.cpp" ||
+ "$f" -ef "src/gui/mrview/icons.h" ||
+ "$f" -ef "src/gui/mrview/icons.cpp" ||
+ "$f" -ef "core/file/mgh.h" ||
+ "$f" -ef "core/file/json.h" ||
+ "$f" -ef "core/file/nifti2.h" ||
"$f" -ef "core/signal_handler.h" ||
"$f" -ef "core/signal_handler.cpp" ]] && continue
-
+
# process the file to strip comments, macros, etc:
cat $f | \
# remove C preprocessor macros:
@@ -36,13 +36,12 @@ for f in $(find cmd core src -type f -name '*.h' -o -name '*.cpp' | grep -v '_mo
# remove C-style comments:
perl -pe 's|/\*.*?\*/||g' | \
# remove quoted strings:
- perl -pe 's/(")(\\"|.)*?"//g' > .check_memalign.tmp
+ perl -pe 's/(")(\\"|.)*?"//g' > .check_syntax.tmp
# detect classes not declared MEMALIGN or NOMEMALIGN:
-
- res=$(
- cat .check_memalign.tmp | \
+ res=$(
+ cat .check_syntax.tmp | \
# remove any text within a template declaration (i.e. within <>):
perl -pe 's|<[^{};<]*?>||g' | \
# and do it multiple times to handle nested declarations:
@@ -54,52 +53,62 @@ for f in $(find cmd core src -type f -name '*.h' -o -name '*.cpp' | grep -v '_mo
# remove matches that correspond to an enum class declaration:
grep -Ev '\benum\s*class\b' | \
# remove matches that are properly specified:
- grep -Ev '\b(class|struct)\b[^;{]*?{(\s*(MEMALIGN\s*\([^\)]*\)|NOMEMALIGN))'
+ grep -Ev '\b(class|struct)\b[^;{]*?{(\s*(MEMALIGN\s*\([^\)]*\)|NOMEMALIGN))'
)
-
# detect any instances of std::vector:
- res="$res"$(
- cat .check_memalign.tmp | \
+ res="$res"$(
+ cat .check_syntax.tmp | \
# match for the parts we're interested in and output just the bits that match:
- grep -Po '(?> $LOG
echo "$res" >> $LOG
retval=1
- fi
+ fi
done
# set exit code:
if [[ $retval == 0 ]]; then
- echo "OK"
+ echo "OK"
echo "no issues detected" >> $LOG
else
- echo "FAIL"
-
+ echo "FAIL (see syntax.log for details)"
+
echo "" >> $LOG
- echo "Please add MEMALIGN() macro to the class declarations identified above,
-replace all occurrences of std::vector with MR::vector,
-and avoid use of std::make_shared" >> $LOG
+  echo "Please add the MEMALIGN() or NOMEMALIGN macro to the class declarations
+ identified above, replace all occurrences of std::vector<> with MR::vector<>
+ (or just vector<>), avoid use of std::make_shared(), and replace all
+ instances of std::abs() with MR::abs() (or just abs())" >> $LOG
exit 1
fi
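At its core, the class check in this pipeline flags any class or struct whose body does not open with a MEMALIGN(...) or NOMEMALIGN marker. A rough Python rendering of that single check (the regular expression is simplified relative to the script's actual grep/perl stages, and assumes comments and quoted strings have already been stripped):

    import re

    DECL = re.compile(r'\b(class|struct)\b[^;{]*\{\s*(MEMALIGN\s*\([^)]*\)|NOMEMALIGN)?')

    def missing_memalign(source):
        # return the declarations whose body does not start with a MEMALIGN marker
        return [m.group(0) for m in DECL.finditer(source) if m.group(2) is None]

    ok  = 'class Image { MEMALIGN(Image) public: int ndim; };'
    bad = 'struct Header { int size; };'
    assert not missing_memalign(ok)
    assert missing_memalign(bad)
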
diff --git a/cmd/5tt2gmwmi.cpp b/cmd/5tt2gmwmi.cpp
index 8d6908fc02..3ed1a74f67 100644
--- a/cmd/5tt2gmwmi.cpp
+++ b/cmd/5tt2gmwmi.cpp
@@ -1,14 +1,15 @@
-/* Copyright (c) 2008-2017 the MRtrix3 contributors.
+/*
+ * Copyright (c) 2008-2018 the MRtrix3 contributors.
*
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
- * file, you can obtain one at http://mozilla.org/MPL/2.0/.
+ * file, you can obtain one at http://mozilla.org/MPL/2.0/
*
- * MRtrix is distributed in the hope that it will be useful,
+ * MRtrix3 is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty
* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
*
- * For more details, see http://www.mrtrix.org/.
+ * For more details, see http://www.mrtrix.org/
*/
@@ -94,7 +95,7 @@ class Processor
input.index(axis) = output.index(axis) + 1;
}
const DWI::Tractography::ACT::Tissues pos (input);
- gradient += Math::pow2 (multiplier * std::min (std::abs (pos.get_gm() - neg.get_gm()), std::abs (pos.get_wm() - neg.get_wm())));
+ gradient += Math::pow2 (multiplier * std::min (abs (pos.get_gm() - neg.get_gm()), abs (pos.get_wm() - neg.get_wm())));
}
output.value() = std::max (0.0, std::sqrt (gradient));
assign_pos_of (output, 0, 3).to (input);
diff --git a/cmd/5tt2vis.cpp b/cmd/5tt2vis.cpp
index 14f0ad5723..4c652a966c 100644
--- a/cmd/5tt2vis.cpp
+++ b/cmd/5tt2vis.cpp
@@ -1,14 +1,15 @@
-/* Copyright (c) 2008-2017 the MRtrix3 contributors.
+/*
+ * Copyright (c) 2008-2018 the MRtrix3 contributors.
*
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
- * file, you can obtain one at http://mozilla.org/MPL/2.0/.
+ * file, you can obtain one at http://mozilla.org/MPL/2.0/
*
- * MRtrix is distributed in the hope that it will be useful,
+ * MRtrix3 is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty
* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
*
- * For more details, see http://www.mrtrix.org/.
+ * For more details, see http://www.mrtrix.org/
*/
diff --git a/cmd/5ttcheck.cpp b/cmd/5ttcheck.cpp
index 4c4d81af71..619cefce45 100644
--- a/cmd/5ttcheck.cpp
+++ b/cmd/5ttcheck.cpp
@@ -1,14 +1,15 @@
-/* Copyright (c) 2008-2017 the MRtrix3 contributors.
+/*
+ * Copyright (c) 2008-2018 the MRtrix3 contributors.
*
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
- * file, you can obtain one at http://mozilla.org/MPL/2.0/.
+ * file, you can obtain one at http://mozilla.org/MPL/2.0/
*
- * MRtrix is distributed in the hope that it will be useful,
+ * MRtrix3 is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty
* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
*
- * For more details, see http://www.mrtrix.org/.
+ * For more details, see http://www.mrtrix.org/
*/
@@ -20,6 +21,7 @@
#include "algo/copy.h"
#include "algo/loop.h"
+#include "formats/list.h"
#include "dwi/tractography/ACT/act.h"
@@ -53,7 +55,7 @@ void run ()
{
const std::string mask_prefix = get_option_value ("masks", "");
- size_t error_count = 0;
+ size_t major_error_count = 0, minor_error_count = 0;
for (size_t i = 0; i != argument.size(); ++i) {
auto in = Image::open (argument[i]);
@@ -81,7 +83,7 @@ void run ()
for (auto inner = Loop(3) (in); inner; ++inner)
sum += in.value();
if (!sum) continue;
- if (std::abs (sum-1.0) > MAX_ERROR) {
+ if (abs (sum-1.0) > MAX_ERROR) {
++voxel_error_sum;
if (voxels.valid()) {
assign_pos_of (in, 0, 3).to (voxels);
@@ -90,28 +92,53 @@ void run ()
}
}
- if (voxel_error_sum) {
+ if (voxel_error_sum == 1) {
+ INFO ("Image \"" + argument[i] + "\" contains just one isolated voxel with non-unity sum of partial volume fractions");
+ } else if (voxel_error_sum) {
WARN ("Image \"" + argument[i] + "\" contains " + str(voxel_error_sum) + " brain voxels with non-unity sum of partial volume fractions");
- ++error_count;
+ ++minor_error_count;
if (voxels.valid()) {
- auto out = Image::create (mask_prefix + Path::basename (argument[i]), H_out);
+ std::string path = mask_prefix;
+ if (argument.size() > 1) {
+ path += Path::basename (argument[i]);
+ } else {
+ bool has_extension = false;
+ for (auto p = MR::Formats::known_extensions; *p; ++p) {
+ if (Path::has_suffix (path, std::string (*p))) {
+ has_extension = true;
+ break;
+ }
+ }
+ if (!has_extension)
+ path += ".mif";
+ }
+ auto out = Image::create (path, H_out);
copy (voxels, out);
}
} else {
INFO ("Image \"" + argument[i] + "\" conforms to 5TT format");
}
- } catch (...) {
+ } catch (Exception& e) {
+ e.display();
WARN ("Image \"" + argument[i] + "\" does not conform to fundamental 5TT format requirements");
- ++error_count;
+ ++major_error_count;
}
}
- if (error_count) {
+ const std::string vox_option_suggestion = get_options ("masks").size() ? (" (suggest checking " + std::string(argument.size() > 1 ? "outputs from" : "output of") + " -masks option)") : " (suggest re-running using the -masks option to see voxels where tissue fractions do not sum to 1.0)";
+
+ if (major_error_count) {
if (argument.size() > 1)
- throw Exception (str(error_count) + " input image" + (error_count > 1 ? "s do" : " does") + " not conform to 5TT format");
+ throw Exception (str(major_error_count) + " input image" + (major_error_count > 1 ? "s do" : " does") + " not conform to 5TT format");
else
throw Exception ("Input image does not conform to 5TT format");
+ } else if (minor_error_count) {
+ if (argument.size() > 1) {
+ WARN (str(minor_error_count) + " input image" + (minor_error_count > 1 ? "s do" : " does") + " not perfectly conform to 5TT format, but may still be applicable" + vox_option_suggestion);
+ } else {
+ WARN ("Input image does not perfectly conform to 5TT format, but may still be applicable" + vox_option_suggestion);
+ }
}
}
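
As an aside on the hunk above: the "minor" error count tallies brain voxels whose five tissue fractions do not sum to unity within a fixed tolerance. A minimal sketch of that check in plain standard C++ (not the MRtrix3 Image/Loop API; the tolerance value here is assumed purely for illustration, the command uses its own MAX_ERROR constant):

    #include <array>
    #include <cmath>
    #include <cstddef>

    // Tolerance on the tissue-fraction sum (illustrative value only).
    constexpr double max_error = 0.001;

    // True if the five partial-volume fractions of a voxel either are all zero
    // (voxel outside the brain, skipped above) or sum to unity within tolerance.
    bool sums_to_unity (const std::array<double, 5>& pvf)
    {
      double sum = 0.0;
      for (std::size_t n = 0; n != pvf.size(); ++n)
        sum += pvf[n];
      if (sum == 0.0)
        return true;
      return std::fabs (sum - 1.0) <= max_error;
    }
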
diff --git a/cmd/5ttedit.cpp b/cmd/5ttedit.cpp
index eceef43205..249d2a0685 100644
--- a/cmd/5ttedit.cpp
+++ b/cmd/5ttedit.cpp
@@ -1,14 +1,15 @@
-/* Copyright (c) 2008-2017 the MRtrix3 contributors.
+/*
+ * Copyright (c) 2008-2018 the MRtrix3 contributors.
*
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
- * file, you can obtain one at http://mozilla.org/MPL/2.0/.
+ * file, you can obtain one at http://mozilla.org/MPL/2.0/
*
- * MRtrix is distributed in the hope that it will be useful,
+ * MRtrix3 is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty
* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
*
- * For more details, see http://www.mrtrix.org/.
+ * For more details, see http://www.mrtrix.org/
*/
diff --git a/cmd/afdconnectivity.cpp b/cmd/afdconnectivity.cpp
index 5fa0436f33..47c5ba479d 100644
--- a/cmd/afdconnectivity.cpp
+++ b/cmd/afdconnectivity.cpp
@@ -1,14 +1,15 @@
-/* Copyright (c) 2008-2017 the MRtrix3 contributors.
+/*
+ * Copyright (c) 2008-2018 the MRtrix3 contributors.
*
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
- * file, you can obtain one at http://mozilla.org/MPL/2.0/.
+ * file, you can obtain one at http://mozilla.org/MPL/2.0/
*
- * MRtrix is distributed in the hope that it will be useful,
+ * MRtrix3 is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty
* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
*
- * For more details, see http://www.mrtrix.org/.
+ * For more details, see http://www.mrtrix.org/
*/
diff --git a/cmd/amp2response.cpp b/cmd/amp2response.cpp
index d3c4278a3f..4444ac0c34 100644
--- a/cmd/amp2response.cpp
+++ b/cmd/amp2response.cpp
@@ -1,14 +1,15 @@
-/* Copyright (c) 2008-2017 the MRtrix3 contributors.
+/*
+ * Copyright (c) 2008-2018 the MRtrix3 contributors.
*
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
- * file, you can obtain one at http://mozilla.org/MPL/2.0/.
+ * file, you can obtain one at http://mozilla.org/MPL/2.0/
*
- * MRtrix is distributed in the hope that it will be useful,
+ * MRtrix3 is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty
* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
*
- * For more details, see http://www.mrtrix.org/.
+ * For more details, see http://www.mrtrix.org/
*/
@@ -56,10 +57,7 @@ void usage ()
+ "If multi-shell data are provided, and one or more b-value shells are not explicitly "
"requested, the command will generate a response function for every b-value shell "
- "(including b=0 if present)."
-
- + "For details on the method provided by this command see: "
- "https://www.researchgate.net/publication/307862932_Constrained_linear_least_squares_estimation_of_anisotropic_response_function_for_spherical_deconvolution";
+ "(including b=0 if present).";
ARGUMENTS
+ Argument ("amps", "the amplitudes image").type_image_in()
@@ -75,11 +73,16 @@ void usage ()
+ Option ("directions", "provide an external text file containing the directions along which the amplitudes are sampled")
+ Argument("path").type_file_in()
- + DWI::ShellOption
+ + DWI::ShellsOption
+ Option ("lmax", "specify the maximum harmonic degree of the response function to estimate "
"(can be a comma-separated list for multi-shell data)")
+ Argument ("values").type_sequence_int();
+
+ REFERENCES
+ + "Smith, R. E.; Dhollander, T. & Connelly, A. " // Internal
+ "Constrained linear least squares estimation of anisotropic response function for spherical deconvolution. "
+ "ISMRM Workshop on Breaking the Barriers of Diffusion MRI, 23.";
}
@@ -114,7 +117,7 @@ vector all_volumes (const size_t num)
}
-void run ()
+void run ()
{
// Get directions from either selecting a b-value shell, or the header, or external file
diff --git a/cmd/amp2sh.cpp b/cmd/amp2sh.cpp
index b34a1915cb..5f89dea612 100644
--- a/cmd/amp2sh.cpp
+++ b/cmd/amp2sh.cpp
@@ -1,14 +1,15 @@
-/* Copyright (c) 2008-2017 the MRtrix3 contributors.
+/*
+ * Copyright (c) 2008-2018 the MRtrix3 contributors.
*
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
- * file, you can obtain one at http://mozilla.org/MPL/2.0/.
+ * file, you can obtain one at http://mozilla.org/MPL/2.0/
*
- * MRtrix is distributed in the hope that it will be useful,
+ * MRtrix3 is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty
* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
*
- * For more details, see http://www.mrtrix.org/.
+ * For more details, see http://www.mrtrix.org/
*/
@@ -68,7 +69,7 @@ void usage ()
+ Argument ("noise").type_image_in()
+ DWI::GradImportOptions()
- + DWI::ShellOption
+ + DWI::ShellsOption
+ Stride::Options;
}
diff --git a/cmd/connectome2tck.cpp b/cmd/connectome2tck.cpp
index 655e725573..39d66a3199 100644
--- a/cmd/connectome2tck.cpp
+++ b/cmd/connectome2tck.cpp
@@ -1,14 +1,15 @@
-/* Copyright (c) 2008-2017 the MRtrix3 contributors.
+/*
+ * Copyright (c) 2008-2018 the MRtrix3 contributors.
*
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
- * file, you can obtain one at http://mozilla.org/MPL/2.0/.
+ * file, you can obtain one at http://mozilla.org/MPL/2.0/
*
- * MRtrix is distributed in the hope that it will be useful,
+ * MRtrix3 is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty
* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
*
- * For more details, see http://www.mrtrix.org/.
+ * For more details, see http://www.mrtrix.org/
*/
diff --git a/cmd/connectomestats.cpp b/cmd/connectomestats.cpp
index fb35f3b820..7b48a3e5ac 100644
--- a/cmd/connectomestats.cpp
+++ b/cmd/connectomestats.cpp
@@ -1,14 +1,15 @@
-/* Copyright (c) 2008-2017 the MRtrix3 contributors.
+/*
+ * Copyright (c) 2008-2018 the MRtrix3 contributors.
*
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
- * file, you can obtain one at http://mozilla.org/MPL/2.0/.
+ * file, you can obtain one at http://mozilla.org/MPL/2.0/
*
- * MRtrix is distributed in the hope that it will be useful,
+ * MRtrix3 is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty
* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
*
- * For more details, see http://www.mrtrix.org/.
+ * For more details, see http://www.mrtrix.org/
*/
diff --git a/cmd/dcmedit.cpp b/cmd/dcmedit.cpp
index 77679f3161..63d8b028c9 100644
--- a/cmd/dcmedit.cpp
+++ b/cmd/dcmedit.cpp
@@ -1,14 +1,15 @@
-/* Copyright (c) 2008-2017 the MRtrix3 contributors.
+/*
+ * Copyright (c) 2008-2018 the MRtrix3 contributors.
*
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
- * file, you can obtain one at http://mozilla.org/MPL/2.0/.
+ * file, you can obtain one at http://mozilla.org/MPL/2.0/
*
- * MRtrix is distributed in the hope that it will be useful,
+ * MRtrix3 is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty
* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
*
- * For more details, see http://www.mrtrix.org/.
+ * For more details, see http://www.mrtrix.org/
*/
diff --git a/cmd/dcminfo.cpp b/cmd/dcminfo.cpp
index 0ac2ae7bc6..f401263d38 100644
--- a/cmd/dcminfo.cpp
+++ b/cmd/dcminfo.cpp
@@ -1,14 +1,15 @@
-/* Copyright (c) 2008-2017 the MRtrix3 contributors.
+/*
+ * Copyright (c) 2008-2018 the MRtrix3 contributors.
*
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
- * file, you can obtain one at http://mozilla.org/MPL/2.0/.
+ * file, you can obtain one at http://mozilla.org/MPL/2.0/
*
- * MRtrix is distributed in the hope that it will be useful,
+ * MRtrix3 is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty
* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
*
- * For more details, see http://www.mrtrix.org/.
+ * For more details, see http://www.mrtrix.org/
*/
diff --git a/cmd/dirflip.cpp b/cmd/dirflip.cpp
index a0508d57d0..3e336fbbdd 100644
--- a/cmd/dirflip.cpp
+++ b/cmd/dirflip.cpp
@@ -1,14 +1,15 @@
-/* Copyright (c) 2008-2017 the MRtrix3 contributors.
+/*
+ * Copyright (c) 2008-2018 the MRtrix3 contributors.
*
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
- * file, you can obtain one at http://mozilla.org/MPL/2.0/.
+ * file, you can obtain one at http://mozilla.org/MPL/2.0/
*
- * MRtrix is distributed in the hope that it will be useful,
+ * MRtrix3 is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty
* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
*
- * For more details, see http://www.mrtrix.org/.
+ * For more details, see http://www.mrtrix.org/
*/
@@ -37,7 +38,7 @@ void usage ()
+ "The orientations themselves are not affected, only their "
"polarity; this is necessary to ensure near-optimal distribution of DW "
"directions for eddy-current correction.";
-
+
ARGUMENTS
+ Argument ("in", "the input files for the directions.").type_file_in()
+ Argument ("out", "the output files for the directions.").type_file_out();
@@ -62,7 +63,7 @@ class Shared { MEMALIGN(Shared)
progress ("optimising directions for eddy-currents", target_num_permutations),
best_signs (directions.rows(), 1), best_eddy (std::numeric_limits<value_type>::max()) { }
- bool update (value_type eddy, const vector& signs)
+ bool update (value_type eddy, const vector& signs)
{
std::lock_guard lock (mutex);
if (eddy < best_eddy) {
@@ -98,7 +99,7 @@ class Shared { MEMALIGN(Shared)
vector best_signs;
value_type best_eddy;
std::mutex mutex;
-
+
};
@@ -113,7 +114,7 @@ class Processor { MEMALIGN(Processor)
uniform (0, signs.size()-1) { }
void execute () {
- while (eval());
+ while (eval());
}
@@ -127,8 +128,8 @@ class Processor { MEMALIGN(Processor)
next_permutation();
value_type eddy = 0.0;
- for (size_t i = 0; i < signs.size(); ++i)
- for (size_t j = i+1; j < signs.size(); ++j)
+ for (size_t i = 0; i < signs.size(); ++i)
+ for (size_t j = i+1; j < signs.size(); ++j)
eddy += shared.eddy (i, j, signs);
return shared.update (eddy, signs);
@@ -148,18 +149,20 @@ class Processor { MEMALIGN(Processor)
-void run ()
+void run ()
{
auto directions = DWI::Directions::load_cartesian (argument[0]);
size_t num_permutations = get_option_value ("permutations", DEFAULT_PERMUTATIONS);
- Shared eddy_shared (directions, num_permutations);
- Thread::run (Thread::multi (Processor (eddy_shared)), "eval thread");
-
- auto& signs = eddy_shared.get_best_signs();
+ vector signs;
+ {
+ Shared eddy_shared (directions, num_permutations);
+ Thread::run (Thread::multi (Processor (eddy_shared)), "eval thread");
+ signs = eddy_shared.get_best_signs();
+ }
- for (ssize_t n = 0; n < directions.rows(); ++n)
+ for (ssize_t n = 0; n < directions.rows(); ++n)
if (signs[n] < 0)
directions.row(n) *= -1.0;
diff --git a/cmd/dirgen.cpp b/cmd/dirgen.cpp
index eaf5823d5a..9724807905 100644
--- a/cmd/dirgen.cpp
+++ b/cmd/dirgen.cpp
@@ -1,14 +1,15 @@
-/* Copyright (c) 2008-2017 the MRtrix3 contributors.
+/*
+ * Copyright (c) 2008-2018 the MRtrix3 contributors.
*
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
- * file, you can obtain one at http://mozilla.org/MPL/2.0/.
+ * file, you can obtain one at http://mozilla.org/MPL/2.0/
*
- * MRtrix is distributed in the hope that it will be useful,
+ * MRtrix3 is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty
* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
*
- * For more details, see http://www.mrtrix.org/.
+ * For more details, see http://www.mrtrix.org/
*/
@@ -35,7 +36,7 @@ void usage ()
SYNOPSIS = "Generate a set of uniformly distributed directions using a bipolar electrostatic repulsion model";
- DESCRIPTION
+ DESCRIPTION
+ "Directions are distributed by analogy to an electrostatic repulsion system, with each direction "
"corresponding to a single electrostatic charge (for -unipolar), or a pair of diametrically opposed charges "
"(for the default bipolar case). The energy of the system is determined based on the Coulomb repulsion, "
@@ -43,7 +44,7 @@ void usage ()
"assumed for the repulsion law (default: 1). The minimum energy state is obtained by gradient descent.";
- REFERENCES
+ REFERENCES
+ "Jones, D.; Horsfield, M. & Simmons, A. "
"Optimal strategies for measuring diffusion in anisotropic systems by magnetic resonance imaging. "
"Magnetic Resonance in Medicine, 1999, 42: 515-525"
@@ -79,9 +80,9 @@ void usage ()
class ProjectedUpdate { MEMALIGN(ProjectedUpdate)
public:
bool operator() (
- Eigen::VectorXd& newx,
+ Eigen::VectorXd& newx,
const Eigen::VectorXd& x,
- const Eigen::VectorXd& g,
+ const Eigen::VectorXd& g,
double step_size) {
newx.noalias() = x - step_size * g;
for (ssize_t n = 0; n < newx.size(); n += 3)
@@ -98,14 +99,18 @@ class ProjectedUpdate { MEMALIGN(ProjectedUpdate)
class Energy { MEMALIGN(Energy)
public:
- Energy (ProgressBar& progress) :
+ Energy (ProgressBar& progress) :
progress (progress),
ndirs (to<size_t> (argument[0])),
bipolar (!(get_options ("unipolar").size())),
power (0),
directions (3 * ndirs) { }
- FORCE_INLINE double fast_pow (double x, int p) {
+// Non-optimised compilation can't handle recursive inline functions
+#ifdef __OPTIMIZE__
+FORCE_INLINE
+#endif
+ double fast_pow (double x, int p) {
return p == 1 ? x : fast_pow (x*x, p/2);
}
@@ -113,13 +118,13 @@ class Energy { MEMALIGN(Energy)
size_t size () const { return 3 * ndirs; }
- // set x to original directions provided in constructor.
+ // set x to original directions provided in constructor.
// The idea is to save the directions from one run to initialise next run
// at higher power.
- double init (Eigen::VectorXd& x)
+ double init (Eigen::VectorXd& x)
{
Math::RNG::Normal rng;
- for (ssize_t n = 0; n < ndirs; ++n) {
+ for (size_t n = 0; n < ndirs; ++n) {
auto d = x.segment (3*n,3);
d[0] = rng();
d[1] = rng();
@@ -146,26 +151,26 @@ class Energy { MEMALIGN(Energy)
Eigen::Vector3d r = d1-d2;
double _1_r2 = 1.0 / r.squaredNorm();
double _1_r = std::sqrt (_1_r2);
- double e = fast_pow (_1_r, power);
+ double e = fast_pow (_1_r, power);
E += e;
- g1 -= (power * e * _1_r2) * r;
- g2 += (power * e * _1_r2) * r;
+ g1 -= (power * e * _1_r2) * r;
+ g2 += (power * e * _1_r2) * r;
if (bipolar) {
r = d1+d2;
_1_r2 = 1.0 / r.squaredNorm();
_1_r = std::sqrt (_1_r2);
- e = fast_pow (_1_r, power);
+ e = fast_pow (_1_r, power);
E += e;
- g1 -= (power * e * _1_r2) * r;
- g2 -= (power * e * _1_r2) * r;
+ g1 -= (power * e * _1_r2) * r;
+ g2 -= (power * e * _1_r2) * r;
}
}
}
// constrain gradients to lie tangent to unit sphere:
- for (size_t n = 0; n < ndirs; ++n)
+ for (size_t n = 0; n < ndirs; ++n)
g.segment(3*n,3) -= x.segment(3*n,3).dot (g.segment(3*n,3)) * x.segment(3*n,3);
return E;
@@ -174,7 +179,7 @@ class Energy { MEMALIGN(Energy)
// function executed per thread:
- void execute ()
+ void execute ()
{
size_t this_start = 0;
while ((this_start = current_start++) < restarts) {
@@ -189,7 +194,7 @@ class Energy { MEMALIGN(Energy)
size_t iter = 0;
for (; iter < niter; iter++) {
- if (!optim.iterate())
+ if (!optim.iterate())
break;
DEBUG ("start " + str(this_start) + ": [ " + str (iter) + " ] (pow = " + str (power) + ") E = " + str (optim.value(), 8)
@@ -244,13 +249,13 @@ Eigen::VectorXd Energy::best_directions;
-void run ()
+void run ()
{
Energy::restarts = get_option_value ("restarts", DEFAULT_RESTARTS);
Energy::target_power = get_option_value ("power", DEFAULT_POWER);
Energy::niter = get_option_value ("niter", DEFAULT_NITER);
- {
+ {
ProgressBar progress ("Optimising directions up to power " + str(Energy::target_power) + " (" + str(Energy::restarts) + " restarts)");
Energy energy_functor (progress);
auto threads = Thread::run (Thread::multi (energy_functor), "energy function");
@@ -259,7 +264,7 @@ void run ()
CONSOLE ("final energy = " + str(Energy::best_E));
size_t ndirs = Energy::best_directions.size()/3;
Eigen::MatrixXd directions_matrix (ndirs, 3);
- for (int n = 0; n < ndirs; ++n)
+ for (size_t n = 0; n < ndirs; ++n)
directions_matrix.row (n) = Energy::best_directions.segment (3*n, 3);
DWI::Directions::save (directions_matrix, argument[1], get_options ("cartesian").size());
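
Note on the __OPTIMIZE__ guard above: fast_pow is recursive, and forcing it inline is only viable when the optimiser can collapse the recursion, hence the force-inline attribute is dropped for non-optimised builds. Below is a free-function restatement of the same exponentiation-by-squaring recursion; it is exact only when p is a power of two, which is how dirgen appears to call it (an assumption here), and is illustrative only:

    #include <cassert>

    // x^p via repeated squaring, matching the recursion in Energy::fast_pow.
    double fast_pow (double x, int p)
    {
      assert (p >= 1);
      return p == 1 ? x : fast_pow (x*x, p/2);
    }
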
diff --git a/cmd/dirmerge.cpp b/cmd/dirmerge.cpp
index e65379bc4b..78f3b558e1 100644
--- a/cmd/dirmerge.cpp
+++ b/cmd/dirmerge.cpp
@@ -1,14 +1,15 @@
-/* Copyright (c) 2008-2017 the MRtrix3 contributors.
+/*
+ * Copyright (c) 2008-2018 the MRtrix3 contributors.
*
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
- * file, you can obtain one at http://mozilla.org/MPL/2.0/.
+ * file, you can obtain one at http://mozilla.org/MPL/2.0/
*
- * MRtrix is distributed in the hope that it will be useful,
+ * MRtrix3 is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty
* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
*
- * For more details, see http://www.mrtrix.org/.
+ * For more details, see http://www.mrtrix.org/
*/
diff --git a/cmd/dirorder.cpp b/cmd/dirorder.cpp
index 2112c46226..368b4e2522 100644
--- a/cmd/dirorder.cpp
+++ b/cmd/dirorder.cpp
@@ -1,14 +1,15 @@
-/* Copyright (c) 2008-2017 the MRtrix3 contributors.
+/*
+ * Copyright (c) 2008-2018 the MRtrix3 contributors.
*
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
- * file, you can obtain one at http://mozilla.org/MPL/2.0/.
+ * file, you can obtain one at http://mozilla.org/MPL/2.0/
*
- * MRtrix is distributed in the hope that it will be useful,
+ * MRtrix3 is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty
* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
*
- * For more details, see http://www.mrtrix.org/.
+ * For more details, see http://www.mrtrix.org/
*/
diff --git a/cmd/dirsplit.cpp b/cmd/dirsplit.cpp
index 64c86b90ee..a74620a682 100644
--- a/cmd/dirsplit.cpp
+++ b/cmd/dirsplit.cpp
@@ -1,14 +1,15 @@
-/* Copyright (c) 2008-2017 the MRtrix3 contributors.
+/*
+ * Copyright (c) 2008-2018 the MRtrix3 contributors.
*
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
- * file, you can obtain one at http://mozilla.org/MPL/2.0/.
+ * file, you can obtain one at http://mozilla.org/MPL/2.0/
*
- * MRtrix is distributed in the hope that it will be useful,
+ * MRtrix3 is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty
* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
*
- * For more details, see http://www.mrtrix.org/.
+ * For more details, see http://www.mrtrix.org/
*/
@@ -50,7 +51,7 @@ using vector3_type = Eigen::Vector3d;
class Shared { MEMALIGN(Shared)
public:
Shared (const Eigen::MatrixXd& directions, size_t num_subsets, size_t target_num_permutations) :
- directions (directions), subset (num_subsets),
+ directions (directions), subset (num_subsets),
best_energy (std::numeric_limits<value_type>::max()),
target_num_permutations (target_num_permutations),
num_permutations (0) {
@@ -59,14 +60,14 @@ class Shared { MEMALIGN(Shared)
subset[s++].push_back (n);
if (s >= num_subsets) s = 0;
}
- INFO ("split " + str(directions.rows()) + " directions into subsets with " +
+ INFO ("split " + str(directions.rows()) + " directions into subsets with " +
str([&]{ vector<size_t> c; for (auto& x : subset) c.push_back (x.size()); return c; }()) + " volumes");
}
- bool update (value_type energy, const vector<vector<size_t>>& set)
+ bool update (value_type energy, const vector<vector<size_t>>& set)
{
std::lock_guard lock (mutex);
if (!progress) progress.reset (new ProgressBar ("distributing directions", target_num_permutations));
@@ -114,7 +115,7 @@ class EnergyCalculator { MEMALIGN(EnergyCalculator)
EnergyCalculator (Shared& shared) : shared (shared), subset (shared.get_init_subset()) { }
void execute () {
- while (eval());
+ while (eval());
}
@@ -140,8 +141,8 @@ class EnergyCalculator { MEMALIGN(EnergyCalculator)
value_type energy = 0.0;
for (auto& s: subset) {
value_type current_energy = 0.0;
- for (size_t i = 0; i < s.size(); ++i)
- for (size_t j = i+1; j < s.size(); ++j)
+ for (size_t i = 0; i < s.size(); ++i)
+ for (size_t j = i+1; j < s.size(); ++j)
current_energy += shared.energy (s[i], s[j]);
energy = std::max (energy, current_energy);
}
@@ -161,13 +162,15 @@ class EnergyCalculator { MEMALIGN(EnergyCalculator)
-void run ()
+void run ()
{
auto directions = DWI::Directions::load_cartesian (argument[0]);
- size_t num_subsets = argument.size() - 1;
+ const size_t num_subsets = argument.size() - 1;
+ if (num_subsets == 1)
+ throw Exception ("Directions must be split across two or more output files");
- size_t num_permutations = get_option_value ("permutations", DEFAULT_PERMUTATIONS);
+ const size_t num_permutations = get_option_value ("permutations", DEFAULT_PERMUTATIONS);
vector<vector<size_t>> best;
{
@@ -176,12 +179,10 @@ void run ()
best = shared.get_best_subset();
}
-
-
- bool cartesian = get_options("cartesian").size();
+ const bool cartesian = get_options("cartesian").size();
for (size_t i = 0; i < best.size(); ++i) {
Eigen::MatrixXd output (best[i].size(), 3);
- for (size_t n = 0; n < best[i].size(); ++n)
+ for (size_t n = 0; n < best[i].size(); ++n)
output.row(n) = directions.row (best[i][n]);
DWI::Directions::save (output, argument[i+1], cartesian);
}
diff --git a/cmd/dirstat.cpp b/cmd/dirstat.cpp
index 960de4b7b4..a99d53c13e 100644
--- a/cmd/dirstat.cpp
+++ b/cmd/dirstat.cpp
@@ -1,21 +1,24 @@
-/* Copyright (c) 2008-2017 the MRtrix3 contributors.
+/*
+ * Copyright (c) 2008-2018 the MRtrix3 contributors.
*
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
- * file, you can obtain one at http://mozilla.org/MPL/2.0/.
+ * file, you can obtain one at http://mozilla.org/MPL/2.0/
*
- * MRtrix is distributed in the hope that it will be useful,
+ * MRtrix3 is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty
* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
*
- * For more details, see http://www.mrtrix.org/.
+ * For more details, see http://www.mrtrix.org/
*/
#include "command.h"
#include "progressbar.h"
+#include "header.h"
#include "dwi/directions/file.h"
#include "dwi/gradient.h"
+#include "dwi/shells.h"
@@ -28,19 +31,160 @@ void usage ()
SYNOPSIS = "Report statistics on a direction set";
+ DESCRIPTION
+ + "This command will accept as inputs:"
+ + "- directions file in spherical coordinates (ASCII text, [ az el ] space-separated values, one per line);"
+ + "- directions file in Cartesian coordinates (ASCII text, [ x y z ] space-separated values, one per line);"
+ + "- DW gradient files (MRtrix format: ASCII text, [ x y z b ] space-separated values, one per line);"
+ + "- image files, using the DW gradient scheme found in the header (or provided using the appropriate command line options below)."
+
+ + "By default, this produces all relevant metrics for the direction set "
+ "provided. If the direction set contains multiple shells, metrics are "
+ "provided for each shell separately."
+
+ + "Metrics are produced assuming a unipolar or bipolar electrostatic "
+ "repulsion model, producing the potential energy (total, mean, min & max), "
+ "and the nearest-neighbour angles (mean, min & max). The condition "
+ "number is also produced for the spherical harmonic fits up to the highest "
+ "harmonic order supported by the number of volumes. Finally, the norm of the "
+ "mean direction vector is provided as a measure of the overall symmetry of "
+ "the direction set (important with respect to eddy-current resilience)."
+
+ + "Specific metrics can also be queried independently via the \"-output\" "
+ "option, using these shorthands: U/B for unipolar/bipolar model, E/N "
+ "for energy and nearest-neighbour respectively, t/-/+ for total/min/max "
+ "respectively (mean implied otherwise); SHn for condition number of SH fit "
+ "at order n (with n an even integer); ASYM for asymmetry index (norm of "
+ "mean direction vector); and N for the number of directions. For example:"
+ + "-output BN,BN-,BN+ requests the mean, min and max nearest-neighour "
+ "angles assuming a bipolar model."
+ + "-output UE,SH8,SYM requests the mean unipolar electrostatic energy, "
+ "condition number of SH fit at order 8, and the asymmetry index.";
+
ARGUMENTS
- + Argument ("dirs", "the text file containing the directions.").type_file_in();
+ + Argument ("dirs", "the text file or image containing the directions.").type_file_in();
+
+ OPTIONS
+ + Option ("output", "output selected metrics as a space-delimited list, "
+ "suitable for use in scripts. This will produce one line of values per "
+ "selected shell. Valid metrics are as specified in the description "
+ "above.")
+ + Argument ("list")
+ + DWI::ShellsOption
+ + DWI::GradImportOptions();
}
int precision = 6;
+void report (const std::string& title, Eigen::MatrixXd& directions);
+
+
+
+void run ()
+{
+ Eigen::MatrixXd directions;
+
+ try {
+ directions = DWI::Directions::load_cartesian (argument[0]);
+ }
+ catch (Exception& E) {
+ try {
+ directions = load_matrix (argument[0]);
+ }
+ catch (Exception& E) {
+ auto header = Header::open (argument[0]);
+ directions = DWI::get_valid_DW_scheme (header);
+ }
+ }
+
+ if (directions.cols() >= 4) {
+ int n_start = 0;
+ auto shells = DWI::Shells (directions).select_shells (false, false, false);
+ if (get_options ("shells").empty() && shells.has_bzero() && shells.count() > 1) {
+ n_start = 1;
+ if (get_options("output").empty())
+ print (std::string (argument[0]) + " (b=0) [ " + str(shells.smallest().count(), precision) + " volumes ]\n\n");
+ }
+
+
+ Eigen::MatrixXd dirs;
+
+ for (size_t n = n_start; n < shells.count(); ++n) {
+ dirs.resize (shells[n].count(), 3);
+ for (size_t idx = 0; idx < shells[n].count(); ++idx)
+ dirs.row (idx) = directions.row (shells[n].get_volumes()[idx]).head (3);
+ report (std::string (argument[0]) + " (b=" + str(shells[n].get_mean()) + ")", dirs);
+ }
+
+ }
+ else
+ report (argument[0], directions);
+}
-void report (const std::string& title, const Eigen::MatrixXd& directions)
+
+
+
+vector<double> summarise_NN (const vector<double>& NN)
{
+ double NN_min = std::numeric_limits<double>::max();
+ double NN_mean = 0.0;
+ double NN_max = 0.0;
+ for (auto a : NN) {
+ a = (180.0/Math::pi) * std::acos (a);
+ NN_mean += a;
+ NN_min = std::min (NN_min, a);
+ NN_max = std::max (NN_max, a);
+ }
+ NN_mean /= NN.size();
+
+ return { NN_mean, NN_min, NN_max };
+}
+
+
+
+
+
+
+
+vector<double> summarise_E (const vector<double>& E)
+{
+ double E_min = std::numeric_limits<double>::max();
+ double E_total = 0.0;
+ double E_max = 0.0;
+ for (auto e : E) {
+ E_total += e;
+ E_min = std::min (E_min, e);
+ E_max = std::max (E_max, e);
+ }
+
+ return { 0.5*E_total, E_total/E.size(), E_min, E_max };
+}
+
+
+
+class Metrics
+{ MEMALIGN (Metrics)
+ public:
+ vector<double> BN, UN, BE, UE, SH;
+ default_type ASYM;
+ size_t ndirs;
+};
+
+
+
+
+
+
+Metrics compute (Eigen::MatrixXd& directions)
+{
+ if (directions.cols() < 3)
+ throw Exception ("unexpected matrix size for scheme \"" + str(argument[0]) + "\"");
+ DWI::normalise_grad (directions);
+
vector<double> NN_bipolar (directions.rows(), -1.0);
vector<double> NN_unipolar (directions.rows(), -1.0);
@@ -52,7 +196,7 @@ void report (const std::string& title, const Eigen::MatrixXd& directions)
double cos_angle = directions.row(i).head(3).normalized().dot (directions.row(j).head(3).normalized());
NN_unipolar[i] = std::max (NN_unipolar[i], cos_angle);
NN_unipolar[j] = std::max (NN_unipolar[j], cos_angle);
- cos_angle = std::abs (cos_angle);
+ cos_angle = abs (cos_angle);
NN_bipolar[i] = std::max (NN_bipolar[i], cos_angle);
NN_bipolar[j] = std::max (NN_bipolar[j], cos_angle);
@@ -69,81 +213,93 @@ void report (const std::string& title, const Eigen::MatrixXd& directions)
}
}
+ Metrics metrics;
+ metrics.ndirs = directions.rows();
+ metrics.UN = summarise_NN (NN_unipolar);
+ metrics.BN = summarise_NN (NN_bipolar);
+ metrics.UE = summarise_E (E_unipolar);
+ metrics.BE = summarise_E (E_bipolar);
+ for (size_t lmax = 2; lmax <= Math::SH::LforN (directions.rows()); lmax += 2)
+ metrics.SH.push_back (DWI::condition_number_for_lmax (directions, lmax));
+ metrics.ASYM = directions.leftCols(3).colwise().mean().norm();
- auto report_NN = [](const vector<double>& NN) {
- double min = std::numeric_limits<double>::max();
- double mean = 0.0;
- double max = 0.0;
- for (auto a : NN) {
- a = (180.0/Math::pi) * std::acos (a);
- mean += a;
- min = std::min (min, a);
- max = std::max (max, a);
- }
- mean /= NN.size();
+ return metrics;
+}
- print (" nearest-neighbour angles: mean = " + str(mean, precision) + ", range [ " + str(min, precision) + " - " + str(max, precision) + " ]\n");
- };
- auto report_E = [](const vector<double>& E) {
- double min = std::numeric_limits<double>::max();
- double total = 0.0;
- double max = 0.0;
- for (auto e : E) {
- total += e;
- min = std::min (min, e);
- max = std::max (max, e);
+void output_selected (const Metrics& metrics, const std::string& selection)
+{
+ auto select = split (selection, ", \t\n", true);
+
+ for (const auto& x : select) {
+ const auto xl = lowercase(x);
+ if (xl == "uet") std::cout << metrics.UE[0] << " ";
+ else if (xl == "ue") std::cout << metrics.UE[1] << " ";
+ else if (xl == "ue-") std::cout << metrics.UE[2] << " ";
+ else if (xl == "ue+") std::cout << metrics.UE[3] << " ";
+ else if (xl == "bet") std::cout << metrics.BE[0] << " ";
+ else if (xl == "be") std::cout << metrics.BE[1] << " ";
+ else if (xl == "be-") std::cout << metrics.BE[2] << " ";
+ else if (xl == "be+") std::cout << metrics.BE[3] << " ";
+ else if (xl == "un") std::cout << metrics.UN[0] << " ";
+ else if (xl == "un-") std::cout << metrics.UN[1] << " ";
+ else if (xl == "un+") std::cout << metrics.UN[2] << " ";
+ else if (xl == "bn") std::cout << metrics.BN[0] << " ";
+ else if (xl == "bn-") std::cout << metrics.BN[1] << " ";
+ else if (xl == "bn+") std::cout << metrics.BN[2] << " ";
+ else if (xl == "asym") std::cout << metrics.ASYM << " ";
+ else if (xl == "n") std::cout << metrics.ndirs << " ";
+ else if (xl.substr(0,2) == "sh") {
+ size_t order = to<size_t>(x.substr(2));
+ if (order & 1U || order < 2)
+ throw Exception ("spherical harmonic order must be an even positive integer");
+ order = (order/2)-1;
+ if (order >= metrics.SH.size())
+ throw Exception ("spherical harmonic order requested is too large given number of directions");
+ std::cout << metrics.SH[order] << " ";
}
- print (" energy: total = " + str(0.5*total, precision) + ", mean = " + str(total/E.size(), precision) + ", range [ " + str(min, precision) + " - " + str(max, precision) + " ]\n");
- };
+ else
+ throw Exception ("unknown output specifier \"" + x + "\"");
+ }
+ std::cout << "\n";
+}
- print (title + " [ " + str(directions.rows(), precision) + " directions ]\n\n");
+void report (const std::string& title, Eigen::MatrixXd& directions)
+{
+ auto metrics = compute (directions);
- print (" Bipolar electrostatic repulsion model:\n");
- report_NN (NN_bipolar);
- report_E (E_bipolar);
+ auto opt = get_options ("output");
+ if (opt.size()) {
+ output_selected (metrics, opt[0][0]);
+ return;
+ }
- print ("\n Unipolar electrostatic repulsion model:\n");
- report_NN (NN_unipolar);
- report_E (E_unipolar);
+ std::string output = title + " [ " + str(metrics.ndirs, precision) + " directions ]\n";
- std::string lmax_results;
- for (size_t lmax = 2; lmax <= Math::SH::LforN (directions.rows()); lmax += 2)
- lmax_results += " " + str(DWI::condition_number_for_lmax (directions, lmax), precision);
- print ("\n Spherical Harmonic fit:\n condition numbers for lmax = " + str(2) + " -> "
- + str(Math::SH::LforN (directions.rows()), precision) + ":" + lmax_results + "\n\n");
-}
+ output += "\n Bipolar electrostatic repulsion model:\n";
+ output += " nearest-neighbour angles: mean = " + str(metrics.BN[0], precision) + ", range [ " + str(metrics.BN[1], precision) + " - " + str(metrics.BN[2], precision) + " ]\n";
+ output += " energy: total = " + str(metrics.BE[0], precision) + ", mean = " + str(metrics.BE[1], precision) + ", range [ " + str(metrics.BE[2], precision) + " - " + str(metrics.BE[3], precision) + " ]\n";
+ output += "\n Unipolar electrostatic repulsion model:\n";
+ output += " nearest-neighbour angles: mean = " + str(metrics.UN[0], precision) + ", range [ " + str(metrics.UN[1], precision) + " - " + str(metrics.UN[2], precision) + " ]\n";
+ output += " energy: total = " + str(metrics.UE[0], precision) + ", mean = " + str(metrics.UE[1], precision) + ", range [ " + str(metrics.UE[2], precision) + " - " + str(metrics.UE[3], precision) + " ]\n";
-void run ()
-{
- try {
- auto directions = DWI::Directions::load_cartesian (argument[0]);
- report (argument[0], directions);
- }
- catch (Exception& E) {
- auto directions = load_matrix (argument[0]);
- DWI::normalise_grad (directions);
- if (directions.cols() < 3)
- throw Exception ("unexpected matrix size for DW scheme \"" + str(argument[0]) + "\"");
-
- print (str(argument[0]) + " [ " + str(directions.rows()) + " volumes ]\n");
- DWI::Shells shells (directions);
-
- for (size_t n = 0; n < shells.count(); ++n) {
- Eigen::MatrixXd subset (shells[n].count(), 3);
- for (ssize_t i = 0; i < subset.rows(); ++i)
- subset.row(i) = directions.row(shells[n].get_volumes()[i]).head(3);
- report ("\nb = " + str(shells[n].get_mean(), precision), subset);
- }
- }
+ output += "\n Spherical Harmonic fit:\n condition numbers for lmax = 2 -> " + str(metrics.SH.size()*2) + ": " + str(metrics.SH, precision) + "\n";
+
+ output += "\n Asymmetry of sampling:\n norm of mean direction vector = " + str(metrics.ASYM, precision) + "\n";
+ if (metrics.ASYM >= 0.1)
+ output += std::string(" WARNING: sampling is ") + ( metrics.ASYM >= 0.4 ? "strongly" : "moderately" )
+ + " asymmetric - this may affect resiliance to eddy-current distortions\n";
+
+ output += "\n";
+ print (output);
}
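
For context on the refactor above: summarise_NN converts, for each direction, the largest cosine found against any other direction into an angle, and reports the mean/min/max in degrees. A self-contained sketch of the same computation in standard C++ (nn_angles is a hypothetical helper name, not part of dirstat, and std:: types stand in for the MRtrix3 vector alias and Math::pi):

    #include <algorithm>
    #include <cmath>
    #include <limits>
    #include <vector>

    // max_cosines[i] = largest dot product between direction i and its neighbours.
    // Returns { mean, min, max } nearest-neighbour angles in degrees.
    std::vector<double> nn_angles (const std::vector<double>& max_cosines)
    {
      const double pi = std::acos (-1.0);
      double lo = std::numeric_limits<double>::max(), hi = 0.0, mean = 0.0;
      for (double c : max_cosines) {
        const double angle = (180.0 / pi) * std::acos (c);
        mean += angle;
        lo = std::min (lo, angle);
        hi = std::max (hi, angle);
      }
      mean /= max_cosines.size();
      return { mean, lo, hi };
    }
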
diff --git a/cmd/dwi2adc.cpp b/cmd/dwi2adc.cpp
index 3b76ed1c0c..1c8976f9cb 100644
--- a/cmd/dwi2adc.cpp
+++ b/cmd/dwi2adc.cpp
@@ -1,14 +1,15 @@
-/* Copyright (c) 2008-2017 the MRtrix3 contributors.
+/*
+ * Copyright (c) 2008-2018 the MRtrix3 contributors.
*
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
- * file, you can obtain one at http://mozilla.org/MPL/2.0/.
+ * file, you can obtain one at http://mozilla.org/MPL/2.0/
*
- * MRtrix is distributed in the hope that it will be useful,
+ * MRtrix3 is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty
* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
*
- * For more details, see http://www.mrtrix.org/.
+ * For more details, see http://www.mrtrix.org/
*/
diff --git a/cmd/dwi2fod.cpp b/cmd/dwi2fod.cpp
index e6e1b3b4b2..fbb254c874 100644
--- a/cmd/dwi2fod.cpp
+++ b/cmd/dwi2fod.cpp
@@ -1,14 +1,15 @@
-/* Copyright (c) 2008-2017 the MRtrix3 contributors.
+/*
+ * Copyright (c) 2008-2018 the MRtrix3 contributors.
*
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
- * file, you can obtain one at http://mozilla.org/MPL/2.0/.
+ * file, you can obtain one at http://mozilla.org/MPL/2.0/
*
- * MRtrix is distributed in the hope that it will be useful,
+ * MRtrix3 is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty
* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
*
- * For more details, see http://www.mrtrix.org/.
+ * For more details, see http://www.mrtrix.org/
*/
@@ -91,7 +92,7 @@ void usage ()
OPTIONS
+ DWI::GradImportOptions()
- + DWI::ShellOption
+ + DWI::ShellsOption
+ CommonOptions
+ DWI::SDeconv::CSD_options
+ Stride::Options;
diff --git a/cmd/dwi2mask.cpp b/cmd/dwi2mask.cpp
index 9b1e146d29..1dac27690a 100644
--- a/cmd/dwi2mask.cpp
+++ b/cmd/dwi2mask.cpp
@@ -1,14 +1,15 @@
-/* Copyright (c) 2008-2017 the MRtrix3 contributors.
+/*
+ * Copyright (c) 2008-2018 the MRtrix3 contributors.
*
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
- * file, you can obtain one at http://mozilla.org/MPL/2.0/.
+ * file, you can obtain one at http://mozilla.org/MPL/2.0/
*
- * MRtrix is distributed in the hope that it will be useful,
+ * MRtrix3 is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty
* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
*
- * For more details, see http://www.mrtrix.org/.
+ * For more details, see http://www.mrtrix.org/
*/
diff --git a/cmd/dwi2noise.cpp b/cmd/dwi2noise.cpp
deleted file mode 100644
index d96eb64df8..0000000000
--- a/cmd/dwi2noise.cpp
+++ /dev/null
@@ -1,93 +0,0 @@
-/* Copyright (c) 2008-2017 the MRtrix3 contributors.
- *
- * This Source Code Form is subject to the terms of the Mozilla Public
- * License, v. 2.0. If a copy of the MPL was not distributed with this
- * file, you can obtain one at http://mozilla.org/MPL/2.0/.
- *
- * MRtrix is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
- *
- * For more details, see http://www.mrtrix.org/.
- */
-
-
-#include "command.h"
-#include "image.h"
-#include "phase_encoding.h"
-#include "adapter/extract.h"
-#include "dwi/gradient.h"
-#include "math/least_squares.h"
-#include "math/SH.h"
-
-#include "dwi/noise_estimator.h"
-
-using namespace MR;
-using namespace App;
-
-
-void usage ()
-{
-
- AUTHOR = "J-Donald Tournier (jdtournier@gmail.com)";
-
- SYNOPSIS = "Estimate noise level voxel-wise using residuals from a truncated SH fit";
-
- DESCRIPTION
- + "WARNING: This command is deprecated and may be removed in future releases. "
- "Try using the dwidenoise command with the -noise option instead.";
-
- ARGUMENTS
- + Argument ("dwi",
- "the input diffusion-weighted image.")
- .type_image_in ()
-
- + Argument ("noise",
- "the output noise map")
- .type_image_out ();
-
-
-
- OPTIONS
- + Option ("lmax",
- "set the maximum harmonic order for the output series. By default, the "
- "program will use the highest possible lmax given the number of "
- "diffusion-weighted images, up to a maximum of 8.")
- + Argument ("order").type_integer (0, 30)
-
- + DWI::GradImportOptions()
- + DWI::ShellOption;
-
-}
-
-
-using value_type = float;
-
-void run ()
-{
- WARN ("Command dwi2noise is deprecated. Try using dwidenoise with -noise option instead.");
-
- auto dwi_in = Image::open (argument[0]);
- const auto grad = DWI::get_valid_DW_scheme (dwi_in);
-
- vector dwis;
- Eigen::MatrixXd mapping;
- {
- dwis = DWI::Shells (grad).select_shells (true, false, true).largest().get_volumes();
- const auto dirs = DWI::gen_direction_matrix (grad, dwis);
- mapping = DWI::compute_SH2amp_mapping (dirs);
- }
-
- auto dwi = Adapter::make (dwi_in, 3, container_cast> (dwis));
-
- auto header = Header (dwi_in);
- header.ndim() = 3;
- header.datatype() = DataType::Float32;
- DWI::stash_DW_scheme (header, grad);
- PhaseEncoding::clear_scheme (header);
- auto noise = Image::create (argument[1], header);
-
- DWI::estimate_noise (dwi, noise, mapping);
-}
-
-
diff --git a/cmd/dwi2tensor.cpp b/cmd/dwi2tensor.cpp
index 717b214e54..4746503d86 100644
--- a/cmd/dwi2tensor.cpp
+++ b/cmd/dwi2tensor.cpp
@@ -1,14 +1,15 @@
-/* Copyright (c) 2008-2017 the MRtrix3 contributors.
+/*
+ * Copyright (c) 2008-2018 the MRtrix3 contributors.
*
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
- * file, you can obtain one at http://mozilla.org/MPL/2.0/.
+ * file, you can obtain one at http://mozilla.org/MPL/2.0/
*
- * MRtrix is distributed in the hope that it will be useful,
+ * MRtrix3 is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty
* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
*
- * For more details, see http://www.mrtrix.org/.
+ * For more details, see http://www.mrtrix.org/
*/
diff --git a/cmd/dwidenoise.cpp b/cmd/dwidenoise.cpp
index c18fd50ee5..a94c96f400 100644
--- a/cmd/dwidenoise.cpp
+++ b/cmd/dwidenoise.cpp
@@ -1,14 +1,15 @@
-/* Copyright (c) 2008-2017 the MRtrix3 contributors.
+/*
+ * Copyright (c) 2008-2018 the MRtrix3 contributors.
*
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
- * file, you can obtain one at http://mozilla.org/MPL/2.0/.
+ * file, you can obtain one at http://mozilla.org/MPL/2.0/
*
- * MRtrix is distributed in the hope that it will be useful,
+ * MRtrix3 is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty
* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
*
- * For more details, see http://www.mrtrix.org/.
+ * For more details, see http://www.mrtrix.org/
*/
diff --git a/cmd/dwiextract.cpp b/cmd/dwiextract.cpp
index e0209461ae..56e16c127b 100644
--- a/cmd/dwiextract.cpp
+++ b/cmd/dwiextract.cpp
@@ -1,14 +1,15 @@
-/* Copyright (c) 2008-2017 the MRtrix3 contributors.
+/*
+ * Copyright (c) 2008-2018 the MRtrix3 contributors.
*
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
- * file, you can obtain one at http://mozilla.org/MPL/2.0/.
+ * file, you can obtain one at http://mozilla.org/MPL/2.0/
*
- * MRtrix is distributed in the hope that it will be useful,
+ * MRtrix3 is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty
* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
*
- * For more details, see http://www.mrtrix.org/.
+ * For more details, see http://www.mrtrix.org/
*/
@@ -29,7 +30,7 @@ using value_type = float;
void usage ()
{
- AUTHOR = "David Raffelt (david.raffelt@florey.edu.au) and Thijs Dhollander (thijs.dhollander@gmail.com)";
+ AUTHOR = "David Raffelt (david.raffelt@florey.edu.au) and Thijs Dhollander (thijs.dhollander@gmail.com) and Robert E. Smith (robert.smith@florey.edu.au)";
SYNOPSIS = "Extract diffusion-weighted volumes, b=0 volumes, or certain shells from a DWI dataset";
@@ -42,13 +43,14 @@ void usage ()
+ Option ("no_bzero", "Output only non b=0 volumes (default, if -singleshell is not specified).")
+ Option ("singleshell", "Force a single-shell (single non b=0 shell) output. This will include b=0 volumes, if present. Use with -bzero to enforce presence of b=0 volumes (error if not present) or with -no_bzero to exclude them.")
+ DWI::GradImportOptions()
- + DWI::ShellOption
+ + DWI::ShellsOption
+ + DWI::GradExportOptions()
+ PhaseEncoding::ImportOptions
+ PhaseEncoding::SelectOptions
+ Stride::Options;
}
-void run()
+void run()
{
auto input_header = Header::open (argument[0]);
auto input_image = input_header.get_image();
@@ -61,12 +63,12 @@ void run()
// of all dwis or all bzeros i.e. don't initialise the Shells class
vector volumes;
bool bzero = get_options ("bzero").size();
- if (get_options ("shell").size() || get_options ("singleshell").size()) {
+ if (get_options ("shells").size() || get_options ("singleshell").size()) {
DWI::Shells shells (grad);
shells.select_shells (get_options ("singleshell").size(),get_options ("bzero").size(),get_options ("no_bzero").size());
for (size_t s = 0; s != shells.count(); ++s) {
DEBUG ("Including data from shell b=" + str(shells[s].get_mean()) + " +- " + str(shells[s].get_stdev()));
- for (const auto v : shells[s].get_volumes())
+ for (const auto v : shells[s].get_volumes())
volumes.push_back (v);
}
bzero = (shells.count() == 1 && shells.has_bzero());
@@ -104,7 +106,7 @@ void run()
}
}
if (filter.size() == 4) {
- if (std::abs (pe_scheme(i, 3) - filter[3]) > 5e-3) {
+ if (abs (pe_scheme(i, 3) - filter[3]) > 5e-3) {
keep = false;
break;
}
@@ -139,6 +141,7 @@ void run()
}
auto output_image = Image::create (argument[1], header);
+ DWI::export_grad_commandline (header);
auto input_volumes = Adapter::make (input_image, 3, volumes);
threaded_copy_with_progress_message ("extracting volumes", input_volumes, output_image);
diff --git a/cmd/dwinormalise.cpp b/cmd/dwinormalise.cpp
index 86e0ba14d5..f22f54adbd 100644
--- a/cmd/dwinormalise.cpp
+++ b/cmd/dwinormalise.cpp
@@ -1,14 +1,15 @@
-/* Copyright (c) 2008-2017 the MRtrix3 contributors.
+/*
+ * Copyright (c) 2008-2018 the MRtrix3 contributors.
*
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
- * file, you can obtain one at http://mozilla.org/MPL/2.0/.
+ * file, you can obtain one at http://mozilla.org/MPL/2.0/
*
- * MRtrix is distributed in the hope that it will be useful,
+ * MRtrix3 is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty
* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
*
- * For more details, see http://www.mrtrix.org/.
+ * For more details, see http://www.mrtrix.org/
*/
diff --git a/cmd/fixel2sh.cpp b/cmd/fixel2sh.cpp
index d092c46ff4..7f950aeca2 100644
--- a/cmd/fixel2sh.cpp
+++ b/cmd/fixel2sh.cpp
@@ -1,14 +1,15 @@
-/* Copyright (c) 2008-2017 the MRtrix3 contributors.
+/*
+ * Copyright (c) 2008-2018 the MRtrix3 contributors.
*
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
- * file, you can obtain one at http://mozilla.org/MPL/2.0/.
+ * file, you can obtain one at http://mozilla.org/MPL/2.0/
*
- * MRtrix is distributed in the hope that it will be useful,
+ * MRtrix3 is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty
* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
*
- * For more details, see http://www.mrtrix.org/.
+ * For more details, see http://www.mrtrix.org/
*/
diff --git a/cmd/fixel2tsf.cpp b/cmd/fixel2tsf.cpp
index 0c25de23ce..dc949e2b38 100644
--- a/cmd/fixel2tsf.cpp
+++ b/cmd/fixel2tsf.cpp
@@ -1,14 +1,15 @@
-/* Copyright (c) 2008-2017 the MRtrix3 contributors.
+/*
+ * Copyright (c) 2008-2018 the MRtrix3 contributors.
*
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
- * file, you can obtain one at http://mozilla.org/MPL/2.0/.
+ * file, you can obtain one at http://mozilla.org/MPL/2.0/
*
- * MRtrix is distributed in the hope that it will be useful,
+ * MRtrix3 is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty
* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
*
- * For more details, see http://www.mrtrix.org/.
+ * For more details, see http://www.mrtrix.org/
*/
@@ -98,7 +99,7 @@ void run ()
while (reader (tck)) {
SetVoxelDir dixels;
mapper (tck, dixels);
- vector<float> scalars (tck.size(), 0.0);
+ vector<float> scalars (tck.size(), 0.0f);
for (size_t p = 0; p < tck.size(); ++p) {
voxel_pos = transform.scanner2voxel * tck[p].cast<default_type> ();
for (SetVoxelDir::const_iterator d = dixels.begin(); d != dixels.end(); ++d) {
@@ -106,7 +107,7 @@ void run ()
assign_pos_of (*d).to (in_index_image);
Eigen::Vector3f dir = d->get_dir().cast<float>();
dir.normalize();
- float largest_dp = 0.0;
+ float largest_dp = 0.0f;
int32_t closest_fixel_index = -1;
in_index_image.index(3) = 0;
@@ -116,7 +117,7 @@ void run ()
for (size_t fixel = 0; fixel < num_fixels_in_voxel; ++fixel) {
in_directions_image.index(0) = offset + fixel;
- float dp = std::abs (dir.dot (Eigen::Vector3f (in_directions_image.row(1))));
+ const float dp = abs (dir.dot (Eigen::Vector3f (in_directions_image.row(1))));
if (dp > largest_dp) {
largest_dp = dp;
closest_fixel_index = fixel;
@@ -124,9 +125,13 @@ void run ()
}
if (largest_dp > angular_threshold_dp) {
in_data_image.index(0) = offset + closest_fixel_index;
- scalars[p] = in_data_image.value();
+ const float value = in_data_image.value();
+ if (std::isfinite (value))
+ scalars[p] = in_data_image.value();
+ else
+ scalars[p] = 0.0f;
} else {
- scalars[p] = 0.0;
+ scalars[p] = 0.0f;
}
break;
}
diff --git a/cmd/fixel2voxel.cpp b/cmd/fixel2voxel.cpp
index db080226ab..fcd3b3fa3f 100644
--- a/cmd/fixel2voxel.cpp
+++ b/cmd/fixel2voxel.cpp
@@ -1,14 +1,15 @@
-/* Copyright (c) 2008-2017 the MRtrix3 contributors.
+/*
+ * Copyright (c) 2008-2018 the MRtrix3 contributors.
*
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
- * file, you can obtain one at http://mozilla.org/MPL/2.0/.
+ * file, you can obtain one at http://mozilla.org/MPL/2.0/
*
- * MRtrix is distributed in the hope that it will be useful,
+ * MRtrix3 is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty
* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
*
- * For more details, see http://www.mrtrix.org/.
+ * For more details, see http://www.mrtrix.org/
*/
@@ -68,7 +69,7 @@ void usage ()
+ "- A 4D scalar image of fixel values with one 3D volume per fixel: split_data"
+ "- A 4D image of fixel directions, stored as three 3D volumes per fixel direction: split_dir";
- REFERENCES
+ REFERENCES
+ "* Reference for 'complexity' operation:\n"
"Riffert, T. W.; Schreiber, J.; Anwander, A. & Knosche, T. R. "
"Beyond Fractional Anisotropy: Extraction of bundle-specific structural metrics from crossing fibre models. "
@@ -303,8 +304,8 @@ class AbsMax : protected Base
{
default_type absmax = -std::numeric_limits<default_type>::infinity();
for (auto f = Base::Loop (index) (data); f; ++f) {
- if (!f.padding() && std::abs (data.value()) > absmax)
- absmax = std::abs (data.value());
+ if (!f.padding() && abs (float(data.value())) > absmax)
+ absmax = abs (float(data.value()));
}
out.value() = std::isfinite (absmax) ? absmax : 0.0;
}
@@ -321,7 +322,7 @@ class MagMax : protected Base
{
default_type magmax = 0.0;
for (auto f = Base::Loop (index) (data); f; ++f) {
- if (!f.padding() && std::abs (data.value()) > std::abs (magmax))
+ if (!f.padding() && abs (float(data.value())) > abs (magmax))
magmax = data.value();
}
out.value() = std::isfinite (magmax) ? magmax : 0.0;
@@ -391,12 +392,12 @@ class DEC_unit : protected Base
if (vol.valid()) {
for (auto f = Base::Loop (index) (data, vol, dir); f; ++f) {
if (!f.padding())
- sum_dec += Eigen::Vector3 (std::abs (dir.row(1)[0]), std::abs (dir.row(1)[1]), std::abs (dir.row(1)[2])) * data.value() * vol.value();
+ sum_dec += Eigen::Vector3 (abs (dir.row(1)[0]), abs (dir.row(1)[1]), abs (dir.row(1)[2])) * data.value() * vol.value();
}
} else {
for (auto f = Base::Loop (index) (data, dir); f; ++f) {
if (!f.padding())
- sum_dec += Eigen::Vector3 (std::abs (dir.row(1)[0]), std::abs (dir.row(1)[1]), std::abs (dir.row(1)[2])) * data.value();
+ sum_dec += Eigen::Vector3 (abs (dir.row(1)[0]), abs (dir.row(1)[1]), abs (dir.row(1)[2])) * data.value();
}
}
if ((sum_dec.array() != 0.0).any())
@@ -425,7 +426,7 @@ class DEC_scaled : protected Base
default_type sum_volume = 0.0;
for (auto f = Base::Loop (index) (data, vol, dir); f; ++f) {
if (!f.padding()) {
- sum_dec += Eigen::Vector3 (std::abs (dir.row(1)[0]), std::abs (dir.row(1)[1]), std::abs (dir.row(1)[2])) * data.value() * vol.value();
+ sum_dec += Eigen::Vector3 (abs (dir.row(1)[0]), abs (dir.row(1)[1]), abs (dir.row(1)[2])) * data.value() * vol.value();
sum_volume += vol.value();
sum_value += vol.value() * data.value();
}
@@ -436,7 +437,7 @@ class DEC_scaled : protected Base
} else {
for (auto f = Base::Loop (index) (data, dir); f; ++f) {
if (!f.padding()) {
- sum_dec += Eigen::Vector3 (std::abs (dir.row(1)[0]), std::abs (dir.row(1)[1]), std::abs (dir.row(1)[2])) * data.value();
+ sum_dec += Eigen::Vector3 (abs (dir.row(1)[0]), abs (dir.row(1)[1]), abs (dir.row(1)[2])) * data.value();
sum_value += data.value();
}
}
diff --git a/cmd/fixelcfestats.cpp b/cmd/fixelcfestats.cpp
index 4d1b461df9..41fb19c568 100644
--- a/cmd/fixelcfestats.cpp
+++ b/cmd/fixelcfestats.cpp
@@ -1,14 +1,15 @@
-/* Copyright (c) 2008-2017 the MRtrix3 contributors.
+/*
+ * Copyright (c) 2008-2018 the MRtrix3 contributors.
*
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
- * file, you can obtain one at http://mozilla.org/MPL/2.0/.
+ * file, you can obtain one at http://mozilla.org/MPL/2.0/
*
- * MRtrix is distributed in the hope that it will be useful,
+ * MRtrix3 is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty
* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
*
- * For more details, see http://www.mrtrix.org/.
+ * For more details, see http://www.mrtrix.org/
*/
@@ -73,7 +74,7 @@ void usage ()
"NeuroImage, 2011, 54(3), 2006-19\n" ;
ARGUMENTS
- + Argument ("in_fixel_directory", "the fixel directory containing the data files for each subject (after obtaining fixel correspondence").type_file_in ()
+ + Argument ("in_fixel_directory", "the fixel directory containing the data files for each subject (after obtaining fixel correspondence").type_directory_in()
+ Argument ("subjects", "a text file listing the subject identifiers (one per line). This should correspond with the filenames "
"in the fixel directory (including the file extension), and be listed in the same order as the rows of the design matrix.").type_image_in ()
diff --git a/cmd/fixelconvert.cpp b/cmd/fixelconvert.cpp
index 2d6ff2e7a8..3621e1252d 100644
--- a/cmd/fixelconvert.cpp
+++ b/cmd/fixelconvert.cpp
@@ -1,14 +1,15 @@
-/* Copyright (c) 2008-2017 the MRtrix3 contributors.
+/*
+ * Copyright (c) 2008-2018 the MRtrix3 contributors.
*
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
- * file, you can obtain one at http://mozilla.org/MPL/2.0/.
+ * file, you can obtain one at http://mozilla.org/MPL/2.0/
*
- * MRtrix is distributed in the hope that it will be useful,
+ * MRtrix3 is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty
* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
*
- * For more details, see http://www.mrtrix.org/.
+ * For more details, see http://www.mrtrix.org/
*/
@@ -43,8 +44,8 @@ void usage ()
SYNOPSIS = "Convert between the old format fixel image (.msf / .msh) and the new fixel directory format";
ARGUMENTS
- + Argument ("fixel_in", "the input fixel file / directory.").type_text()
- + Argument ("fixel_out", "the output fixel file / directory.").type_text();
+ + Argument ("fixel_in", "the input fixel file / directory.").type_various()
+ + Argument ("fixel_out", "the output fixel file / directory.").type_various();
OPTIONS
+ OptionGroup ("Options for converting from old to new format")
@@ -53,7 +54,7 @@ void usage ()
+ Option ("nii", "output the index, directions and data file in NIfTI format instead of .mif")
+ Option ("out_size", "also output the 'size' field from the old format")
+ Option ("template", "specify an existing fixel directory (in the new format) to which the new output should conform")
- + Argument ("path").type_text()
+ + Argument ("path").type_directory_in()
+ OptionGroup ("Options for converting from new to old format")
+ Option ("value", "nominate the data file to import to the 'value' field in the old format")
diff --git a/cmd/fixelcorrespondence.cpp b/cmd/fixelcorrespondence.cpp
index f2e8635e68..5e03b46b19 100644
--- a/cmd/fixelcorrespondence.cpp
+++ b/cmd/fixelcorrespondence.cpp
@@ -1,14 +1,15 @@
-/* Copyright (c) 2008-2017 the MRtrix3 contributors.
+/*
+ * Copyright (c) 2008-2018 the MRtrix3 contributors.
*
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
- * file, you can obtain one at http://mozilla.org/MPL/2.0/.
+ * file, you can obtain one at http://mozilla.org/MPL/2.0/
*
- * MRtrix is distributed in the hope that it will be useful,
+ * MRtrix3 is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty
* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
*
- * For more details, see http://www.mrtrix.org/.
+ * For more details, see http://www.mrtrix.org/
*/
@@ -36,9 +37,9 @@ void usage ()
ARGUMENTS
+ Argument ("subject_data", "the input subject fixel data file. This should be a file inside the fixel directory").type_image_in ()
- + Argument ("template_directory", "the input template fixel directory.").type_image_in ()
- + Argument ("output_directory", "the output fixel directory.").type_text()
- + Argument ("output_data", "the name of the output fixel data file. This will be placed in the output fixel directory").type_image_out ();
+ + Argument ("template_directory", "the input template fixel directory.").type_directory_in()
+ + Argument ("output_directory", "the fixel directory where the output file will be written.").type_text()
+ + Argument ("output_data", "the name of the output fixel data file. This will be placed in the output fixel directory").type_text();
OPTIONS
+ Option ("angle", "the max angle threshold for computing inter-subject fixel correspondence (Default: " + str(DEFAULT_ANGLE_THRESHOLD, 2) + " degrees)")
@@ -75,7 +76,7 @@ void run ()
output_data_header.size(1) = 1;
auto output_data = Image::create (Path::join (output_fixel_directory, argument[3]), output_data_header);
- for (auto i = Loop ("mapping subject fixels data to template fixels", template_index, 0, 3)(template_index, subject_index); i; ++i) {
+ for (auto i = Loop ("mapping subject fixel data to template fixels", template_index, 0, 3)(template_index, subject_index); i; ++i) {
template_index.index(3) = 0;
uint32_t nfixels_template = template_index.value();
template_index.index(3) = 1;
@@ -99,7 +100,7 @@ void run ()
templatedir.normalize();
Eigen::Vector3f subjectdir = subject_directions.row(1);
subjectdir.normalize();
- float dp = std::abs (templatedir.dot (subjectdir));
+ float dp = abs (templatedir.dot (subjectdir));
if (dp > largest_dp) {
largest_dp = dp;
index_of_closest_fixel = s;
diff --git a/cmd/fixelcrop.cpp b/cmd/fixelcrop.cpp
index a54b40b0b0..d129e42bfb 100644
--- a/cmd/fixelcrop.cpp
+++ b/cmd/fixelcrop.cpp
@@ -1,14 +1,15 @@
-/* Copyright (c) 2008-2017 the MRtrix3 contributors.
+/*
+ * Copyright (c) 2008-2018 the MRtrix3 contributors.
*
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
- * file, you can obtain one at http://mozilla.org/MPL/2.0/.
+ * file, you can obtain one at http://mozilla.org/MPL/2.0/
*
- * MRtrix is distributed in the hope that it will be useful,
+ * MRtrix3 is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty
* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
*
- * For more details, see http://www.mrtrix.org/.
+ * For more details, see http://www.mrtrix.org/
*/
@@ -36,10 +37,10 @@ void usage ()
ARGUMENTS
+ Argument ("input_fixel_directory", "input fixel directory, all data files and directions "
- "file will be cropped and saved in the output fixel directory").type_text ()
+ "file will be cropped and saved in the output fixel directory").type_directory_in()
+ Argument ("input_fixel_mask", "the input fixel data file defining which fixels to crop. "
"Fixels with zero values will be removed").type_image_in ()
- + Argument ("output_fixel_directory", "the output directory to store the cropped directions and data files").type_text ();
+ + Argument ("output_fixel_directory", "the output directory to store the cropped directions and data files").type_directory_out();
}
diff --git a/cmd/fixelreorient.cpp b/cmd/fixelreorient.cpp
index 73a21628ed..77969842e0 100644
--- a/cmd/fixelreorient.cpp
+++ b/cmd/fixelreorient.cpp
@@ -1,14 +1,15 @@
-/* Copyright (c) 2008-2017 the MRtrix3 contributors.
+/*
+ * Copyright (c) 2008-2018 the MRtrix3 contributors.
*
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
- * file, you can obtain one at http://mozilla.org/MPL/2.0/.
+ * file, you can obtain one at http://mozilla.org/MPL/2.0/
*
- * MRtrix is distributed in the hope that it will be useful,
+ * MRtrix3 is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty
* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
*
- * For more details, see http://www.mrtrix.org/.
+ * For more details, see http://www.mrtrix.org/
*/
@@ -37,13 +38,13 @@ void usage ()
"then re-normalising the vector.";
ARGUMENTS
- + Argument ("fixel_in", "the fixel directory").type_text ()
+ + Argument ("fixel_in", "the input fixel directory").type_directory_in()
+ Argument ("warp", "a 4D deformation field used to perform reorientation. "
"Reorientation is performed by applying the Jacobian affine transform in each voxel in the warp, "
"then re-normalising the vector representing the fixel direction").type_image_in ()
- + Argument ("fixel_out", "the output fixel directory. If the the input and output directorys are the same, the existing directions file will "
- "be replaced (providing the --force option is supplied). If a new directory is supplied then the fixel directions and all "
- "other fixel data will be copied to the new directory.").type_text ();
+ + Argument ("fixel_out", "the output fixel directory. If the the input and output directories are the same, the existing directions file will "
+ "be replaced (providing the -force option is supplied). If a new directory is supplied then the fixel directions and all "
+ "other fixel data will be copied to the new directory.").type_directory_out();
}
diff --git a/cmd/fod2dec.cpp b/cmd/fod2dec.cpp
index d4d8582b94..a5286b56bf 100644
--- a/cmd/fod2dec.cpp
+++ b/cmd/fod2dec.cpp
@@ -1,14 +1,15 @@
-/* Copyright (c) 2008-2017 the MRtrix3 contributors.
+/*
+ * Copyright (c) 2008-2018 the MRtrix3 contributors.
*
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
- * file, you can obtain one at http://mozilla.org/MPL/2.0/.
+ * file, you can obtain one at http://mozilla.org/MPL/2.0/
*
- * MRtrix is distributed in the hope that it will be useful,
+ * MRtrix3 is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty
* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
*
- * For more details, see http://www.mrtrix.org/.
+ * For more details, see http://www.mrtrix.org/
*/
@@ -44,7 +45,7 @@ void usage ()
SYNOPSIS = "Generate FOD-based DEC maps, with optional panchromatic sharpening and/or luminance/perception correction";
DESCRIPTION
- + "By default, the FOD-based DEC is weighted by the integral of the FOD. To weight by another scalar map, use the outputmap option. This option can also be used for panchromatic sharpening, e.g., by supplying a T1 (or other sensible) anatomical volume with a higher spatial resolution.";
+ + "By default, the FOD-based DEC is weighted by the integral of the FOD. To weight by another scalar map, use the -contrast option. This option can also be used for panchromatic sharpening, e.g., by supplying a T1 (or other sensible) anatomical volume with a higher spatial resolution.";
REFERENCES
+ "Dhollander T, Smith RE, Tournier JD, Jeurissen B, Connelly A. " // Internal
@@ -62,21 +63,22 @@ void usage ()
+ Option ("mask","Only perform DEC computation within the specified mask image.")
+ Argument ("image").type_image_in()
- + Option ("threshold","FOD amplitudes below the threshold value are considered zero.")
- + Argument ("value").type_float()
-
- + Option ("outputmap","Weight the computed DEC map by a provided outputmap. If the outputmap has a different grid, the DEC map is first resliced and renormalised. To achieve panchromatic sharpening, provide an image with a higher spatial resolution than the input FOD image; e.g., a T1 anatomical volume. Only the DEC is subject to the mask, so as to allow for partial colouring of the outputmap. \nDefault when this option is *not* provided: integral of input FOD, subject to the same mask/threshold as used for DEC computation.")
+ + Option ("contrast","Weight the computed DEC map by the provided image contrast. If the contrast has a different image grid, the DEC map is first resliced and renormalised. To achieve panchromatic sharpening, provide an image with a higher spatial resolution than the input FOD image; e.g., a T1 anatomical volume. Only the DEC is subject to the mask, so as to allow for partial colouring of the contrast image. \nDefault when this option is *not* provided: integral of input FOD, subject to the same mask/threshold as used for DEC computation.")
+ Argument ("image").type_image_in()
- + Option ("no_weight","Do not weight the DEC map (reslicing and renormalising still possible by explicitly providing the outputmap option as a template).")
-
+ Option ("lum","Correct for luminance/perception, using default values Cr,Cg,Cb = " + str(DEFAULT_LUM_CR, 2) + "," + str(DEFAULT_LUM_CG, 2) + "," + str(DEFAULT_LUM_CB, 2) + " and gamma = " + str(DEFAULT_LUM_GAMMA, 2) + " (*not* correcting is the theoretical equivalent of Cr,Cg,Cb = 1,1,1 and gamma = 2).")
+ Option ("lum_coefs","The coefficients Cr,Cg,Cb to correct for luminance/perception. \nNote: this implicitly switches on luminance/perception correction, using a default gamma = " + str(DEFAULT_LUM_GAMMA, 2) + " unless specified otherwise.")
+ Argument ("values").type_sequence_float()
+ Option ("lum_gamma","The gamma value to correct for luminance/perception. \nNote: this implicitly switches on luminance/perception correction, using a default Cr,Cg,Cb = " + str(DEFAULT_LUM_CR, 2) + "," + str(DEFAULT_LUM_CG, 2) + "," + str(DEFAULT_LUM_CB, 2) + " unless specified otherwise.")
- + Argument ("value").type_float();
+ + Argument ("value").type_float()
+
+ + Option ("threshold","FOD amplitudes below the threshold value are considered zero.")
+ + Argument ("value").type_float()
+
+ + Option ("no_weight","Do not weight the DEC map; just output the unweighted colours. Reslicing and renormalising of colours will still happen when providing the -contrast option as a template.");
+
}
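
The -lum, -lum_coefs and -lum_gamma options above control a luminance/perception correction of the DEC colour, parameterised by coefficients Cr,Cg,Cb and a gamma exponent. As a rough sketch of the general idea only (the exact correction performed by fod2dec is not reproduced here; all names and the target-luminance formulation below are assumptions for illustration):

```cpp
#include <Eigen/Dense>
#include <cmath>

// Illustrative only: scale an RGB colour so that its gamma-weighted luminance
// (Cr*r^gamma + Cg*g^gamma + Cb*b^gamma) matches a target value.
Eigen::Vector3d correct_luminance (Eigen::Vector3d rgb,
                                   const Eigen::Vector3d& coefs,   // Cr, Cg, Cb
                                   double gamma,
                                   double target_luminance)
{
  const double lum = coefs.dot (rgb.array().pow (gamma).matrix());
  if (lum > 0.0)
    rgb *= std::pow (target_luminance / lum, 1.0 / gamma);
  return rgb;
}
```
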
@@ -241,7 +243,7 @@ void run () {
bool needtoslice = false;
auto map_hdr = Header();
- auto opto = get_options ("outputmap");
+ auto opto = get_options ("contrast");
if (opto.size()) {
map_hdr = Header::open(opto[0][0]);
if (!dimensions_match(map_hdr, fod_hdr, 0, 3) ||
diff --git a/cmd/fod2fixel.cpp b/cmd/fod2fixel.cpp
index 50c4e3857b..c833761aa1 100644
--- a/cmd/fod2fixel.cpp
+++ b/cmd/fod2fixel.cpp
@@ -1,14 +1,15 @@
-/* Copyright (c) 2008-2017 the MRtrix3 contributors.
+/*
+ * Copyright (c) 2008-2018 the MRtrix3 contributors.
*
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
- * file, you can obtain one at http://mozilla.org/MPL/2.0/.
+ * file, you can obtain one at http://mozilla.org/MPL/2.0/
*
- * MRtrix is distributed in the hope that it will be useful,
+ * MRtrix3 is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty
* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
*
- * For more details, see http://www.mrtrix.org/.
+ * For more details, see http://www.mrtrix.org/
*/
@@ -61,34 +62,39 @@ void usage ()
AUTHOR = "Robert E. Smith (robert.smith@florey.edu.au)";
- SYNOPSIS = "Use a fast-marching level-set method to segment fibre orientation distributions, and save parameters of interest as fixel images";
+ SYNOPSIS = "Perform segmentation of continuous Fibre Orientation Distributions (FODs) to produce discrete fixels";
- REFERENCES
+ REFERENCES
+ "* Reference for the FOD segmentation method:\n"
"Smith, R. E.; Tournier, J.-D.; Calamante, F. & Connelly, A. " // Internal
"SIFT: Spherical-deconvolution informed filtering of tractograms. "
"NeuroImage, 2013, 67, 298-312 (Appendix 2)"
- + "* Reference for Apparent Fibre Density:\n"
+ + "* Reference for Apparent Fibre Density (AFD):\n"
"Raffelt, D.; Tournier, J.-D.; Rose, S.; Ridgway, G.R.; Henderson, R.; Crozier, S.; Salvado, O.; Connelly, A. " // Internal
"Apparent Fibre Density: a novel measure for the analysis of diffusion-weighted magnetic resonance images."
"Neuroimage, 2012, 15;59(4), 3976-94.";
ARGUMENTS
+ Argument ("fod", "the input fod image.").type_image_in ()
- + Argument ("fixel_directory", "the output fixel directory").type_text();
+ + Argument ("fixel_directory", "the output fixel directory").type_directory_out();
OPTIONS
- + Option ("mask",
- "only perform computation within the specified binary brain mask image.")
- + Argument ("image").type_image_in()
-
+ OutputOptions
+ FMLSSegmentOption
+ + OptionGroup ("Other options for fod2fixel")
+
+ + Option ("mask",
+ "only perform computation within the specified binary brain mask image.")
+ + Argument ("image").type_image_in()
+
+ + Option ("maxnum", "maximum number of fixels to output for any particular voxel (default: no limit)")
+ + Argument ("number").type_integer(1)
+
+ Option ("nii", "output the directions and index file in nii format (instead of the default mif)")
+ Option ("dirpeak", "define the fixel direction as the peak lobe direction as opposed to the lobe mean");
@@ -100,22 +106,18 @@ void usage ()
class Segmented_FOD_receiver { MEMALIGN(Segmented_FOD_receiver)
public:
- Segmented_FOD_receiver (const Header& header, bool dir_as_peak = false) :
- H (header), n_fixels (0), dir_as_peak (dir_as_peak)
- {
- }
+ Segmented_FOD_receiver (const Header& header, const uint32_t maxnum = 0, bool dir_as_peak = false) :
+ H (header), fixel_count (0), max_per_voxel (maxnum), dir_as_peak (dir_as_peak) { }
void commit ();
void set_fixel_directory_output (const std::string& path) { fixel_directory_path = path; }
void set_index_output (const std::string& path) { index_path = path; }
void set_directions_output (const std::string& path) { dir_path = path; }
- void set_afd_output (const std::string& path) { afd_path = path; }
+ void set_afd_output (const std::string& path) { afd_path = path; }
void set_peak_output (const std::string& path) { peak_path = path; }
void set_disp_output (const std::string& path) { disp_path = path; }
- size_t num_outputs() const;
-
bool operator() (const FOD_lobes&);
@@ -125,56 +127,45 @@ class Segmented_FOD_receiver { MEMALIGN(Segmented_FOD_receiver)
Eigen::Vector3f dir;
float integral;
float peak_value;
- Primitive_FOD_lobe (Eigen::Vector3f dir, float integral, float peak_value)
- : dir (dir), integral (integral), peak_value (peak_value) {}
+ Primitive_FOD_lobe (Eigen::Vector3f dir, float integral, float peak_value) :
+ dir (dir), integral (integral), peak_value (peak_value) {}
};
class Primitive_FOD_lobes : public vector { MEMALIGN (Primitive_FOD_lobes)
public:
- Eigen::Array3i vox;
-
- Primitive_FOD_lobes (const FOD_lobes& in, bool asdf) : vox (in.vox)
+ Primitive_FOD_lobes (const FOD_lobes& in, const uint32_t maxcount, bool use_peak_dir) :
+ vox (in.vox)
{
- for (const FOD_lobe& lobe : in) {
- if (asdf)
+ const uint32_t N = maxcount ? std::min (uint32_t(in.size()), maxcount) : in.size();
+ for (uint32_t i = 0; i != N; ++i) {
+ const FOD_lobe& lobe (in[i]);
+ if (use_peak_dir)
this->emplace_back (lobe.get_peak_dir(0).cast<float>(), lobe.get_integral(), lobe.get_max_peak_value());
else
this->emplace_back (lobe.get_mean_dir().cast<float>(), lobe.get_integral(), lobe.get_max_peak_value());
}
}
+ Eigen::Array3i vox;
};
Header H;
std::string fixel_directory_path, index_path, dir_path, afd_path, peak_path, disp_path;
vector lobes;
- uint64_t n_fixels;
+ uint32_t fixel_count;
+ uint32_t max_per_voxel;
bool dir_as_peak;
};
-size_t Segmented_FOD_receiver::num_outputs() const
-{
- size_t count = 1;
- if (dir_path.size()) ++count;
- if (afd_path.size()) ++count;
- if (peak_path.size()) ++count;
- if (disp_path.size()) ++count;
- return count;
-}
-
-
-
bool Segmented_FOD_receiver::operator() (const FOD_lobes& in)
{
-
- if (size_t n = in.size()) {
- lobes.emplace_back (in, dir_as_peak);
- n_fixels += n;
+ if (in.size()) {
+ lobes.emplace_back (in, max_per_voxel, dir_as_peak);
+ fixel_count += lobes.back().size();
}
-
return true;
}
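
The new -maxnum option caps the number of fixels retained per voxel: Primitive_FOD_lobes copies at most maxcount lobes from the segmenter output, and fixel_count now tallies the lobes actually kept rather than the number received. A minimal sketch of the capping step (illustrative names; it assumes, as the code above does, that the segmenter delivers lobes in its preferred order so truncation keeps the leading ones):

```cpp
#include <algorithm>
#include <cstdint>
#include <vector>

// Keep at most 'maxcount' leading lobes; maxcount == 0 means "no limit",
// mirroring the truncation performed in the Primitive_FOD_lobes constructor.
template <class Lobe>
std::vector<Lobe> cap_lobes (const std::vector<Lobe>& in, const uint32_t maxcount)
{
  const uint32_t N = maxcount ? std::min (uint32_t(in.size()), maxcount) : in.size();
  return std::vector<Lobe> (in.begin(), in.begin() + N);
}
```
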
@@ -182,7 +173,7 @@ bool Segmented_FOD_receiver::operator() (const FOD_lobes& in)
void Segmented_FOD_receiver::commit ()
{
- if (!lobes.size() || !n_fixels || !num_outputs())
+ if (!lobes.size() || !fixel_count)
return;
using DataImage = Image;
@@ -197,7 +188,7 @@ void Segmented_FOD_receiver::commit ()
std::unique_ptr disp_image;
auto index_header (H);
- index_header.keyval()[Fixel::n_fixels_key] = str(n_fixels);
+ index_header.keyval()[Fixel::n_fixels_key] = str(fixel_count);
index_header.ndim() = 4;
index_header.size(3) = 2;
index_header.datatype() = DataType::from();
@@ -206,7 +197,7 @@ void Segmented_FOD_receiver::commit ()
auto fixel_data_header (H);
fixel_data_header.ndim() = 3;
- fixel_data_header.size(0) = n_fixels;
+ fixel_data_header.size(0) = fixel_count;
fixel_data_header.size(2) = 1;
fixel_data_header.datatype() = DataType::Float32;
fixel_data_header.datatype().set_byte_order_native();
@@ -289,7 +280,7 @@ void Segmented_FOD_receiver::commit ()
lobe_index ++;
}
- assert (offset == n_fixels);
+ assert (offset == fixel_count);
}
@@ -301,9 +292,10 @@ void run ()
Math::SH::check (H);
auto fod_data = H.get_image();
- const bool dir_as_peak = get_options ("dirpeak").size() ? true : false;
+ const bool dir_as_peak = get_options ("dirpeak").size();
+ const uint32_t maxnum = get_option_value ("maxnum", 0);
- Segmented_FOD_receiver receiver (H, dir_as_peak);
+ Segmented_FOD_receiver receiver (H, maxnum, dir_as_peak);
auto& fixel_directory_path = argument[1];
receiver.set_fixel_directory_output (fixel_directory_path);
@@ -318,7 +310,7 @@ void run ()
receiver.set_directions_output (default_directions_filename);
auto
- opt = get_options ("afd"); if (opt.size()) receiver.set_afd_output (opt[0][0]);
+ opt = get_options ("afd"); if (opt.size()) receiver.set_afd_output (opt[0][0]);
opt = get_options ("peak"); if (opt.size()) receiver.set_peak_output (opt[0][0]);
opt = get_options ("disp"); if (opt.size()) receiver.set_disp_output (opt[0][0]);
@@ -330,14 +322,11 @@ void run ()
throw Exception ("Cannot use image \"" + str(opt[0][0]) + "\" as mask image; dimensions do not match FOD image");
}
- if (!receiver.num_outputs ())
- throw Exception ("Nothing to do; please specify at least one output image type");
-
Fixel::check_fixel_directory (fixel_directory_path, true, true);
FMLS::FODQueueWriter writer (fod_data, mask);
- const DWI::Directions::Set dirs (1281);
+ const DWI::Directions::FastLookupSet dirs (1281);
Segmenter fmls (dirs, Math::SH::LforN (H.size(3)));
load_fmls_thresholds (fmls);
diff --git a/cmd/label2colour.cpp b/cmd/label2colour.cpp
index 4f9cde6033..74be0e40af 100644
--- a/cmd/label2colour.cpp
+++ b/cmd/label2colour.cpp
@@ -1,14 +1,15 @@
-/* Copyright (c) 2008-2017 the MRtrix3 contributors.
+/*
+ * Copyright (c) 2008-2018 the MRtrix3 contributors.
*
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
- * file, you can obtain one at http://mozilla.org/MPL/2.0/.
+ * file, you can obtain one at http://mozilla.org/MPL/2.0/
*
- * MRtrix is distributed in the hope that it will be useful,
+ * MRtrix3 is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty
* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
*
- * For more details, see http://www.mrtrix.org/.
+ * For more details, see http://www.mrtrix.org/
*/
diff --git a/cmd/label2mesh.cpp b/cmd/label2mesh.cpp
index f7dccc0ed1..b2ee669f5f 100644
--- a/cmd/label2mesh.cpp
+++ b/cmd/label2mesh.cpp
@@ -1,14 +1,15 @@
-/* Copyright (c) 2008-2017 the MRtrix3 contributors.
+/*
+ * Copyright (c) 2008-2018 the MRtrix3 contributors.
*
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
- * file, you can obtain one at http://mozilla.org/MPL/2.0/.
+ * file, you can obtain one at http://mozilla.org/MPL/2.0/
*
- * MRtrix is distributed in the hope that it will be useful,
+ * MRtrix3 is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty
* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
*
- * For more details, see http://www.mrtrix.org/.
+ * For more details, see http://www.mrtrix.org/
*/
diff --git a/cmd/labelconvert.cpp b/cmd/labelconvert.cpp
index 796d4416cb..ea2b0e8d67 100644
--- a/cmd/labelconvert.cpp
+++ b/cmd/labelconvert.cpp
@@ -1,14 +1,15 @@
-/* Copyright (c) 2008-2017 the MRtrix3 contributors.
+/*
+ * Copyright (c) 2008-2018 the MRtrix3 contributors.
*
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
- * file, you can obtain one at http://mozilla.org/MPL/2.0/.
+ * file, you can obtain one at http://mozilla.org/MPL/2.0/
*
- * MRtrix is distributed in the hope that it will be useful,
+ * MRtrix3 is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty
* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
*
- * For more details, see http://www.mrtrix.org/.
+ * For more details, see http://www.mrtrix.org/
*/
diff --git a/cmd/maskdump.cpp b/cmd/maskdump.cpp
index 6b7c4231cd..9adaf6a2ec 100644
--- a/cmd/maskdump.cpp
+++ b/cmd/maskdump.cpp
@@ -1,14 +1,15 @@
-/* Copyright (c) 2008-2017 the MRtrix3 contributors.
+/*
+ * Copyright (c) 2008-2018 the MRtrix3 contributors.
*
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
- * file, you can obtain one at http://mozilla.org/MPL/2.0/.
+ * file, you can obtain one at http://mozilla.org/MPL/2.0/
*
- * MRtrix is distributed in the hope that it will be useful,
+ * MRtrix3 is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty
* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
*
- * For more details, see http://www.mrtrix.org/.
+ * For more details, see http://www.mrtrix.org/
*/
diff --git a/cmd/maskfilter.cpp b/cmd/maskfilter.cpp
index ef6592f829..060c5f2aa9 100644
--- a/cmd/maskfilter.cpp
+++ b/cmd/maskfilter.cpp
@@ -1,14 +1,15 @@
-/* Copyright (c) 2008-2017 the MRtrix3 contributors.
+/*
+ * Copyright (c) 2008-2018 the MRtrix3 contributors.
*
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
- * file, you can obtain one at http://mozilla.org/MPL/2.0/.
+ * file, you can obtain one at http://mozilla.org/MPL/2.0/
*
- * MRtrix is distributed in the hope that it will be useful,
+ * MRtrix3 is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty
* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
*
- * For more details, see http://www.mrtrix.org/.
+ * For more details, see http://www.mrtrix.org/
*/
diff --git a/cmd/mesh2pve.cpp b/cmd/mesh2voxel.cpp
similarity index 88%
rename from cmd/mesh2pve.cpp
rename to cmd/mesh2voxel.cpp
index 2b62552f3c..273057c7ac 100644
--- a/cmd/mesh2pve.cpp
+++ b/cmd/mesh2voxel.cpp
@@ -1,14 +1,15 @@
-/* Copyright (c) 2008-2017 the MRtrix3 contributors.
+/*
+ * Copyright (c) 2008-2018 the MRtrix3 contributors.
*
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
- * file, you can obtain one at http://mozilla.org/MPL/2.0/.
+ * file, you can obtain one at http://mozilla.org/MPL/2.0/
*
- * MRtrix is distributed in the hope that it will be useful,
+ * MRtrix3 is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty
* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
*
- * For more details, see http://www.mrtrix.org/.
+ * For more details, see http://www.mrtrix.org/
*/
diff --git a/cmd/meshconvert.cpp b/cmd/meshconvert.cpp
index c5e412c61b..76b3a30c50 100644
--- a/cmd/meshconvert.cpp
+++ b/cmd/meshconvert.cpp
@@ -1,14 +1,15 @@
-/* Copyright (c) 2008-2017 the MRtrix3 contributors.
+/*
+ * Copyright (c) 2008-2018 the MRtrix3 contributors.
*
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
- * file, you can obtain one at http://mozilla.org/MPL/2.0/.
+ * file, you can obtain one at http://mozilla.org/MPL/2.0/
*
- * MRtrix is distributed in the hope that it will be useful,
+ * MRtrix3 is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty
* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
*
- * For more details, see http://www.mrtrix.org/.
+ * For more details, see http://www.mrtrix.org/
*/
diff --git a/cmd/meshfilter.cpp b/cmd/meshfilter.cpp
index 477f424d90..d0f6f18a65 100644
--- a/cmd/meshfilter.cpp
+++ b/cmd/meshfilter.cpp
@@ -1,14 +1,15 @@
-/* Copyright (c) 2008-2017 the MRtrix3 contributors.
+/*
+ * Copyright (c) 2008-2018 the MRtrix3 contributors.
*
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
- * file, you can obtain one at http://mozilla.org/MPL/2.0/.
+ * file, you can obtain one at http://mozilla.org/MPL/2.0/
*
- * MRtrix is distributed in the hope that it will be useful,
+ * MRtrix3 is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty
* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
*
- * For more details, see http://www.mrtrix.org/.
+ * For more details, see http://www.mrtrix.org/
*/
diff --git a/cmd/mraverageheader.cpp b/cmd/mraverageheader.cpp
index 8f0630eb76..aadae11321 100644
--- a/cmd/mraverageheader.cpp
+++ b/cmd/mraverageheader.cpp
@@ -1,14 +1,15 @@
-/* Copyright (c) 2008-2017 the MRtrix3 contributors.
+/*
+ * Copyright (c) 2008-2018 the MRtrix3 contributors.
*
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
- * file, you can obtain one at http://mozilla.org/MPL/2.0/.
+ * file, you can obtain one at http://mozilla.org/MPL/2.0/
*
- * MRtrix is distributed in the hope that it will be useful,
+ * MRtrix3 is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty
* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
*
- * For more details, see http://www.mrtrix.org/.
+ * For more details, see http://www.mrtrix.org/
*/
diff --git a/cmd/mrcalc.cpp b/cmd/mrcalc.cpp
index d2849fa2b1..0b2e409e82 100644
--- a/cmd/mrcalc.cpp
+++ b/cmd/mrcalc.cpp
@@ -1,14 +1,15 @@
-/* Copyright (c) 2008-2017 the MRtrix3 contributors.
+/*
+ * Copyright (c) 2008-2018 the MRtrix3 contributors.
*
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
- * file, you can obtain one at http://mozilla.org/MPL/2.0/.
+ * file, you can obtain one at http://mozilla.org/MPL/2.0/
*
- * MRtrix is distributed in the hope that it will be useful,
+ * MRtrix3 is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty
* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
*
- * For more details, see http://www.mrtrix.org/.
+ * For more details, see http://www.mrtrix.org/
*/
@@ -35,7 +36,7 @@ DESCRIPTION
+ "This command will only compute per-voxel operations. "
"Use 'mrmath' to compute summary statistics across images or "
"along image axes."
-
+
+ "This command uses a stack-based syntax, with operators "
"(specified using options) operating on the top-most entries "
"(i.e. images or values) in the stack. Operands (values or "
@@ -44,20 +45,20 @@ DESCRIPTION
"as options) operate on and consume the top-most entries in "
"the stack, and push their output as a new entry on the stack. "
"For example:"
-
+
+ " $ mrcalc a.mif 2 -mult r.mif"
-
+
+ "performs the operation r = 2*a for every voxel a,r in "
"images a.mif and r.mif respectively. Similarly:"
-
+
+ " $ mrcalc a.mif -neg b.mif -div -exp 9.3 -mult r.mif"
-
+
+ "performs the operation r = 9.3*exp(-a/b), and:"
-
+
+ " $ mrcalc a.mif b.mif -add c.mif d.mif -mult 4.2 -add -div r.mif"
-
+
+ "performs r = (a+b)/(c*d+4.2)."
-
+
+ "As an additional feature, this command will allow images with different "
"dimensions to be processed, provided they satisfy the following "
"conditions: for each axis, the dimensions match if they are the same size, "
@@ -74,7 +75,7 @@ DESCRIPTION
ARGUMENTS
+ Argument ("operand", "an input image, intensity value, or the special keywords "
"'rand' (random number between 0 and 1) or 'randn' (random number from unit "
- "std.dev. normal distribution).").type_text().allow_multiple();
+ "std.dev. normal distribution).").type_various().allow_multiple();
OPTIONS
+ OptionGroup ("Unary operators")
@@ -130,6 +131,7 @@ OPTIONS
+ OptionGroup ("Ternary operators")
+ Option ("if", "if first operand is true (non-zero), return second operand, otherwise return third operand").allow_multiple()
+ + Option ("replace", "Wherever first operand is equal to the second operand, replace with third operand").allow_multiple()
+ DataType::options();
@@ -213,15 +215,19 @@ class LoadedImage { NOMEMALIGN
class StackEntry { NOMEMALIGN
public:
- StackEntry (const char* entry) :
- arg (entry) { }
+ StackEntry (const char* entry) :
+ arg (entry),
+ rng_gaussian (false),
+ image_is_complex (false) { }
- StackEntry (Evaluator* evaluator_p) :
- arg (nullptr),
- evaluator (evaluator_p) { }
+ StackEntry (Evaluator* evaluator_p) :
+ arg (nullptr),
+ evaluator (evaluator_p),
+ rng_gaussian (false),
+ image_is_complex (false) { }
void load () {
- if (!arg)
+ if (!arg)
return;
auto search = image_list.find (arg);
if (search != image_list.end()) {
@@ -236,15 +242,19 @@ class StackEntry { NOMEMALIGN
image.reset (new Image (header.get_image()));
image_list.insert (std::make_pair (arg, LoadedImage (image, image_is_complex)));
}
- catch (Exception) {
- std::string a = lowercase (arg);
- if (a == "nan") { value = std::numeric_limits::quiet_NaN(); }
- else if (a == "-nan") { value = -std::numeric_limits::quiet_NaN(); }
- else if (a == "inf") { value = std::numeric_limits::infinity(); }
- else if (a == "-inf") { value = -std::numeric_limits::infinity(); }
- else if (a == "rand") { value = 0.0; rng.reset (new Math::RNG()); rng_gausssian = false; }
- else if (a == "randn") { value = 0.0; rng.reset (new Math::RNG()); rng_gausssian = true; }
- else { value = to (arg); }
+ catch (Exception&) {
+ try {
+ std::string a = lowercase (arg);
+ if (a == "nan") { value = std::numeric_limits::quiet_NaN(); }
+ else if (a == "-nan") { value = -std::numeric_limits::quiet_NaN(); }
+ else if (a == "inf") { value = std::numeric_limits::infinity(); }
+ else if (a == "-inf") { value = -std::numeric_limits::infinity(); }
+ else if (a == "rand") { value = 0.0; rng.reset (new Math::RNG()); rng_gaussian = false; }
+ else if (a == "randn") { value = 0.0; rng.reset (new Math::RNG()); rng_gaussian = true; }
+ else { value = to (arg); }
+ } catch (Exception&) {
+ throw Exception (std::string ("Could not interpret string \"") + arg + "\" as either an image path or a numerical value");
+ }
}
}
arg = nullptr;
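
The revised StackEntry::load() above wraps the numeric fallback in its own try/catch, so a token that is neither an openable image nor a parseable number produces one explicit error instead of a confusing image-open failure. The same pattern in isolation (standard library only; names and the stub below are assumptions, the real code parses into MRtrix's complex_type):

```cpp
#include <stdexcept>
#include <string>

// Placeholder: stands in for opening the token as an image (Header::open etc.);
// it always fails here so that the numeric fallback is exercised.
static double open_as_image (const std::string& arg)
{
  throw std::runtime_error ("cannot open image \"" + arg + "\"");
}

// Interpret 'arg' as an image first, then as a number; if both fail, raise one
// clear error, mirroring the nested try/catch added in StackEntry::load().
double load_operand (const std::string& arg)
{
  try {
    return open_as_image (arg);
  }
  catch (std::exception&) {
    try {
      return std::stod (arg);
    }
    catch (std::exception&) {
      throw std::runtime_error ("Could not interpret string \"" + arg
          + "\" as either an image path or a numerical value");
    }
  }
}
```
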
@@ -255,7 +265,7 @@ class StackEntry { NOMEMALIGN
std::shared_ptr<Image<complex_type>> image;
copy_ptr rng;
complex_type value;
- bool rng_gausssian;
+ bool rng_gaussian;
bool image_is_complex;
bool is_complex () const;
@@ -269,8 +279,8 @@ std::map StackEntry::image_list;
class Evaluator { NOMEMALIGN
- public:
- Evaluator (const std::string& name, const char* format_string, bool complex_maps_to_real = false, bool real_maps_to_complex = false) :
+ public:
+ Evaluator (const std::string& name, const char* format_string, bool complex_maps_to_real = false, bool real_maps_to_complex = false) :
id (name),
format (format_string),
ZtoR (complex_maps_to_real),
@@ -294,8 +304,8 @@ class Evaluator { NOMEMALIGN
virtual Chunk& evaluate (Chunk& a, Chunk& b, Chunk& c) const { throw Exception ("operation \"" + id + "\" not supported!"); return a; }
virtual bool is_complex () const {
- for (size_t n = 0; n < operands.size(); ++n)
- if (operands[n].is_complex())
+ for (size_t n = 0; n < operands.size(); ++n)
+ if (operands[n].is_complex())
return !ZtoR;
return RtoZ;
}
@@ -319,7 +329,7 @@ inline Chunk& StackEntry::evaluate (ThreadLocalStorage& storage) const
if (evaluator) return evaluator->evaluate (storage);
if (rng) {
Chunk& chunk = storage.next();
- if (rng_gausssian) {
+ if (rng_gaussian) {
std::normal_distribution dis (0.0, 1.0);
for (size_t n = 0; n < chunk.size(); ++n)
chunk[n] = dis (*rng);
@@ -360,15 +370,15 @@ inline void replace (std::string& orig, size_t n, const std::string& value)
// to make sure full operation is recorded, even for scalar operations that
// get evaluated there and then and so get left out if the string is created
// later:
-std::string operation_string (const StackEntry& entry)
+std::string operation_string (const StackEntry& entry)
{
if (entry.image)
return entry.image->name();
else if (entry.rng)
- return entry.rng_gausssian ? "randn()" : "rand()";
+ return entry.rng_gaussian ? "randn()" : "rand()";
else if (entry.evaluator) {
std::string s = entry.evaluator->format;
- for (size_t n = 0; n < entry.evaluator->operands.size(); ++n)
+ for (size_t n = 0; n < entry.evaluator->operands.size(); ++n)
replace (s, n, operation_string (entry.evaluator->operands[n]));
return s;
}
@@ -382,23 +392,23 @@ std::string operation_string (const StackEntry& entry)
template <class Operation>
class UnaryEvaluator : public Evaluator { NOMEMALIGN
public:
- UnaryEvaluator (const std::string& name, Operation operation, const StackEntry& operand) :
- Evaluator (name, operation.format, operation.ZtoR, operation.RtoZ),
- op (operation) {
+ UnaryEvaluator (const std::string& name, Operation operation, const StackEntry& operand) :
+ Evaluator (name, operation.format, operation.ZtoR, operation.RtoZ),
+ op (operation) {
operands.push_back (operand);
}
Operation op;
- virtual Chunk& evaluate (Chunk& in) const {
- if (operands[0].is_complex())
+ virtual Chunk& evaluate (Chunk& in) const {
+ if (operands[0].is_complex())
for (size_t n = 0; n < in.size(); ++n)
in[n] = op.Z (in[n]);
- else
+ else
for (size_t n = 0; n < in.size(); ++n)
in[n] = op.R (in[n].real());
- return in;
+ return in;
}
};
@@ -410,9 +420,9 @@ class UnaryEvaluator : public Evaluator { NOMEMALIGN
template <class Operation>
class BinaryEvaluator : public Evaluator { NOMEMALIGN
public:
- BinaryEvaluator (const std::string& name, Operation operation, const StackEntry& operand1, const StackEntry& operand2) :
+ BinaryEvaluator (const std::string& name, Operation operation, const StackEntry& operand1, const StackEntry& operand2) :
Evaluator (name, operation.format, operation.ZtoR, operation.RtoZ),
- op (operation) {
+ op (operation) {
operands.push_back (operand1);
operands.push_back (operand2);
}
@@ -422,15 +432,15 @@ class BinaryEvaluator : public Evaluator { NOMEMALIGN
virtual Chunk& evaluate (Chunk& a, Chunk& b) const {
Chunk& out (a.size() ? a : b);
if (operands[0].is_complex() || operands[1].is_complex()) {
- for (size_t n = 0; n < out.size(); ++n)
+ for (size_t n = 0; n < out.size(); ++n)
out[n] = op.Z (
- a.size() ? a[n] : a.value,
+ a.size() ? a[n] : a.value,
b.size() ? b[n] : b.value );
}
else {
- for (size_t n = 0; n < out.size(); ++n)
+ for (size_t n = 0; n < out.size(); ++n)
out[n] = op.R (
- a.size() ? a[n].real() : a.value.real(),
+ a.size() ? a[n].real() : a.value.real(),
b.size() ? b[n].real() : b.value.real() );
}
return out;
@@ -443,9 +453,9 @@ class BinaryEvaluator : public Evaluator { NOMEMALIGN
template <class Operation>
class TernaryEvaluator : public Evaluator { NOMEMALIGN
public:
- TernaryEvaluator (const std::string& name, Operation operation, const StackEntry& operand1, const StackEntry& operand2, const StackEntry& operand3) :
+ TernaryEvaluator (const std::string& name, Operation operation, const StackEntry& operand1, const StackEntry& operand2, const StackEntry& operand3) :
Evaluator (name, operation.format, operation.ZtoR, operation.RtoZ),
- op (operation) {
+ op (operation) {
operands.push_back (operand1);
operands.push_back (operand2);
operands.push_back (operand3);
@@ -456,17 +466,17 @@ class TernaryEvaluator : public Evaluator { NOMEMALIGN
virtual Chunk& evaluate (Chunk& a, Chunk& b, Chunk& c) const {
Chunk& out (a.size() ? a : (b.size() ? b : c));
if (operands[0].is_complex() || operands[1].is_complex() || operands[2].is_complex()) {
- for (size_t n = 0; n < out.size(); ++n)
+ for (size_t n = 0; n < out.size(); ++n)
out[n] = op.Z (
a.size() ? a[n] : a.value,
b.size() ? b[n] : b.value,
c.size() ? c[n] : c.value );
}
else {
- for (size_t n = 0; n < out.size(); ++n)
+ for (size_t n = 0; n < out.size(); ++n)
out[n] = op.R (
- a.size() ? a[n].real() : a.value.real(),
- b.size() ? b[n].real() : b.value.real(),
+ a.size() ? a[n].real() : a.value.real(),
+ b.size() ? b[n].real() : b.value.real(),
c.size() ? c[n].real() : c.value.real() );
}
return out;
@@ -481,7 +491,7 @@ class TernaryEvaluator : public Evaluator { NOMEMALIGN
template <class Operation>
void unary_operation (const std::string& operation_name, vector<StackEntry>& stack, Operation operation)
{
- if (stack.empty())
+ if (stack.empty())
throw Exception ("no operand in stack for operation \"" + operation_name + "\"!");
StackEntry& a (stack[stack.size()-1]);
a.load();
@@ -506,7 +516,7 @@ void unary_operation (const std::string& operation_name, vector& sta
template <class Operation>
void binary_operation (const std::string& operation_name, vector<StackEntry>& stack, Operation operation)
{
- if (stack.size() < 2)
+ if (stack.size() < 2)
throw Exception ("not enough operands in stack for operation \"" + operation_name + "\"");
StackEntry& a (stack[stack.size()-2]);
StackEntry& b (stack[stack.size()-1]);
@@ -518,7 +528,7 @@ void binary_operation (const std::string& operation_name, vector& st
stack.back() = entry;
}
else {
- a.value = ( a.value.imag() == 0.0 && b.value.imag() == 0.0 ?
+ a.value = ( a.value.imag() == 0.0 && b.value.imag() == 0.0 ?
operation.R (a.value.real(), b.value.real()) :
operation.Z (a.value, b.value) );
stack.pop_back();
@@ -531,7 +541,7 @@ void binary_operation (const std::string& operation_name, vector& st
template <class Operation>
void ternary_operation (const std::string& operation_name, vector<StackEntry>& stack, Operation operation)
{
- if (stack.size() < 3)
+ if (stack.size() < 3)
throw Exception ("not enough operands in stack for operation \"" + operation_name + "\"");
StackEntry& a (stack[stack.size()-3]);
StackEntry& b (stack[stack.size()-2]);
@@ -546,7 +556,7 @@ void ternary_operation (const std::string& operation_name, vector& s
stack.back() = entry;
}
else {
- a.value = ( a.value.imag() == 0.0 && b.value.imag() == 0.0 && c.value.imag() == 0.0 ?
+ a.value = ( a.value.imag() == 0.0 && b.value.imag() == 0.0 && c.value.imag() == 0.0 ?
operation.R (a.value.real(), b.value.real(), c.value.real()) :
operation.Z (a.value, b.value, c.value) );
stack.pop_back();
@@ -563,7 +573,7 @@ void ternary_operation (const std::string& operation_name, vector& s
**********************************************************************/
-void get_header (const StackEntry& entry, Header& header)
+void get_header (const StackEntry& entry, Header& header)
{
if (entry.evaluator) {
for (size_t n = 0; n < entry.evaluator->operands.size(); ++n)
@@ -571,7 +581,7 @@ void get_header (const StackEntry& entry, Header& header)
return;
}
- if (!entry.image)
+ if (!entry.image)
return;
if (header.ndim() == 0) {
@@ -579,17 +589,17 @@ void get_header (const StackEntry& entry, Header& header)
return;
}
- if (header.ndim() < entry.image->ndim())
+ if (header.ndim() < entry.image->ndim())
header.ndim() = entry.image->ndim();
for (size_t n = 0; n < std::min (header.ndim(), entry.image->ndim()); ++n) {
if (header.size(n) > 1 && entry.image->size(n) > 1 && header.size(n) != entry.image->size(n))
throw Exception ("dimensions of input images do not match - aborting");
- if (!transforms_match (header, *(entry.image)) && !transform_mis_match_reported) {
+ if (!voxel_grids_match_in_scanner_space (header, *(entry.image), 1.0e-4) && !transform_mis_match_reported) {
WARN ("header transformations of input images do not match");
transform_mis_match_reported = true;
}
header.size(n) = std::max (header.size(n), entry.image->size(n));
- if (!std::isfinite (header.spacing(n)))
+ if (!std::isfinite (header.spacing(n)))
header.spacing(n) = entry.image->spacing(n);
}
@@ -610,6 +620,12 @@ void get_header (const StackEntry& entry, Header& header)
PhaseEncoding::clear_scheme (header);
}
}
+
+ auto slice_encoding_it = entry.image->keyval().find ("SliceEncodingDirection");
+ if (slice_encoding_it != entry.image->keyval().end()) {
+ if (header.keyval()["SliceEncodingDirection"] != slice_encoding_it->second)
+ header.keyval().erase (header.keyval().find ("SliceEncodingDirection"));
+ }
}
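
The new block in get_header() drops the SliceEncodingDirection key from the output header whenever an input disagrees with the value accumulated so far, so mrcalc never propagates a slice-encoding direction that is only valid for some of its inputs. A hedged sketch of the general reconciliation rule (plain std::map; key name as in the hunk above, helper name assumed):

```cpp
#include <map>
#include <string>

using KeyValues = std::map<std::string, std::string>;

// Keep a header key only while every contributing input agrees on its value;
// on the first conflict, erase it from the merged key-value set.
void reconcile_key (KeyValues& merged, const KeyValues& input, const std::string& key)
{
  const auto it = input.find (key);
  if (it == input.end())
    return;
  const auto existing = merged.find (key);
  if (existing == merged.end())
    merged[key] = it->second;            // first occurrence: adopt it
  else if (existing->second != it->second)
    merged.erase (existing);             // conflict: drop the key entirely
}
```
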
@@ -621,7 +637,7 @@ class ThreadFunctor { NOMEMALIGN
public:
ThreadFunctor (
const vector& inner_axes,
- const StackEntry& top_of_stack,
+ const StackEntry& top_of_stack,
Image& output_image) :
top_entry (top_of_stack),
image (output_image),
@@ -660,7 +676,7 @@ class ThreadFunctor { NOMEMALIGN
Chunk& chunk = top_entry.evaluate (storage);
auto value = chunk.cbegin();
- for (auto l = loop (image); l; ++l)
+ for (auto l = loop (image); l; ++l)
image.value() = *(value++);
}
@@ -677,14 +693,14 @@ class ThreadFunctor { NOMEMALIGN
-void run_operations (const vector<StackEntry>& stack)
+void run_operations (const vector<StackEntry>& stack)
{
Header header;
get_header (stack[0], header);
if (header.ndim() == 0) {
DEBUG ("no valid images supplied - assuming calculator mode");
- if (stack.size() != 1)
+ if (stack.size() != 1)
throw Exception ("too many operands left on stack!");
assert (!stack[0].evaluator);
@@ -694,10 +710,10 @@ void run_operations (const vector& stack)
return;
}
- if (stack.size() == 1)
+ if (stack.size() == 1)
throw Exception ("output image not specified");
- if (stack.size() > 2)
+ if (stack.size() > 2)
throw Exception ("too many operands left on stack!");
if (!stack[1].arg)
@@ -772,8 +788,8 @@ class OpTernary : public OpBase { NOMEMALIGN
class OpAbs : public OpUnary { NOMEMALIGN
public:
OpAbs () : OpUnary ("|%1|", true) { }
- complex_type R (real_type v) const { return std::abs (v); }
- complex_type Z (complex_type v) const { return std::abs (v); }
+ complex_type R (real_type v) const { return abs (v); }
+ complex_type Z (complex_type v) const { return abs (v); }
};
class OpNeg : public OpUnary { NOMEMALIGN
@@ -785,7 +801,7 @@ class OpNeg : public OpUnary { NOMEMALIGN
class OpSqrt : public OpUnary { NOMEMALIGN
public:
- OpSqrt () : OpUnary ("sqrt (%1)") { }
+ OpSqrt () : OpUnary ("sqrt (%1)") { }
complex_type R (real_type v) const { return std::sqrt (v); }
complex_type Z (complex_type v) const { return std::sqrt (v); }
};
@@ -813,14 +829,14 @@ class OpLog10 : public OpUnary { NOMEMALIGN
class OpCos : public OpUnary { NOMEMALIGN
public:
- OpCos () : OpUnary ("cos (%1)") { }
+ OpCos () : OpUnary ("cos (%1)") { }
complex_type R (real_type v) const { return std::cos (v); }
complex_type Z (complex_type v) const { return std::cos (v); }
};
class OpSin : public OpUnary { NOMEMALIGN
public:
- OpSin () : OpUnary ("sin (%1)") { }
+ OpSin () : OpUnary ("sin (%1)") { }
complex_type R (real_type v) const { return std::sin (v); }
complex_type Z (complex_type v) const { return std::sin (v); }
};
@@ -848,7 +864,7 @@ class OpSinh : public OpUnary { NOMEMALIGN
class OpTanh : public OpUnary { NOMEMALIGN
public:
- OpTanh () : OpUnary ("tanh (%1)") { }
+ OpTanh () : OpUnary ("tanh (%1)") { }
complex_type R (real_type v) const { return std::tanh (v); }
complex_type Z (complex_type v) const { return std::tanh (v); }
};
@@ -861,7 +877,7 @@ class OpAcos : public OpUnary { NOMEMALIGN
class OpAsin : public OpUnary { NOMEMALIGN
public:
- OpAsin () : OpUnary ("asin (%1)") { }
+ OpAsin () : OpUnary ("asin (%1)") { }
complex_type R (real_type v) const { return std::asin (v); }
};
@@ -873,7 +889,7 @@ class OpAtan : public OpUnary { NOMEMALIGN
class OpAcosh : public OpUnary { NOMEMALIGN
public:
- OpAcosh () : OpUnary ("acosh (%1)") { }
+ OpAcosh () : OpUnary ("acosh (%1)") { }
complex_type R (real_type v) const { return std::acosh (v); }
};
@@ -892,13 +908,13 @@ class OpAtanh : public OpUnary { NOMEMALIGN
class OpRound : public OpUnary { NOMEMALIGN
public:
- OpRound () : OpUnary ("round (%1)") { }
+ OpRound () : OpUnary ("round (%1)") { }
complex_type R (real_type v) const { return std::round (v); }
};
class OpCeil : public OpUnary { NOMEMALIGN
public:
- OpCeil () : OpUnary ("ceil (%1)") { }
+ OpCeil () : OpUnary ("ceil (%1)") { }
complex_type R (real_type v) const { return std::ceil (v); }
};
@@ -928,7 +944,7 @@ class OpPhase : public OpUnary { NOMEMALIGN
class OpConj : public OpUnary { NOMEMALIGN
public:
- OpConj () : OpUnary ("conj (%1)") { }
+ OpConj () : OpUnary ("conj (%1)") { }
complex_type Z (complex_type v) const { return std::conj (v); }
};
@@ -960,14 +976,14 @@ class OpFinite : public OpUnary { NOMEMALIGN
class OpAdd : public OpBinary { NOMEMALIGN
public:
- OpAdd () : OpBinary ("(%1 + %2)") { }
+ OpAdd () : OpBinary ("(%1 + %2)") { }
complex_type R (real_type a, real_type b) const { return a+b; }
complex_type Z (complex_type a, complex_type b) const { return a+b; }
};
class OpSubtract : public OpBinary { NOMEMALIGN
public:
- OpSubtract () : OpBinary ("(%1 - %2)") { }
+ OpSubtract () : OpBinary ("(%1 - %2)") { }
complex_type R (real_type a, real_type b) const { return a-b; }
complex_type Z (complex_type a, complex_type b) const { return a-b; }
};
@@ -981,7 +997,7 @@ class OpMultiply : public OpBinary { NOMEMALIGN
class OpDivide : public OpBinary { NOMEMALIGN
public:
- OpDivide () : OpBinary ("(%1 / %2)") { }
+ OpDivide () : OpBinary ("(%1 / %2)") { }
complex_type R (real_type a, real_type b) const { return a/b; }
complex_type Z (complex_type a, complex_type b) const { return a/b; }
};
@@ -1001,7 +1017,7 @@ class OpMin : public OpBinary { NOMEMALIGN
class OpMax : public OpBinary { NOMEMALIGN
public:
- OpMax () : OpBinary ("max (%1, %2)") { }
+ OpMax () : OpBinary ("max (%1, %2)") { }
complex_type R (real_type a, real_type b) const { return std::max (a, b); }
};
@@ -1013,7 +1029,7 @@ class OpLessThan : public OpBinary { NOMEMALIGN
class OpGreaterThan : public OpBinary { NOMEMALIGN
public:
- OpGreaterThan () : OpBinary ("(%1 > %2)") { }
+ OpGreaterThan () : OpBinary ("(%1 > %2)") { }
complex_type R (real_type a, real_type b) const { return a > b; }
};
@@ -1025,7 +1041,7 @@ class OpLessThanOrEqual : public OpBinary { NOMEMALIGN
class OpGreaterThanOrEqual : public OpBinary { NOMEMALIGN
public:
- OpGreaterThanOrEqual () : OpBinary ("(%1 >= %2)") { }
+ OpGreaterThanOrEqual () : OpBinary ("(%1 >= %2)") { }
complex_type R (real_type a, real_type b) const { return a >= b; }
};
@@ -1066,6 +1082,13 @@ class OpIf : public OpTernary { NOMEMALIGN
complex_type Z (complex_type a, complex_type b, complex_type c) const { return a.real() ? b : c; }
};
+class OpReplace : public OpTernary { NOMEMALIGN
+ public:
+ OpReplace () : OpTernary ("(%1, %2 -> %3)") { }
+ complex_type R (real_type a, real_type b, real_type c) const { return ((a==b) || (std::isnan(a) && std::isnan(b))) ? c : a; }
+ complex_type Z (complex_type a, complex_type b, complex_type c) const { return (a==b) ? c : a; }
+};
+
/**********************************************************************
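
The new -replace operator substitutes the third operand wherever the first equals the second, and for real values also treats two NaNs as equal so that NaN voxels can be replaced. A standalone illustration of that rule:

```cpp
#include <cmath>
#include <iostream>

// Mirror of the OpReplace real-valued rule: return c where a matches b
// (including the NaN == NaN special case), otherwise return a unchanged.
double replace_value (double a, double b, double c)
{
  const bool match = (a == b) || (std::isnan (a) && std::isnan (b));
  return match ? c : a;
}

int main()
{
  const double nan = std::nan ("");
  std::cout << replace_value (nan, nan, 0.0) << "\n";   // NaN replaced: prints 0
  std::cout << replace_value (3.0, 5.0, 0.0) << "\n";   // no match: prints 3
}
```
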
@@ -1079,7 +1102,7 @@ void run () {
const Option* opt = match_option (App::argv[n]);
if (opt) {
- if (opt->is ("abs")) unary_operation (opt->id, stack, OpAbs());
+ if (opt->is ("abs")) unary_operation (opt->id, stack, OpAbs());
else if (opt->is ("neg")) unary_operation (opt->id, stack, OpNeg());
else if (opt->is ("sqrt")) unary_operation (opt->id, stack, OpSqrt());
else if (opt->is ("exp")) unary_operation (opt->id, stack, OpExp());
@@ -1133,13 +1156,14 @@ void run () {
else if (opt->is ("complex")) binary_operation (opt->id, stack, OpComplex());
else if (opt->is ("if")) ternary_operation (opt->id, stack, OpIf());
+ else if (opt->is ("replace")) ternary_operation (opt->id, stack, OpReplace());
else if (opt->is ("datatype")) ++n;
else if (opt->is ("nthreads")) ++n;
else if (opt->is ("force") || opt->is ("info") || opt->is ("debug") || opt->is ("quiet"))
continue;
- else
+ else
throw Exception (std::string ("operation \"") + opt->id + "\" not yet implemented!");
}
diff --git a/cmd/mrcat.cpp b/cmd/mrcat.cpp
index d718076df0..a6f417718c 100644
--- a/cmd/mrcat.cpp
+++ b/cmd/mrcat.cpp
@@ -1,14 +1,15 @@
-/* Copyright (c) 2008-2017 the MRtrix3 contributors.
+/*
+ * Copyright (c) 2008-2018 the MRtrix3 contributors.
*
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
- * file, you can obtain one at http://mozilla.org/MPL/2.0/.
+ * file, you can obtain one at http://mozilla.org/MPL/2.0/
*
- * MRtrix is distributed in the hope that it will be useful,
+ * MRtrix3 is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty
* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
*
- * For more details, see http://www.mrtrix.org/.
+ * For more details, see http://www.mrtrix.org/
*/
@@ -22,8 +23,9 @@
using namespace MR;
using namespace App;
-void usage () {
-AUTHOR = "J-Donald Tournier (jdtournier@gmail.com)";
+void usage ()
+{
+AUTHOR = "J-Donald Tournier (jdtournier@gmail.com) and Robert E. Smith (robert.smith@florey.edu.au)";
SYNOPSIS = "Concatenate several images into one";
@@ -50,10 +52,40 @@ OPTIONS
}
-using value_type = float;
+template <class T>
+void write (vector<Header>& in,
+ const size_t axis,
+ Header& header_out,
+ const std::string& out_path)
+{
+ auto image_out = Image<T>::create (out_path, header_out);
+ size_t axis_offset = 0;
-void run () {
+ for (size_t i = 0; i != in.size(); i++) {
+ auto image_in = in[i].get_image<T>();
+
+ auto copy_func = [&axis, &axis_offset](decltype(image_in)& in, decltype(image_out)& out)
+ {
+ out.index (axis) = axis < in.ndim() ? in.index (axis) + axis_offset : axis_offset;
+ out.value() = in.value();
+ };
+
+ ThreadedLoop ("concatenating \"" + image_in.name() + "\"", image_in, 0, std::min (image_in.ndim(), image_out.ndim()))
+ .run (copy_func, image_in, image_out);
+ if (axis < image_in.ndim())
+ axis_offset += image_in.size (axis);
+ else {
+ ++axis_offset;
+ image_out.index (axis) = axis_offset;
+ }
+ }
+}
+
+
+
+void run ()
+{
int axis = get_option_value ("axis", -1);
int num_images = argument.size()-1;
@@ -62,13 +94,25 @@ void run () {
int ndims = 0;
int last_dim;
+ DataType datatype = in[0].datatype();
+ DEBUG ("Datatype of first image (" + in[0].name() + "): " + datatype.specifier());
for (int i = 1; i < num_images; i++) {
in[i] = Header::open (argument[i]);
for (last_dim = in[i].ndim()-1; in[i].size (last_dim) <= 1 && last_dim >= 0; last_dim--);
if (last_dim > ndims)
ndims = last_dim;
+ DEBUG ("Datatype of image " + in[i].name() + ": " + in[i].datatype().specifier());
+ if (in[i].datatype().is_complex())
+ datatype.set_flag (DataType::Complex);
+ if (datatype.is_integer() && in[i].datatype().is_signed())
+ datatype.set_flag (DataType::Signed);
+ if (in[i].datatype().is_floating_point() && datatype.is_integer())
+ datatype = in[i].datatype();
+ if (in[i].datatype().bytes() > datatype.bytes())
+ datatype = (datatype() & DataType::Attributes) + (in[i].datatype()() & DataType::Type);
}
+ DEBUG (str("Output image datatype: ") + datatype.specifier());
if (axis < 0) axis = std::max (3, ndims);
++ndims;
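
The loop above derives the mrcat output datatype by promoting across all inputs: any complex input makes the output complex, a signed integer input makes an integer output signed, a floating-point input overrides an integer accumulator, and a wider input widens the output. A rough standalone sketch of those promotion rules (simplified flags, not the MRtrix3 DataType API):

```cpp
#include <algorithm>
#include <cstddef>

// Simplified stand-in for the datatype descriptor promoted above.
struct TypeInfo {
  bool is_complex = false;
  bool is_signed = false;
  bool is_floating_point = false;
  size_t bytes = 1;
};

// Combine the accumulated output type with one more input, following the same
// ordering of rules as the mrcat hunk: complex flag, signedness, float-over-int, width.
TypeInfo promote (TypeInfo out, const TypeInfo& in)
{
  if (in.is_complex)
    out.is_complex = true;
  if (!out.is_floating_point && in.is_signed)
    out.is_signed = true;
  if (in.is_floating_point && !out.is_floating_point)
    out = in;                                   // float input overrides integer accumulator
  out.bytes = std::max (out.bytes, in.bytes);   // never narrow the output
  return out;
}
```
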
@@ -82,6 +126,7 @@ void run () {
if (axis >= ndims) ndims = axis+1;
Header header_out (in[0]);
+ header_out.datatype() = DataType::from_command_line (datatype);
header_out.ndim() = ndims;
for (size_t i = 0; i < header_out.ndim(); i++) {
@@ -97,18 +142,10 @@ void run () {
}
- {
- size_t axis_dim = 0;
- for (int n = 0; n < num_images; n++) {
- if (in[n].datatype().is_complex())
- header_out.datatype() = DataType::CFloat32;
- axis_dim += in[n].ndim() > size_t (axis) ? (in[n].size (axis) > 1 ? in[n].size (axis) : 1) : 1;
- }
- header_out.size (axis) = axis_dim;
- }
-
- header_out.datatype() = DataType::from_command_line (header_out.datatype());
-
+ size_t axis_dim = 0;
+ for (int n = 0; n < num_images; n++)
+ axis_dim += in[n].ndim() > size_t (axis) ? (in[n].size (axis) > 1 ? in[n].size (axis) : 1) : 1;
+ header_out.size (axis) = axis_dim;
if (axis > 2) {
@@ -129,7 +166,7 @@ void run () {
}
nrows += grad.rows();
input_grads.push_back (std::move (grad));
- }
+ }
if (nrows) {
Eigen::MatrixXd grad_out (nrows, 4);
int row = 0;
@@ -173,28 +210,32 @@ void run () {
}
+ const std::string out_path = argument[num_images];
- auto image_out = Image::create (argument[num_images], header_out);
- int axis_offset = 0;
-
-
- for (int i = 0; i < num_images; i++) {
- auto image_in = in[i].get_image();
-
- auto copy_func = [&axis, &axis_offset](decltype(image_in)& in, decltype(image_out)& out)
- {
- out.index (axis) = axis < int(in.ndim()) ? in.index (axis) + axis_offset : axis_offset;
- out.value() = in.value();
- };
-
- ThreadedLoop ("concatenating \"" + image_in.name() + "\"", image_in, 0, std::min (image_in.ndim(), image_out.ndim()))
- .run (copy_func, image_in, image_out);
- if (axis < int(image_in.ndim()))
- axis_offset += image_in.size (axis);
- else {
- ++axis_offset;
- image_out.index (axis) = axis_offset;
+ if (header_out.intensity_offset() == 0.0 && header_out.intensity_scale() == 1.0 && !datatype.is_floating_point()) {
+ switch (datatype() & DataType::Type) {
+ case DataType::Bit:
+ case DataType::UInt8:
+ case DataType::UInt16:
+ case DataType::UInt32:
+ if (datatype.is_signed())
+ write<int32_t> (in, axis, header_out, out_path);
+ else
+ write<uint32_t> (in, axis, header_out, out_path);
+ break;
+ case DataType::UInt64:
+ if (datatype.is_signed())
+ write (in, axis, header_out, out_path);
+ else
+ write (in, axis, header_out, out_path);
+ break;
}
+ } else {
+ if (datatype.is_complex())
+ write (in, axis, header_out, out_path);
+ else
+ write (in, axis, header_out, out_path);
}
+
}
diff --git a/cmd/mrcheckerboardmask.cpp b/cmd/mrcheckerboardmask.cpp
index d01a2d06a5..c990da0c39 100644
--- a/cmd/mrcheckerboardmask.cpp
+++ b/cmd/mrcheckerboardmask.cpp
@@ -1,14 +1,15 @@
-/* Copyright (c) 2008-2017 the MRtrix3 contributors.
+/*
+ * Copyright (c) 2008-2018 the MRtrix3 contributors.
*
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
- * file, you can obtain one at http://mozilla.org/MPL/2.0/.
+ * file, you can obtain one at http://mozilla.org/MPL/2.0/
*
- * MRtrix is distributed in the hope that it will be useful,
+ * MRtrix3 is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty
* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
*
- * For more details, see http://www.mrtrix.org/.
+ * For more details, see http://www.mrtrix.org/
*/
diff --git a/cmd/mrclusterstats.cpp b/cmd/mrclusterstats.cpp
index 61160fcd6a..c83b3db17d 100644
--- a/cmd/mrclusterstats.cpp
+++ b/cmd/mrclusterstats.cpp
@@ -1,14 +1,15 @@
-/* Copyright (c) 2008-2017 the MRtrix3 contributors.
+/*
+ * Copyright (c) 2008-2018 the MRtrix3 contributors.
*
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
- * file, you can obtain one at http://mozilla.org/MPL/2.0/.
+ * file, you can obtain one at http://mozilla.org/MPL/2.0/
*
- * MRtrix is distributed in the hope that it will be useful,
+ * MRtrix3 is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty
* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
*
- * For more details, see http://www.mrtrix.org/.
+ * For more details, see http://www.mrtrix.org/
*/
diff --git a/cmd/mrconvert.cpp b/cmd/mrconvert.cpp
index 46432b2b1b..2ebc65250e 100644
--- a/cmd/mrconvert.cpp
+++ b/cmd/mrconvert.cpp
@@ -1,22 +1,25 @@
-/* Copyright (c) 2008-2017 the MRtrix3 contributors.
+/*
+ * Copyright (c) 2008-2018 the MRtrix3 contributors.
*
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
- * file, you can obtain one at http://mozilla.org/MPL/2.0/.
+ * file, you can obtain one at http://mozilla.org/MPL/2.0/
*
- * MRtrix is distributed in the hope that it will be useful,
+ * MRtrix3 is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty
* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
*
- * For more details, see http://www.mrtrix.org/.
+ * For more details, see http://www.mrtrix.org/
*/
+#include "axes.h"
#include "command.h"
#include "header.h"
#include "image.h"
#include "phase_encoding.h"
#include "transform.h"
+#include "types.h"
#include "algo/threaded_copy.h"
#include "adapter/extract.h"
#include "adapter/permute_axes.h"
@@ -30,53 +33,99 @@ using namespace App;
void usage ()
{
- AUTHOR = "J-Donald Tournier (jdtournier@gmail.com)";
+ AUTHOR = "J-Donald Tournier (jdtournier@gmail.com) and Robert E. Smith (robert.smith@florey.edu.au)";
SYNOPSIS = "Perform conversion between different file types and optionally "
- "extract a subset of the input image";
+ "extract a subset of the input image";
-DESCRIPTION
+ DESCRIPTION
+ "If used correctly, this program can be a very useful workhorse. "
- "In addition to converting images between different formats, it can "
- "be used to extract specific studies from a data set, extract a "
- "specific region of interest, or flip the images.";
+ "In addition to converting images between different formats, it can "
+ "be used to extract specific studies from a data set, extract a "
+ "specific region of interest, or flip the images. Some of the possible "
+ "operations are described in more detail below."
+
+ + "The -coord option is used to select the coordinates within the input "
+ "image that are to be retained in the output image. This can therefore "
+ "be used to include or exclude subsets of slices along a particular "
+ "spatial axis, or volumes / series within higher dimensions. "
+ "For instance: "
+ "-coord 3 0 extracts the first volume from a 4D image; "
+ "-coord 1 24 extracts slice number 24 along the y-axis."
+
+ + "The colon operator can be particularly useful in conjunction with the "
+ "-coord option, in order to select multiple coordinates. "
+ "For instance: "
+ "-coord 3 1:59 "
+ "would select all but the first volume from an image containing 60 volumes."
+
+ + "The -vox option is used to change the size of the voxels in the output "
+ "image. Note that this does not re-sample the image based on a new "
+ "voxel size (that is done using the mrresize command); this only changes "
+ "the voxel size as reported in the image header. Voxel sizes for "
+ "individual axes can be set independently, using a comma-separated list of "
+ "values; e.g. "
+ "-vox 1,,3.5 "
+ "will change the voxel size along the x & z axes to 1.0mm and 3.5mm "
+ "respectively, and leave the y-axis voxel size unchanged."
+
+ + "The -axes option specifies which axes from the input image will be used "
+ "to form the output image. This allows the permutation, omission, or "
+ "addition of axes into the output image. The axes should be supplied as a "
+ "comma-separated list of axis indices, e.g. "
+ "-axes 0,1,2 "
+ "would select only the three spatial axes to form the output image. If an "
+ "axis from the input image is to be omitted from the output image, it must "
+ "have dimension 1; either in the input image itself, or a single coordinate "
+ "along that axis must be selected by the user by using the -coord option. "
+ "An axis of unity dimension can be inserted by supplying -1 at the "
+ "corresponding position in the list."
+
+ + "The -scaling option specifies the data scaling parameters stored within "
+ "the image header that are used to rescale the image intensity values. "
+ "Where the raw data stored in a particular voxel is I, the value within "
+ "that voxel is interpreted as: "
+ "value = offset + (scale x I). "
+ "To adjust this scaling, the relevant parameters must be provided as a "
+ "comma-separated 2-vector of floating-point values, in the format "
+ "\"offset,scale\" (no quotation marks)."
+
+ + "By default, the intensity scaling parameters in the input image header "
+ "are passed through to the output image header when writing to an integer "
+ "image, and reset to 0,1 (i.e. no scaling) for floating-point and binary "
+ "images. Note that the -scaling option will therefore have no effect for "
+ "floating-point or binary output images."
+
+ + "Note that for both the -coord and -axes options, indexing starts from 0 "
+ "rather than 1. E.g. "
+ "-coord 3 <#> selects volumes (the fourth dimension) from the series; "
+ "-axes 0,1,2 includes only the three spatial axes in the output image.";
ARGUMENTS
+ Argument ("input", "the input image.").type_image_in ()
+ Argument ("output", "the output image.").type_image_out ();
OPTIONS
+
+ + OptionGroup ("Options for manipulating fundamental image properties")
+
+ Option ("coord",
- "extract data from the input image only at the coordinates specified.")
+ "retain data from the input image only at the coordinates specified")
.allow_multiple()
- + Argument ("axis").type_integer (0)
- + Argument ("coord").type_sequence_int()
+ + Argument ("axis").type_integer (0)
+ + Argument ("coord").type_sequence_int()
+ Option ("vox",
- "change the voxel dimensions of the output image. The new sizes should "
- "be provided as a comma-separated list of values. Only those values "
- "specified will be changed. For example: 1,,3.5 will change the voxel "
- "size along the x & z axes, and leave the y-axis voxel size unchanged.")
- + Argument ("sizes").type_sequence_float()
+ "change the voxel dimensions of the output image")
+ + Argument ("sizes").type_sequence_float()
+ Option ("axes",
- "specify the axes from the input image that will be used to form the output "
- "image. This allows the permutation, ommission, or addition of axes into the "
- "output image. The axes should be supplied as a comma-separated list of axes. "
- "Any ommitted axes must have dimension 1. Axes can be inserted by supplying "
- "-1 at the corresponding position in the list.")
- + Argument ("axes").type_sequence_int()
+ "specify the axes from the input image that will be used to form the output image")
+ + Argument ("axes").type_sequence_int()
+ Option ("scaling",
- "specify the data scaling parameters used to rescale the intensity values. "
- "These take the form of a comma-separated 2-vector of floating-point values, "
- "corresponding to offset & scale, with final intensity values being given by "
- "offset + scale * stored_value. "
- "By default, the values in the input image header are passed through to the "
- "output image header when writing to an integer image, and reset to 0,1 (no "
- "scaling) for floating-point and binary images. Note that his option has no "
- "effect for floating-point and binary images.")
- + Argument ("values").type_sequence_float()
+ "specify the data scaling parameters used to rescale the intensity values")
+ + Argument ("values").type_sequence_float()
+ OptionGroup ("Options for handling JSON (JavaScript Object Notation) files")
@@ -161,6 +210,18 @@ void permute_PE_scheme (Header& H, const vector& axes)
+void permute_slice_direction (Header& H, const vector<int>& axes)
+{
+ auto it = H.keyval().find ("SliceEncodingDirection");
+ if (it == H.keyval().end())
+ return;
+ const Eigen::Vector3 orig_dir = Axes::id2dir (it->second);
+ const Eigen::Vector3 new_dir (orig_dir[axes[0]], orig_dir[axes[1]], orig_dir[axes[2]]);
+ it->second = Axes::dir2id (new_dir);
+}
+
+
+
 template <class ImageType>
 inline vector<int> set_header (Header& header, const ImageType& input)
@@ -186,6 +247,7 @@ inline vector set_header (Header& header, const ImageType& input)
}
permute_DW_scheme (header, axes);
permute_PE_scheme (header, axes);
+ permute_slice_direction (header, axes);
} else {
header.ndim() = input.ndim();
axes.assign (input.ndim(), 0);
@@ -214,36 +276,33 @@ inline vector set_header (Header& header, const ImageType& input)
-template <typename T>
-inline void copy_permute (Header& header_in, Header& header_out, const vector<vector<int>>& pos, const std::string& output_filename)
+
+template <typename T, class InputType>
+void copy_permute (const InputType& in, Header& header_out, const std::string& output_filename)
 {
+  const auto axes = set_header (header_out, in);
+  auto out = Image<T>::create (output_filename, header_out);
+  DWI::export_grad_commandline (out);
+  PhaseEncoding::export_commandline (out);
+  auto perm = Adapter::make<Adapter::PermuteAxes> (in, axes);
+  threaded_copy_with_progress (perm, out, 0, std::numeric_limits<size_t>::max(), 2);
+}
- auto in = header_in.get_image();
- if (pos.empty()) {
- const auto axes = set_header (header_out, in);
- auto out = Header::create (output_filename, header_out).get_image();
- DWI::export_grad_commandline (out);
- PhaseEncoding::export_commandline (out);
- auto perm = Adapter::make (in, axes);
- threaded_copy_with_progress (perm, out, 0, std::numeric_limits::max(), 2);
+template <typename T>
+void extract (Header& header_in, Header& header_out, const vector<vector<int>>& pos, const std::string& output_filename)
+{
+  auto in = header_in.get_image<T>();
+  if (pos.empty()) {
+    copy_permute<T> (in, header_out, output_filename);
   } else {
-
     auto extract = Adapter::make<Adapter::Extract> (in, pos);
-    const auto axes = set_header (header_out, extract);
-    auto out = Image<T>::create (output_filename, header_out);
-    DWI::export_grad_commandline (out);
-    PhaseEncoding::export_commandline (out);
-
-    auto perm = Adapter::make<Adapter::PermuteAxes> (extract, axes);
-    threaded_copy_with_progress (perm, out, 0, std::numeric_limits<size_t>::max(), 2);
-
+    copy_permute<T> (extract, header_out, output_filename);
}
-
}
@@ -378,15 +437,15 @@ void run ()
case DataType::UInt16:
case DataType::UInt32:
if (header_out.datatype().is_signed())
-          copy_permute<int32_t> (header_in, header_out, pos, argument[1]);
+          extract<int32_t> (header_in, header_out, pos, argument[1]);
         else
-          copy_permute<uint32_t> (header_in, header_out, pos, argument[1]);
+          extract<uint32_t> (header_in, header_out, pos, argument[1]);
         break;
       case DataType::UInt64:
         if (header_out.datatype().is_signed())
-          copy_permute<int64_t> (header_in, header_out, pos, argument[1]);
+          extract<int64_t> (header_in, header_out, pos, argument[1]);
         else
-          copy_permute<uint64_t> (header_in, header_out, pos, argument[1]);
+          extract<uint64_t> (header_in, header_out, pos, argument[1]);
break;
case DataType::Undefined: throw Exception ("invalid output image data type"); break;
@@ -394,9 +453,9 @@ void run ()
}
else {
if (header_out.datatype().is_complex())
-        copy_permute<cdouble> (header_in, header_out, pos, argument[1]);
+        extract<cdouble> (header_in, header_out, pos, argument[1]);
       else
-        copy_permute<float> (header_in, header_out, pos, argument[1]);
+        extract<float> (header_in, header_out, pos, argument[1]);
}
diff --git a/cmd/mrcrop.cpp b/cmd/mrcrop.cpp
index c7ccf4f18e..483fc23496 100644
--- a/cmd/mrcrop.cpp
+++ b/cmd/mrcrop.cpp
@@ -1,14 +1,15 @@
-/* Copyright (c) 2008-2017 the MRtrix3 contributors.
+/*
+ * Copyright (c) 2008-2018 the MRtrix3 contributors.
*
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
- * file, you can obtain one at http://mozilla.org/MPL/2.0/.
+ * file, you can obtain one at http://mozilla.org/MPL/2.0/
*
- * MRtrix is distributed in the hope that it will be useful,
+ * MRtrix3 is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty
* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
*
- * For more details, see http://www.mrtrix.org/.
+ * For more details, see http://www.mrtrix.org/
*/
@@ -88,7 +89,7 @@ void run ()
overall_bounds[axis][1] = std::max (bounds[axis][1], overall_bounds[axis][1]);
}
}
- void operator() (const decltype(mask)& m) {
+ void operator() (const decltype(mask)& m) {
if (m.value()) {
for (size_t axis = 0; axis != 3; ++axis) {
bounds[axis][0] = std::min (bounds[axis][0], m.index(axis));
@@ -136,6 +137,6 @@ void run ()
auto cropped = Adapter::make (in, from, size);
auto out = Image::create (argument[1], cropped);
- threaded_copy_with_progress_message ("cropping image...", cropped, out);
+ threaded_copy_with_progress_message ("cropping image", cropped, out);
}
diff --git a/cmd/mrdegibbs.cpp b/cmd/mrdegibbs.cpp
index 17a9d9c880..c6af0a83ff 100644
--- a/cmd/mrdegibbs.cpp
+++ b/cmd/mrdegibbs.cpp
@@ -1,22 +1,24 @@
-/* Copyright (c) 2008-2017 the MRtrix3 contributors.
+/*
+ * Copyright (c) 2008-2018 the MRtrix3 contributors.
*
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
- * file, you can obtain one at http://mozilla.org/MPL/2.0/.
+ * file, you can obtain one at http://mozilla.org/MPL/2.0/
*
- * MRtrix is distributed in the hope that it will be useful,
+ * MRtrix3 is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty
* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
*
- * For more details, see http://www.mrtrix.org/.
+ * For more details, see http://www.mrtrix.org/
*/
 #include <unsupported/Eigen/FFT>
+#include "axes.h"
#include "command.h"
-#include "progressbar.h"
#include "image.h"
+#include "progressbar.h"
#include "algo/threaded_loop.h"
 #include <numeric>
@@ -29,16 +31,16 @@ void usage ()
SYNOPSIS = "Remove Gibbs Ringing Artifacts";
- DESCRIPTION
+ DESCRIPTION
+ "This application attempts to remove Gibbs ringing artefacts from MRI images using the method "
"of local subvoxel-shifts proposed by Kellner et al. (see reference below for details)."
-
+
+ "This command is designed to run on data directly after it has been reconstructed by the scanner, "
"before any interpolation of any kind has taken place. You should not run this command after any "
"form of motion correction (e.g. not after dwipreproc). Similarly, if you intend running dwidenoise, "
"you should run this command afterwards, since it has the potential to alter the noise structure, "
"which would impact on dwidenoise's performance."
-
+
+ "Note that this method is designed to work on images acquired with full k-space coverage. "
"Running this method on partial Fourier ('half-scan') data may lead to suboptimal and/or biased "
"results, as noted in the original reference below. There is currently no means of dealing with this; "
@@ -90,24 +92,24 @@ class ComputeSlice
nsh (nsh),
minW (minW),
maxW (maxW),
- in (in),
+ in (in),
out (out),
im1 (in.size(slice_axes[0]), in.size(slice_axes[1])),
- im2 (im1.rows(), im1.cols()) {
+ im2 (im1.rows(), im1.cols()) {
prealloc_FFT();
}
-
+
ComputeSlice (const ComputeSlice& other) :
outer_axes (other.outer_axes),
slice_axes (other.slice_axes),
nsh (other.nsh),
minW (other.minW),
maxW (other.maxW),
- in (other.in),
+ in (other.in),
out (other.out),
fft (),
im1 (in.size(slice_axes[0]), in.size(slice_axes[1])),
- im2 (im1.rows(), im1.cols()) {
+ im2 (im1.rows(), im1.cols()) {
prealloc_FFT();
}
@@ -118,26 +120,26 @@ class ComputeSlice
const int Y = slice_axes[1];
assign_pos_of (pos, outer_axes).to (in, out);
- for (auto l = Loop (slice_axes) (in); l; ++l)
+ for (auto l = Loop (slice_axes) (in); l; ++l)
im1 (in.index(X), in.index(Y)) = cdouble (in.value(), 0.0);
-
+
unring_2d ();
for (auto l = Loop (slice_axes) (out); l; ++l)
- out.value() = im1 (out.index(X), out.index(Y)).real();
+ out.value() = im1 (out.index(X), out.index(Y)).real();
}
private:
const vector& outer_axes;
const vector& slice_axes;
const int nsh, minW, maxW;
- Image in, out;
+ Image in, out;
Eigen::FFT fft;
Eigen::MatrixXcd im1, im2, shifted;
Eigen::VectorXcd v;
void prealloc_FFT () {
- // needed to avoid within-thread allocations,
+ // needed to avoid within-thread allocations,
// which aren't thread-safe in FFTW:
#ifdef EIGEN_FFTW_DEFAULT
Eigen::VectorXcd tmp (im1.rows());
@@ -151,11 +153,11 @@ class ComputeSlice
template FORCE_INLINE void FFT (Eigen::MatrixBase&& vec) { fft.fwd (v, vec); vec = v; }
template FORCE_INLINE void FFT (Eigen::MatrixBase& vec) { FFT (std::move (vec)); }
- template FORCE_INLINE void iFFT (Eigen::MatrixBase&& vec) { fft.inv (v, vec); vec = v; }
+ template FORCE_INLINE void iFFT (Eigen::MatrixBase&& vec) { fft.inv (v, vec); vec = v; }
template FORCE_INLINE void iFFT (Eigen::MatrixBase& vec) { iFFT (std::move (vec)); }
- template FORCE_INLINE void row_FFT (Eigen::MatrixBase& mat) { for (auto n = 0; n < mat.rows(); ++n) FFT (mat.row(n)); }
+ template FORCE_INLINE void row_FFT (Eigen::MatrixBase& mat) { for (auto n = 0; n < mat.rows(); ++n) FFT (mat.row(n)); }
template FORCE_INLINE void row_iFFT (Eigen::MatrixBase& mat) { for (auto n = 0; n < mat.rows(); ++n) iFFT (mat.row(n)); }
- template FORCE_INLINE void col_FFT (Eigen::MatrixBase& mat) { for (auto n = 0; n < mat.cols(); ++n) FFT (mat.col(n)); }
+ template FORCE_INLINE void col_FFT (Eigen::MatrixBase& mat) { for (auto n = 0; n < mat.cols(); ++n) FFT (mat.col(n)); }
template FORCE_INLINE void col_iFFT (Eigen::MatrixBase& mat) { for (auto n = 0; n < mat.cols(); ++n) iFFT (mat.col(n)); }
@@ -174,7 +176,7 @@ class ComputeSlice
im2(j,k) = im1(j,k) * cj / (ck+cj);
im1(j,k) *= ck / (ck+cj);
}
- else
+ else
im1(j,k) = im2(j,k) = cdouble(0.0, 0.0);
}
}
@@ -192,7 +194,7 @@ class ComputeSlice
- template
+ template
FORCE_INLINE void unring_1d (Eigen::MatrixBase&& eig)
{
const int n = eig.rows();
@@ -220,7 +222,7 @@ class ComputeSlice
cdouble e (1.0, 0.0);
shifted(0,j) = shifted(0,0);
- if (!(n&1))
+ if (!(n&1))
shifted(n/2,j) = cdouble(0.0, 0.0);
for (int l = 0; l < maxn; l++) {
@@ -237,10 +239,10 @@ class ComputeSlice
TV1arr[j] = 0.0;
TV2arr[j] = 0.0;
for (int t = minW; t <= maxW; t++) {
- TV1arr[j] += std::abs (shifted((n-t)%n,j).real() - shifted((n-t-1)%n,j).real());
- TV1arr[j] += std::abs (shifted((n-t)%n,j).imag() - shifted((n-t-1)%n,j).imag());
- TV2arr[j] += std::abs (shifted((n+t)%n,j).real() - shifted((n+t+1)%n,j).real());
- TV2arr[j] += std::abs (shifted((n+t)%n,j).imag() - shifted((n+t+1)%n,j).imag());
+ TV1arr[j] += abs (shifted((n-t)%n,j).real() - shifted((n-t-1)%n,j).real());
+ TV1arr[j] += abs (shifted((n-t)%n,j).imag() - shifted((n-t-1)%n,j).imag());
+ TV2arr[j] += abs (shifted((n+t)%n,j).real() - shifted((n+t+1)%n,j).real());
+ TV2arr[j] += abs (shifted((n+t)%n,j).imag() - shifted((n+t+1)%n,j).imag());
}
}
@@ -258,15 +260,15 @@ class ComputeSlice
minidx = j;
}
- TV1arr[j] += std::abs (shifted((l-minW+1+n)%n,j).real() - shifted((l-(minW )+n)%n,j).real());
- TV1arr[j] -= std::abs (shifted((l-maxW +n)%n,j).real() - shifted((l-(maxW+1)+n)%n,j).real());
- TV2arr[j] += std::abs (shifted((l+maxW+1+n)%n,j).real() - shifted((l+(maxW+2)+n)%n,j).real());
- TV2arr[j] -= std::abs (shifted((l+minW +n)%n,j).real() - shifted((l+(minW+1)+n)%n,j).real());
+ TV1arr[j] += abs (shifted((l-minW+1+n)%n,j).real() - shifted((l-(minW )+n)%n,j).real());
+ TV1arr[j] -= abs (shifted((l-maxW +n)%n,j).real() - shifted((l-(maxW+1)+n)%n,j).real());
+ TV2arr[j] += abs (shifted((l+maxW+1+n)%n,j).real() - shifted((l+(maxW+2)+n)%n,j).real());
+ TV2arr[j] -= abs (shifted((l+minW +n)%n,j).real() - shifted((l+(minW+1)+n)%n,j).real());
- TV1arr[j] += std::abs (shifted((l-minW+1+n)%n,j).imag() - shifted((l-(minW )+n)%n,j).imag());
- TV1arr[j] -= std::abs (shifted((l-maxW +n)%n,j).imag() - shifted((l-(maxW+1)+n)%n,j).imag());
- TV2arr[j] += std::abs (shifted((l+maxW+1+n)%n,j).imag() - shifted((l+(maxW+2)+n)%n,j).imag());
- TV2arr[j] -= std::abs (shifted((l+minW +n)%n,j).imag() - shifted((l+(minW+1)+n)%n,j).imag());
+ TV1arr[j] += abs (shifted((l-minW+1+n)%n,j).imag() - shifted((l-(minW )+n)%n,j).imag());
+ TV1arr[j] -= abs (shifted((l-maxW +n)%n,j).imag() - shifted((l-(maxW+1)+n)%n,j).imag());
+ TV2arr[j] += abs (shifted((l+maxW+1+n)%n,j).imag() - shifted((l+(maxW+2)+n)%n,j).imag());
+ TV2arr[j] -= abs (shifted((l+minW +n)%n,j).imag() - shifted((l+(minW+1)+n)%n,j).imag());
}
double a0r = shifted((l-1+n)%n,minidx).real();
@@ -277,15 +279,15 @@ class ComputeSlice
double a2i = shifted((l+1+n)%n,minidx).imag();
double s = double(shifts[minidx])/(2.0*nsh);
- if (s > 0.0)
+ if (s > 0.0)
eig(l,k) = cdouble (a1r*(1.0-s) + a0r*s, a1i*(1.0-s) + a0i*s);
- else
+ else
eig(l,k) = cdouble (a1r*(1.0+s) - a2r*s, a1i*(1.0+s) - a2i*s);
}
}
}
- template
+ template
FORCE_INLINE void unring_1d (Eigen::MatrixBase& eig) { unring_1d (std::move (eig)); }
};
@@ -302,7 +304,7 @@ void run ()
const int minW = App::get_option_value ("minW", 1);
const int maxW = App::get_option_value ("maxW", 3);
- if (minW >= maxW)
+ if (minW >= maxW)
throw Exception ("minW must be smaller than maxW");
auto in = Image::open (argument[0]);
@@ -313,13 +315,46 @@ void run ()
vector slice_axes = { 0, 1 };
auto opt = get_options ("axes");
+ const bool axes_set_manually = opt.size();
if (opt.size()) {
vector axes = opt[0][0];
- if (slice_axes.size() != 2)
+ if (slice_axes.size() != 2)
throw Exception ("slice axes must be specified as a comma-separated 2-vector");
slice_axes = { size_t(axes[0]), size_t(axes[1]) };
}
+ auto slice_encoding_it = header.keyval().find ("SliceEncodingDirection");
+ if (slice_encoding_it != header.keyval().end()) {
+ try {
+      const Eigen::Vector3 slice_encoding_axis_onehot = Axes::id2dir (slice_encoding_it->second);
+      vector<size_t> auto_slice_axes = { 0, 0 };
+      if (slice_encoding_axis_onehot[0])
+        auto_slice_axes = { 1, 2 };
+      else if (slice_encoding_axis_onehot[1])
+        auto_slice_axes = { 0, 2 };
+      else if (slice_encoding_axis_onehot[2])
+        auto_slice_axes = { 0, 1 };
+      else
+        throw Exception ("Fatal error: Invalid slice axis one-hot encoding [ " + str(slice_encoding_axis_onehot.transpose()) + " ]");
+ if (axes_set_manually) {
+ if (slice_axes == auto_slice_axes) {
+ INFO ("User's manual selection of within-slice axes consistent with \"SliceEncodingDirection\" field in image header");
+ } else {
+ WARN ("Within-slice axes set using -axes option will be used, but is inconsistent with SliceEncodingDirection field present in image header (" + slice_encoding_it->second + ")");
+ }
+ } else {
+ if (slice_axes == auto_slice_axes) {
+ INFO ("\"SliceEncodingDirection\" field in image header is consistent with default selection of first two axes as being within-slice");
+ } else {
+ slice_axes = auto_slice_axes;
+ CONSOLE ("Using axes { " + str(slice_axes[0]) + ", " + str(slice_axes[1]) + " } for Gibbs ringing removal based on \"SliceEncodingDirection\" field in image header");
+ }
+ }
+ } catch (...) {
+ WARN ("Invalid value for field \"SliceEncodingDirection\" in image header (" + slice_encoding_it->second + "); ignoring");
+ }
+ }
+
// build vector of outer axes:
vector outer_axes (header.ndim());
std::iota (outer_axes.begin(), outer_axes.end(), 0);
diff --git a/cmd/mrdump.cpp b/cmd/mrdump.cpp
index 3e87d3553d..add0700bbb 100644
--- a/cmd/mrdump.cpp
+++ b/cmd/mrdump.cpp
@@ -1,14 +1,15 @@
-/* Copyright (c) 2008-2017 the MRtrix3 contributors.
+/*
+ * Copyright (c) 2008-2018 the MRtrix3 contributors.
*
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
- * file, you can obtain one at http://mozilla.org/MPL/2.0/.
+ * file, you can obtain one at http://mozilla.org/MPL/2.0/
*
- * MRtrix is distributed in the hope that it will be useful,
+ * MRtrix3 is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty
* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
*
- * For more details, see http://www.mrtrix.org/.
+ * For more details, see http://www.mrtrix.org/
*/
@@ -47,7 +48,6 @@ void usage ()
 template <class StreamType>
 void write (Image<float>& image, StreamType& out)
{
- TRACE;
for (auto l = Loop(image) (image); l; ++l)
out << image.value() << "\n";
}
diff --git a/cmd/mredit.cpp b/cmd/mredit.cpp
index 471535e19e..0b90b0d385 100644
--- a/cmd/mredit.cpp
+++ b/cmd/mredit.cpp
@@ -1,14 +1,15 @@
-/* Copyright (c) 2008-2017 the MRtrix3 contributors.
+/*
+ * Copyright (c) 2008-2018 the MRtrix3 contributors.
*
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
- * file, you can obtain one at http://mozilla.org/MPL/2.0/.
+ * file, you can obtain one at http://mozilla.org/MPL/2.0/
*
- * MRtrix is distributed in the hope that it will be useful,
+ * MRtrix3 is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty
* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
*
- * For more details, see http://www.mrtrix.org/.
+ * For more details, see http://www.mrtrix.org/
*/
diff --git a/cmd/mrfilter.cpp b/cmd/mrfilter.cpp
index 0ac8e201dc..af46e20458 100644
--- a/cmd/mrfilter.cpp
+++ b/cmd/mrfilter.cpp
@@ -1,14 +1,15 @@
-/* Copyright (c) 2008-2017 the MRtrix3 contributors.
+/*
+ * Copyright (c) 2008-2018 the MRtrix3 contributors.
*
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
- * file, you can obtain one at http://mozilla.org/MPL/2.0/.
+ * file, you can obtain one at http://mozilla.org/MPL/2.0/
*
- * MRtrix is distributed in the hope that it will be useful,
+ * MRtrix3 is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty
* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
*
- * For more details, see http://www.mrtrix.org/.
+ * For more details, see http://www.mrtrix.org/
*/
@@ -154,7 +155,7 @@ void run () {
filter.datatype() = DataType::Float32;
auto output = Image::create (argument[2], filter);
for (auto l = Loop (output) (temp, output); l; ++l)
- output.value() = std::abs (cdouble(temp.value()));
+ output.value() = abs (cdouble(temp.value()));
} else {
auto output = Image::create (argument[2], filter);
filter (input, output);
diff --git a/cmd/mrhistmatch.cpp b/cmd/mrhistmatch.cpp
index 3b49588ec6..4cb65348eb 100644
--- a/cmd/mrhistmatch.cpp
+++ b/cmd/mrhistmatch.cpp
@@ -1,14 +1,15 @@
-/* Copyright (c) 2008-2017 the MRtrix3 contributors.
+/*
+ * Copyright (c) 2008-2018 the MRtrix3 contributors.
*
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
- * file, you can obtain one at http://mozilla.org/MPL/2.0/.
+ * file, you can obtain one at http://mozilla.org/MPL/2.0/
*
- * MRtrix is distributed in the hope that it will be useful,
+ * MRtrix3 is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty
* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
*
- * For more details, see http://www.mrtrix.org/.
+ * For more details, see http://www.mrtrix.org/
*/
@@ -27,30 +28,32 @@
using namespace MR;
using namespace App;
+const char* choices[] = { "scale", "linear", "nonlinear", nullptr };
+
void usage () {
- AUTHOR = "Robert E. Smith (robert.smith@florey.edu.au";
+ AUTHOR = "Robert E. Smith (robert.smith@florey.edu.au)";
- SYNOPSIS = "Modify the intensities of one image to match the histogram of another via a non-linear transform";
+ SYNOPSIS = "Modify the intensities of one image to match the histogram of another";
ARGUMENTS
+ + Argument ("type", "type of histogram matching to perform; options are: " + join(choices, ",")).type_choice (choices)
+ Argument ("input", "the input image to be modified").type_image_in ()
+ Argument ("target", "the input image from which to derive the target histogram").type_image_in()
+ Argument ("output", "the output image").type_image_out();
OPTIONS
+ + OptionGroup ("Image masking options")
+ Option ("mask_input", "only generate input histogram based on a specified binary mask image")
+ Argument ("image").type_image_in ()
+ Option ("mask_target", "only generate target histogram based on a specified binary mask image")
+ Argument ("image").type_image_in ()
- // TODO Remove before release
- + Option ("cdfs", "output the histogram CDFs to a text file (for debugging).")
- + Argument ("path").type_file_out ()
-
+ + OptionGroup ("Non-linear histogram matching options")
+ Option ("bins", "the number of bins to use to generate the histograms")
+ Argument ("num").type_integer (2);
+
REFERENCES
+ "* If using inverse contrast normalization for inter-modal (DWI - T1) registration:\n"
"Bhushan, C.; Haldar, J. P.; Choi, S.; Joshi, A. A.; Shattuck, D. W. & Leahy, R. M. "
@@ -61,34 +64,102 @@ void usage () {
-using value_type = default_type;
-
-
-void run ()
+void match_linear (Image<float>& input,
+                   Image<float>& target,
+                   Image<bool>& mask_input,
+                   Image<bool>& mask_target,
+                   const bool estimate_intercept)
{
+  vector<float> input_data, target_data;
+ {
+ ProgressBar progress ("Loading & sorting image data", 4);
+ if (mask_input.valid()) {
+ for (auto l = Loop(input) (input, mask_input); l; ++l) {
+      if (mask_input.value() && std::isfinite (static_cast<float>(input.value())))
+ input_data.push_back (input.value());
+ }
+ } else {
+ for (auto l = Loop(input) (input); l; ++l) {
+        if (std::isfinite (static_cast<float>(input.value())))
+ input_data.push_back (input.value());
+ }
+ }
+ ++progress;
+ if (mask_target.valid()) {
+ for (auto l = Loop(target) (target, mask_target); l; ++l) {
+      if (mask_target.value() && std::isfinite (static_cast<float>(target.value())))
+ target_data.push_back (target.value());
+ }
+ } else {
+ for (auto l = Loop(target) (target); l; ++l) {
+        if (std::isfinite (static_cast<float>(target.value())))
+ target_data.push_back (target.value());
+ }
+ }
+ ++progress;
+ std::sort (input_data.begin(), input_data.end());
+ ++progress;
+ std::sort (target_data.begin(), target_data.end());
+ }
- auto input = Image::open (argument[0]);
- auto target = Image::open (argument[1]);
+ // Ax=b
+ // A: Input data
+ // x: Model parameters; in the "scale" case, it's a single multiplier; if "linear", include a column of ones and estimate an intercept
+ // b: Output data (or actually, interpolated histogram-matched output data)
+  Eigen::Matrix<default_type, Eigen::Dynamic, Eigen::Dynamic> input_matrix (input_data.size(), estimate_intercept ? 2 : 1);
+  Eigen::Matrix<default_type, Eigen::Dynamic, 1> output_vector (input_data.size());
+ for (size_t input_index = 0; input_index != input_data.size()-1; ++input_index) {
+ input_matrix(input_index, 0) = input_data[input_index];
+ const default_type output_position = (target_data.size()-1) * (default_type(input_index) / default_type(input_data.size()-1));
+ const size_t target_index_lower = std::floor (output_position);
+ const default_type mu = output_position - default_type(target_index_lower);
+ output_vector[input_index] = ((1.0-mu)*target_data[target_index_lower] + mu*target_data[target_index_lower+1]);
+ }
+ input_matrix(input_data.size()-1, 0) = input_data.back();
+ output_vector[input_data.size()-1] = target_data.back();
+ if (estimate_intercept)
+ input_matrix.col(1).fill (1.0f);
- if (input.ndim() > 3 || target.ndim() > 3)
- throw Exception ("mrhistmatch currently only works on 3D images");
+ auto parameters = input_matrix.fullPivLu().solve (output_vector).eval();
+
+ Header H (input);
+ H.datatype() = DataType::Float32;
+ H.datatype().set_byte_order_native();
+ H.keyval()["mrhistmatch_scale"] = str(parameters[0]);
+ if (estimate_intercept) {
+ CONSOLE ("Estimated linear transform is: " + str(parameters[0]) + "x + " + str(parameters[1]));
+ H.keyval()["mrhistmatch_offset"] = str(parameters[1]);
+    auto output = Image<float>::create (argument[3], H);
+    for (auto l = Loop("Writing output image data", input) (input, output); l; ++l) {
+      if (std::isfinite(static_cast<float>(input.value()))) {
+ output.value() = parameters[0]*input.value() + parameters[1];
+ } else {
+ output.value() = input.value();
+ }
+ }
+ } else {
+ CONSOLE ("Estimated scale factor is " + str(parameters[0]));
+    auto output = Image<float>::create (argument[3], H);
+    for (auto l = Loop("Writing output image data", input) (input, output); l; ++l) {
+      if (std::isfinite(static_cast<float>(input.value()))) {
+ output.value() = input.value() * parameters[0];
+ } else {
+ output.value() = input.value();
+ }
+ }
+ }
+}
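(Editorial aside, not part of the patch: the Ax=b comment in match_linear above can be made concrete with a small standalone Eigen sketch on hypothetical data; the first column of the design matrix holds the input intensities and, when an intercept is estimated, a second column of ones is appended before the least-squares solve.)

    #include <iostream>
    #include <Eigen/Dense>

    int main ()
    {
      // hypothetical sorted input and matched target intensities (same length)
      Eigen::VectorXd input (4), target (4);
      input  << 1.0, 2.0, 3.0, 4.0;
      target << 3.0, 5.0, 7.0, 9.0;   // generated as target = 2*input + 1

      // design matrix A: column 0 = input data, column 1 = ones (intercept term)
      Eigen::MatrixXd A (input.size(), 2);
      A.col(0) = input;
      A.col(1).setOnes();

      // least-squares solve of Ax = b for x = [scale, offset]
      Eigen::VectorXd x = A.colPivHouseholderQr().solve (target);
      std::cout << "scale = " << x[0] << ", offset = " << x[1] << "\n";   // prints 2 and 1
      return 0;
    }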
- const size_t nbins = get_option_value ("bins", 0);
- Image mask_input, mask_target;
- auto opt = get_options ("mask_input");
- if (opt.size()) {
- mask_input = Image::open (opt[0][0]);
- check_dimensions (input, mask_input);
- }
- opt = get_options ("mask_target");
- if (opt.size()) {
- mask_target = Image::open (opt[0][0]);
- check_dimensions (target, mask_target);
- }
+void match_nonlinear (Image<float>& input,
+                      Image<float>& target,
+                      Image<bool>& mask_input,
+                      Image<bool>& mask_target,
+                      const size_t nbins)
+{
Algo::Histogram::Calibrator calib_input (nbins, true);
Algo::Histogram::calibrate (calib_input, input, mask_input);
INFO ("Input histogram ranges from " + str(calib_input.get_min()) + " to " + str(calib_input.get_max()) + "; using " + str(calib_input.get_num_bins()) + " bins");
@@ -102,18 +173,54 @@ void run ()
// Non-linear intensity mapping determined within this class
Algo::Histogram::Matcher matcher (hist_input, hist_target);
- // Generate the output image
Header H (input);
H.datatype() = DataType::Float32;
H.datatype().set_byte_order_native();
-  auto output = Image<float>::create (argument[2], H);
-  for (auto l = Loop(input) (input, output); l; ++l) {
-    if (std::isfinite(static_cast<float>(input.value()))) {
+  auto output = Image<float>::create (argument[3], H);
+  for (auto l = Loop("Writing output data", input) (input, output); l; ++l) {
+    if (std::isfinite (static_cast<float>(input.value()))) {
output.value() = matcher (input.value());
} else {
- output.value() = 0.0;
+ output.value() = input.value();
}
}
+}
+
+
+
+
+
+void run ()
+{
+  auto input = Image<float>::open (argument[1]);
+  auto target = Image<float>::open (argument[2]);
+ if (input.ndim() > 3 || target.ndim() > 3)
+ throw Exception ("mrhistmatch currently only works on 3D images");
+
+  Image<bool> mask_input, mask_target;
+ auto opt = get_options ("mask_input");
+ if (opt.size()) {
+    mask_input = Image<bool>::open (opt[0][0]);
+ check_dimensions (input, mask_input);
+ }
+ opt = get_options ("mask_target");
+ if (opt.size()) {
+    mask_target = Image<bool>::open (opt[0][0]);
+ check_dimensions (target, mask_target);
+ }
+ switch (int(argument[0])) {
+ case 0: // Scale
+ match_linear (input, target, mask_input, mask_target, false);
+ break;
+ case 1: // Linear
+ match_linear (input, target, mask_input, mask_target, true);
+ break;
+ case 2: // Non-linear
+ match_nonlinear (input, target, mask_input, mask_target, get_option_value ("bins", 0));
+ break;
+ default:
+ throw Exception ("Undefined histogram matching type");
+ }
}
diff --git a/cmd/mrhistogram.cpp b/cmd/mrhistogram.cpp
index 8a1e9a5c24..18f9da4a44 100644
--- a/cmd/mrhistogram.cpp
+++ b/cmd/mrhistogram.cpp
@@ -1,14 +1,15 @@
-/* Copyright (c) 2008-2017 the MRtrix3 contributors.
+/*
+ * Copyright (c) 2008-2018 the MRtrix3 contributors.
*
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
- * file, you can obtain one at http://mozilla.org/MPL/2.0/.
+ * file, you can obtain one at http://mozilla.org/MPL/2.0/
*
- * MRtrix is distributed in the hope that it will be useful,
+ * MRtrix3 is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty
* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
*
- * For more details, see http://www.mrtrix.org/.
+ * For more details, see http://www.mrtrix.org/
*/
diff --git a/cmd/mrinfo.cpp b/cmd/mrinfo.cpp
index 40ac83da44..32d1861016 100644
--- a/cmd/mrinfo.cpp
+++ b/cmd/mrinfo.cpp
@@ -1,14 +1,15 @@
-/* Copyright (c) 2008-2017 the MRtrix3 contributors.
+/*
+ * Copyright (c) 2008-2018 the MRtrix3 contributors.
*
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
- * file, you can obtain one at http://mozilla.org/MPL/2.0/.
+ * file, you can obtain one at http://mozilla.org/MPL/2.0/
*
- * MRtrix is distributed in the hope that it will be useful,
+ * MRtrix3 is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty
* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
*
- * For more details, see http://www.mrtrix.org/.
+ * For more details, see http://www.mrtrix.org/
*/
@@ -31,6 +32,19 @@ using namespace App;
const OptionGroup GradImportOptions = DWI::GradImportOptions();
const OptionGroup GradExportOptions = DWI::GradExportOptions();
+const OptionGroup FieldExportOptions = OptionGroup ("Options for exporting image header fields")
+
+ + Option ("property", "any text properties embedded in the image header under the "
+ "specified key (use 'all' to list all keys found)").allow_multiple()
+ + Argument ("key").type_text()
+
+ + Option ("json_keyval", "export header key/value entries to a JSON file")
+ + Argument ("file").type_file_out()
+
+ + Option ("json_all", "export all header contents to a JSON file")
+ + Argument ("file").type_file_out();
+
+
void usage ()
{
@@ -61,21 +75,16 @@ void usage ()
+ Option ("format", "image file format")
+ Option ("ndim", "number of image dimensions")
+ Option ("size", "image size along each axis")
- + Option ("vox", "voxel size along each image dimension")
+ + Option ("spacing", "voxel spacing along each image dimension")
+ Option ("datatype", "data type used for image data storage")
- + Option ("stride", "data strides i.e. order and direction of axes data layout")
+ + Option ("strides", "data strides i.e. order and direction of axes data layout")
+ Option ("offset", "image intensity offset")
+ Option ("multiplier", "image intensity multiplier")
+ Option ("transform", "the voxel to image transformation")
- + NoRealignOption
+ + NoRealignOption
- + Option ("property", "any text properties embedded in the image header under the "
- "specified key (use 'all' to list all keys found)").allow_multiple()
- + Argument ("key").type_text()
-
- + Option ("json_export", "export header key/value entries to a JSON file")
- + Argument ("file").type_file_out()
+ + FieldExportOptions
+ GradImportOptions
+ Option ("raw_dwgrad",
@@ -87,8 +96,8 @@ void usage ()
+ GradExportOptions
+ Option ("dwgrad", "the diffusion-weighting gradient table, as stored in the header "
"(i.e. without any interpretation, scaling of b-values, or normalisation of gradient vectors)")
- + Option ("shells", "list the average b-value of each shell")
- + Option ("shellcounts", "list the number of volumes in each shell")
+ + Option ("shell_bvalues", "list the average b-value of each shell")
+ + Option ("shell_sizes", "list the number of volumes in each shell")
+ PhaseEncoding::ExportOptions
+ Option ("petable", "print the phase encoding table");
@@ -111,7 +120,7 @@ void print_dimensions (const Header& header)
std::cout << buffer << "\n";
}
-void print_vox (const Header& header)
+void print_spacing (const Header& header)
{
std::string buffer;
for (size_t i = 0; i < header.ndim(); ++i) {
@@ -133,15 +142,15 @@ void print_strides (const Header& header)
std::cout << buffer << "\n";
}
-void print_shells (const Header& header, const bool shells, const bool shellcounts)
+void print_shells (const Header& header, const bool shell_bvalues, const bool shell_sizes)
{
DWI::Shells dwshells (DWI::parse_DW_scheme (header));
- if (shells) {
+ if (shell_bvalues) {
for (size_t i = 0; i < dwshells.count(); i++)
std::cout << dwshells[i].get_mean() << " ";
std::cout << "\n";
}
- if (shellcounts) {
+ if (shell_sizes) {
for (size_t i = 0; i < dwshells.count(); i++)
std::cout << dwshells[i].count() << " ";
std::cout << "\n";
@@ -181,6 +190,67 @@ void print_properties (const Header& header, const std::string& key, const size_
}
}
+template <class JSON>
+void keyval2json (const Header& header, JSON& json)
+{
+ for (const auto& kv : header.keyval()) {
+ // Text entries that in fact contain matrix / vector data will be
+ // converted to numerical matrices / vectors and written as such
+ try {
+ const auto M = parse_matrix (kv.second);
+ if (M.rows() == 1 && M.cols() == 1)
+ throw Exception ("Single scalar value rather than a matrix");
+ for (ssize_t row = 0; row != M.rows(); ++row) {
+          vector<default_type> data (M.cols());
+ for (ssize_t i = 0; i != M.cols(); ++i)
+ data[i] = M (row, i);
+ if (json.find (kv.first) == json.end())
+ json[kv.first] = { data };
+ else
+ json[kv.first].push_back (data);
+ }
+ } catch (...) {
+ if (json.find (kv.first) == json.end()) {
+ json[kv.first] = kv.second;
+ } else if (json[kv.first] != kv.second) {
+ // If the value for this key differs between images, turn the JSON entry into an array
+ if (json[kv.first].is_array())
+ json[kv.first].push_back (kv.second);
+ else
+ json[kv.first] = { json[kv.first], kv.second };
+ }
+ }
+ }
+}
+
+void header2json (const Header& header, nlohmann::json& json)
+{
+ // Capture _all_ header fields, not just the optional key-value pairs
+ json["name"] = header.name();
+  vector<size_t> size (header.ndim());
+  vector<default_type> spacing (header.ndim());
+ for (size_t axis = 0; axis != header.ndim(); ++axis) {
+ size[axis] = header.size (axis);
+ spacing[axis] = header.spacing (axis);
+ }
+ json["size"] = size;
+ json["spacing"] = spacing;
+  vector<ssize_t> strides (Stride::get (header));
+ Stride::symbolise (strides);
+ json["strides"] = strides;
+ json["format"] = header.format();
+ json["datatype"] = header.datatype().specifier();
+ json["intensity_offset"] = header.intensity_offset();
+ json["intensity_scale"] = header.intensity_scale();
+ const transform_type& T (header.transform());
+ json["transform"] = { { T(0,0), T(0,1), T(0,2), T(0,3) },
+ { T(1,0), T(1,1), T(1,2), T(1,3) },
+ { T(2,0), T(2,1), T(2,2), T(2,3) },
+ { 0.0, 0.0, 0.0, 1.0 } };
+ // Load key-value entries into a nested keyval.* member
+ keyval2json (header, json["keyval"]);
+}
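(Editorial aside, not part of the patch: the behaviour of keyval2json above, where matrix-like text entries become nested numeric arrays while ordinary text stays a string, can be sketched with a few standalone lines against the same nlohmann::json library; the key names below are hypothetical.)

    #include <iostream>
    #include <vector>
    #include <nlohmann/json.hpp>

    int main ()
    {
      nlohmann::json json;
      // plain text entry: stored as a string
      json["comments"] = "acquired on scanner A";
      // matrix-like entry: appended row by row as arrays of numbers
      json["pe_scheme"].push_back (std::vector<double> { 0.0,  1.0, 0.0, 0.05 });
      json["pe_scheme"].push_back (std::vector<double> { 0.0, -1.0, 0.0, 0.05 });
      std::cout << json.dump (4) << "\n";
      return 0;
    }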
+
@@ -198,50 +268,51 @@ void run ()
if (export_pe && argument.size() > 1)
throw Exception ("can only export phase encoding table to file if a single input image is provided");
-  std::unique_ptr<nlohmann::json> json (get_options ("json_export").size() ? new nlohmann::json : nullptr);
+  std::unique_ptr<nlohmann::json> json_keyval (get_options ("json_keyval").size() ? new nlohmann::json : nullptr);
+  std::unique_ptr<nlohmann::json> json_all (get_options ("json_all").size() ? new nlohmann::json : nullptr);
if (get_options ("norealign").size())
Header::do_not_realign_transform = true;
- const bool format = get_options("format") .size();
- const bool ndim = get_options("ndim") .size();
- const bool size = get_options("size") .size();
- const bool vox = get_options("vox") .size();
- const bool datatype = get_options("datatype") .size();
- const bool stride = get_options("stride") .size();
- const bool offset = get_options("offset") .size();
- const bool multiplier = get_options("multiplier") .size();
- const auto properties = get_options("property");
- const bool transform = get_options("transform") .size();
- const bool dwgrad = get_options("dwgrad") .size();
- const bool shells = get_options("shells") .size();
- const bool shellcounts = get_options("shellcounts") .size();
- const bool raw_dwgrad = get_options("raw_dwgrad") .size();
- const bool petable = get_options("petable") .size();
-
- const bool print_full_header = !(format || ndim || size || vox || datatype || stride ||
- offset || multiplier || properties.size() || transform || dwgrad || export_grad || shells || shellcounts || export_pe || petable);
-
- Eigen::IOFormat fmt(Eigen::FullPrecision, 0, ", ", "\n", "", "", "", "\n");
+ const bool format = get_options("format") .size();
+ const bool ndim = get_options("ndim") .size();
+ const bool size = get_options("size") .size();
+ const bool spacing = get_options("spacing") .size();
+ const bool datatype = get_options("datatype") .size();
+ const bool strides = get_options("strides") .size();
+ const bool offset = get_options("offset") .size();
+ const bool multiplier = get_options("multiplier") .size();
+ const auto properties = get_options("property");
+ const bool transform = get_options("transform") .size();
+ const bool dwgrad = get_options("dwgrad") .size();
+ const bool shell_bvalues = get_options("shell_bvalues") .size();
+ const bool shell_sizes = get_options("shell_sizes") .size();
+ const bool raw_dwgrad = get_options("raw_dwgrad") .size();
+ const bool petable = get_options("petable") .size();
+
+ const bool print_full_header = !(format || ndim || size || spacing || datatype || strides ||
+ offset || multiplier || properties.size() || transform ||
+ dwgrad || export_grad || shell_bvalues || shell_sizes || export_pe || petable ||
+ json_keyval || json_all);
for (size_t i = 0; i < argument.size(); ++i) {
auto header = Header::open (argument[i]);
if (raw_dwgrad)
DWI::set_DW_scheme (header, DWI::get_DW_scheme (header));
- else if (export_grad || check_option_group (GradImportOptions) || dwgrad || shells || shellcounts)
+ else if (export_grad || check_option_group (GradImportOptions) || dwgrad || shell_bvalues || shell_sizes)
DWI::set_DW_scheme (header, DWI::get_valid_DW_scheme (header, true));
if (format) std::cout << header.format() << "\n";
if (ndim) std::cout << header.ndim() << "\n";
if (size) print_dimensions (header);
- if (vox) print_vox (header);
+ if (spacing) print_spacing (header);
if (datatype) std::cout << (header.datatype().specifier() ? header.datatype().specifier() : "invalid") << "\n";
- if (stride) print_strides (header);
+ if (strides) print_strides (header);
if (offset) std::cout << header.intensity_offset() << "\n";
if (multiplier) std::cout << header.intensity_scale() << "\n";
if (transform) print_transform (header);
if (dwgrad) std::cout << DWI::get_DW_scheme (header) << "\n";
- if (shells || shellcounts) print_shells (header, shells, shellcounts);
+ if (shell_bvalues || shell_sizes) print_shells (header, shell_bvalues, shell_sizes);
if (petable) std::cout << PhaseEncoding::get_scheme (header) << "\n";
for (size_t n = 0; n < properties.size(); ++n)
@@ -250,29 +321,28 @@ void run ()
DWI::export_grad_commandline (header);
PhaseEncoding::export_commandline (header);
- if (json) {
- for (const auto& kv : header.keyval()) {
- if (json->find (kv.first) == json->end()) {
- (*json)[kv.first] = kv.second;
- } else if ((*json)[kv.first] != kv.second) {
- // If the value for this key differs between images, turn the JSON entry into an array
- if ((*json)[kv.first].is_array())
- (*json)[kv.first].push_back (kv.second);
- else
- (*json)[kv.first] = { (*json)[kv.first], kv.second };
- }
- }
- }
+ if (json_keyval)
+ keyval2json (header, *json_keyval);
+
+ if (json_all)
+ header2json (header, *json_all);
if (print_full_header)
std::cout << header.description (get_options ("all").size());
}
- if (json) {
- auto opt = get_options ("json_export");
+ if (json_keyval) {
+ auto opt = get_options ("json_keyval");
+ assert (opt.size());
+ File::OFStream out (opt[0][0]);
+ out << json_keyval->dump(4) << "\n";
+ }
+
+ if (json_all) {
+ auto opt = get_options ("json_all");
assert (opt.size());
File::OFStream out (opt[0][0]);
- out << json->dump(4) << "\n";
+ out << json_all->dump(4) << "\n";
}
}
diff --git a/cmd/mrmath.cpp b/cmd/mrmath.cpp
index 5a32ea6e1c..cc8957a6c1 100644
--- a/cmd/mrmath.cpp
+++ b/cmd/mrmath.cpp
@@ -1,14 +1,15 @@
-/* Copyright (c) 2008-2017 the MRtrix3 contributors.
+/*
+ * Copyright (c) 2008-2018 the MRtrix3 contributors.
*
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
- * file, you can obtain one at http://mozilla.org/MPL/2.0/.
+ * file, you can obtain one at http://mozilla.org/MPL/2.0/
*
- * MRtrix is distributed in the hope that it will be useful,
+ * MRtrix3 is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty
* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
*
- * For more details, see http://www.mrtrix.org/.
+ * For more details, see http://www.mrtrix.org/
*/
@@ -81,16 +82,16 @@ using value_type = float;
class Mean { NOMEMALIGN
public:
Mean () : sum (0.0), count (0) { }
- void operator() (value_type val) {
+ void operator() (value_type val) {
if (std::isfinite (val)) {
sum += val;
++count;
}
}
- value_type result () const {
+ value_type result () const {
if (!count)
return NAN;
- return sum / count;
+ return sum / count;
}
double sum;
size_t count;
@@ -99,25 +100,25 @@ class Mean { NOMEMALIGN
class Median { NOMEMALIGN
public:
Median () { }
- void operator() (value_type val) {
+ void operator() (value_type val) {
if (!std::isnan (val))
values.push_back(val);
}
- value_type result () {
+ value_type result () {
return Math::median(values);
}
-    vector<value_type> values;
+    vector<value_type> values;
};
class Sum { NOMEMALIGN
public:
Sum () : sum (0.0) { }
- void operator() (value_type val) {
- if (std::isfinite (val))
+ void operator() (value_type val) {
+ if (std::isfinite (val))
sum += val;
}
- value_type result () const {
- return sum;
+ value_type result () const {
+ return sum;
}
double sum;
};
@@ -177,15 +178,15 @@ class NORM2 { NOMEMALIGN
class Var { NOMEMALIGN
public:
Var () : sum (0.0), sum_sqr (0.0), count (0) { }
- void operator() (value_type val) {
+ void operator() (value_type val) {
if (std::isfinite (val)) {
sum += val;
sum_sqr += Math::pow2 (val);
++count;
}
}
- value_type result () const {
- if (count < 2)
+ value_type result () const {
+ if (count < 2)
return NAN;
       return (sum_sqr - Math::pow2 (sum) / static_cast<double> (count)) / (static_cast<double> (count) - 1.0);
}
@@ -204,8 +205,8 @@ class Std : public Var { NOMEMALIGN
class Min { NOMEMALIGN
public:
Min () : min (std::numeric_limits::infinity()) { }
- void operator() (value_type val) {
- if (std::isfinite (val) && val < min)
+ void operator() (value_type val) {
+ if (std::isfinite (val) && val < min)
min = val;
}
value_type result () const { return std::isfinite (min) ? min : NAN; }
@@ -216,8 +217,8 @@ class Min { NOMEMALIGN
class Max { NOMEMALIGN
public:
Max () : max (-std::numeric_limits::infinity()) { }
- void operator() (value_type val) {
- if (std::isfinite (val) && val > max)
+ void operator() (value_type val) {
+ if (std::isfinite (val) && val > max)
max = val;
}
value_type result () const { return std::isfinite (max) ? max : NAN; }
@@ -228,9 +229,9 @@ class Max { NOMEMALIGN
class AbsMax { NOMEMALIGN
public:
AbsMax () : max (-std::numeric_limits::infinity()) { }
- void operator() (value_type val) {
- if (std::isfinite (val) && std::abs(val) > max)
- max = std::abs(val);
+ void operator() (value_type val) {
+ if (std::isfinite (val) && abs(val) > max)
+ max = abs(val);
}
value_type result () const { return std::isfinite (max) ? max : NAN; }
value_type max;
@@ -240,8 +241,8 @@ class MagMax { NOMEMALIGN
public:
MagMax () : max (-std::numeric_limits::infinity()) { }
MagMax (const int i) : max (-std::numeric_limits::infinity()) { }
- void operator() (value_type val) {
- if (std::isfinite (val) && (!std::isfinite (max) || std::abs(val) > std::abs (max)))
+ void operator() (value_type val) {
+ if (std::isfinite (val) && (!std::isfinite (max) || abs(val) > abs (max)))
max = val;
}
value_type result () const { return std::isfinite (max) ? max : NAN; }
@@ -274,6 +275,7 @@ class AxisKernel { NOMEMALIGN
class ImageKernelBase { NOMEMALIGN
public:
+ virtual ~ImageKernelBase () { }
virtual void process (Header& image_in) = 0;
virtual void write_back (Image& out) = 0;
};
@@ -284,26 +286,26 @@ template
class ImageKernel : public ImageKernelBase { NOMEMALIGN
protected:
class InitFunctor { NOMEMALIGN
- public:
- template
- void operator() (ImageType& out) const { out.value() = Operation(); }
+ public:
+ template
+ void operator() (ImageType& out) const { out.value() = Operation(); }
};
class ProcessFunctor { NOMEMALIGN
- public:
+ public:
template
- void operator() (ImageType1& out, ImageType2& in) const {
- Operation op = out.value();
- op (in.value());
+ void operator() (ImageType1& out, ImageType2& in) const {
+ Operation op = out.value();
+ op (in.value());
out.value() = op;
- }
+ }
};
class ResultFunctor { NOMEMALIGN
- public:
+ public:
template
void operator() (ImageType1& out, ImageType2& in) const {
- Operation op = in.value();
- out.value() = op.result();
- }
+ Operation op = in.value();
+ out.value() = op.result();
+ }
};
public:
@@ -444,7 +446,7 @@ void run ()
// Feed the input images to the kernel one at a time
{
- ProgressBar progress (std::string("computing ") + operations[op] + " across "
+ ProgressBar progress (std::string("computing ") + operations[op] + " across "
+ str(headers_in.size()) + " images", num_inputs);
for (size_t i = 0; i != headers_in.size(); ++i) {
assert (headers_in[i].valid());
@@ -455,7 +457,7 @@ void run ()
}
auto out = Header::create (output_path, header).get_image();
- kernel->write_back (out);
+ kernel->write_back (out);
}
}
diff --git a/cmd/mrmesh.cpp b/cmd/mrmesh.cpp
deleted file mode 100644
index 15225924c5..0000000000
--- a/cmd/mrmesh.cpp
+++ /dev/null
@@ -1,75 +0,0 @@
-/* Copyright (c) 2008-2017 the MRtrix3 contributors.
- *
- * This Source Code Form is subject to the terms of the Mozilla Public
- * License, v. 2.0. If a copy of the MPL was not distributed with this
- * file, you can obtain one at http://mozilla.org/MPL/2.0/.
- *
- * MRtrix is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
- *
- * For more details, see http://www.mrtrix.org/.
- */
-
-
-#include "command.h"
-
-#include "image.h"
-#include "filter/optimal_threshold.h"
-#include "surface/mesh.h"
-#include "surface/algo/image2mesh.h"
-
-
-
-using namespace MR;
-using namespace App;
-
-
-
-
-
-void usage ()
-{
- AUTHOR = "Robert E. Smith (robert.smith@florey.edu.au)";
-
- SYNOPSIS = "Generate a mesh file from an image";
-
- ARGUMENTS
- + Argument ("input", "the input image.").type_image_in ()
- + Argument ("output", "the output mesh file.").type_file_out ();
-
- OPTIONS
- + Option ("blocky", "generate a \'blocky\' mesh that precisely represents the voxel edges")
-
- + Option ("threshold", "manually set the intensity threshold at which the mesh will be generated "
- "(if omitted, a threshold will be determined automatically)")
- + Argument ("value").type_float();
-}
-
-
-void run ()
-{
-
- Surface::Mesh mesh;
-
- if (get_options ("blocky").size()) {
-
- auto input = Image::open (argument[0]);
- Surface::Algo::image2mesh_blocky (input, mesh);
-
- } else {
- default_type threshold = 0.0;
- auto input = Image::open (argument[0]);
- auto opt = get_options("threshold");
- if ( opt.size() ) {
- threshold = (default_type) opt[0][0];
- } else {
- threshold = Filter::estimate_optimal_threshold (input);
- }
- Surface::Algo::image2mesh_mc (input, mesh, threshold);
-
- }
-
- mesh.save (argument[1]);
-
-}
diff --git a/cmd/mrmetric.cpp b/cmd/mrmetric.cpp
index 3bfd2cd935..c423a3e85b 100644
--- a/cmd/mrmetric.cpp
+++ b/cmd/mrmetric.cpp
@@ -1,14 +1,15 @@
-/* Copyright (c) 2008-2017 the MRtrix3 contributors.
+/*
+ * Copyright (c) 2008-2018 the MRtrix3 contributors.
*
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
- * file, you can obtain one at http://mozilla.org/MPL/2.0/.
+ * file, you can obtain one at http://mozilla.org/MPL/2.0/
*
- * MRtrix is distributed in the hope that it will be useful,
+ * MRtrix3 is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty
* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
*
- * For more details, see http://www.mrtrix.org/.
+ * For more details, see http://www.mrtrix.org/
*/
diff --git a/cmd/mrpad.cpp b/cmd/mrpad.cpp
index 075af2c3dd..eea75a5816 100644
--- a/cmd/mrpad.cpp
+++ b/cmd/mrpad.cpp
@@ -1,14 +1,15 @@
-/* Copyright (c) 2008-2017 the MRtrix3 contributors.
+/*
+ * Copyright (c) 2008-2018 the MRtrix3 contributors.
*
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
- * file, you can obtain one at http://mozilla.org/MPL/2.0/.
+ * file, you can obtain one at http://mozilla.org/MPL/2.0/
*
- * MRtrix is distributed in the hope that it will be useful,
+ * MRtrix3 is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty
* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
*
- * For more details, see http://www.mrtrix.org/.
+ * For more details, see http://www.mrtrix.org/
*/
diff --git a/cmd/mrregister.cpp b/cmd/mrregister.cpp
index dee64e8f8a..e0903d9b34 100644
--- a/cmd/mrregister.cpp
+++ b/cmd/mrregister.cpp
@@ -1,14 +1,15 @@
-/* Copyright (c) 2008-2017 the MRtrix3 contributors.
+/*
+ * Copyright (c) 2008-2018 the MRtrix3 contributors.
*
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
- * file, you can obtain one at http://mozilla.org/MPL/2.0/.
+ * file, you can obtain one at http://mozilla.org/MPL/2.0/
*
- * MRtrix is distributed in the hope that it will be useful,
+ * MRtrix3 is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty
* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
*
- * For more details, see http://www.mrtrix.org/.
+ * For more details, see http://www.mrtrix.org/
*/
diff --git a/cmd/mrresize.cpp b/cmd/mrresize.cpp
index bbecdcea7a..9ce3e313de 100644
--- a/cmd/mrresize.cpp
+++ b/cmd/mrresize.cpp
@@ -1,14 +1,15 @@
-/* Copyright (c) 2008-2017 the MRtrix3 contributors.
+/*
+ * Copyright (c) 2008-2018 the MRtrix3 contributors.
*
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
- * file, you can obtain one at http://mozilla.org/MPL/2.0/.
+ * file, you can obtain one at http://mozilla.org/MPL/2.0/
*
- * MRtrix is distributed in the hope that it will be useful,
+ * MRtrix3 is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty
* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
*
- * For more details, see http://www.mrtrix.org/.
+ * For more details, see http://www.mrtrix.org/
*/
diff --git a/cmd/mrstats.cpp b/cmd/mrstats.cpp
index 1f8b948cc4..514cbfc59e 100644
--- a/cmd/mrstats.cpp
+++ b/cmd/mrstats.cpp
@@ -1,14 +1,15 @@
-/* Copyright (c) 2008-2017 the MRtrix3 contributors.
+/*
+ * Copyright (c) 2008-2018 the MRtrix3 contributors.
*
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
- * file, you can obtain one at http://mozilla.org/MPL/2.0/.
+ * file, you can obtain one at http://mozilla.org/MPL/2.0/
*
- * MRtrix is distributed in the hope that it will be useful,
+ * MRtrix3 is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty
* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
*
- * For more details, see http://www.mrtrix.org/.
+ * For more details, see http://www.mrtrix.org/
*/
diff --git a/cmd/mrthreshold.cpp b/cmd/mrthreshold.cpp
index 54a940d1d5..ea73158c17 100644
--- a/cmd/mrthreshold.cpp
+++ b/cmd/mrthreshold.cpp
@@ -1,14 +1,15 @@
-/* Copyright (c) 2008-2017 the MRtrix3 contributors.
+/*
+ * Copyright (c) 2008-2018 the MRtrix3 contributors.
*
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
- * file, you can obtain one at http://mozilla.org/MPL/2.0/.
+ * file, you can obtain one at http://mozilla.org/MPL/2.0/
*
- * MRtrix is distributed in the hope that it will be useful,
+ * MRtrix3 is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty
* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
*
- * For more details, see http://www.mrtrix.org/.
+ * For more details, see http://www.mrtrix.org/
*/
diff --git a/cmd/mrtransform.cpp b/cmd/mrtransform.cpp
index 9f9bec01e7..b645da9b7e 100644
--- a/cmd/mrtransform.cpp
+++ b/cmd/mrtransform.cpp
@@ -1,14 +1,15 @@
-/* Copyright (c) 2008-2017 the MRtrix3 contributors.
+/*
+ * Copyright (c) 2008-2018 the MRtrix3 contributors.
*
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
- * file, you can obtain one at http://mozilla.org/MPL/2.0/.
+ * file, you can obtain one at http://mozilla.org/MPL/2.0/
*
- * MRtrix is distributed in the hope that it will be useful,
+ * MRtrix3 is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty
* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
*
- * For more details, see http://www.mrtrix.org/.
+ * For more details, see http://www.mrtrix.org/
*/
@@ -39,7 +40,7 @@
using namespace MR;
using namespace App;
-const char* interp_choices[] = { "nearest", "linear", "cubic", "sinc", NULL };
+const char* interp_choices[] = { "nearest", "linear", "cubic", "sinc", nullptr };
void usage ()
{
@@ -60,7 +61,12 @@ void usage ()
+ "If a DW scheme is contained in the header (or specified separately), and "
"the number of directions matches the number of volumes in the images, any "
- "transformation applied using the -linear option will be also be applied to the directions.";
+ "transformation applied using the -linear option will be also be applied to the directions."
+
+ + "When the -template option is used to specify the target image grid, the "
+ "image provided via this option will not influence the axis data strides "
+ "of the output image; these are determined based on the input image, or the "
+ "input to the -strides option.";
REFERENCES
+ "* If FOD reorientation is being performed:\n"
@@ -174,6 +180,10 @@ void usage ()
+ DataType::options ()
+ + Stride::Options
+
+ + OptionGroup ("Additional generic options for mrtransform")
+
+ Option ("nan",
"Use NaN as the out of bounds value (Default: 0.0)");
}
@@ -206,6 +216,7 @@ void run ()
auto input_header = Header::open (argument[0]);
Header output_header (input_header);
output_header.datatype() = DataType::from_command_line (DataType::from ());
+ Stride::set_from_command_line (output_header);
// Linear
transform_type linear_transform;
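
As background to the stride-related additions above: an axis's data stride is the step through memory between adjacent voxel indices along that axis, which is why the output memory layout can follow the input image (or the -strides option) independently of the voxel grid supplied via -template. A self-contained sketch of the general concept only (illustrative; it does not reproduce MRtrix's stride handling or the -strides option's syntax):

#include <array>
#include <cstddef>
#include <iostream>

// Map a voxel index to a linear memory offset given per-axis strides
// (expressed here as raw element steps; axis 0 varies fastest).
int main () {
  const std::array<std::ptrdiff_t,3> stride { 1, 4, 16 };  // e.g. a 4x4x2 image
  const std::array<std::ptrdiff_t,3> voxel  { 2, 3, 1 };
  std::ptrdiff_t offset = 0;
  for (std::size_t axis = 0; axis < 3; ++axis)
    offset += voxel[axis] * stride[axis];
  std::cout << "offset of voxel (2,3,1): " << offset << "\n";  // 2 + 12 + 16 = 30
  return 0;
}
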
diff --git a/cmd/mrview.cpp b/cmd/mrview.cpp
index 9aa3668d8d..d77aa65b68 100644
--- a/cmd/mrview.cpp
+++ b/cmd/mrview.cpp
@@ -1,14 +1,15 @@
-/* Copyright (c) 2008-2017 the MRtrix3 contributors.
+/*
+ * Copyright (c) 2008-2018 the MRtrix3 contributors.
*
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
- * file, you can obtain one at http://mozilla.org/MPL/2.0/.
+ * file, you can obtain one at http://mozilla.org/MPL/2.0/
*
- * MRtrix is distributed in the hope that it will be useful,
+ * MRtrix3 is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty
* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
*
- * For more details, see http://www.mrtrix.org/.
+ * For more details, see http://www.mrtrix.org/
*/
@@ -27,7 +28,7 @@ using namespace App;
void usage ()
{
- AUTHOR =
+ AUTHOR =
"J-Donald Tournier (jdtournier@gmail.com), "
"Dave Raffelt (david.raffelt@florey.edu.au), "
"Robert E. Smith (robert.smith@florey.edu.au), "
@@ -42,21 +43,21 @@ void usage ()
"image menu, with the first listed displayed initially. Any subsequent "
"command-line options will be processed as if the corresponding action had "
"been performed through the GUI."
-
+
+ "Note that because images loaded as arguments (i.e. simply listed on the "
"command-line) are opened before the GUI is shown, subsequent actions to be "
"performed via the various command-line options must appear after the last "
"argument. This is to avoid confusion about which option will apply to which "
"image. If you need fine control over this, please use the -load or -select_image "
"options. For example:"
-
+
+ "$ mrview -load image1.mif -interpolation 0 -load image2.mif -interpolation 0"
-
+
+ "or"
-
+
+ "$ mrview image1.mif image2.mif -interpolation 0 -select_image 2 -interpolation 0";
- REFERENCES
+ REFERENCES
+ "Tournier, J.-D.; Calamante, F. & Connelly, A. " // Internal
"MRtrix: Diffusion tractography in crossing fiber regions. "
"Int. J. Imaging Syst. Technol., 2012, 22, 53-66";
diff --git a/cmd/mtbin.cpp b/cmd/mtbin.cpp
deleted file mode 100644
index 40d5bf6f3b..0000000000
--- a/cmd/mtbin.cpp
+++ /dev/null
@@ -1,373 +0,0 @@
-/* Copyright (c) 2008-2017 the MRtrix3 contributors.
- *
- * This Source Code Form is subject to the terms of the Mozilla Public
- * License, v. 2.0. If a copy of the MPL was not distributed with this
- * file, you can obtain one at http://mozilla.org/MPL/2.0/.
- *
- * MRtrix is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
- *
- * For more details, see http://www.mrtrix.org/.
- */
-
-
-#include "command.h"
-#include "image.h"
-#include "algo/loop.h"
-#include "adapter/extract.h"
-#include "filter/optimal_threshold.h"
-#include "filter/mask_clean.h"
-#include "filter/connected_components.h"
-#include "transform.h"
-#include "math/least_squares.h"
-#include "algo/threaded_copy.h"
-
-using namespace MR;
-using namespace App;
-
-#define DEFAULT_NORM_VALUE 0.282094
-#define DEFAULT_MAXITER_VALUE 100
-
-void usage ()
-{
- AUTHOR = "David Raffelt (david.raffelt@florey.edu.au), Rami Tabbara (rami.tabbara@florey.edu.au) and Thijs Dhollander (thijs.dhollander@gmail.com)";
-
- SYNOPSIS = "Multi-Tissue Bias field correction and Intensity Normalisation (WARNING: deprecated).";
-
- DESCRIPTION
- + "WARNING: this command is deprecated and may produce highly inappropriate results in several cases. Not recommended and at your own discretion. Please use the new mtnormalise command instead for reliable results.";
-
- ARGUMENTS
- + Argument ("input output", "list of all input and output tissue compartment files. See example usage in the description. "
- "Note that any number of tissues can be normalised").type_image_in().allow_multiple();
-
- OPTIONS
- + Option ("mask", "define the mask to compute the normalisation within. This option is mandatory.").required ()
- + Argument ("image").type_image_in ()
-
- + Option ("value", "specify the value to which the summed tissue compartments will be normalised to "
- "(Default: sqrt(1/(4*pi)) = " + str(DEFAULT_NORM_VALUE, 6) + ")")
- + Argument ("number").type_float ()
-
- + Option ("bias", "output the estimated bias field")
- + Argument ("image").type_image_out ()
-
- + Option ("independent", "intensity normalise each tissue type independently")
-
- + Option ("maxiter", "set the maximum number of iterations. Default(" + str(DEFAULT_MAXITER_VALUE) + "). "
- "It will stop before the max iterations if convergence is detected")
- + Argument ("number").type_integer()
-
- + Option ("check", "check the final mask used to compute the bias field. This mask excludes outlier regions ignored by the bias field fitting procedure. However, these regions are still corrected for bias fields based on the other image data.")
- + Argument ("image").type_image_out ()
-
- + Option ("override", "consciously use this deprecated command. Not recommended and at your own discretion.");
-}
-
-const int n_basis_vecs (20);
-
-
-FORCE_INLINE Eigen::MatrixXd basis_function (const Eigen::Vector3 pos) {
- double x = pos[0];
- double y = pos[1];
- double z = pos[2];
- Eigen::MatrixXd basis(n_basis_vecs, 1);
- basis(0) = 1.0;
- basis(1) = x;
- basis(2) = y;
- basis(3) = z;
- basis(4) = x * y;
- basis(5) = x * z;
- basis(6) = y * z;
- basis(7) = x * x;
- basis(8) = y * y;
- basis(9)= z * x;
- basis(10)= x * x * y;
- basis(11) = x * x * z;
- basis(12) = y * y * x;
- basis(13) = y * y * z;
- basis(14) = z * z * x;
- basis(15) = z * z * y;
- basis(16) = x * x * x;
- basis(17) = y * y * y;
- basis(18) = z * z * z;
- basis(19) = x * y * z;
- return basis;
-}
-
-// Currently not used, but keep if we want to make mask argument optional in the future
-FORCE_INLINE void compute_mask (Image& summed, Image