From 27f2d30d88f4dcd6a05044587385d1cddaf20871 Mon Sep 17 00:00:00 2001 From: Taylor Salo Date: Wed, 24 Apr 2019 11:37:00 -0400 Subject: [PATCH 1/4] Move statistical functions into new stats module. --- tedana/decomposition/eigendecomp.py | 5 +- tedana/io.py | 13 +- tedana/model/fit.py | 120 +------------- tedana/selection/select_comps.py | 5 +- tedana/stats.py | 147 ++++++++++++++++++ ...efeats2.py => test_stats_computefeats2.py} | 14 +- ...get_coeffs.py => test_stats_get_coeffs.py} | 22 +-- tedana/utils.py | 22 --- 8 files changed, 180 insertions(+), 168 deletions(-) create mode 100644 tedana/stats.py rename tedana/tests/{test_model_fit_computefeats2.py => test_stats_computefeats2.py} (82%) rename tedana/tests/{test_model_fit_get_coeffs.py => test_stats_get_coeffs.py} (79%) diff --git a/tedana/decomposition/eigendecomp.py b/tedana/decomposition/eigendecomp.py index 87c1c44cb..02d311c5e 100644 --- a/tedana/decomposition/eigendecomp.py +++ b/tedana/decomposition/eigendecomp.py @@ -11,6 +11,7 @@ from sklearn.decomposition import PCA from tedana import model, utils, io +from tedana.stats import getfbounds, computefeats2 from tedana.decomposition._utils import eimask from tedana.selection._utils import (getelbow_cons, getelbow) from tedana.due import due, BibTeX @@ -99,7 +100,7 @@ def kundu_tedpca(comptable, n_echos, kdaw, rdaw, stabilize=False): np.arange(len(lower_diff_varex_norm))[lower_diff_varex_norm >= varex_norm_thr][0] + 1] varex_norm_cum = np.cumsum(comptable['normalized variance explained']) - fmin, fmid, fmax = utils.getfbounds(n_echos) + fmin, fmid, fmax = getfbounds(n_echos) if int(kdaw) == -1: lim_idx = utils.andb([comptable['kappa'] < fmid, comptable['kappa'] > fmin]) == 2 @@ -352,7 +353,7 @@ def tedpca(catd, OCcatd, combmode, mask, t2s, t2sG, comp_maps = np.zeros((OCcatd.shape[0], comp_ts.shape[0])) for i_comp in range(comp_ts.shape[0]): temp_comp_ts = comp_ts[i_comp, :][:, None] - comp_map = utils.unmask(model.computefeats2(OCcatd, temp_comp_ts, mask), mask) + comp_map = utils.unmask(computefeats2(OCcatd, temp_comp_ts, mask), mask) comp_maps[:, i_comp] = np.squeeze(comp_map) io.filewrite(comp_maps, 'mepca_OC_components.nii', ref_img) diff --git a/tedana/io.py b/tedana/io.py index bf63e9936..980d386cd 100644 --- a/tedana/io.py +++ b/tedana/io.py @@ -10,7 +10,8 @@ from nilearn._utils import check_niimg from nilearn.image import new_img_like -from tedana import model, utils +from tedana import utils +from tedana.stats import computefeats2, get_coeffs LGR = logging.getLogger(__name__) @@ -42,8 +43,8 @@ def split_ts(data, mmix, mask, comptable): """ acc = comptable[comptable.classification == 'accepted'].index.values - cbetas = model.get_coeffs(data - data.mean(axis=-1, keepdims=True), - mmix, mask) + cbetas = get_coeffs(data - data.mean(axis=-1, keepdims=True), + mmix, mask) betas = cbetas[mask] if len(acc) != 0: hikts = utils.unmask(betas[:, acc].dot(mmix.T[acc, :]), mask) @@ -99,7 +100,7 @@ def write_split_ts(data, mmix, mask, comptable, ref_img, suffix=''): dmdata = mdata.T - mdata.T.mean(axis=0) # get variance explained by retained components - betas = model.get_coeffs(dmdata.T, mmix, mask=None) + betas = get_coeffs(dmdata.T, mmix, mask=None) varexpl = (1 - ((dmdata.T - betas.dot(mmix.T))**2.).sum() / (dmdata**2.).sum()) * 100 LGR.info('Variance explained by ICA decomposition: {:.02f}%'.format(varexpl)) @@ -161,7 +162,7 @@ def writefeats(data, mmix, mask, ref_img, suffix=''): """ # write feature versions of components - feats = utils.unmask(model.computefeats2(data, mmix, 
mask), mask) + feats = utils.unmask(computefeats2(data, mmix, mask), mask) fname = filewrite(feats, 'feats_{0}'.format(suffix), ref_img) return fname @@ -220,7 +221,7 @@ def writeresults(ts, mask, comptable, mmix, n_vols, ref_img): write_split_ts(ts, mmix, mask, comptable, ref_img, suffix='OC') - ts_B = model.get_coeffs(ts, mmix, mask) + ts_B = get_coeffs(ts, mmix, mask) fout = filewrite(ts_B, 'betas_OC', ref_img) LGR.info('Writing full ICA coefficient feature set: {}'.format(op.abspath(fout))) diff --git a/tedana/model/fit.py b/tedana/model/fit.py index b6f10010e..8b8d172e2 100644 --- a/tedana/model/fit.py +++ b/tedana/model/fit.py @@ -9,6 +9,7 @@ from scipy import stats from tedana import (combine, io, utils) +from tedana.stats import getfbounds, computefeats2, get_coeffs LGR = logging.getLogger(__name__) @@ -123,7 +124,7 @@ def fitmodels_direct(catd, mmix, mask, t2s, t2s_full, tes, combmode, ref_img, n_data_voxels = (t2s != 0).sum() mu = catd.mean(axis=-1, dtype=float) tes = np.reshape(tes, (n_echos, 1)) - fmin, _, _ = utils.getfbounds(n_echos) + fmin, _, _ = getfbounds(n_echos) # mask arrays mumask = mu[t2s != 0] @@ -287,120 +288,3 @@ def fitmodels_direct(catd, mmix, mask, t2s, t2s_full, tes, combmode, ref_img, seldict[vv] = eval(vv) return seldict, comptable, betas, mmix_new - - -def computefeats2(data, mmix, mask, normalize=True): - """ - Converts `data` to component space using `mmix` - - Parameters - ---------- - data : (S x T) array_like - Input data - mmix : (T [x C]) array_like - Mixing matrix for converting input data to component space, where `C` - is components and `T` is the same as in `data` - mask : (S,) array_like - Boolean mask array - normalize : bool, optional - Whether to z-score output. Default: True - - Returns - ------- - data_Z : (S x C) :obj:`numpy.ndarray` - Data in component space - """ - if data.ndim != 2: - raise ValueError('Parameter data should be 2d, not {0}d'.format(data.ndim)) - elif mmix.ndim not in [2]: - raise ValueError('Parameter mmix should be 2d, not ' - '{0}d'.format(mmix.ndim)) - elif mask.ndim != 1: - raise ValueError('Parameter mask should be 1d, not {0}d'.format(mask.ndim)) - elif data.shape[0] != mask.shape[0]: - raise ValueError('First dimensions (number of samples) of data ({0}) ' - 'and mask ({1}) do not match.'.format(data.shape[0], - mask.shape[0])) - elif data.shape[1] != mmix.shape[0]: - raise ValueError('Second dimensions (number of volumes) of data ({0}) ' - 'and mmix ({1}) do not match.'.format(data.shape[0], - mmix.shape[0])) - - # demean masked data - data_vn = stats.zscore(data[mask], axis=-1) - - # get betas of `data`~`mmix` and limit to range [-0.999, 0.999] - data_R = get_coeffs(data_vn, mmix, mask=None) - data_R[data_R < -0.999] = -0.999 - data_R[data_R > 0.999] = 0.999 - - # R-to-Z transform - data_Z = np.arctanh(data_R) - if data_Z.ndim == 1: - data_Z = np.atleast_2d(data_Z).T - - # normalize data - if normalize: - data_Zm = stats.zscore(data_Z, axis=0) - data_Z = data_Zm + (data_Z.mean(axis=0, keepdims=True) / - data_Z.std(axis=0, keepdims=True)) - return data_Z - - -def get_coeffs(data, X, mask=None, add_const=False): - """ - Performs least-squares fit of `X` against `data` - - Parameters - ---------- - data : (S [x E] x T) array_like - Array where `S` is samples, `E` is echoes, and `T` is time - X : (T [x C]) array_like - Array where `T` is time and `C` is predictor variables - mask : (S [x E]) array_like - Boolean mask array - add_const : bool, optional - Add intercept column to `X` before fitting. 
Default: False - - Returns - ------- - betas : (S [x E] x C) :obj:`numpy.ndarray` - Array of `S` sample betas for `C` predictors - """ - if data.ndim not in [2, 3]: - raise ValueError('Parameter data should be 2d or 3d, not {0}d'.format(data.ndim)) - elif X.ndim not in [2]: - raise ValueError('Parameter X should be 2d, not {0}d'.format(X.ndim)) - elif data.shape[-1] != X.shape[0]: - raise ValueError('Last dimension (dimension {0}) of data ({1}) does not ' - 'match first dimension of ' - 'X ({2})'.format(data.ndim, data.shape[-1], X.shape[0])) - - # mask data and flip (time x samples) - if mask is not None: - if mask.ndim not in [1, 2]: - raise ValueError('Parameter data should be 1d or 2d, not {0}d'.format(mask.ndim)) - elif data.shape[0] != mask.shape[0]: - raise ValueError('First dimensions of data ({0}) and mask ({1}) do not ' - 'match'.format(data.shape[0], mask.shape[0])) - mdata = data[mask, :].T - else: - mdata = data.T - - # coerce X to >=2d - X = np.atleast_2d(X) - - if len(X) == 1: - X = X.T - - if add_const: # add intercept, if specified - X = np.column_stack([X, np.ones((len(X), 1))]) - - betas = np.linalg.lstsq(X, mdata, rcond=None)[0].T - if add_const: # drop beta for intercept, if specified - betas = betas[:, :-1] - - if mask is not None: - betas = utils.unmask(betas, mask) - - return betas diff --git a/tedana/selection/select_comps.py b/tedana/selection/select_comps.py index 89356ebdc..5a499b04c 100644 --- a/tedana/selection/select_comps.py +++ b/tedana/selection/select_comps.py @@ -6,6 +6,7 @@ from scipy import stats from tedana import utils +from tedana.stats import getfbounds from tedana.selection._utils import getelbow LGR = logging.getLogger(__name__) @@ -242,14 +243,14 @@ def selcomps(seldict, comptable, mmix, manacc, n_echos): # Compute elbows from other elbows kappas_under_f01 = (comptable.loc[comptable['kappa'] < - utils.getfbounds(n_echos)[-1], 'kappa']) + getfbounds(n_echos)[-1], 'kappa']) # NOTE: Would an elbow from all Kappa values *ever* be lower than one from # a subset of lower values? kappa_elbow = np.min((getelbow(kappas_under_f01, return_val=True), getelbow(comptable['kappa'], return_val=True))) rho_elbow = np.mean((getelbow(comptable.loc[ncls, 'rho'], return_val=True), getelbow(comptable['rho'], return_val=True), - utils.getfbounds(n_echos)[0])) + getfbounds(n_echos)[0])) # Provisionally accept components based on Kappa and Rho elbows acc_prov = ncls[(comptable.loc[ncls, 'kappa'] >= kappa_elbow) & diff --git a/tedana/stats.py b/tedana/stats.py new file mode 100644 index 000000000..da192b278 --- /dev/null +++ b/tedana/stats.py @@ -0,0 +1,147 @@ +""" +Statistical functions +""" +import logging + +import numpy as np +from scipy import stats + +LGR = logging.getLogger(__name__) + + +def getfbounds(n_echos): + """ + Gets F-statistic boundaries based on number of echos + + Parameters + ---------- + n_echos : :obj:`int` + Number of echoes + + Returns + ------- + fmin, fmid, fmax : :obj:`float` + F-statistic thresholds for alphas of 0.05, 0.025, and 0.01, + respectively. 
+ """ + f05 = stats.f.ppf(q=(1 - 0.05), dfn=1, dfd=(n_echos - 1)) + f025 = stats.f.ppf(q=(1 - 0.025), dfn=1, dfd=(n_echos - 1)) + f01 = stats.f.ppf(q=(1 - 0.01), dfn=1, dfd=(n_echos - 1)) + return f05, f025, f01 + + +def computefeats2(data, mmix, mask, normalize=True): + """ + Converts `data` to component space using `mmix` + + Parameters + ---------- + data : (S x T) array_like + Input data + mmix : (T [x C]) array_like + Mixing matrix for converting input data to component space, where `C` + is components and `T` is the same as in `data` + mask : (S,) array_like + Boolean mask array + normalize : bool, optional + Whether to z-score output. Default: True + + Returns + ------- + data_Z : (S x C) :obj:`numpy.ndarray` + Data in component space + """ + if data.ndim != 2: + raise ValueError('Parameter data should be 2d, not {0}d'.format(data.ndim)) + elif mmix.ndim not in [2]: + raise ValueError('Parameter mmix should be 2d, not ' + '{0}d'.format(mmix.ndim)) + elif mask.ndim != 1: + raise ValueError('Parameter mask should be 1d, not {0}d'.format(mask.ndim)) + elif data.shape[0] != mask.shape[0]: + raise ValueError('First dimensions (number of samples) of data ({0}) ' + 'and mask ({1}) do not match.'.format(data.shape[0], + mask.shape[0])) + elif data.shape[1] != mmix.shape[0]: + raise ValueError('Second dimensions (number of volumes) of data ({0}) ' + 'and mmix ({1}) do not match.'.format(data.shape[0], + mmix.shape[0])) + + # demean masked data + data_vn = stats.zscore(data[mask], axis=-1) + + # get betas of `data`~`mmix` and limit to range [-0.999, 0.999] + data_R = get_coeffs(data_vn, mmix, mask=None) + data_R[data_R < -0.999] = -0.999 + data_R[data_R > 0.999] = 0.999 + + # R-to-Z transform + data_Z = np.arctanh(data_R) + if data_Z.ndim == 1: + data_Z = np.atleast_2d(data_Z).T + + # normalize data + if normalize: + data_Zm = stats.zscore(data_Z, axis=0) + data_Z = data_Zm + (data_Z.mean(axis=0, keepdims=True) / + data_Z.std(axis=0, keepdims=True)) + return data_Z + + +def get_coeffs(data, X, mask=None, add_const=False): + """ + Performs least-squares fit of `X` against `data` + + Parameters + ---------- + data : (S [x E] x T) array_like + Array where `S` is samples, `E` is echoes, and `T` is time + X : (T [x C]) array_like + Array where `T` is time and `C` is predictor variables + mask : (S [x E]) array_like + Boolean mask array + add_const : bool, optional + Add intercept column to `X` before fitting. 
Default: False + + Returns + ------- + betas : (S [x E] x C) :obj:`numpy.ndarray` + Array of `S` sample betas for `C` predictors + """ + if data.ndim not in [2, 3]: + raise ValueError('Parameter data should be 2d or 3d, not {0}d'.format(data.ndim)) + elif X.ndim not in [2]: + raise ValueError('Parameter X should be 2d, not {0}d'.format(X.ndim)) + elif data.shape[-1] != X.shape[0]: + raise ValueError('Last dimension (dimension {0}) of data ({1}) does not ' + 'match first dimension of ' + 'X ({2})'.format(data.ndim, data.shape[-1], X.shape[0])) + + # mask data and flip (time x samples) + if mask is not None: + if mask.ndim not in [1, 2]: + raise ValueError('Parameter data should be 1d or 2d, not {0}d'.format(mask.ndim)) + elif data.shape[0] != mask.shape[0]: + raise ValueError('First dimensions of data ({0}) and mask ({1}) do not ' + 'match'.format(data.shape[0], mask.shape[0])) + mdata = data[mask, :].T + else: + mdata = data.T + + # coerce X to >=2d + X = np.atleast_2d(X) + + if len(X) == 1: + X = X.T + + if add_const: # add intercept, if specified + X = np.column_stack([X, np.ones((len(X), 1))]) + + betas = np.linalg.lstsq(X, mdata, rcond=None)[0].T + if add_const: # drop beta for intercept, if specified + betas = betas[:, :-1] + + if mask is not None: + betas = utils.unmask(betas, mask) + + return betas diff --git a/tedana/tests/test_model_fit_computefeats2.py b/tedana/tests/test_stats_computefeats2.py similarity index 82% rename from tedana/tests/test_model_fit_computefeats2.py rename to tedana/tests/test_stats_computefeats2.py index fa2fc4e45..83126fa85 100644 --- a/tedana/tests/test_model_fit_computefeats2.py +++ b/tedana/tests/test_stats_computefeats2.py @@ -1,11 +1,11 @@ """ -Tests for tedana.model.fit +Tests for tedana.stats.computefeats2 """ import numpy as np import pytest -from tedana.model import fit +from tedana.stats import computefeats2 def test_break_computefeats2(): @@ -20,24 +20,24 @@ def test_break_computefeats2(): data = np.empty((n_samples)) with pytest.raises(ValueError) as e_info: - fit.computefeats2(data, mmix, mask, normalize=True) + computefeats2(data, mmix, mask, normalize=True) assert str(e_info.value) == ('Parameter data should be 2d, not {0}d'.format(data.ndim)) data = np.empty((n_samples, n_vols)) mmix = np.empty((n_vols)) with pytest.raises(ValueError) as e_info: - fit.computefeats2(data, mmix, mask, normalize=True) + computefeats2(data, mmix, mask, normalize=True) assert str(e_info.value) == ('Parameter mmix should be 2d, not {0}d'.format(mmix.ndim)) mmix = np.empty((n_vols, n_comps)) mask = np.empty((n_samples, n_vols)) with pytest.raises(ValueError) as e_info: - fit.computefeats2(data, mmix, mask, normalize=True) + computefeats2(data, mmix, mask, normalize=True) assert str(e_info.value) == ('Parameter mask should be 1d, not {0}d'.format(mask.ndim)) mask = np.empty((n_samples+1)) with pytest.raises(ValueError) as e_info: - fit.computefeats2(data, mmix, mask, normalize=True) + computefeats2(data, mmix, mask, normalize=True) assert str(e_info.value) == ('First dimensions (number of samples) of data ({0}) ' 'and mask ({1}) do not match.'.format(data.shape[0], mask.shape[0])) @@ -45,7 +45,7 @@ def test_break_computefeats2(): mask = np.empty((n_samples)) mmix = np.empty((n_vols+1, n_comps)) with pytest.raises(ValueError) as e_info: - fit.computefeats2(data, mmix, mask, normalize=True) + computefeats2(data, mmix, mask, normalize=True) assert str(e_info.value) == ('Second dimensions (number of volumes) of data ({0}) ' 'and mmix ({1}) do not 
match.'.format(data.shape[0], mmix.shape[0])) diff --git a/tedana/tests/test_model_fit_get_coeffs.py b/tedana/tests/test_stats_get_coeffs.py similarity index 79% rename from tedana/tests/test_model_fit_get_coeffs.py rename to tedana/tests/test_stats_get_coeffs.py index 9496284eb..d991cde58 100644 --- a/tedana/tests/test_model_fit_get_coeffs.py +++ b/tedana/tests/test_stats_get_coeffs.py @@ -1,11 +1,11 @@ """ -Tests for tedana.model.fit +Tests for tedana.stats.get_coeffs """ import numpy as np import pytest -from tedana.model import fit +from tedana.stats import get_coeffs def test_get_coeffs(): @@ -19,19 +19,19 @@ def test_get_coeffs(): X = np.arange(0, 40)[:, np.newaxis] mask = np.array([True, False]) - betas = fit.get_coeffs(data, X, mask=None, add_const=False) + betas = get_coeffs(data, X, mask=None, add_const=False) betas = np.squeeze(betas) assert np.allclose(betas, np.array([5., 5.])) - betas = fit.get_coeffs(data, X, mask=None, add_const=True) + betas = get_coeffs(data, X, mask=None, add_const=True) betas = np.squeeze(betas) assert np.allclose(betas, np.array([5., 5.])) - betas = fit.get_coeffs(data, X, mask=mask, add_const=False) + betas = get_coeffs(data, X, mask=mask, add_const=False) betas = np.squeeze(betas) assert np.allclose(betas, np.array([5, 0])) - betas = fit.get_coeffs(data, X, mask=mask, add_const=True) + betas = get_coeffs(data, X, mask=mask, add_const=True) betas = np.squeeze(betas) assert np.allclose(betas, np.array([5, 0])) @@ -48,19 +48,19 @@ def test_break_get_coeffs(): data = np.empty((n_samples)) with pytest.raises(ValueError) as e_info: - fit.get_coeffs(data, X, mask, add_const=False) + get_coeffs(data, X, mask, add_const=False) assert str(e_info.value) == ('Parameter data should be 2d or 3d, not {0}d'.format(data.ndim)) data = np.empty((n_samples, n_vols)) X = np.empty((n_vols)) with pytest.raises(ValueError) as e_info: - fit.get_coeffs(data, X, mask, add_const=False) + get_coeffs(data, X, mask, add_const=False) assert str(e_info.value) == ('Parameter X should be 2d, not {0}d'.format(X.ndim)) data = np.empty((n_samples, n_echos, n_vols+1)) X = np.empty((n_vols, n_comps)) with pytest.raises(ValueError) as e_info: - fit.get_coeffs(data, X, mask, add_const=False) + get_coeffs(data, X, mask, add_const=False) assert str(e_info.value) == ('Last dimension (dimension {0}) of data ({1}) does not ' 'match first dimension of ' 'X ({2})'.format(data.ndim, data.shape[-1], X.shape[0])) @@ -68,11 +68,11 @@ def test_break_get_coeffs(): data = np.empty((n_samples, n_echos, n_vols)) mask = np.empty((n_samples, n_echos, n_vols)) with pytest.raises(ValueError) as e_info: - fit.get_coeffs(data, X, mask, add_const=False) + get_coeffs(data, X, mask, add_const=False) assert str(e_info.value) == ('Parameter data should be 1d or 2d, not {0}d'.format(mask.ndim)) mask = np.empty((n_samples+1, n_echos)) with pytest.raises(ValueError) as e_info: - fit.get_coeffs(data, X, mask, add_const=False) + get_coeffs(data, X, mask, add_const=False) assert str(e_info.value) == ('First dimensions of data ({0}) and mask ({1}) do not ' 'match'.format(data.shape[0], mask.shape[0])) diff --git a/tedana/utils.py b/tedana/utils.py index 9598dd157..dbfb9b4bc 100644 --- a/tedana/utils.py +++ b/tedana/utils.py @@ -5,7 +5,6 @@ import numpy as np import nibabel as nib -from scipy import stats from scipy import ndimage from nilearn._utils import check_niimg from sklearn.utils import check_array @@ -15,27 +14,6 @@ LGR = logging.getLogger(__name__) -def getfbounds(n_echos): - """ - Gets F-statistic boundaries based 
on number of echos - - Parameters - ---------- - n_echos : :obj:`int` - Number of echoes - - Returns - ------- - fmin, fmid, fmax : :obj:`float` - F-statistic thresholds for alphas of 0.05, 0.025, and 0.01, - respectively. - """ - f05 = stats.f.ppf(q=(1 - 0.05), dfn=1, dfd=(n_echos - 1)) - f025 = stats.f.ppf(q=(1 - 0.025), dfn=1, dfd=(n_echos - 1)) - f01 = stats.f.ppf(q=(1 - 0.01), dfn=1, dfd=(n_echos - 1)) - return f05, f025, f01 - - def load_image(data): """ Takes input `data` and returns a sample x time array From 6fa9b68198ce0489d2adac7302021e7e1a98cd32 Mon Sep 17 00:00:00 2001 From: Taylor Salo Date: Wed, 24 Apr 2019 11:39:04 -0400 Subject: [PATCH 2/4] Update API. --- docs/api.rst | 21 ++++++++++++++++++++- 1 file changed, 20 insertions(+), 1 deletion(-) diff --git a/docs/api.rst b/docs/api.rst index 50470f278..b2296d50a 100644 --- a/docs/api.rst +++ b/docs/api.rst @@ -163,6 +163,26 @@ API .. _calibration_ref: +:mod:`tedana.stats`: Statistical functions +-------------------------------------------------- + +.. automodule:: tedana.stats + :no-members: + :no-inherited-members: + +.. autosummary:: tedana.stats + :toctree: generated/ + :template: function.rst + + tedana.stats.get_coeffs + tedana.stats.computefeats2 + tedana.stats.getfbounds + +.. currentmodule:: tedana + +.. _calibration_ref: + + :mod:`tedana.utils`: Utility functions -------------------------------------------------- @@ -177,7 +197,6 @@ API tedana.utils.andb tedana.utils.dice - tedana.utils.getfbounds tedana.utils.load_image tedana.utils.make_adaptive_mask tedana.utils.unmask From dc56bff74457b7f445dd08df53cf01e3acc92461 Mon Sep 17 00:00:00 2001 From: Taylor Salo Date: Wed, 24 Apr 2019 11:55:00 -0400 Subject: [PATCH 3/4] Requisite bug fix. --- tedana/stats.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/tedana/stats.py b/tedana/stats.py index da192b278..01d03a695 100644 --- a/tedana/stats.py +++ b/tedana/stats.py @@ -6,6 +6,8 @@ import numpy as np from scipy import stats +from tedana import utils + LGR = logging.getLogger(__name__) From acb1e831859348bd7eb63f8bd275ac8dc4617d9a Mon Sep 17 00:00:00 2001 From: Taylor Salo Date: Wed, 24 Apr 2019 12:00:09 -0400 Subject: [PATCH 4/4] Requisite *second* bug fix. --- tedana/tests/test_stats_getfbounds.py | 15 +++++++++++++++ tedana/tests/test_utils.py | 7 ------- 2 files changed, 15 insertions(+), 7 deletions(-) create mode 100644 tedana/tests/test_stats_getfbounds.py diff --git a/tedana/tests/test_stats_getfbounds.py b/tedana/tests/test_stats_getfbounds.py new file mode 100644 index 000000000..cbb35738d --- /dev/null +++ b/tedana/tests/test_stats_getfbounds.py @@ -0,0 +1,15 @@ +""" +Tests for tedana.stats.getfbounds +""" + +import numpy as np +import pytest + +from tedana.stats import getfbounds + + +def test_getfbounds(): + good_inputs = range(1, 12) + + for n_echos in good_inputs: + getfbounds(n_echos) diff --git a/tedana/tests/test_utils.py b/tedana/tests/test_utils.py index 11242c9be..0785843f1 100644 --- a/tedana/tests/test_utils.py +++ b/tedana/tests/test_utils.py @@ -16,13 +16,6 @@ tes = ['14.5', '38.5', '62.5'] -def test_getfbounds(): - good_inputs = range(1, 12) - - for n_echos in good_inputs: - utils.getfbounds(n_echos) - - def test_unmask(): # generate boolean mask + get number of True values mask = rs.choice([0, 1], size=(100,)).astype(bool)
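
Not part of the patch itself: a minimal usage sketch of the three relocated functions under their new import path, tedana.stats. The function names and signatures (getfbounds, computefeats2, get_coeffs) come from the code moved above; the synthetic array shapes and variable names below are illustrative assumptions only.

# Hypothetical example exercising the new tedana.stats module.
# Shapes are arbitrary; only the import path and call signatures
# are taken from the patch above.
import numpy as np
from tedana.stats import computefeats2, get_coeffs, getfbounds

n_samples, n_vols, n_comps, n_echos = 1000, 100, 5, 3

# F-statistic thresholds for alphas of 0.05, 0.025, and 0.01
fmin, fmid, fmax = getfbounds(n_echos)

# Least-squares fit of a (T x C) design against (S x T) data -> (S x C) betas
data = np.random.random((n_samples, n_vols))
X = np.random.random((n_vols, n_comps))
betas = get_coeffs(data, X, mask=None, add_const=False)

# Project data into component space (z-scored output by default) -> (S x C)
mask = np.ones(n_samples, dtype=bool)
data_Z = computefeats2(data, X, mask, normalize=True)

Callers inside the package follow the same pattern after this change: imports of model.computefeats2, model.get_coeffs, and utils.getfbounds are replaced by imports from tedana.stats, as the hunks in eigendecomp.py, io.py, fit.py, and select_comps.py show.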