From 1ccd1b9d2efc6b3f74c1fa2c56cc71342913176e Mon Sep 17 00:00:00 2001 From: Robert Guggenberger Date: Tue, 25 Sep 2018 14:39:46 +0200 Subject: [PATCH] Update readme.md --- artacs/helper/__init__.py | 2 - artacs/helper/api.py | 72 ----------------- artacs/helper/peaks.py | 166 -------------------------------------- artacs/kernel.py | 17 +++- artacs/template.py | 13 +-- readme.md | 71 ++++++++++++---- test/dev.py | 57 ------------- test/plotting.py | 19 ----- 8 files changed, 76 insertions(+), 341 deletions(-) delete mode 100644 artacs/helper/__init__.py delete mode 100644 artacs/helper/api.py delete mode 100644 artacs/helper/peaks.py delete mode 100644 test/dev.py delete mode 100644 test/plotting.py diff --git a/artacs/helper/__init__.py b/artacs/helper/__init__.py deleted file mode 100644 index 54ca1f8..0000000 --- a/artacs/helper/__init__.py +++ /dev/null @@ -1,2 +0,0 @@ -from helper.api import * -from wyrm.io import load_brain_vision_data \ No newline at end of file diff --git a/artacs/helper/api.py b/artacs/helper/api.py deleted file mode 100644 index 61599ba..0000000 --- a/artacs/helper/api.py +++ /dev/null @@ -1,72 +0,0 @@ -#!/usr/bin/env python3 -# -*- coding: utf-8 -*- -""" -Created on Thu Nov 16 18:03:00 2017 - -@author: Robert Guggenberger -""" - -import scipy.signal as sig -from helper.peaks import detect_peaks as _detect_peaks -import wyrm as _wyrm -import numpy as np -from mpl_toolkits.mplot3d import Axes3D -import matplotlib.pyplot as _plt -import sklearn.decomposition as sk -# %% - - -def plotonsubs(*args): - count = len(args) - fig, ax = _plt.subplots(1, count) - for _ax, item in zip(ax, args): - _ax.plot(item, 'k') - _ax.plot(np.mean(item, 1), 'r') - - -def phaseplot(timeseries): - h = sig.hilbert(timeseries) - r = np.real(h) - i = np.imag(h) - t = range(0, len(timeseries), 1) - _plt.figure() - ax = _plt.axes(projection='3d') - ax.plot(t, r, i, '-b') - return ax - - -def pcaplot(blocks): - pca = sk.PCA(n_components=2) - pca.fit(blocks) - S = pca.fit_transform(blocks) - t = range(0, len(blocks), 1) - _plt.figure() - ax = _plt.axes(projection='3d') - ax.plot(t, S[:, 0], S[:, 1], '-b') - return ax -# %% - - -def filtfilt(data, f_low=1, f_high=25, fs=None, butter_ord=4): - if fs is None: - if hasattr(data, 'fs'): - fs = data.fs - else: - fs = 500 - - fn = fs / 2 - b, a = sig.butter(butter_ord, [f_low / fn, f_high / fn], - btype='band') - return _wyrm.processing.filtfilt(data, b, a, timeaxis=-2) - - -def find_qrs(data, window=(500, 500), mpd=750): - peakind = _detect_peaks(-data.data[:, 0], mpd=mpd) - ecg = [] - arm = [] - for idx in peakind: - sel = range(idx - window[0], idx + window[1], 1) - if sel[0] > 0 and sel[-1] < len(data.data): - ecg.append(-data.data[sel, 0]) - arm.append(data.data[sel, 2] - data.data[sel, 1]) - return np.transpose(np.asanyarray(ecg)), np.transpose(np.asanyarray(arm)) diff --git a/artacs/helper/peaks.py b/artacs/helper/peaks.py deleted file mode 100644 index 0c04513..0000000 --- a/artacs/helper/peaks.py +++ /dev/null @@ -1,166 +0,0 @@ -""" -# -*- coding: utf-8 -*- - -Detect peaks in data based on their amplitude and other features. -""" - -from __future__ import division, print_function -import numpy as np - -__author__ = "Marcos Duarte, https://github.com/demotu/BMC" -__version__ = "1.0.4" -__license__ = "MIT" - -def detect_peaks(x, mph=None, mpd=1, threshold=0, edge='rising', - kpsh=False, valley=False, show=False, ax=None): - - """Detect peaks in data based on their amplitude and other features. 
- Parameters - ---------- - x : 1D array_like - data. - mph : {None, number}, optional (default = None) - detect peaks that are greater than minimum peak height. - mpd : positive integer, optional (default = 1) - detect peaks that are at least separated by minimum peak distance (in - number of data). - threshold : positive number, optional (default = 0) - detect peaks (valleys) that are greater (smaller) than `threshold` - in relation to their immediate neighbors. - edge : {None, 'rising', 'falling', 'both'}, optional (default = 'rising') - for a flat peak, keep only the rising edge ('rising'), only the - falling edge ('falling'), both edges ('both'), or don't detect a - flat peak (None). - kpsh : bool, optional (default = False) - keep peaks with same height even if they are closer than `mpd`. - valley : bool, optional (default = False) - if True (1), detect valleys (local minima) instead of peaks. - show : bool, optional (default = False) - if True (1), plot data in matplotlib figure. - ax : a matplotlib.axes.Axes instance, optional (default = None). - Returns - ------- - ind : 1D array_like - indeces of the peaks in `x`. - Notes - ----- - The detection of valleys instead of peaks is performed internally by simply - negating the data: `ind_valleys = detect_peaks(-x)` - - The function can handle NaN's - See this IPython Notebook [1]_. - References - ---------- - .. [1] http://nbviewer.ipython.org/github/demotu/BMC/blob/master/notebooks/DetectPeaks.ipynb - Examples - -------- - >>> from detect_peaks import detect_peaks - >>> x = np.random.randn(100) - >>> x[60:81] = np.nan - >>> # detect all peaks and plot data - >>> ind = detect_peaks(x, show=True) - >>> print(ind) - >>> x = np.sin(2*np.pi*5*np.linspace(0, 1, 200)) + np.random.randn(200)/5 - >>> # set minimum peak height = 0 and minimum peak distance = 20 - >>> detect_peaks(x, mph=0, mpd=20, show=True) - >>> x = [0, 1, 0, 2, 0, 3, 0, 2, 0, 1, 0] - >>> # set minimum peak distance = 2 - >>> detect_peaks(x, mpd=2, show=True) - >>> x = np.sin(2*np.pi*5*np.linspace(0, 1, 200)) + np.random.randn(200)/5 - >>> # detection of valleys instead of peaks - >>> detect_peaks(x, mph=0, mpd=20, valley=True, show=True) - >>> x = [0, 1, 1, 0, 1, 1, 0] - >>> # detect both edges - >>> detect_peaks(x, edge='both', show=True) - >>> x = [-2, 1, -2, 2, 1, 1, 3, 0] - >>> # set threshold = 2 - >>> detect_peaks(x, threshold = 2, show=True) - """ - - x = np.atleast_1d(x).astype('float64') - if x.size < 3: - return np.array([], dtype=int) - if valley: - x = -x - # find indexes of all peaks - dx = x[1:] - x[:-1] - # handle NaN's - indnan = np.where(np.isnan(x))[0] - if indnan.size: - x[indnan] = np.inf - dx[np.where(np.isnan(dx))[0]] = np.inf - ine, ire, ife = np.array([[], [], []], dtype=int) - if not edge: - ine = np.where((np.hstack((dx, 0)) < 0) & (np.hstack((0, dx)) > 0))[0] - else: - if edge.lower() in ['rising', 'both']: - ire = np.where((np.hstack((dx, 0)) <= 0) & (np.hstack((0, dx)) > 0))[0] - if edge.lower() in ['falling', 'both']: - ife = np.where((np.hstack((dx, 0)) < 0) & (np.hstack((0, dx)) >= 0))[0] - ind = np.unique(np.hstack((ine, ire, ife))) - # handle NaN's - if ind.size and indnan.size: - # NaN's and values close to NaN's cannot be peaks - ind = ind[np.in1d(ind, np.unique(np.hstack((indnan, indnan-1, indnan+1))), invert=True)] - # first and last values of x cannot be peaks - if ind.size and ind[0] == 0: - ind = ind[1:] - if ind.size and ind[-1] == x.size-1: - ind = ind[:-1] - # remove peaks < minimum peak height - if ind.size and mph is not None: - 
ind = ind[x[ind] >= mph] - # remove peaks - neighbors < threshold - if ind.size and threshold > 0: - dx = np.min(np.vstack([x[ind]-x[ind-1], x[ind]-x[ind+1]]), axis=0) - ind = np.delete(ind, np.where(dx < threshold)[0]) - # detect small peaks closer than minimum peak distance - if ind.size and mpd > 1: - ind = ind[np.argsort(x[ind])][::-1] # sort ind by peak height - idel = np.zeros(ind.size, dtype=bool) - for i in range(ind.size): - if not idel[i]: - # keep peaks with the same height if kpsh is True - idel = idel | (ind >= ind[i] - mpd) & (ind <= ind[i] + mpd) \ - & (x[ind[i]] > x[ind] if kpsh else True) - idel[i] = 0 # Keep current peak - # remove the small peaks and sort back the indexes by their occurrence - ind = np.sort(ind[~idel]) - - if show: - if indnan.size: - x[indnan] = np.nan - if valley: - x = -x - _plot(x, mph, mpd, threshold, edge, valley, ax, ind) - - return ind - -def _plot(x, mph, mpd, threshold, edge, valley, ax, ind): - """Plot results of the detect_peaks function, see its help.""" - try: - import matplotlib.pyplot as plt - except ImportError: - print('matplotlib is not available.') - else: - if ax is None: - _, ax = plt.subplots(1, 1, figsize=(8, 4)) - - ax.plot(x, 'b', lw=1) - if ind.size: - label = 'valley' if valley else 'peak' - label = label + 's' if ind.size > 1 else label - ax.plot(ind, x[ind], '+', mfc=None, mec='r', mew=2, ms=8, - label='%d %s' % (ind.size, label)) - ax.legend(loc='best', framealpha=.5, numpoints=1) - ax.set_xlim(-.02*x.size, x.size*1.02-1) - ymin, ymax = x[np.isfinite(x)].min(), x[np.isfinite(x)].max() - yrange = ymax - ymin if ymax > ymin else 1 - ax.set_ylim(ymin - 0.1*yrange, ymax + 0.1*yrange) - ax.set_xlabel('Data #', fontsize=14) - ax.set_ylabel('Amplitude', fontsize=14) - mode = 'Valley detection' if valley else 'Peak detection' - ax.set_title("%s (mph=%s, mpd=%d, threshold=%s, edge='%s')" - % (mode, str(mph), mpd, str(threshold), edge)) - # plt.grid() - plt.show() diff --git a/artacs/kernel.py b/artacs/kernel.py index d072d3b..4e568ee 100644 --- a/artacs/kernel.py +++ b/artacs/kernel.py @@ -51,7 +51,9 @@ Object-oriented implementation ------------------------------ -There exists also an object-oriented implementation. 
+There is also an object-oriented implementation, which can be used as follows::
+
+    kernel = CombKernel(freq=20, fs=1000, width=1)
 """
 
@@ -280,7 +282,15 @@ def apply_kernel(indata:ndarray, fs:int, freq:int, kernel:ndarray):
 
 #%%
 class CombKernel():
+    '''Object-oriented comb kernel filter
 
+    Example of creating and applying a classical comb kernel::
+
+        kernel = CombKernel(freq=20, fs=1000, width=1,
+                            left_mode='uniform', right_mode='none')
+
+        kernel.apply(artifacted_signal)
+    '''
     def __init__(self, freq:int, fs:int, width:int,
                  left_mode:str='uniform', right_mode:str='uniform') -> None:
 
@@ -298,9 +308,12 @@ def _update_kernel(self):
                                          self._left_mode,
                                          self._right_mode)
 
-    def __call__(self, indata:np.array) -> ndarray:
+    def apply(self, indata:ndarray) -> ndarray:
         return apply_kernel(indata=indata, freq=self._freq, fs=self._fs,
                             kernel=self._kernel)
+
+    def __call__(self, indata:ndarray) -> ndarray:
+        return self.apply(indata)
 
     def __repr__(self):
         return (f'KernelFilter({self._freq}, {self._fs}, {self._width}, ' +
diff --git a/artacs/template.py b/artacs/template.py
index 1cd5306..5ded6c9 100644
--- a/artacs/template.py
+++ b/artacs/template.py
@@ -5,6 +5,7 @@
 
 @author: rgugg
 """
+from numpy import ndarray
 import numpy as np
 import artacs.tools as tools
 import logging
@@ -64,11 +65,11 @@ def prepare_data(self, indata):
         data, period, fs = self.inbound_resample(valid_data)
         seeds = self.calc_seeds(period)
         return data, period, fs, seeds
-    
-    def __call__(self, indata):
+
+    def __call__(self, indata:ndarray) -> ndarray:
         return self.process(indata)
 
-    def process(self, indata):
+    def process(self, indata:ndarray):
         'process all channels of a dataset'
         if self.true_period is None:
             print('Invalid period length, skipping artifact removal')
@@ -93,7 +94,7 @@ def process(self, indata):
             print(']',end='\n')
         return np.squeeze(outdata)
 
-    def process_channel(self, indata):
+    def process_channel(self, indata:ndarray) -> ndarray:
         'process a single channels of data'
         if self.true_period is None:
             print('Invalid period length, skipping artifact removal')
@@ -117,7 +118,7 @@ def process_channel(self, indata):
         
         return outdata
 
-    def _process(self, data, period, fs, seed):
+    def _process(self, data:ndarray, period:int, fs:int, seed:int):
         converged = False
         iteration = 0
         fdata = data.copy()
@@ -133,4 +134,4 @@ def _process(self, data, period, fs, seed):
             converged = True
         else:
             fdata[put_where] -= template
-    return put_where, fdata[put_where]
\ No newline at end of file
+    return put_where, fdata[put_where]
diff --git a/readme.md b/readme.md
index 9d877fc..24b425a 100644
--- a/readme.md
+++ b/readme.md
@@ -9,25 +9,62 @@ Its development is supported by the [BMBF: FKZ 13GW0119](https://www.medizintech
 
 #### Example application
 | Upper Limb Bipolar ECG recording during 11 Hz tACS ||
 |:----:|:----:|
-|__Recover the physiological signal__ (which is ~120dB weaker than tACS) ||
 
 #### Weighted Comb Filter
-Artifact can be _non-stationary_ and _non-sinusoidal_, but is required to be _periodic_. Comb filters natively support only frequencies which are integer divisibles of the sampling frequency. When artacs.kernel.run is used, the signal is automatically resampled, to circumvent this limitation. Note that the method still requires integer frequencies.
-
-By default, the kernel is symmetric and weights are based empirically on the artifacts periodic autocorrelation.
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+Artifacts can be _non-stationary_ and _non-sinusoidal_, but they are required to be _periodic_. Comb filters natively support only frequencies that are integer divisors of the sampling frequency. This limitation is circumvented by automatically resampling the signal, which is implemented in this package.
+
+##### Creation
+
+The following example creates a kernel for a _classical_ causal comb filter for
+an artifact with a frequency of 10 Hz, sampled at 1000 Hz:
+
+```{python}
+    kernel = create_kernel(freq=10, fs=1000, width=1,
+                           left_mode='uniform',
+                           right_mode='none')
+```
+
+Alternatively, create the superposition of moving averages (SMA) filter discussed e.g. by [1],
+here with a width of 5 periods at 10 Hz and a 1000 Hz sampling rate:
+
+```{python}
+    kernel = create_kernel(freq=10, fs=1000, width=5,
+                           left_mode='uniform',
+                           right_mode='uniform')
+```
+
+[1]: Kohli, S., Casson, A.J., 2015. Removal of Transcranial a.c. Current Stimulation
+artifact from simultaneous EEG recordings by superposition of moving averages.
+Conf Proc IEEE Eng Med Biol Soc 2015, 3436–3439.
+https://doi.org/10.1109/EMBC.2015.7319131
+
+##### Application
+
+Once created, a kernel can be applied to the signal as follows:
+
+```{python}
+    filtered_data = filter_2d(artifacted_data, freq=10, fs=1000, kernel=kernel)
+```
+
+The kernel application is implemented for 2-dimensional data and calls the
+1-dimensional implementation in a for-loop. Note that you have to specify
+the artifact frequency and sampling rate again. This is because the kernel is
+only valid if the artifact period spans an integer number of samples.
+If it does not, the signal is automatically up-sampled, processed, and down-sampled.
+This allows the artifact to be removed, but is far from perfect. Additionally,
+the function estimates the period of the kernel and throws an exception if
+it does not match the specified frequency. This ensures that the correct
+kernel is used. A complete end-to-end example is sketched further below.
+
+#### Periodic Component Removal
+
+An alternative is the creation and removal of periodic templates until the
+artifact power is sufficiently suppressed. This can be achieved as follows:
+
+```{python}
+    remover = StepwiseRemover(freq=20, fs=1000)
+    filtered_data = remover.process(artifacted_data)
+```
 
 See also [![DOI](https://zenodo.org/badge/87182503.svg)](https://zenodo.org/badge/latestdoi/87182503) for a similar implementation in Matlab.
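+
+#### End-to-end example
+
+For orientation, the snippets above can be combined into one minimal, runnable sketch.
+It is only an illustration: the synthetic data, the channels-by-samples layout, and the
+variable names are assumptions of this example; `apply_kernel` is used here because its
+signature is documented in `artacs/kernel.py`, and `filter_2d` from above should work
+analogously:
+
+```{python}
+import numpy as np
+from artacs.kernel import create_kernel, apply_kernel
+
+fs, freq = 1000, 10                    # sampling rate and tACS frequency in Hz
+t = np.arange(0, 2, 1 / fs)            # two seconds of data
+physio = np.sin(2 * np.pi * 2 * t)     # slow 'physiological' component
+artifact = 10 * np.sin(2 * np.pi * freq * t)        # much stronger periodic artifact
+artifacted_data = np.atleast_2d(physio + artifact)  # shape: channels x samples
+
+# classical causal comb kernel, one period wide on the left side
+kernel = create_kernel(freq=freq, fs=fs, width=1,
+                       left_mode='uniform', right_mode='none')
+
+# remove the periodic artifact from every channel
+filtered_data = apply_kernel(artifacted_data, fs=fs, freq=freq, kernel=kernel)
+```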
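+
+The object-oriented `CombKernel` interface offers the same functionality; the sketch
+below mirrors the docstring example in `artacs/kernel.py` and reuses the assumptions
+(synthetic data, channels-by-samples layout) from the sketch above:
+
+```{python}
+from artacs.kernel import CombKernel
+
+# same parameters as in the functional example above
+comb = CombKernel(freq=10, fs=1000, width=1,
+                  left_mode='uniform', right_mode='none')
+
+filtered_data = comb.apply(artifacted_data)   # explicit call
+filtered_data = comb(artifacted_data)         # a CombKernel is also callable
+```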
diff --git a/test/dev.py b/test/dev.py deleted file mode 100644 index f187e5c..0000000 --- a/test/dev.py +++ /dev/null @@ -1,57 +0,0 @@ -#!/usr/bin/env python3 -# -*- coding: utf-8 -*- -""" -Created on Thu Nov 16 15:07:58 2017 - -@author: rgugg -""" -# %% -import sys -sys.path.append('/media/rgugg/storage/projects/globalProjects/pyArtACS/src') -import helper -import matplotlib.pyplot as plt -from mpl_toolkits.mplot3d import Axes3D -import wyrm.io -import numpy as np -import scipy as sp -import sklearn.decomposition as sk -# %% -eeg = helper.load_brain_vision_data( - '/media/rgugg/storage/projects/globalProjects/pyArtACS/data/10Hz3ma.vhdr') -eeg = helper.filtfilt(eeg, f_low=1, f_high=25) -ecg, arm = helper.find_qrs(eeg, window=(400, 400), mpd=800) -helper.plotonsubs(ecg, arm) - -# %% -eeg = wyrm.io.load_brain_vision_data( - '/media/rgugg/storage/projects/globalProjects/pyArtACS/data/clean.vhdr') -eeg = helper.filtfilt(eeg, f_low=1, f_high=25) -ecg, arm = helper.find_qrs(eeg, window=(400, 400), mpd=800) -helper.plotonsubs(ecg, arm) -plt.show() - -# %% -cntArm = (eeg.data[:, 2] - eeg.data[:, 1]) -window = range(1000, 4000, 1) -helper.phaseplot(cntArm[window]) -helper.pcaplot(np.reshape(cntArm[window], (1000, 3))) -# %% -pca = sk.PCA() -pca.fit(arm) -S = pca.fit_transform(np.reshape(cntArm[window], (750, 4))) -fig, ax = plt.subplots(2, 3) -for idx in range(0, 6, 1): - ax.flatten()[idx].plot(S[:, idx]) -# %% -window = range(1000, 11000, 1) -sig = np.transpose(np.reshape(cntArm[window], (100, 100))) -helper.pcaplot(sig) -# %% -pca = sk.PCA() -pca.fit(arm) -S = pca.fit_transform(sig) -fig, ax = plt.subplots(25, 4) -for idx in range(0, 100, 1): - ax.flatten()[idx].plot(S[:, idx]) - -plt.show() diff --git a/test/plotting.py b/test/plotting.py deleted file mode 100644 index f468714..0000000 --- a/test/plotting.py +++ /dev/null @@ -1,19 +0,0 @@ -#!/usr/bin/env python3 -# -*- coding: utf-8 -*- -""" -Created on Wed Sep 19 13:27:45 2018 - -@author: rgugg -""" - -import matplotlib.pyplot as plt -# %% - # %% -# freq = 10 -# fs = 1000 -# indata = np.atleast_2d(np.sin(2*np.pi*freq*np.arange(0,2.01,1/fs))) -# width = 4 -# plt.cla() -# plt.plot(indata[0,:]) -# #plt.plot(pdata) -# plt.plot(kernel_filter(indata, freq, fs, width)[0,:]) \ No newline at end of file