diff --git a/camera_profiles/DJI Mini 2.cpf b/camera_profiles/DJI Mini 2.cpf
index 6e05bc2..7279f0d 100644
--- a/camera_profiles/DJI Mini 2.cpf
+++ b/camera_profiles/DJI Mini 2.cpf
@@ -2,16 +2,16 @@
Model = DJI Mini 2
[Intrinsics]
-fx = 0.7126984127
-fy = 0.7126984127
-cx = 0.5067482020
-cy = 0.3716643567
+fx = 0.7568749675
+fy = 0.7568749675
+cx = 0.4995236875
+cy = 0.49861405333
[Radial]
-k1 = 0.0298194
-k2 = -0.110242
+k1 = -0.00362736
+k2 = -0.00130119
k3 = 0.0
[Tangential]
-p1 = 0.0007491
-p2 = 0.0016115
+p1 = 0.000287332
+p2 = -0.00220176
diff --git a/gui/SSIM Stabilization GUI.application b/gui/SSIM Stabilization GUI.application
index d345ab5..7e61b40 100644
--- a/gui/SSIM Stabilization GUI.application
+++ b/gui/SSIM Stabilization GUI.application
@@ -14,7 +14,7 @@
- 5cQEWx+UnFUAP2k7WHa630JMdqJrtd281T/iVQWAS7w=
+ 3RPsOZN78HtDmcHYz9R2vEDeiPdk86kuVOnX2XWb9DA=
diff --git a/gui/SSIM Stabilization GUI.exe b/gui/SSIM Stabilization GUI.exe
index 70aa71e..6d2a40a 100644
Binary files a/gui/SSIM Stabilization GUI.exe and b/gui/SSIM Stabilization GUI.exe differ
diff --git a/gui/SSIM Stabilization GUI.exe.manifest b/gui/SSIM Stabilization GUI.exe.manifest
index 7259d68..105444d 100644
--- a/gui/SSIM Stabilization GUI.exe.manifest
+++ b/gui/SSIM Stabilization GUI.exe.manifest
@@ -104,14 +104,14 @@
-
+
- /v7FKLV31aXlizNnHufDdFIzUxggDtTkrLuWv6dbCP0=
+ 1SUVMnjos5qUF8w1wPSg2INKvq5fIY0VWlQgF7N6+pQ=
diff --git a/gui/update.ini b/gui/update.ini
index a93bb2b..db578cb 100644
--- a/gui/update.ini
+++ b/gui/update.ini
@@ -1,8 +1,8 @@
[Updates]
-CurrentReleaseTag = v0.3.1.4
-CurrentReleaseName = SSIMS_v0.3.1.4
-CurrentReleaseDate = 2022-03-11 13:36:03.252079
-LastCheckDate = 2022-03-11 13:36:03.252079
+CurrentReleaseTag = v0.3.2.0
+CurrentReleaseName = SSIMS_v0.3.2.0
+CurrentReleaseDate = 2022-04-05 17:57:43.422532
+LastCheckDate = 2022-04-05 17:57:43.422532
DisableUpdateCheck = 0
PauseDays = 0
diff --git a/release_notes.txt b/release_notes.txt
index 9f3b2b8..287d92c 100644
--- a/release_notes.txt
+++ b/release_notes.txt
@@ -1,3 +1,15 @@
+SSIMS_v0.3.2.0 ----------------------------------------------------------------
+
+Major changes:
+- Added "Explore colorspaces" option in Filter frames form
+
+Minor changes:
+- Added new filters and polished old ones
+- RGB colorspace is now the default for filtering
+- Keybindings changed for inspect_frames.py
+- New DJI Mini 2 parameters from Metashape
+
+
SSIMS_v0.3.1.4 ----------------------------------------------------------------
Minor changes:
diff --git a/scripts/__init__.py b/scripts/__init__.py
index 4724a26..a4706b6 100644
--- a/scripts/__init__.py
+++ b/scripts/__init__.py
@@ -8,9 +8,9 @@
__package_name__ = 'SSIMS: Preprocessing tool for UAV image velocimetry'
__description__ = 'Preprocessing and video stabilization tool for UAS/UAV image velocimetry based on Structural Similarity (SSIM) Index metric'
-__version__ = '0.3.1.4'
+__version__ = '0.3.2.0'
__status__ = 'beta'
-__date_deployed__ = '2022-03-11'
+__date_deployed__ = '2022-04-05'
__author__ = 'Robert Ljubicic, University of Belgrade - Civil Engineering Faculty'
__author_email__ = 'rljubicic@grf.bg.ac.rs'
diff --git a/scripts/colorspaces.py b/scripts/colorspaces.py
new file mode 100644
index 0000000..e28482d
--- /dev/null
+++ b/scripts/colorspaces.py
@@ -0,0 +1,258 @@
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+
+"""
+This is free software: you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation, either version 3 of the License, or
+(at your option) any later version.
+
+This package is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with this package. If not, you can get the GNU GPL from
+https://www.gnu.org/licenses/gpl-3.0.en.html.
+
+Created by Robert Ljubicic.
+"""
+
+try:
+ from __init__ import *
+ from matplotlib.widgets import Slider
+ from sys import exit
+
+ import glob
+ import matplotlib.pyplot as plt
+
+except Exception as ex:
+ print('\n[EXCEPTION] Import failed: \n\n'
+ ' {}'.format(ex))
+ input('\nPress ENTER/RETURN to exit...')
+ exit()
+
+
+def snr(a):
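+	# Signal-to-noise ratio of the selected region, computed as mean / standard deviation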
+ m = np.mean(a)
+ sd = np.std(a)
+ return m/sd
+
+
+def update_frame(val):
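+	# Slider callback: load the newly selected frame and refresh all colorspace panels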
+ global current_frame
+
+ current_frame = frames_list[sl_ax_frame_num.val]
+	get_colorspaces(current_frame, xlim, ylim)
+
+ plt.draw()
+
+
+def keypress(event):
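+	# Keyboard navigation: UP/DOWN move by one frame, PageUp/PageDown by ten, ESC exits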
+ if event.key == 'escape':
+ exit()
+
+ elif event.key == 'down':
+ if sl_ax_frame_num.val == 0:
+ sl_ax_frame_num.set_val(num_frames - 1)
+ else:
+ sl_ax_frame_num.set_val(sl_ax_frame_num.val - 1)
+
+ elif event.key == 'up':
+ if sl_ax_frame_num.val == num_frames - 1:
+ sl_ax_frame_num.set_val(0)
+ else:
+ sl_ax_frame_num.set_val(sl_ax_frame_num.val + 1)
+
+ elif event.key == 'pageup':
+ if sl_ax_frame_num.val >= num_frames - 10:
+ sl_ax_frame_num.set_val(0)
+ else:
+ sl_ax_frame_num.set_val(sl_ax_frame_num.val + 10)
+
+ elif event.key == 'pagedown':
+ if sl_ax_frame_num.val <= 9:
+ sl_ax_frame_num.set_val(num_frames - 1)
+ else:
+ sl_ax_frame_num.set_val(sl_ax_frame_num.val - 10)
+
+ update_frame(sl_ax_frame_num.val)
+
+
+def get_colorspaces(path, xlim, ylim):
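+	# Convert the frame to RGB, grayscale, HSV and L*a*b* and update all 12 preview panels, keeping the current zoom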
+ img_bgr = cv2.imread(path)
+ img_rgb = cv2.cvtColor(img_bgr, cv2.COLOR_BGR2RGB)
+ img_gray = cv2.cvtColor(img_bgr, cv2.COLOR_BGR2GRAY)
+ img_hsv = cv2.cvtColor(img_bgr, cv2.COLOR_BGR2HSV)
+ img_lab = cv2.cvtColor(img_bgr, cv2.COLOR_BGR2LAB)
+
+ cs_list = [
+ img_rgb,
+ img_rgb[:, :, 0],
+ img_rgb[:, :, 1],
+ img_rgb[:, :, 2],
+ img_gray,
+ img_hsv[:, :, 0],
+ img_hsv[:, :, 1],
+ img_hsv[:, :, 2],
+ None,
+ img_lab[:, :, 0],
+ img_lab[:, :, 1],
+ img_lab[:, :, 2],
+ ]
+
+ for i in range(len(cs_list)):
+ if cs_list[i] is not None:
+ img = cs_list[i]
+ imshow_list[i].set_data(img)
+ ax_list[i].set_xlim(xlim)
+ ax_list[i].set_ylim(ylim)
+ ax_list[i].set_title(cs_names[i])
+
+ if i == 0:
+ ax_list[i].set_title(cs_names[i])
+ else:
+ ax_snr = snr(img[ylim[1]: ylim[0], xlim[0]: xlim[1]])
+ ax_list[i].set_title('{}, SNR={:.2f}'.format(cs_names[i], ax_snr))
+
+
+def on_lims_change(event_ax):
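+	# Zoom/pan callback: recompute the SNR of each single-channel panel over the visible crop.
+	# The ylim_changed callbacks are disconnected first because set_xlim/set_ylim below would retrigger this handler.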
+ global xlim
+ global ylim
+
+ cid_list = list(event_ax.callbacks.callbacks['ylim_changed'].keys())
+ for cid in cid_list:
+ event_ax.callbacks.disconnect(cid)
+
+ xlim = [int(x) for x in event_ax.get_xlim()]
+ ylim = [int(y) for y in event_ax.get_ylim()]
+
+ event_ax.set_xlim(xlim)
+ event_ax.set_ylim(ylim)
+
+ for i in range(1, len(cs_names)):
+ if cs_names[i] != '':
+ a = ax_list[i]
+ ax_title = a.get_title().split(', ')[0]
+ ax_img = np.array(a.get_images()[0]._A)
+ ax_img_crop = ax_img[ylim[1]: ylim[0], xlim[0]: xlim[1]]
+ ax_snr = snr(ax_img_crop)
+
+ a.set_title('{}, SNR={:.2f}'.format(ax_title, ax_snr))
+
+ event_ax.callbacks.connect('ylim_changed', on_lims_change)
+
+
+if __name__ == '__main__':
+ try:
+ parser = ArgumentParser()
+		parser.add_argument('--folder', type=str, help='Path to folder with image frames')
+		parser.add_argument('--ext', type=str, help='Frames\' extension')
+ args = parser.parse_args()
+
+ frames_list = glob.glob('{}/*.{}'.format(args.folder, args.ext))
+ num_frames = len(frames_list)
+ first_frame = cv2.imread(frames_list[0], 0)
+
+ h, w = first_frame.shape
+ xlim = [0, w]
+ ylim = [h, 0]
+
+ nrows, ncols = 3, 4
+ fig, ax = plt.subplots(nrows=nrows, ncols=ncols, sharex=True, sharey=True)
+ plt.subplots_adjust(left=0.01, right=0.99, top=0.96, bottom=0.06, wspace=0.02, hspace=0.1)
+ fig.canvas.mpl_connect('key_press_event', keypress)
+
+ legend = 'Use O to zoom and P to pan images,\n' \
+ 'Use slider to select frame,\n' \
+ 'use UP and DOWN keys to move by +/- 1 frame\n' \
+ 'or PageUP and PageDOWN keys to move by +/- 10 frames\n' \
+ 'Press ESC or Q to exit'
+
+ legend_toggle = plt.text(0.5, 0.5, legend,
+ horizontalalignment='center',
+ verticalalignment='center',
+ transform=ax[2][0].transAxes,
+ bbox=dict(facecolor='white', alpha=0.5),
+ fontsize=9,
+ )
+
+ axcolor = 'lightgoldenrodyellow'
+ valfmt = "%d"
+
+ ax_frame_num = plt.axes([0.2, 0.02, 0.63, 0.03], facecolor=axcolor)
+ sl_ax_frame_num = Slider(ax_frame_num, 'Frame #\n({} total)'.format(num_frames), 0, num_frames - 1, valinit=0, valstep=1, valfmt=valfmt)
+ sl_ax_frame_num.on_changed(update_frame)
+
+ cs_names = [
+ 'Original RGB',
+ '[R]GB',
+ 'R[G]B',
+ 'RG[B]',
+ 'Grayscale',
+ '[H]SV',
+ 'H[S]V',
+ 'HS[V]',
+ '',
+ '[L*]a*b*',
+ 'L*[a*]b*',
+ 'L*a*[b*]',
+ ]
+
+ ax_list = ax.reshape(-1)
+ imshow_list = [None] * 12
+
+ current_frame = frames_list[0]
+
+ img_bgr = cv2.imread(current_frame)
+ img_rgb = cv2.cvtColor(img_bgr, cv2.COLOR_BGR2RGB)
+ img_gray = cv2.cvtColor(img_bgr, cv2.COLOR_BGR2GRAY)
+ img_hsv = cv2.cvtColor(img_bgr, cv2.COLOR_BGR2HSV)
+ img_lab = cv2.cvtColor(img_bgr, cv2.COLOR_BGR2LAB)
+
+ cs_list = [
+ img_rgb,
+ img_rgb[:, :, 0],
+ img_rgb[:, :, 1],
+ img_rgb[:, :, 2],
+ img_gray,
+ img_hsv[:, :, 0],
+ img_hsv[:, :, 1],
+ img_hsv[:, :, 2],
+ None,
+ img_lab[:, :, 0],
+ img_lab[:, :, 1],
+ img_lab[:, :, 2],
+ ]
+
+ for i in range(len(cs_list)):
+ if cs_list[i] is not None:
+ img = cs_list[i]
+ imshow_list[i] = ax_list[i].imshow(img)
+ ax_list[i].set_xlim(xlim)
+ ax_list[i].set_ylim(ylim)
+ ax_list[i].set_title(cs_names[i])
+
+ if i == 0:
+ ax_list[i].set_title(cs_names[i])
+ else:
+ ax_snr = snr(img[ylim[1]: ylim[0], xlim[0]: xlim[1]])
+ ax_list[i].set_title('{}, SNR={:.2f}'.format(cs_names[i], ax_snr))
+
+ [a.set_axis_off() for a in ax_list]
+ [a.callbacks.connect('ylim_changed', on_lims_change) for a in ax_list]
+
+ try:
+ mng = plt.get_current_fig_manager()
+ mng.window.state('zoomed')
+			mng.set_window_title('Explore colorspaces')
+ except:
+ pass
+
+ plt.show()
+
+ except Exception as ex:
+ print('\n[EXCEPTION] The following exception has occurred: \n\n'
+ ' {}'.format(ex))
+ input('\nPress ENTER/RETURN to exit...')
\ No newline at end of file
diff --git a/scripts/filter_frames.py b/scripts/filter_frames.py
index a838069..5195e74 100644
--- a/scripts/filter_frames.py
+++ b/scripts/filter_frames.py
@@ -29,6 +29,7 @@
import glob
import matplotlib.pyplot as plt
import mplcursors
+ import scipy.stats as stats
except Exception as ex:
print('\n[EXCEPTION] Import failed: \n\n'
@@ -37,164 +38,195 @@
exit()
separator = '---'
+colormap = 'viridis'
+
+colorspaces_list = ['rgb', 'hsv', 'lab', 'grayscale']
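+# cvtColor codes for moving between the colorspaces listed above; the integers are OpenCV constants,
+# e.g. 41 = cv2.COLOR_RGB2HSV, 45 = cv2.COLOR_RGB2Lab, 7 = cv2.COLOR_RGB2GRAY,
+# 55 = cv2.COLOR_HSV2RGB, 57 = cv2.COLOR_Lab2RGB, 8 = cv2.COLOR_GRAY2RGB.
+# An empty list means the source and target colorspaces are the same and no conversion is needed.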
+color_conv_codes = [
+ [[], [41], [45], [7]],
+ [[55], [], [55, 45], [55, 7]],
+ [[57], [57, 41], [], [57, 7]],
+ [[8], [8, 41], [8, 45], []]
+]
+
+
+def convert_img(img, from_cs, to_cs):
+ from_cs_index = colorspaces_list.index(from_cs)
+ to_cs_index = colorspaces_list.index(to_cs)
+
+ conv_codes = color_conv_codes[from_cs_index][to_cs_index]
+
+ if len(conv_codes) == 0:
+ return img
+
+ for i, code in enumerate(conv_codes):
+ img = cv2.cvtColor(img, code)
+
+ return img
+
+
+def is_grayscale(img):
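+	# A three-channel image is treated as grayscale when all three channels hold identical values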
+ if (img[:, :, 0] == img[:, :, 1]).all() and (img[:, :, 0] == img[:, :, 2]).all():
+ return True
+ else:
+ return False
-def addBackgroundImage(fore: np.ndarray, back: np.ndarray) -> np.ndarray:
- background = back.copy().astype(float)
- alpha = fore
-
- try: # Try to read the third dimension of an array. If fails, the array is 2D.
- background.shape[2]
- dimension = 3
- except IndexError:
- dimension = 2
+def func(name, image, params):
+ return name(image, *params)
+
+
+def negative(img):
+ print('[FILTER] Convert to image negative')
+ return ~img
- foreground = fore.copy().astype(float)
- alpha = alpha.copy().astype(float) / 255
- if dimension == 3:
- if len(foreground.shape) == 2:
- # Color me purple
- foreground = np.stack((foreground,) * 3, -1)
- foreground = np.where(foreground == [0., 0., 0.], [0., 0., 0.], [0., 0., 0.])
- alpha = np.stack((alpha,) * 3, -1)
- else:
- foreground = np.where(foreground == [0., 0., 0.], [0., 0., 0.], [0., 0., 0.])
+def to_grayscale(img):
+ img_gray = convert_img(img, colorspace, 'grayscale')
+ print('[FILTER] Convert to grayscale')
+ return convert_img(img_gray, 'grayscale', colorspace)
- foreground = cv2.multiply(alpha, foreground)
- alpha = 1.0 - alpha
- background = cv2.multiply(alpha, background)
- combined = cv2.add(foreground, background)
- return combined.astype('uint8')
+def to_rgb(img):
+ print('[FILTER] Convert to RGB colorspace')
+ return convert_img(img, colorspace, 'rgb')
+
+
+def to_hsv(img):
+ print('[FILTER] Convert to HSV colorspace')
+ return convert_img(img, colorspace, 'hsv')
+
+def to_lab(img):
+ print('[FILTER] Convert to L*a*b* colorspace')
+ return convert_img(img, colorspace, 'lab')
+
-def func(name, image, params):
- return name(image, *params)
+def select_channel(img, channel=1):
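+	# Channel numbers are 1-based (matching the GUI); the selected plane is replicated into all three channels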
+ try:
+ img_single = img[:, :, int(channel)-1]
+ except IndexError:
+ print('[ ERROR] Image is already single channel, cannot select channel {}'.format(channel))
+ return img
+
+ print('[FILTER] Selecting channel {}'.format(channel))
+ return cv2.merge([img_single, img_single, img_single])
+
+
+def highpass(img, sigma=51):
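+	# High-pass: remove the low-frequency (blurred) background and re-centre the result around mid-grey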
+ if sigma % 2 == 1:
+ sigma += 1
+ blur = cv2.GaussianBlur(img, (0, 0), int(sigma))
-def histeq(img):
- print('[FILTER] Histogram equalization')
- eq = cv2.equalizeHist(cv2.cvtColor(img, cv2.COLOR_BGR2GRAY))
- return cv2.cvtColor(eq, cv2.COLOR_GRAY2BGR)
+ print('[FILTER] Highpass filter: sigma={:.0f}'.format(sigma))
+ return ~cv2.subtract(cv2.add(blur, 127), img)
+
+
+def normalize_image(img, lower=None, upper=None):
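+	# Linearly stretch the image to the full [0, 255] range using its own minimum and maximum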
+ if lower is None:
+ lower = np.min(img)
+ if upper is None:
+ upper = np.max(img)
+ img_c = img.astype(int)
-def clahe(img, clip=2.0, tile=8):
- print('[FILTER] CLAHE: clip={:.1f}, tile={:.0f}'.format(clip, tile))
- clahe = cv2.createCLAHE(clipLimit=clip, tileGridSize=(int(tile), int(tile)))
- return cv2.cvtColor(clahe.apply(cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)), cv2.COLOR_GRAY2BGR)
+ img_c = ((img_c - np.min(img_c)) / (np.max(img_c) - np.min(img_c)) * 255).astype('uint8')
+ return img_c
-def denoise(img, h=3, hcolor=3, template_size=7, search_size=21):
- print('[FILTER] Denoise: h={:.0f}, hcolor={:.0f}, template_size={:.0f}, search_size={:.0f}'.format(h, hcolor, template_size, search_size))
- if template_size % 2 == 1:
- template_size += 1
- if search_size % 2 == 1:
- search_size += 1
- return cv2.fastNlMeansDenoisingColored(img, None, int(h), int(hcolor), int(template_size), int(search_size))
+def intensity_capping(img, n_std=0.0):
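+	# Cap grayscale intensities at (median - n_std * stdev), then stretch the capped image back to [0, 255]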
+ img_g = convert_img(img, colorspace, 'grayscale')
+
+ median = np.median(img_g)
+ stdev = np.std(img_g)
+ cap = median - n_std * stdev
-def hsv_filter(img, hu=255, hl=0, su=255, sl=0, vu=255, vl=0):
- print('[FILTER] HSV: Hu={:.0f}, Hl={:.0f}, Su={:.0f}, Sl={:.0f}, Vu={:.0f}, Vl={:.0f}'.format(hu, hl, su, sl, vu, vl))
- img = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
- mask = cv2.inRange(img, (hl, sl, vl), (hu, su, vu))
- return mask
-
+ img_g[img_g > cap] = cap
+ img_g = normalize_image(img_g, cap, np.max(img_g))
+ print('[FILTER] Pixel intensity capping: n_std={:.1f}'.format(n_std))
+ return convert_img(img_g, 'grayscale', colorspace)
+
+
def brightness_contrast(img, alpha=1.0, beta=0.0):
print('[FILTER] Brightness and contrast: alpha={:.1f}, beta={:.1f}'.format(alpha, beta))
- new = cv2.convertScaleAbs(img, alpha=alpha, beta=beta)
- return new
+ return cv2.convertScaleAbs(img, alpha=alpha, beta=beta)
def gamma(img, gamma=1.0):
- print('[FILTER] Gamma correction: gamma={:.1f}'.format(gamma))
invGamma = 1.0 / gamma
+
table = np.array([((i / 255.0) ** invGamma) * 255
for i in np.arange(0, 256)]).astype("uint8")
+
+ print('[FILTER] Gamma correction: gamma={:.1f}'.format(gamma))
return cv2.LUT(img, table)
+
+
+def gaussian_lookup(img, sigma=51):
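+	# Build a lookup table from the CDF of a Gaussian centred at 127; smaller sigma gives a steeper curve and a stronger contrast stretch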
+ x = np.arange(0, 256)
+ pdf = stats.norm.pdf(x, 127, sigma)
+
+ cdf = np.cumsum(pdf)
+ cdf_norm = np.array([(x - np.min(cdf))/(np.max(cdf) - np.min(cdf)) * 255 for x in cdf]).astype('uint8')
+
+ print('[FILTER] Gaussian lookup filter: sigma={}'.format(sigma))
+ return cv2.LUT(img, cdf_norm)
+
+
+def thresholding(img, c1u=255, c1l=0, c2u=255, c2l=0, c3u=255, c3l=0):
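+	# Keep pixels whose channel values fall inside the given per-channel ranges; cv2.inRange returns a 0/255 mask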
+ mask = cv2.inRange(img, (c1l, c2l, c3l), (c1u, c2u, c3u))
+
+	print('[FILTER] Thresholding: Channel 1: [{}, {}], Channel 2: [{}, {}], Channel 3: [{}, {}]'.format(c1l, c1u, c2l, c2u, c3l, c3u))
+ return mask
-def modify_channels(img, r=1.0, g=1.0, b=1.0):
- print('[FILTER] Modify channels: c1={:.1f}, c2={:.1f}, c3={:.1f}'.format(r, g, b))
- img_r = cv2.convertScaleAbs(img[:, :, 2], alpha=r, beta=0)
- img_g = cv2.convertScaleAbs(img[:, :, 1], alpha=g, beta=0)
- img_b = cv2.convertScaleAbs(img[:, :, 0], alpha=b, beta=0)
-
- if g == 0 and b == 0:
- img_g, img_b = img_r, img_r
- elif r == 0 and b == 0:
- img_r, img_b = img_g, img_g
- elif r == 0 and g == 0:
- img_r, img_g = img_b, img_b
-
- return np.dstack([img_b, img_g, img_r])
-
-
-def grayscale(img):
- print('[FILTER] Convert to grayscale')
- return cv2.cvtColor(cv2.cvtColor(img, cv2.COLOR_BGR2GRAY), cv2.COLOR_GRAY2BGR)
-
-
-def negative(img):
- print('[FILTER] Convert to image negative')
- return ~img
-
-
-def highpass(img, sigma=51):
- print('[FILTER] Highpass filter: sigma={:.0f}'.format(sigma))
- if sigma % 2 == 1:
- sigma += 1
- new = img - cv2.GaussianBlur(img, (0, 0), int(sigma)) + 127
-
- return new
-
-
-def laplacian(img):
- print('[FILTER] Laplacian of an image')
- new = cv2.Laplacian(cv2.cvtColor(img, cv2.COLOR_BGR2GRAY), cv2.CV_8U, ksize=3)
- return cv2.cvtColor(new, cv2.COLOR_GRAY2BGR)
-
+def denoise(img, ksize=3):
+ print('[FILTER] Denoise: ksize={}'.format(ksize))
+	return cv2.medianBlur(img, int(ksize))
-def intensity_capping(img, n_std=2):
- print('[FILTER] Pixel intensity capping: n_std='.format(n_std))
- img_g = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
- median = np.median(img_g)
- stdev = np.std(img_g)
- cap = median + n_std * stdev
- img_g[img_g > cap] = cap
+def remove_background(img, num_frames_background=10):
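+	# The background is estimated as the per-pixel median of an evenly spaced subsample of frames and cached
+	# on disk next to the frame folder; the filter returns the difference between the background and the frame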
+ num_frames_background = int(num_frames_background)
+ h, w = img.shape[:2]
- return cv2.cvtColor(img_g, cv2.COLOR_GRAY2BGR)
+ if len(img_list) < num_frames_background:
+ num_frames_background = len(img_list)
+ step = len(img_list) // num_frames_background
+ img_back_path = r'{}/../median_{}.{}'.format(path.dirname(img_list[0]), num_frames_background, args.ext)
-def remove_background(img, num_imgs=10):
- print('[FILTER] Remove image background: num_imgs={:.0f}'.format(num_imgs))
- num_imgs = int(num_imgs)
- new = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY).astype('int')
- h, w = new.shape
+ if path.exists(img_back_path):
+ back = cv2.imread(img_back_path)
+ else:
+ stack = np.ndarray([h, w, 3, num_frames_background], dtype='uint8')
- if len(img_list) < num_imgs:
- num_imgs = len(img_list)
+ for i in range(num_frames_background):
+ stack[:, :, :, i] = cv2.imread(img_list[i*step])
- back_path = r'{}/../median_{}.{}'.format(path.dirname(img_list[0]), num_imgs, args.ext)
+		back = np.median(stack, axis=3).astype('uint8')
+ cv2.imwrite(img_back_path, back)
- if path.exists(back_path):
- back = cv2.imread(back_path, 0).astype('int')
- else:
- stack = np.ndarray([h, w, num_imgs], dtype='int')
+ print('[FILTER] Remove image background: num_frames_background={:.0f}'.format(num_frames_background))
+ return cv2.subtract(back, img)
- for i in range(num_imgs):
- stack[:, :, i] = cv2.imread(img_list[i], 0)
- back = np.median(stack, axis=2).astype('int')
- cv2.imwrite(back_path, back.astype('uint8'))
+def histeq(img):
+ img_gray = convert_img(img, colorspace, 'grayscale')
+ eq = cv2.equalizeHist(img_gray)
+
+ print('[FILTER] Histogram equalization')
+ return convert_img(eq, 'grayscale', colorspace)
- new -= back
- new[new < 0] = 0
- new[new > 255] = 255
- return cv2.cvtColor(new.astype('uint8'), cv2.COLOR_GRAY2BGR)
+def clahe(img, clip=2.0, tile=8):
+ clahe = cv2.createCLAHE(clipLimit=clip, tileGridSize=(int(tile), int(tile)))
+ img_gray = convert_img(img, colorspace, 'grayscale')
+ img_clahe = clahe.apply(img_gray)
+
+ print('[FILTER] CLAHE: clip={:.1f}, tile={:.0f}'.format(clip, tile))
+ return convert_img(img_clahe, 'grayscale', colorspace)
def params_to_list(params):
@@ -209,7 +241,7 @@ def keypress(event):
if event.key == ' ':
if is_original:
- img_shown.set_data(img_rgb)
+ img_shown.set_data(img[:, :, 0] if is_grayscale(img) else img)
else:
img_shown.set_data(original)
@@ -223,28 +255,34 @@ def keypress(event):
def update_frame(val):
global original
global img
- global img_rgb
original = cv2.imread(img_list[sl_ax_frame_num.val])
- img, img_rgb = apply_filters(original, filters_data)
+ original = cv2.cvtColor(original, cv2.COLOR_BGR2RGB)
+
+ img = apply_filters(original, filters_data)
if is_original:
- img_shown.set_data(img_rgb)
+ img_shown.set_data(original)
else:
- img_shown.set_data(img)
+ img_shown.set_data(img[:, :, 0] if is_grayscale(img) else img)
plt.draw()
return
def apply_filters(img, filters_data):
+ global colorspace
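+	# The active colorspace tag is updated below whenever a to_* conversion filter runs, so later filters convert correctly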
+
for i in range(filters_data.shape[0]):
img = func(globals()[filters_data[i][0]], img, params_to_list(filters_data[i][1]))
- img_rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
+
+ if filters_data[i][0].startswith('to_'):
+ colorspace = filters_data[i][0].split('_')[1]
legend = 'Filters:'
+
for i in range(filters_data.shape[0]):
- func_args_names = globals()[filters_data[i][0]]
+ func_args_names = globals()[filters_data[i][0]]
func_args = inspect.getfullargspec(func_args_names)[0][1:] if filters_data[i][1] != '' else []
legend_values = ['{}={}'.format(p, v) for p, v in zip(func_args, filters_data[i][1].split(','))]
legend += '\n ' + filters_data[i][0] + ': ' + ', '.join(legend_values if filters_data[i][1] != '' else '')
@@ -257,80 +295,89 @@ def apply_filters(img, filters_data):
fontsize=9,
)
- return img, img_rgb
+ return img
if __name__ == '__main__':
- try:
- parser = ArgumentParser()
- parser.add_argument('--folder', type=str, help='Path to frames folder')
- parser.add_argument('--ext', type=str, help='Frames\' extension', default='jpg')
- parser.add_argument('--multi', type=int, help='Path to filter list file', default=0)
- args = parser.parse_args()
-
- img_list = glob.glob(r'{}/*.{}'.format(args.folder, args.ext))
- num_frames = len(img_list)
- filters_data = np.loadtxt(args.folder + '/filters.txt', dtype='str', delimiter=r'/', ndmin=2)
-
- fig, ax = plt.subplots()
- fig.canvas.mpl_connect('key_press_event', keypress)
- plt.subplots_adjust(bottom=0.13)
- plt.axis('off')
-
- axcolor = 'lightgoldenrodyellow'
- valfmt = "%d"
-
- ax_frame_num = plt.axes([0.2, 0.05, 0.63, 0.03], facecolor=axcolor)
- sl_ax_frame_num = Slider(ax_frame_num, f'Frame #\n({num_frames} total)', 0, num_frames - 1, valinit=0, valstep=1, valfmt=valfmt)
- sl_ax_frame_num.on_changed(update_frame)
-
- if args.multi == 0:
- img_path = img_list[0]
- img = cv2.imread(img_path)
- original = cv2.cvtColor(cv2.imread(img_path), cv2.COLOR_BGR2RGB)
- is_original = False
-
- img, img_rgb = apply_filters(img, filters_data)
+ parser = ArgumentParser()
+ parser.add_argument('--folder', type=str, help='Path to frames folder')
+ parser.add_argument('--ext', type=str, help='Frames\' extension', default='jpg')
+	parser.add_argument('--multi', type=int, help='Filter all frames if set to 1, otherwise preview filters on a single frame', default=0)
+ args = parser.parse_args()
+
+ img_list = glob.glob(r'{}/*.{}'.format(args.folder, args.ext))
+ num_frames = len(img_list)
+ filters_data = np.loadtxt(args.folder + '/filters.txt', dtype='str', delimiter=r'/', ndmin=2)
+
+ fig, ax = plt.subplots()
+ fig.canvas.mpl_connect('key_press_event', keypress)
+ plt.subplots_adjust(bottom=0.13)
+ plt.axis('off')
+
+ axcolor = 'lightgoldenrodyellow'
+ valfmt = "%d"
+
+ ax_frame_num = plt.axes([0.2, 0.05, 0.63, 0.03], facecolor=axcolor)
+ sl_ax_frame_num = Slider(ax_frame_num, f'Frame #\n({num_frames} total)', 0, num_frames - 1, valinit=0, valstep=1, valfmt=valfmt)
+ sl_ax_frame_num.on_changed(update_frame)
+
+ if args.multi == 0:
+ img_path = img_list[0]
+ img = cv2.cvtColor(cv2.imread(img_path), cv2.COLOR_BGR2RGB)
+ colorspace = 'rgb'
+
+ original = img.copy()
+ is_original = False
+
+ img = apply_filters(img, filters_data)
+
+ try:
+ mng = plt.get_current_fig_manager()
+ mng.window.state('zoomed')
+ mng.set_window_title('Filtering')
+ except:
+ pass
+
+ ax.set_title('Use SPACE to toggle between original and filtered image, and Q or ESC to exit')
+ ax.axis('off')
+
+ if is_grayscale(img):
+ img_shown = ax.imshow(img[:, :, 0], cmap=colormap)
+ else:
+ img_shown = ax.imshow(img)
- try:
- mng = plt.get_current_fig_manager()
- mng.window.state('zoomed')
- mng.set_window_title('Filtering')
- except:
- pass
+ plt.show()
+ exit()
- ax.set_title('Use SPACE to toggle between original and filtered image, and Q or ESC to exit')
- ax.axis('off')
- img_shown = ax.imshow(img_rgb)
- plt.show()
- exit()
+ else:
+ filtered_folder = args.folder + '_filtered'
- else:
- filtered_folder = args.folder + '_filtered'
+ print('[BEGIN] :STARTING FILTERING: '.ljust(len(separator), '-'))
+ print(' [INFO] Filtering frames from folder', args.folder + '/')
+ print(' [INFO] Filters to apply:', [row[0] for row in filters_data])
- print('[BEGIN] :STARTING FILTERING: '.ljust(len(separator), '-'))
- print(' [INFO] Filtering frames from folder', args.folder + '/')
- print(' [INFO] Filters to apply:', [row[0] for row in filters_data])
+ if not path.exists(filtered_folder):
+ makedirs(filtered_folder)
- if not path.exists(filtered_folder):
- makedirs(filtered_folder)
+ for j in range(len(img_list)):
+ img_path = img_list[j]
+ img = cv2.imread(img_path)
+ img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
+ colorspace = 'rgb'
- for j in range(len(img_list)):
- img_path = img_list[j]
- img = cv2.imread(img_path)
+ for i in range(filters_data.shape[0]):
+ img = func(locals()[filters_data[i][0]], img, params_to_list(filters_data[i][1]))
- for i in range(filters_data.shape[0]):
- img = func(locals()[filters_data[i][0]], img, params_to_list(filters_data[i][1]))
+ if filters_data[i][0].startswith('to_'):
+ colorspace = filters_data[i][0].split('_')[1]
- cv2.imwrite('{}/{}'.format(filtered_folder, path.basename(img_path)), img)
+ img_rgb = convert_img(img, colorspace, 'rgb')
+ img_bgr = cv2.cvtColor(img_rgb, cv2.COLOR_RGB2BGR)
- print(' [INFO] Filtering frame {}/{} ({:.1f}%)'.format(j, num_frames - 1, j/(num_frames - 1) * 100))
+ cv2.imwrite('{}/{}'.format(filtered_folder, path.basename(img_path)), img_bgr)
- print(' [END] Filtering complete!')
- print(' [END] Results available in folder [{}]!'.format(filtered_folder))
- input('\nPress ENTER/RETURN to exit...')
+ print(' [INFO] Filtering frame {}/{} ({:.1f}%)'.format(j, num_frames - 1, j/(num_frames - 1) * 100))
- except Exception as ex:
- print('\n[EXCEPTION] The following exception has occurred: \n\n'
- ' {}'.format(ex))
+ print(' [END] Filtering complete!')
+ print(' [END] Results available in folder [{}]!'.format(filtered_folder))
input('\nPress ENTER/RETURN to exit...')
diff --git a/scripts/filters.xml b/scripts/filters.xml
index 8e04d2d..43b307d 100644
--- a/scripts/filters.xml
+++ b/scripts/filters.xml
@@ -1,13 +1,5 @@
-
- Grayscale
- grayscale
- Converts a color image to 8bit grayscale [0..255].
-
-
-
-
Negative
negative
@@ -17,76 +9,50 @@
- Brightness/Contrast
- brightness_contrast
- Adjusts overall brightness and contrast using linear transformation [Alpha*Y + Beta].
+ Convert to Grayscale
+ to_grayscale
+ Converts a color image to a single-channel 8bit grayscale [0..255].
-
-
- Alpha
- float
- 5.0
- 1.0
- 0.0
- 0.1
-
-
- Beta
- int
- 255
- 0
- -255
- 1
-
-
+
- Adjust gamma
- gamma
- Adjusts gamma exposure of the image using linear transformation [Gamma*Y].
+ Convert to RGB
+ to_rgb
+ Converts to three-channel 8bit RGB (red-green-blue) colorspace.
-
-
- Gamma
- float
- 3.0
- 1.0
- 0.0
- 0.1
-
-
+
- Hist. equalization
- histeq
- Stretches the histogram of the image to improve dynamic range and accentuate details.
+ Convert to HSV
+ to_hsv
+ Converts to three-channel 8bit HSV (hue-saturation-value) colorspace.
- CLAHE
- clahe
- Adaptive version of the histogram equalization with histogram clipping.
+ Convert to L*a*b*
+ to_lab
+ Converts to three-channel 8bit L*a*b* (CIELab) colorspace.
+
+
+
+
+
+ Single image channel
+ select_channel
+	Select a single channel from a three-channel image. Channel order depends on the image colorspace (RGB, HSV, L*a*b*).
- Clip limit
- float
- 10.0
- 2.0
- 0.1
- 0.1
-
-
- Tile size
+ Channel number
int
- 64
- 8
- 4
- 4
+ 3
+ 1
+ 1
+ 1
@@ -111,12 +77,54 @@
Intensity capping
intensity_capping
- Limits local brightness using neighboring mean and variance.
+	Limits pixel values using the global median and standard deviation. If tracer particles are darker than the water surface, apply the negative filter before this one.
Num. standard deviations (n)
float
+ 5.0
+ 0.0
+ -5.0
+ 0.1
+
+
+
+
+
+ Brightness/Contrast adj.
+ brightness_contrast
+ Adjusts overall brightness and contrast using linear transformation [Alpha*Y + Beta].
+
+
+
+ Alpha
+ float
+ 5.0
+ 1.0
+ 0.0
+ 0.1
+
+
+ Beta
+ int
+ 255
+ 0
+ -255
+ 1
+
+
+
+
+
+ Gamma adjustment
+ gamma
+ Adjusts gamma exposure of the image using linear transformation [Gamma*Y].
+
+
+
+ Gamma
+ float
3.0
1.0
0.0
@@ -126,37 +134,46 @@
- Laplacian
- laplacian
- Calculates a gradient map of a grayscale image using Laplacian, can be useful for detecting tracer particles.
+ Gaussian CDF lookup
+ gaussian_lookup
+	Adjusts exposure using a Gaussian cumulative distribution function as a lookup table.
-
+
+
+ Sigma
+ int
+ 250
+ 50
+ 1
+ 1
+
+
- HSV filter
- hsv_filter
- Filter image using hue, saturation and value (lightness). If slider values are left at default (upper at max, lower at min), the image is simply transformed to HSV colorspace.
+ Channel thresholding filter
+ thresholding
+	Filter image by thresholding individual image channels (returns a binary 0/255 mask).
- Hue lower
+ Channel 1 low
int
- 180
+ 255
0
0
1
- Hue upper
+ Channel 1 high
int
- 180
- 180
+ 255
+ 255
0
1
- Saturation lower
+ Channel 2 low
int
255
0
@@ -164,7 +181,7 @@
1
- Saturation upper
+ Channel 2 high
int
255
255
@@ -172,7 +189,7 @@
1
- Lightness/value lower
+ Channel 3 low
int
255
0
@@ -180,7 +197,7 @@
1
- Lightness/value upper
+ Channel 3 high
int
255
255
@@ -193,74 +210,17 @@
Denoise
denoise
- Removes high frequency content, useful for removing camera noise but is VERY SLOW.
+ Removes salt-and-pepper type noise with a median filter.
- Strength
- int
- 15
- 3
- 1
- 1
-
-
- Strength color
+ Kernel size
int
- 15
+ 31
3
- 1
- 1
-
-
- Template size
- int
- 21
- 7
3
2
-
- Search area
- int
- 63
- 21
- 9
- 2
-
-
-
-
-
- Adjust image channels
- modify_channels
- Adjust image channels intensities to accentuate different types of details.
-
-
-
- Channel #1
- float
- 3.0
- 1.0
- 0.0
- 0.1
-
-
- Channel #2
- float
- 3.0
- 1.0
- 0.0
- 0.1
-
-
- Channel #1
- float
- 3.0
- 1.0
- 0.0
- 0.1
-
@@ -280,4 +240,37 @@
+
+
+ Hist. equalization
+ histeq
+ Stretches the histogram of the image to improve dynamic range and accentuate details.
+
+
+
+
+
+ CLAHE
+ clahe
+ Adaptive version of the histogram equalization with histogram clipping.
+
+
+
+ Clip limit
+ float
+ 10.0
+ 2.0
+ 0.1
+ 0.1
+
+
+ Tile size
+ int
+ 64
+ 8
+ 4
+ 4
+
+
+
diff --git a/scripts/inspect_frames.py b/scripts/inspect_frames.py
index 02a2441..51ee7bc 100644
--- a/scripts/inspect_frames.py
+++ b/scripts/inspect_frames.py
@@ -52,25 +52,25 @@ def keypress(event):
if event.key == 'escape':
exit()
- elif event.key == 'left':
+ elif event.key == 'down':
if sl_ax_frame_num.val == 0:
sl_ax_frame_num.set_val(num_frames - 1)
else:
sl_ax_frame_num.set_val(sl_ax_frame_num.val - 1)
- elif event.key == 'right':
+ elif event.key == 'up':
if sl_ax_frame_num.val == num_frames - 1:
sl_ax_frame_num.set_val(0)
else:
sl_ax_frame_num.set_val(sl_ax_frame_num.val + 1)
- elif event.key == 'up':
+ elif event.key == 'pageup':
if sl_ax_frame_num.val >= num_frames - 10:
sl_ax_frame_num.set_val(0)
else:
sl_ax_frame_num.set_val(sl_ax_frame_num.val + 10)
- elif event.key == 'down':
+ elif event.key == 'pagedown':
if sl_ax_frame_num.val <= 9:
sl_ax_frame_num.set_val(num_frames - 1)
else:
@@ -118,8 +118,8 @@ def keypress(event):
sl_ax_frame_num.on_changed(update_frame)
legend = 'Use slider to select frame,\n' \
- 'use LEFT and RIGHT keys to move by 1 frame\n' \
- 'or UP and DOWN keys to move by 10 frames\n' \
+ 'use UP and DOWN keys to move by +/- 1 frame\n' \
+ 'or PageUP and PageDOWN keys to move by +/- 10 frames\n' \
'Press ESC or Q to exit'
legend_toggle = plt.text(0.02, 0.97, legend,