"""
Lens blur generator
"""

import math
from functools import reduce

import cv2
import numpy as np
from scipy import signal

# These scales bring the size of the below components to roughly the specified radius - I just hard coded these
kernel_scales = [1.4, 1.2, 1.2, 1.2, 1.2, 1.2]

# Kernel parameters a, b, A, B
# These parameters are drawn from <http://yehar.com/blog/?p=1495>
kernel_params = [
    # 1-component
    [[0.862325, 1.624835, 0.767583, 1.862321]],

    # 2-components
    [[0.886528, 5.268909, 0.411259, -0.548794],
     [1.960518, 1.558213, 0.513282, 4.56111]],

    # 3-components
    [[2.17649, 5.043495, 1.621035, -2.105439],
     [1.019306, 9.027613, -0.28086, -0.162882],
     [2.81511, 1.597273, -0.366471, 10.300301]],

    # 4-components
    [[4.338459, 1.553635, -5.767909, 46.164397],
     [3.839993, 4.693183, 9.795391, -15.227561],
     [2.791880, 8.178137, -3.048324, 0.302959],
     [1.342190, 12.328289, 0.010001, 0.244650]],

    # 5-components
    [[4.892608, 1.685979, -22.356787, 85.91246],
     [4.71187, 4.998496, 35.918936, -28.875618],
     [4.052795, 8.244168, -13.212253, -1.578428],
     [2.929212, 11.900859, 0.507991, 1.816328],
     [1.512961, 16.116382, 0.138051, -0.01]],

    # 6-components
    [[5.143778, 2.079813, -82.326596, 111.231024],
     [5.612426, 6.153387, 113.878661, 58.004879],
     [5.982921, 9.802895, 39.479083, -162.028887],
     [6.505167, 11.059237, -71.286026, 95.027069],
     [3.869579, 14.81052, 1.405746, -3.704914],
     [2.201904, 19.032909, -0.152784, -0.107988]]]

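# For reference: each 1D component above is a complex Gaussian of the form
#   K(x) = exp(-a * x^2) * (cos(b * x^2) + i * sin(b * x^2))
# The blur is produced by convolving with each component separably in x and y,
# taking A * real + B * imag of the result, and summing over components
# (see weighted_sum and lens_blur below). The a, b, A, B values above are the
# constants tabulated in the linked post.
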
# Obtain specific parameters and scale for a given component count
def get_parameters(component_count=2):
    # Clamp to the available parameter sets (indices 0 .. len(kernel_params) - 1)
    parameter_index = max(0, min(component_count - 1, len(kernel_params) - 1))
    parameter_dictionaries = [dict(zip(['a', 'b', 'A', 'B'], param)) for param in kernel_params[parameter_index]]
    return (parameter_dictionaries, kernel_scales[parameter_index])

# Produces a 1D complex kernel of the given radius; `scale` stretches the sample
# positions so the visible kernel size roughly matches the requested radius.
# a and b are parameters of this complex kernel
def complex_kernel_1d(radius, scale, a, b):
    kernel_radius = radius
    kernel_size = kernel_radius * 2 + 1
    # Sample positions across the kernel, normalised by the radius and scaled
    ax = np.arange(-kernel_radius, kernel_radius + 1., dtype=np.float32)
    ax = ax * scale * (1 / kernel_radius)
    kernel_complex = np.zeros(kernel_size, dtype=np.complex64)
    kernel_complex.real = np.exp(-a * (ax**2)) * np.cos(b * (ax**2))
    kernel_complex.imag = np.exp(-a * (ax**2)) * np.sin(b * (ax**2))
    return kernel_complex.reshape((1, kernel_size))

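# Equivalent compact form, shown only as a sketch for clarity (this helper is not
# used below): exp(-a*x^2) * (cos(b*x^2) + i*sin(b*x^2)) == exp((-a + i*b) * x^2)
def complex_kernel_1d_compact(radius, scale, a, b):
    ax = np.arange(-radius, radius + 1.0, dtype=np.float32) * scale / radius
    return np.exp((-a + 1j * b) * ax**2).astype(np.complex64).reshape(1, -1)
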
def normalise_kernels(kernels, params):
    # Normalises with respect to A*real + B*imag
    total = 0

    for k, p in zip(kernels, params):
        # 1D kernel - applied in 2D
        for i in range(k.shape[1]):
            for j in range(k.shape[1]):
                # Complex multiply and weighted sum
                total += p['A'] * (k[0,i].real*k[0,j].real - k[0,i].imag*k[0,j].imag) + p['B'] * (k[0,i].real*k[0,j].imag + k[0,i].imag*k[0,j].real)

    scalar = 1 / math.sqrt(total)
    kernels = np.asarray(kernels) * scalar

    return kernels

# Combine the real and imaginary parts of an image, weighted by A and B
def weighted_sum(kernel, params):
    return np.add(kernel.real * params['A'], kernel.imag * params['B'])

# Produce a 2D kernel by self-multiplying a 1D kernel. This is slower than the
# separable approach and is used mostly for visualisation (see the sketch below)
def multiply_kernel(kernel):
    kernel_size = kernel.shape[1]
    a = np.repeat(kernel, kernel_size, 0)
    b = np.repeat(kernel.transpose(), kernel_size, 1)
    return np.multiply(a, b)

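# Sketch of how the pieces fit together (this helper is illustrative and not called
# elsewhere): build the full 2D bokeh kernel for inspection. After normalise_kernels,
# the weighted sum over all components should total roughly 1.0.
def visualise_bokeh_kernel(radius=8, component_count=2):
    params, scale = get_parameters(component_count)
    kernels = [complex_kernel_1d(radius, scale, p['a'], p['b']) for p in params]
    kernels = normalise_kernels(kernels, params)
    psf = reduce(np.add, [weighted_sum(multiply_kernel(k), p) for k, p in zip(kernels, params)])
    # psf.sum() should be close to 1.0; psf itself approximates a disc
    return psf
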
def lens_blur(img, radius=3, components=5, exposure_gamma=5):

    # Channels-first layout so each colour channel can be convolved independently
    img = np.ascontiguousarray(img.transpose(2, 0, 1), dtype=np.float32)

    # Obtain component parameters / scale values
    parameters, scale = get_parameters(component_count=components)

    # Create each component for size radius, using scale and other component parameters
    components = [complex_kernel_1d(radius, scale, component_params['a'], component_params['b'])
                  for component_params in parameters]

    # Normalise all kernels together (the combination of all applied kernels in 2D must sum to 1)
    components = normalise_kernels(components, parameters)

    # Increase exposure to highlight bright spots
    img = np.power(img, exposure_gamma)

    # Process RGB channels for all components
    component_output = list()
    for component, component_params in zip(components, parameters):
        channels = list()
        for channel in range(img.shape[0]):
            # Separable pass: convolve horizontally then vertically with the 1D complex kernel
            inter = signal.convolve2d(img[channel], component, boundary='symm', mode='same')
            channels.append(signal.convolve2d(inter, component.transpose(), boundary='symm', mode='same'))

        # The final component output is a stack of RGB, with weighted sums of real and imaginary parts
        component_image = np.stack([weighted_sum(channel, component_params) for channel in channels])
        component_output.append(component_image)

    # Add all components together
    output_image = reduce(np.add, component_output)

    # Reverse exposure
    output_image = np.clip(output_image, 0, None)
    output_image = np.power(output_image, 1.0 / exposure_gamma)

    # Avoid out of range values - generally this only occurs with small negatives
    # due to imperfect complex kernels
    output_image = np.clip(output_image, 0, 1)

    # output_image *= 255
    # output_image = output_image.transpose(1,2,0).astype(np.uint8)
    output_image = output_image.transpose(1, 2, 0)
    return output_image
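
# Example usage - a sketch only: the file names are placeholders, and lens_blur
# expects an HxWxC image with float values in [0, 1]
if __name__ == '__main__':
    img = cv2.imread('input.png').astype(np.float32) / 255.0
    result = lens_blur(img, radius=7, components=3, exposure_gamma=3)
    cv2.imwrite('output.png', (result * 255).astype(np.uint8))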