forked from cmcguinness/focusstack
-
Notifications
You must be signed in to change notification settings - Fork 0
/
FocusStack.py
147 lines (102 loc) · 4.79 KB
/
FocusStack.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
"""
Simple Focus Stacker
Author: Charles McGuinness ([email protected])
Copyright: Copyright 2015 Charles McGuinness
License: Apache License 2.0
This code will take a series of images and merge them so that each
pixel is taken from the image with the sharpest focus at that location.
The logic is roughly the following:
1. Align the images. Changing the focus on a lens, even
if the camera remains fixed, causes a mild zooming on the images.
We need to correct the images so they line up perfectly on top
of each other.
2. Perform a gaussian blur on all images
3. Compute the laplacian on the blurred image to generate a gradient map
4. Create a blank output image with the same size as the original input
images
5. For each pixel [x,y] in the output image, copy the pixel [x,y] from
the input image which has the largest gradient [x,y]
This algorithm was inspired by the high-level description given at
http://stackoverflow.com/questions/15911783/what-are-some-common-focus-stacking-algorithms
"""
import numpy as np
import cv2
def findHomography(image_1_kp, image_2_kp, matches):
    """
    Estimate the homography that maps image-1 keypoints onto image-2 keypoints.

    Parameters:
        image_1_kp: keypoints from the image to be warped (query side)
        image_2_kp: keypoints from the base image (train side)
        matches:    list of cv2.DMatch pairing the two keypoint sets

    Returns:
        3x3 homography matrix from cv2.findHomography (RANSAC), or None if
        OpenCV could not estimate one.
    """
    image_1_points = np.zeros((len(matches), 1, 2), dtype=np.float32)
    image_2_points = np.zeros((len(matches), 1, 2), dtype=np.float32)

    for i, match in enumerate(matches):
        image_1_points[i] = image_1_kp[match.queryIdx].pt
        image_2_points[i] = image_2_kp[match.trainIdx].pt

    # RANSAC rejects outlier correspondences; the inlier mask is not needed here.
    homography, _ = cv2.findHomography(image_1_points, image_2_points,
                                       cv2.RANSAC, ransacReprojThreshold=2.0)

    return homography
#
# Align the images so they overlap properly...
#
#
def align_images(images):
    """
    Warp every image so it lines up with images[0].

    Features are detected in the grayscale base image, matched against each
    subsequent image, and a RANSAC homography warps each image onto the base.

    Parameters:
        images: list of BGR images, all the same size; images[0] is the base.

    Returns:
        List of aligned images in the same order (base image passed through).
    """
    # SIFT generally produces better results, but it was not FOSS historically,
    # so choose the feature detector that suits the needs of your project.
    # ORB does OK.
    use_sift = True

    outimages = []

    if use_sift:
        # SIFT moved into the main cv2 namespace in OpenCV >= 4.4; fall back
        # to the legacy xfeatures2d location for older builds.
        if hasattr(cv2, "SIFT_create"):
            detector = cv2.SIFT_create()
        else:
            detector = cv2.xfeatures2d.SIFT_create()
    else:
        detector = cv2.ORB_create(1000)

    # We assume that image 0 is the "base" image and align everything to it
    print("Detecting features of base image")
    outimages.append(images[0])
    image1gray = cv2.cvtColor(images[0], cv2.COLOR_BGR2GRAY)
    image_1_kp, image_1_desc = detector.detectAndCompute(image1gray, None)

    for i in range(1, len(images)):
        print("Aligning image {}".format(i))
        # Convert to grayscale before detection so every image gets the same
        # treatment as the base image (the original passed the color image).
        image_i_gray = cv2.cvtColor(images[i], cv2.COLOR_BGR2GRAY)
        image_i_kp, image_i_desc = detector.detectAndCompute(image_i_gray, None)

        if use_sift:
            bf = cv2.BFMatcher()
            # knnMatch returns the top two matches per feature point; keep a
            # match only when it clearly beats the runner-up (Lowe's ratio test).
            pairMatches = bf.knnMatch(image_i_desc, image_1_desc, k=2)
            rawMatches = []
            for m, n in pairMatches:
                if m.distance < 0.7 * n.distance:
                    rawMatches.append(m)
        else:
            bf = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)
            rawMatches = bf.match(image_i_desc, image_1_desc)

        # Keep only the 128 best matches for the homography estimate.
        sortMatches = sorted(rawMatches, key=lambda x: x.distance)
        matches = sortMatches[0:128]

        hom = findHomography(image_i_kp, image_1_kp, matches)
        newimage = cv2.warpPerspective(images[i], hom,
                                       (images[i].shape[1], images[i].shape[0]),
                                       flags=cv2.INTER_LINEAR)

        outimages.append(newimage)
        # If you find that there's a large amount of ghosting, it may be because one or more of the input
        # images gets misaligned. Outputting the aligned images may help diagnose that.
        # cv2.imwrite("aligned{}.png".format(i), newimage)

    return outimages
#
# Compute the gradient map of the image
def doLap(image):
    """Return the gradient map: the Laplacian of a Gaussian-blurred image."""
    # YOU SHOULD TUNE THESE VALUES TO SUIT YOUR NEEDS
    kernel_size = 5     # aperture of the Laplacian operator
    blur_size = 5       # width/height of the Gaussian blur kernel
    # Generally, keeping these two values the same or very close works well.
    # Also, odd numbers, please...
    smoothed = cv2.GaussianBlur(image, (blur_size, blur_size), 0)
    return cv2.Laplacian(smoothed, cv2.CV_64F, ksize=kernel_size)
#
# This routine finds the points of best focus in all images and produces a merged result...
#
def focus_stack(unimages):
    """
    Merge a stack of images, taking each pixel from the sharpest frame.

    The images are first aligned, a Laplacian (gradient) map is computed for
    each, and every output pixel is copied from the frame whose gradient has
    the largest magnitude at that location.

    Parameters:
        unimages: list of BGR images of identical size.

    Returns:
        A single merged BGR image.
    """
    images = align_images(unimages)

    print("Computing the laplacian of the blurred images")
    laps = []
    for idx, frame in enumerate(images):
        print("Lap {}".format(idx))
        laps.append(doLap(cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)))

    laps = np.asarray(laps)
    print("Shape of array of laplacians = {}".format(laps.shape))

    # Per-pixel winner selection: a frame "wins" a pixel wherever its gradient
    # magnitude equals the stack-wide maximum (on ties, the last frame wins,
    # since later writes overwrite earlier ones).
    magnitudes = np.absolute(laps)
    peak = magnitudes.max(axis=0)
    winners = (magnitudes == peak).astype(np.uint8)

    output = np.zeros(shape=images[0].shape, dtype=images[0].dtype)
    # bitwise_not writes the inverted source into output only where the mask
    # is set; the final 255-x undoes the inversion, yielding the composite.
    for frame, sel in zip(images, winners):
        output = cv2.bitwise_not(frame, output, mask=sel)

    return 255 - output