%matplotlib inline
from matplotlib import pyplot as plt
import imutils
import cv2
import numpy as np
cv2.__version__
'3.1.0'
def detectAndDescribe(image):
    # convert the image to grayscale
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    # detect keypoints and extract SIFT descriptors from the grayscale image
    descriptor = cv2.xfeatures2d.SIFT_create()
    (kps, features) = descriptor.detectAndCompute(gray, None)
    # convert the keypoints from KeyPoint objects to a NumPy array
    kps = np.float32([kp.pt for kp in kps])
    # return a tuple of keypoints and features
    return (kps, features)
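In OpenCV 3.x, SIFT lives in the opencv-contrib xfeatures2d module. If that module isn't installed, a minimal sketch of a drop-in alternative using ORB (which ships with core OpenCV) could look like the following; note that ORB descriptors are binary, so the matcher below would have to become "BruteForce-Hamming". The function name is hypothetical.

def detectAndDescribeORB(image):
    # hypothetical fallback for when cv2.xfeatures2d is unavailable
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    orb = cv2.ORB_create(nfeatures=2000)
    (kps, features) = orb.detectAndCompute(gray, None)
    return (np.float32([kp.pt for kp in kps]), features)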
def matchKeypoints(kpsA, kpsB, featuresA, featuresB, ratio, reprojThresh):
    # compute the raw matches and initialize the list of actual matches
    matcher = cv2.DescriptorMatcher_create("BruteForce")
    rawMatches = matcher.knnMatch(featuresA, featuresB, 2)
    matches = []
    # loop over the raw matches
    for m in rawMatches:
        # ensure the distance is within a certain ratio of each
        # other (i.e. Lowe's ratio test)
        if len(m) == 2 and m[0].distance < m[1].distance * ratio:
            matches.append((m[0].trainIdx, m[0].queryIdx))
    # computing a homography requires at least 4 matches
    if len(matches) >= 4:
        # construct the two sets of points
        ptsA = np.float32([kpsA[i] for (_, i) in matches])
        ptsB = np.float32([kpsB[i] for (i, _) in matches])
        # compute the homography between the two sets of points
        (H, status) = cv2.findHomography(ptsA, ptsB, cv2.RANSAC,
            reprojThresh)
        # return the matches along with the homography matrix
        # and status of each matched point
        return (matches, H, status)
    # otherwise, no homography could be computed
    return None
# load the two images
imageA = cv2.imread('1orig.png')
imageB = cv2.imread('2orig.png')
TODO:
HabCam images are "along-track"; to adapt them to the existing code I'm rotating the images here. This is bad because it adds an extra step to the workflow.
#imageB = np.rot90(imageB, k=3)
#imageA = np.rot90(imageA, k=3)
(kpsA, featuresA) = detectAndDescribe(imageA)
(kpsB, featuresB) = detectAndDescribe(imageB)
# match features between the two images
ratio=0.75
reprojThresh=4.0
M = matchKeypoints(kpsA, kpsB, featuresA, featuresB, ratio, reprojThresh)
TODO:
I need to study the parameters ratio and reprojThresh
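One hedged way to study them: re-run the matcher over a small grid of values and compare the number of ratio-test survivors and RANSAC inliers. The grid values below are arbitrary guesses, not recommendations.

for r in (0.6, 0.7, 0.75, 0.8):
    for t in (2.0, 4.0, 8.0):
        out = matchKeypoints(kpsA, kpsB, featuresA, featuresB, r, t)
        if out is not None:
            (m, _, s) = out
            # s is a column vector of 0/1 inlier flags from cv2.findHomography
            print("ratio=%.2f reprojThresh=%.1f matches=%d inliers=%d"
                  % (r, t, len(m), int(s.sum())))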
# matchKeypoints returns None when there are too few matches
if M is None:
    raise RuntimeError("not enough keypoint matches to compute a homography")
(matches, H, status) = M
result = cv2.warpPerspective(imageA, H, (imageA.shape[1], imageA.shape[0] + imageB.shape[0]))
result[0:imageB.shape[0], 0:imageB.shape[1]] = imageB
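The fixed-size canvas above (imageA's width, the two heights summed) is what clips part of imageA in the result (see the alignment 'cut' note below). A minimal sketch of one possible fix, assuming the same H: size the canvas from the warped corners of imageA and fold the required translation into the homography. The helper name is hypothetical.

def warpFullExtent(imageA, imageB, H):
    (hA, wA) = imageA.shape[:2]
    (hB, wB) = imageB.shape[:2]
    # map imageA's corners into imageB's frame and gather all corners
    cornersA = np.float32([[0, 0], [wA, 0], [wA, hA], [0, hA]]).reshape(-1, 1, 2)
    cornersB = np.float32([[0, 0], [wB, 0], [wB, hB], [0, hB]]).reshape(-1, 1, 2)
    allPts = np.concatenate((cv2.perspectiveTransform(cornersA, H), cornersB))
    (xmin, ymin) = np.int32(allPts.min(axis=0).ravel() - 0.5)
    (xmax, ymax) = np.int32(allPts.max(axis=0).ravel() + 0.5)
    # translate so the whole mosaic fits in positive coordinates
    T = np.array([[1, 0, -xmin], [0, 1, -ymin], [0, 0, 1]], dtype=np.float64)
    out = cv2.warpPerspective(imageA, T.dot(H), (xmax - xmin, ymax - ymin))
    out[-ymin:hB - ymin, -xmin:wB - xmin] = imageB
    return out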
# OpenCV loads images as BGR; convert to RGB so matplotlib shows true colors
plt.figure(figsize=(10,10))
plt.imshow(cv2.cvtColor(result, cv2.COLOR_BGR2RGB))
plt.title('My first mosaic');
plt.figure(figsize=(10,10))
plt.imshow(cv2.cvtColor(imageA, cv2.COLOR_BGR2RGB))
plt.title('First image');
plt.figure(figsize=(10,10))
plt.imshow(cv2.cvtColor(imageB, cv2.COLOR_BGR2RGB))
plt.title('Second image');
from IPython.core.display import Image
Image('desired.png')
Desired output (only 2 images for now). Note: the output shown here was mocked up by hand in GIMP. It fixes two issues:

black-band on the border:
The algorithm used by the HabCam devs for the geometric correction of each image produces a curved black band along the border. The shape and area of these black bands vary very little between images and depend on the camera's attitude parameters and lens distortion.

image alignment 'cut':
The images are not perfectly aligned (compare the desired output with the resulting image: the lower border cuts off part of imageA).
Potential solution with OpenCV:
Run a moving window along the four borders (with a user-defined width), performing edge detection to mark the curved line between the image and the black band. Then use that line as a "cutting tool", setting the pixels falling between the line and the border to null/transparent (see the sketch below).
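That edge-tracing pass isn't implemented yet. As a simpler stand-in, here is a minimal sketch that thresholds near-black pixels inside a border strip and makes them transparent via an alpha channel; the helper name, strip width, and darkness threshold are all assumptions.

def maskBlackBand(img, strip=60, dark=10):
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    # restrict the mask to a strip along the four borders
    border = np.zeros(gray.shape, dtype=bool)
    border[:strip, :] = True
    border[-strip:, :] = True
    border[:, :strip] = True
    border[:, -strip:] = True
    # transparent where a pixel is both near a border and near-black
    alpha = np.full(gray.shape, 255, dtype=np.uint8)
    alpha[border & (gray <= dark)] = 0
    return np.dstack((img, alpha))  # BGRA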