#!/usr/bin/env python
# coding: utf-8

# In[1]:


get_ipython().run_line_magic('matplotlib', 'inline')
from matplotlib import pyplot as plt


# In[2]:


import imutils
import cv2
import numpy as np


# In[3]:


cv2.__version__


# In[4]:


def detectAndDescribe(image):
    # convert the image to grayscale
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)

    # SIFT lives in xfeatures2d for opencv-contrib < 4.4;
    # on OpenCV >= 4.4 use cv2.SIFT_create() instead
    descriptor = cv2.xfeatures2d.SIFT_create()
    (kps, features) = descriptor.detectAndCompute(gray, None)

    # convert the keypoints from KeyPoint objects to NumPy arrays
    kps = np.float32([kp.pt for kp in kps])

    # return a tuple of keypoints and features
    return (kps, features)


# In[5]:


def matchKeypoints(kpsA, kpsB, featuresA, featuresB, ratio, reprojThresh):
    # compute the raw matches and initialize the list of actual matches
    matcher = cv2.DescriptorMatcher_create("BruteForce")
    rawMatches = matcher.knnMatch(featuresA, featuresB, 2)
    matches = []

    # loop over the raw matches
    for m in rawMatches:
        # ensure the distance is within a certain ratio of each
        # other (i.e. Lowe's ratio test)
        if len(m) == 2 and m[0].distance < m[1].distance * ratio:
            matches.append((m[0].trainIdx, m[0].queryIdx))

    # computing a homography requires at least 4 matches
    if len(matches) >= 4:
        # construct the two sets of points
        ptsA = np.float32([kpsA[i] for (_, i) in matches])
        ptsB = np.float32([kpsB[i] for (i, _) in matches])

        # compute the homography between the two sets of points
        (H, status) = cv2.findHomography(ptsA, ptsB, cv2.RANSAC,
            reprojThresh)

        # return the matches along with the homography matrix
        # and status of each matched point
        return (matches, H, status)

    # otherwise, no homography could be computed
    return None


# In[6]:


# load the two images
imageA = cv2.imread('1orig.png')
imageB = cv2.imread('2orig.png')


# TODO:
#
# ```
# HabCam images are "along-track"; to adapt them to the existing code I am
# rotating the images here. This is bad because it adds an extra step to
# the workflow.
# ```

# In[7]:


#imageB = np.rot90(imageB_v, k=3)
#imageA = np.rot90(imageA_v, k=3)


# In[8]:


(kpsA, featuresA) = detectAndDescribe(imageA)
(kpsB, featuresB) = detectAndDescribe(imageB)


# In[9]:


# match features between the two images
ratio = 0.75
reprojThresh = 4.0
M = matchKeypoints(kpsA, kpsB, featuresA, featuresB, ratio, reprojThresh)


# TODO:
#
# ```
# I need to study the ratio and reprojThresh parameters
# (see the parameter-sweep sketch after the desired-output cell below).
# ```

# In[10]:


(matches, H, status) = M
result = cv2.warpPerspective(imageA, H,
    (imageA.shape[1], imageA.shape[0] + imageB.shape[0]))
result[0:imageB.shape[0], 0:imageB.shape[1]] = imageB


# In[11]:


plt.figure(figsize=(10, 10))
# OpenCV loads images as BGR; convert to RGB so matplotlib shows true colors
plt.imshow(cv2.cvtColor(result, cv2.COLOR_BGR2RGB))
plt.title('My first mosaic');


# In[12]:


plt.figure(figsize=(10, 10))
plt.imshow(cv2.cvtColor(imageA, cv2.COLOR_BGR2RGB))
plt.title('First image');


# In[13]:


plt.figure(figsize=(10, 10))
plt.imshow(cv2.cvtColor(imageB, cv2.COLOR_BGR2RGB))
plt.title('Second image');


# In[14]:


from IPython.core.display import Image


# In[15]:


Image('desired.png')
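# The TODO above asks about the `ratio` and `reprojThresh` parameters. Below is
# a minimal exploratory sketch (my addition, not part of the original pipeline):
# it re-runs `matchKeypoints` over a small grid of values and counts the RANSAC
# inliers reported in `status`; more inliers usually indicates a more stable
# homography. The grids of candidate values are arbitrary guesses.

# In[ ]:


# Hedged sketch (not in the original workflow): grid-search ratio and
# reprojThresh and report the number of RANSAC inliers for each pair.
# Reuses kpsA/kpsB/featuresA/featuresB from the cells above.
for r in (0.6, 0.7, 0.75, 0.8):
    for t in (2.0, 4.0, 8.0):
        M_try = matchKeypoints(kpsA, kpsB, featuresA, featuresB, r, t)
        if M_try is None:
            print('ratio=%.2f reprojThresh=%.1f -> too few matches' % (r, t))
            continue
        (m_try, H_try, status_try) = M_try
        # status is a column vector of 0/1 flags, one per match
        print('ratio=%.2f reprojThresh=%.1f -> %d/%d inliers'
              % (r, t, int(status_try.sum()), len(status_try)))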
# Desired output (only 2 images for now):
#
# * black band on the border:
#
# ```
# The algorithm used by the HabCam devs for the geometric correction of
# each image produces a black curved band on the border. The shape and
# area of this black band vary little between images and depend on the
# camera's attitude parameters and lens distortion.
# ```
#
# * image alignment 'cut':
#
# ```
# The images are not perfectly aligned (compare the desired output with
# the resulting image: the lower border is cutting out part of imageA).
# ```
#
# Note that the output shown here (I made it by cheating with GIMP) has:
#
# * the black band set to transparent by adding an alpha channel and
#   manually selecting the black area in the image
#
# Potential solution with OpenCV (see the sketch below):
#
# ```
# Run a moving-average window along the 4 borders (with a user-defined
# width), performing an edge detection to mark the curved lines between
# the image and the black band. Use the line as a "cutting tool" to set
# the pixels falling between the line and the border to null/transparent.
# ```
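# The moving-window edge detection described above is not implemented yet. As a
# simpler first cut, here is a hedged sketch of an alternative: threshold the
# near-black pixels, clean the mask with a morphological opening, and store it
# as an alpha channel so the black band becomes transparent. `mask_black_band`
# and its `thresh`/`ksize` defaults are my own hypothetical choices, not HabCam
# code, and would need tuning on real frames.

# In[ ]:


def mask_black_band(image, thresh=10, ksize=5):
    # pixels brighter than `thresh` are considered valid image content
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    mask = (gray > thresh).astype(np.uint8) * 255

    # remove speckle noise so the curved border comes out as one clean region
    kernel = np.ones((ksize, ksize), np.uint8)
    mask = cv2.morphologyEx(mask, cv2.MORPH_OPEN, kernel)

    # attach the mask as an alpha channel (BGRA)
    bgra = cv2.cvtColor(image, cv2.COLOR_BGR2BGRA)
    bgra[:, :, 3] = mask
    return bgra


# In[ ]:


imageA_rgba = mask_black_band(imageA)
plt.figure(figsize=(10, 10))
plt.imshow(cv2.cvtColor(imageA_rgba, cv2.COLOR_BGRA2RGBA))
plt.title('imageA with the black band masked out');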