
Research on Image Stitching Algorithms

I. Algorithm 1: SIFT with brute-force matching (OpenCV)

# SIFT feature detection with brute-force (BF) matching (Stitcher.py)
import numpy as np
import cv2


class Stitcher:
    # Stitching entry point
    def stitch(self, images, ratio=0.75, reprojThresh=4.0, showMatches=False):
        # Unpack the two input images
        (imageB, imageA) = images
        # Detect keypoints and compute SIFT descriptors for both images
        (kpsA, featureA) = self.detectAndDescribe(imageA)
        (kpsB, featureB) = self.detectAndDescribe(imageB)
        # Match the keypoints between the two images
        M = self.matchKeypoints(kpsA, kpsB, featureA, featureB, ratio, reprojThresh)
        # Not enough matches: give up
        if M is None:
            return None
        (matches, H, status) = M
        # Perspective-warp image A into the panorama canvas (intermediate result)
        result = cv2.warpPerspective(imageA, H, (imageA.shape[1] + imageB.shape[1], imageA.shape[0]))
        # Paste image B into the left part of the canvas
        result[0:imageB.shape[0], 0:imageB.shape[1]] = imageB
        self.cv_show('result', result)
        # Optionally return a visualisation of the keypoint matches as well
        if showMatches:
            vis = self.drawMatches(imageA, imageB, kpsA, kpsB, matches, status)
            return result, vis
        return result

    def detectAndDescribe(self, image):
        # Convert to grayscale
        gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
        # Create the SIFT detector
        descriptor = cv2.xfeatures2d.SIFT_create()
        # kps: keypoint information (position, scale, orientation)
        # features: one 128-dimensional gradient descriptor per keypoint
        (kps, features) = descriptor.detectAndCompute(gray, None)
        kps = np.float32([kp.pt for kp in kps])
        return kps, features

    def matchKeypoints(self, kpsA, kpsB, featureA, featureB, ratio, reprojThresh):
        # Brute-force matcher with KNN (k=2) matching of the SIFT descriptors
        matcher = cv2.BFMatcher()
        rawMatches = matcher.knnMatch(featureA, featureB, 2)
        # Lowe's ratio test to filter ambiguous matches
        matches = []
        for m in rawMatches:
            if len(m) == 2 and m[0].distance < m[1].distance * ratio:
                matches.append((m[0].trainIdx, m[0].queryIdx))
        # At least 4 point pairs are needed to estimate a homography
        if len(matches) > 4:
            ptsA = np.float32([kpsA[i] for (_, i) in matches])
            ptsB = np.float32([kpsB[i] for (i, _) in matches])
            # Estimate the homography (perspective transform) with RANSAC
            (H, status) = cv2.findHomography(ptsA, ptsB, cv2.RANSAC, reprojThresh)
            return matches, H, status
        return None

    # Display an image and wait for a key press
    def cv_show(self, name, img):
        cv2.imshow(name, img)
        cv2.waitKey(0)
        cv2.destroyAllWindows()

    def drawMatches(self, imageA, imageB, kpsA, kpsB, matches, status):
        # Place images A and B side by side on one canvas
        (hA, wA) = imageA.shape[:2]
        (hB, wB) = imageB.shape[:2]
        vis = np.zeros((max(hA, hB), wA + wB, 3), dtype="uint8")
        vis[0:hA, 0:wA] = imageA
        vis[0:hB, wA:] = imageB
        # Draw a line for every match that survived RANSAC
        for ((trainIdx, queryIdx), s) in zip(matches, status):
            if s == 1:
                ptA = (int(kpsA[queryIdx][0]), int(kpsA[queryIdx][1]))
                ptB = (int(kpsB[trainIdx][0]) + wA, int(kpsB[trainIdx][1]))
                cv2.line(vis, ptA, ptB, (0, 255, 0), 1)
        return vis
from Stitcher import Stitcher
import cv2

# Read the input images
img1 = cv2.imread('1.jpg')
img1 = cv2.resize(img1, dsize=(1920,1080))
img2 = cv2.imread('2.jpg')
img2 = cv2.resize(img2, dsize=(1920, 1080))

# Stitch the two images
stitcher = Stitcher()
result, vis = stitcher.stitch([img1, img2], showMatches=True)
cv2.imwrite("1_1.jpg", result)
cv2.imwrite("2_1.jpg", vis)# cv2.imshow('1', img1)
# cv2.imshow('2', img2)
# cv2.imshow('keypoints matches', vis)
# cv2.imshow('result', result)
# cv2.waitKey(0)
# cv2.destroyAllWindows()
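Note: cv2.xfeatures2d.SIFT_create() is only available in opencv-contrib-python builds. Since OpenCV 4.4 (after the SIFT patent expired) the detector is exposed in the main module as cv2.SIFT_create(). A minimal compatibility helper, sketched below (create_sift is a name introduced here, not part of the code above), could replace the call inside detectAndDescribe:

import cv2

def create_sift():
    # OpenCV >= 4.4 ships SIFT in the main module; older builds need the
    # contrib package (opencv-contrib-python) used by the Stitcher class above.
    if hasattr(cv2, 'SIFT_create'):
        return cv2.SIFT_create()
    return cv2.xfeatures2d.SIFT_create()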

II. Algorithm 2: panorama stitching with VLFeat SIFT and PCV

1. Download vlfeat-0.9.20 from the download index at vlfeat.org/download.

Locate sift.exe under vlfeat-0.9.20/bin/win64.

2. Download the PCV package and find line 18 of PCV-master\PCV\localdescriptors\sift.py:

cmmd = str("D:\\\\app\\\\vlfeat-0.9.20\\\\bin\\\\win64\\\\sift.exe "+imagename+" --output="+resultname+" "+params)

Replace the path there with the absolute path of sift.exe from vlfeat-0.9.20/bin/win64; note that there must be a space after .exe.
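To catch path mistakes early, a small sanity check such as the sketch below can help. SIFT_EXE is an example path (substitute your own install location), and the --edge-thresh/--peak-thresh values are only illustrative; the point is that the command string is assembled the same way the modified line in sift.py assembles it:

import os

SIFT_EXE = r"D:\app\vlfeat-0.9.20\bin\win64\sift.exe"   # example install location, adjust as needed

if not os.path.isfile(SIFT_EXE):
    raise FileNotFoundError("sift.exe not found - fix the path in PCV's sift.py (line 18)")

# Assemble the command the same way the modified line does; note the single
# space after the executable path.
imagename, resultname, params = "0.jpg", "0.sift", "--edge-thresh 10 --peak-thresh 5"
cmmd = str(SIFT_EXE + " " + imagename + " --output=" + resultname + " " + params)
print(cmmd)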

Append the following to the end of the code and run it:

if __name__ == '__main__':
    imname = '0.jpg'                      # path of the image to process
    im = Image.open(imname)
    process_image(imname, '0.sift')

This generates the .sift file corresponding to the image.

Problem: the SIFT feature file produced by calling sift.process_image is 0 bytes.

Cause: vlfeat 0.9.21 is too recent and makes feature extraction return empty output on some machines.

Solution: switch to vlfeat 0.9.20. On some machines the downgrade triggers a missing vcomp100.dll error; download the matching DLL (64-bit for a 64-bit system) from https://cn.dll-files.com/vcomp100.dll.html and copy it into C:\Windows\SysWOW64 and C:\Windows\System32, replacing any existing copy. Tested successfully.
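A quick way to detect this failure mode in code is to check the output file before going on. The sketch below assumes the 0.jpg/0.sift names from the example above; read_features_from_file is the PCV helper that the stitching script below also uses.

import os
from PCV.localdescriptors import sift

siftfile = '0.sift'

# A 0-byte .sift file means feature extraction silently failed (see the cause above).
if os.path.getsize(siftfile) == 0:
    raise RuntimeError('empty .sift file - check the VLFeat version and the sift.exe path')

# Locations hold (x, y, scale, orientation); descriptors are 128-dimensional.
locs, descs = sift.read_features_from_file(siftfile)
print('keypoints:', locs.shape[0], 'descriptor length:', descs.shape[1])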

3. Use the images together with the generated .sift files to stitch three images (the code can be adapted to stitch any number of images; a sketch of that follows the code below).

# -*- coding: utf-8 -*-
from pylab import *
from numpy import *
from PIL import Image
# If you have PCV installed, these imports should work
from PCV.geometry import homography, warp
from PCV.localdescriptors import sift
"""
This is the panorama example from section 3.3.
"""
# set paths to data folder
featname = ['D:\\PCV-master\\全景拼接4\\' + str(i + 1) + '.sift' for i in range(3)]
imname = ['D:\\PCV-master\\全景拼接4\\' + str(i + 1) + '.jpg' for i in range(3)]

# extract features and match
l = {}
d = {}
for i in range(3):
    sift.process_image(imname[i], featname[i])
    l[i], d[i] = sift.read_features_from_file(featname[i])
matches = {}
for i in range(2):
    matches[i] = sift.match(d[i + 1], d[i])
print(matches)
# visualize the matches (Figure 3-11 in the book)
for i in range(2):
    im1 = array(Image.open(imname[i]))
    im2 = array(Image.open(imname[i + 1]))
    # figure()
    # sift.plot_matches(im2, im1, l[i + 1], l[i], matches[i], show_below=True)
# function to convert the matches to hom. points
def convert_points(j):
    ndx = matches[j].nonzero()[0]
    fp = homography.make_homog(l[j + 1][ndx, :2].T)
    ndx2 = [int(matches[j][i]) for i in ndx]
    tp = homography.make_homog(l[j][ndx2, :2].T)
    # switch x and y - TODO this should move elsewhere
    fp = vstack([fp[1], fp[0], fp[2]])
    tp = vstack([tp[1], tp[0], tp[2]])
    return fp, tp
# estimate the homographies
model = homography.RansacModel()
fp,tp = convert_points(0)
H_01 = homography.H_from_ransac(fp,tp,model)[0] #im 0 to 1
tp,fp = convert_points(1) #NB: reverse order
H_21 = homography.H_from_ransac(fp,tp,model)[0] #im 2 to 1
# warp the images
delta = 2000 # for padding and translation
im1 = array(Image.open(imname[0]), "uint8")
im2 = array(Image.open(imname[1]), "uint8")
im_01 = warp.panorama(H_01,im1,im2,delta,delta)
im1 = array(Image.open(imname[2]), "f")
im_21 = warp.panorama(H_21,im1,im_01,delta,delta)
figure()
imshow(array(im_21, "uint8"))
axis('off')
savefig("result.png",dpi=300)
show()
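For stitching more than three images, one possible generalisation is sketched below. It is a rough, untested sketch rather than the method above: it keeps the first image as the reference frame and folds the remaining images in one by one, composing the pairwise homographies so that each warp stays expressed in the reference frame. stitch_sequence and H_accum are names introduced here, and it assumes the images are ordered left to right so that warp.panorama keeps padding the canvas on the right and the reference frame stays fixed.

from numpy import identity, dot, vstack, array
from PIL import Image
from PCV.geometry import homography, warp
from PCV.localdescriptors import sift

def stitch_sequence(imname, featname, delta=2000):
    # Extract features for every image
    n = len(imname)
    l, d = {}, {}
    for i in range(n):
        sift.process_image(imname[i], featname[i])
        l[i], d[i] = sift.read_features_from_file(featname[i])

    model = homography.RansacModel()
    result = array(Image.open(imname[0]), "uint8")
    H_accum = identity(3)   # maps reference-frame (image 0) coords to the current image

    for i in range(1, n):
        # Match image i against image i-1
        m = sift.match(d[i], d[i - 1])
        ndx = m.nonzero()[0]
        ndx2 = [int(m[j]) for j in ndx]
        # fp: matched points in image i-1, tp: matched points in image i
        fp = homography.make_homog(l[i - 1][ndx2, :2].T)
        tp = homography.make_homog(l[i][ndx, :2].T)
        # switch x and y, as convert_points() does above
        fp = vstack([fp[1], fp[0], fp[2]])
        tp = vstack([tp[1], tp[0], tp[2]])
        # pairwise homography: image i-1 coords -> image i coords
        H_pair = homography.H_from_ransac(fp, tp, model)[0]
        # compose so the accumulated homography maps reference coords -> image i coords
        H_accum = dot(H_pair, H_accum)
        # warp image i onto the growing panorama
        nextim = array(Image.open(imname[i]), "f")
        result = warp.panorama(H_accum, nextim, result, delta, delta)
    return result

With the imname and featname lists defined above, something like result = stitch_sequence(imname, featname) should produce a panorama comparable to im_21, and longer lists would simply extend it further to the right.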