Here RANSAC is used to filter out mismatched point pairs.

This code removes wrong image matches; for the theory and the reference article, see:

Computer Vision: rejecting wrong matches with RANSAC and the fundamental matrix F (Python implementation) — Sunrise永不言弃's blog, CSDN

The code follows. You only need to modify lines 350-366 (the part that loads your own images and feature files), adapting them to your own setup.
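If you are not sure what your own feature files contain, a minimal sketch like the one below lets you check the array shapes before editing those lines. It assumes the .d2-net file is a NumPy .npz archive with 'keypoints' and 'descriptors' arrays, which is how the script below reads it; the path is a hypothetical placeholder, not one from the original script.

import numpy as np

feat = np.load("path/to/your_image.jpg.d2-net")   # hypothetical path -- point it at your own file
print(feat.files)                                  # lists the arrays stored in the archive
print(feat['keypoints'].shape)                     # expected (N, 2) or (N, 3)
print(feat['descriptors'].shape)                   # expected (N, D), e.g. (N, 512)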


import os
import cv2
import numpy as np
import random
os.environ["KMP_DUPLICATE_LIB_OK"] = "TRUE"
if int(cv2.__version__[0]) < 3:  # pragma: no cover
    print('Warning: OpenCV 3 is not installed')


# ---------------------------------------------------------------- RANSAC ----
def compute_fundamental(x1, x2):
    """Compute the fundamental matrix from corresponding points (x1, x2: 3*n arrays)
    using the linear 8-point algorithm."""
    n = x1.shape[1]
    if x2.shape[1] != n:
        raise ValueError("Number of points don't match.")

    # build matrix for equations
    A = np.zeros((n, 9))
    for i in range(n):
        A[i] = [x1[0, i] * x2[0, i], x1[0, i] * x2[1, i], x1[0, i] * x2[2, i],
                x1[1, i] * x2[0, i], x1[1, i] * x2[1, i], x1[1, i] * x2[2, i],
                x1[2, i] * x2[0, i], x1[2, i] * x2[1, i], x1[2, i] * x2[2, i]]

    # compute linear least-squares solution
    U, S, V = np.linalg.svd(A)
    F = V[-1].reshape(3, 3)

    # constrain F: make rank 2 by zeroing out the last singular value
    U, S, V = np.linalg.svd(F)
    S[2] = 0
    F = np.dot(U, np.dot(np.diag(S), V))

    return F / F[2, 2]


def compute_fundamental_normalized(x1, x2):
    """Compute the fundamental matrix from corresponding points (x1, x2: 3*n arrays)
    using the normalized 8-point algorithm."""
    n = x1.shape[1]
    if x2.shape[1] != n:
        raise ValueError("Number of points don't match.")

    # normalize image coordinates
    x1 = x1 / x1[2]
    mean_1 = np.mean(x1[:2], axis=1)
    S1 = np.sqrt(2) / np.std(x1[:2])
    T1 = np.array([[S1, 0, -S1 * mean_1[0]], [0, S1, -S1 * mean_1[1]], [0, 0, 1]])
    x1 = np.dot(T1, x1)

    x2 = x2 / x2[2]
    mean_2 = np.mean(x2[:2], axis=1)
    S2 = np.sqrt(2) / np.std(x2[:2])
    T2 = np.array([[S2, 0, -S2 * mean_2[0]], [0, S2, -S2 * mean_2[1]], [0, 0, 1]])
    x2 = np.dot(T2, x2)

    # compute F with the normalized coordinates
    F = compute_fundamental(x1, x2)

    # reverse normalization
    F = np.dot(T1.T, np.dot(F, T2))

    return F / F[2, 2]


def randSeed(good, num=8):
    """
    :param good: initial matched point pairs
    :param num: number of pairs to sample at random
    :return: list of 8 randomly chosen point pairs
    """
    eight_point = random.sample(good, num)
    return eight_point


def PointCoordinates(eight_points, keypoints1, keypoints2):
    """Turn sampled matches into homogeneous point coordinates.
    :param eight_points: randomly sampled matches (list of [DMatch])
    :param keypoints1: keypoint coordinates of image 1 (N*3 array/tuple)
    :param keypoints2: keypoint coordinates of image 2
    :return: two n*3 arrays of homogeneous coordinates
    """
    tuple_dim = [1., ]      # extra dimension appended to make the coordinates homogeneous
    fin_list_1 = []         # collected coordinates of the sampled points in image 1
    fin_list_2 = []
    for i in eight_points:
        idex_0 = i[0].queryIdx    # descriptor index in the query image
        idex_1 = i[0].trainIdx    # descriptor index in the train image
        tuple_x1 = keypoints1[idex_0][:2]   # (x, y) of keypoint 1; keypoints are N*3, keep the first 2 dims
        tuple_x2 = keypoints2[idex_1][:2]   # (x, y) of keypoint 2
        # convert to homogeneous coordinates by appending a trailing 1
        new_tuple_x1 = np.append(tuple_x1, tuple_dim)
        new_tuple_x2 = np.append(tuple_x2, tuple_dim)
        fin_list_1 = np.append(fin_list_1, new_tuple_x1)
        fin_list_2 = np.append(fin_list_2, new_tuple_x2)
    x1 = fin_list_1.reshape(-1, 3)   # 3 columns, number of rows inferred
    x2 = fin_list_2.reshape(-1, 3)
    return np.array(x1, dtype=float), np.array(x2, dtype=float)


def ransac(good, keypoints1, keypoints2, confidence, iter_num):
    """Estimate F with the 8-point algorithm inside a RANSAC loop and keep the inlier matches."""
    Max_num = 0
    good_F = np.zeros([3, 3])
    inlier_points = []
    for i in range(iter_num):
        eight_points = randSeed(good)
        x1, x2 = PointCoordinates(eight_points, keypoints1, keypoints2)
        F = compute_fundamental_normalized(x1.T, x2.T)
        num, ransac_good = inlier(F, good, keypoints1, keypoints2, confidence)
        if num > Max_num:
            Max_num = num
            good_F = F
            inlier_points = ransac_good
    print(Max_num, good_F)
    return Max_num, good_F, inlier_points


def computeReprojError(x1, x2, F):
    """Compute the reprojection error."""
    ww = 1.0 / (F[2, 0] * x1[0] + F[2, 1] * x1[1] + F[2, 2])
    dx = (F[0, 0] * x1[0] + F[0, 1] * x1[1] + F[0, 2]) * ww - x2[0]
    dy = (F[1, 0] * x1[0] + F[1, 1] * x1[1] + F[1, 2]) * ww - x2[1]
    return dx * dx + dy * dy


def inlier(F, good, keypoints1, keypoints2, confidence):
    """Find the inliers of F among all matches."""
    num = 0
    ransac_good = []
    x1, x2 = PointCoordinates(good, keypoints1, keypoints2)
    for i in range(len(x2)):
        line = F.dot(x1[i].T)
        # In epipolar geometry the epipolar line is [A B C] with Ax + By + C = 0,
        # so its direction vector can be written as [-B, A].
        line_v = np.array([-line[1], line[0]])
        err = np.linalg.norm(np.cross(x2[i, :2], line_v) / np.linalg.norm(line_v))
        # err = computeReprojError(x1[i], x2[i], F)
        if abs(err) < confidence:
            ransac_good.append(good[i])
            num += 1
    return num, ransac_good


def nn_match_two_way(desc1, desc2, nn_thresh):
    """Mutual nearest-neighbour matching between two sets of unit-normalized descriptors."""
    assert desc1.shape[0] == desc2.shape[0]
    if desc1.shape[1] == 0 or desc2.shape[1] == 0:
        return np.zeros((3, 0))
    if nn_thresh < 0.0:
        raise ValueError('\'nn_thresh\' should be non-negative')
    # Compute L2 distance. Easy since vectors are unit normalized.
    dmat = np.dot(desc1.T, desc2)
    dmat = np.sqrt(2 - 2 * np.clip(dmat, -1, 1))
    # Get NN indices and scores.
    idx = np.argmin(dmat, axis=1)                  # nearest-neighbour indices
    scores = dmat[np.arange(dmat.shape[0]), idx]   # nearest-neighbour distances
    # Threshold the NN matches.
    keep = scores < nn_thresh
    # Check if the nearest neighbour goes both directions and keep those.
    idx2 = np.argmin(dmat, axis=0)
    keep_bi = np.arange(len(idx)) == idx2[idx]
    keep = np.logical_and(keep, keep_bi)
    idx = idx[keep]
    scores = scores[keep]
    # Get the surviving point indices.
    m_idx1 = np.arange(desc1.shape[1])[keep]   # keep is a boolean mask
    m_idx2 = idx
    # Populate the final 3xN match data structure.
    matches = np.zeros((3, int(keep.sum())))
    matches[0, :] = m_idx1
    matches[1, :] = m_idx2
    matches[2, :] = scores
    return matches   # matches stores the index correspondences between the two point sets


def match_descriptors(kp1, desc1, kp2, desc2):
    """Match the keypoints with a brute-force nearest-neighbour search."""
    # BFMatcher takes every descriptor in desc1 and computes the L2 distance to all descriptors in desc2.
    bf = cv2.BFMatcher(cv2.NORM_L2, crossCheck=True)
    matches = bf.match(desc1, desc2)
    # Each DMatch links point queryIdx of the first set to its best match trainIdx in the second set.
    matches_idx = np.array([m.queryIdx for m in matches])
    m_kp1 = [kp1[idx] for idx in matches_idx]
    matches_idx = np.array([m.trainIdx for m in matches])
    m_kp2 = [kp2[idx] for idx in matches_idx]
    # m_kp1 / m_kp2 are the keypoints of the two images, now in one-to-one correspondence
    # (each pair is the nearest-neighbour match with the smallest distance).
    return m_kp1, m_kp2, matches


def showpoint(img, ptx):
    """Draw every keypoint of a 3*N array onto the image."""
    for i in range(ptx.shape[1]):
        x = int(round(ptx[0, i]))
        y = int(round(ptx[1, i]))
        cv2.circle(img, (x, y), 3, color=(255, 0, 0))
    return img


def drawMatches(img1, kp1, img2, kp2, matches):
    """
    Own implementation of cv2.drawMatches, since OpenCV 2.4.9 does not ship this
    function (it is only available from OpenCV 3.0.0 onwards).

    Takes two images with their associated keypoints plus a 2*N array of index
    pairs describing which keypoints matched, and produces a montage: the first
    image followed by the second image beside it, with circles on the keypoints
    and lines connecting the matching pairs.

    img1, img2 - the two input images
    kp1, kp2   - 3*N keypoint arrays from the detector
    matches    - 2*N array of (queryIdx, trainIdx) index pairs
    """
    # Create a new output image that concatenates the two images side by side (a montage).
    rows1 = img1.shape[0]
    cols1 = img1.shape[1]
    rows2 = img2.shape[0]
    cols2 = img2.shape[1]
    out = np.hstack([img1, img2])
    print("montage size", out.shape)

    # For each pair of points between the two images, draw circles and connect them with a line.
    for i in range(matches.shape[1]):   # matches.shape[1] is the number of point pairs
        # Get the matching keypoint indices for each image.
        img1_idx = matches[0, i]
        img2_idx = matches[1, i]
        x11 = int(img1_idx)
        y11 = int(img1_idx)
        x22 = int(img2_idx)
        y22 = int(img2_idx)
        # x - columns, y - rows
        x1 = kp1[0, x11]
        y1 = kp1[1, y11]
        x2 = kp2[0, x22]
        y2 = kp2[1, y22]

        # Draw a small circle at both coordinates (radius 2, thickness 1), reddish colour.
        a = np.random.randint(5, 7)       # B
        b = np.random.randint(5, 7)       # G
        c = np.random.randint(244, 246)   # R
        cv2.circle(out, (int(np.round(x1)), int(np.round(y1))), 2, (a, b, c), 1)
        cv2.circle(out, (int(np.round(x2) + cols1), int(np.round(y2))), 2, (a, b, c), 1)

        # Draw a line between the two points.
        cv2.line(out, (int(np.round(x1)), int(np.round(y1))),
                 (int(np.round(x2) + cols1), int(np.round(y2))), (a, b, c), 2, shift=0)

    return out


if __name__ == '__main__':
    font = cv2.FONT_HERSHEY_DUPLEX   # visualization font settings
    font_clr = (255, 255, 255)
    font_pt = (4, 12)
    font_sc = 0.4

    # Load the keypoints and descriptors of the two images.
    # The .d2-net files are NPZ archives; replace the paths with your own files.
    data_1 = np.load("D:\essay\SuperPoint\superpoint\pictuer\style_image\match/11.jpg.d2-net")
    data_2 = np.load("D:\essay\SuperPoint\superpoint\pictuer\style_image\match/12.jpg.d2-net")
    # Load the two images.
    imgx_1 = cv2.imread('D:\essay\SuperPoint\superpoint\pictuer\style_image\match/11.jpg', 1)
    imgx_2 = cv2.imread('D:\essay\SuperPoint\superpoint\pictuer\style_image\match/12.jpg', 1)

    # You can inspect the attributes of your npz file with:
    # print(data_1.files)

    # Keypoints here are (3473, 3): N = 3473 rows of homogeneous coordinates such as
    # [231.68674 271.08926 1.]. Different networks extract different numbers of points;
    # an (N, 2) array also works, and if yours is transposed, transpose it back (pts = pts.T).
    pts = data_1['keypoints']       # np array, (N, 3)
    desc = data_1['descriptors']    # np array, (N, 512)
    print("number of keypoints in image 1:", pts.shape[0])
    pts = pts.T                     # the helper functions expect 3*N, so transpose
    desc = desc.T                   # descriptors become 512*N (originally (N, 512))
    img11 = showpoint(imgx_1, pts)
    cv2.namedWindow('imgone', 0)
    cv2.imshow("imgone", img11)     # show the keypoints of the first image

    # Keypoints and descriptors of the second image.
    pts1 = data_2['keypoints']      # np array, (N, 3), homogeneous coordinates
    desc1 = data_2['descriptors']   # np array, (N, 512)
    desc1 = desc1.T
    print("number of keypoints in image 2:", pts1.shape[0])
    pts1 = pts1.T
    img22 = showpoint(imgx_2, pts1)
    cv2.namedWindow('imgtwo', 0)
    cv2.imshow("imgtwo", img22)     # show the keypoints of the second image

    # Brute-force matching + KNN ratio test.
    desc_test_0 = desc.T            # BFMatcher needs (N, 512), i.e. N first
    desc_test_1 = desc1.T
    pts_test_0 = pts.T              # keypoints back to (N, 3)
    pts_test_1 = pts1.T
    pts_test_0 = tuple(pts_test_0)  # keypoints as a tuple of rows
    pts_test_1 = tuple(pts_test_1)
    match_test = cv2.BFMatcher()
    matches = match_test.knnMatch(desc_test_0, desc_test_1, k=2)
    good = []
    for m, n in matches:
        if m.distance < 0.8 * n.distance:   # ratio-test threshold, tune as needed
            good.append([m])
    print("number of feature points:", pts.shape[1], pts1.shape[1])
    print("good match num: {}".format(len(good)))

    # RANSAC on the ratio-test survivors; pts/pts1 must be passed in N*3 form here.
    Max_num, good_F, inlier_points = ransac(good, pts_test_0, pts_test_1, confidence=30, iter_num=500)
    print(pts.shape[1], "/", pts1.shape[1], "/", len(good), "/", Max_num, "/",
          '%.2f' % (Max_num / len(good)))

    # inlier_points are the matches that survived RANSAC.
    # Convert them to a 2*M index array for drawMatches.
    tuple_x = []
    tuple_y = []
    for i in inlier_points:
        idex_x = i[0].queryIdx                # descriptor index in the query image
        idex_y = i[0].trainIdx                # descriptor index in the train image
        tuple_x = np.append(tuple_x, idex_x)  # collect the query indices
        tuple_y = np.append(tuple_y, idex_y)  # collect the train indices
    match = np.vstack((tuple_x, tuple_y))

    # Visualise the surviving matches (colour images are passed in).
    out = drawMatches(imgx_1, pts, imgx_2, pts1, match)
    cv2.namedWindow("matcher", 0)
    cv2.imshow("matcher", out)
    cv2.waitKey(0)   # keep refreshing the window; delay in ms (0 = wait for a key press)
    print('==> Finished')
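For comparison, OpenCV also ships a RANSAC-based fundamental-matrix estimator, so the hand-rolled loop above can be cross-checked with cv2.findFundamentalMat. The sketch below is only an illustration under the assumption that pts, pts1 (the 3*N keypoint arrays) and good (the ratio-test matches) already exist as in the main block; it is not part of the original script, and the threshold values are placeholders.

# Pixel coordinates of the ratio-test matches, shape (M, 2) each.
src = np.float32([pts.T[m[0].queryIdx][:2] for m in good])
dst = np.float32([pts1.T[m[0].trainIdx][:2] for m in good])
# RANSAC fundamental-matrix estimation; mask[i] == 1 marks match i as an inlier.
F, mask = cv2.findFundamentalMat(src, dst, cv2.FM_RANSAC,
                                 ransacReprojThreshold=3.0, confidence=0.99)
if mask is not None:
    inliers = [good[i] for i in range(len(good)) if mask[i]]
    print(len(inliers), "inliers out of", len(good), "matches")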

Final result image: D2-Net implementation.

This one is from the SuperPoint implementation.

The printed line shows: number of feature points in images A and B / number of matches / number of correct (inlier) matches / accuracy.
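In the script this readout comes from a single print statement, and the accuracy is simply the RANSAC inlier count divided by the number of ratio-test matches. A small illustration with made-up numbers:

num_pts_a, num_pts_b = 3473, 3120     # hypothetical keypoint counts of images A and B
num_matches = 25                      # matches surviving the ratio test (len(good))
num_inliers = 21                      # matches kept by RANSAC (Max_num)
accuracy = num_inliers / num_matches  # 0.84
print(num_pts_a, "/", num_pts_b, "/", num_matches, "/", num_inliers, "/", '%.2f' % accuracy)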
