The RobustMatcher Matching Algorithm with OpenCV

Introduction: a robust image feature matching algorithm (RobustMatcher) built on OpenCV.

I recently came across a fairly robust image feature matching algorithm online and am sharing it here.

1. First, the class definition:

// OpenCV 2.x headers (this code uses the pre-3.0 C++ API; on OpenCV 2.4,
// BruteForceMatcher additionally requires opencv2/legacy/legacy.hpp)
#include <opencv2/core/core.hpp>
#include <opencv2/features2d/features2d.hpp>
#include <opencv2/calib3d/calib3d.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <vector>

class RobustMatcher 
{
private:
  // pointer to the feature point detector object
  cv::Ptr<cv::FeatureDetector> detector;
  // pointer to the feature descriptor extractor object
  cv::Ptr<cv::DescriptorExtractor> extractor;
  // pointer to the matcher object
  cv::Ptr<cv::DescriptorMatcher > matcher;
  float ratio; // max ratio between 1st and 2nd NN
  bool refineF; // if true will refine the F matrix
  double distance; // min distance to epipolar
  double confidence; // confidence level (probability)
public:
  RobustMatcher() : ratio(0.65f), refineF(true),
    distance(3.0), confidence(0.99) // initialized in declaration order
  {
      // ORB is the default feature
      detector= new cv::OrbFeatureDetector();
      extractor= new cv::OrbDescriptorExtractor();
      matcher= new cv::BruteForceMatcher<cv::HammingLUT>;
  }
  // Set the feature detector
  void setFeatureDetector(cv::Ptr<cv::FeatureDetector>& detect) 
  {
      detector= detect;
  }
  // Set the descriptor extractor
  void setDescriptorExtractor(cv::Ptr<cv::DescriptorExtractor>& desc) 
  {
      extractor= desc;
  }
  // Set the matcher
  void setDescriptorMatcher(cv::Ptr<cv::DescriptorMatcher>& match) 
  {
      matcher= match;
  }
  // Set confidence level
  void setConfidenceLevel(double conf) 
  {
      confidence= conf;
  }
  //Set MinDistanceToEpipolar
  void setMinDistanceToEpipolar(double dist) 
  {
    distance= dist;
  }
  //Set ratio
  void setRatio(float rat) 
  {
    ratio= rat;
  }
  cv::Mat match(cv::Mat& image1, cv::Mat& image2, // input images
    // output matches and keypoints
    std::vector<cv::DMatch>& matches,
    std::vector<cv::KeyPoint>& keypoints1,
    std::vector<cv::KeyPoint>& keypoints2);
  cv::Mat ransacTest(
    const std::vector<cv::DMatch>& matches,
    const std::vector<cv::KeyPoint>& keypoints1,
    const std::vector<cv::KeyPoint>& keypoints2,
    std::vector<cv::DMatch>& outMatches);
  void symmetryTest(
    const std::vector<std::vector<cv::DMatch> >& matches1,
    const std::vector<std::vector<cv::DMatch> >& matches2,
    std::vector<cv::DMatch>& symMatches);
  int ratioTest(std::vector<std::vector<cv::DMatch> >& matches);
};

2. The implementation in the .cpp file:

int RobustMatcher::ratioTest(std::vector<std::vector<cv::DMatch> >
  &matches) 
{
    int removed=0;
    // for all matches
    for (std::vector<std::vector<cv::DMatch> >::iterator
      matchIterator= matches.begin();
      matchIterator!= matches.end(); ++matchIterator) 
    {
        // if 2 NN has been identified
        if (matchIterator->size() > 1) 
        {
          // check distance ratio
          if ((*matchIterator)[0].distance/
            (*matchIterator)[1].distance > ratio)
          {
              matchIterator->clear(); // remove match
              removed++;
          }
        } else 
        { // does not have 2 neighbours
          matchIterator->clear(); // remove match
          removed++;
        }
    }
    return removed; // return the number of matches removed
}
// Insert symmetrical matches in symMatches vector
void RobustMatcher::symmetryTest(
  const std::vector<std::vector<cv::DMatch> >& matches1,
  const std::vector<std::vector<cv::DMatch> >& matches2,
  std::vector<cv::DMatch>& symMatches)
{
    // for all matches image 1 -> image 2
    for (std::vector<std::vector<cv::DMatch> >::
      const_iterator matchIterator1= matches1.begin();
      matchIterator1!= matches1.end(); ++matchIterator1) 
    {
        // ignore deleted matches
        if (matchIterator1->size() < 2)
          continue;
        // for all matches image 2 -> image 1
        for (std::vector<std::vector<cv::DMatch> >::
          const_iterator matchIterator2= matches2.begin();
          matchIterator2!= matches2.end();
        ++matchIterator2) 
        {
          // ignore deleted matches
          if (matchIterator2->size() < 2)
            continue;
          // Match symmetry test
          if ((*matchIterator1)[0].queryIdx ==
            (*matchIterator2)[0].trainIdx &&
            (*matchIterator2)[0].queryIdx ==
            (*matchIterator1)[0].trainIdx) 
          {
              // add symmetrical match
              symMatches.push_back(
                cv::DMatch((*matchIterator1)[0].queryIdx,
                (*matchIterator1)[0].trainIdx,
                (*matchIterator1)[0].distance));
              break; // next match in image 1 -> image 2
          }
        }
    }
}
// Identify good matches using RANSAC
// Returns the fundamental matrix
cv::Mat RobustMatcher::ransacTest(
  const std::vector<cv::DMatch>& matches,
  const std::vector<cv::KeyPoint>& keypoints1,
  const std::vector<cv::KeyPoint>& keypoints2,
  std::vector<cv::DMatch>& outMatches) 
{
    // Convert keypoints into Point2f
    std::vector<cv::Point2f> points1, points2;
    cv::Mat fundemental;
    for (std::vector<cv::DMatch>::
      const_iterator it= matches.begin();
      it!= matches.end(); ++it) 
    {
        // Get the position of left keypoints
        float x= keypoints1[it->queryIdx].pt.x;
        float y= keypoints1[it->queryIdx].pt.y;
        points1.push_back(cv::Point2f(x,y));
        // Get the position of right keypoints
        x= keypoints2[it->trainIdx].pt.x;
        y= keypoints2[it->trainIdx].pt.y;
        points2.push_back(cv::Point2f(x,y));
    }
    // Compute F matrix using RANSAC
    std::vector<uchar> inliers(points1.size(),0);
    if (points1.size()>0&&points2.size()>0)
    {
      // assign to the outer 'fundemental' (re-declaring a local here, as the
      // original code did, would shadow it and the function would return empty)
      fundemental= cv::findFundamentalMat(
        cv::Mat(points1),cv::Mat(points2), // matching points
        inliers,       // match status (inlier or outlier)
        CV_FM_RANSAC, // RANSAC method
        distance,      // distance to epipolar line
        confidence); // confidence probability
      // extract the surviving (inliers) matches
      std::vector<uchar>::const_iterator
        itIn= inliers.begin();
      std::vector<cv::DMatch>::const_iterator
        itM= matches.begin();
      // for all matches
      for ( ;itIn!= inliers.end(); ++itIn, ++itM) 
      {
        if (*itIn) 
        { // it is a valid match
          outMatches.push_back(*itM);
        }
      }
      if (refineF) 
      {
        // The F matrix will be recomputed with
        // all accepted matches
        // Convert keypoints into Point2f
        // for final F computation
        points1.clear();
        points2.clear();
        for (std::vector<cv::DMatch>::
          const_iterator it= outMatches.begin();
          it!= outMatches.end(); ++it) 
        {
            // Get the position of left keypoints
            float x= keypoints1[it->queryIdx].pt.x;
            float y= keypoints1[it->queryIdx].pt.y;
            points1.push_back(cv::Point2f(x,y));
            // Get the position of right keypoints
            x= keypoints2[it->trainIdx].pt.x;
            y= keypoints2[it->trainIdx].pt.y;
            points2.push_back(cv::Point2f(x,y));
        }
        // Compute 8-point F from all accepted matches
        if (points1.size()>0&&points2.size()>0)
        {
          fundemental= cv::findFundamentalMat(
            cv::Mat(points1),cv::Mat(points2), // matches
            CV_FM_8POINT); // 8-point method
        }
      }
    }
  return fundemental;
}
// Match feature points using the ratio test, symmetry test and RANSAC
// Returns the fundamental matrix
cv::Mat RobustMatcher::match(cv::Mat& image1,
  cv::Mat& image2, // input images
  // output matches and keypoints
  std::vector<cv::DMatch>& matches,
  std::vector<cv::KeyPoint>& keypoints1,
  std::vector<cv::KeyPoint>& keypoints2) 
{
  // 1a. Detection of the ORB features
  detector->detect(image1,keypoints1);
  detector->detect(image2,keypoints2);
  // 1b. Extraction of the ORB descriptors
  cv::Mat descriptors1, descriptors2;
  extractor->compute(image1,keypoints1,descriptors1);
  extractor->compute(image2,keypoints2,descriptors2);
  // 2. Match the two image descriptors
  // (uses the matcher object chosen in the constructor / setters)
  // from image 1 to image 2
  // based on k nearest neighbours (with k=2)
  std::vector<std::vector<cv::DMatch> > matches1;
  matcher->knnMatch(descriptors1,descriptors2,
    matches1, // vector of matches (up to 2 per entry)
    2);        // return 2 nearest neighbours
  // from image 2 to image 1
  // based on k nearest neighbours (with k=2)
  std::vector<std::vector<cv::DMatch> > matches2;
  matcher->knnMatch(descriptors2,descriptors1,
    matches2, // vector of matches (up to 2 per entry)
    2);        // return 2 nearest neighbours
  // 3. Remove matches for which NN ratio is
  // > than threshold
  // clean image 1 -> image 2 matches
  int removed= ratioTest(matches1);
  // clean image 2 -> image 1 matches
  removed= ratioTest(matches2);
  // 4. Remove non-symmetrical matches
  std::vector<cv::DMatch> symMatches;
  symmetryTest(matches1,matches2,symMatches);
  //========================================= test code: visualize the symmetric matches
  cv::Mat img_matches;
  cv::drawMatches( image1, keypoints1, image2, keypoints2, 
    symMatches, img_matches, cv::Scalar::all(-1), cv::Scalar::all(-1), 
    std::vector<char>(), cv::DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS ); 
  /*imshow("Test",img_matches);*/
  //cvWaitKey(0);
  //========================================= end test code
  // 5. Validate matches using RANSAC
  cv::Mat fundemental= ransacTest(symMatches,
    keypoints1, keypoints2, matches);
  //========================================= test code: homography + object localization
  std::vector<cv::Point2f> obj;
  std::vector<cv::Point2f> scene;
  for( size_t i = 0; i < symMatches.size(); i++ )
  {
    //-- Get the keypoints from the good matches
    obj.push_back( keypoints1[ symMatches[i].queryIdx ].pt );
    scene.push_back( keypoints2[ symMatches[i].trainIdx ].pt ); 
  }
  // estimate the homography from the symmetric matches
  // (cv::findHomography needs at least 4 point pairs)
  cv::Mat H = cv::findHomography( obj, scene, CV_RANSAC, 2 );
  //-- Get the corners from the image_1 ( the object to be "detected" )
  std::vector<cv::Point2f> obj_corners(4);
  obj_corners[0] = cv::Point2f(0, 0);
  obj_corners[1] = cv::Point2f((float)image1.cols, 0);
  obj_corners[2] = cv::Point2f((float)image1.cols, (float)image1.rows);
  obj_corners[3] = cv::Point2f(0, (float)image1.rows);
  std::vector<cv::Point2f> scene_corners(4);
  cv::perspectiveTransform( obj_corners, scene_corners, H);
  for( int i = 0; i < 4; i++ )
  {
    // shift the corners into the right half of img_matches, where image2 is drawn
    scene_corners[i].x += image1.cols;
  }
  cv::line( img_matches, scene_corners[0], scene_corners[1], cv::Scalar(0, 255, 0), 2 );
  cv::line( img_matches, scene_corners[1], scene_corners[2], cv::Scalar(0, 255, 0), 2 );
  cv::line( img_matches, scene_corners[2], scene_corners[3], cv::Scalar(0, 255, 0), 2 );
  cv::line( img_matches, scene_corners[3], scene_corners[0], cv::Scalar(0, 255, 0), 2 );
  cv::imshow("Test", img_matches);
  cv::waitKey(0);
  //========================================= end test code
  // return the fundamental matrix found
  return fundemental;
}

Removing low-quality matches

ratioTest implements the nearest-neighbour distance ratio test: for each keypoint, if the distance to the best match divided by the distance to the second-best match exceeds ratio, the best match is not clearly better than its runner-up, so the pair is considered ambiguous and is removed (as is any keypoint with fewer than two neighbours). For example, with ratio = 0.65, a keypoint whose two nearest neighbours lie at distances 20 and 40 (ratio 0.5) is kept, while one with distances 30 and 40 (ratio 0.75) is rejected.

symmetryTest checks that matches between the two images form a one-to-one mapping: a pair is kept only if the best match from image 1 to image 2 and the best match from image 2 to image 1 agree; all non-symmetric matches are discarded.
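How aggressively these filters prune is controlled by the setters in the class. A minimal tuning sketch (the values below are illustrative, not recommendations):

RobustMatcher rmatcher;
rmatcher.setRatio(0.8f);                // looser ratio test: keeps more, but noisier, matches
rmatcher.setMinDistanceToEpipolar(1.0); // stricter RANSAC: inliers must lie within 1 px of the epipolar line
rmatcher.setConfidenceLevel(0.98);      // confidence level passed to cv::findFundamentalMat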

3. A concrete usage example:

int main()
{
  // set parameters
  int numKeyPoints = 1500;
  //Instantiate robust matcher
  RobustMatcher rmatcher;
  //instantiate detector, extractor, matcher
  cv::Ptr<cv::FeatureDetector> detector = new cv::OrbFeatureDetector(numKeyPoints);
  cv::Ptr<cv::DescriptorExtractor> extractor = new cv::OrbDescriptorExtractor;
  cv::Ptr<cv::DescriptorMatcher> matcher = new cv::BruteForceMatcher<cv::HammingLUT>;
  rmatcher.setFeatureDetector(detector);
  rmatcher.setDescriptorExtractor(extractor);
  rmatcher.setDescriptorMatcher(matcher);
  // Load the input images and run the matcher
  cv::Mat img1, img2;
  std::vector<cv::KeyPoint> img1_keypoints, img2_keypoints;
  std::vector<cv::DMatch> matches;
  img1 = cv::imread("C:\\temp\\PyramidPattern.jpg", CV_LOAD_IMAGE_GRAYSCALE);
  /*img2 = cv::imread("C:\\temp\\PyramidPatternTest.bmp", CV_LOAD_IMAGE_GRAYSCALE);*/
  //img2 = cv::imread("C:\\temp\\test1.jpg", CV_LOAD_IMAGE_GRAYSCALE);
  img2 = cv::imread("C:\\temp\\test2.jpg", CV_LOAD_IMAGE_GRAYSCALE);
  rmatcher.match(img1, img2, matches, img1_keypoints, img2_keypoints);
  return 0;
}

Finally, this approach works with any of the feature matching algorithms in OpenCV, such as SIFT or SURF: simply swap the detector and extractor created in the first step. Note that SIFT and SURF produce floating-point descriptors, so the Hamming-distance matcher must also be replaced with an L2-based one.
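As a sketch of that swap (assuming an OpenCV 2.x build where SURF is available; on 2.4 it lives in the nonfree module), the SURF-based setup would look roughly like this:

#include <opencv2/nonfree/features2d.hpp> // SurfFeatureDetector on OpenCV 2.4 (part of features2d on 2.3)

RobustMatcher rmatcher;
cv::Ptr<cv::FeatureDetector> detector = new cv::SurfFeatureDetector(400.0); // Hessian threshold
cv::Ptr<cv::DescriptorExtractor> extractor = new cv::SurfDescriptorExtractor;
// SURF descriptors are float vectors, so match with L2 distance rather than Hamming
cv::Ptr<cv::DescriptorMatcher> matcher = new cv::BruteForceMatcher<cv::L2<float> >;
rmatcher.setFeatureDetector(detector);
rmatcher.setDescriptorExtractor(extractor);
rmatcher.setDescriptorMatcher(matcher);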

The matching results are quite good.


There is a drawback, however: a complete matching pass takes roughly 100+ ms per frame, which makes the approach unsuitable for applications with strict real-time requirements.
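To measure that cost on your own images, one can time the call with OpenCV's tick counter (a minimal sketch reusing the variables from the example above; requires <iostream>):

double t0 = (double)cv::getTickCount();
rmatcher.match(img1, img2, matches, img1_keypoints, img2_keypoints);
double ms = ((double)cv::getTickCount() - t0) / cv::getTickFrequency() * 1000.0;
std::cout << "full match took " << ms << " ms" << std::endl;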


