Let's go straight to the code; the code explains things best.
You can also refer to this post: http://www.cnblogs.com/wangguchangqing/p/4333873.html
#include <stdio.h>
#include <iostream>
#include "opencv2/core/core.hpp"            // the OpenCV include directories are set in the project properties, so these headers resolve as if they were local
#include "opencv2/imgproc/imgproc.hpp"      // cvtColor
#include <opencv2/nonfree/features2d.hpp>   // SURF lives in the nonfree module in OpenCV 2.4.x
#include "opencv2/highgui/highgui.hpp"
#include <opencv2/legacy/legacy.hpp>

/* This example matches one image against an image collection -- note how the FLANN matcher is used. */

using namespace cv;
using namespace std;

int main()
{
    // Load the training (reference) image and convert it to grayscale.
    Mat trainImage = imread("1.jpg"), trainImage_gray;
    cvtColor(trainImage, trainImage_gray, CV_BGR2GRAY);

    // Detect SURF keypoints and compute their descriptors on the training image.
    // The constructor argument 80 is the Hessian threshold: keypoints whose Hessian
    // response is below it are discarded, so a larger value keeps fewer, stronger keypoints.
    vector<KeyPoint> train_keypoint;
    Mat trainDescriptor;
    SurfFeatureDetector featureDetector(80);
    featureDetector.detect(trainImage_gray, train_keypoint);
    SurfDescriptorExtractor featureExtractor;
    featureExtractor.compute(trainImage_gray, train_keypoint, trainDescriptor);

    // Create a FLANN-based descriptor matcher.
    // The vector constructor (1, trainDescriptor) builds a collection containing one Mat,
    // i.e. a "training set" made up of a single image's descriptors.
    FlannBasedMatcher matcher;
    vector<Mat> train_desc_collection(1, trainDescriptor);
    matcher.add(train_desc_collection);   // register the descriptor collection with the matcher
    matcher.train();                      // build the FLANN index over the stored descriptors

    VideoCapture cap("test.avi");

    while (char(waitKey(1)) != 'q')
    {
        int64 time0 = getTickCount();
        Mat testImage, testImage_gray;
        cap >> testImage;
        if (testImage.empty()) continue;

        cvtColor(testImage, testImage_gray, CV_BGR2GRAY);

        // Detect keypoints and compute descriptors for the current frame.
        vector<KeyPoint> test_keyPoint;
        Mat testDescriptor;
        featureDetector.detect(testImage_gray, test_keyPoint);
        featureExtractor.compute(testImage_gray, test_keyPoint, testDescriptor);

        // For each query descriptor, return its two nearest-neighbour matches
        // from the trained collection.
        vector<vector<DMatch> > matches;
        matcher.knnMatch(testDescriptor, matches, 2);

        // Lowe's ratio test: accept a match only when the best match is clearly
        // closer than the second-best one.
        vector<DMatch> goodMatches;
        for (size_t i = 0; i < matches.size(); i++)
        {
            if (matches[i][0].distance < 0.6 * matches[i][1].distance)
                goodMatches.push_back(matches[i][0]);
        }

        Mat dstImage;
        drawMatches(testImage, test_keyPoint, trainImage, train_keypoint, goodMatches, dstImage);
        imshow("Matches", dstImage);

        // Frames per second: tick frequency divided by the ticks spent on this frame.
        cout << getTickFrequency() / (getTickCount() - time0) << endl;
    }

    waitKey(0);
    return 0;
}
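About the matcher.add()/matcher.train() lines: FlannBasedMatcher can be used in two ways. You can hand the reference descriptors to knnMatch() on every call (plain image-to-image matching), or you can add() a collection of descriptor Mats (one per reference image), train() the FLANN index once, and then call knnMatch() with only the query descriptors; the imgIdx field of each DMatch then tells you which reference image the match came from. The fragment below is a minimal sketch of both styles against the OpenCV 2.4.x API; the names flannUsageSketch, queryDesc, refDesc1 and refDesc2 are placeholders for descriptors computed elsewhere, not part of the original code.

#include <vector>
#include "opencv2/core/core.hpp"
#include "opencv2/features2d/features2d.hpp"

using namespace cv;
using namespace std;

// Sketch of the two FlannBasedMatcher usage styles (illustrative names only).
void flannUsageSketch(const Mat& queryDesc, const Mat& refDesc1, const Mat& refDesc2)
{
    // Style 1: image-to-image -- pass the reference descriptors on every call.
    FlannBasedMatcher matcher1;
    vector<vector<DMatch> > matches1;
    matcher1.knnMatch(queryDesc, refDesc1, matches1, 2);  // 2 nearest neighbours per query descriptor

    // Style 2: image-to-collection -- store the reference descriptors once, query many times.
    // This is what the code above does with a collection of size 1.
    FlannBasedMatcher matcher2;
    vector<Mat> collection;
    collection.push_back(refDesc1);
    collection.push_back(refDesc2);
    matcher2.add(collection);   // register the descriptor sets of all reference images
    matcher2.train();           // build the FLANN index over the whole collection

    vector<vector<DMatch> > matches2;
    matcher2.knnMatch(queryDesc, matches2, 2);  // matches2[i][j].imgIdx identifies the matched reference image
}

In the loop above the collection only ever holds one image's descriptors, so the same result could be obtained with the two-argument knnMatch(); the add()/train() form is what scales to matching each frame against many reference images at once.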
Original post: http://www.cnblogs.com/573177885qq/p/4725798.html