코코야이야기
[c++] OpenCV 템플릿매칭1 본문
소스코드
//cvExtractSURF 함수를 이용한 두 영상의 대응점 계산
//Descriptors - 1 이면 같은 사진
#include "cv.h"
#include "highgui.h"
#include <stdio.h>
#include <nonfree\nonfree.hpp>
// One correspondence between the two SURF keypoint sequences.
struct MATCH_PAIR
{
    int nA; // index into the template (query) keypoint/descriptor sequence
    int nB; // index into the source (database) keypoint/descriptor sequence
};
// Copies Image1 (left half) and Image2 (right half) side by side into dstImage.
void MergeImages(IplImage* Image1, IplImage* Image2, IplImage* dstImage);
// Fills pMatchPair with matched keypoint index pairs between the template (t*)
// and source (src*) SURF sequences; returns the number of pairs written.
int FindMatchingPoints(const CvSeq* tKeypoints, const CvSeq* tDescriptors,
const CvSeq* srcKeypoints, const CvSeq* srcDescriptors, int descriptor_size, MATCH_PAIR *pMatchPair );
// Returns the index of the source descriptor nearest to pA (same Laplacian
// sign, squared-L2 distance) that passes the ratio test, or -1 when none does.
int FindNearestPoint(const float* pA, int laplacian, const CvSeq* srcKeypoints, const CvSeq* srcDescriptors, int descriptor_size);
// Estimates a homography from pt1 -> pt2 and maps the 4 corners in place;
// returns 1 on success, 0 when estimation fails.
int TransformHomography(CvPoint corners[], CvPoint2D32f *pt1, CvPoint2D32f *pt2, int count);
int main( )
{
//원본영상은 비교대상보다 사진의 길이가 크거나 같아야함.
IplImage *srcImage; //원본 - 데이터베이스
if( (srcImage = cvLoadImage( "test2.jpg", CV_LOAD_IMAGE_GRAYSCALE)) == NULL ) //절대경로 사용시 \\으로 사용
return -1;
IplImage* tImage; //비교대상
//if((tImage = cvLoadImage("C:\\dataPicture1\\2.jpg", CV_LOAD_IMAGE_GRAYSCALE)) == NULL )
if((tImage = cvLoadImage("test2.jpg", CV_LOAD_IMAGE_GRAYSCALE)) == NULL )
return -1;
const int nXsrcOrg = tImage->width;
IplImage* dstImage = cvCreateImage( cvSize(tImage->width+srcImage->width, srcImage->height), IPL_DEPTH_8U, 3 );
CvSeq *tKeypoints = NULL, *tDescriptors = NULL;
CvSeq *srcKeypoints = NULL, *srcDescriptors = NULL;
CvMemStorage* storage = cvCreateMemStorage(0);
CvSURFParams params = cvSURFParams(500, 0);
cv::initModule_nonfree();
cvExtractSURF(tImage, NULL, &tKeypoints, &tDescriptors, storage, params);
// printf("tKeypoints: %d\n", tKeypoints->total);
// printf("tDescriptors: %d\n", tDescriptors->total);
cvExtractSURF(srcImage,NULL,&srcKeypoints,&srcDescriptors,storage, params);
// printf("srcKeypoints: %d\n", srcKeypoints->total);
// printf("Descriptors: %d\n", srcDescriptors->total);
MergeImages(tImage, srcImage, dstImage); //timage 영상과 srcimage 영상을 dstimage 영상에 복사
int i, k;
int x1, y1, x2, y2;
CvSURFPoint* surf1, *surf2;
int *pMatchIndx = new int[tKeypoints->total];
MATCH_PAIR *pMatchPair = new MATCH_PAIR[tKeypoints->total];
int descriptor_size = params.extended? 128 : 64;
int nMatchingCount = FindMatchingPoints(tKeypoints, tDescriptors, srcKeypoints, srcDescriptors, descriptor_size, pMatchPair); //매칭점 수 저장
// printf("nMatchingCount = %d\n", nMatchingCount);
//두 영상 판단
if(nMatchingCount + 1 > 100)
printf("두 영상이 일치합니다.");
else
printf("두 영상은 다릅니다.");
CvPoint2D32f *pt1 = new CvPoint2D32f[nMatchingCount];
CvPoint2D32f *pt2 = new CvPoint2D32f[nMatchingCount];
//일치부분 파란선으로 잇기
for( k=0; k<nMatchingCount; k++)
{
surf1= (CvSURFPoint*)cvGetSeqElem( tKeypoints, pMatchPair[k].nA );
x1 = cvRound(surf1->pt.x);
y1 = cvRound(surf1->pt.y);
surf2= (CvSURFPoint*)cvGetSeqElem( srcKeypoints, pMatchPair[k].nB );
x2 = cvRound(surf2->pt.x) + nXsrcOrg;
y2 = cvRound(surf2->pt.y);
CvPoint r1 = cvPoint(x1, y1);
CvPoint r2 = cvPoint(x2, y2);
cvLine( dstImage, r1, r2, CV_RGB(0, 0, 255) );
// to calculate a homography transform
pt1[k] = surf1->pt;
pt2[k] = surf2->pt;
}
CvPoint corners[4]= {{0, 0}, {tImage->width, 0}, {tImage->width, tImage->height}, {0, tImage->height}};
//매칭포인트의 수가 너무작으면 --> 사진이 없거나 너무 다르거나
if(nMatchingCount<4)
{
printf("We need more than 4 matching points to calculate a homography transform\n");
return 0;
}
int nRet = TransformHomography(corners, pt1, pt2, nMatchingCount);
//일치부분 빨간사각형 그리기
if(nRet >0)
{
for( i=0; i < 4; i++)
{
CvPoint r1 = corners[i%4]; r1.x += nXsrcOrg;
CvPoint r2 = corners[(i+1)%4]; r2.x += nXsrcOrg;
cvLine(dstImage, r1, r2, CV_RGB(255, 0, 0), 2 );
}
}
delete pMatchPair;
delete pt1;
delete pt2;
cvNamedWindow("dstImage", CV_WINDOW_AUTOSIZE);
cvShowImage("dstImage", dstImage);
cvWaitKey(0);
cvDestroyAllWindows();
cvReleaseMemStorage(&storage );
cvReleaseImage(&tImage);
cvReleaseImage(&srcImage);
cvReleaseImage(&dstImage);
return 0;
}
//한 윈도우에 두 영상 넣기 - 복사/
// Compose the two single-channel input images side by side into the
// 3-channel dstImage: Image1 on the left, Image2 on the right.  Each
// grayscale source is replicated into all three colour channels by copying
// through the channel-of-interest (COI) one channel at a time.
void MergeImages(IplImage* Image1, IplImage* Image2, IplImage* dstImage)
{
    cvSet(dstImage, CV_RGB(255, 255, 255) ); // white background

    // Left half: Image1 into channels 1..3 of the ROI.
    cvSetImageROI(dstImage, cvRect( 0, 0, Image1->width, Image1->height ) );
    for (int coi = 1; coi <= 3; coi++)
    {
        cvSetImageCOI(dstImage, coi);
        cvCopy(Image1, dstImage );
    }

    // Right half: Image2, offset horizontally by Image1's width.
    cvSetImageROI( dstImage, cvRect(Image1->width, 0, Image2->width, Image2->height ) );
    for (int coi = 1; coi <= 3; coi++)
    {
        cvSetImageCOI(dstImage, coi);
        cvCopy(Image2, dstImage );
    }

    cvResetImageROI( dstImage ); // clears both ROI and COI
}
// Builds the list of matched keypoint pairs between the template (t*) and
// source (src*) SURF sequences.  For every template descriptor the nearest
// source descriptor passing the ratio test is looked up; accepted pairs are
// appended to pMatchPair (which must hold at least tKeypoints->total entries).
// Returns the number of pairs written.
int FindMatchingPoints(const CvSeq* tKeypoints, const CvSeq* tDescriptors,const CvSeq* srcKeypoints, const CvSeq* srcDescriptors, int descriptor_size, MATCH_PAIR *pMatchPair )
{
    int k = 0;
    for( int i = 0; i < tDescriptors->total; i++)
    {
        float* pA = (float*)cvGetSeqElem( tDescriptors, i );
        CvSURFPoint* surfA = (CvSURFPoint*)cvGetSeqElem( tKeypoints, i );
        int nMatchB = FindNearestPoint(pA, surfA->laplacian, srcKeypoints,
            srcDescriptors, descriptor_size);
        // FIX: FindNearestPoint returns a 0-based index on success and -1 on
        // failure; the original test `nMatchB > 0` silently discarded every
        // valid match against source keypoint 0.
        if(nMatchB >= 0)
        {
            pMatchPair[k].nA = i;
            pMatchPair[k].nB = nMatchB;
            k++;
        }
    }
    return k;
}
int FindNearestPoint(const float* pA, int laplacian, const CvSeq* srcKeypoints, const CvSeq* srcDescriptors, int descriptor_size)
{
int i, k;
float* pB; // descriptor vector in srcDescriptors
CvSURFPoint* surfB;
int nMatch = -1;
double sum2, min1 = 10000, min2 = 10000;
for( i=0; i<srcDescriptors->total; i++)
{
surfB= (CvSURFPoint*)cvGetSeqElem( srcKeypoints, i );
pB= (float*)cvGetSeqElem( srcDescriptors, i );
if( laplacian != surfB->laplacian )
continue;
sum2 = 0.0f;
for(k=0; k< descriptor_size; k++)
{
sum2 += (pA[k] - pB[k])*(pA[k] - pB[k]);
}
if( sum2 < min1 )
{
min2 = min1;
min1 = sum2;
nMatch = i;
}
else if ( sum2 < min2 )
min2 = sum2;
}
// printf("min1 = %f, min2 = %f\n", min1, min2);
if ( min1 < 0.6*min2 )
return nMatch;
return -1;
}
// Estimates the homography mapping pt1 -> pt2 (count point pairs) with
// RANSAC and applies it to the four template corners in place.
// Returns 1 on success, 0 when cvFindHomography fails.
int TransformHomography(CvPoint corners[], CvPoint2D32f *pt1, CvPoint2D32f *pt2, int count)
{
    double h[9];
    CvMat H  = cvMat(3, 3, CV_64F, h);
    CvMat M1 = cvMat(1, count, CV_32FC2, pt1 );
    CvMat M2 = cvMat(1, count, CV_32FC2, pt2 );

    // RANSAC with a 2-pixel reprojection threshold.
    if( !cvFindHomography(&M1, &M2, &H, CV_RANSAC, 2 ))
        return 0;

    // Apply the projective transform [X Y W]^T = H [x y 1]^T to each corner.
    for( int i = 0; i < 4; i++ )
    {
        const double x = corners[i].x;
        const double y = corners[i].y;
        const double W = h[6]*x + h[7]*y + h[8];
        corners[i].x = cvRound( (h[0]*x + h[1]*y + h[2]) / W );
        corners[i].y = cvRound( (h[3]*x + h[4]*y + h[5]) / W );
    }
    return 1;
}
출력영상
'프로그래밍 > OpenCV' 카테고리의 다른 글

| 글 제목 | 댓글 | 작성일 |
|---|---|---|
| [c++] OpenCV 템플릿매칭2 | (0) | 2015.09.27 |
| [c++] OpenCV 카메라영상 | (0) | 2015.09.25 |
| [c++] OpenCV 특징점검출 | (0) | 2015.09.24 |
| [c++] OpenCV 원검출 | (0) | 2015.09.24 |
| [c++] OpenCV 직선검출 | (0) | 2015.09.23 |