<h4>1. Preface</h4>
I'm writing this post as a record of the process and as a reference for others. Discussion is very welcome; if anything is wrong, please point it out (gently). !~~
<h4>2. Perspective Correction: A Brief Introduction</h4>
Perspective correction, also called perspective control, is the process of compositing or editing a photo so that the result matches the viewer's expectation, compensating for the perspective distortion introduced when the photo was taken at an angle.
<h4>3. Workflow</h4>
To apply perspective correction to an object in a photo, we first have to locate that object in the image. The target is usually rectangular, so we use OpenCV's rectangle-detection routines to find all the quadrilaterals in the image, pick the largest one, and then correct the perspective of that rectangle. That's the whole pipeline.
<h3>The code:</h3>
// Find the rectangle and position the crop handles
- (void)detectEdges {
    cv::Mat original = [BLOpenCVHelper cvMatFromUIImage:_myImageView.image];
    CGSize targetSize = self.myImageView.contentSize;
    // Scale the image down to the size it is displayed at on screen. The image view does not
    // show the image at full resolution, so we resize and map into the view's coordinate space;
    // this way the four corner points returned by the rectangle search are already correct.
    cv::resize(original, original, cvSize(targetSize.width, targetSize.height));

    std::vector<std::vector<cv::Point>> squares;
    std::vector<cv::Point> largest_square;
    // Find all candidate rectangles
    find_squares(original, squares);
    // Pick the largest one
    find_largest_square(squares, largest_square);

    if (largest_square.size() == 4) {
        // Manually sorting points, needs major improvement. Sorry.
        // The corner with the smallest x+y is treated as top-left, the largest as bottom-right.
        NSMutableArray *points = [NSMutableArray array];
        NSMutableDictionary *sortedPoints = [NSMutableDictionary dictionary];
        for (int i = 0; i < 4; i++) {
            NSDictionary *dict = [NSDictionary dictionaryWithObjectsAndKeys:
                                  [NSValue valueWithCGPoint:CGPointMake(largest_square[i].x, largest_square[i].y)], @"point",
                                  [NSNumber numberWithInt:(largest_square[i].x + largest_square[i].y)], @"value", nil];
            [points addObject:dict];
        }
        int min = [[points valueForKeyPath:@"@min.value"] intValue];
        int max = [[points valueForKeyPath:@"@max.value"] intValue];
        int minIndex = 0;
        int maxIndex = 0;
        int missingIndexOne = 0;
        int missingIndexTwo = 0;
        for (int i = 0; i < 4; i++) {
            NSDictionary *dict = [points objectAtIndex:i];
            if ([[dict objectForKey:@"value"] intValue] == min) {
                [sortedPoints setObject:[dict objectForKey:@"point"] forKey:@"0"];
                minIndex = i;
                continue;
            }
            if ([[dict objectForKey:@"value"] intValue] == max) {
                [sortedPoints setObject:[dict objectForKey:@"point"] forKey:@"2"];
                maxIndex = i;
                continue;
            }
            missingIndexOne = i;
        }
        for (int i = 0; i < 4; i++) {
            if (missingIndexOne != i && minIndex != i && maxIndex != i) {
                missingIndexTwo = i;
            }
        }
        // The remaining two corners are told apart by their x coordinate
        if (largest_square[missingIndexOne].x < largest_square[missingIndexTwo].x) {
            // 2nd point found
            [sortedPoints setObject:[[points objectAtIndex:missingIndexOne] objectForKey:@"point"] forKey:@"3"];
            [sortedPoints setObject:[[points objectAtIndex:missingIndexTwo] objectForKey:@"point"] forKey:@"1"];
        } else {
            // 4th point found
            [sortedPoints setObject:[[points objectAtIndex:missingIndexOne] objectForKey:@"point"] forKey:@"1"];
            [sortedPoints setObject:[[points objectAtIndex:missingIndexTwo] objectForKey:@"point"] forKey:@"3"];
        }

        CGPoint point0 = [(NSValue *)[sortedPoints objectForKey:@"0"] CGPointValue];
        CGPoint point1 = [(NSValue *)[sortedPoints objectForKey:@"1"] CGPointValue];
        CGPoint point2 = [(NSValue *)[sortedPoints objectForKey:@"2"] CGPointValue];
        CGPoint point3 = [(NSValue *)[sortedPoints objectForKey:@"3"] CGPointValue];

        // Offset the detected corners by the image view's content frame so the crop rect lines up on screen
        [_cropRect topLeftCornerToCGPoint:CGPointMake(point0.x + _myImageView.contentFrame.origin.x, point0.y + _myImageView.contentFrame.origin.y)];
        [_cropRect topRightCornerToCGPoint:CGPointMake(point1.x + _myImageView.contentFrame.origin.x, point1.y + _myImageView.contentFrame.origin.y)];
        [_cropRect bottomRightCornerToCGPoint:CGPointMake(point2.x + _myImageView.contentFrame.origin.x, point2.y + _myImageView.contentFrame.origin.y)];
        [_cropRect bottomLeftCornerToCGPoint:CGPointMake(point3.x + _myImageView.contentFrame.origin.x, point3.y + _myImageView.contentFrame.origin.y)];
    } else {
        // No rectangle found: fall back to a default crop rect
        [_cropRect topLeftCornerToCGPoint:CGPointMake(50, 100)];
        [_cropRect topRightCornerToCGPoint:CGPointMake(SCREEN_WIDTH - 50, 100)];
        [_cropRect bottomRightCornerToCGPoint:CGPointMake(SCREEN_WIDTH - 50, SCREEN_HEIGHT - 126)];
        [_cropRect bottomLeftCornerToCGPoint:CGPointMake(50, SCREEN_HEIGHT - 126)];
    }
    original.release();
}
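The `[BLOpenCVHelper cvMatFromUIImage:]` helper used at the top of `detectEdges` is not listed in this post. A minimal sketch, following the standard UIImage → cv::Mat conversion pattern from the OpenCV iOS tutorial (the real BLOpenCVHelper implementation may differ), could look like this:

// Assumed helper (not from the original post): convert a UIImage into a 4-channel RGBA cv::Mat.
+ (cv::Mat)cvMatFromUIImage:(UIImage *)image {
    CGColorSpaceRef colorSpace = CGImageGetColorSpace(image.CGImage);
    CGFloat cols = image.size.width;
    CGFloat rows = image.size.height;

    cv::Mat cvMat(rows, cols, CV_8UC4); // 8 bits per component, 4 channels

    // Draw the UIImage into a bitmap context backed by the cv::Mat's buffer
    CGContextRef contextRef = CGBitmapContextCreate(cvMat.data,     // target buffer
                                                    cols, rows,     // width, height
                                                    8,              // bits per component
                                                    cvMat.step[0],  // bytes per row
                                                    colorSpace,
                                                    kCGImageAlphaNoneSkipLast | kCGBitmapByteOrderDefault);
    CGContextDrawImage(contextRef, CGRectMake(0, 0, cols, rows), image.CGImage);
    CGContextRelease(contextRef);
    return cvMat;
}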
<h4>Next, the rectangle-finding code</h4>
void find_squares(cv::Mat& image, std::vector<std::vector<cv::Point>> &squares) {
    // Blurring enhances edge detection
    cv::Mat blurred(image);
    // medianBlur(image, blurred, 9);
    GaussianBlur(image, blurred, cvSize(11, 11), 0); // changed from median blur to Gaussian for more accurate square detection
    cv::Mat gray0(blurred.size(), CV_8U), gray;
    std::vector<std::vector<cv::Point> > contours;

    // Find squares in every color plane of the image
    for (int c = 0; c < 3; c++)
    {
        int ch[] = {c, 0};
        mixChannels(&blurred, 1, &gray0, 1, ch, 1);

        // Try several threshold levels
        const int threshold_level = 2;
        for (int l = 0; l < threshold_level; l++)
        {
            // Use Canny instead of a zero threshold level:
            // Canny helps to catch squares with gradient shading
            if (l == 0)
            {
                Canny(gray0, gray, 10, 20, 3);
                // Canny(gray0, gray, 0, 50, 5);
                // Dilate helps to remove potential holes between edge segments
                dilate(gray, gray, cv::Mat(), cv::Point(-1, -1));
            }
            else
            {
                gray = gray0 >= (l + 1) * 255 / threshold_level;
            }

            // Find contours and store them in a list
            findContours(gray, contours, CV_RETR_LIST, CV_CHAIN_APPROX_SIMPLE);

            // Test the contours
            std::vector<cv::Point> approx;
            for (size_t i = 0; i < contours.size(); i++)
            {
                // Approximate the contour with accuracy proportional
                // to the contour perimeter
                approxPolyDP(cv::Mat(contours[i]), approx, arcLength(cv::Mat(contours[i]), true) * 0.02, true);

                // Note: the absolute value of the area is used because the
                // area may be positive or negative, depending on the
                // contour orientation
                if (approx.size() == 4 &&
                    fabs(contourArea(cv::Mat(approx))) > 1000 &&
                    isContourConvex(cv::Mat(approx)))
                {
                    double maxCosine = 0;
                    for (int j = 2; j < 5; j++)
                    {
                        double cosine = fabs(angle(approx[j % 4], approx[j - 2], approx[j - 1]));
                        maxCosine = MAX(maxCosine, cosine);
                    }
                    // Only keep quads whose corners are close to 90 degrees
                    if (maxCosine < 0.3)
                        squares.push_back(approx);
                }
            }
        }
    }
}
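`find_squares` calls an `angle` helper, and `detectEdges` calls `find_largest_square`; neither is listed above. A minimal sketch of both, modeled on OpenCV's classic squares.cpp sample (my own reconstruction, so the original implementation may differ slightly):

// Sketch (not from the original post): cosine of the angle between
// the vectors pt0->pt1 and pt0->pt2, as in OpenCV's squares.cpp sample.
static double angle(cv::Point pt1, cv::Point pt2, cv::Point pt0)
{
    double dx1 = pt1.x - pt0.x;
    double dy1 = pt1.y - pt0.y;
    double dx2 = pt2.x - pt0.x;
    double dy2 = pt2.y - pt0.y;
    return (dx1 * dx2 + dy1 * dy2) /
           sqrt((dx1 * dx1 + dy1 * dy1) * (dx2 * dx2 + dy2 * dy2) + 1e-10);
}

// Sketch (not from the original post): pick the candidate quad with the largest area.
void find_largest_square(const std::vector<std::vector<cv::Point>> &squares,
                         std::vector<cv::Point> &largest_square)
{
    if (squares.empty()) return;

    double maxArea = 0;
    int largestIndex = -1;
    for (size_t i = 0; i < squares.size(); i++) {
        double area = fabs(contourArea(cv::Mat(squares[i])));
        if (area > maxArea) {
            maxArea = area;
            largestIndex = (int)i;
        }
    }
    if (largestIndex >= 0)
        largest_square = squares[largestIndex];
}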
<h4>Finally, cropping and correction</h4>
if ([_cropRect frameEdited]) {
    // Thanks to StackOverflow
    CGFloat scaleFactor = [_myImageView contentScale];
    CGPoint ptBottomLeft  = [_cropRect coordinatesForPoint:1 withScaleFactor:scaleFactor];
    CGPoint ptBottomRight = [_cropRect coordinatesForPoint:2 withScaleFactor:scaleFactor];
    CGPoint ptTopRight    = [_cropRect coordinatesForPoint:3 withScaleFactor:scaleFactor];
    CGPoint ptTopLeft     = [_cropRect coordinatesForPoint:4 withScaleFactor:scaleFactor];

    // Measure the side lengths of the selected quadrilateral (Euclidean distances)
    CGFloat w1 = sqrt(pow(ptBottomRight.x - ptBottomLeft.x, 2) + pow(ptBottomRight.y - ptBottomLeft.y, 2));
    CGFloat w2 = sqrt(pow(ptTopRight.x - ptTopLeft.x, 2) + pow(ptTopRight.y - ptTopLeft.y, 2));
    CGFloat h1 = sqrt(pow(ptTopRight.x - ptBottomRight.x, 2) + pow(ptTopRight.y - ptBottomRight.y, 2));
    CGFloat h2 = sqrt(pow(ptTopLeft.x - ptBottomLeft.x, 2) + pow(ptTopLeft.y - ptBottomLeft.y, 2));

    CGFloat maxWidth = (w1 < w2) ? w1 : w2;
    CGFloat maxHeight = (h1 < h2) ? h1 : h2;

    // Source quad (the four selected corners) and destination rectangle
    cv::Point2f src[4], dst[4];
    src[0].x = ptTopLeft.x;
    src[0].y = ptTopLeft.y;
    src[1].x = ptTopRight.x;
    src[1].y = ptTopRight.y;
    src[2].x = ptBottomRight.x;
    src[2].y = ptBottomRight.y;
    src[3].x = ptBottomLeft.x;
    src[3].y = ptBottomLeft.y;

    dst[0].x = 0;
    dst[0].y = 0;
    dst[1].x = maxWidth - 1;
    dst[1].y = 0;
    dst[2].x = maxWidth - 1;
    dst[2].y = maxHeight - 1;
    dst[3].x = 0;
    dst[3].y = maxHeight - 1;

    // Warp the original image so the selected quad fills the output rectangle
    cv::Mat undistorted = cv::Mat(cvSize(maxWidth, maxHeight), CV_8UC4);
    cv::Mat original = [BLOpenCVHelper cvMatFromUIImage:_editorImg];
    cv::warpPerspective(original, undistorted, cv::getPerspectiveTransform(src, dst), cvSize(maxWidth, maxHeight));

    BLImageViewController *edcVc = [[BLImageViewController alloc] init];
    UIImage *img = nil;
    // Determine the image orientation from the rotation slider value
    if (_rotateSlider == 0.5f || _rotateSlider == -1.5) {
        img = [UIImage imageWithCGImage:[BLOpenCVHelper UIImageFromCVMat:undistorted].CGImage scale:1 orientation:UIImageOrientationRight];
    } else if (std::abs(_rotateSlider) == 1.f || _rotateSlider == -3) {
        img = [UIImage imageWithCGImage:[BLOpenCVHelper UIImageFromCVMat:undistorted].CGImage scale:1 orientation:UIImageOrientationDown];
    } else if (_rotateSlider == -0.5 || _rotateSlider == -2.5) {
        img = [UIImage imageWithCGImage:[BLOpenCVHelper UIImageFromCVMat:undistorted].CGImage scale:1 orientation:UIImageOrientationLeft];
    } else {
        img = [UIImage imageWithCGImage:[BLOpenCVHelper UIImageFromCVMat:undistorted].CGImage scale:1 orientation:UIImageOrientationUp];
    }

    // Crop done: push to the image preview controller
    edcVc.myImage = img; // [MMOpenCVHelper UIImageFromCVMat:undistorted];
    [self.navigationController pushViewController:edcVc animated:YES];

    original.release();
    undistorted.release();
}
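For completeness, `[BLOpenCVHelper UIImageFromCVMat:]` goes the other way, turning the warped cv::Mat back into a UIImage. Again, this is only a sketch based on the standard OpenCV iOS conversion code, not necessarily the exact helper used here:

// Assumed helper (not from the original post): convert a cv::Mat back into a UIImage.
+ (UIImage *)UIImageFromCVMat:(cv::Mat)cvMat {
    NSData *data = [NSData dataWithBytes:cvMat.data length:cvMat.elemSize() * cvMat.total()];
    CGColorSpaceRef colorSpace = (cvMat.elemSize() == 1)
        ? CGColorSpaceCreateDeviceGray()   // single-channel mats become grayscale
        : CGColorSpaceCreateDeviceRGB();
    CGDataProviderRef provider = CGDataProviderCreateWithCFData((__bridge CFDataRef)data);

    CGImageRef imageRef = CGImageCreate(cvMat.cols, cvMat.rows,
                                        8,                     // bits per component
                                        8 * cvMat.elemSize(),  // bits per pixel
                                        cvMat.step[0],         // bytes per row
                                        colorSpace,
                                        kCGImageAlphaNone | kCGBitmapByteOrderDefault,
                                        provider, NULL, false, kCGRenderingIntentDefault);
    UIImage *finalImage = [UIImage imageWithCGImage:imageRef];

    CGImageRelease(imageRef);
    CGDataProviderRelease(provider);
    CGColorSpaceRelease(colorSpace);
    return finalImage;
}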
Next up is the demo, and that's all, haha.