最近在实现OCR的一个项目,需要用到拍照功能,得到的图片需要和AVCaptureVideoPreviewLayer设定的frame大小一致,然后对图片进行识别。搜索到两种方法:
1.设置AVCaptureStillImageOutput 的 outputSettings 属性。
// Bug fix: the height entry previously used CGRectGetWidth, producing a square
// (width x width) setting instead of the view's actual height.
// NOTE(review): per Apple's docs, AVCaptureStillImageOutput's outputSettings only
// honors AVVideoCodecKey (and AVVideoQualityKey for JPEG); the width/height keys
// are ignored here — which is why approach 1 showed no effect. Prefer approach 2.
NSDictionary *outputSettings = @{AVVideoWidthKey : @(CGRectGetWidth(self.view.bounds)),
                                 AVVideoHeightKey: @(CGRectGetHeight(self.view.bounds)),
                                 AVVideoCodecKey : AVVideoCodecJPEG};
[stillImageOutput setOutputSettings:outputSettings];
但是我根据上面的属性设定得到的图片还是没什么变化,所以我尝试了得到图片后再进行裁切。
2.获取图片根据frame截取图片。
//创建UIImage的Category(分类),以下裁切方法均实现在该分类中
/// Center-crops the receiver (after normalizing its orientation) so the result
/// matches the aspect ratio of `size`.
///
/// @param size The target size whose aspect ratio the crop should match.
/// @return The center-cropped image; if `size` has a non-positive dimension,
///         the orientation-fixed image is returned unchanged (bug fix: the
///         original divided by size.width/size.height with no zero guard).
- (UIImage *)clipImage:(CGSize)size {
    UIImage *aImage = [self fixOrientation];
    // Guard against division by zero below for degenerate target sizes.
    if (size.width <= 0 || size.height <= 0) {
        return aImage;
    }
    // Compare aspect ratios by cross-multiplication (no float division):
    // image ratio (w/h) <= target ratio (w/h)  =>  image is relatively taller,
    // so keep the full width and trim the height.
    if (aImage.size.width * size.height <= aImage.size.height * size.width) {
        CGFloat width = aImage.size.width;
        CGFloat height = aImage.size.width * size.height / size.width;
        // Crop is vertically centered; adjust the rect's y to crop elsewhere.
        return [aImage imageFromRect:CGRectMake(0, (aImage.size.height - height) / 2, width, height)];
    } else {
        // Image is relatively wider: keep the full height and trim the width.
        CGFloat width = aImage.size.height * size.width / size.height;
        CGFloat height = aImage.size.height;
        // Crop is horizontally centered; adjust the rect's x to crop elsewhere.
        return [aImage imageFromRect:CGRectMake((aImage.size.width - width) / 2, 0, width, height)];
    }
}
/**
 * Returns the portion of the receiver inside the given rect.
 *
 * @param rect The region to extract, in the coordinate space of the underlying
 *             CGImage (pixels). NOTE(review): for images with scale != 1 this
 *             differs from self.size (points); the fixOrientation result used
 *             by clipImage: is always scale 1, so that call site is safe —
 *             verify any other callers.
 * @return The cropped image, or nil when rect does not intersect the image.
 */
- (UIImage *)imageFromRect:(CGRect)rect{
    CGImageRef sourceImageRef = [self CGImage];
    // CGImageCreateWithImageInRect follows the Create Rule: the caller owns the
    // returned image and must release it. The original never did, leaking one
    // CGImage (the full cropped bitmap) per call — fixed below.
    CGImageRef newImageRef = CGImageCreateWithImageInRect(sourceImageRef, rect);
    if (newImageRef == NULL) {
        // Empty rect, or rect entirely outside the image bounds.
        return nil;
    }
    UIImage *newImage = [UIImage imageWithCGImage:newImageRef];
    CGImageRelease(newImageRef);
    return newImage;
}
/// Returns a copy of the receiver redrawn so its pixel data is upright
/// (imageOrientation == UIImageOrientationUp), by composing an affine transform
/// from the orientation and redrawing the CGImage through it. This is the
/// well-known Apple-sample implementation.
/// NOTE(review): the result is built with imageWithCGImage:, so it always has
/// scale 1 regardless of self.scale — confirm callers expect points == pixels.
- (UIImage *)fixOrientation{
// No-op if the orientation is already correct
UIImage *aImage = self ;
if (aImage.imageOrientation == UIImageOrientationUp)
return aImage;
// We need to calculate the proper transformation to make the image upright.
// We do it in 2 steps: Rotate if Left/Right/Down, and then flip if Mirrored.
CGAffineTransform transform =CGAffineTransformIdentity;
// Step 1: rotate the coordinate system. Each case translates first so the
// rotated image lands back inside the (0,0,w,h) drawing rect.
switch (aImage.imageOrientation) {
case UIImageOrientationDown:
case UIImageOrientationDownMirrored:
transform = CGAffineTransformTranslate(transform, aImage.size.width, aImage.size.height);
transform = CGAffineTransformRotate(transform, M_PI);
break;
case UIImageOrientationLeft:
case UIImageOrientationLeftMirrored:
transform = CGAffineTransformTranslate(transform, aImage.size.width,0);
transform = CGAffineTransformRotate(transform, M_PI_2);
break;
case UIImageOrientationRight:
case UIImageOrientationRightMirrored:
transform = CGAffineTransformTranslate(transform, 0, aImage.size.height);
transform = CGAffineTransformRotate(transform, -M_PI_2);
break;
default:
break;
}
// Step 2: for the Mirrored variants, additionally flip horizontally
// (translate then scale by -1 on x so the flip stays within the rect).
switch (aImage.imageOrientation) {
case UIImageOrientationUpMirrored:
case UIImageOrientationDownMirrored:
transform = CGAffineTransformTranslate(transform, aImage.size.width,0);
transform = CGAffineTransformScale(transform, -1, 1);
break;
case UIImageOrientationLeftMirrored:
case UIImageOrientationRightMirrored:
transform = CGAffineTransformTranslate(transform, aImage.size.height,0);
transform = CGAffineTransformScale(transform, -1, 1);
break;
default:
break;
}
// Now we draw the underlying CGImage into a new context, applying the transform
// calculated above.
// NOTE(review): CGBitmapContextCreate can return NULL (e.g. unsupported
// bitmap info); the calls below would then be no-ops / log errors — consider
// a NULL check before drawing.
CGContextRef ctx =CGBitmapContextCreate(NULL, aImage.size.width, aImage.size.height,
CGImageGetBitsPerComponent(aImage.CGImage),0,
CGImageGetColorSpace(aImage.CGImage),
CGImageGetBitmapInfo(aImage.CGImage));
CGContextConcatCTM(ctx, transform);
switch (aImage.imageOrientation) {
case UIImageOrientationLeft:
case UIImageOrientationLeftMirrored:
case UIImageOrientationRight:
case UIImageOrientationRightMirrored:
// Grr...
// For 90-degree rotations the source is drawn with width/height swapped;
// the transform above maps it back into the upright w x h context.
CGContextDrawImage(ctx,CGRectMake(0,0,aImage.size.height,aImage.size.width), aImage.CGImage);
break;
default:
CGContextDrawImage(ctx,CGRectMake(0,0,aImage.size.width,aImage.size.height), aImage.CGImage);
break;
}
// And now we just create a new UIImage from the drawing context
CGImageRef cgimg =CGBitmapContextCreateImage(ctx);
UIImage *img = [UIImage imageWithCGImage:cgimg];
// Both the context and the CGImage follow the Create Rule and are released
// here; the UIImage retains its own reference to the pixel data.
CGContextRelease(ctx);
CGImageRelease(cgimg);
return img;
}
部分代码来自网络!