I recently worked on a project that needed face detection and automatic photo capture, so I'm recording the relevant code here.
The face recognition itself is handled by the third-party service Face++, but the client app does no recognition of its own; it only takes photos and uploads them, and the backend analyzes the data. In that sense the implementation is actually quite similar to scanning a QR code.
First, get the capture device, create the input and output streams along with a still-image output and a preview layer, and initialize the AVCaptureSession object.
//Get the camera device (front camera)
device = [self cameraWithPosition:AVCaptureDevicePositionFront];
//Create the input
input = [AVCaptureDeviceInput deviceInputWithDevice:device error:nil];
if (!input) return;
//Create the metadata output
output = [[AVCaptureMetadataOutput alloc] init];
//Set the delegate; deliver callbacks on the main queue
[output setMetadataObjectsDelegate:self queue:dispatch_get_main_queue()];
//Initialize the session
_session = [[AVCaptureSession alloc] init];
//High-quality capture preset
[_session setSessionPreset:AVCaptureSessionPresetHigh];
if ([_session canAddInput:input]) {
    [_session addInput:input];
}
if ([_session canAddOutput:output]) {
    [_session addOutput:output];
}
if ([_session canAddOutput:self.stillImageOutput]) {
    [_session addOutput:self.stillImageOutput];
}
if ([_session canAddOutput:self.videoDataOutput]) {
    [_session addOutput:self.videoDataOutput];
}
//Set the pixel format
[_videoDataOutput setVideoSettings:@{(id)kCVPixelBufferPixelFormatTypeKey : @(kCVPixelFormatType_32BGRA)}];
//Set the metadata type to face detection (this must be set after the output has been added to the session)
output.metadataObjectTypes = @[AVMetadataObjectTypeFace];
//Create the preview layer
layer = [AVCaptureVideoPreviewLayer layerWithSession:_session];
layer.videoGravity = AVLayerVideoGravityResizeAspectFill;
layer.frame = self.view.layer.bounds;
[self.view.layer insertSublayer:layer atIndex:0];
//Start capturing
[_session startRunning];
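The snippet above calls a cameraWithPosition: helper and uses self.stillImageOutput / self.videoDataOutput properties whose definitions aren't shown in the post. A minimal sketch of what they might look like, assuming lazily created outputs and the pre-iOS-10 device enumeration API (the implementations are my assumption, not the original code):

//Find the capture device at the given position (front or back)
- (AVCaptureDevice *)cameraWithPosition:(AVCaptureDevicePosition)position {
    for (AVCaptureDevice *device in [AVCaptureDevice devicesWithMediaType:AVMediaTypeVideo]) {
        if (device.position == position) {
            return device;
        }
    }
    return nil;
}

//Lazily created still-image output used to take the photo
//(AVCaptureStillImageOutput is deprecated in iOS 10 in favor of AVCapturePhotoOutput)
- (AVCaptureStillImageOutput *)stillImageOutput {
    if (!_stillImageOutput) {
        _stillImageOutput = [[AVCaptureStillImageOutput alloc] init];
        _stillImageOutput.outputSettings = @{AVVideoCodecKey : AVVideoCodecJPEG};
    }
    return _stillImageOutput;
}

//Lazily created video data output that delivers live frames to the sample buffer delegate
//on a serial background queue; UI work in the callback is dispatched back to the main queue
- (AVCaptureVideoDataOutput *)videoDataOutput {
    if (!_videoDataOutput) {
        _videoDataOutput = [[AVCaptureVideoDataOutput alloc] init];
        _videoDataOutput.alwaysDiscardsLateVideoFrames = YES;
        dispatch_queue_t queue = dispatch_queue_create("videoDataOutputQueue", DISPATCH_QUEUE_SERIAL);
        [_videoDataOutput setSampleBufferDelegate:self queue:queue];
    }
    return _videoDataOutput;
}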
Conform to the AVCaptureMetadataOutputObjectsDelegate and AVCaptureVideoDataOutputSampleBufferDelegate protocols, and implement the following two methods.
//AVCaptureVideoDataOutput delivers live frames; this callback fires very frequently, roughly at the screen refresh rate
- (void)captureOutput:(AVCaptureOutput *)captureOutput didOutputSampleBuffer:(CMSampleBufferRef)sampleBuffer fromConnection:(AVCaptureConnection *)connection {
    [connection setVideoOrientation:AVCaptureVideoOrientationPortrait];
    //Convert the buffer synchronously, while it is still valid
    constantImage = [self imageFromSampleBuffer:sampleBuffer];
    //addFaceFrameWithImage: touches UIKit views, so it must run on the main queue
    dispatch_async(dispatch_get_main_queue(), ^{
        [self addFaceFrameWithImage:constantImage];
    });
}
//Convert a CMSampleBufferRef into a UIImage
- (UIImage *)imageFromSampleBuffer:(CMSampleBufferRef)sampleBuffer {
    // Get the Core Video image buffer for the media data in the sample buffer
    CVImageBufferRef imageBuffer = CMSampleBufferGetImageBuffer(sampleBuffer);
    // Lock the base address of the pixel buffer
    CVPixelBufferLockBaseAddress(imageBuffer, 0);
    // Get the base address of the pixel buffer
    void *baseAddress = CVPixelBufferGetBaseAddress(imageBuffer);
    // Get the number of bytes per row
    size_t bytesPerRow = CVPixelBufferGetBytesPerRow(imageBuffer);
    // Get the width and height of the pixel buffer
    size_t width = CVPixelBufferGetWidth(imageBuffer);
    size_t height = CVPixelBufferGetHeight(imageBuffer);
    // Create a device-dependent RGB color space
    CGColorSpaceRef colorSpace = CGColorSpaceCreateDeviceRGB();
    // Create a bitmap graphics context backed by the sample buffer's pixel data
    CGContextRef context = CGBitmapContextCreate(baseAddress, width, height, 8, bytesPerRow, colorSpace, kCGBitmapByteOrder32Little | kCGImageAlphaPremultipliedFirst);
    // Create a Quartz image from the pixel data in the bitmap context
    CGImageRef quartzImage = CGBitmapContextCreateImage(context);
    // Unlock the pixel buffer
    CVPixelBufferUnlockBaseAddress(imageBuffer, 0);
    // Release the context and color space
    CGContextRelease(context);
    CGColorSpaceRelease(colorSpace);
    // Create a UIImage from the Quartz image
    UIImage *image = [UIImage imageWithCGImage:quartzImage scale:1 orientation:UIImageOrientationUp];
    // Release the Quartz image
    CGImageRelease(quartzImage);
    return image;
}
//Locate faces and add highlight frames
- (void)addFaceFrameWithImage:(UIImage *)images {
    CIContext *context = [CIContext contextWithOptions:nil];
    CIImage *image = [CIImage imageWithCGImage:images.CGImage];
    //Low accuracy keeps per-frame detection cheap
    NSDictionary *param = [NSDictionary dictionaryWithObject:CIDetectorAccuracyLow forKey:CIDetectorAccuracy];
    CIDetector *faceDetector = [CIDetector detectorOfType:CIDetectorTypeFace context:context options:param];
    NSArray *detectResult = [faceDetector featuresInImage:image];
    //Hide all existing highlight views before repositioning them
    for (int j = 0; m_highlitView[j] != nil; j++) {
        m_highlitView[j].hidden = YES;
    }
    int i = 0;
    for (CIFaceFeature *faceObject in detectResult) {
        CGRect modifiedFaceBounds = faceObject.bounds;
        //Core Image uses a bottom-left origin, so flip y into UIKit's top-left coordinate system
        modifiedFaceBounds.origin.y = images.size.height - faceObject.bounds.size.height - faceObject.bounds.origin.y;
        [self addSubViewWithFrame:modifiedFaceBounds index:i];
        i++;
    }
}
///Draw the highlight view for one face
- (void)addSubViewWithFrame:(CGRect)frame index:(int)_index {
    if (m_highlitView[_index] == nil) {
        m_highlitView[_index] = [[UIView alloc] initWithFrame:frame];
        m_highlitView[_index].layer.borderWidth = 2;
        m_highlitView[_index].layer.borderColor = [[UIColor redColor] CGColor];
        [self.view addSubview:m_highlitView[_index]];
        m_transform[_index] = m_highlitView[_index].transform;
    }
    //Empirical factors mapping face bounds in image coordinates onto the on-screen preview
    frame.origin.x = frame.origin.x / 2.5;
    frame.origin.y = frame.origin.y / 2.5;
    frame.size.width = frame.size.width / 1.8;
    frame.size.height = frame.size.height / 1.8;
    m_highlitView[_index].frame = frame;
    ///Scale the highlight view according to the size of the face
    float scale = frame.size.width / 220;
    CGAffineTransform transform = CGAffineTransformScale(m_transform[_index], scale, scale);
    m_highlitView[_index].transform = transform;
    m_highlitView[_index].hidden = NO;
}
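The two methods above use m_highlitView and m_transform without declaring them anywhere in the post. Presumably they are fixed-capacity C arrays of instance variables; a sketch of what the declarations might look like (the capacity of 10 and the class name are my assumptions):

@interface FaceDetectViewController () {
    //Reused highlight views; the nil-scan loop above assumes unused slots stay nil
    UIView *m_highlitView[10];
    //Each view's original transform, used as the base for per-face scaling
    CGAffineTransform m_transform[10];
    //The most recent frame converted to a UIImage
    UIImage *constantImage;
}
@end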
Faces are detected in the following delegate method:
- (void)captureOutput:(AVCaptureOutput *)captureOutput didOutputMetadataObjects:(NSArray *)metadataObjects fromConnection:(AVCaptureConnection *)connection
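The post doesn't show the body of this callback. A minimal sketch, assuming we simply want to trigger one still capture as soon as a face shows up (the hasDetectedFace flag and takePhoto helper are hypothetical, added to avoid firing on every callback):

- (void)captureOutput:(AVCaptureOutput *)captureOutput didOutputMetadataObjects:(NSArray *)metadataObjects fromConnection:(AVCaptureConnection *)connection {
    for (AVMetadataObject *object in metadataObjects) {
        if ([object.type isEqualToString:AVMetadataObjectTypeFace] && !self.hasDetectedFace) {
            self.hasDetectedFace = YES; //hypothetical flag so we only capture once
            [self takePhoto];           //hypothetical helper wrapping the still capture shown below
            break;
        }
    }
}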
The photo is obtained with AVCaptureStillImageOutput's following method, and the result is ultimately uploaded to the server for comparison:
- (void)captureStillImageAsynchronouslyFromConnection:(AVCaptureConnection *)connection completionHandler:(void (^)(CMSampleBufferRef imageDataSampleBuffer, NSError *error))handler
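Put together, a capture call might look like this (takePhoto is the hypothetical helper referenced above; the upload step is only indicated with a comment):

- (void)takePhoto {
    AVCaptureConnection *connection = [self.stillImageOutput connectionWithMediaType:AVMediaTypeVideo];
    if (!connection) return;
    [self.stillImageOutput captureStillImageAsynchronouslyFromConnection:connection completionHandler:^(CMSampleBufferRef imageDataSampleBuffer, NSError *error) {
        if (error || !imageDataSampleBuffer) return;
        //Convert the sample buffer to JPEG data ready for upload
        NSData *jpegData = [AVCaptureStillImageOutput jpegStillImageNSDataRepresentation:imageDataSampleBuffer];
        //...upload jpegData to the backend, which runs the Face++ comparison
    }];
}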
The one unresolved problem is that the red face-tracking frames flicker rapidly; everything else works as expected.