Core ML
Core ML lets you integrate a wide variety of machine learning models into your app. In addition to extensive deep learning support covering more than 30 layer types, it also supports standard models such as tree ensembles, support vector machines (SVMs), and generalized linear models. Because it is built on low-level technologies such as Metal and Accelerate, Core ML seamlessly takes advantage of the CPU and GPU to deliver maximum performance and efficiency. Models run on the device itself, so data can be analyzed without ever leaving it.
With Core ML we can take many kinds of machine learning models and integrate them into an app much faster; Apple's own features such as Siri, Camera, and QuickType are built on the same stack. The application layer above Core ML consists of Vision, Natural Language Processing, and GameplayKit; the low-level support underneath comes from Accelerate, BNNS, and Metal Performance Shaders.
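Before the Vision examples below, it helps to see what calling Core ML directly looks like. Here is a minimal sketch, assuming Apple's Resnet50.mlmodel has been added to the project (Xcode then generates the Resnet50 and Resnet50Output classes; the predictionFromImage:error: name follows the code-generation convention for a model whose image input is named "image", and pixelBuffer is assumed to already be a 224x224 CVPixelBufferRef):

#import <CoreML/CoreML.h>

- (void)classifyPixelBuffer:(CVPixelBufferRef)pixelBuffer {
    // Resnet50 is the class Xcode generates from Resnet50.mlmodel
    Resnet50 *resnetModel = [[Resnet50 alloc] init];
    NSError *error = nil;
    // Run the model directly, without Vision; the input must already be a
    // pixel buffer in the size and format the model expects (assumed 224x224)
    Resnet50Output *output = [resnetModel predictionFromImage:pixelBuffer error:&error];
    if (output != nil) {
        NSLog(@"Top label: %@", output.classLabel);
    } else {
        NSLog(@"%@", error.localizedDescription);
    }
}

Vision, used in the rest of this post, wraps exactly this kind of call and also handles scaling and format conversion of the input image for you.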
Vision
Machine Learning Image Analysis
- (void)machineLearningImageAnalysis {
    // Resnet50 is the class Xcode generates from the Resnet50.mlmodel file
    Resnet50 *resnetModel = [[Resnet50 alloc] init];
    UIImage *image = self.showimage.image;
    // Wrap the Core ML model so Vision can drive it
    VNCoreMLModel *vnCoreModel = [VNCoreMLModel modelForMLModel:resnetModel.model error:nil];
    // Machine learning image analysis request
    VNCoreMLRequest *vnCoreRequest = [[VNCoreMLRequest alloc] initWithModel:vnCoreModel completionHandler:^(VNRequest * _Nonnull request, NSError * _Nullable error) {
        // Pick the classification with the highest confidence
        CGFloat confidence = 0.0f;
        VNClassificationObservation *tempClassification = nil;
        for (VNClassificationObservation *classification in request.results) {
            if (classification.confidence > confidence) {
                confidence = classification.confidence;
                tempClassification = classification;
            }
        }
        NSLog(@"Classification: %@", tempClassification.identifier);
        NSLog(@"Confidence: %f", tempClassification.confidence);
    }];
    VNImageRequestHandler *vnImageRequestHandler = [[VNImageRequestHandler alloc] initWithCGImage:image.CGImage options:@{}];
    NSError *error = nil;
    [vnImageRequestHandler performRequests:@[vnCoreRequest] error:&error];
    if (error) {
        NSLog(@"%@", error.localizedDescription);
    }
}
Output:
CoreMLDemo[1467:915475] Classification: Granny Smith
CoreMLDemo[1467:915475] Confidence: 0.982098
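Note that performRequests:error: runs synchronously, and a 50-layer network is not instant; a common pattern (a sketch, not part of the original demo) is to run the handler on a background queue and hop back to the main queue before touching any views:

dispatch_async(dispatch_get_global_queue(QOS_CLASS_USER_INITIATED, 0), ^{
    // Perform the Vision request off the main thread
    NSError *error = nil;
    [vnImageRequestHandler performRequests:@[vnCoreRequest] error:&error];
    dispatch_async(dispatch_get_main_queue(), ^{
        // Update labels or views with the classification result here
    });
});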
Face Detection
- (void)faceDetection {
    UIImage *image = self.showimage.image;
    // Use VNDetectRectanglesRequest for generic rectangle detection and
    // VNDetectFaceRectanglesRequest for face detection
    VNDetectFaceRectanglesRequest *faceRectang = [[VNDetectFaceRectanglesRequest alloc] initWithCompletionHandler:^(VNRequest * _Nonnull request, NSError * _Nullable error) {
        // request.results holds the observations produced by Vision (or by the model)
        for (VNFaceObservation *faceObservation in request.results) {
            CGFloat imageWidth = self.showimage.frame.size.width;
            CGFloat imageHeight = self.showimage.frame.size.height;
            // Convert the normalized, bottom-left-origin bounding box into
            // the view's top-left-origin coordinate space
            CGRect frame = CGRectMake(faceObservation.boundingBox.origin.x * imageWidth,
                                      (1.0 - faceObservation.boundingBox.origin.y - faceObservation.boundingBox.size.height) * imageHeight,
                                      faceObservation.boundingBox.size.width * imageWidth,
                                      faceObservation.boundingBox.size.height * imageHeight);
            NSLog(@"faceObservation.boundingBox = %@", NSStringFromCGRect(faceObservation.boundingBox));
            UIView *faceView = [[UIView alloc] initWithFrame:frame];
            faceView.layer.borderWidth = 2.0;
            faceView.layer.borderColor = [UIColor redColor].CGColor;
            [self.showimage addSubview:faceView];
        }
    }];
    VNImageRequestHandler *vnImageRequestHandler = [[VNImageRequestHandler alloc] initWithCGImage:image.CGImage options:@{}];
    NSError *error = nil;
    [vnImageRequestHandler performRequests:@[faceRectang] error:&error];
    if (error) {
        NSLog(@"%@", error.localizedDescription);
    }
}
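The CGRectMake call above packs the whole coordinate conversion into a single expression. The same math reads more clearly as a standalone helper; a sketch (the name MLConvertVisionRect is mine, and it assumes the image fills self.showimage exactly, with no aspect-fit letterboxing):

// Vision bounding boxes are normalized to [0,1] with a bottom-left origin;
// UIKit frames use points with a top-left origin.
static CGRect MLConvertVisionRect(CGRect boundingBox, CGSize viewSize) {
    CGFloat w = boundingBox.size.width * viewSize.width;
    CGFloat h = boundingBox.size.height * viewSize.height;
    CGFloat x = boundingBox.origin.x * viewSize.width;
    // Flip the y axis: Vision's origin.y is measured from the bottom edge
    CGFloat y = (1.0 - boundingBox.origin.y - boundingBox.size.height) * viewSize.height;
    return CGRectMake(x, y, w, h);
}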
Detection result
Face Landmark Detection
#pragma mark - Face Landmark Detection
- (void)faceRecognition {
    UIImage *image = self.showimage.image;
    VNDetectFaceLandmarksRequest *faceRectang = [[VNDetectFaceLandmarksRequest alloc] initWithCompletionHandler:^(VNRequest * _Nonnull request, NSError * _Nullable error) {
        // Container object that collects the detected features
        MLDetectModel *detectData = [[MLDetectModel alloc] init];
        NSLog(@"%lu", (unsigned long)request.results.count);
        // Walk the landmarks of every detected face
        for (VNFaceObservation *faceObservation in request.results) {
            // Face model that stores all landmark regions of one face
            MLFaceModel *faceModel = [[MLFaceModel alloc] init];
            // The 2D landmark set for this face
            VNFaceLandmarks2D *landmarks = faceObservation.landmarks;
            [self getAllkeyWithClass:[VNFaceLandmarks2D class] isProperty:YES block:^(NSString *key) {
                // Skip the aggregate property
                if ([key isEqualToString:@"allPoints"]) {
                    return;
                }
                // Fetch one landmark region, e.g. an eyebrow, the nose, an eye, or the lips
                VNFaceLandmarkRegion2D *region2D = [landmarks valueForKey:key];
                // A region can be nil for a given face; skip it rather than crash
                if (region2D == nil) {
                    return;
                }
                // Store the region on the face model
                [faceModel setValue:region2D forKey:key];
                [faceModel.allPoints addObject:region2D];
            }];
            faceModel.observation = faceObservation;
            [detectData.facePoints addObject:faceModel];
        }
        // Draw the landmarks of every face
        for (MLFaceModel *faceModel in detectData.facePoints) {
            self.showimage.image = [self drawImage:self.showimage.image faceObservation:faceModel.observation detectArray:faceModel.allPoints];
        }
    }];
    VNImageRequestHandler *vnImageRequestHandler = [[VNImageRequestHandler alloc] initWithCGImage:image.CGImage options:@{}];
    NSError *error = nil;
    [vnImageRequestHandler performRequests:@[faceRectang] error:&error];
    if (error) {
        NSLog(@"%@", error.localizedDescription);
    }
}
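The KVC walk above enumerates every VNFaceLandmarks2D property reflectively via getAllkeyWithClass:isProperty:block:. If you only need a handful of regions, you can also read them directly; a sketch (any region can be nil for a given face, so each one is checked before use):

VNFaceLandmarks2D *landmarks = faceObservation.landmarks;
VNFaceLandmarkRegion2D *candidates[] = {
    landmarks.faceContour, landmarks.leftEyebrow, landmarks.rightEyebrow,
    landmarks.leftEye, landmarks.rightEye, landmarks.nose,
    landmarks.outerLips, landmarks.innerLips
};
NSMutableArray<VNFaceLandmarkRegion2D *> *regions = [NSMutableArray array];
for (size_t i = 0; i < sizeof(candidates) / sizeof(candidates[0]); i++) {
    if (candidates[i] != nil) {
        [regions addObject:candidates[i]];
    }
}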
- (UIImage *)drawImage:(UIImage *)image faceObservation:(VNFaceObservation *)faceObservation detectArray:(NSArray *)detectArray {
    UIImage *sourceImage = image;
    // Stroke every landmark region onto the image
    for (VNFaceLandmarkRegion2D *landmarks2D in detectArray) {
        CGPoint points[landmarks2D.pointCount];
        // Map each normalized landmark point into image coordinates:
        // normalizedPoints are relative to the face's bounding box, which is
        // itself normalized to the image
        for (int i = 0; i < landmarks2D.pointCount; i++) {
            CGPoint point = landmarks2D.normalizedPoints[i];
            CGFloat rectWidth = sourceImage.size.width * faceObservation.boundingBox.size.width;
            CGFloat rectHeight = sourceImage.size.height * faceObservation.boundingBox.size.height;
            CGPoint p = CGPointMake(point.x * rectWidth + faceObservation.boundingBox.origin.x * sourceImage.size.width,
                                    faceObservation.boundingBox.origin.y * sourceImage.size.height + point.y * rectHeight);
            points[i] = p;
            [self.pointArray addObject:[NSValue valueWithCGPoint:p]];
        }
        UIGraphicsBeginImageContextWithOptions(sourceImage.size, NO, 1);
        CGContextRef context = UIGraphicsGetCurrentContext();
        [[UIColor redColor] set];
        CGContextSetLineWidth(context, 1.5);
        // Flip the context vertically: Vision uses a bottom-left origin
        CGContextTranslateCTM(context, 0, sourceImage.size.height);
        CGContextScaleCTM(context, 1.0, -1.0);
        // Line join and cap style
        CGContextSetLineJoin(context, kCGLineJoinRound);
        CGContextSetLineCap(context, kCGLineCapRound);
        // Anti-aliasing
        CGContextSetShouldAntialias(context, true);
        CGContextSetAllowsAntialiasing(context, true);
        // Draw the image, then stroke the landmark polyline on top
        CGRect rect = CGRectMake(0, 0, sourceImage.size.width, sourceImage.size.height);
        CGContextDrawImage(context, rect, sourceImage.CGImage);
        CGContextAddLines(context, points, landmarks2D.pointCount);
        CGContextDrawPath(context, kCGPathStroke);
        // Finish drawing
        sourceImage = UIGraphicsGetImageFromCurrentImageContext();
        UIGraphicsEndImageContext();
    }
    return sourceImage;
}
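Vision also ships helpers that replace most of this per-point math: VNImageRectForNormalizedRect (declared in VNUtils.h) maps a normalized bounding box into image coordinates, and VNFaceLandmarkRegion2D exposes pointsInImageOfSize:, which returns the landmark points already mapped into the image. A sketch:

// Face bounding box in pixel coordinates (still bottom-left origin)
CGRect boxInImage = VNImageRectForNormalizedRect(faceObservation.boundingBox,
                                                 (size_t)sourceImage.size.width,
                                                 (size_t)sourceImage.size.height);
// Landmark points already converted into image coordinates
const CGPoint *imagePoints = [landmarks2D pointsInImageOfSize:sourceImage.size];

Because these results keep Vision's bottom-left origin, the context flip in drawImage: is still needed when drawing with Core Graphics.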
Text Detection
#pragma mark - Text Detection
- (void)textDetection {
    UIImage *image = self.showimage.image;
    VNDetectTextRectanglesRequest *textRectangleRequest = [[VNDetectTextRectanglesRequest alloc] initWithCompletionHandler:^(VNRequest * _Nonnull request, NSError * _Nullable error) {
        NSLog(@"%lu", (unsigned long)request.results.count);
        for (VNTextObservation *observation in request.results) {
            // With reportCharacterBoxes enabled, each text observation also
            // carries one bounding box per character
            for (VNRectangleObservation *rectangleObservation in observation.characterBoxes) {
                CGFloat imageWidth = self.showimage.frame.size.width;
                CGFloat imageHeight = self.showimage.frame.size.height;
                CGRect frame = CGRectMake(rectangleObservation.boundingBox.origin.x * imageWidth,
                                          (1.0 - rectangleObservation.boundingBox.origin.y - rectangleObservation.boundingBox.size.height) * imageHeight,
                                          rectangleObservation.boundingBox.size.width * imageWidth,
                                          rectangleObservation.boundingBox.size.height * imageHeight);
                NSLog(@"rectangleObservation.boundingBox = %@", NSStringFromCGRect(rectangleObservation.boundingBox));
                UIView *textBoxView = [[UIView alloc] initWithFrame:frame];
                textBoxView.layer.borderWidth = 2.0;
                textBoxView.layer.borderColor = [UIColor redColor].CGColor;
                [self.showimage addSubview:textBoxView];
            }
        }
    }];
    // Report per-character boxes, not just whole text regions
    textRectangleRequest.reportCharacterBoxes = YES;
    VNImageRequestHandler *vnImageRequestHandler = [[VNImageRequestHandler alloc] initWithCGImage:image.CGImage options:@{}];
    NSError *error = nil;
    [vnImageRequestHandler performRequests:@[textRectangleRequest] error:&error];
    if (error) {
        NSLog(@"%@", error.localizedDescription);
    }
}
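Note that VNDetectTextRectanglesRequest only locates text; it does not read it. On iOS 13 and later, VNRecognizeTextRequest performs the actual recognition; a minimal sketch:

VNRecognizeTextRequest *ocrRequest = [[VNRecognizeTextRequest alloc] initWithCompletionHandler:^(VNRequest * _Nonnull request, NSError * _Nullable error) {
    for (VNRecognizedTextObservation *observation in request.results) {
        // Each observation offers ranked candidate strings
        VNRecognizedText *top = [observation topCandidates:1].firstObject;
        NSLog(@"Recognized: %@ (confidence %.2f)", top.string, top.confidence);
    }
}];
ocrRequest.recognitionLevel = VNRequestTextRecognitionLevelAccurate;

The request is performed with the same VNImageRequestHandler pattern used throughout this post.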