【作者前言】:13年入圈,分享些本人工作中遇到的点点滴滴那些事儿,17年刚开始写博客,高手勿喷!以分享交流为主,欢迎各路豪杰点评改进!
1.应用场景:
2.实现目标:
通过对图片的处理,实现毛玻璃效果,这里因为有些时候模糊程度是因具体需求变化的,所以把方法写活了,方便大家的调用。
3.代码说明:
方式一,推荐使用,效果比较好!Swift版本已更新!
#pragma mark -
#pragma mark - yp_blurryImage: withBlurLevel: returns a blurred copy of the given image
/** Attention points:
 ① Requires #import <Accelerate/Accelerate.h>
 ② `blur` controls the blur strength — larger is blurrier. It must be in
   [0, 1]; out-of-range values fall back to the default 0.5.

 Returns a blurred ("frosted glass") copy of `image` produced with a
 vImage box convolution, or the original image if anything fails
 (nil CGImage, allocation failure, or convolution error).
 */
- (UIImage *)yp_blurryImage:(UIImage *)image withBlurLevel:(CGFloat)blur {
    if (blur < 0.f || blur > 1.f) {
        blur = 0.5f;
    }
    // The box-convolution kernel size must be odd.
    int boxSize = (int)(blur * 40);
    boxSize = boxSize - (boxSize % 2) + 1;

    CGImageRef img = image.CGImage;
    if (img == NULL) {
        // CIImage-backed or empty UIImage — nothing we can convolve.
        return image;
    }

    vImage_Buffer inBuffer, outBuffer;
    vImage_Error error;

    CGDataProviderRef inProvider = CGImageGetDataProvider(img);
    CFDataRef inBitmapData = CGDataProviderCopyData(inProvider);

    inBuffer.width = CGImageGetWidth(img);
    inBuffer.height = CGImageGetHeight(img);
    inBuffer.rowBytes = CGImageGetBytesPerRow(img);
    inBuffer.data = (void *)CFDataGetBytePtr(inBitmapData);

    void *pixelBuffer = malloc(CGImageGetBytesPerRow(img) * CGImageGetHeight(img));
    if (pixelBuffer == NULL) {
        // Allocation failed — the original code only logged and then
        // convolved into a NULL buffer. Bail out instead.
        NSLog(@"No pixelbuffer");
        CFRelease(inBitmapData);
        return image;
    }

    outBuffer.data = pixelBuffer;
    outBuffer.width = inBuffer.width;
    outBuffer.height = inBuffer.height;
    outBuffer.rowBytes = inBuffer.rowBytes;

    error = vImageBoxConvolve_ARGB8888(&inBuffer,
                                       &outBuffer,
                                       NULL,
                                       0,
                                       0,
                                       boxSize,
                                       boxSize,
                                       NULL,
                                       kvImageEdgeExtend);
    if (error != kvImageNoError) {
        // On error the output buffer contents are undefined — do not
        // render them; return the source image unchanged.
        NSLog(@"error from convolution %ld", error);
        free(pixelBuffer);
        CFRelease(inBitmapData);
        return image;
    }

    CGColorSpaceRef colorSpace = CGColorSpaceCreateDeviceRGB();
    CGContextRef ctx = CGBitmapContextCreate(
                                             outBuffer.data,
                                             outBuffer.width,
                                             outBuffer.height,
                                             8,
                                             outBuffer.rowBytes,
                                             colorSpace,
                                             kCGImageAlphaNoneSkipLast);
    CGImageRef imageRef = CGBitmapContextCreateImage(ctx);
    UIImage *returnImage = [UIImage imageWithCGImage:imageRef];

    // Clean up. NOTE: the original released `colorSpace` twice, which
    // over-releases the CF object and can crash — release it exactly once.
    CGContextRelease(ctx);
    CGColorSpaceRelease(colorSpace);
    free(pixelBuffer);
    CFRelease(inBitmapData);
    CGImageRelease(imageRef);
    return returnImage;
}
// Usage: call the method directly with a source image and a blur level in [0, 1].
UIImage * image = [UIImage imageNamed:@"wukong"];
self.blurryImageView.image = [self yp_blurryImage:image withBlurLevel:0.1];
方式二,代码简洁,效果欠佳
// Create the frosted-glass (blur) effect view.
UIBlurEffect * blur = [UIBlurEffect effectWithStyle:UIBlurEffectStyleDark];
UIVisualEffectView * effe = [[UIVisualEffectView alloc] initWithEffect:blur];
// The blur view should exactly cover the view being blurred.
// (The original set both dimensions from `size.width` and shrank them by
// 100, contradicting this intent.)
effe.frame = self.blurryImageView.bounds;
effe.layer.masksToBounds = YES;
effe.layer.cornerRadius = 5;
// At alpha = 1 the blur effect is strongest.
effe.alpha = 0.5;
// Add the effect view on top of the view that should appear blurred.
[self.blurryImageView addSubview:effe];
两种方法均设置为0.5时的效果如下图
💕有的朋友需要方法一的swift版本,抽空翻译了一版,支持最新Swift4语法
注意:Swift 中需要导入 Accelerate 框架,即在文件顶部写 `import Accelerate`(Swift 使用 `import`,而非 Objective-C 的 `#import`)。
/// Returns a blurred ("frosted glass") copy of `image` using a vImage box
/// convolution. Swift port of `yp_blurryImage:withBlurLevel:`.
///
/// Requires `import Accelerate`.
///
/// - Parameters:
///   - image: The source image. Returned unchanged on any failure
///     (nil CGImage, allocation failure, convolution or context error).
///   - blur: Blur strength; larger is blurrier. Must be in 0...1;
///     out-of-range values fall back to the default 0.5.
/// - Returns: The blurred image, or the original image on failure.
func yp_burryImage(image:UIImage, blur:CGFloat) -> UIImage {
    let level = (blur < 0.0 || blur > 1.0) ? 0.5 : blur
    // The box-convolution kernel size must be odd.
    var boxSize = Int(level * 40)
    boxSize = boxSize - (boxSize % 2) + 1

    // Guard the optionals instead of force-unwrapping (`!`) — the original
    // crashed on any CIImage-backed or empty UIImage.
    guard let cgImg = image.cgImage,
          let inProvider = cgImg.dataProvider,
          let inBitmapData = inProvider.data else {
        return image
    }

    var inBuffer = vImage_Buffer()
    inBuffer.width = vImagePixelCount(cgImg.width)
    inBuffer.height = vImagePixelCount(cgImg.height)
    inBuffer.rowBytes = cgImg.bytesPerRow
    inBuffer.data = UnsafeMutableRawPointer(mutating: CFDataGetBytePtr(inBitmapData))

    // Manually allocated scratch buffer for the convolution output;
    // `malloc` can return nil, so check it rather than crashing later.
    guard let pixelBuffer = malloc(cgImg.bytesPerRow * cgImg.height) else {
        return image
    }

    var outBuffer = vImage_Buffer()
    outBuffer.data = pixelBuffer
    outBuffer.width = inBuffer.width
    outBuffer.height = inBuffer.height
    outBuffer.rowBytes = inBuffer.rowBytes

    // The original retried the *identical* call up to three times on
    // failure, which cannot change the outcome — one call suffices.
    let error = vImageBoxConvolve_ARGB8888(&inBuffer, &outBuffer, nil,
                                           vImagePixelCount(0), vImagePixelCount(0),
                                           UInt32(boxSize), UInt32(boxSize),
                                           nil, vImage_Flags(kvImageEdgeExtend))
    if error != kvImageNoError {
        // Output buffer contents are undefined on error — don't render them.
        free(pixelBuffer)
        return image
    }

    let colorSpace = CGColorSpaceCreateDeviceRGB()
    guard let ctx = CGContext(data: outBuffer.data,
                              width: Int(outBuffer.width),
                              height: Int(outBuffer.height),
                              bitsPerComponent: 8,
                              bytesPerRow: outBuffer.rowBytes,
                              space: colorSpace,
                              bitmapInfo: CGImageAlphaInfo.premultipliedLast.rawValue),
          let imageRef = ctx.makeImage() else {
        free(pixelBuffer)
        return image
    }
    let returnImage = UIImage.init(cgImage: imageRef)
    // Clean up the manually allocated buffer.
    free(pixelBuffer)
    return returnImage
}