Decoding setup
Set the itemOutput's pixelFormat to 420v or 420f.
- (void)p_createPlayerItemOutput {
    if (!self.itemOutput) {
        // 420f (full range); use kCVPixelFormatType_420YpCbCr8BiPlanarVideoRange for 420v.
        NSMutableDictionary *pixBuffAttributes = [NSMutableDictionary dictionary];
        [pixBuffAttributes setObject:@(kCVPixelFormatType_420YpCbCr8BiPlanarFullRange)
                              forKey:(id)kCVPixelBufferPixelFormatTypeKey];
        self.itemOutput = [[AVPlayerItemVideoOutput alloc] initWithPixelBufferAttributes:pixBuffAttributes];
        // Pull-delegate callbacks are delivered on operationQueue.
        [self.itemOutput setDelegate:self queue:operationQueue];
        [self.itemOutput requestNotificationOfMediaDataChangeWithAdvanceInterval:ONE_FRAME_DURATION];
        [_playerItem addOutput:self.itemOutput];
    }
}
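Once media data becomes available, the output notifies the pull delegate set above. A minimal sketch of that callback, assuming rendering is driven by the MTKView created in the next section and that its draw loop is paused while no data is available (a design assumption, not from this article):

- (void)outputMediaDataWillChange:(AVPlayerItemOutput *)sender {
    // Called on operationQueue; hop to the main queue before touching the view.
    dispatch_async(dispatch_get_main_queue(), ^{
        self.preivew.paused = NO; // assumption: the MTKView's draw loop was paused while idle
    });
}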
Creating the preview & textureCache
1. Initialize the MTKView
2. Set the MTKView's device
3. Create the CVMetalTextureCache with the device
- (void)p_initPreview:(CGRect)frame {
    self.preivew = [[MTKView alloc] initWithFrame:frame];
    self.preivew.device = MTLCreateSystemDefaultDevice();
    self.preivew.delegate = self;
    // self.preivew.framebufferOnly = NO;
    self.viewportSize = (vector_uint2){self.preivew.drawableSize.width, self.preivew.drawableSize.height};
    CVMetalTextureCacheCreate(NULL, NULL, self.preivew.device, NULL, &_textureCache);
}
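MTKViewDelegate also delivers a size-change callback; keeping viewportSize in sync there is a simple way to handle rotation and resizing. A minimal sketch:

#pragma mark - MTKViewDelegate

- (void)mtkView:(MTKView *)view drawableSizeWillChange:(CGSize)size {
    // Keep the cached viewport in sync with the drawable (rotation, layout changes).
    self.viewportSize = (vector_uint2){size.width, size.height};
}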
Creating the pipeline
1. Create an MTLLibrary from the MTKView's device
2. Get the vertex function and fragment function from the library
3. Create an MTLRenderPipelineDescriptor and set its vertex function, fragment function and pixelFormat
4. Generate the MTLRenderPipelineState from the descriptor; creating it is expensive, so keep a strong reference to it
5. Create an MTLCommandQueue from the MTKView's device; creating it is expensive, so keep a strong reference to it
- (void)p_setupPipeline {
    NSString *bundlePath = [[NSBundle bundleForClass:[self class]].resourcePath
                            stringByAppendingPathComponent:@"/ZZH_Sharder.bundle"];
    NSBundle *resource_bundle = [NSBundle bundleWithPath:bundlePath];
    NSError *error;
    if (@available(iOS 10.0, *)) {
        id<MTLLibrary> defaultLibrary = [self.preivew.device newDefaultLibraryWithBundle:resource_bundle error:&error];
        if (error) {
            ZZHDebugLog(@"%@", error);
        }
        id<MTLFunction> vertexFunc = [defaultLibrary newFunctionWithName:@"vertexShader"];
        id<MTLFunction> fragmentFunc = [defaultLibrary newFunctionWithName:@"samplingShader"];
        MTLRenderPipelineDescriptor *des = [[MTLRenderPipelineDescriptor alloc] init];
        des.vertexFunction = vertexFunc;
        des.fragmentFunction = fragmentFunc;
        des.colorAttachments[0].pixelFormat = self.preivew.colorPixelFormat;
        self.pipelineState = [self.preivew.device newRenderPipelineStateWithDescriptor:des error:nil];
        self.commandQueue = [self.preivew.device newCommandQueue];
    } else {
        // iOS 9 and earlier: see the library-loading fallback sketched in the notes below.
    }
}
Notes
1. MTLLibrary initialization
- (nullable id <MTLLibrary>)newDefaultLibrary;
- (nullable id <MTLLibrary>)newDefaultLibraryWithBundle:(NSBundle *)bundle error:(__autoreleasing NSError **)error
newDefaultLibrary only scans the .metal files in the main bundle; if the .metal files are not in the main bundle, the library has to be created with the bundle-specific method. A fallback sketch for systems without that method follows.
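Where newDefaultLibraryWithBundle:error: is unavailable (the empty else branch above), one option is to load the precompiled metallib from the resource bundle by path. A sketch, assuming the bundle ships a library named default.metallib (the name is an assumption):

NSString *libPath = [resource_bundle pathForResource:@"default" ofType:@"metallib"]; // library name is an assumption
NSError *libError = nil;
id<MTLLibrary> library = [self.preivew.device newLibraryWithFile:libPath error:&libError];
if (!library) {
    ZZHDebugLog(@"%@", libError);
}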
Initializing vertex and texture coordinates
- (void)p_setupVertex {
    static const ZZHVertex quadVertices[] =
    {   // Vertex coordinates (x, y, z, w); texture coordinates (x, y)
        { {  1.0, -1.0, 0.0, 1.0 }, { 1.f, 1.f } },
        { { -1.0, -1.0, 0.0, 1.0 }, { 0.f, 1.f } },
        { { -1.0,  1.0, 0.0, 1.0 }, { 0.f, 0.f } },
        { {  1.0, -1.0, 0.0, 1.0 }, { 1.f, 1.f } },
        { { -1.0,  1.0, 0.0, 1.0 }, { 0.f, 0.f } },
        { {  1.0,  1.0, 0.0, 1.0 }, { 1.f, 0.f } },
    };
    self.vertices = [self.preivew.device newBufferWithBytes:quadVertices
                                                     length:sizeof(quadVertices)
                                                    options:MTLResourceStorageModeShared]; // Create the vertex buffer
    self.numVertices = sizeof(quadVertices) / sizeof(ZZHVertex); // Number of vertices
}
Notes
1. Clip-space vertex coordinates have their origin at the center of the view; Metal texture coordinates have their origin at the top-left corner (unlike OpenGL, where it is the bottom-left).
2. The texture is filled from the image's pixels, and the image is stored starting from its top-left pixel. With Metal's top-left texture origin, mapping the top of the quad (y = +1) to v = 0 and the bottom (y = -1) to v = 1, as the coordinates above do, displays the frame right side up without any extra flip.
Setting up the conversion matrix
- (void)p_setupMatrix { // Set up the YUV -> RGB conversion matrix
    // NOTE: despite the name, these coefficients are the BT.709 video-range values;
    // the matrix and offset should match the pixel buffer's color space and range (see the notes below).
    matrix_float3x3 kColorConversion601FullRangeMatrix = (matrix_float3x3){
        (simd_float3){1.164, 1.164, 1.164},
        (simd_float3){0.0, -0.213, 2.112},
        (simd_float3){1.793, -0.533, 0.0},
    };
    // The shader normalizes sampled values to 0..1, while the raw UV components span -128..+128;
    // to convert to RGB correctly they must first be shifted into the -0.5..0.5 range.
    vector_float3 kColorConversion601VideoRangeOffset = (vector_float3){ 0, -0.5, -0.5}; // the offset
    ZZHConvertMatrix matrix;
    // Fill in the parameters
    matrix.matrix = kColorConversion601FullRangeMatrix;
    matrix.offset = kColorConversion601VideoRangeOffset;
    self.convertMatrix = [self.preivew.device newBufferWithBytes:&matrix
                                                          length:sizeof(ZZHConvertMatrix)
                                                         options:MTLResourceStorageModeShared];
}
Notes
1. The YUV-to-RGB conversion matrix is determined mainly by the YUV color space of the content (BT.601 vs BT.709) and its range (video vs full).
2. The shader normalizes sampled values to 0..1, while the UV components originally span -128..+128; to compute RGB correctly they must be shifted into the -0.5..0.5 range. For video-range content, Y additionally needs 16/255.0 subtracted. A sketch of picking the parameters from the pixel buffer follows.
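A sketch of deriving the conversion parameters from a decoded pixelBuffer; the BT.601 full-range coefficients shown here are the commonly published values, not taken from this article:

// Given a CVPixelBufferRef pixelBuffer:
CFTypeRef matrixType = CVBufferGetAttachment(pixelBuffer, kCVImageBufferYCbCrMatrixKey, NULL);
BOOL isBT709 = matrixType != NULL &&
    CFStringCompare((CFStringRef)matrixType, kCVImageBufferYCbCrMatrix_ITU_R_709_2, 0) == kCFCompareEqualTo;

OSType format = CVPixelBufferGetPixelFormatType(pixelBuffer);
BOOL isFullRange = (format == kCVPixelFormatType_420YpCbCr8BiPlanarFullRange); // 420f

// Approximate BT.601 full-range coefficients (standard published values, an assumption here).
matrix_float3x3 bt601FullRangeMatrix = (matrix_float3x3){
    (simd_float3){1.0,    1.0,    1.0},   // Y column
    (simd_float3){0.0,   -0.344,  1.772}, // U column
    (simd_float3){1.402, -0.714,  0.0},   // V column
};
// For video range (420v), Y is additionally shifted by 16/255 before the matrix is applied.
vector_float3 offset = isFullRange ? (vector_float3){0.0, -0.5, -0.5}
                                   : (vector_float3){-16.0 / 255.0, -0.5, -0.5};
// isBT709 would select a BT.709 matrix instead of bt601FullRangeMatrix.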
Frame rendering
1. Create the MTLCommandBuffer for this pass from the commandQueue
2. Get the MTLRenderPassDescriptor from the MTKView
3. Set the MTLRenderPassDescriptor's clearColor
4. Create an MTLRenderCommandEncoder from the MTLCommandBuffer and the MTLRenderPassDescriptor
5. Set the viewport on the encoder
6. Set the renderPipelineState on the encoder
7. Set the vertex buffer on the encoder
8. Upload the textures via the encoder
9. Set the fragment buffer (the conversion matrix) on the encoder
10. encoder drawing
11. encoder endEncoding
12. commandBuffer presentDrawable
13. commandBuffer commit
- (void)displayPixelBuffer:(CVPixelBufferRef)pixelBuffer {
    /* Two ways to obtain color-space information from the pixel buffer:
    CFTypeRef colorAttachments = CVBufferGetAttachment(pixelBuffer, kCVImageBufferYCbCrMatrixKey, NULL);
    OSType pixelFormat = CVPixelBufferGetPixelFormatType(pixelBuffer);
    CFDictionaryRef cfMetadataDic = CMCopyDictionaryOfAttachments(NULL, pixelBuffer, kCMAttachmentMode_ShouldPropagate);
    NSDictionary *nsMetadataDic = (__bridge NSDictionary *_Nonnull)(cfMetadataDic);
    // The format description created from the pixelFormatType turns out to match the pixel buffer's format
    CFDictionaryRef formatDes = CVPixelFormatDescriptionCreateWithPixelFormatType(kCFAllocatorDefault, CVPixelBufferGetPixelFormatType(pixelBuffer));
    */
    id<MTLCommandBuffer> commandBuffer = [self.commandQueue commandBuffer];
    MTLRenderPassDescriptor *renderPassDes = self.preivew.currentRenderPassDescriptor;
    if (renderPassDes && pixelBuffer) {
        renderPassDes.colorAttachments[0].clearColor = MTLClearColorMake(0, 0.f, 0.f, 1.f);
        id<MTLRenderCommandEncoder> renderEncoder = [commandBuffer renderCommandEncoderWithDescriptor:renderPassDes];
        [renderEncoder setViewport:(MTLViewport){0.0, 0.0, self.viewportSize.x, self.viewportSize.y, -1.0, 1.0 }]; // Set the viewport (display region)
        [renderEncoder setRenderPipelineState:self.pipelineState]; // Set the render pipeline state so the vertex and fragment shaders are invoked
        [renderEncoder setVertexBuffer:self.vertices offset:0 atIndex:ZZHVertexInputIndexVertices];
        [self p_setupTextureWithEncoder:renderEncoder pixelBuffer:pixelBuffer];
        [renderEncoder setFragmentBuffer:self.convertMatrix offset:0 atIndex:ZZHFragmentInputIndexMatrix];
        [renderEncoder drawPrimitives:MTLPrimitiveTypeTriangle vertexStart:0 vertexCount:self.numVertices];
        [renderEncoder endEncoding];
        [commandBuffer presentDrawable:self.preivew.currentDrawable];
    }
    [commandBuffer commit];
}
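The display method above needs to be driven from somewhere; one common choice is the MTKView draw callback, which pulls the newest decoded frame from the item output each tick. A minimal sketch, assuming the pull-based setup from the decoding section:

- (void)drawInMTKView:(MTKView *)view {
    // Ask the output which item time corresponds to "now" and pull a frame if one is ready.
    CMTime itemTime = [self.itemOutput itemTimeForHostTime:CACurrentMediaTime()];
    if ([self.itemOutput hasNewPixelBufferForItemTime:itemTime]) {
        CVPixelBufferRef pixelBuffer = [self.itemOutput copyPixelBufferForItemTime:itemTime
                                                                itemTimeForDisplay:NULL];
        [self displayPixelBuffer:pixelBuffer];
        if (pixelBuffer) {
            CVPixelBufferRelease(pixelBuffer); // copyPixelBufferForItemTime: follows the Create/Copy rule
        }
    }
}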
Uploading textures
- (void)p_setupTextureWithEncoder:(id<MTLRenderCommandEncoder>)encoder pixelBuffer:(CVPixelBufferRef)pixelBuffer {
    id<MTLTexture> textureY = nil;
    id<MTLTexture> textureUV = nil;
    // Plane 0: luminance (Y), one byte per pixel -> R8Unorm
    size_t width = CVPixelBufferGetWidthOfPlane(pixelBuffer, 0);
    size_t height = CVPixelBufferGetHeightOfPlane(pixelBuffer, 0);
    CVMetalTextureRef yTexture = NULL;
    CVReturn status = CVMetalTextureCacheCreateTextureFromImage(NULL, self.textureCache, pixelBuffer, NULL, MTLPixelFormatR8Unorm, width, height, 0, &yTexture);
    if (status == kCVReturnSuccess) {
        textureY = CVMetalTextureGetTexture(yTexture);
        CFRelease(yTexture);
    }
    // Plane 1: interleaved chroma (CbCr), two bytes per pixel -> RG8Unorm
    width = CVPixelBufferGetWidthOfPlane(pixelBuffer, 1);
    height = CVPixelBufferGetHeightOfPlane(pixelBuffer, 1);
    CVMetalTextureRef uvTexture = NULL;
    status = CVMetalTextureCacheCreateTextureFromImage(NULL, self.textureCache, pixelBuffer, NULL, MTLPixelFormatRG8Unorm, width, height, 1, &uvTexture);
    if (status == kCVReturnSuccess) {
        textureUV = CVMetalTextureGetTexture(uvTexture);
        CFRelease(uvTexture);
    }
    if (textureY && textureUV) {
        [encoder setFragmentTexture:textureY atIndex:ZZHFragmentTextureIndexTextureY];
        [encoder setFragmentTexture:textureUV atIndex:ZZHFragmentTextureIndexTextureUV];
    }
}
Notes
1. The UV data is uploaded as a single RG-format texture (MTLPixelFormatRG8Unorm).
2. The texture width and height must be those of the corresponding plane, not of the whole buffer. (A teardown sketch for the texture cache follows.)
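When playback stops or the player item changes, it is worth flushing and releasing the texture cache so recycled pixel buffers do not pin stale textures. A minimal teardown sketch (the helper name is hypothetical):

- (void)p_releaseTextureCache {  // hypothetical helper, not from the article
    if (_textureCache) {
        CVMetalTextureCacheFlush(_textureCache, 0);
        CFRelease(_textureCache);
        _textureCache = NULL;
    }
}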
Metal shaders
typedef struct {
    float4 clipSpacePosition [[position]];
    float2 textureCoordinate;
} RasterizerData;

// vertex_id is the index of the vertex being processed, used to locate the current vertex
// buffer marks the argument as buffer data; ZZHVertexInputIndexVertices is its index
vertex RasterizerData vertexShader(uint vertexID [[vertex_id]],
                                   constant ZZHVertex *vertexArray [[buffer(ZZHVertexInputIndexVertices)]]) {
    RasterizerData out;
    out.clipSpacePosition = vertexArray[vertexID].position;
    out.textureCoordinate = vertexArray[vertexID].textureCoordinate;
    return out;
}

// stage_in: marks data that comes from the output of the vertex stage
// texture marks the argument as a texture; ZZHFragmentTextureIndexTextureY is its index
fragment float4 samplingShader(RasterizerData input [[stage_in]],
                               texture2d<float> textureY [[texture(ZZHFragmentTextureIndexTextureY)]],
                               texture2d<float> textureUV [[texture(ZZHFragmentTextureIndexTextureUV)]],
                               constant ZZHConvertMatrix *convertMatrix [[buffer(ZZHFragmentInputIndexMatrix)]]) {
    constexpr sampler textureSampler (mag_filter::linear, min_filter::linear);
    float3 yuv = float3(textureY.sample(textureSampler, input.textureCoordinate).r,
                        textureUV.sample(textureSampler, input.textureCoordinate).rg);
    float3 rgb = convertMatrix->matrix * (yuv + convertMatrix->offset);
    return float4(rgb, 1.0);
}
Notes
1. Metal shader code that lives in a pod has to reference its headers via a modulemap.
2. [[buffer(ZZHFragmentInputIndexMatrix)]]: buffer marks the argument as buffer data, and ZZHFragmentInputIndexMatrix is its index; it corresponds to the encoder APIs:
- (void)setFragmentBuffer:(nullable id <MTLBuffer>)buffer offset:(NSUInteger)offset atIndex:(NSUInteger)index;
- (void)setVertexBuffer:(nullable id <MTLBuffer>)buffer offset:(NSUInteger)offset atIndex:(NSUInteger)index;
3. [[vertex_id]]: the index of the vertex currently being processed by the vertex shader, used to locate the current vertex.
4. [[stage_in]]: marks data that comes from the output of the vertex stage (interpolated by the rasterizer).
5. Metal has two main address spaces for shader arguments, device and constant, specified explicitly in the code.
device is the general-purpose mode with few restrictions, while constant is a fast, read-only mode designed for data that is read many times; data accessed through constant memory has a fixed byte size. In short:
device supports reads and writes and has no size limit;
constant is read-only and has a fixed size.
How to choose between device and constant?
First check whether the data size can vary, then how frequently it is accessed; only fixed-size, frequently read data is a good fit for constant, everything else should use device.
6. A sampler decides how a texture is sampled: addressing mode, filter mode, normalized coordinates, compare function.
Samplers initialized inside Metal shader code must be declared with the constexpr qualifier.
Pointers and references to samplers are not supported and cause a compile error. A sampler can also be created on the API side and bound on the encoder; a sketch follows.
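As an alternative to the constexpr sampler declared inside the shader, a sampler state can be built once on the API side and bound on the render encoder. A sketch (the binding index 0 is an assumption):

MTLSamplerDescriptor *samplerDes = [[MTLSamplerDescriptor alloc] init];
samplerDes.minFilter = MTLSamplerMinMagFilterLinear;
samplerDes.magFilter = MTLSamplerMinMagFilterLinear;
samplerDes.sAddressMode = MTLSamplerAddressModeClampToEdge;
samplerDes.tAddressMode = MTLSamplerAddressModeClampToEdge;
id<MTLSamplerState> samplerState = [self.preivew.device newSamplerStateWithDescriptor:samplerDes];
// In the frame render pass:
// [renderEncoder setFragmentSamplerState:samplerState atIndex:0];
// The shader then takes the sampler as an argument attributed [[sampler(0)]] instead of declaring it constexpr.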