Code walkthrough:
import { mat4, vec3 } from 'wgpu-matrix';
import { makeSample, SampleInit } from '../../components/SampleLayout';
import {
cubeVertexArray,
cubeVertexSize,
cubeUVOffset,
cubePositionOffset,
cubeVertexCount,
} from '../../meshes/cube';
import basicVertWGSL from '../../shaders/basic.vert.wgsl';
import sampleSelfWGSL from './sampleSelf.frag.wgsl';
const init: SampleInit = async ({ canvas, pageState }) => {
const adapter = await navigator.gpu.requestAdapter();
const device = await adapter.requestDevice();
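// Note: requestAdapter() resolves to null when WebGPU is unavailable,
// so real code should guard before calling requestDevice().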
if (!pageState.active) return;
const context = canvas.getContext('webgpu') as GPUCanvasContext;
const devicePixelRatio = window.devicePixelRatio;
canvas.width = canvas.clientWidth * devicePixelRatio;
canvas.height = canvas.clientHeight * devicePixelRatio;
const presentationFormat = navigator.gpu.getPreferredCanvasFormat();
context.configure({
device,
format: presentationFormat,
// Specify we want both RENDER_ATTACHMENT and COPY_SRC since we
// will copy out of the swapchain texture.
// Configure the GPU context's usage as RENDER_ATTACHMENT plus COPY_SRC;
// COPY_SRC is what lets copyTextureToTexture read from the canvas texture.
usage: GPUTextureUsage.RENDER_ATTACHMENT | GPUTextureUsage.COPY_SRC,
alphaMode: 'premultiplied',
});
// Create a vertex buffer from the cube data.
const verticesBuffer = device.createBuffer({
size: cubeVertexArray.byteLength,
usage: GPUBufferUsage.VERTEX,
mappedAtCreation: true,
});
new Float32Array(verticesBuffer.getMappedRange()).set(cubeVertexArray);
verticesBuffer.unmap();
const pipeline = device.createRenderPipeline({
layout: 'auto',
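// 'auto' derives the bind group layouts from the shader modules;
// pipeline.getBindGroupLayout(0) below depends on this.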
vertex: {
module: device.createShaderModule({
code: basicVertWGSL,
}),
entryPoint: 'main',
buffers: [
{
arrayStride: cubeVertexSize,
attributes: [
{
// position
shaderLocation: 0,
offset: cubePositionOffset,
format: 'float32x4',
},
{
// uv
shaderLocation: 1,
offset: cubeUVOffset,
format: 'float32x2',
},
],
},
],
},
fragment: {
module: device.createShaderModule({
code: sampleSelfWGSL,
}),
entryPoint: 'main',
targets: [
{
format: presentationFormat,
},
],
},
primitive: {
topology: 'triangle-list',
// Backface culling since the cube is a solid piece of geometry.
// Faces pointing away from the camera will be occluded by faces
// pointing toward the camera.
cullMode: 'back',
},
// Enable depth testing so that the fragment closest to the camera
// is rendered in front.
depthStencil: {
depthWriteEnabled: true,
depthCompare: 'less',
format: 'depth24plus',
},
});
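// The depth texture below is sized once from the initial canvas dimensions
// and never recreated on resize (see the sketch after this sample).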
const depthTexture = device.createTexture({
size: [canvas.width, canvas.height],
format: 'depth24plus',
usage: GPUTextureUsage.RENDER_ATTACHMENT,
});
const uniformBufferSize = 4 * 16; // 4x4 matrix
const uniformBuffer = device.createBuffer({
size: uniformBufferSize,
usage: GPUBufferUsage.UNIFORM | GPUBufferUsage.COPY_DST,
});
// We will copy the frame's rendering results into this texture and
// sample it on the next frame.
// It serves as the input texture for the next frame.
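// Using presentationFormat keeps it copy-compatible with the swapchain
// texture that copyTextureToTexture reads from.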
const cubeTexture = device.createTexture({
size: [canvas.width, canvas.height],
format: presentationFormat,
usage: GPUTextureUsage.TEXTURE_BINDING | GPUTextureUsage.COPY_DST,
});
// Create a sampler with linear filtering for smooth interpolation.
const sampler = device.createSampler({
magFilter: 'linear',
minFilter: 'linear',
});
const uniformBindGroup = device.createBindGroup({
layout: pipeline.getBindGroupLayout(0),
entries: [
{
binding: 0,
resource: {
buffer: uniformBuffer,
},
},
{
binding: 1,
resource: sampler,
},
{
binding: 2,
resource: cubeTexture.createView(),
},
],
});
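// Bindings 0 (uniform buffer), 1 (sampler), and 2 (texture view) line up
// with the @binding declarations in the shaders below.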
const renderPassDescriptor: GPURenderPassDescriptor = {
colorAttachments: [
{
view: undefined, // Assigned later
clearValue: { r: 0.5, g: 0.5, b: 0.5, a: 1.0 },
loadOp: 'clear',
storeOp: 'store',
},
],
depthStencilAttachment: {
view: depthTexture.createView(),
depthClearValue: 1.0,
depthLoadOp: 'clear',
depthStoreOp: 'store',
},
};
const aspect = canvas.width / canvas.height;
const projectionMatrix = mat4.perspective(
(2 * Math.PI) / 5,
aspect,
1,
100.0
);
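// The perspective above uses a 2π/5 rad (72°) vertical field of view,
// with the near plane at 1 and the far plane at 100.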
const modelViewProjectionMatrix = mat4.create();
function getTransformationMatrix() {
// Build the view matrix: pull the camera back 4 units along -Z.
const viewMatrix = mat4.identity();
mat4.translate(viewMatrix, vec3.fromValues(0, 0, -4), viewMatrix);
// Rotate by 1 radian around an axis that swings with time.
const now = Date.now() / 1000;
mat4.rotate(
viewMatrix,
vec3.fromValues(Math.sin(now), Math.cos(now), 0),
1,
viewMatrix
);
// Combine projection and view into the final MVP matrix.
mat4.multiply(projectionMatrix, viewMatrix, modelViewProjectionMatrix);
return modelViewProjectionMatrix as Float32Array;
}
function frame() {
// Sample is no longer the active page.
if (!pageState.active) return;
const transformationMatrix = getTransformationMatrix();
device.queue.writeBuffer(
uniformBuffer,
0,
transformationMatrix.buffer,
transformationMatrix.byteOffset,
transformationMatrix.byteLength
);
const swapChainTexture = context.getCurrentTexture();
// Get the view from the canvas's current texture.
renderPassDescriptor.colorAttachments[0].view = swapChainTexture.createView();
const commandEncoder = device.createCommandEncoder();
const passEncoder = commandEncoder.beginRenderPass(renderPassDescriptor);
passEncoder.setPipeline(pipeline);
passEncoder.setBindGroup(0, uniformBindGroup);
passEncoder.setVertexBuffer(0, verticesBuffer);
passEncoder.draw(cubeVertexCount);
passEncoder.end();
// Copy the rendering results from the swapchain into |cubeTexture|.
// Texture-to-texture copy: this frame's rendering result goes into
// cubeTexture, which is fed back to the shader as next frame's input.
commandEncoder.copyTextureToTexture(
{
texture: swapChainTexture,
},
{
texture: cubeTexture,
},
[canvas.width, canvas.height]
);
device.queue.submit([commandEncoder.finish()]);
requestAnimationFrame(frame);
}
requestAnimationFrame(frame);
};
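One caveat the sample leaves out: depthTexture and cubeTexture are sized once from the initial canvas dimensions, and copyTextureToTexture requires the copy extent to fit within both source and destination. If the canvas can resize, the size-dependent textures (and the bind group holding cubeTexture's view) must be rebuilt. A minimal sketch, assuming a hypothetical recreateTargets() helper called whenever the canvas backing size changes:

```ts
// Hypothetical helper (not part of the original sample): recreate the
// size-dependent textures after a canvas resize. The bind group must be
// rebuilt as well, since it holds a view of the old cubeTexture.
function recreateTargets(
  device: GPUDevice,
  canvas: HTMLCanvasElement,
  format: GPUTextureFormat
) {
  const depthTexture = device.createTexture({
    size: [canvas.width, canvas.height],
    format: 'depth24plus',
    usage: GPUTextureUsage.RENDER_ATTACHMENT,
  });
  const cubeTexture = device.createTexture({
    size: [canvas.width, canvas.height],
    format,
    usage: GPUTextureUsage.TEXTURE_BINDING | GPUTextureUsage.COPY_DST,
  });
  return { depthTexture, cubeTexture };
}
```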
Vertex shader
struct Uniforms {
modelViewProjectionMatrix : mat4x4<f32>,
}
@binding(0) @group(0) var<uniform> uniforms : Uniforms;
struct VertexOutput {
@builtin(position) Position : vec4<f32>,
@location(0) fragUV : vec2<f32>,
@location(1) fragPosition: vec4<f32>,
}
@vertex
fn main(
@location(0) position : vec4<f32>,
@location(1) uv : vec2<f32>
) -> VertexOutput {
var output : VertexOutput;
output.Position = uniforms.modelViewProjectionMatrix * position;
output.fragUV = uv;
output.fragPosition = 0.5 * (position + vec4(1.0, 1.0, 1.0, 1.0));
return output;
}
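A note on fragPosition: 0.5 * (position + 1) remaps each object-space coordinate from [-1, 1] into [0, 1], which is what lets the fragment shader use it directly as a color. A quick numeric check (TypeScript, illustration only):

```ts
// 0.5 * (p + 1) maps -1 -> 0.0, 0 -> 0.5, +1 -> 1.0
const remap = (p: number) => 0.5 * (p + 1);
console.log([-1, 0, 1].map(remap)); // [0, 0.5, 1]
```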
Fragment shader
@binding(1) @group(0) var mySampler: sampler;
// The texture rendered on the previous frame.
@binding(2) @group(0) var myTexture: texture_2d<f32>;
@fragment
fn main(
@location(0) fragUV: vec2<f32>,
@location(1) fragPosition: vec4<f32>
) -> @location(0) vec4<f32> {
// Sample the previous frame's texture; the UVs are remapped from [0, 1]
// to [0.1, 0.9], so an inset region of the last image is sampled.
let texColor = textureSample(myTexture, mySampler, fragUV * 0.8 + vec2(0.1));
// f is 0.0 when the sampled color is within 0.01 of the 0.5 gray clear
// color, and 1.0 otherwise (select(f, t, cond) yields t when cond is true).
let f = select(1.0, 0.0, length(texColor.rgb - vec3(0.5)) < 0.01);
// Where f is 1, keep the sampled color; where f is 0 (the gray background
// was sampled), fall back to the position-based color.
return f * texColor + (1.0 - f) * fragPosition;
}
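To make the masking logic concrete, here is a CPU-side TypeScript analogue of the fragment shader (illustrative only): WGSL's select(f, t, cond) returns t when cond is true, so fragments that sampled the gray background take the position-based color instead.

```ts
// CPU-side sketch of the fragment shader's masking and blend.
function shadeFragment(
  texColor: [number, number, number, number],
  fragPosition: [number, number, number, number]
): number[] {
  const [r, g, b] = texColor;
  // Distance of the sampled RGB from the 0.5 gray clear color.
  const dist = Math.hypot(r - 0.5, g - 0.5, b - 0.5);
  // WGSL: select(1.0, 0.0, dist < 0.01) -> 0.0 when dist < 0.01.
  const f = dist < 0.01 ? 0.0 : 1.0;
  // f * texColor + (1 - f) * fragPosition, per component.
  return texColor.map((c, i) => f * c + (1 - f) * fragPosition[i]);
}
```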
Summary
The render-to-texture drawing process comes down to these steps:
- `context.configure`: the context must be configured with a usage that permits copying out of it: `usage: GPUTextureUsage.RENDER_ATTACHMENT | GPUTextureUsage.COPY_SRC`.
- Create the destination texture (`cubeTexture`) and hand it to the shader through the uniform bind group.
- Each frame, assign the color attachment's view:
  const swapChainTexture = context.getCurrentTexture();
  // Get the view from the canvas's current texture
  renderPassDescriptor.colorAttachments[0].view = swapChainTexture.createView();
- After ending the pass with `passEncoder.end()`, copy the result using `commandEncoder.copyTextureToTexture`:
  commandEncoder.copyTextureToTexture(
    {
      texture: swapChainTexture,
    },
    {
      texture: cubeTexture,
    },
    [canvas.width, canvas.height]
  );
- Finally, submit the work with `device.queue.submit([commandEncoder.finish()])`.
- In the fragment shader, sample the previous frame's texture and draw it onto this frame's cube.
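A closing design note, not part of the sample: the copy exists because the canvas texture cannot simply be kept around and bound for sampling on the next frame. A copy-free variant would ping-pong between two bindable render targets. The sketch below assumes the sample's device, canvas, and presentationFormat, and is an alternative rather than what the sample does:

```ts
// Copy-free variant (a sketch, not the sample's method): render into
// targets[frameIndex] while sampling targets[1 - frameIndex], then
// present the result to the canvas in a second pass or fullscreen blit.
const targets = [0, 1].map(() =>
  device.createTexture({
    size: [canvas.width, canvas.height],
    format: presentationFormat,
    usage:
      GPUTextureUsage.RENDER_ATTACHMENT | GPUTextureUsage.TEXTURE_BINDING,
  })
);
let frameIndex = 0;
// At the end of each frame: frameIndex = 1 - frameIndex;
```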