Implementing a Water Ripple Effect with WebGPU

Basics

1. Basic triangle (static):

  1. get adapter, device & ctx (note the order: the device is requested from the adapter, not from navigator.gpu)
const adapter = await navigator.gpu.requestAdapter();
const device = await adapter.requestDevice();
const ctx = canvas.getContext('webgpu');
  2. config ctx
ctx.configure({
  device,
  format: navigator.gpu.getPreferredCanvasFormat(),
});
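A side note beyond the original snippet: the render passes below clear to a transparent color, but by default the canvas is composited as opaque. If transparency should actually show through, configure needs an alphaMode; a sketch:

ctx.configure({
  device,
  format: navigator.gpu.getPreferredCanvasFormat(),
  alphaMode: 'premultiplied', // default is 'opaque', which ignores alpha
});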
  3. create pipeline. A pipeline defines rendering & computation: think of it as a factory assembly line whose stations perform different steps to produce different parts.
import fragShader from './shaders/frag.wgsl?raw'
import vertShader from './shaders/vert.wgsl?raw'

const presentationFormat = navigator.gpu.getPreferredCanvasFormat();

const pipeline = device.createRenderPipeline({
    layout: 'auto',
    vertex: {
        module: device.createShaderModule({
            code: vertShader,
        }),
        entryPoint: 'main',
    },
    fragment: {
        module: device.createShaderModule({
            code: fragShader,
        }),
        entryPoint: 'main',
        targets: [
            {
                format: presentationFormat,
            },
        ],
    },
    primitive: {
        topology: 'triangle-list',
    },
});
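An aside on layout: 'auto': with an auto layout, a bind group layout for group N can later be pulled from the pipeline instead of being built by hand (section 2 below builds one explicitly instead):

// only valid when the pipeline was created with layout: 'auto'
const autoLayout0 = pipeline.getBindGroupLayout(0);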
  4. render each frame

    1. get render pass descriptor ==> docs
    const textureView = ctx.getCurrentTexture().createView();
    
    const renderPassDescriptor: GPURenderPassDescriptor = {
      colorAttachments: [
        {
          view: textureView,
          clearValue: [0, 0, 0, 0], // Clear to transparent
          loadOp: 'clear', // if loadOp isn't 'clear', clearValue is ignored
          storeOp: 'store',
        },
      ],
    };
    
    2. set up the commandEncoder ==> record GPU commands (does not run them immediately)
    const commandEncoder = device.createCommandEncoder();
    const passEncoder = commandEncoder.beginRenderPass(renderPassDescriptor);
    passEncoder.setPipeline(pipeline);
    // draw(vertexCount, instanceCount?, firstVertex?, firstInstance?)
    // same as draw(3, 1, 0, 0)
    passEncoder.draw(3);
    passEncoder.end();
    
    3. submit commandEncoder ==> submit the recorded GPU commands to the GPU
    const commands = commandEncoder.finish();
    device.queue.submit([commands]);
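
The step heading says "render each frame", but the snippet shows a single frame; a minimal loop sketch (the descriptor/encoder/submit code is the same as above, and getCurrentTexture() must be called again every frame):

const frame = () => {
  const textureView = ctx.getCurrentTexture().createView();
  // ...rebuild renderPassDescriptor, record the pass, finish & submit as above...
  requestAnimationFrame(frame);
};
requestAnimationFrame(frame);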
    
  5. vert.wgsl:

@vertex
fn main(
  @builtin(vertex_index) VertexIndex : u32
) -> @builtin(position) vec4f {
  var pos = array<vec2f, 3>(
    // normalized device coordinates (NDC)
    vec2(0.0, 0.5),
    vec2(-0.5, -0.5),
    vec2(0.5, -0.5)
  );
  // homogeneous coordinates
  // (x, y, z, w)
  return vec4f(pos[VertexIndex], 0.0, 1.0);
}
  6. frag.wgsl:
@fragment
fn main() -> @location(0) vec4f {
  return vec4(0.3, 0.9, 0.6, 1.0);
}

2. Colored triangle (animated):

diff:

  1. create timeBuffer:
const timeBuffer = device.createBuffer({
  size: 4,
  usage: GPUBufferUsage.UNIFORM | GPUBufferUsage.COPY_DST,
  // UNIFORM: bindable as a uniform buffer (fast, read-only in shaders)
  // COPY_DST: can be the destination of copies/writes, e.g. queue.writeBuffer from the CPU
  // (STORAGE, by contrast, lets shaders both read and write the buffer in VRAM)
});
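The bindGroupLayout used in the next step is never defined in the snippet; a minimal sketch matching the shader's @group(0) @binding(0) var<uniform> time: f32:

const bindGroupLayout = device.createBindGroupLayout({
  entries: [
    {
      binding: 0,
      visibility: GPUShaderStage.FRAGMENT, // time is only read in the fragment shader
      buffer: { type: 'uniform' },
    },
  ],
});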
  2. create bind group:
  const bindGroup = device.createBindGroup({
    layout: bindGroupLayout,
    entries: [
      {
        binding: 0,
        resource: {
          buffer: timeBuffer,
        },
      },
    ],
  });

  const pipeline = device.createRenderPipeline({
    layout: device.createPipelineLayout({
      bindGroupLayouts: [bindGroupLayout],
    }),
    ...//same as before
  });
  3. update the timeBuffer each frame:

  const startTime = performance.now();

  const frame = () => {
    const currentTime = (performance.now() - startTime) / 1000; // convert ms to seconds
    device.queue.writeBuffer(timeBuffer, 0, new Float32Array([currentTime]));
    ...// same as before

    const passEncoder = commandEncoder.beginRenderPass(renderPassDescriptor);
    passEncoder.setPipeline(pipeline);
    passEncoder.setBindGroup(0, bindGroup);
    ...// same as before
  }
  4. frag.wgsl:
@group(0) @binding(0) var<uniform> time: f32;

@fragment
fn main() -> @location(0) vec4f {
  // use time to create a rainbow color effect
  let r = sin(time * 2.0) * 0.5 + 0.5;
  let g = sin(time * 2.0 + 2.094) * 0.5 + 0.5; // 2.094 ≈ 2π/3
  let b = sin(time * 2.0 + 4.188) * 0.5 + 0.5; // 4.188 ≈ 4π/3
  
  return vec4f(r, g, b, 1.0);
}

3. Color gradient triangle based on position

diff:

  1. frag.wgsl:
@group(0) @binding(0) var<uniform> time: f32;

@fragment
fn main(@location(0) worldPos: vec2f) -> @location(0) vec4f {
  // compute a flow direction from bottom-left to top-right
  // worldPos.x is in [-0.5, 0.5], worldPos.y is in [-0.5, 0.5]
  // remap the coordinates into [0, 1]
  let normalizedX = (worldPos.x + 0.5);
  let normalizedY = (worldPos.y + 0.5);
  
  // gradient from bottom-left to top-right (0 at bottom-left, 1 at top-right)
  let gradient = (normalizedX + normalizedY) * 0.5;
  
  // add a time offset to create a flowing effect
  let flowOffset = time * 0.5;
  // fract: take the fractional part
  let animatedGradient = fract(gradient + flowOffset);
  
  // turn the animated gradient into a rainbow color
  let phase = animatedGradient * 6.28318; // 2π
  // let r = sin(phase) * 0.5 + 0.5;
  // with let r = 1, the literal would be inferred as an integer (i32), hence the explicit f32
  let r: f32 = 1;
  let g = sin(phase + 2.094) * 0.3 + 0.7; // 2.094 ≈ 2π/3
  let b = sin(phase + 4.188) * 0.3 + 0.7; // 4.188 ≈ 4π/3
  
  return vec4f(r, g, b, 1.0);
}
  2. vert.wgsl:
struct VertexOutput {
  @builtin(position) position: vec4f,
  // worldPos is interpolated during rasterization before it reaches the fragment shader.
  // The default mode is perspective: perspective-correct interpolation that accounts for depth.
  // The others are linear (plain linear interpolation) and flat (the provoking vertex's value, no interpolation);
  // the mode can be selected with @interpolate(...).
  @location(0) worldPos: vec2f,
}

@vertex
fn main(
  @builtin(vertex_index) VertexIndex : u32
) -> VertexOutput {
  var pos = array<vec2f, 3>(
    vec2(0, 0.5), 
    vec2(-0.5, -0.5), 
    vec2(0.5, -0.5),  
  );
  
  var output: VertexOutput;
  output.position = vec4f(pos[VertexIndex], 0.0, 1.0);
  output.worldPos = pos[VertexIndex];
  return output;
}

4. Enabling MSAA

diff:

  1. create pipeline
const pipeline = device.createRenderPipeline({
  ...//same as before
  multisample: {
    count: 4, // WebGPU only supports sample counts of 1 and 4
  },
});
  2. process texture
// const textureView = ctx.getCurrentTexture().createView();

const ctxTexture = ctx.getCurrentTexture();

const multisampleTexture = device.createTexture({
  format: ctxTexture.format,
  usage: GPUTextureUsage.RENDER_ATTACHMENT,
  size: [ctxTexture.width, ctxTexture.height],
  sampleCount: 4,
});

const renderPassDescriptor: GPURenderPassDescriptor = {
  colorAttachments: [
    {
      view: multisampleTexture.createView(), // render into the 4x MSAA texture
      resolveTarget: ctxTexture.createView(), // resolve (average) the samples into the canvas texture
      clearValue: [1, 1, 1, 0], // Clear to transparent
      loadOp: 'clear',
      storeOp: 'store',
    },
  ],
};
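One caveat not covered above: the multisample texture must stay the same size as the canvas texture, so if the canvas can resize it has to be recreated. A sketch (the helper name is illustrative):

let msaaTexture: GPUTexture | null = null;
const getMsaaView = (ctxTexture: GPUTexture): GPUTextureView => {
  if (!msaaTexture || msaaTexture.width !== ctxTexture.width || msaaTexture.height !== ctxTexture.height) {
    msaaTexture?.destroy(); // release the stale attachment
    msaaTexture = device.createTexture({
      format: ctxTexture.format,
      usage: GPUTextureUsage.RENDER_ATTACHMENT,
      size: [ctxTexture.width, ctxTexture.height],
      sampleCount: 4,
    });
  }
  return msaaTexture.createView();
};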

Main part

  • How to implement a click-ripple effect with WebGPU (reflections not included for now):
  1. The principle: the fragment shader computes a texture-sampling offset from the array of click positions, and that's essentially it. So there are two key points: how to organize the click records and pass them in, and how the offset itself is computed.

  2. Focusing on the first point:

  3. For the click records, a plain addEventListener is enough: record the click's relative coordinates (kept consistent with the WebGPU coordinate system, which makes them easy to use in the shader) plus a timestamp. For example:

interface ClickRecord {
  x: number; // horizontal position as a ratio in [-1, 1]
  y: number; // vertical position as a ratio in [-1, 1]
  time: number; // click timestamp
}
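A minimal sketch of that listener (assuming canvas and the same startTime used for the time uniform, so the seconds-based timestamps line up with what the shader compares against):

const clickQueue: ClickRecord[] = [];

canvas.addEventListener('click', (e) => {
  const rect = canvas.getBoundingClientRect();
  // convert CSS pixels into the [-1, 1] range, with y pointing up as in WebGPU NDC
  const x = ((e.clientX - rect.left) / rect.width) * 2 - 1;
  const y = -(((e.clientY - rect.top) / rect.height) * 2 - 1);
  clickQueue.push({ x, y, time: (performance.now() - startTime) / 1000 });
});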
  4. Define the matching data structure inside the fragment shader, implemented here as:
struct ClickItem {
  xy_time_pad: vec4f,
}

struct ClickBuffer {
  count: f32,
  items: array<ClickItem, 40>,
}
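These WGSL uniform layout rules determine the buffer size used in the next step; a worked size computation (assuming 40 max clicks, as in the struct above):

const MAX_CLICKS = 40;
// count is an f32 (4 bytes), but array<ClickItem, 40> requires 16-byte alignment,
// so 12 bytes of padding sit between count and the array.
const HEADER_BYTES = 16; // 4 (count) + 12 (padding)
const ITEM_BYTES = 16; // one vec4f per ClickItem
const CLICK_BUFFER_BYTES = HEADER_BYTES + MAX_CLICKS * ITEM_BYTES; // 656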
  5. Create the buffer on the GPU side, bind it into a group, and so on:

const clickDataBuffer = device.createBuffer({
  // Why 656 = 16 * 41? Because the 4-byte count in struct ClickBuffer must be
  // padded out to 16 bytes so the array that follows is correctly aligned.
  // This once caused a bug that even GPT couldn't find, heh.
  size: 656, // 16 * 41
  usage: GPUBufferUsage.UNIFORM | GPUBufferUsage.COPY_DST,
})
const bindGroupLayoutClicks = device.createBindGroupLayout({
  entries: [
    {
      binding: 0,
      visibility: GPUShaderStage.FRAGMENT,
      buffer: { type: 'uniform' },
    },
  ],
});
...some code
const clickData = new Float32Array(656 / 4)
// clamp the count so the shader never reads past the 40 slots
clickData[0] = Math.min(clickQueue.length, MAX_CLICKS)
clickData[1] = 0
clickData[2] = 0
clickData[3] = 0

for (let i = 0; i < clickQueue.length && i < MAX_CLICKS; i++) {
  const base = 4 + i * 4 // skip the 4-float header (count + padding); each record is 4 floats
  clickData[base] = clickQueue[i].x
  clickData[base + 1] = clickQueue[i].y
  clickData[base + 2] = clickQueue[i].time
  clickData[base + 3] = 0 // padding for 16-byte alignment
}
device.queue.writeBuffer(clickDataBuffer, 0, clickData)
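Not shown above: the queue also needs pruning, or old clicks eventually overflow the 40 slots. A sketch reusing the same speed/maxAge constants as the shader below (and the same startTime as the time uniform):

const SPEED = 0.85;
const MAX_AGE = 10 / SPEED; // matches maxAge in the fragment shader
const nowSec = (performance.now() - startTime) / 1000;
// drop clicks whose ripple has fully faded
while (clickQueue.length > 0 && nowSec - clickQueue[0].time > MAX_AGE) {
  clickQueue.shift();
}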
  6. Read it in the fragment shader:
struct ClickItem {
  xy_time_pad: vec4f,
}

struct ClickBuffer {
  count: f32,
  items: array<ClickItem, 40>,
}

@group(2) @binding(0) var<uniform> clicks: ClickBuffer;

That completes the first point.

2. Implementing the click ripple effect

Idea: for each click, compute an expanding ring. For fragments inside the ring, derive an offset magnitude from the fragment's distance to the ring's outer edge, then multiply that magnitude by the unit vector from the click point to the current fragment's position to get an offset vector. The offset vectors from multiple clicks are simply summed.

Implementation: easiest to just read the code. The pulse formula here is something I fit by hand, and the result looks good:

@group(0) @binding(0) var mySampler: sampler;
@group(0) @binding(1) var myTexture: texture_2d<f32>;
@group(1) @binding(0) var<uniform> time: f32;

struct ClickItem {
  xy_time_pad: vec4f,
}

struct ClickBuffer {
  count: f32,
  items: array<ClickItem, 40>,
}

@group(2) @binding(0) var<uniform> clicks: ClickBuffer;

const PI: f32 = 3.1415926535897932384626433832795;

// GLSL-style mod helper (not actually used in the shader below)
fn mathMod(x: f32, y: f32) -> f32 {
  return x - y * floor(x / y);
}

@fragment
fn main(@location(0) texcoord: vec2f, @location(1) position: vec2f) -> @location(0) vec4f {
  var totalPulse: vec2f = vec2(0.0, 0.0);
  let now = time;

  for (var i: f32 = 0; i < clicks.count; i = i + 1) {
    let c = clicks.items[i32(i)].xy_time_pad;
    var center = c.xy;
    // reflections are not implemented here, so the array holds just the one center
    let shadowCenterArr = array<vec2f, 1>(
      center,
    );
    let tClick = c.z;
    let speed: f32 = 0.85;
    let maxAge: f32 = 10 / speed;
    var age = (now - tClick) * speed;

    if (age < 0.0 || age > maxAge) {
      continue;
    }

    let radius = age * 0.6; // the ring expands over time
    let weakenIndex = pow(clamp((maxAge - age) / maxAge, 0.0, 1.0), 6); // fades sharply with age
    let width = 0.3 * pow((1.5 - clamp((maxAge - age) / maxAge, 0.0, 1.0)), 2); // the ring widens as it ages
    let strength = 0.1 * weakenIndex * weakenIndex;
    for(var j = 0u; j < 1; j++) {
      let d = distance(position, shadowCenterArr[j]);
      let x = 1 - clamp((d - (radius - width)) / width, 0.0, 1.0);
      let pulse = strength * (-1.0 / pow(x + 1.0, 2)) * sin(3 * PI * pow(x, 0.5));

      totalPulse = totalPulse + pulse * normalize(position - shadowCenterArr[j]);
    }

  }

  let distorted_texcoord = texcoord + totalPulse / 2.0;
  var textureColor = textureSample(myTexture, mySampler, distorted_texcoord);

  if (textureColor.a > 0.3) {
    let fixedPosition = position + totalPulse;
    let r = sin(fixedPosition.x * 10 + time) * 0.3 + 0.7;
    let g = sin(fixedPosition.x * 10 + time + 0.666 * PI) * 0.2 + 0.8; // phase shift ≈ 2π/3
    let b = sin(fixedPosition.x * 10 + time + 1.333 * PI) * 0.0 + 1.0; // phase shift ≈ 4π/3 (amplitude 0 keeps b at 1.0)
    textureColor = mix(vec4(r, g, b, 1.0), textureColor, textureColor.a * 0.2);
    // textureColor = mix(vec4(0.4, 0.6, 1.0, 1.0), textureColor, 0.8);
    // textureColor = mix(vec4(1.0, 1.0, 1.0, 1.0), textureColor, 0.8);
    return textureColor;
  }

  return vec4(1, 1, 1, 0);
}
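For reference, the render pass for this final shader needs three bind groups matching the @group indices above (the bind group names and the draw call are illustrative; the full-screen-quad vertex setup isn't shown in this note):

passEncoder.setPipeline(pipeline);
passEncoder.setBindGroup(0, textureBindGroup); // sampler + background texture
passEncoder.setBindGroup(1, timeBindGroup);    // time uniform
passEncoder.setBindGroup(2, clicksBindGroup);  // ClickBuffer uniform
passEncoder.draw(6); // e.g. a full-screen quad as two triangles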