diff --git a/src/components/SineTestGPU.tsx b/src/components/SineTestGPU.tsx
index 26ec2a9..8429ba8 100644
--- a/src/components/SineTestGPU.tsx
+++ b/src/components/SineTestGPU.tsx
@@ -1,10 +1,11 @@
 import useDevice from "@/hooks/useDevice";
-import {useCallback, useEffect, useMemo, useReducer, useState} from "react";
+import {useCallback, useEffect, useMemo, useState} from "react";
 import compute from "@/shaders/sineTest/compute.wgsl";
 
 const numChannels = 2; //(shader uses vec2: x = left, y = right)
 
 const SineTestGPU = () => {
+  const [audioContext, setAudioContext] = useState<AudioContext | undefined>(undefined);
   const [playing, setPlaying] = useState(false);
   const [chunkDurationInSeconds, setChunkDurationInSeconds] = useState(.05);
   const [maxBufferedChunks, setMaxBufferedChunks] = useState(4);
@@ -13,7 +14,6 @@ const SineTestGPU = () => {
   const [startTime, setStartTime] = useState(0.0);
   const [nextChunkOffset, setNextChunkOffset] = useState(-1);
   const [timeoutId, setTimeoutId] = useState<NodeJS.Timeout | null>(null);
-  const [audioContext, setAudioContext] = useState<AudioContext | undefined>(undefined);
   const {device} = useDevice();
 
   useEffect(() => {
@@ -33,12 +33,11 @@ const SineTestGPU = () => {
     async function stopMakingSound() {
       if (audioContext) await audioContext.suspend();
-      if(audioContext) await audioContext.close();
+      if (audioContext) await audioContext.close();
       if (timeoutId) clearTimeout(timeoutId);
-      setAudioContext(undefined);
       setTimeoutId(null);
+      setAudioContext(undefined);
     }
-
   }, [playing, sampleRate]);
@@ -143,7 +142,7 @@ const SineTestGPU = () => {
   }, [nextChunkOffset])
 
   const createSoundChunk = useCallback(async(chunkTime: number) => {
-    if (!audioContext || audioContext.state === "closed" || !chunkBuffer || !chunkMapBuffer || !timeInfoBuffer || !pipeline || !bindGroup || !chunkNumSamplesPerChannel || !chunkNumSamples) return;
+    if (!audioContext || audioContext.state === "closed" || !chunkBuffer || !chunkMapBuffer || !timeInfoBuffer || !pipeline || !bindGroup || !chunkNumSamplesPerChannel || !chunkNumSamples || !chunkBufferSize) return;
     device.queue.writeBuffer(timeInfoBuffer, 0, new Float32Array([chunkTime]));
 
     const commandEncoder = device.createCommandEncoder();
@@ -201,53 +200,3 @@ const SineTestGPU = () => {
 
 export default SineTestGPU;
-
-/*
-
-  //Class to calculate amount of buffer for constant feeding rate while playing
-  //On every tick() it calculates how much time was passed, and how much frames
-  //it should read to compensate.
-
-  //A second with 48000Hz sample rate contains 48k samples per second
-
-class PlaybackBuffer {
-  constructor(sampleRate, channels, bufferSize) {
-    this.fraction = RENDER_QUANTUM * channels;
-    if (bufferSize % this.fraction !== 0) {
-      throw new Error("Buffer size should be even to the fraction size: " + bufferSize);
-    }
-    this.bufferSize = bufferSize * channels;
-    this.samplesPerMs = sampleRate * channels / 1000;
-  }
-
-  start() {
-    this.lastTick = performance.now();
-    this.remainder = this.bufferSize;
-    return this.tick();
-  }
-
-  tick() {
-    const now = performance.now();
-    const samplesToFeed = (now - this.lastTick) * this.samplesPerMs + this.remainder;
-    const samplesToRead = Math.round(samplesToFeed / this.fraction) * this.fraction;
-    this.remainder = samplesToFeed - samplesToRead;
-    this.lastTick = now;
-
-    return samplesToRead;
-  }
-}
-
-this.playback = new PlaybackBuffer(this.context.sampleRate, channels, this.bufferSize);
-this._readAndFeed(this.playback.start());
-this.interval = setInterval(() => {
-  this._readAndFeed(this.playback.tick());
-}, Math.floor(RENDER_QUANTUM / this.context.sampleRate / 1000));
-
-function _readAndFeed(amount) {
-  if (!amount) return;
-
-  let buffer = new Float32Array(amount);
-  fillBufferInSomeWay(buffer);
-  this.node.port.postMessage({message: 'data', data: buffer});
-}
- */
\ No newline at end of file
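The comment block removed above sketches a pull-based feeding strategy: on each tick, convert elapsed wall-clock time into a sample count, round to whole render quanta, and carry the rounding error into the next tick. A minimal sketch of that math, assuming RENDER_QUANTUM is the Web Audio render quantum of 128 frames (the constant is never defined in the removed snippet) and a 48 kHz stereo context:

    // Sketch of the removed PlaybackBuffer tick math (RENDER_QUANTUM = 128 is an assumption).
    const RENDER_QUANTUM = 128;
    const sampleRate = 48000;
    const channels = 2;
    const fraction = RENDER_QUANTUM * channels;          // smallest feedable unit: 256 interleaved samples
    const samplesPerMs = (sampleRate * channels) / 1000; // 96 interleaved samples per millisecond

    let lastTick = performance.now();
    let remainder = 0;

    function tick(): number {
      const now = performance.now();
      const wanted = (now - lastTick) * samplesPerMs + remainder;
      const toRead = Math.round(wanted / fraction) * fraction; // whole quanta only
      remainder = wanted - toRead;                             // carry the error (can be negative)
      lastTick = now;
      return toRead;
    }

    // After ~10 ms: wanted ≈ 960 samples → toRead = 1024 (4 quanta), remainder ≈ -64.

Note that the removed snippet's setInterval period, Math.floor(RENDER_QUANTUM / this.context.sampleRate / 1000), evaluates to 0 ms; RENDER_QUANTUM / sampleRate * 1000 (≈ 2.7 ms at 48 kHz) was presumably intended.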
diff --git a/src/components/ThreeOhThree.tsx b/src/components/ThreeOhThree.tsx
index c0d5a80..36da503 100644
--- a/src/components/ThreeOhThree.tsx
+++ b/src/components/ThreeOhThree.tsx
@@ -30,7 +30,7 @@ const ThreeOhThree = () => {
   const [res, setRes] = useState(2.2);
   const [lfo, setLfo] = useState(1);
   const [flt, setFlt] = useState(-1.5);
-  const {adapter, device} = useDevice()
+  const {device} = useDevice()
 
   function handleReset() {
     setPartials(256);
@@ -55,7 +55,7 @@ const ThreeOhThree = () => {
   }
 
   useEffect(() => {
-    if (!audioContext || !adapter || !device) return;
+    if (!audioContext || !device) return;
     const audioCtx = audioContext;
 
     async function playSound() {
@@ -181,7 +181,7 @@ const ThreeOhThree = () => {
       audioCtx.suspend();
     }
-  }, [audioContext, device, adapter, playing])
+  }, [audioContext, device, playing])
 
   useEffect(() => {
     if (!audioParamBuffer || !device) return;
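Both components now take only `device` from `useDevice`; the `adapter` binding was unused. The hook itself is not part of this diff — a plausible minimal sketch of it (names and structure assumed, not taken from the repo):

    // Hypothetical sketch of @/hooks/useDevice; the real hook is not shown in this diff.
    import {useEffect, useState} from "react";

    const useDevice = () => {
      const [adapter, setAdapter] = useState<GPUAdapter | undefined>(undefined);
      const [device, setDevice] = useState<GPUDevice | undefined>(undefined);

      useEffect(() => {
        (async () => {
          const adapter = await navigator.gpu?.requestAdapter(); // undefined without WebGPU support
          if (!adapter) return;
          const device = await adapter.requestDevice();
          setAdapter(adapter);
          setDevice(device);
        })();
      }, []);

      return {adapter, device};
    };

    export default useDevice;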
diff --git a/src/components/ThreeOhThreeStreaming.tsx b/src/components/ThreeOhThreeStreaming.tsx
new file mode 100644
index 0000000..0036cc2
--- /dev/null
+++ b/src/components/ThreeOhThreeStreaming.tsx
@@ -0,0 +1,410 @@
+import useDevice from "@/hooks/useDevice";
+import dynamic from "next/dynamic";
+import React, {useCallback, useEffect, useMemo, useState} from "react";
+import compute from "@/shaders/threeOhThreeStreaming/compute.wgsl";
+import styled from "styled-components";
+
+const KnobParamLabel = dynamic(() => import("el-vis-audio").then((mod) => mod.KnobParamLabel), {ssr: false});
+
+const chunkDurationInSeconds = 1;
+const numChannels = 2; // currently only two channels allowed (shader uses vec2)
+const workgroupSize = 256;
+const maxBufferedChunks = 2;
+
+const ThreeOhThreeStreaming = () => {
+  const [audioContext, setAudioContext] = useState<AudioContext | undefined>(undefined);
+  const [playing, setPlaying] = useState(false);
+  const [startTime, setStartTime] = useState(0.0);
+  const [timeoutId, setTimeoutId] = useState<NodeJS.Timeout | null>(null);
+  const [partials, setPartials] = useState(256);
+  const [frequency, setFrequency] = useState(38);
+  const [timeMod, setTimeMod] = useState(16);
+  const [timeScale, setTimeScale] = useState(9);
+  const [gain, setGain] = useState(0.7);
+  const [dist, setDist] = useState(0.5);
+  const [dur, setDur] = useState(0.26);
+  const [ratio, setRatio] = useState(2);
+  const [sampOffset, setSampOffset] = useState(1);
+  const [fundamental, setFundamental] = useState(440);
+  const [stereo, setStereo] = useState(0.01);
+  const [nse, setNse] = useState(19871.8972);
+  const [res, setRes] = useState(2.2);
+  const [lfo, setLfo] = useState(1);
+  const [flt, setFlt] = useState(-1.5);
+  const {device} = useDevice()
+
+  function handleReset() {
+    setPartials(256);
+    setFrequency(38);
+    setTimeMod(16);
+    setTimeScale(9);
+    setGain(0.7);
+    setDist(0.5);
+    setDur(0.26);
+    setRatio(2);
+    setSampOffset(1);
+    setFundamental(440);
+    setStereo(0.01);
+    setNse(19871.8972);
+    setRes(2.2);
+    setLfo(1);
+    setFlt(-1.5);
+  }
+
+  useEffect(() => {
+    if (playing) {
+      startMakingSound();
+    } else {
+      stopMakingSound();
+    }
+
+    async function startMakingSound() {
+      if (typeof window !== 'undefined') {
+        setAudioContext(await new AudioContext());
+        setStartTime(performance.now() / 1000.0);
+      }
+    }
+
+    async function stopMakingSound() {
+      if (audioContext) await audioContext.suspend();
+      if (audioContext) await audioContext.close();
+      if (timeoutId) clearTimeout(timeoutId);
+      setTimeoutId(null);
+      setAudioContext(undefined);
+    }
+  }, [playing]);
+
+  useEffect(() => {
+    if (audioContext?.state === 'running') {
+      createSoundChunk(0);
+    }
+  }, [audioContext]);
+
+  const chunkNumSamplesPerChannel: number | undefined = useMemo(() => {
+    if (!audioContext || !chunkDurationInSeconds) return;
+    return audioContext.sampleRate * chunkDurationInSeconds;
+  }, [audioContext, chunkDurationInSeconds]);
+
+  const {chunkNumSamples, audioBuffer}: {
+    chunkNumSamples: number | undefined;
+    audioBuffer: AudioBuffer | undefined
+  } = useMemo(() => {
+    let buffer;
+    let chunkSamps;
+    if (audioContext && chunkNumSamplesPerChannel) {
+      chunkSamps = numChannels * chunkNumSamplesPerChannel;
+      buffer = audioContext.createBuffer(
+        numChannels,
+        chunkNumSamplesPerChannel,
+        audioContext.sampleRate
+      );
+    }
+    return {
+      chunkNumSamples: chunkSamps,
+      audioBuffer: buffer
+    };
+  }, [audioContext, chunkNumSamplesPerChannel]);
+
+  const channels: Float32Array[] = useMemo(() => {
+    if (!audioBuffer) return [];
+    return [...Array(numChannels)].map((_, i) => audioBuffer.getChannelData(i));
+  }, [audioBuffer]);
+
+  const chunkBufferSize: number | undefined = useMemo(() => {
+    if (!chunkNumSamples) return;
+    return Float32Array.BYTES_PER_ELEMENT * chunkNumSamples;
+  }, [chunkNumSamples]);
+
+  const {chunkBuffer, chunkMapBuffer, timeInfoBuffer, audioParamBuffer}: {
+    chunkBuffer: GPUBuffer | undefined;
+    chunkMapBuffer: GPUBuffer | undefined;
+    timeInfoBuffer: GPUBuffer | undefined;
+    audioParamBuffer: GPUBuffer | undefined;
+  } = useMemo(() => {
+    if (!device || !chunkBufferSize) {
+      return {chunkBuffer: undefined, chunkMapBuffer: undefined, timeInfoBuffer: undefined, audioParamBuffer: undefined};
+    }
+    return {
+      chunkBuffer: device.createBuffer({
+        size: chunkBufferSize,
+        usage: GPUBufferUsage.STORAGE | GPUBufferUsage.COPY_SRC,
+      }),
+      chunkMapBuffer: device.createBuffer({
+        size: chunkBufferSize,
+        usage: GPUBufferUsage.MAP_READ | GPUBufferUsage.COPY_DST,
+      }),
+      timeInfoBuffer: device.createBuffer({
+        size: Float32Array.BYTES_PER_ELEMENT * 1,
+        usage: GPUBufferUsage.UNIFORM | GPUBufferUsage.COPY_DST
+      }),
+      audioParamBuffer: device.createBuffer({
+        size: Float32Array.BYTES_PER_ELEMENT * 15,
+        usage: GPUBufferUsage.STORAGE | GPUBufferUsage.COPY_DST
+      })
+    };
+  }, [device, chunkBufferSize]);
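`chunkBuffer` and `chunkMapBuffer` come in a pair because WebGPU forbids combining MAP_READ with STORAGE usage: the shader writes into a storage buffer, and the result must be copied into a map-readable staging buffer before the CPU can touch it. The same pattern in isolation (a generic sketch; the component instead reuses one persistent staging buffer sized per chunk rather than allocating per read):

    // Generic WebGPU readback sketch: copy a storage buffer through a MAP_READ staging buffer.
    async function readBack(device: GPUDevice, storage: GPUBuffer, byteSize: number): Promise<Float32Array> {
      const staging = device.createBuffer({
        size: byteSize,
        usage: GPUBufferUsage.MAP_READ | GPUBufferUsage.COPY_DST,
      });
      const encoder = device.createCommandEncoder();
      encoder.copyBufferToBuffer(storage, 0, staging, 0, byteSize);
      device.queue.submit([encoder.finish()]);

      await staging.mapAsync(GPUMapMode.READ);
      const data = new Float32Array(staging.getMappedRange().slice(0)); // copy before unmap detaches it
      staging.unmap();
      return data;
    }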
+
+  const {pipeline, bindGroup}: {
+    pipeline: GPUComputePipeline | undefined;
+    bindGroup: GPUBindGroup | undefined
+  } = useMemo(() => {
+    if (!device || !audioContext || !workgroupSize || !timeInfoBuffer || !chunkBuffer || !audioParamBuffer) {
+      return {pipeline: undefined, bindGroup: undefined};
+    }
+    const audioShaderModule = device.createShaderModule({
+      label: "Audio shader",
+      code: compute
+    });
+
+    const pipeline = device.createComputePipeline({
+      layout: 'auto',
+      compute: {
+        module: audioShaderModule,
+        entryPoint: 'synthesize',
+        constants: {
+          SAMPLING_RATE: audioContext.sampleRate,
+          WORKGROUP_SIZE: workgroupSize
+        }
+      }
+    });
+
+    const bindGroup = device.createBindGroup({
+      layout: pipeline.getBindGroupLayout(0),
+      entries: [
+        {binding: 0, resource: {buffer: timeInfoBuffer}},
+        {binding: 1, resource: {buffer: chunkBuffer}},
+        {binding: 2, resource: {buffer: audioParamBuffer}}
+      ]
+    });
+    return {pipeline, bindGroup};
+  }, [device, audioContext, workgroupSize, timeInfoBuffer, chunkBuffer, audioParamBuffer]);
+
+  const createSoundChunk = useCallback(async (chunkTime: number) => {
+    if (!audioContext || audioContext.state === "closed" || !chunkBuffer || !chunkMapBuffer || !timeInfoBuffer || !pipeline || !bindGroup || !chunkNumSamplesPerChannel || !chunkNumSamples || !chunkBufferSize) return;
+    const bufferedSeconds = (startTime + chunkTime) - (performance.now() / 1000.0);
+    const numBufferedChunks = Math.floor(bufferedSeconds / chunkDurationInSeconds);
+    if (numBufferedChunks > maxBufferedChunks) {
+      setTimeoutId(setTimeout(async function () {
+        await createSoundChunk(chunkTime)
+      }, chunkDurationInSeconds * 1000.0));
+      return;
+    }
+    device.queue.writeBuffer(timeInfoBuffer, 0, new Float32Array([chunkTime]));
+
+    const commandEncoder = device.createCommandEncoder();
+
+    const pass = commandEncoder.beginComputePass();
+    pass.setPipeline(pipeline);
+    pass.setBindGroup(0, bindGroup);
+    pass.dispatchWorkgroups(
+      Math.ceil(chunkNumSamplesPerChannel / workgroupSize)
+    );
+    pass.end();
+
+    commandEncoder.copyBufferToBuffer(chunkBuffer, 0, chunkMapBuffer, 0, chunkBufferSize);
+
+    device.queue.submit([commandEncoder.finish()]);
+
+    await chunkMapBuffer.mapAsync(GPUMapMode.READ, 0, chunkBufferSize);
+
+    const chunkData = new Float32Array(chunkNumSamples);
+    chunkData.set(new Float32Array(chunkMapBuffer.getMappedRange()));
+    chunkMapBuffer.unmap();
+
+    await playChunk(chunkTime, chunkData);
+  }, [audioContext, device, chunkBuffer, chunkBufferSize, chunkMapBuffer, pipeline, timeInfoBuffer, bindGroup, chunkNumSamplesPerChannel, workgroupSize, chunkNumSamples]);
+
+  async function playChunk(chunkTime: number, chunkData: Float32Array) {
+    if (!audioBuffer || audioContext?.state === "closed" || !chunkData || !audioContext) return;
+    for (let i = 0; i < audioBuffer.length; ++i) {
+      for (const [offset, channel] of channels.entries()) {
+        channel[i] = chunkData[i * numChannels + offset];
+      }
+    }
+    let audioSource = audioContext.createBufferSource();
+    audioSource.buffer = audioBuffer;
+
+    // if(chunkTime !== 0.0) { // start playing on 2nd chunk to avoid 2nd chunk glitch...
+    audioSource.connect(audioContext.destination);
+    audioSource.start(chunkTime);
+    audioSource.onended = () => {
+      audioSource.disconnect();
+    }
+    await createSoundChunk(chunkTime + audioSource.buffer.duration);
+  }
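`createSoundChunk` throttles itself so the GPU never renders more than `maxBufferedChunks` chunks ahead of playback; when the queue is full, it retries after one chunk duration. A worked sketch of that check using the component's values (1 s chunks, maxBufferedChunks = 2):

    // Worked example of the buffering check in createSoundChunk.
    const chunkDurationInSeconds = 1;
    const maxBufferedChunks = 2;

    function shouldRenderNow(startTime: number, chunkTime: number): boolean {
      const now = performance.now() / 1000.0;
      const bufferedSeconds = (startTime + chunkTime) - now; // scheduled audio not yet played
      const numBufferedChunks = Math.floor(bufferedSeconds / chunkDurationInSeconds);
      return numBufferedChunks <= maxBufferedChunks;
    }

    // startTime = 100 s, chunkTime = 4 s, now = 101.2 s:
    //   bufferedSeconds = 2.8 → 2 whole chunks queued → render now.
    // With now = 100.9 s: bufferedSeconds = 3.1 → 3 chunks → wait ~1 s and retry.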
+
+  useEffect(() => {
+    if (!audioParamBuffer || !device) return;
+    device.queue.writeBuffer(audioParamBuffer, 0, new Float32Array([partials, frequency, timeMod, timeScale, gain, dist, dur, ratio, sampOffset, fundamental, stereo, nse, res, lfo, flt]));
+  }, [device, audioParamBuffer, partials, frequency, timeScale, timeMod, gain, dist, dur, ratio, sampOffset, fundamental, stereo, nse, res, lfo, flt]);
+
+  return (
+    <>
+      <div>
+        This synth requires the chrome://flags/#enable-webgpu-developer-features flag
+        to be enabled.
+        You may need to copy/paste the chrome flags URL into the searchbar and restart chrome, then return to this page.
+        If you lose control, refresh the page.
+      </div>
+      <KnobsFlexBox>
+        {/* play/reset controls and one KnobParamLabel per audio param:
+            partials, frequency, timeMod, timeScale, gain, dist, dur, ratio,
+            sampOffset, fundamental, stereo, nse, res, lfo, flt (knob markup elided) */}
+      </KnobsFlexBox>
+    </>
+  )
+}
+
+const KnobsFlexBox = styled.div`
+  // justify-content: space-evenly;
+  display: flex;
+  flex-wrap: wrap;
+  gap: 25px;
+  flex-direction: row;
+  margin: 15px;
+  padding: 15px;
+  border: 2px solid #ff0000;
+`;
+
+export default ThreeOhThreeStreaming;

diff --git a/src/pages/threeOhThreeStreaming.tsx b/src/pages/threeOhThreeStreaming.tsx
new file mode 100644
index 0000000..158dfb5
--- /dev/null
+++ b/src/pages/threeOhThreeStreaming.tsx
@@ -0,0 +1,11 @@
+import dynamic from 'next/dynamic'
+
+const ThreeOhThreeStreaming = dynamic(
+  () => import('@/components/ThreeOhThreeStreaming'),
+  {ssr: false}
+)
+
+export default function threeOhThreeStreaming() {
+  return <ThreeOhThreeStreaming/>;
+}
\ No newline at end of file
diff --git a/src/shaders/threeOhThreeStreaming/compute.wgsl b/src/shaders/threeOhThreeStreaming/compute.wgsl
new file mode 100644
index 0000000..33bf2db
--- /dev/null
+++ b/src/shaders/threeOhThreeStreaming/compute.wgsl
@@ -0,0 +1,101 @@
+const PARTIALS: u32 = 256u;
+const PI2: f32 = 6.283185307179586476925286766559;
+
+override WORKGROUP_SIZE: u32 = 256;
+override SAMPLING_RATE: f32 = 44100.0;
+
+struct TimeInfo { offset: f32 }
+
+struct AudioParam {
+    partials: f32,
+    frequency: f32,
+    timeMod: f32,
+    timeScale: f32,
+    gain: f32,
+    dist: f32,
+    dur: f32,
+    ratio: f32,
+    sampOffset: f32,
+    fundamental: f32,
+    stereo: f32,
+    nse: f32,
+    res: f32,
+    lfo: f32,
+    flt: f32,
+}
+
+@group(0) @binding(0) var<uniform> time_info: TimeInfo;
+@group(0) @binding(1) var<storage, read_write> song_chunk: array<vec2<f32>>; // 2 channel pcm data
+@binding(2) @group(0) var<storage, read> audio_param: AudioParam;
+
+@compute
+@workgroup_size(WORKGROUP_SIZE)
+fn synthesize(@builtin(global_invocation_id) global_id: vec3<u32>) {
+    let sample = global_id.x;
+
+    if sample >= arrayLength(&song_chunk) {
+        return;
+    }
+
+    let t = f32(sample) / SAMPLING_RATE;
+
+    song_chunk[sample] = mainSound(time_info.offset + t, audio_param);
+}
+
+fn dist(s: vec2<f32>, d: f32) -> vec2<f32> {
+    let distClamp: vec2<f32> = vec2(s * d);
+    let distSig: vec2<f32> = clamp(distClamp, vec2(-1.0), vec2(1.0));
+    return distSig;
+}
+
+fn _filter(h: f32, cut: f32, res: f32) -> f32 {
+    let cutted: f32 = cut - 20.0;
+    let df: f32 = max(h - cutted, 0.0);
+    let df2: f32 = abs(h - cutted);
+    return exp(-0.005 * df * df) * 0.5 + exp(df2 * df2 * -0.1) * res;
+}
+
+fn nse(x: f32) -> f32 {
+    return fract(sin(x * 110.082) * audio_param.nse);
+}
+
+fn ntof(n: f32, fundamental: f32) -> f32 {
+    return fundamental * pow(2.0, (n - 69.0) / 12.0);
+}
+
+fn synth(tseq: f32, t: f32, audio_param: AudioParam) -> vec2<f32> {
+    var v: vec2<f32> = vec2(0.0);
+    let tnote: f32 = fract(tseq);
+    let dr: f32 = audio_param.dur;
+    let amp: f32 = smoothstep(0.05, 0.0, abs(tnote - dr - 0.05) - dr) * exp(tnote * -1.0);
+    let seqn: f32 = nse(floor(tseq));
+    let n: f32 = 20.0 + floor(seqn * audio_param.frequency);
+    let f: f32 = ntof(n, audio_param.fundamental);
+    let sqr: f32 = smoothstep(0.0, 0.01, abs((t * audio_param.timeScale) % audio_param.timeMod - 20.0) - 20.0);
+    let base: f32 = f;
+    let flt: f32 = exp(tnote * audio_param.flt) * 50.0 + pow(cos(t * audio_param.lfo) * 0.5 + 0.5, 4.0) * 80.0;
+
+    for (var i = 0u; i < u32(audio_param.partials); i += 1) {
+        var h: f32 = f32(i + u32(audio_param.sampOffset));
+        var inten: f32 = 1.0 / h;
+
+        inten = mix(inten, inten * (h % audio_param.ratio), sqr);
+        inten *= exp(-1.0 * max(audio_param.ratio - h, 0.0));
+        inten *= _filter(h, flt, audio_param.res);
+
+        var vx = v.x + (inten * sin((PI2 + (audio_param.stereo / 2)) * (t * base * h)));
+        var vy = v.y + (inten * sin((PI2 - (audio_param.stereo / 2)) * (t * base * h)));
+        v = vec2(vx, vy);
+    }
+
+    let o: f32 = v.x * amp;
+
+    return vec2(dist(v * amp, audio_param.dist));
+}
+
+fn mainSound(time: f32, audio_param: AudioParam) -> vec2<f32> {
+    var tb: f32 = (time * audio_param.timeScale) % audio_param.timeMod;
+    var mx: vec2<f32> = synth(tb, time, audio_param) * audio_param.gain;
+
+    return vec2(mx);
+}
\ No newline at end of file
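The Float32Array written to `audioParamBuffer` must list its 15 values in exactly the order of `AudioParam`'s fields; since every field is an f32, the struct layout has no padding to worry about. A small sketch (illustration only; the component writes the array literal directly) that makes the coupling explicit:

    // Keyed packer mirroring the AudioParam field order in compute.wgsl.
    const PARAM_ORDER = [
      "partials", "frequency", "timeMod", "timeScale", "gain", "dist", "dur",
      "ratio", "sampOffset", "fundamental", "stereo", "nse", "res", "lfo", "flt",
    ] as const;

    type AudioParams = Record<(typeof PARAM_ORDER)[number], number>;

    function packAudioParams(p: AudioParams): Float32Array {
      return new Float32Array(PARAM_ORDER.map((k) => p[k]));
    }

    // usage: device.queue.writeBuffer(audioParamBuffer, 0, packAudioParams(params));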