diff --git a/src/webgpu/shader/execution/expression/expression.ts b/src/webgpu/shader/execution/expression/expression.ts
index f85516f29bdd..0668005b4aeb 100644
--- a/src/webgpu/shader/execution/expression/expression.ts
+++ b/src/webgpu/shader/execution/expression/expression.ts
@@ -267,12 +267,12 @@ type PipelineCache = Map<string, GPUComputePipeline>;
  * @param create the function used to construct a value, if not found in the cache
  * @returns the value, either fetched from the cache, or newly built.
  */
-function getOrCreate<K, V>(map: Map<K, V>, key: K, create: () => V) {
+async function getOrCreate<K, V>(map: Map<K, V>, key: K, create: () => Promise<V>) {
   const existing = map.get(key);
   if (existing !== undefined) {
     return existing;
   }
-  const value = create();
+  const value = await create();
   map.set(key, value);
   return value;
 }
@@ -354,16 +354,24 @@ export async function run(
   };

   const processBatch = async (batchCases: CaseList) => {
-    const checkBatch = await submitBatch(
-      t,
-      shaderBuilder,
-      parameterTypes,
-      resultType,
-      batchCases,
-      cfg.inputSource,
-      pipelineCache
-    );
-    checkBatch();
+    try {
+      const checkBatch = await submitBatch(
+        t,
+        shaderBuilder,
+        parameterTypes,
+        resultType,
+        batchCases,
+        cfg.inputSource,
+        pipelineCache
+      );
+      checkBatch();
+    } catch (err) {
+      if (err instanceof GPUPipelineError) {
+        t.fail(`Pipeline Creation Error, ${err.reason}: ${err.message}`);
+      } else {
+        throw err;
+      }
+    }
     void t.queue.onSubmittedWorkDone().finally(batchFinishedCallback);
   };

@@ -1040,6 +1048,7 @@ async function buildPipeline(
       const module = t.device.createShaderModule({ code: source });
       // build the pipeline
+
       const pipeline = await t.device.createComputePipelineAsync({
         layout: 'auto',
         compute: { module, entryPoint: 'main' },
       });
@@ -1084,12 +1093,12 @@ async function buildPipeline(
   }

   // build the compute pipeline, if the shader hasn't been compiled already.
-  const pipeline = getOrCreate(pipelineCache, source, () => {
+  const pipeline = await getOrCreate(pipelineCache, source, () => {
     // build the shader module
     const module = t.device.createShaderModule({ code: source });

     // build the pipeline
-    return t.device.createComputePipeline({
+    return t.device.createComputePipelineAsync({
       layout: 'auto',
       compute: { module, entryPoint: 'main' },
     });
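The expression.ts change above switches the shared pipeline cache to `createComputePipelineAsync` and makes `getOrCreate` await the builder before storing the result. Below is a minimal sketch of that caching pattern, not the patch itself: the `getCachedPipeline` name is hypothetical, and it hard-codes the `layout: 'auto'` / `entryPoint: 'main'` shape used throughout this diff.

```ts
// Sketch only: cache compiled compute pipelines by WGSL source so repeated
// batches reuse the same pipeline. createComputePipelineAsync rejects with
// GPUPipelineError instead of deferring errors to first use of the pipeline.
type PipelineCache = Map<string, GPUComputePipeline>;

async function getCachedPipeline(
  device: GPUDevice,
  cache: PipelineCache,
  source: string
): Promise<GPUComputePipeline> {
  const cached = cache.get(source);
  if (cached !== undefined) {
    return cached;
  }
  const module = device.createShaderModule({ code: source });
  const pipeline = await device.createComputePipelineAsync({
    layout: 'auto',
    compute: { module, entryPoint: 'main' },
  });
  cache.set(source, pipeline);
  return pipeline;
}
```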
diff --git a/src/webgpu/shader/execution/robust_access.spec.ts b/src/webgpu/shader/execution/robust_access.spec.ts
index 965dd283dd16..aafce2dcaae5 100644
--- a/src/webgpu/shader/execution/robust_access.spec.ts
+++ b/src/webgpu/shader/execution/robust_access.spec.ts
@@ -62,35 +62,43 @@ fn main() {

   t.debug(source);
   const module = t.device.createShaderModule({ code: source });
-  const pipeline = await t.device.createComputePipelineAsync({
-    layout,
-    compute: { module, entryPoint: 'main' },
-  });
-
-  const group = t.device.createBindGroup({
-    layout: pipeline.getBindGroupLayout(1),
-    entries: [
-      { binding: 0, resource: { buffer: constantsBuffer } },
-      { binding: 1, resource: { buffer: resultBuffer } },
-    ],
-  });

-  const testGroup = t.device.createBindGroup({
-    layout: pipeline.getBindGroupLayout(0),
-    entries: testBindings,
-  });
+  try {
+    const pipeline = await t.device.createComputePipelineAsync({
+      layout,
+      compute: { module, entryPoint: 'main' },
+    });

-  const encoder = t.device.createCommandEncoder();
-  const pass = encoder.beginComputePass();
-  pass.setPipeline(pipeline);
-  pass.setBindGroup(0, testGroup, dynamicOffsets);
-  pass.setBindGroup(1, group);
-  pass.dispatchWorkgroups(1);
-  pass.end();
+    const group = t.device.createBindGroup({
+      layout: pipeline.getBindGroupLayout(1),
+      entries: [
+        { binding: 0, resource: { buffer: constantsBuffer } },
+        { binding: 1, resource: { buffer: resultBuffer } },
+      ],
+    });

-  t.queue.submit([encoder.finish()]);
+    const testGroup = t.device.createBindGroup({
+      layout: pipeline.getBindGroupLayout(0),
+      entries: testBindings,
+    });

-  t.expectGPUBufferValuesEqual(resultBuffer, new Uint32Array([0]));
+    const encoder = t.device.createCommandEncoder();
+    const pass = encoder.beginComputePass();
+    pass.setPipeline(pipeline);
+    pass.setBindGroup(0, testGroup, dynamicOffsets);
+    pass.setBindGroup(1, group);
+    pass.dispatchWorkgroups(1);
+    pass.end();
+
+    t.queue.submit([encoder.finish()]);
+    t.expectGPUBufferValuesEqual(resultBuffer, new Uint32Array([0]));
+  } catch (err) {
+    if (err instanceof GPUPipelineError) {
+      t.fail(`Pipeline Creation Error, ${err.reason}: ${err.message}`);
+    } else {
+      throw err;
+    }
+  }
 }

 /** Fill an ArrayBuffer with sentinel values, except clear a region to zero. */
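Each call site in this patch repeats the same error-handling shape: await `createComputePipelineAsync(...)`, report a `GPUPipelineError` as a test failure, and rethrow anything else. A hedged sketch of that shape as a standalone wrapper; the helper name and the minimal `Failable` fixture type are assumptions for illustration, not part of the change:

```ts
// Hypothetical wrapper illustrating the pattern the diff inlines at each call
// site: GPUPipelineError becomes a test failure, other exceptions propagate.
interface Failable {
  fail(msg: string): void; // stand-in for the CTS fixture's fail()
}

async function failOnPipelineError<T>(
  t: Failable,
  body: () => Promise<T>
): Promise<T | undefined> {
  try {
    return await body();
  } catch (err) {
    if (err instanceof GPUPipelineError) {
      t.fail(`Pipeline Creation Error, ${err.reason}: ${err.message}`);
      return undefined;
    }
    throw err;
  }
}

// Possible usage at a call site:
// const pipeline = await failOnPipelineError(t, () =>
//   t.device.createComputePipelineAsync({ layout: 'auto', compute: { module, entryPoint: 'main' } })
// );
```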
diff --git a/src/webgpu/shader/execution/zero_init.spec.ts b/src/webgpu/shader/execution/zero_init.spec.ts
index e03a72f8df56..eef155b68c62 100644
--- a/src/webgpu/shader/execution/zero_init.spec.ts
+++ b/src/webgpu/shader/execution/zero_init.spec.ts
@@ -446,101 +446,118 @@ g.test('compute,zero_init')
         ],
       });

-      const fillPipeline = await t.device.createComputePipelineAsync({
-        layout: t.device.createPipelineLayout({ bindGroupLayouts: [fillLayout] }),
-        label: 'Workgroup Fill Pipeline',
+      try {
+        const fillPipeline = await t.device.createComputePipelineAsync({
+          layout: t.device.createPipelineLayout({ bindGroupLayouts: [fillLayout] }),
+          label: 'Workgroup Fill Pipeline',
+          compute: {
+            module: t.device.createShaderModule({
+              code: wgsl,
+            }),
+            entryPoint: 'fill',
+          },
+        });
+
+        const inputBuffer = t.makeBufferWithContents(
+          new Uint32Array([...iterRange(wg_memory_limits / 4, _i => 0xdeadbeef)]),
+          GPUBufferUsage.STORAGE | GPUBufferUsage.COPY_DST
+        );
+        t.trackForCleanup(inputBuffer);
+        const outputBuffer = t.device.createBuffer({
+          size: wg_memory_limits,
+          usage: GPUBufferUsage.STORAGE | GPUBufferUsage.COPY_SRC,
+        });
+        t.trackForCleanup(outputBuffer);
+
+        const bg = t.device.createBindGroup({
+          layout: fillPipeline.getBindGroupLayout(0),
+          entries: [
+            {
+              binding: 0,
+              resource: {
+                buffer: inputBuffer,
+              },
+            },
+            {
+              binding: 1,
+              resource: {
+                buffer: outputBuffer,
+              },
+            },
+          ],
+        });
+
+        const e = t.device.createCommandEncoder();
+        const p = e.beginComputePass();
+        p.setPipeline(fillPipeline);
+        p.setBindGroup(0, bg);
+        p.dispatchWorkgroups(1);
+        p.end();
+        t.queue.submit([e.finish()]);
+      } catch (err) {
+        if (err instanceof GPUPipelineError) {
+          t.fail(`Pipeline Creation Error, ${err.reason}: ${err.message}`);
+          return;
+        } else {
+          throw err;
+        }
+      }
+    }
+
+    try {
+      const pipeline = await t.device.createComputePipelineAsync({
+        layout: 'auto',
         compute: {
           module: t.device.createShaderModule({
             code: wgsl,
           }),
-          entryPoint: 'fill',
+          entryPoint: 'main',
         },
       });

-      const inputBuffer = t.makeBufferWithContents(
-        new Uint32Array([...iterRange(wg_memory_limits / 4, _i => 0xdeadbeef)]),
-        GPUBufferUsage.STORAGE | GPUBufferUsage.COPY_DST
-      );
-      t.trackForCleanup(inputBuffer);
-      const outputBuffer = t.device.createBuffer({
-        size: wg_memory_limits,
+      const resultBuffer = t.device.createBuffer({
+        size: 4,
         usage: GPUBufferUsage.STORAGE | GPUBufferUsage.COPY_SRC,
       });
-      t.trackForCleanup(outputBuffer);
+      t.trackForCleanup(resultBuffer);

-      const bg = t.device.createBindGroup({
-        layout: fillPipeline.getBindGroupLayout(0),
+      const zeroBuffer = t.device.createBuffer({
+        size: 4,
+        usage: GPUBufferUsage.UNIFORM,
+      });
+      t.trackForCleanup(zeroBuffer);
+
+      const bindGroup = t.device.createBindGroup({
+        layout: pipeline.getBindGroupLayout(0),
         entries: [
           {
             binding: 0,
             resource: {
-              buffer: inputBuffer,
+              buffer: resultBuffer,
             },
           },
           {
             binding: 1,
             resource: {
-              buffer: outputBuffer,
+              buffer: zeroBuffer,
             },
           },
         ],
       });

-      const e = t.device.createCommandEncoder();
-      const p = e.beginComputePass();
-      p.setPipeline(fillPipeline);
-      p.setBindGroup(0, bg);
-      p.dispatchWorkgroups(1);
-      p.end();
-      t.queue.submit([e.finish()]);
+      const encoder = t.device.createCommandEncoder();
+      const pass = encoder.beginComputePass();
+      pass.setPipeline(pipeline);
+      pass.setBindGroup(0, bindGroup);
+      pass.dispatchWorkgroups(1);
+      pass.end();
+      t.queue.submit([encoder.finish()]);
+      t.expectGPUBufferValuesEqual(resultBuffer, new Uint32Array([0]));
+    } catch (err) {
+      if (err instanceof GPUPipelineError) {
+        t.fail(`Pipeline Creation Error, ${err.reason}: ${err.message}`);
+      } else {
+        throw err;
+      }
     }
-
-    const pipeline = await t.device.createComputePipelineAsync({
-      layout: 'auto',
-      compute: {
-        module: t.device.createShaderModule({
-          code: wgsl,
-        }),
-        entryPoint: 'main',
-      },
-    });
-
-    const resultBuffer = t.device.createBuffer({
-      size: 4,
-      usage: GPUBufferUsage.STORAGE | GPUBufferUsage.COPY_SRC,
-    });
-    t.trackForCleanup(resultBuffer);
-
-    const zeroBuffer = t.device.createBuffer({
-      size: 4,
-      usage: GPUBufferUsage.UNIFORM,
-    });
-    t.trackForCleanup(zeroBuffer);
-
-    const bindGroup = t.device.createBindGroup({
-      layout: pipeline.getBindGroupLayout(0),
-      entries: [
-        {
-          binding: 0,
-          resource: {
-            buffer: resultBuffer,
-          },
-        },
-        {
-          binding: 1,
-          resource: {
-            buffer: zeroBuffer,
-          },
-        },
-      ],
-    });
-
-    const encoder = t.device.createCommandEncoder();
-    const pass = encoder.beginComputePass();
-    pass.setPipeline(pipeline);
-    pass.setBindGroup(0, bindGroup);
-    pass.dispatchWorkgroups(1);
-    pass.end();
-    t.queue.submit([encoder.finish()]);
-    t.expectGPUBufferValuesEqual(resultBuffer, new Uint32Array([0]));
   });
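For context on the error strings these tests emit: per the WebGPU specification, `GPUPipelineError.reason` is either `'validation'` or `'internal'`, so the `${err.reason}` interpolation above distinguishes an invalid descriptor or shader from an implementation-side compilation failure. A small sketch; the function name is illustrative only and not part of the patch:

```ts
// Sketch: map a GPUPipelineError to a human-readable description.
// 'validation' means the descriptor or shader was invalid; 'internal' means
// the implementation could not realize an otherwise valid pipeline.
function describePipelineError(err: GPUPipelineError): string {
  return err.reason === 'internal'
    ? `internal pipeline compilation failure: ${err.message}`
    : `pipeline validation failure: ${err.message}`;
}
```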