From 8abb9aacfbbe0293a7536c3e9ebfea46141a461b Mon Sep 17 00:00:00 2001 From: Jan Date: Fri, 26 Sep 2025 04:16:25 +0200 Subject: [PATCH 01/13] sketch idea --- examples/browser.html | 39 +++++ wgpu/backends/js_webgpu/__init__.py | 239 +++++++++++++++++++++++++++- wgpu/utils/compute.py | 42 ++--- wgpu/utils/device.py | 1 + 4 files changed, 294 insertions(+), 27 deletions(-) create mode 100644 examples/browser.html diff --git a/examples/browser.html b/examples/browser.html new file mode 100644 index 00000000..67270f80 --- /dev/null +++ b/examples/browser.html @@ -0,0 +1,39 @@ + + + + + RenderCanvas HTML canvas via Pyodide:
+some text below the canvas! + + + \ No newline at end of file diff --git a/wgpu/backends/js_webgpu/__init__.py b/wgpu/backends/js_webgpu/__init__.py index 7317b133..780104be 100644 --- a/wgpu/backends/js_webgpu/__init__.py +++ b/wgpu/backends/js_webgpu/__init__.py @@ -9,23 +9,250 @@ # NOTE: this is just a stub for now!! from .. import _register_backend +from ... import classes, structs, enums, flags +from pyodide.ffi import run_sync, JsProxy, to_js -class GPU: +from js import window, Uint32Array, ArrayBuffer + +def translate_python_methods(js_obj): + # print("Translating methods for", js_obj) + method_map = {} + for attr in dir(js_obj): + if "_" in attr: + continue + # print("attr", attr, type(attr)) + # print("attr value", getattr(js_obj, attr), type(getattr(js_obj, attr))) + if isinstance(getattr(js_obj, attr), JsProxy): + # print("maybe method", attr) + #assume that is like a method? + target = getattr(js_obj, attr) + py_name = ''.join(['_' + c.lower() if c.isupper() else c for c in attr]).lstrip('_') + method_map[py_name] = target + py_name_async = py_name + '_async' + method_map[py_name_async] = target + py_name_sync = py_name + '_sync' + method_map[py_name_sync] = lambda *args, target=target: run_sync(target(*args)) + + for name, target in method_map.items(): + js_obj.name = target + # setattr(js_obj, name, target) + + return js_obj + +# for use in to_js() https://pyodide.org/en/stable/usage/api/python-api/ffi.html#pyodide.ffi.ToJsConverter +def simple_js_accessor(value, convert, cache): + if hasattr(value, "js"): + value = value.js + # print("converted to js", value) + return convert(value) + +# TODO: can we implement our own variant of JsProxy and PyProxy, to_js and to_py? to work with pyodide and not around it? +# https://pyodide.org/en/stable/usage/type-conversions.html#type-translations + +class GPU(classes.GPU): def request_adapter_sync(self, **parameters): - raise NotImplementedError("Cannot use sync API functions in JS.") + return run_sync(self.request_adapter_async(**parameters)) + # raise NotImplementedError("Cannot use sync API functions in JS.") async def request_adapter_async(self, **parameters): gpu = window.navigator.gpu # noqa: F821 - return await gpu.request_adapter(**parameters) - - def get_preferred_canvas_format(self): - raise NotImplementedError() + self.js = gpu + adapter = await gpu.requestAdapter(**parameters) + # print(dir(adapter)) + # print(type(adapter.requestDevice)) + # adapter = translate_python_methods(adapter) + # print(dir(adapter)) + py_adapter = GPUAdapter(adapter) + # print(py_adapter, dir(py_adapter)) + return py_adapter @property def wgsl_language_features(self): return set() +class GPUAdapter(classes.GPUAdapter): + def __init__(self, js_adapter): + self.js = js_adapter + + def request_device_sync(self, **parameters): + return run_sync(self.request_device_async(**parameters)) + # raise NotImplementedError("Cannot use sync API functions in JS.") + + async def request_device_async(self, **parameters): + device = await self.js.requestDevice(**parameters) + # device = translate_python_methods(device) + return GPUDevice(device) + +class GPUDevice(classes.GPUDevice): + def __init__(self, js_device): + self.js = js_device + + @property + def queue(self): + # TODO: maybe needs a class... 
+ return GPUQueue(self.js.queue, self) + + def create_shader_module(self, *args, **kwargs): + # print("create_shader_module", args, kwargs) + js_sm = self.js.createShaderModule(*args, **kwargs) + return GPUShaderModule(js_sm) + + def create_buffer(self, *args, **kwargs): + js_buf = self.js.createBuffer(*args, **kwargs) + return GPUBuffer(js_buf) + + # TODO: apidiff + def create_buffer_with_data(self, *args, **kwargs): + kwargs["mappedAtCreation"] = True + data = kwargs.get("data") + data_size = (data.nbytes + 3) & ~3 # align to 4 bytes + kwargs["size"] = data_size + js_buf = self.js.createBuffer(*args, **kwargs) + # TODO: dtype? + Uint32Array.new(js_buf.getMappedRange()).set(kwargs["data"]) + js_buf.unmap() + # print(dir(js_buf)) + return GPUBuffer(js_buf) + + def create_bind_group_layout(self, *args, **kwargs): + js_kwargs = to_js(kwargs, eager_converter=simple_js_accessor) + js_bgl = self.js.createBindGroupLayout(*args, **js_kwargs) + return GPUBindGroupLayout(js_bgl) + + def create_compute_pipeline(self, *args, **kwargs): + # TODO: can we automatically get the js object when it's called somehwere? maybe by implementing _to_js? + # print("create_compute_pipeline", args, kwargs) + # kwargs["compute"]["module"] = kwargs["compute"]["module"].to_js() + js_kwargs = to_js(kwargs, eager_converter=simple_js_accessor) + # print("create_compute_pipeline", args, js_kwargs) + # print(dir(js_kwargs)) + js_cp = self.js.createComputePipeline(*args, **js_kwargs) + return GPUComputePipeline(js_cp) + + def create_bind_group(self, *args, **kwargs): + js_kwargs = to_js(kwargs, eager_converter=simple_js_accessor) + js_bg = self.js.createBindGroup(*args, **js_kwargs) + return GPUBindGroup(js_bg) + + def create_command_encoder(self, *args, **kwargs): + js_kwargs = to_js(kwargs, eager_converter=simple_js_accessor) + js_ce = self.js.createCommandEncoder(*args, **js_kwargs) + return GPUCommandEncoder(js_ce) + +class GPUShaderModule(classes.GPUShaderModule): + def __init__(self, js_sm): + self.js = js_sm + +class GPUBuffer(classes.GPUBuffer): + def __init__(self, js_buf): + self.js = js_buf + + # TODO apidiff + def write_mapped(self, *args, **kwargs): + raise NotImplementedError("write_mapped not implemented yet in JS backend") + + # TODO: idl attributes round trip -.- + @property + def _size(self): + return self.js.size + +class GPUBindGroupLayout(classes.GPUBindGroupLayout): + def __init__(self, js_bgl): + self.js = js_bgl + +# TODO: mixin class +class GPUComputePipeline(classes.GPUComputePipeline): + def __init__(self, js_cp): + self.js = js_cp + + def get_bind_group_layout(self, *args, **kwargs): + js_bgl = self.js.getBindGroupLayout(*args, **kwargs) + return GPUBindGroupLayout(js_bgl) + +class GPUBindGroup(classes.GPUBindGroup): + def __init__(self, js_bg): + self.js = js_bg + +class GPUCommandEncoder(classes.GPUCommandEncoder): + def __init__(self, js_ce): + self.js = js_ce + + def begin_compute_pass(self, *args, **kwargs): + # TODO: no args, should be empty maybe? 
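+        # (note on the TODO above: the descriptor argument of the JS beginComputePass()
+        #  is optional, so when there are no kwargs it should also be fine to call
+        #  self.js.beginComputePass() with no arguments at all)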
+ js_kwargs = to_js(kwargs, eager_converter=simple_js_accessor) + js_cp = self.js.beginComputePass(*args, **js_kwargs) + return GPUComputePassEncoder(js_cp) + + def finish(self, *args, **kwargs): + js_args = to_js(args, eager_converter=simple_js_accessor) + js_kwargs = to_js(kwargs, eager_converter=simple_js_accessor) + js_cmd_buf = self.js.finish(*js_args, **js_kwargs) + return GPUCommandBuffer(js_cmd_buf) + +class GPUComputePassEncoder(classes.GPUComputePassEncoder): + def __init__(self, js_cp): + self.js = js_cp + + def set_pipeline(self, *args, **kwargs): + js_args = to_js(args, eager_converter=simple_js_accessor) + js_kwargs = to_js(kwargs, eager_converter=simple_js_accessor) + self.js.setPipeline(*js_args, **js_kwargs) + + def set_bind_group(self, *args, **kwargs): + js_args = to_js(args, eager_converter=simple_js_accessor) + js_kwargs = to_js(kwargs, eager_converter=simple_js_accessor) + self.js.setBindGroup(*js_args, **js_kwargs) + + def dispatch_workgroups(self, *args, **kwargs): + js_args = to_js(args, eager_converter=simple_js_accessor) + js_kwargs = to_js(kwargs, eager_converter=simple_js_accessor) + self.js.dispatchWorkgroups(*js_args, **js_kwargs) + + def end(self, *args, **kwargs): + js_kwargs = to_js(kwargs, eager_converter=simple_js_accessor) + self.js.end(*args, **js_kwargs) + +class GPUCommandBuffer(classes.GPUCommandBuffer): + def __init__(self, js_cb): + self.js = js_cb + +class GPUQueue(classes.GPUQueue): + def __init__(self, js_queue, device: GPUDevice): + self.js = js_queue + self._device = device #needed for the read_buffer api diff I guess + + def submit(self, *args, **kwargs): + js_args = to_js(args, eager_converter=simple_js_accessor) + js_kwargs = to_js(kwargs, eager_converter=simple_js_accessor) + self.js.submit(*js_args, **js_kwargs) + + # TODO: api diff + def read_buffer(self, buffer: GPUBuffer, buffer_offset: int=0, size: int | None = None) -> memoryview: + # largely copied from wgpu-native/_api.py + print(dir(self)) + device = self._device + + if not size: + data_length = buffer.size - buffer_offset + else: + data_length = int(size) + if not (0 <= buffer_offset < buffer.size): # pragma: no cover + raise ValueError("Invalid buffer_offset") + if not (data_length <= buffer.size - buffer_offset): # pragma: no cover + raise ValueError("Invalid data_length") + data_length = (data_length + 3) & ~3 # align to 4 bytes + + temp_buffer = device.js.createBuffer( + size=data_length, + usage=flags.BufferUsage.COPY_DST | flags.BufferUsage.MAP_READ, + mappedAtCreation=True, + label="output buffer temp" + ) + res = temp_buffer.getMappedRange() + res = res.slice(0) + temp_buffer.unmap() + return res.to_py() # should give a memoryview? 
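+        # NOTE (sketch, not part of this patch): as written, nothing is ever copied
+        # into temp_buffer, so the returned memoryview will be all zeros. The
+        # wgpu-native read_buffer this is copied from also encodes a copy and then
+        # maps the temp buffer for reading; roughly, in this backend's style:
+        #
+        #     encoder = device.js.createCommandEncoder()
+        #     encoder.copyBufferToBuffer(buffer.js, buffer_offset, temp_buffer, 0, data_length)
+        #     self.js.submit(to_js([encoder.finish()]))
+        #     run_sync(temp_buffer.mapAsync(flags.MapMode.READ))
+        #     res = temp_buffer.getMappedRange().slice(0)
+        #     temp_buffer.unmap()
+        #
+        # (temp_buffer would then be created without mappedAtCreation=True.)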
gpu = GPU() _register_backend(gpu) diff --git a/wgpu/utils/compute.py b/wgpu/utils/compute.py index 705b1384..b020e701 100644 --- a/wgpu/utils/compute.py +++ b/wgpu/utils/compute.py @@ -135,7 +135,7 @@ def compute_with_buffers(input_arrays, output_arrays, shader, constants=None, n= # Create bindings and binding layouts bindings = [] - binding_layouts = [] + # binding_layouts = [] for index, buffer in buffers.items(): bindings.append( { @@ -143,27 +143,26 @@ def compute_with_buffers(input_arrays, output_arrays, shader, constants=None, n= "resource": {"buffer": buffer, "offset": 0, "size": buffer.size}, } ) - storage_types = ( - wgpu.BufferBindingType.read_only_storage, - wgpu.BufferBindingType.storage, - ) - binding_layouts.append( - { - "binding": index, - "visibility": wgpu.ShaderStage.COMPUTE, - "buffer": { - "type": storage_types[index in output_infos], - "has_dynamic_offset": False, - }, - } - ) + # storage_types = ( + # wgpu.BufferBindingType.read_only_storage, + # wgpu.BufferBindingType.storage, + # ) + # binding_layouts.append( + # { + # "binding": index, + # "visibility": wgpu.ShaderStage.COMPUTE, + # "buffer": { + # "type": storage_types[index in output_infos], + # "has_dynamic_offset": False, + # }, + # } + # ) # Put buffers together - bind_group_layout = device.create_bind_group_layout(entries=binding_layouts) - pipeline_layout = device.create_pipeline_layout( - bind_group_layouts=[bind_group_layout] - ) - bind_group = device.create_bind_group(layout=bind_group_layout, entries=bindings) + # bind_group_layout = device.create_bind_group_layout(entries=binding_layouts) + # pipeline_layout = device.create_pipeline_layout( + # bind_group_layouts=[bind_group_layout] + # ) compute = { "module": cshader, @@ -175,9 +174,10 @@ def compute_with_buffers(input_arrays, output_arrays, shader, constants=None, n= # Create a pipeline and "run it" compute_pipeline = device.create_compute_pipeline( - layout=pipeline_layout, + layout="auto", compute=compute, ) + bind_group = device.create_bind_group(layout=compute_pipeline.get_bind_group_layout(0), entries=bindings) command_encoder = device.create_command_encoder() compute_pass = command_encoder.begin_compute_pass() compute_pass.set_pipeline(compute_pipeline) diff --git a/wgpu/utils/device.py b/wgpu/utils/device.py index 7becddee..54b2cfef 100644 --- a/wgpu/utils/device.py +++ b/wgpu/utils/device.py @@ -13,5 +13,6 @@ def get_default_device(): import wgpu.backends.auto adapter = wgpu.gpu.request_adapter_sync(power_preference="high-performance") + print(adapter, dir(adapter)) _default_device = adapter.request_device_sync() return _default_device From 0d22166411f7e9e76623b869996aaf26e24f7029 Mon Sep 17 00:00:00 2001 From: Jan Date: Fri, 26 Sep 2025 04:42:51 +0200 Subject: [PATCH 02/13] some hangup with buffer alignment -.- --- examples/browser.html | 6 ++---- wgpu/backends/js_webgpu/__init__.py | 15 ++++++++++++++- 2 files changed, 16 insertions(+), 5 deletions(-) diff --git a/examples/browser.html b/examples/browser.html index 67270f80..11b06d6f 100644 --- a/examples/browser.html +++ b/examples/browser.html @@ -15,16 +15,14 @@ // --allow-file-access-from-files or local webserver // TODO: replace the actual code here (unless you have the module) pythonCode = await (await fetch("compute_noop.py")).text(); - // pythonCode = await (await fetch("events.py")).text(); - // pythonCode = await (await fetch("noise.py")).text(); - + // Load Pyodide let pyodide = await loadPyodide(); await pyodide.loadPackage("micropip"); const micropip = 
pyodide.pyimport("micropip"); - // await micropip.install('numpy'); + await micropip.install('numpy'); // await micropip.install('rendercanvas'); // await micropip.install('../dist/rendercanvas-2.2.1-py3-none-any.whl'); // local wheel for auto testing await micropip.install('../dist/wgpu-0.24.0-py3-none-any.whl'); // from PyPI diff --git a/wgpu/backends/js_webgpu/__init__.py b/wgpu/backends/js_webgpu/__init__.py index 780104be..4564c8c8 100644 --- a/wgpu/backends/js_webgpu/__init__.py +++ b/wgpu/backends/js_webgpu/__init__.py @@ -108,6 +108,8 @@ def create_buffer_with_data(self, *args, **kwargs): data = kwargs.get("data") data_size = (data.nbytes + 3) & ~3 # align to 4 bytes kwargs["size"] = data_size + print(data_size) + kwargs["label"] = "input buffer" js_buf = self.js.createBuffer(*args, **kwargs) # TODO: dtype? Uint32Array.new(js_buf.getMappedRange()).set(kwargs["data"]) @@ -140,6 +142,12 @@ def create_command_encoder(self, *args, **kwargs): js_ce = self.js.createCommandEncoder(*args, **js_kwargs) return GPUCommandEncoder(js_ce) + def create_pipeline_layout(self, *args, **kwargs): + js_args = to_js(args, eager_converter=simple_js_accessor) + js_kwargs = to_js(kwargs, eager_converter=simple_js_accessor) + js_pl = self.js.createPipelineLayout(*js_args, **js_kwargs) + return GPUPipelineLayout(js_pl) + class GPUShaderModule(classes.GPUShaderModule): def __init__(self, js_sm): self.js = js_sm @@ -155,6 +163,7 @@ def write_mapped(self, *args, **kwargs): # TODO: idl attributes round trip -.- @property def _size(self): + # print("getting size", dir(self.js), self.js.size) return self.js.size class GPUBindGroupLayout(classes.GPUBindGroupLayout): @@ -230,7 +239,7 @@ def submit(self, *args, **kwargs): # TODO: api diff def read_buffer(self, buffer: GPUBuffer, buffer_offset: int=0, size: int | None = None) -> memoryview: # largely copied from wgpu-native/_api.py - print(dir(self)) + # print(dir(self)) device = self._device if not size: @@ -254,5 +263,9 @@ def read_buffer(self, buffer: GPUBuffer, buffer_offset: int=0, size: int | None temp_buffer.unmap() return res.to_py() # should give a memoryview? +class GPUPipelineLayout(classes.GPUPipelineLayout): + def __init__(self, js_pl): + self.js = js_pl + gpu = GPU() _register_backend(gpu) From ac858b81140d0d46e0ad56aec14f8ec6237dc8db Mon Sep 17 00:00:00 2001 From: Jan Date: Fri, 26 Sep 2025 16:42:36 +0200 Subject: [PATCH 03/13] new errors :! 
--- examples/browser.html | 6 ++- wgpu/backends/js_webgpu/__init__.py | 61 +++++++++++++++++++++++++++-- wgpu/utils/compute.py | 42 ++++++++++---------- 3 files changed, 82 insertions(+), 27 deletions(-) diff --git a/examples/browser.html b/examples/browser.html index 11b06d6f..7b4a2150 100644 --- a/examples/browser.html +++ b/examples/browser.html @@ -14,7 +14,9 @@ // fetch the file locally for easier scripting // --allow-file-access-from-files or local webserver // TODO: replace the actual code here (unless you have the module) - pythonCode = await (await fetch("compute_noop.py")).text(); + // pythonCode = await (await fetch("compute_noop.py")).text(); + // pythonCode = await (await fetch("compute_matmul.py")).text(); + pythonCode = await (await fetch("cube.py")).text(); // Load Pyodide @@ -24,7 +26,7 @@ const micropip = pyodide.pyimport("micropip"); await micropip.install('numpy'); // await micropip.install('rendercanvas'); - // await micropip.install('../dist/rendercanvas-2.2.1-py3-none-any.whl'); // local wheel for auto testing + await micropip.install('../../../../../../../projects/pygfx-repos/rendercanvas/dist/rendercanvas-2.2.1-py3-none-any.whl'); // local wheel for auto testing await micropip.install('../dist/wgpu-0.24.0-py3-none-any.whl'); // from PyPI // Run the Python code async because some calls are async it seems. diff --git a/wgpu/backends/js_webgpu/__init__.py b/wgpu/backends/js_webgpu/__init__.py index 4564c8c8..e3c38021 100644 --- a/wgpu/backends/js_webgpu/__init__.py +++ b/wgpu/backends/js_webgpu/__init__.py @@ -45,20 +45,22 @@ def simple_js_accessor(value, convert, cache): if hasattr(value, "js"): value = value.js # print("converted to js", value) + # todo convert snake_case back to camel_case? return convert(value) # TODO: can we implement our own variant of JsProxy and PyProxy, to_js and to_py? to work with pyodide and not around it? # https://pyodide.org/en/stable/usage/type-conversions.html#type-translations class GPU(classes.GPU): + def __init__(self): + self.js = window.navigator.gpu # noqa: F821 + def request_adapter_sync(self, **parameters): return run_sync(self.request_adapter_async(**parameters)) # raise NotImplementedError("Cannot use sync API functions in JS.") async def request_adapter_async(self, **parameters): - gpu = window.navigator.gpu # noqa: F821 - self.js = gpu - adapter = await gpu.requestAdapter(**parameters) + adapter = await self.js.requestAdapter(**parameters) # print(dir(adapter)) # print(type(adapter.requestDevice)) # adapter = translate_python_methods(adapter) @@ -67,10 +69,21 @@ async def request_adapter_async(self, **parameters): # print(py_adapter, dir(py_adapter)) return py_adapter + # api diff not really useful, but needed for compatibility I guess? + def enumerate_adapters_sync(self): + return run_sync(self.enumerate_adapters_async()) + + async def enumerate_adapters_async(self): + # bodge here: it blocks but we should await instead. 
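+        # (sketch of the awaited variant the comment above asks for:
+        #  return [await self.request_adapter_async()])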
+ return [self.request_adapter_sync()] + @property def wgsl_language_features(self): return set() + + + class GPUAdapter(classes.GPUAdapter): def __init__(self, js_adapter): self.js = js_adapter @@ -83,6 +96,15 @@ async def request_device_async(self, **parameters): device = await self.js.requestDevice(**parameters) # device = translate_python_methods(device) return GPUDevice(device) + + # api diff just for overview gives adaper info for now + @property + def summary(self): + return self.adapter_info + + @property + def adapter_info(self): + return self.js.info class GPUDevice(classes.GPUDevice): def __init__(self, js_device): @@ -108,7 +130,7 @@ def create_buffer_with_data(self, *args, **kwargs): data = kwargs.get("data") data_size = (data.nbytes + 3) & ~3 # align to 4 bytes kwargs["size"] = data_size - print(data_size) + # print(data_size) kwargs["label"] = "input buffer" js_buf = self.js.createBuffer(*args, **kwargs) # TODO: dtype? @@ -143,11 +165,21 @@ def create_command_encoder(self, *args, **kwargs): return GPUCommandEncoder(js_ce) def create_pipeline_layout(self, *args, **kwargs): + print("create_pipeline_layout", args, kwargs) + # translate the key to camelCase manually here! + kwargs["bindGroupLayouts"] = kwargs.get("bind_group_layouts", []) js_args = to_js(args, eager_converter=simple_js_accessor) js_kwargs = to_js(kwargs, eager_converter=simple_js_accessor) + print(js_args, js_kwargs) js_pl = self.js.createPipelineLayout(*js_args, **js_kwargs) return GPUPipelineLayout(js_pl) + def create_texture(self, *args, **kwargs): + js_kwargs = to_js(kwargs, eager_converter=simple_js_accessor) + js_tex = self.js.createTexture(*args, **js_kwargs) + return GPUTexture(js_tex) + + class GPUShaderModule(classes.GPUShaderModule): def __init__(self, js_sm): self.js = js_sm @@ -263,9 +295,30 @@ def read_buffer(self, buffer: GPUBuffer, buffer_offset: int=0, size: int | None temp_buffer.unmap() return res.to_py() # should give a memoryview? 
+ def write_texture(self, *args, **kwargs): + print("GPUQueue.write_texture called with", args, kwargs) + js_args = to_js(args, eager_converter=simple_js_accessor) + js_kwargs = to_js(kwargs, eager_converter=simple_js_accessor) + print("Converted to JS args", js_args, js_kwargs) + self.js.writeTexture(*js_args, **js_kwargs) + class GPUPipelineLayout(classes.GPUPipelineLayout): def __init__(self, js_pl): self.js = js_pl +class GPUTexture(classes.GPUTexture): + def __init__(self, js_tex): + self.js = js_tex + + def create_view(self, *args, **kwargs): + js_kwargs = to_js(kwargs, eager_converter=simple_js_accessor) + js_view = self.js.createView(*args, **js_kwargs) + return GPUTextureView(js_view) + +class GPUTextureView(classes.GPUTextureView): + def __init__(self, js_view): + self.js = js_view + +# finally register the backend gpu = GPU() _register_backend(gpu) diff --git a/wgpu/utils/compute.py b/wgpu/utils/compute.py index b020e701..b6f9d7c3 100644 --- a/wgpu/utils/compute.py +++ b/wgpu/utils/compute.py @@ -135,7 +135,7 @@ def compute_with_buffers(input_arrays, output_arrays, shader, constants=None, n= # Create bindings and binding layouts bindings = [] - # binding_layouts = [] + binding_layouts = [] for index, buffer in buffers.items(): bindings.append( { @@ -143,26 +143,26 @@ def compute_with_buffers(input_arrays, output_arrays, shader, constants=None, n= "resource": {"buffer": buffer, "offset": 0, "size": buffer.size}, } ) - # storage_types = ( - # wgpu.BufferBindingType.read_only_storage, - # wgpu.BufferBindingType.storage, - # ) - # binding_layouts.append( - # { - # "binding": index, - # "visibility": wgpu.ShaderStage.COMPUTE, - # "buffer": { - # "type": storage_types[index in output_infos], - # "has_dynamic_offset": False, - # }, - # } - # ) + storage_types = ( + wgpu.BufferBindingType.read_only_storage, + wgpu.BufferBindingType.storage, + ) + binding_layouts.append( + { + "binding": index, + "visibility": wgpu.ShaderStage.COMPUTE, + "buffer": { + "type": storage_types[index in output_infos], + "has_dynamic_offset": False, + }, + } + ) # Put buffers together - # bind_group_layout = device.create_bind_group_layout(entries=binding_layouts) - # pipeline_layout = device.create_pipeline_layout( - # bind_group_layouts=[bind_group_layout] - # ) + bind_group_layout = device.create_bind_group_layout(entries=binding_layouts) + pipeline_layout = device.create_pipeline_layout( + bind_group_layouts=[bind_group_layout] + ) compute = { "module": cshader, @@ -174,10 +174,10 @@ def compute_with_buffers(input_arrays, output_arrays, shader, constants=None, n= # Create a pipeline and "run it" compute_pipeline = device.create_compute_pipeline( - layout="auto", + layout=pipeline_layout, compute=compute, ) - bind_group = device.create_bind_group(layout=compute_pipeline.get_bind_group_layout(0), entries=bindings) + bind_group = device.create_bind_group(layout=bind_group_layout, entries=bindings) command_encoder = device.create_command_encoder() compute_pass = command_encoder.begin_compute_pass() compute_pass.set_pipeline(compute_pipeline) From b94ab8a00010d7b880d01bfa22caf053e3d93bfd Mon Sep 17 00:00:00 2001 From: Jan Date: Sat, 27 Sep 2025 02:24:40 +0200 Subject: [PATCH 04/13] context in progress --- examples/cube.py | 4 +- wgpu/_classes.py | 2 +- wgpu/backends/js_webgpu/__init__.py | 147 +++++++++++++++++++--------- 3 files changed, 106 insertions(+), 47 deletions(-) diff --git a/examples/cube.py b/examples/cube.py index 186a30fd..caace190 100644 --- a/examples/cube.py +++ b/examples/cube.py @@ -72,7 
+72,9 @@ def get_render_pipeline_kwargs( canvas, device: wgpu.GPUDevice, pipeline_layout: wgpu.GPUPipelineLayout ) -> wgpu.RenderPipelineDescriptor: context = canvas.get_context("wgpu") + print("context:", context) render_texture_format = context.get_preferred_format(device.adapter) + print("render_texture_format:", render_texture_format) context.configure(device=device, format=render_texture_format) shader = device.create_shader_module(code=shader_source) @@ -463,7 +465,7 @@ async def draw_frame_async(): ) texture_data = np.repeat(texture_data, 64, 0) texture_data = np.repeat(texture_data, 64, 1) -texture_size = texture_data.shape[1], texture_data.shape[0], 1 +texture_size = (texture_data.shape[1], texture_data.shape[0], 1) # Use numpy to create a struct for the uniform uniform_dtype = [("transform", "float32", (4, 4))] diff --git a/wgpu/_classes.py b/wgpu/_classes.py index 881f888b..c53966b3 100644 --- a/wgpu/_classes.py +++ b/wgpu/_classes.py @@ -349,7 +349,7 @@ def configure( usage = str_flag_to_int(flags.TextureUsage, usage) color_space # noqa - not really supported, just assume srgb for now - tone_mapping # noqa - not supported yet + tone_mapping = {} if tone_mapping is None else tone_mapping # Allow more than the IDL modes, see https://github.com/pygfx/wgpu-py/pull/719 extra_alpha_modes = ["auto", "unpremultiplied", "inherit"] # from webgpu.h diff --git a/wgpu/backends/js_webgpu/__init__.py b/wgpu/backends/js_webgpu/__init__.py index e3c38021..19bfdf3b 100644 --- a/wgpu/backends/js_webgpu/__init__.py +++ b/wgpu/backends/js_webgpu/__init__.py @@ -15,37 +15,25 @@ from js import window, Uint32Array, ArrayBuffer -def translate_python_methods(js_obj): - # print("Translating methods for", js_obj) - method_map = {} - for attr in dir(js_obj): - if "_" in attr: - continue - # print("attr", attr, type(attr)) - # print("attr value", getattr(js_obj, attr), type(getattr(js_obj, attr))) - if isinstance(getattr(js_obj, attr), JsProxy): - # print("maybe method", attr) - #assume that is like a method? - target = getattr(js_obj, attr) - py_name = ''.join(['_' + c.lower() if c.isupper() else c for c in attr]).lstrip('_') - method_map[py_name] = target - py_name_async = py_name + '_async' - method_map[py_name_async] = target - py_name_sync = py_name + '_sync' - method_map[py_name_sync] = lambda *args, target=target: run_sync(target(*args)) - - for name, target in method_map.items(): - js_obj.name = target - # setattr(js_obj, name, target) - - return js_obj +def to_camel_case(snake_str): + components = snake_str.split('_') + return components[0] + ''.join(x.title() for x in components[1:]) + # for use in to_js() https://pyodide.org/en/stable/usage/api/python-api/ffi.html#pyodide.ffi.ToJsConverter def simple_js_accessor(value, convert, cache): if hasattr(value, "js"): value = value.js # print("converted to js", value) - # todo convert snake_case back to camel_case? + elif isinstance(value, structs.Struct): + value = value.__dict__ # as dict? + value = {to_camel_case(k):v for k,v in value.items()} + # recursion limit??? + # elif isinstance(value, dict): + # # convert keys to camelCase + # value = {to_camel_case(k):v for k,v in value.items()} + + # TODO: array like might need special handling like do not unpack yourself... return convert(value) # TODO: can we implement our own variant of JsProxy and PyProxy, to_js and to_py? to work with pyodide and not around it? 
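(Illustration, not part of the patch: what the snake_case-to-camelCase translation above produces, and what the converter as a whole is aiming for; `some_py_wrapper` is a hypothetical object exposing a `.js` attribute.)

    assert to_camel_case("bind_group_layouts") == "bindGroupLayouts"
    assert to_camel_case("has_dynamic_offset") == "hasDynamicOffset"
    # With simple_js_accessor as the eager_converter, a descriptor such as
    #   {"label": "demo", "bind_group_layouts": [some_py_wrapper]}
    # is intended to reach JavaScript roughly as
    #   {label: "demo", bindGroupLayouts: [some_py_wrapper.js]}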
@@ -95,33 +83,41 @@ def request_device_sync(self, **parameters): async def request_device_async(self, **parameters): device = await self.js.requestDevice(**parameters) # device = translate_python_methods(device) - return GPUDevice(device) - + return GPUDevice(device, adapter=self) + # api diff just for overview gives adaper info for now @property def summary(self): return self.adapter_info - + @property def adapter_info(self): return self.js.info class GPUDevice(classes.GPUDevice): - def __init__(self, js_device): + def __init__(self, js_device, adapter): self.js = js_device + self._adapter = adapter @property def queue(self): - # TODO: maybe needs a class... return GPUQueue(self.js.queue, self) + # API diff: useful to have? + @property + def adapter(self): + return self._adapter + def create_shader_module(self, *args, **kwargs): - # print("create_shader_module", args, kwargs) - js_sm = self.js.createShaderModule(*args, **kwargs) + js_args = to_js(args, eager_converter=simple_js_accessor) + js_kwargs = to_js(kwargs, eager_converter=simple_js_accessor) + js_sm = self.js.createShaderModule(*js_args, **js_kwargs) return GPUShaderModule(js_sm) def create_buffer(self, *args, **kwargs): - js_buf = self.js.createBuffer(*args, **kwargs) + js_args = to_js(args, eager_converter=simple_js_accessor) + js_kwargs = to_js(kwargs, eager_converter=simple_js_accessor) + js_buf = self.js.createBuffer(*js_args, **js_kwargs) return GPUBuffer(js_buf) # TODO: apidiff @@ -140,8 +136,9 @@ def create_buffer_with_data(self, *args, **kwargs): return GPUBuffer(js_buf) def create_bind_group_layout(self, *args, **kwargs): + js_args = to_js(args, eager_converter=simple_js_accessor) js_kwargs = to_js(kwargs, eager_converter=simple_js_accessor) - js_bgl = self.js.createBindGroupLayout(*args, **js_kwargs) + js_bgl = self.js.createBindGroupLayout(*js_args, **js_kwargs) return GPUBindGroupLayout(js_bgl) def create_compute_pipeline(self, *args, **kwargs): @@ -155,8 +152,13 @@ def create_compute_pipeline(self, *args, **kwargs): return GPUComputePipeline(js_cp) def create_bind_group(self, *args, **kwargs): + print("create_bind_group", args, kwargs) + kwargs = {to_camel_case(k):v for k,v in kwargs.items()} + print("converted kwargs", kwargs) + js_args = to_js(args, eager_converter=simple_js_accessor) js_kwargs = to_js(kwargs, eager_converter=simple_js_accessor) - js_bg = self.js.createBindGroup(*args, **js_kwargs) + print("js args", js_args, js_kwargs) + js_bg = self.js.createBindGroup(*js_args, **js_kwargs) return GPUBindGroup(js_bg) def create_command_encoder(self, *args, **kwargs): @@ -165,12 +167,9 @@ def create_command_encoder(self, *args, **kwargs): return GPUCommandEncoder(js_ce) def create_pipeline_layout(self, *args, **kwargs): - print("create_pipeline_layout", args, kwargs) - # translate the key to camelCase manually here! 
- kwargs["bindGroupLayouts"] = kwargs.get("bind_group_layouts", []) + kwargs = {to_camel_case(k):v for k,v in kwargs.items()} js_args = to_js(args, eager_converter=simple_js_accessor) js_kwargs = to_js(kwargs, eager_converter=simple_js_accessor) - print(js_args, js_kwargs) js_pl = self.js.createPipelineLayout(*js_args, **js_kwargs) return GPUPipelineLayout(js_pl) @@ -179,11 +178,30 @@ def create_texture(self, *args, **kwargs): js_tex = self.js.createTexture(*args, **js_kwargs) return GPUTexture(js_tex) + def create_sampler(self, *args, **kwargs): + js_kwargs = to_js(kwargs, eager_converter=simple_js_accessor) + js_samp = self.js.createSampler(*args, **js_kwargs) + return GPUSampler(js_samp) + + def create_render_pipeline(self, *args, **kwargs): + print("create_render_pipeline", args, kwargs) + kwargs = {to_camel_case(k):v for k,v in kwargs.items()} + print("converted kwargs", kwargs) + js_args = to_js(args, eager_converter=simple_js_accessor) + js_kwargs = to_js(kwargs, eager_converter=simple_js_accessor) + print("js args", js_args, js_kwargs) + js_rp = self.js.createRenderPipeline(*js_args, **js_kwargs) + return GPURenderPipeline(js_rp) class GPUShaderModule(classes.GPUShaderModule): def __init__(self, js_sm): self.js = js_sm + # part of base object because we never call super().__init__() on ours + @property + def _label(self): + return self.js.label + class GPUBuffer(classes.GPUBuffer): def __init__(self, js_buf): self.js = js_buf @@ -295,12 +313,23 @@ def read_buffer(self, buffer: GPUBuffer, buffer_offset: int=0, size: int | None temp_buffer.unmap() return res.to_py() # should give a memoryview? - def write_texture(self, *args, **kwargs): - print("GPUQueue.write_texture called with", args, kwargs) - js_args = to_js(args, eager_converter=simple_js_accessor) - js_kwargs = to_js(kwargs, eager_converter=simple_js_accessor) - print("Converted to JS args", js_args, js_kwargs) - self.js.writeTexture(*js_args, **js_kwargs) + # this one misbehaves with args or kwargs, like it seems the data gets unpacked? + def write_texture(self, destination, data, data_layout, size): + # print("GPUQueue.write_texture called with", destination, data, data_layout, size) + js_destination = to_js(destination, eager_converter=simple_js_accessor) + js_data = ArrayBuffer.new(data.nbytes) # does this actually hold any data? + # js_data = Uint32Array.new(js_data).set(data) # maybe like this???? + # print("data js type", type(js_data)) + # print(js_data) + js_data_layout = to_js(data_layout, eager_converter=simple_js_accessor) + js_size = to_js(size, eager_converter=simple_js_accessor) + self.js.writeTexture(js_destination, js_data, js_data_layout, js_size) + + # def write_texture(self, *args, **kwargs): + # js_args = to_js(args, eager_converter=simple_js_accessor) + # js_kwargs = to_js(kwargs, eager_converter=simple_js_accessor) + # self.js.writeTexture(*js_args, **js_kwargs) + class GPUPipelineLayout(classes.GPUPipelineLayout): def __init__(self, js_pl): @@ -319,6 +348,34 @@ class GPUTextureView(classes.GPUTextureView): def __init__(self, js_view): self.js = js_view +class GPUSampler(classes.GPUSampler): + def __init__(self, js_samp): + self.js = js_samp + +class GPUCanvasContext(classes.GPUCanvasContext): + # TODO update rendercanvas.html get_context to work here? + def __init__(self, canvas, present_methods): + # the super init also does this... maybe we can call it? 
+ super().__init__(canvas, present_methods) + + + @property + def js(self) -> JsProxy: + return self.canvas.html_context + + # undo the api diff + def get_preferred_format(self, adapter: GPUAdapter | None) -> enums.TextureFormat: + return gpu.js.getPreferredCanvasFormat() + + def configure(self, *args, **kwargs): + js_args = to_js(args, eager_converter=simple_js_accessor) + js_kwargs = to_js(kwargs, eager_converter=simple_js_accessor) + self.js.configure(*js_args, **js_kwargs) + +class GPURenderPipeline(classes.GPURenderPipeline): + def __init__(self, js_rp): + self.js = js_rp + # finally register the backend gpu = GPU() _register_backend(gpu) From a42e6f2097413368adaa066817b07e91cd2ef5ff Mon Sep 17 00:00:00 2001 From: Jan Date: Sat, 27 Sep 2025 04:47:17 +0200 Subject: [PATCH 05/13] more headache and shortcuts --- examples/browser.html | 1 + examples/cube.py | 26 ++++---- wgpu/backends/js_webgpu/__init__.py | 93 +++++++++++++++++------------ 3 files changed, 70 insertions(+), 50 deletions(-) diff --git a/examples/browser.html b/examples/browser.html index 7b4a2150..8f8c1c13 100644 --- a/examples/browser.html +++ b/examples/browser.html @@ -21,6 +21,7 @@ // Load Pyodide let pyodide = await loadPyodide(); + pyodide.setDebug(true); await pyodide.loadPackage("micropip"); const micropip = pyodide.pyimport("micropip"); diff --git a/examples/cube.py b/examples/cube.py index caace190..2ca60a80 100644 --- a/examples/cube.py +++ b/examples/cube.py @@ -35,10 +35,10 @@ def setup_drawing_sync( adapter = wgpu.gpu.request_adapter_sync(power_preference=power_preference) device = adapter.request_device_sync(required_limits=limits) - pipeline_layout, uniform_buffer, bind_groups = create_pipeline_layout(device) - pipeline_kwargs = get_render_pipeline_kwargs(canvas, device, pipeline_layout) - + pipeline_kwargs = get_render_pipeline_kwargs(canvas, device, None) render_pipeline = device.create_render_pipeline(**pipeline_kwargs) + _, uniform_buffer, bind_groups = create_pipeline_layout(device, render_pipeline) + return get_draw_function( canvas, device, render_pipeline, uniform_buffer, bind_groups, asynchronous=False @@ -55,10 +55,10 @@ async def setup_drawing_async(canvas, limits=None): adapter = await wgpu.gpu.request_adapter_async(power_preference="high-performance") device = await adapter.request_device_async(required_limits=limits) - pipeline_layout, uniform_buffer, bind_groups = create_pipeline_layout(device) - pipeline_kwargs = get_render_pipeline_kwargs(canvas, device, pipeline_layout) - + pipeline_kwargs = get_render_pipeline_kwargs(canvas, device, None) render_pipeline = await device.create_render_pipeline_async(**pipeline_kwargs) + _, uniform_buffer, bind_groups = create_pipeline_layout(device, render_pipeline) + return get_draw_function( canvas, device, render_pipeline, uniform_buffer, bind_groups, asynchronous=True @@ -74,14 +74,14 @@ def get_render_pipeline_kwargs( context = canvas.get_context("wgpu") print("context:", context) render_texture_format = context.get_preferred_format(device.adapter) - print("render_texture_format:", render_texture_format) + print(render_texture_format) context.configure(device=device, format=render_texture_format) shader = device.create_shader_module(code=shader_source) # wgpu.RenderPipelineDescriptor return wgpu.RenderPipelineDescriptor( - layout=pipeline_layout, + layout="auto", vertex=wgpu.VertexState( module=shader, entry_point="vs_main", @@ -122,7 +122,7 @@ def get_render_pipeline_kwargs( ) -def create_pipeline_layout(device: wgpu.GPUDevice): +def 
create_pipeline_layout(device: wgpu.GPUDevice, pipeline: wgpu.GPURenderPipeline): # Create uniform buffer - data is uploaded each frame uniform_buffer = device.create_buffer( size=uniform_data.nbytes, @@ -180,7 +180,7 @@ def create_pipeline_layout(device: wgpu.GPUDevice): wgpu.BindGroupLayoutEntry( binding=0, visibility=wgpu.ShaderStage.VERTEX | wgpu.ShaderStage.FRAGMENT, - buffer={}, + buffer=wgpu.BufferBindingLayout(), ) ) @@ -194,7 +194,7 @@ def create_pipeline_layout(device: wgpu.GPUDevice): wgpu.BindGroupLayoutEntry( binding=1, visibility=wgpu.ShaderStage.FRAGMENT, - texture={}, + texture=wgpu.TextureBindingLayout(), ) ) @@ -206,7 +206,7 @@ def create_pipeline_layout(device: wgpu.GPUDevice): ) bind_groups_layout_entries[0].append( wgpu.BindGroupLayoutEntry( - binding=2, visibility=wgpu.ShaderStage.FRAGMENT, sampler={} + binding=2, visibility=wgpu.ShaderStage.FRAGMENT, sampler=wgpu.SamplerBindingLayout() ) ) @@ -220,7 +220,7 @@ def create_pipeline_layout(device: wgpu.GPUDevice): bind_group_layout = device.create_bind_group_layout(entries=layout_entries) bind_group_layouts.append(bind_group_layout) bind_groups.append( - device.create_bind_group(layout=bind_group_layout, entries=entries) + device.create_bind_group(layout=pipeline.get_bind_group_layout(0), entries=entries) ) pipeline_layout = device.create_pipeline_layout( diff --git a/wgpu/backends/js_webgpu/__init__.py b/wgpu/backends/js_webgpu/__init__.py index 19bfdf3b..1ec1dee4 100644 --- a/wgpu/backends/js_webgpu/__init__.py +++ b/wgpu/backends/js_webgpu/__init__.py @@ -21,19 +21,37 @@ def to_camel_case(snake_str): # for use in to_js() https://pyodide.org/en/stable/usage/api/python-api/ffi.html#pyodide.ffi.ToJsConverter +# you have to do the recursion yourself... def simple_js_accessor(value, convert, cache): if hasattr(value, "js"): - value = value.js + # print("has js", value) + return value.js # print("converted to js", value) elif isinstance(value, structs.Struct): - value = value.__dict__ # as dict? - value = {to_camel_case(k):v for k,v in value.items()} - # recursion limit??? - # elif isinstance(value, dict): - # # convert keys to camelCase - # value = {to_camel_case(k):v for k,v in value.items()} - - # TODO: array like might need special handling like do not unpack yourself... + result = {} + cache(value, result) + for k, v in value.__dict__.items(): + if v is None: + continue + camel_key = to_camel_case(k) + result[camel_key] = convert(v) + return result + elif isinstance(value, dict): + result = {} + cache(value, result) + for k, v in value.items(): + if v is None: + continue + camel_key = to_camel_case(k) if isinstance(k, str) else k + result[camel_key] = convert(v) + return result + elif isinstance(value, (tuple, list)): + result = [] + cache(value, result) + for v in value: + result.append(convert(v)) + return result + return convert(value) # TODO: can we implement our own variant of JsProxy and PyProxy, to_js and to_py? to work with pyodide and not around it? 
@@ -111,13 +129,13 @@ def adapter(self): def create_shader_module(self, *args, **kwargs): js_args = to_js(args, eager_converter=simple_js_accessor) js_kwargs = to_js(kwargs, eager_converter=simple_js_accessor) - js_sm = self.js.createShaderModule(*js_args, **js_kwargs) + js_sm = self.js.createShaderModule(*js_args, js_kwargs) return GPUShaderModule(js_sm) def create_buffer(self, *args, **kwargs): js_args = to_js(args, eager_converter=simple_js_accessor) js_kwargs = to_js(kwargs, eager_converter=simple_js_accessor) - js_buf = self.js.createBuffer(*js_args, **js_kwargs) + js_buf = self.js.createBuffer(*js_args, js_kwargs) return GPUBuffer(js_buf) # TODO: apidiff @@ -135,10 +153,13 @@ def create_buffer_with_data(self, *args, **kwargs): # print(dir(js_buf)) return GPUBuffer(js_buf) + # because there is no default and it has to be one of the binding group layouts this might need a custom check -.- def create_bind_group_layout(self, *args, **kwargs): + print("create_bind_group_layout", args, kwargs) js_args = to_js(args, eager_converter=simple_js_accessor) js_kwargs = to_js(kwargs, eager_converter=simple_js_accessor) - js_bgl = self.js.createBindGroupLayout(*js_args, **js_kwargs) + print("JS create_bind_group_layout", js_args, js_kwargs, type(js_kwargs["entries"])) + js_bgl = self.js.createBindGroupLayout(*js_args, js_kwargs) return GPUBindGroupLayout(js_bgl) def create_compute_pipeline(self, *args, **kwargs): @@ -148,26 +169,21 @@ def create_compute_pipeline(self, *args, **kwargs): js_kwargs = to_js(kwargs, eager_converter=simple_js_accessor) # print("create_compute_pipeline", args, js_kwargs) # print(dir(js_kwargs)) - js_cp = self.js.createComputePipeline(*args, **js_kwargs) + js_cp = self.js.createComputePipeline(*args, js_kwargs) return GPUComputePipeline(js_cp) def create_bind_group(self, *args, **kwargs): - print("create_bind_group", args, kwargs) - kwargs = {to_camel_case(k):v for k,v in kwargs.items()} - print("converted kwargs", kwargs) js_args = to_js(args, eager_converter=simple_js_accessor) js_kwargs = to_js(kwargs, eager_converter=simple_js_accessor) - print("js args", js_args, js_kwargs) - js_bg = self.js.createBindGroup(*js_args, **js_kwargs) + js_bg = self.js.createBindGroup(*js_args, js_kwargs) return GPUBindGroup(js_bg) def create_command_encoder(self, *args, **kwargs): js_kwargs = to_js(kwargs, eager_converter=simple_js_accessor) - js_ce = self.js.createCommandEncoder(*args, **js_kwargs) + js_ce = self.js.createCommandEncoder(*args, js_kwargs) return GPUCommandEncoder(js_ce) def create_pipeline_layout(self, *args, **kwargs): - kwargs = {to_camel_case(k):v for k,v in kwargs.items()} js_args = to_js(args, eager_converter=simple_js_accessor) js_kwargs = to_js(kwargs, eager_converter=simple_js_accessor) js_pl = self.js.createPipelineLayout(*js_args, **js_kwargs) @@ -175,27 +191,30 @@ def create_pipeline_layout(self, *args, **kwargs): def create_texture(self, *args, **kwargs): js_kwargs = to_js(kwargs, eager_converter=simple_js_accessor) - js_tex = self.js.createTexture(*args, **js_kwargs) + js_tex = self.js.createTexture(*args, js_kwargs) return GPUTexture(js_tex) def create_sampler(self, *args, **kwargs): js_kwargs = to_js(kwargs, eager_converter=simple_js_accessor) - js_samp = self.js.createSampler(*args, **js_kwargs) + js_samp = self.js.createSampler(*args, js_kwargs) return GPUSampler(js_samp) + # breaks because we access the same module twice and might be losing it to GC or something -.- def create_render_pipeline(self, *args, **kwargs): print("create_render_pipeline", args, 
kwargs) - kwargs = {to_camel_case(k):v for k,v in kwargs.items()} - print("converted kwargs", kwargs) js_args = to_js(args, eager_converter=simple_js_accessor) js_kwargs = to_js(kwargs, eager_converter=simple_js_accessor) - print("js args", js_args, js_kwargs) - js_rp = self.js.createRenderPipeline(*js_args, **js_kwargs) + print("JS create_render_pipeline", js_args, js_kwargs) + js_rp = self.js.createRenderPipeline(*js_args, js_kwargs) return GPURenderPipeline(js_rp) class GPUShaderModule(classes.GPUShaderModule): def __init__(self, js_sm): - self.js = js_sm + self._js = js_sm + + @property + def js(self): + return self._js # hope we don't lose the reference after one access? # part of base object because we never call super().__init__() on ours @property @@ -240,13 +259,13 @@ def __init__(self, js_ce): def begin_compute_pass(self, *args, **kwargs): # TODO: no args, should be empty maybe? js_kwargs = to_js(kwargs, eager_converter=simple_js_accessor) - js_cp = self.js.beginComputePass(*args, **js_kwargs) + js_cp = self.js.beginComputePass(*args, js_kwargs) return GPUComputePassEncoder(js_cp) def finish(self, *args, **kwargs): js_args = to_js(args, eager_converter=simple_js_accessor) js_kwargs = to_js(kwargs, eager_converter=simple_js_accessor) - js_cmd_buf = self.js.finish(*js_args, **js_kwargs) + js_cmd_buf = self.js.finish(*js_args, js_kwargs) return GPUCommandBuffer(js_cmd_buf) class GPUComputePassEncoder(classes.GPUComputePassEncoder): @@ -256,21 +275,21 @@ def __init__(self, js_cp): def set_pipeline(self, *args, **kwargs): js_args = to_js(args, eager_converter=simple_js_accessor) js_kwargs = to_js(kwargs, eager_converter=simple_js_accessor) - self.js.setPipeline(*js_args, **js_kwargs) + self.js.setPipeline(*js_args, js_kwargs) def set_bind_group(self, *args, **kwargs): js_args = to_js(args, eager_converter=simple_js_accessor) js_kwargs = to_js(kwargs, eager_converter=simple_js_accessor) - self.js.setBindGroup(*js_args, **js_kwargs) + self.js.setBindGroup(*js_args, js_kwargs) def dispatch_workgroups(self, *args, **kwargs): js_args = to_js(args, eager_converter=simple_js_accessor) js_kwargs = to_js(kwargs, eager_converter=simple_js_accessor) - self.js.dispatchWorkgroups(*js_args, **js_kwargs) + self.js.dispatchWorkgroups(*js_args, js_kwargs) def end(self, *args, **kwargs): js_kwargs = to_js(kwargs, eager_converter=simple_js_accessor) - self.js.end(*args, **js_kwargs) + self.js.end(*args, js_kwargs) class GPUCommandBuffer(classes.GPUCommandBuffer): def __init__(self, js_cb): @@ -284,7 +303,7 @@ def __init__(self, js_queue, device: GPUDevice): def submit(self, *args, **kwargs): js_args = to_js(args, eager_converter=simple_js_accessor) js_kwargs = to_js(kwargs, eager_converter=simple_js_accessor) - self.js.submit(*js_args, **js_kwargs) + self.js.submit(*js_args, js_kwargs) # TODO: api diff def read_buffer(self, buffer: GPUBuffer, buffer_offset: int=0, size: int | None = None) -> memoryview: @@ -328,7 +347,7 @@ def write_texture(self, destination, data, data_layout, size): # def write_texture(self, *args, **kwargs): # js_args = to_js(args, eager_converter=simple_js_accessor) # js_kwargs = to_js(kwargs, eager_converter=simple_js_accessor) - # self.js.writeTexture(*js_args, **js_kwargs) + # self.js.writeTexture(*js_args, js_kwargs) class GPUPipelineLayout(classes.GPUPipelineLayout): @@ -341,7 +360,7 @@ def __init__(self, js_tex): def create_view(self, *args, **kwargs): js_kwargs = to_js(kwargs, eager_converter=simple_js_accessor) - js_view = self.js.createView(*args, **js_kwargs) + js_view = 
self.js.createView(*args, js_kwargs) return GPUTextureView(js_view) class GPUTextureView(classes.GPUTextureView): @@ -370,7 +389,7 @@ def get_preferred_format(self, adapter: GPUAdapter | None) -> enums.TextureForma def configure(self, *args, **kwargs): js_args = to_js(args, eager_converter=simple_js_accessor) js_kwargs = to_js(kwargs, eager_converter=simple_js_accessor) - self.js.configure(*js_args, **js_kwargs) + self.js.configure(*js_args, js_kwargs) class GPURenderPipeline(classes.GPURenderPipeline): def __init__(self, js_rp): From 0618db68e70b5c1bc44ef00afdb5e61d84e3a005 Mon Sep 17 00:00:00 2001 From: Jan Date: Sat, 27 Sep 2025 05:43:18 +0200 Subject: [PATCH 06/13] first success! --- examples/browser.html | 8 +-- examples/cube.py | 19 +++---- examples/triangle.py | 9 ++-- wgpu/backends/js_webgpu/__init__.py | 80 ++++++++++++++++++++++------- 4 files changed, 82 insertions(+), 34 deletions(-) diff --git a/examples/browser.html b/examples/browser.html index 8f8c1c13..ff063795 100644 --- a/examples/browser.html +++ b/examples/browser.html @@ -2,12 +2,12 @@ - RenderCanvas HTML canvas via Pyodide:
+ wgpu-py on an HTML canvas via RenderCanvas and Pyodide:

-some text below the canvas! +pixels got drawn! +
@@ -14,15 +14,17 @@ // fetch the file locally for easier scripting // --allow-file-access-from-files or local webserver // TODO: replace the actual code here (unless you have the module) - // pythonCode = await (await fetch("compute_noop.py")).text(); + pythonCode = await (await fetch("compute_noop.py")).text(); // pythonCode = await (await fetch("compute_matmul.py")).text(); // pythonCode = await (await fetch("cube.py")).text(); - pythonCode = await (await fetch("triangle.py")).text(); + // pythonCode = await (await fetch("triangle.py")).text(); // pythonCode = await (await fetch("triangle_glsl.py")).text(); // Load Pyodide + console.log("Loading pyodide..."); let pyodide = await loadPyodide(); + console.log("Pyodide awaiting"); pyodide.setDebug(true); await pyodide.loadPackage("micropip"); diff --git a/wgpu/backends/js_webgpu/__init__.py b/wgpu/backends/js_webgpu/__init__.py index 01a9f5cc..c739d56f 100644 --- a/wgpu/backends/js_webgpu/__init__.py +++ b/wgpu/backends/js_webgpu/__init__.py @@ -11,12 +11,17 @@ from ... import classes, structs, enums, flags from pyodide.ffi import run_sync, JsProxy, to_js -from js import window, Uint32Array, ArrayBuffer, Float32Array, Uint8Array, BigInt +from js import window, Uint32Array, ArrayBuffer, Float32Array, Uint8Array, BigInt, Object, undefined def to_camel_case(snake_str): components = snake_str.split('_') - return components[0] + ''.join(x.title() for x in components[1:]) + res = components[0] + ''.join(x.title() for x in components[1:]) + # maybe keywords are a problem? + # https://pyodide.org/en/stable/usage/faq.html#how-can-i-access-javascript-objects-attributes-in-python-if-their-names-are-python-keywords + # if res in ["type", "format"]: + # res += "_" + return res # for use in to_js() https://pyodide.org/en/stable/usage/api/python-api/ffi.html#pyodide.ffi.ToJsConverter @@ -35,20 +40,17 @@ def simple_js_accessor(value, convert, cache): camel_key = to_camel_case(k) result[camel_key] = convert(v) return result + # this might recursively call itself... + # maybe use a map? or do a dict_converted? elif isinstance(value, dict): result = {} # cache(value, result) for k, v in value.items(): camel_key = to_camel_case(k) if isinstance(k, str) else k result[camel_key] = convert(v) + if len(result) == 0: + return undefined # or Object.new() ? return result - elif isinstance(value, (tuple, list)): - result = [] - # cache(value, result) - for v in value: - result.append(convert(v)) - return result - # is this a default conversation? return convert(value) # TODO: can we implement our own variant of JsProxy and PyProxy, to_js and to_py? to work with pyodide and not around it? @@ -186,33 +188,83 @@ def create_buffer_with_data(self, *args, **kwargs): return GPUBuffer(label, js_buf, self, data_size, usage, enums.BufferMapState.unmapped) - # because there is no default and it has to be one of the binding group layouts this might need a custom check -.- - def create_bind_group_layout(self, *args, **kwargs): - # print("create_bind_group_layout", args, kwargs) - js_args = to_js(args, eager_converter=simple_js_accessor) - js_kwargs = to_js(kwargs, eager_converter=simple_js_accessor) - # print("JS create_bind_group_layout", js_args, js_kwargs, type(js_kwargs["entries"])) - js_bgl = self._internal.createBindGroupLayout(*js_args, js_kwargs) - - label = kwargs.get("label", "") + # or here??? 
+ def create_bind_group_layout(self, *, label: str = "", entries: list[structs.BindGroupLayoutEntryStruct]) -> classes.GPUBindGroupLayout: + empty_value = undefined # figure out what pyodide is happy with + js_entries = [] + for entry in entries: + # we need exactly one of them needs to exist: + # https://www.w3.org/TR/webgpu/#dictdef-gpubindgrouplayoutentry + buffer = entry.get("buffer") + sampler = entry.get("sampler") + texture = entry.get("texture") + storage_texture = entry.get("storage_texture") + external_texture = entry.get("external_texture") # not sure if exists in wgpu-native, but let's have it anyway. + + if buffer is not None: + sampler = texture = storage_texture = external_texture = empty_value + # or struct.BufferBindingLayout? + buffer = { + "type": buffer.get("type", enums.BufferBindingType.uniform), + "hasDynamicOffset": buffer.get("has_dynamic_offset", False), + "minBindingSize": buffer.get("min_binding_size", 0) + } + buffer = to_js(buffer, depth=1) + elif sampler is not None: + buffer = texture = storage_texture = external_texture = empty_value + sampler = { + "type": sampler.get("type", enums.SamplerBindingType.filtering), + } + elif texture is not None: + buffer = sampler = storage_texture = external_texture = empty_value + texture = { + "sampleType": texture.get("sample_type", enums.TextureSampleType.float), + "viewDimension": texture.get("view_dimension", enums.TextureViewDimension.d2), + "multisampled": texture.get("multisampled", False), + } + elif storage_texture is not None: + buffer = sampler = texture = external_texture = empty_value + storage_texture = { + "access": storage_texture.get("access", enums.StorageTextureAccess.write_only), + "format": storage_texture.get("format"), + "viewDimension": storage_texture.get("view_dimension", enums.TextureViewDimension.d2), + } + elif external_texture is not None: + buffer = sampler = texture = storage_texture = empty_value + external_texture = { + # https://www.w3.org/TR/webgpu/#dictdef-gpuexternaltexturebindinglayout + # there is nothing here... which makes this an empty dict/set? + } + else: + raise ValueError( + "BindGroupLayoutEntry must have exactly one of buffer, sampler, texture, storage_texture, external_texture set. Got none." + ) + js_entry = { + "binding": entry.get("binding"), + "visibility": entry.get("visibility"), + "buffer": buffer, + "sampler": sampler, + "texture": texture, + "storage_texture": storage_texture, + "external_texture": external_texture, + } + js_entries.append(js_entry) + + js_bgl = self._internal.createBindGroupLayout(label=label, entries=js_entries) return classes.GPUBindGroupLayout(label, js_bgl, self) def create_compute_pipeline(self, *args, **kwargs): - # TODO: can we automatically get the js object when it's called somehwere? maybe by implementing _to_js? 
- # print("create_compute_pipeline", args, kwargs) - # kwargs["compute"]["module"] = kwargs["compute"]["module"].to_js() js_kwargs = to_js(kwargs, eager_converter=simple_js_accessor) - # print("create_compute_pipeline", args, js_kwargs) - # print(dir(js_kwargs)) js_cp = self._internal.createComputePipeline(*args, js_kwargs) label = kwargs.get("label", "") return GPUComputePipeline(label, js_cp, self) - def create_bind_group(self, *args, **kwargs): - js_args = to_js(args, eager_converter=simple_js_accessor) + # I think the entries arg gers unpacked with a single dict inside, so trying to do the list around that manually + def create_bind_group(self, **kwargs) -> classes.GPUBindGroup: js_kwargs = to_js(kwargs, eager_converter=simple_js_accessor) - js_bg = self._internal.createBindGroup(*js_args, js_kwargs) + js_kwargs = to_js(js_kwargs) # to get the actual map? + js_bg = self._internal.createBindGroup(js_kwargs) label = kwargs.get("label", "") return classes.GPUBindGroup(label, js_bg, self) @@ -224,12 +276,11 @@ def create_command_encoder(self, *args, **kwargs): label = kwargs.get("label", "") return GPUCommandEncoder(label, js_ce, self) - def create_pipeline_layout(self, *args, **kwargs): - js_args = to_js(args, eager_converter=simple_js_accessor) - js_kwargs = to_js(kwargs, eager_converter=simple_js_accessor) - js_pl = self._internal.createPipelineLayout(*js_args, js_kwargs) + # or was it here? + def create_pipeline_layout(self, *, label="", bind_group_layouts: list[classes.GPUBindGroupLayout]) -> classes.GPUPipelineLayout: + js_bind_group_layouts = [to_js(bgl, eager_converter=simple_js_accessor) for bgl in bind_group_layouts] + js_pl = self._internal.createPipelineLayout(label=label, bindGroupLayouts=js_bind_group_layouts) - label = kwargs.get("label", "") return classes.GPUPipelineLayout(label, js_pl, self) def create_texture(self, *args, **kwargs): diff --git a/wgpu/utils/compute.py b/wgpu/utils/compute.py index b6f9d7c3..4d02020c 100644 --- a/wgpu/utils/compute.py +++ b/wgpu/utils/compute.py @@ -158,6 +158,10 @@ def compute_with_buffers(input_arrays, output_arrays, shader, constants=None, n= } ) + print(binding_layouts[0]["buffer"]["type"]) + print(type(binding_layouts[0]["buffer"]["type"])) + print(isinstance(binding_layouts[0]["buffer"]["type"], wgpu.BufferBindingType)) + # Put buffers together bind_group_layout = device.create_bind_group_layout(entries=binding_layouts) pipeline_layout = device.create_pipeline_layout( From 431b7013d712871b275808773129e58ceac7200c Mon Sep 17 00:00:00 2001 From: Jan Date: Mon, 29 Sep 2025 03:06:01 +0200 Subject: [PATCH 10/13] move data with .assign --- examples/browser.html | 4 +- wgpu/_classes.py | 2 +- wgpu/backends/js_webgpu/__init__.py | 118 ++++++++++++++++++---------- wgpu/utils/compute.py | 4 - 4 files changed, 78 insertions(+), 50 deletions(-) diff --git a/examples/browser.html b/examples/browser.html index d40c899b..57a14c94 100644 --- a/examples/browser.html +++ b/examples/browser.html @@ -14,9 +14,9 @@ // fetch the file locally for easier scripting // --allow-file-access-from-files or local webserver // TODO: replace the actual code here (unless you have the module) - pythonCode = await (await fetch("compute_noop.py")).text(); + // pythonCode = await (await fetch("compute_noop.py")).text(); // pythonCode = await (await fetch("compute_matmul.py")).text(); - // pythonCode = await (await fetch("cube.py")).text(); + pythonCode = await (await fetch("cube.py")).text(); // pythonCode = await (await fetch("triangle.py")).text(); // pythonCode = 
await (await fetch("triangle_glsl.py")).text(); diff --git a/wgpu/_classes.py b/wgpu/_classes.py index c53966b3..73894860 100644 --- a/wgpu/_classes.py +++ b/wgpu/_classes.py @@ -1885,7 +1885,7 @@ def set_index_buffer( call to `GPUDevice.create_render_pipeline()`, it must match. offset (int): The byte offset in the buffer. Default 0. size (int): The number of bytes to use. If zero, the remaining size - (after offset) of the buffer is used. Default 0. + (after offset) of the buffer is used. """ raise NotImplementedError() diff --git a/wgpu/backends/js_webgpu/__init__.py b/wgpu/backends/js_webgpu/__init__.py index c739d56f..82c66291 100644 --- a/wgpu/backends/js_webgpu/__init__.py +++ b/wgpu/backends/js_webgpu/__init__.py @@ -49,7 +49,7 @@ def simple_js_accessor(value, convert, cache): camel_key = to_camel_case(k) if isinstance(k, str) else k result[camel_key] = convert(v) if len(result) == 0: - return undefined # or Object.new() ? + return Object.new() # maybe this? return result return convert(value) @@ -171,21 +171,17 @@ def create_buffer(self, *args, **kwargs): return GPUBuffer(label, js_buf, self, size, usage, map_state) # TODO: apidiff rewritten so we avoid the buggy mess in map_write for a bit. - def create_buffer_with_data(self, *args, **kwargs): - kwargs["mappedAtCreation"] = True - data = kwargs.get("data") - data_size = (data.nbytes + 3) & ~3 # align to 4 bytes - kwargs["size"] = data_size - # print(data_size) - js_buf = self._internal.createBuffer(*args, **kwargs) - # TODO: dtype? (always cast to Uint32 I guess, only bytes should matter...) - Uint32Array.new(js_buf.getMappedRange()).set(kwargs["data"]) - js_buf.unmap() - # print(dir(js_buf)) + def create_buffer_with_data(self, *, label="", data, usage: flags.BufferUsageFlags) -> classes.GPUBuffer: - label = kwargs.get("label", "") - usage = kwargs.get("usage") + data = memoryview(data).cast("B") # unit8 + data_size = (data.nbytes + 3) & ~3 # align to 4 bytes + size = BigInt(data_size) + # if it's a Descriptor you need the keywords + js_buf = self._internal.createBuffer(label=label, size=size, usage=usage, mappedAtCreation=True) + mapping_buffer = Uint8Array.new(js_buf.getMappedRange(BigInt(0), size)) + mapping_buffer.assign(data) #.set only works with JS array I think... + js_buf.unmap() return GPUBuffer(label, js_buf, self, data_size, usage, enums.BufferMapState.unmapped) # or here??? @@ -263,7 +259,7 @@ def create_compute_pipeline(self, *args, **kwargs): # I think the entries arg gers unpacked with a single dict inside, so trying to do the list around that manually def create_bind_group(self, **kwargs) -> classes.GPUBindGroup: js_kwargs = to_js(kwargs, eager_converter=simple_js_accessor) - js_kwargs = to_js(js_kwargs) # to get the actual map? + js_kwargs = to_js(js_kwargs) # to get the actual map? (should happen on the function call anyways...) js_bg = self._internal.createBindGroup(js_kwargs) label = kwargs.get("label", "") @@ -314,6 +310,8 @@ def create_render_pipeline(self, *args, **kwargs): kwargs["vertex"] = to_js(kwargs["vertex"], eager_converter=simple_js_accessor) js_args = to_js(args, eager_converter=simple_js_accessor) js_kwargs = to_js(kwargs, eager_converter=simple_js_accessor) + + # js_kwargs = to_js(js_kwargs) # to get the actual map? 
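
# --- illustrative aside (not part of the patch) -------------------------------
# create_buffer_with_data and write_mapped above both reduce to the same two
# steps: view the input as raw bytes and round the byte count up to the 4-byte
# multiple that WebGPU expects for buffer sizes and writes. That part is plain
# Python and can be tested without a browser; `as_aligned_bytes` is a
# hypothetical helper name used only for this sketch.
import ctypes

def as_aligned_bytes(data):
    """Return (uint8 memoryview of *data*, size rounded up to a multiple of 4)."""
    view = memoryview(data).cast("B")      # uint8 view, independent of the source dtype
    aligned_size = (view.nbytes + 3) & ~3  # e.g. 5 -> 8, 12 -> 12
    return view, aligned_size

# Example, using a ctypes array as a stand-in for a numpy array:
_vertices = (ctypes.c_float * 3)(1.0, 2.0, 3.0)   # 12 bytes
assert as_aligned_bytes(_vertices)[1] == 12
assert as_aligned_bytes(b"\x01\x02\x03\x04\x05")[1] == 8
# -------------------------------------------------------------------------------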
js_rp = self._internal.createRenderPipeline(*js_args, js_kwargs) label = kwargs.get("label", "") @@ -331,15 +329,20 @@ def write_mapped(self, data, buffer_offset: int | None = None): # TODO: get dtype if self.map_state != enums.BufferMapState.mapped: raise RuntimeError(f"Can only write to a buffer if its mapped: {self.map_state=}") + + # make sure it's in a known datatype??? + data = memoryview(data).cast("B") + size = (data.nbytes + 3) & ~3 + # GPUSIze64 type if buffer_offset is None: buffer_offset = 0 js_offset = BigInt(buffer_offset) - js_size = BigInt(data.nbytes) + js_size = BigInt(size) - # TODO: try to make theses args work in the first place -.- - array_buf = self._internal.getMappedRange() - Uint32Array.new(array_buf).set(data) + # these can't be passed as keyword arguments I guess... + array_buf = self._internal.getMappedRange(js_offset, js_size) + Uint8Array.new(array_buf).assign(data) def map_sync(self, mode=None, offset=0, size=None): return run_sync(self.map_async(mode, offset, size)) @@ -395,10 +398,17 @@ def set_pipeline(self, *args, **kwargs): js_kwargs = to_js(kwargs, eager_converter=simple_js_accessor) self._internal.setPipeline(*js_args, js_kwargs) - def set_bind_group(self, *args, **kwargs): - js_args = to_js(args, eager_converter=simple_js_accessor) - js_kwargs = to_js(kwargs, eager_converter=simple_js_accessor) - self._internal.setBindGroup(*js_args, js_kwargs) + # function has overloads! + def set_bind_group( + self, + index:int, + bind_group: classes.GPUBindGroup, + dynamic_offsets_data: list[int] = (), + dynamic_offsets_data_start = None, + dynamic_offsets_data_length = None + ) -> None: + + self._internal.setBindGroup(index, bind_group._internal, dynamic_offsets_data) def dispatch_workgroups(self, *args, **kwargs): js_args = to_js(args, eager_converter=simple_js_accessor) @@ -441,15 +451,23 @@ def read_buffer(self, buffer: GPUBuffer, buffer_offset: int=0, size: int | None return res.to_py() # should give a memoryview? # this one misbehaves with args or kwargs, like it seems the data gets unpacked? - def write_texture(self, destination, data, data_layout, size): - # print("GPUQueue.write_texture called with", destination, data, data_layout, size) + def write_texture(self, + destination: structs.TexelCopyTextureInfoStruct | None = None, + data: memoryview | None = None, + data_layout: structs.TexelCopyBufferLayoutStruct | None = None, + size: tuple[int, int, int] | structs.Extent3DStruct | None = None, + ) -> None: js_destination = to_js(destination, eager_converter=simple_js_accessor) - js_data = ArrayBuffer.new(data.nbytes) # does this actually hold any data? - # js_data = Uint32Array.new(js_data).set(data) # maybe like this???? 
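
# --- illustrative aside (not part of the patch) -------------------------------
# "function has overloads!" above refers to WebGPU's setBindGroup, which accepts
# either (index, bindGroup, dynamicOffsets-sequence) or (index, bindGroup,
# offsetsArray, offsetsStart, offsetsLength). A tiny dispatcher can normalise
# the Python-side arguments before the JS call; `build_set_bind_group_args` is a
# hypothetical name used only for this sketch.
def build_set_bind_group_args(index, js_bind_group, offsets=(), start=None, length=None):
    """Return the positional argument tuple for the matching JS overload."""
    if start is None and length is None:
        return (index, js_bind_group, list(offsets))       # sequence overload
    start = 0 if start is None else start
    length = len(offsets) if length is None else length
    return (index, js_bind_group, offsets, start, length)  # typed-array overload

# Example: the simple case used by the compute/render passes in this patch.
assert build_set_bind_group_args(0, "js-group") == (0, "js-group", [])
# -------------------------------------------------------------------------------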
-        # print("data js type", type(js_data))
-        # print(js_data)
+
+        data = memoryview(data).cast("B")
+        data_size = (data.nbytes + 3) & ~3  # align to 4 bytes
+        js_data = Uint8Array.new(data_size)
+        js_data.assign(data)
+
+
         js_data_layout = to_js(data_layout, eager_converter=simple_js_accessor)
         js_size = to_js(size, eager_converter=simple_js_accessor)
+
         self._internal.writeTexture(js_destination, js_data, js_data_layout, js_size)

     # def write_texture(self, *args, **kwargs):
@@ -517,20 +535,34 @@ class GPURenderPassEncoder(classes.GPURenderPassEncoder):
     def set_pipeline(self, pipeline: GPURenderPipeline):
         self._internal.setPipeline(pipeline._internal)

-    def set_index_buffer(self, *args, **kwargs):
-        js_args = to_js(args, eager_converter=simple_js_accessor)
-        js_kwargs = to_js(kwargs, eager_converter=simple_js_accessor)
-        self._internal.setIndexBuffer(*js_args, js_kwargs)
-
-    def set_vertex_buffer(self, *args, **kwargs):
-        js_args = to_js(args, eager_converter=simple_js_accessor)
-        js_kwargs = to_js(kwargs, eager_converter=simple_js_accessor)
-        self._internal.setVertexBuffer(*js_args, js_kwargs)
-
-    def set_bind_group(self, *args, **kwargs):
-        js_args = to_js(args, eager_converter=simple_js_accessor)
-        js_kwargs = to_js(kwargs, eager_converter=simple_js_accessor)
-        self._internal.setBindGroup(*js_args, js_kwargs)
+    def set_index_buffer(self, buffer: GPUBuffer, format: enums.IndexFormat, offset: int = 0, size: int | None = None):
+        # GPUSize64 values can't be passed as kwargs, as they get converted to something else...
+        # they need to be positional args, then it works.
+        js_buffer = buffer._internal
+        js_format = to_js(format)
+        js_offset = BigInt(offset)
+        js_size = BigInt(size) if size is not None else js_buffer.size
+
+        self._internal.setIndexBuffer(js_buffer, js_format, js_offset, js_size)
+
+    def set_vertex_buffer(self, slot, buffer: GPUBuffer, offset=0, size: int | None = None):
+        # slot is a GPUSize32, so that works, but the others don't
+        js_offset = BigInt(offset)
+        js_size = BigInt(size) if size is not None else buffer.size
+
+        self._internal.setVertexBuffer(slot, buffer._internal, js_offset, js_size)
+
+    # function has overloads!
+ def set_bind_group( + self, + index:int, + bind_group: classes.GPUBindGroup, + dynamic_offsets_data: list[int] = (), + dynamic_offsets_data_start = None, + dynamic_offsets_data_length = None + ) -> None: + + self._internal.setBindGroup(index, bind_group._internal, dynamic_offsets_data) def draw_indexed(self, *args, **kwargs): js_args = to_js(args, eager_converter=simple_js_accessor) diff --git a/wgpu/utils/compute.py b/wgpu/utils/compute.py index 4d02020c..b6f9d7c3 100644 --- a/wgpu/utils/compute.py +++ b/wgpu/utils/compute.py @@ -158,10 +158,6 @@ def compute_with_buffers(input_arrays, output_arrays, shader, constants=None, n= } ) - print(binding_layouts[0]["buffer"]["type"]) - print(type(binding_layouts[0]["buffer"]["type"])) - print(isinstance(binding_layouts[0]["buffer"]["type"], wgpu.BufferBindingType)) - # Put buffers together bind_group_layout = device.create_bind_group_layout(entries=binding_layouts) pipeline_layout = device.create_pipeline_layout( From 0cc63816adc3f8aab18d6f00c676991c5e3b8e59 Mon Sep 17 00:00:00 2001 From: Jan Date: Mon, 29 Sep 2025 03:58:40 +0200 Subject: [PATCH 11/13] enable imgui demos --- examples/browser.html | 10 +++++--- wgpu/backends/js_webgpu/__init__.py | 40 ++++++++++++++++++++++++++--- 2 files changed, 42 insertions(+), 8 deletions(-) diff --git a/examples/browser.html b/examples/browser.html index 57a14c94..7450b0b8 100644 --- a/examples/browser.html +++ b/examples/browser.html @@ -16,10 +16,11 @@ // TODO: replace the actual code here (unless you have the module) // pythonCode = await (await fetch("compute_noop.py")).text(); // pythonCode = await (await fetch("compute_matmul.py")).text(); - pythonCode = await (await fetch("cube.py")).text(); + // pythonCode = await (await fetch("cube.py")).text(); // pythonCode = await (await fetch("triangle.py")).text(); - // pythonCode = await (await fetch("triangle_glsl.py")).text(); - + pythonCode = await (await fetch("imgui_backend_sea.py")).text(); + // pythonCode = await (await fetch("imgui_renderer_sea.py")).text(); + // pythonCode = await (await fetch("imgui_basic_example.py")).text(); // Load Pyodide console.log("Loading pyodide..."); @@ -30,9 +31,10 @@ await pyodide.loadPackage("micropip"); const micropip = pyodide.pyimport("micropip"); await micropip.install('numpy'); + await micropip.install('imgui-bundle'); // await micropip.install('rendercanvas'); await micropip.install('../../../../../../../projects/pygfx-repos/rendercanvas/dist/rendercanvas-2.2.1-py3-none-any.whl'); // local wheel for auto testing - await micropip.install('../dist/wgpu-0.24.0-py3-none-any.whl'); // from PyPI + await micropip.install('../dist/wgpu-0.24.0-py3-none-any.whl'); // also local, probably need to name the wheel differently. // Run the Python code async because some calls are async it seems. 
        pyodide.runPythonAsync(pythonCode);
diff --git a/wgpu/backends/js_webgpu/__init__.py b/wgpu/backends/js_webgpu/__init__.py
index 82c66291..363b63f3 100644
--- a/wgpu/backends/js_webgpu/__init__.py
+++ b/wgpu/backends/js_webgpu/__init__.py
@@ -211,6 +211,7 @@ def create_bind_group_layout(self, *, label: str = "", entries: list[structs.Bin
                 sampler = {
                     "type": sampler.get("type", enums.SamplerBindingType.filtering),
                 }
+                sampler = to_js(sampler, depth=1)
             elif texture is not None:
                 buffer = sampler = storage_texture = external_texture = empty_value
                 texture = {
@@ -218,6 +219,7 @@ def create_bind_group_layout(self, *, label: str = "", entries: list[structs.Bin
                     "viewDimension": texture.get("view_dimension", enums.TextureViewDimension.d2),
                     "multisampled": texture.get("multisampled", False),
                 }
+                texture = to_js(texture, depth=1)
             elif storage_texture is not None:
                 buffer = sampler = texture = external_texture = empty_value
                 storage_texture = {
@@ -225,6 +227,7 @@ def create_bind_group_layout(self, *, label: str = "", entries: list[structs.Bin
                     "format": storage_texture.get("format"),
                     "viewDimension": storage_texture.get("view_dimension", enums.TextureViewDimension.d2),
                 }
+                storage_texture = to_js(storage_texture, depth=1)
             elif external_texture is not None:
                 buffer = sampler = texture = storage_texture = empty_value
                 external_texture = {
@@ -470,10 +473,27 @@ def write_texture(self,
         self._internal.writeTexture(js_destination, js_data, js_data_layout, js_size)

-    # def write_texture(self, *args, **kwargs):
-    #     js_args = to_js(args, eager_converter=simple_js_accessor)
-    #     js_kwargs = to_js(kwargs, eager_converter=simple_js_accessor)
-    #     self._internal.writeTexture(*js_args, js_kwargs)
+    def write_buffer(
+        self,
+        buffer: GPUBuffer | None = None,
+        buffer_offset: int | None = None,
+        data: memoryview | None = None,
+        data_offset: int = 0,
+        size: int | None = None,
+    ):
+        data = memoryview(data).cast("B")
+        if size is None:
+            size = data.nbytes - data_offset
+        size = (size + 3) & ~3  # align to 4 bytes
+
+        if buffer_offset is None:
+            buffer_offset = 0
+
+        js_data = Uint8Array.new(size)
+        js_data.assign(data[data_offset:data_offset+size])
+
+        self._internal.writeBuffer(buffer._internal, BigInt(buffer_offset), js_data, 0, size)
+

 class GPUTexture(classes.GPUTexture):
@@ -531,6 +551,7 @@ class GPURenderPipeline(classes.GPURenderPipeline):
     def get_bind_group_layout(self, index: int | None = None) -> classes.GPUBindGroupLayout:
         return classes.GPUBindGroupLayout("", self._internal.getBindGroupLayout(index), self._device)

+# TODO: abstract to mixin
 class GPURenderPassEncoder(classes.GPURenderPassEncoder):
     def set_pipeline(self, pipeline: GPURenderPipeline):
         self._internal.setPipeline(pipeline._internal)
@@ -564,6 +585,17 @@ def set_bind_group(

         self._internal.setBindGroup(index, bind_group._internal, dynamic_offsets_data)

+    def set_viewport(self, *args):
+        js_args = to_js(args, eager_converter=simple_js_accessor)
+        self._internal.setViewport(*js_args)
+
+    def set_blend_constant(self, color = None):
+        self._internal.setBlendConstant(color)
+
+    def set_scissor_rect(self, *args):
+        js_args = to_js(args, eager_converter=simple_js_accessor)
+        self._internal.setScissorRect(*js_args)
+
     def draw_indexed(self, *args, **kwargs):
         js_args = to_js(args, eager_converter=simple_js_accessor)
         js_kwargs = to_js(kwargs, eager_converter=simple_js_accessor)

From d2cd796327d6ffca866faf46697ca5909cc9f7ab Mon Sep 17 00:00:00 2001
From: Jan
Date: Wed, 8 Oct 2025 11:48:27 +0200
Subject: [PATCH 12/13] BigInt is not required

---
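
# --- illustrative aside (not part of the patch) -------------------------------
# The argument defaulting in GPUQueue.write_buffer above can be isolated into a
# pure-Python helper, which makes the 4-byte alignment and bounds handling easy
# to unit-test without a browser; `clip_write_region` is a hypothetical name
# used only for this sketch.
def clip_write_region(data_nbytes, data_offset=0, size=None):
    """Number of bytes to upload starting at *data_offset*, rounded up to 4."""
    if size is None:
        size = data_nbytes - data_offset
    if size < 0 or data_offset + size > data_nbytes:
        raise ValueError("write region exceeds the source data")
    return (size + 3) & ~3  # writeBuffer sizes must be multiples of 4

assert clip_write_region(10) == 12
assert clip_write_region(10, data_offset=4) == 8
assert clip_write_region(10, data_offset=4, size=4) == 4
# -------------------------------------------------------------------------------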
codegen/idlparser.py | 3 ++- codegen/jswriter.py | 20 +++++++++++++++++++ examples/browser.html | 30 +++++++++++++++++++++++++---- wgpu/backends/js_webgpu/__init__.py | 25 +++++++++++------------- 4 files changed, 59 insertions(+), 19 deletions(-) create mode 100644 codegen/jswriter.py diff --git a/codegen/idlparser.py b/codegen/idlparser.py index dfbe6f00..848d61e9 100644 --- a/codegen/idlparser.py +++ b/codegen/idlparser.py @@ -7,6 +7,7 @@ identify and remove code paths that are no longer used. """ +from typing import Dict from codegen.utils import print from codegen.files import read_file @@ -128,7 +129,7 @@ def peek_line(self): def parse(self, verbose=True): self._interfaces = {} - self.classes = {} + self.classes:Dict[str, Interface] = {} self.structs = {} self.flags = {} self.enums = {} diff --git a/codegen/jswriter.py b/codegen/jswriter.py new file mode 100644 index 00000000..8604496d --- /dev/null +++ b/codegen/jswriter.py @@ -0,0 +1,20 @@ +""" +Codegen the JS webgpu backend, based on the parsed idl. + +write to the backends/js_webgpu/_api.py file. +""" + +from codegen.idlparser import get_idl_parser + + +idl = get_idl_parser() + +# todo import our to_js converter functions from elsewhere? + +for name, interface in idl.classes.items(): + # write idl line, header + # write the to_js block + # get label (where needed?) + # return the constructor call to the base class maybe? + print(name, interface.functions) + \ No newline at end of file diff --git a/examples/browser.html b/examples/browser.html index 7450b0b8..8b8e0ac4 100644 --- a/examples/browser.html +++ b/examples/browser.html @@ -3,9 +3,28 @@ wgpu-py on the HTML RenderCanvas canvas with Pyodide:
- + + + + + + + +

pixels got drawn!
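
# --- illustrative aside (not part of the patches) ------------------------------
# Both the to_js key conversion used throughout the backend and the planned
# codegen/jswriter.py hinge on mapping Python snake_case names onto WebGPU's
# camelCase spelling (and back). A minimal sketch of such helpers is shown
# below, assuming behaviour along the lines of the backend's to_camel_case; the
# actual implementations in the repo may differ.
import re

def to_camel_case(name: str) -> str:
    head, *rest = name.split("_")
    return head + "".join(part.capitalize() for part in rest)

def to_snake_case(name: str) -> str:
    return re.sub(r"(?<!^)(?=[A-Z])", "_", name).lower()

assert to_camel_case("min_binding_size") == "minBindingSize"
assert to_snake_case("createBindGroupLayout") == "create_bind_group_layout"
# -------------------------------------------------------------------------------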