diff --git a/codegen/__init__.py b/codegen/__init__.py
index d194bc4e..8ba026a7 100644
--- a/codegen/__init__.py
+++ b/codegen/__init__.py
@@ -1,7 +1,7 @@
import io
from .utils import print, PrintToFile
-from . import apiwriter, apipatcher, wgpu_native_patcher, idlparser, hparser
+from . import apiwriter, apipatcher, wgpu_native_patcher, idlparser, hparser, jswriter
from .files import file_cache
@@ -15,6 +15,7 @@ def main():
prepare()
update_api()
update_wgpu_native()
+ update_js()
file_cache.write("resources/codegen_report.md", log.getvalue())
@@ -63,3 +64,19 @@ def update_wgpu_native():
code1 = file_cache.read("backends/wgpu_native/_api.py")
code2 = wgpu_native_patcher.patch_wgpu_native_backend(code1)
file_cache.write("backends/wgpu_native/_api.py", code2)
+
+
+def update_js():
+ """
+    Writes (and maybe later updates) the JS webgpu backend API.
+ """
+
+ print("## Writing backends/js_webgpu/_api.py")
+
+ code = jswriter.generate_js_webgpu_api()
+ # TODO: run the code against a patcher that adds hand written API diff methods
+
+ file_cache.write("backends/js_webgpu/_api.py", code)
+
+
+
diff --git a/codegen/files.py b/codegen/files.py
index ad1a89d1..cb95b3f5 100644
--- a/codegen/files.py
+++ b/codegen/files.py
@@ -35,6 +35,7 @@ class FileCache:
"structs.py",
"backends/wgpu_native/_api.py",
"backends/wgpu_native/_mappings.py",
+ "backends/js_webgpu/_api.py", # TODO: maybe this file should be more like _mappings
"resources/codegen_report.md",
]
diff --git a/codegen/idlparser.py b/codegen/idlparser.py
index dfbe6f00..38bce2f0 100644
--- a/codegen/idlparser.py
+++ b/codegen/idlparser.py
@@ -7,6 +7,7 @@
identify and remove code paths that are no longer used.
"""
+from typing import Dict
from codegen.utils import print
from codegen.files import read_file
@@ -128,7 +129,7 @@ def peek_line(self):
def parse(self, verbose=True):
self._interfaces = {}
- self.classes = {}
+ self.classes:Dict[str, Interface] = {}
self.structs = {}
self.flags = {}
self.enums = {}
@@ -222,6 +223,7 @@ def resolve_type(self, typename) -> str:
"ImageData": "ArrayLike",
"VideoFrame": "ArrayLike",
"AllowSharedBufferSource": "ArrayLike",
+ "[AllowShared] Uint32Array": "ArrayLike",
"GPUPipelineConstantValue": "float",
"GPUExternalTexture": "object",
"undefined": "None",
diff --git a/codegen/jswriter.py b/codegen/jswriter.py
new file mode 100644
index 00000000..ad1a6684
--- /dev/null
+++ b/codegen/jswriter.py
@@ -0,0 +1,276 @@
+"""
+Codegen the JS webgpu backend, based on the parsed idl.
+
+write to the backends/js_webgpu/_api.py file.
+"""
+
+import os
+import re
+from codegen.idlparser import Attribute, get_idl_parser, Interface
+from codegen.apipatcher import IdlPatcherMixin, BaseApiPatcher
+from codegen.utils import Patcher
+from textwrap import indent, dedent
+
+
+file_preamble ="""
+# Auto-generated API for the JS WebGPU backend, based on the IDL and custom implementations.
+
+from ... import classes, structs, enums, flags
+from ...structs import ArrayLike, Sequence # for typing hints
+from typing import Union
+
+from pyodide.ffi import to_js, run_sync, JsProxy
+from js import window, Uint8Array
+
+from ._helpers import simple_js_accessor
+from ._implementation import GPUPromise
+"""
+# maybe we should also generate a __all__ list to just import the defined classes?
+
+# TODO: the constructor often needs more args, like device hands down self
+# maybe label can be done via the property?
+create_template = """
+def {py_method_name}(self, **kwargs):
+ descriptor = structs.{py_descriptor_name}(**kwargs)
+ js_descriptor = to_js(descriptor, eager_converter=simple_js_accessor)
+ js_obj = self._internal.{js_method_name}(js_descriptor)
+
+ label = kwargs.pop("label", "")
+ return {return_type}(label, js_obj, device=self)
+"""
+
+unary_template = """
+def {py_method_name}(self) -> None:
+ self._internal.{js_method_name}()
+"""
+
+# TODO: this is a bit more complex but doable.
+# return needs to be optional and also resolve the promise?
+# TODO: with empty body looks odd :/
+positional_args_template = """
+{header}
+ {body}
+ self._internal.{js_method_name}({js_args})
+"""
+# TODO: construct a return value if needed?
+
+
+# might require size to be calculated if None? (offset etc)
+data_conversion = """
+ if {py_data} is not None:
+ data = memoryview({py_data}).cast("B")
+ data_size = (data.nbytes + 3) & ~3 # align to 4 bytes
+ js_data = Uint8Array.new(data_size)
+ js_data.assign(data)
+ else:
+ js_data = None
+"""
+
+# most likely copy and modify the code in apipatcher.py... because we hopefully need code that looks really similar to _classes.py
+idl = get_idl_parser()
+helper_patcher = BaseApiPatcher() # to get access to name2py_names function
+
+# can't use importlib because pyodide isn't available -.-
+# maybe use ast?
+root = os.path.abspath(os.path.join(__file__, "..", ".."))
+custom_implementations = open(os.path.join(root, "wgpu", "backends", "js_webgpu", "_implementation.py")).read()
+
+class JsPatcher(Patcher):
+ # TODO: we can put custom methods here!
+ pass
+
+patcher = JsPatcher(custom_implementations)
+
+def generate_method_code(class_name: str, function_name: str, idl_line: str) -> str:
+ # TODO: refactor into something like this
+ pass
+
+def get_class_def(class_name: str, interface: Interface) -> str:
+ # TODO: refactor
+ pass
+
+
+# basically three cases for methods (from idl+apidiff):
+# 1. already exists in _classes.py and can be used as is (generate nothing)
+# 2. custom implementation in _implementation.py (copy string over)
+# 3. auto-generate remaining methods based on idl
+
+
+
+def generate_js_webgpu_api() -> str:
+ """Generate the JS translation API code we can autogenerate."""
+
+
+ # TODO: preamble?
+ output = file_preamble + "\n\n"
+
+ # classname, start_line, end_line
+ custom_classes = {c: (s, e) for c, s, e in patcher.iter_classes()}
+
+ # todo import our to_js converter functions from elsewhere?
+ # we need to have the mixins first!
+ ordered_classes = sorted(idl.classes.items(), key=lambda c: "Mixin" not in c[0]) # mixins first
+ for class_name, interface in ordered_classes:
+ # write idl line, header
+ # write the to_js block
+ # get label (where needed?)
+ # return the constructor call to the base class maybe?
+
+ custom_methods = {}
+
+ if class_name in custom_classes:
+ class_line = custom_classes[class_name][0] +1
+ for method_name, start_line, end_line in patcher.iter_methods(class_line):
+ # grab the actual contents ?
+ # maybe include a comment that is in the line prior from _implementation.py?
+ method_lines = patcher.lines[start_line:end_line+1]
+ custom_methods[method_name] = method_lines
+
+ # include custom properties too
+ for prop_name, start_line, end_line in patcher.iter_properties(class_line):
+ prop_lines = patcher.lines[start_line-1:end_line+1]
+ custom_methods[prop_name] = prop_lines
+
+        mixins = [c for c in interface.bases if c not in ("DOMException", "EventTarget")] # skip base classes we don't mirror
+ class_header = f"class {class_name}(classes.{class_name}, {', '.join(mixins)}):"
+
+ class_lines = ["\n"]
+ # TODO: can we property some of the webgpu attributes to replace the existing private mappings
+
+ for function_name, idl_line in interface.functions.items():
+ return_type = idl_line.split(" ")[0] # on some parts this doesn't exist
+ py_method_name = helper_patcher.name2py_names(class_name, function_name)
+ # TODO: resolve async double methods!
+ py_method_name = py_method_name[0] # TODO: async always special case?
+
+ if py_method_name in custom_methods:
+ # Case 2: custom implementation exists!
+ class_lines.append(f"\n# Custom implementation for {function_name} from _implementation.py:\n")
+ class_lines.append(dedent("\n".join(custom_methods[py_method_name])))
+ class_lines.append("\n") # for space I guess
+ custom_methods.pop(py_method_name) # remove ones we have added.
+ continue
+
+ if py_method_name == "__init__":
+                # whacky way, but essentially this means classes.py already implements a usable constructor.
+ continue
+
+ # TODO: mixin classes seem to cause double methods? should we skip them?
+
+            # based on apipatcher.IdlCommentInjector.get_method_comment
+ args = idl_line.split("(")[1].rsplit(")")[0].split(", ")
+ args = [Attribute(arg) for arg in args if arg.strip()]
+
+ # TODO: the create_x_pipeline_async methods become the sync variant without suffix!
+ if return_type and return_type.startswith("Promise<") and return_type.endswith(">"):
+ return_type = return_type.split("<")[-1].rstrip(">?")
+
+ # skip these for now as they are more troublesome -.-
+ if py_method_name.endswith("_sync"):
+ class_lines.append(f"\n# TODO: {function_name} sync variant likely taken from _classes.py directly!")
+ continue
+
+ if function_name.endswith("Async"):
+ class_lines.append(f"\n# TODO: was was there a redefinition for {function_name} async variant?")
+ continue
+
+ # case 1: single argument as a descriptor (TODO: could be optional - but that should just work)
+ if len(args) == 1 and args[0].typename.endswith(
+ ("Options", "Descriptor", "Configuration")
+ ):
+ method_string = create_template.format(
+ py_method_name=py_method_name,
+ py_descriptor_name=args[0].typename.removeprefix("GPU"),
+ js_method_name=function_name,
+ return_type=return_type if return_type else "None",
+ )
+ class_lines.append(method_string)
+
+ # case 2: no arguments (and nothing to return?)
+ elif (len(args) == 0 and return_type == "undefined"):
+ method_string = unary_template.format(
+ py_method_name=py_method_name,
+ js_method_name=function_name,
+ )
+ class_lines.append(method_string)
+ # TODO: return values, could be simple or complex... so might need a constructor or not at all?
+
+ # case 3: positional arguments, some of which might need ._internal lookup or struct->to_js conversion... but not all.
+ elif (len(args) > 0):
+
+ header = helper_patcher.get_method_def(class_name, py_method_name).partition("):")[0].lstrip()
+ # put all potentially forward refrenced classes into quotes
+ header = " ".join(f'"{h}"' if h.startswith("GPU") else h for h in header.split(" ")).replace(':"','":')
+ # turn all optional type hints into Union with None
+ # int | None -> Union[int, None]
+ exp = r":\s([\w\"]+)\s\| None"
+ header = re.sub(exp, lambda m: f": Union[{m.group(1)}, None]", header)
+ header = header.replace('Sequence[GPURenderBundle]', 'Sequence["GPURenderBundle"]') # TODO: just a temporary bodge!
+
+ param_list = []
+ conversion_lines = []
+ js_arg_list = []
+ for idx, arg in enumerate(args):
+ py_name = helper_patcher.name2py_names(class_name, arg.name)[0]
+ param_list.append(py_name)
+ # if it's a GPUObject kinda thing we most likely need to call ._internal to get the correct js object
+ if arg.typename.removesuffix("?") in idl.classes:
+ # TODO: do we need to check against none for optionals?
+ # technically the our js_accessor does this lookup too?
+ conversion_lines.append(f"js_{arg.name} = {py_name}._internal")
+ js_arg_list.append(f"js_{arg.name}")
+ # TODO: sequence of complex type?
+
+ elif arg.typename.removeprefix('GPU').removesuffix("?") in idl.structs and arg.typename not in ("GPUExtent3D", "GPUColor"):
+ conversion_lines.append(f"{py_name}_desc = structs.{arg.typename.removeprefix('GPU').removesuffix('?')}(**{py_name})")
+ conversion_lines.append(f"js_{arg.name} = to_js({py_name}_desc, eager_converter=simple_js_accessor)")
+ js_arg_list.append(f"js_{arg.name}")
+ elif py_name.endswith("data"): # maybe not an exhaustive check?
+ conversion_lines.append(data_conversion.format(py_data=py_name))
+ js_arg_list.append("js_data") #might be a problem if there is two!
+ else:
+ py_type = idl.resolve_type(arg.typename)
+ if py_type not in __builtins__ and not py_type.startswith(("enums.", "flags.")):
+ conversion_lines.append(f"# TODO: argument {py_name} of JS type {arg.typename}, py type {py_type} might need conversion")
+ js_arg_list.append(py_name)
+
+ method_string = positional_args_template.format(
+ header=header,
+ body=("\n ".join(conversion_lines)),
+ js_method_name=function_name,
+ js_args=", ".join(js_arg_list),
+ return_type=return_type if return_type != "undefined" else "None",
+ )
+ class_lines.append(method_string)
+
+ # TODO: have a return line constructor function?
+
+ else:
+ class_lines.append(f"\n# TODO: implement codegen for {function_name} with args {args} or return type {return_type}")
+
+ # if there are some methods not part of the idl, we should write them too
+ if custom_methods:
+ class_lines.append("\n# Additional custom methods from _implementation.py:\n")
+ for method_name, method_lines in custom_methods.items():
+ class_lines.append(dedent("\n".join(method_lines)))
+ class_lines.append("\n\n")
+
+ # do we need them in the first place?
+ if all(line.lstrip().startswith("#") for line in class_lines if line.strip()):
+ class_lines.append("\npass")
+
+ output += class_header
+ output += indent("".join(class_lines), " ")
+ output += "\n\n" # separation between classes
+
+ # TODO: most likely better to return a structure like
+ # dict(class: dict(method : code_lines))
+
+
+ # TODO: postamble:
+ output += "\ngpu = GPU()\n"
+
+ return output
+
+
+# TODO: we need to add some of the apidiff functions too... but I am not yet sure if we want to generate them or maybe import them?
diff --git a/codegen/utils.py b/codegen/utils.py
index 5b3b2a7a..7ee9d2cc 100644
--- a/codegen/utils.py
+++ b/codegen/utils.py
@@ -349,7 +349,7 @@ def iter_classes(self, start_line=0):
def iter_properties(self, start_line=0):
"""Generator to iterate over the properties.
- Each iteration yields (classname, linenr_first, linenr_last),
+ Each iteration yields (propertyname, linenr_first, linenr_last),
where linenr_first is the line that startswith `def`,
and linenr_last is the last line of code.
"""
@@ -357,7 +357,7 @@ def iter_properties(self, start_line=0):
def iter_methods(self, start_line=0):
"""Generator to iterate over the methods.
- Each iteration yields (classname, linenr_first, linenr_last)
+ Each iteration yields (methodname, linenr_first, linenr_last)
where linenr_first is the line that startswith `def`,
and linenr_last is the last line of code.
"""
diff --git a/examples/browser.html b/examples/browser.html
new file mode 100644
index 00000000..2dc5ae9b
--- /dev/null
+++ b/examples/browser.html
@@ -0,0 +1,97 @@
+
+
+
+
+ wgpu-py on the HTML RenderCanvas canvas with Pyodide:
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+pixels got drawn!
+
+
+
\ No newline at end of file
diff --git a/examples/cube.py b/examples/cube.py
index 13a49126..d01a623b 100644
--- a/examples/cube.py
+++ b/examples/cube.py
@@ -472,7 +472,6 @@ def draw_func():
for a in wgpu.gpu.enumerate_adapters_sync():
print(a.summary)
-
if __name__ == "__main__":
canvas = RenderCanvas(
size=(640, 480),
diff --git a/examples/serve_browser_examples.py b/examples/serve_browser_examples.py
new file mode 100644
index 00000000..5958a47f
--- /dev/null
+++ b/examples/serve_browser_examples.py
@@ -0,0 +1,304 @@
+"""
+A little script that serves browser-based examples, using a wheel from the local wgpu.
+
+* Examples that run wgpu fully in the browser in Pyodide / PyScript.
+
+What this script does:
+
+* runs the codegen for js_webgpu backend
+* Build the .whl for wgpu, so Pyodide can install the dev version.
+* Start a tiny webserver to host html files for a selection of examples.
+* Opens a webpage in the default browser.
+
+Files are loaded from disk on each request, so you can leave the server running
+and just update examples, update wgpu and build the wheel, etc.
+"""
+
+# this is adapted from the rendercanvas version
+
+import os
+import sys
+import shutil
+import webbrowser
+from http.server import BaseHTTPRequestHandler, HTTPServer
+
+import flit
+import wgpu
+from codegen import update_js, file_cache
+
+
+# Examples that don't require a canvas; we will capture the output to a div
+compute_examples = {
+ # "compute_int64.py", # this one requires native only features, so won't work in the browser for now
+ "compute_noop.py": [], # no deps
+ "compute_matmul.py": ["numpy"],
+ # "compute_textures.py": ["numpy", "imageio"], #imageio doesn't work in pyodide right now (fetch?)
+ "compute_timestamps.py": [], # this one still crashes as the descriptor doesn't get converted into an object...
+}
+
+# these need rendercanvas too, so we will patch in the local wheel until there is a rendercanvas release on pypi
+graphics_examples = {
+ "triangle.py":[], # no deps
+ "cube.py": ["numpy"],
+ "offscreen_hdr.py": ["numpy", "pypng"], # pyscript says it doesn't work in pyodide.
+ # "triangle_glsl.py": # we can't use GLSL in the browser... I am looking into maybe using wasm compiled naga manually - at a later date.
+ "imgui_backend_sea.py": ["numpy", "imgui-bundle"],
+ "imgui_basic_example.py": ["imgui-bundle"], # might even work without wgpu as imgui already works in pyodide...
+ "imgui_renderer_sea.py": ["numpy", "imgui-bundle"],
+}
+
+
+root = os.path.abspath(os.path.join(__file__, "..", ".."))
+
+short_version = ".".join(str(i) for i in wgpu.version_info[:3])
+wheel_name = f"wgpu-{short_version}-py3-none-any.whl"
+
+
+def get_html_index():
+ """Create a landing page."""
+
+ compute_examples_list = [f"
" for name in graphics_examples.keys()]
+
+ html = """
+
+
+
+ wgpu PyScript examples
+
+
+
+
+ Rebuild the wheel
+ """
+
+ html += "List of compute examples that run in PyScript:\n"
+ html += f"
{''.join(compute_examples_list)}
\n\n"
+
+ html += "List of graphics examples that run in PyScript:\n"
+ html += f"
{''.join(graphics_examples_list)}
\n\n"
+
+ html += "\n\n"
+ return html
+
+
+html_index = get_html_index()
+
+
+# An html template to show examples using pyscript.
+pyscript_graphics_template = """
+
+
+
+
+ {example_script} via PyScript
+
+
+
+
+ Back to list
+
+
+ {docstring}
+
+
+
+
+
+
+
+
+
+"""
+
+# TODO: a pyodide example for the compute examples (so we can capture output?)
+# modified from _pyodide_iframe.html from rendercanvas
+pyodide_compute_template = """
+
+
+
+
+ {example_script} via Pyodide
+
+
+
+
+
+ Back to list
+
+ {docstring}
+
+
+
Output:
+
+
+
+
+
+"""
+
+
+
+
+if not (
+ os.path.isfile(os.path.join(root, "wgpu", "__init__.py"))
+ and os.path.isfile(os.path.join(root, "pyproject.toml"))
+):
+ raise RuntimeError("This script must run in a checkout repo of wgpu-py.")
+
+rendercanvas_wheel = "rendercanvas-2.2.1-py3-none-any.whl"
+def copy_rendercanvas_wheel():
+ """
+ copies a local rendercanvas wheel into the wgpu dist folder, so the webserver can serve it.
+    expects that rendercanvas is a repo with the wheel built, in a dir next to the wgpu-py repo.
+ """
+ src = os.path.join(root, "..", "rendercanvas", "dist", rendercanvas_wheel)
+ dst = os.path.join(root, "dist", rendercanvas_wheel)
+ shutil.copyfile(src, dst)
+
+
+def build_wheel():
+ # TODO: run the codegen for js_webgpu backend!
+ file_cache.reset()
+ update_js()
+ # (doesn't work right now :/)
+
+ # TODO: can we use the existing hatch build system?
+ os.environ["WGPU_PY_BUILD_NOARCH"] = "1"
+ toml_filename = os.path.join(root, "pyproject.toml")
+ flit.main(["-f", toml_filename, "build", "--no-use-vcs", "--format", "wheel"])
+ wheel_filename = os.path.join(root, "dist", wheel_name)
+ assert os.path.isfile(wheel_filename), f"{wheel_name} does not exist"
+
+
+def get_docstring_from_py_file(fname):
+ filename = os.path.join(root, "examples", fname)
+ docstate = 0
+ doc = ""
+ with open(filename, "rb") as f:
+ while True:
+ line = f.readline().decode()
+ if docstate == 0:
+ if line.lstrip().startswith('"""'):
+ docstate = 1
+ else:
+ if docstate == 1 and line.lstrip().startswith(("---", "===")):
+ docstate = 2
+ doc = ""
+ elif '"""' in line:
+ doc += line.partition('"""')[0]
+ break
+ else:
+ doc += line
+
+ return doc.replace("\n\n", "
")
+
+
+class MyHandler(BaseHTTPRequestHandler):
+ def do_GET(self):
+ if self.path == "/":
+ self.respond(200, html_index, "text/html")
+ elif self.path == "/build":
+ # TODO: add progress instead of blocking before load?
+ # also seems like this might get called multiple times?
+ try:
+ build_wheel()
+ except Exception as err:
+ self.respond(500, str(err), "text/plain")
+ else:
+ html = f"Wheel build: {wheel_name}
Back to list"
+ self.respond(200, html, "text/html")
+ elif self.path.endswith(".whl"):
+ filename = os.path.join(root, "dist", self.path.strip("/"))
+ if os.path.isfile(filename):
+ with open(filename, "rb") as f:
+ data = f.read()
+ self.respond(200, data, "application/octet-stream")
+ else:
+ self.respond(404, "wheel not found")
+ elif self.path.endswith(".html"):
+ name = self.path.strip("/")
+ pyname = name.replace(".html", ".py")
+ if pyname in graphics_examples:
+ deps = graphics_examples[pyname].copy() # don't modify them multiple times!
+ deps.append(f"./{rendercanvas_wheel}")
+ deps.append(f"./{wheel_name}")
+ # sometimes sniffio is missing, other times it's not?
+ doc = get_docstring_from_py_file(pyname)
+ html = pyscript_graphics_template.format(docstring=doc, example_script=pyname, dependencies=", ".join([f'"{d}"' for d in deps]))
+ self.respond(200, html, "text/html")
+ elif pyname in compute_examples:
+ doc = get_docstring_from_py_file(pyname)
+ deps = compute_examples[pyname].copy()
+ deps.append(f"./{wheel_name}")
+ html = pyodide_compute_template.format(docstring=doc, example_script=pyname, dependencies="\n".join([f"await micropip.install({dep!r});" for dep in deps]))
+ self.respond(200, html, "text/html")
+ else:
+ self.respond(404, "example not found")
+ elif self.path.endswith(".py"):
+ filename = os.path.join(root, "examples", self.path.strip("/"))
+ if os.path.isfile(filename):
+ with open(filename, "rb") as f:
+ data = f.read()
+ self.respond(200, data, "text/plain")
+ else:
+ self.respond(404, "py file not found")
+ else:
+ self.respond(404, "not found")
+
+ def respond(self, code, body, content_type="text/plain"):
+ self.send_response(code)
+ self.send_header("Content-type", content_type)
+ self.end_headers()
+ if isinstance(body, str):
+ body = body.encode()
+ self.wfile.write(body)
+
+
+if __name__ == "__main__":
+ port = 8000
+ if len(sys.argv) > 1:
+ try:
+ port = int(sys.argv[-1])
+ except ValueError:
+ pass
+
+ copy_rendercanvas_wheel()
+ build_wheel()
+ print("Opening page in web browser ...")
+ webbrowser.open(f"http://localhost:{port}/")
+ HTTPServer(("", port), MyHandler).serve_forever()
diff --git a/examples/triangle.py b/examples/triangle.py
index b6a300e8..1d7676fa 100644
--- a/examples/triangle.py
+++ b/examples/triangle.py
@@ -71,19 +71,20 @@ def get_render_pipeline_kwargs(
render_texture_format = context.get_preferred_format(device.adapter)
context.configure(device=device, format=render_texture_format)
- shader = device.create_shader_module(code=shader_source)
+ vert_shader = device.create_shader_module(code=shader_source)
+ frag_shader = device.create_shader_module(code=shader_source)
pipeline_layout = device.create_pipeline_layout(bind_group_layouts=[])
return wgpu.RenderPipelineDescriptor(
layout=pipeline_layout,
vertex=wgpu.VertexState(
- module=shader,
+ module=vert_shader,
entry_point="vs_main",
),
depth_stencil=None,
multisample=None,
fragment=wgpu.FragmentState(
- module=shader,
+ module=frag_shader,
entry_point="fs_main",
targets=[
wgpu.ColorTargetState(
@@ -175,7 +176,7 @@ async def draw_frame_async():
if __name__ == "__main__":
from rendercanvas.auto import RenderCanvas, loop
- canvas = RenderCanvas(size=(640, 480), title="wgpu triangle example")
+ canvas = RenderCanvas(size=(640, 480), title="wgpu triangle example", update_mode="continuous")
draw_frame = setup_drawing_sync(canvas)
canvas.request_draw(draw_frame)
loop.run()
diff --git a/wgpu/_classes.py b/wgpu/_classes.py
index c7c9f400..d20a3a8f 100644
--- a/wgpu/_classes.py
+++ b/wgpu/_classes.py
@@ -358,7 +358,7 @@ def configure(
usage = str_flag_to_int(flags.TextureUsage, usage)
color_space # noqa - not really supported, just assume srgb for now
- tone_mapping # noqa - not supported yet
+ tone_mapping = {} if tone_mapping is None else tone_mapping
# Allow more than the IDL modes, see https://github.com/pygfx/wgpu-py/pull/719
extra_alpha_modes = ["auto", "unpremultiplied", "inherit"] # from webgpu.h
@@ -1899,7 +1899,7 @@ def set_index_buffer(
call to `GPUDevice.create_render_pipeline()`, it must match.
offset (int): The byte offset in the buffer. Default 0.
size (int): The number of bytes to use. If zero, the remaining size
- (after offset) of the buffer is used. Default 0.
+ (after offset) of the buffer is used.
"""
raise NotImplementedError()
diff --git a/wgpu/backends/js_webgpu/__init__.py b/wgpu/backends/js_webgpu/__init__.py
index 7317b133..25952b59 100644
--- a/wgpu/backends/js_webgpu/__init__.py
+++ b/wgpu/backends/js_webgpu/__init__.py
@@ -6,26 +6,8 @@
generated.
"""
-# NOTE: this is just a stub for now!!
-
from .. import _register_backend
-
-
-class GPU:
- def request_adapter_sync(self, **parameters):
- raise NotImplementedError("Cannot use sync API functions in JS.")
-
- async def request_adapter_async(self, **parameters):
- gpu = window.navigator.gpu # noqa: F821
- return await gpu.request_adapter(**parameters)
-
- def get_preferred_canvas_format(self):
- raise NotImplementedError()
-
- @property
- def wgsl_language_features(self):
- return set()
-
+from ._api import * # includes gpu from _implementation?
gpu = GPU()
_register_backend(gpu)
diff --git a/wgpu/backends/js_webgpu/_api.py b/wgpu/backends/js_webgpu/_api.py
new file mode 100644
index 00000000..d1b138aa
--- /dev/null
+++ b/wgpu/backends/js_webgpu/_api.py
@@ -0,0 +1,795 @@
+
+# Auto-generated API for the JS WebGPU backend, based on the IDL and custom implementations.
+
+from ... import classes, structs, enums, flags
+from ...structs import ArrayLike, Sequence # for typing hints
+from typing import Union
+
+from pyodide.ffi import to_js, run_sync, JsProxy
+from js import window, Uint8Array, Object
+
+from ._helpers import simple_js_accessor
+from ._implementation import GPUPromise
+
+
+class GPUCommandsMixin(classes.GPUCommandsMixin, ):
+
+ pass
+
+class GPUBindingCommandsMixin(classes.GPUBindingCommandsMixin, ):
+
+ # Custom implementation for setBindGroup from _implementation.py:
+ def set_bind_group(self, index: int, bind_group: classes.GPUBindGroup, dynamic_offsets_data: list[int] = (), dynamic_offsets_data_start=None, dynamic_offsets_data_length=None) -> None:
+ self._internal.setBindGroup(index, bind_group._internal, dynamic_offsets_data)
+
+
+class GPUDebugCommandsMixin(classes.GPUDebugCommandsMixin, ):
+
+ def push_debug_group(self, group_label: Union[str, None] = None) -> None:
+
+ self._internal.pushDebugGroup(group_label)
+
+ def pop_debug_group(self) -> None:
+ self._internal.popDebugGroup()
+
+ def insert_debug_marker(self, marker_label: Union[str, None] = None) -> None:
+
+ self._internal.insertDebugMarker(marker_label)
+
+
+class GPURenderCommandsMixin(classes.GPURenderCommandsMixin, ):
+
+ def set_pipeline(self, pipeline: Union["GPURenderPipeline", None] = None) -> None:
+ js_pipeline = pipeline._internal
+ self._internal.setPipeline(js_pipeline)
+
+ def set_index_buffer(self, buffer: Union["GPUBuffer", None] = None, index_format: enums.IndexFormatEnum | None = None, offset: int = 0, size: Union[int, None] = None) -> None:
+ js_buffer = buffer._internal
+ self._internal.setIndexBuffer(js_buffer, index_format, offset, size)
+
+ def set_vertex_buffer(self, slot: Union[int, None] = None, buffer: Union["GPUBuffer", None] = None, offset: int = 0, size: Union[int, None] = None) -> None:
+ js_buffer = buffer._internal
+ self._internal.setVertexBuffer(slot, js_buffer, offset, size)
+
+ def draw(self, vertex_count: Union[int, None] = None, instance_count: int = 1, first_vertex: int = 0, first_instance: int = 0) -> None:
+
+ self._internal.draw(vertex_count, instance_count, first_vertex, first_instance)
+
+ def draw_indexed(self, index_count: Union[int, None] = None, instance_count: int = 1, first_index: int = 0, base_vertex: int = 0, first_instance: int = 0) -> None:
+
+ self._internal.drawIndexed(index_count, instance_count, first_index, base_vertex, first_instance)
+
+ def draw_indirect(self, indirect_buffer: Union["GPUBuffer", None] = None, indirect_offset: Union[int, None] = None) -> None:
+ js_indirectBuffer = indirect_buffer._internal
+ self._internal.drawIndirect(js_indirectBuffer, indirect_offset)
+
+ def draw_indexed_indirect(self, indirect_buffer: Union["GPUBuffer", None] = None, indirect_offset: Union[int, None] = None) -> None:
+ js_indirectBuffer = indirect_buffer._internal
+ self._internal.drawIndexedIndirect(js_indirectBuffer, indirect_offset)
+
+
+class GPUObjectBase(classes.GPUObjectBase, ):
+
+ pass
+
+class GPUAdapterInfo(classes.GPUAdapterInfo, ):
+
+ pass
+
+class GPU(classes.GPU, ):
+
+ # TODO: requestAdapter sync variant likely taken from _classes.py directly!
+ # TODO: implement codegen for getPreferredCanvasFormat with args [] or return type GPUTextureFormat
+ # Additional custom methods from _implementation.py:
+ def __init__(self):
+ self._internal = window.navigator.gpu # noqa: F821
+
+ def request_adapter_async(self, loop=None, canvas=None, **options) -> GPUPromise["GPUAdapter"]:
+ options = structs.RequestAdapterOptions(**options)
+ js_options = to_js(options, eager_converter=simple_js_accessor)
+ js_adapter_promise = self._internal.requestAdapter(js_options)
+
+ if loop is None:
+ # can we use this instead?
+ webloop = js_adapter_promise.get_loop()
+ loop = webloop
+
+ def adapter_constructor(js_adapter):
+ return GPUAdapter(js_adapter, loop=loop)
+
+ promise = GPUPromise("request_adapter", adapter_constructor, loop=loop)
+
+ js_adapter_promise.then(promise._set_input) # we chain the js resolution to our promise
+ return promise
+
+ def enumerate_adapters_async(self, loop=None) -> GPUPromise[list["GPUAdapter"]]:
+ adapter_hp = self.request_adapter_sync(power_preference="high-performance")
+ adapter_lp = self.request_adapter_sync(power_preference="low-power")
+
+ promise = GPUPromise("enumerate_adapters", None, loop=loop)
+ promise._set_input([adapter_hp, adapter_lp])
+ return promise
+
+ @property
+ def wgsl_language_features(self):
+ return self._internal.wgslLanguageFeatures
+
+
+
+class GPUAdapter(classes.GPUAdapter, ):
+
+ # TODO: requestDevice sync variant likely taken from _classes.py directly!
+ # Additional custom methods from _implementation.py:
+ def __init__(self, js_adapter, loop):
+ internal = js_adapter
+ # manually turn these into useful python objects
+ features = set(js_adapter.features)
+
+ # TODO: _get_limits()?
+ limits = js_adapter.limits
+ py_limits = {}
+ for limit in dir(limits):
+ # we don't have GPUSupportedLimits as a struct or list anywhere in the code right now; maybe we un-skip it in the codegen?
+ if isinstance(getattr(limits, limit), int) and "_" not in limit:
+ py_limits[limit] = getattr(limits, limit)
+
+ infos = ["vendor", "architecture", "device", "description", "subgroupMinSize", "subgroupMaxSize", "isFallbackAdapter"]
+ adapter_info = js_adapter.info
+ py_adapter_info = {}
+ for info in infos:
+ if hasattr(adapter_info, info):
+ py_adapter_info[info] = getattr(adapter_info, info)
+
+ # for compatibility, we fill the native-extra infos too:
+ py_adapter_info["vendor_id"] = 0
+ py_adapter_info["device_id"] = 0
+ py_adapter_info["adapter_type"] = "browser"
+ py_adapter_info["backend_type"] = "WebGPU"
+
+ adapter_info = classes.GPUAdapterInfo(**py_adapter_info)
+
+ super().__init__(internal=internal, features=features, limits=py_limits, adapter_info=adapter_info, loop=loop)
+
+ def request_device_async(self, **kwargs) -> GPUPromise["GPUDevice"]:
+ descriptor = structs.DeviceDescriptor(**kwargs)
+ js_descriptor = to_js(descriptor, eager_converter=simple_js_accessor)
+ js_device_promise = self._internal.requestDevice(js_descriptor)
+
+ label = kwargs.get("label", "")
+
+ def device_constructor(js_device):
+ # TODO: do we need to hand down a default_queue here?
+ return GPUDevice(label, js_device, adapter=self)
+
+ promise = GPUPromise("request_device", device_constructor, loop=self._loop)
+ js_device_promise.then(promise._set_input)
+ return promise
+
+
+
+class GPUDevice(classes.GPUDevice, GPUObjectBase):
+
+ def destroy(self) -> None:
+ self._internal.destroy()
+
+ def create_buffer(self, **kwargs):
+ descriptor = structs.BufferDescriptor(**kwargs)
+ js_descriptor = to_js(descriptor, eager_converter=simple_js_accessor)
+ js_obj = self._internal.createBuffer(js_descriptor)
+
+ label = kwargs.pop("label", "")
+ return GPUBuffer(label, js_obj, device=self)
+
+ def create_texture(self, **kwargs):
+ descriptor = structs.TextureDescriptor(**kwargs)
+ js_descriptor = to_js(descriptor, eager_converter=simple_js_accessor)
+ js_obj = self._internal.createTexture(js_descriptor)
+
+ label = kwargs.pop("label", "")
+ return GPUTexture(label, js_obj, device=self)
+
+ def create_sampler(self, **kwargs):
+ descriptor = structs.SamplerDescriptor(**kwargs)
+ js_descriptor = to_js(descriptor, eager_converter=simple_js_accessor)
+ js_obj = self._internal.createSampler(js_descriptor)
+
+ label = kwargs.pop("label", "")
+ return GPUSampler(label, js_obj, device=self)
+
+ def import_external_texture(self, **kwargs):
+ descriptor = structs.ExternalTextureDescriptor(**kwargs)
+ js_descriptor = to_js(descriptor, eager_converter=simple_js_accessor)
+ js_obj = self._internal.importExternalTexture(js_descriptor)
+
+ label = kwargs.pop("label", "")
+ return GPUExternalTexture(label, js_obj, device=self)
+
+ def create_bind_group_layout(self, **kwargs):
+ descriptor = structs.BindGroupLayoutDescriptor(**kwargs)
+ js_descriptor = to_js(descriptor, eager_converter=simple_js_accessor)
+ js_obj = self._internal.createBindGroupLayout(js_descriptor)
+
+ label = kwargs.pop("label", "")
+ return GPUBindGroupLayout(label, js_obj, device=self)
+
+ def create_pipeline_layout(self, **kwargs):
+ descriptor = structs.PipelineLayoutDescriptor(**kwargs)
+ js_descriptor = to_js(descriptor, eager_converter=simple_js_accessor)
+ js_obj = self._internal.createPipelineLayout(js_descriptor)
+
+ label = kwargs.pop("label", "")
+ return GPUPipelineLayout(label, js_obj, device=self)
+
+ def create_bind_group(self, **kwargs):
+ descriptor = structs.BindGroupDescriptor(**kwargs)
+ js_descriptor = to_js(descriptor, eager_converter=simple_js_accessor)
+ js_obj = self._internal.createBindGroup(js_descriptor)
+
+ label = kwargs.pop("label", "")
+ return GPUBindGroup(label, js_obj, device=self)
+
+ def create_shader_module(self, **kwargs):
+ descriptor = structs.ShaderModuleDescriptor(**kwargs)
+ js_descriptor = to_js(descriptor, eager_converter=simple_js_accessor)
+ js_obj = self._internal.createShaderModule(js_descriptor)
+
+ label = kwargs.pop("label", "")
+ return GPUShaderModule(label, js_obj, device=self)
+
+ def create_compute_pipeline(self, **kwargs):
+ descriptor = structs.ComputePipelineDescriptor(**kwargs)
+ js_descriptor = to_js(descriptor, eager_converter=simple_js_accessor)
+ js_obj = self._internal.createComputePipeline(js_descriptor)
+
+ label = kwargs.pop("label", "")
+ return GPUComputePipeline(label, js_obj, device=self)
+
+ def create_render_pipeline(self, **kwargs):
+ descriptor = structs.RenderPipelineDescriptor(**kwargs)
+ js_descriptor = to_js(descriptor, eager_converter=simple_js_accessor)
+ js_obj = self._internal.createRenderPipeline(js_descriptor)
+
+ label = kwargs.pop("label", "")
+ return GPURenderPipeline(label, js_obj, device=self)
+
+ # TODO: why was there a redefinition for the createComputePipelineAsync async variant?
+ # TODO: why was there a redefinition for the createRenderPipelineAsync async variant?
+ def create_command_encoder(self, **kwargs):
+ descriptor = structs.CommandEncoderDescriptor(**kwargs)
+ js_descriptor = to_js(descriptor, eager_converter=simple_js_accessor)
+ js_obj = self._internal.createCommandEncoder(js_descriptor)
+
+ label = kwargs.pop("label", "")
+ return GPUCommandEncoder(label, js_obj, device=self)
+
+ def create_render_bundle_encoder(self, **kwargs):
+ descriptor = structs.RenderBundleEncoderDescriptor(**kwargs)
+ js_descriptor = to_js(descriptor, eager_converter=simple_js_accessor)
+ js_obj = self._internal.createRenderBundleEncoder(js_descriptor)
+
+ label = kwargs.pop("label", "")
+ return GPURenderBundleEncoder(label, js_obj, device=self)
+
+ # Custom implementation for createQuerySet from _implementation.py:
+ def create_query_set(self, **kwargs):
+ print(set(self._internal.features)) # timestamp-query is here!
+ print("GPUDevice.create_query_set", kwargs)
+ descriptor = structs.QuerySetDescriptor(**kwargs)
+ print(" descriptor:", descriptor)
+ js_descriptor = to_js(descriptor, eager_converter=simple_js_accessor)
+ # js_descriptor = to_js(descriptor, dict_converter=Object.fromEntries) # hoping to get Object now? (wrong object or nested structure)?
+ print(" js_descriptor:", js_descriptor) # -> still a dict?
+ js_obj = self._internal.createQuerySet(js_descriptor)
+
+
+ # I am getting a type error... which could mean that the feature isn't requested correctly?
+ # https://www.w3.org/TR/webgpu/#queryset-creation
+ # likely it is not, since the occlusion type throws the same error... it's the structure.
+
+ label = kwargs.pop("label", "")
+ type = descriptor.get("type")
+ count = descriptor.get("count")
+ return GPUQuerySet(label, js_obj, device=self, type=type, count=count)
+
+ def push_error_scope(self, filter: enums.ErrorFilterEnum | None = None) -> None:
+
+ self._internal.pushErrorScope(filter)
+
+ # TODO: popErrorScope sync variant likely taken from _classes.py directly!
+ # Additional custom methods from _implementation.py:
+ def __init__(self, label: str, js_device, adapter: GPUAdapter):
+ features = set(js_device.features)
+
+ js_limits = js_device.limits
+ limits = {}
+ for limit in dir(js_limits):
+ if isinstance(getattr(js_limits, limit), int) and "_" not in limit:
+ limits[limit] = getattr(js_limits, limit)
+
+ queue = GPUQueue(label="default queue", internal=js_device.queue, device=self)
+ super().__init__(label, internal=js_device, adapter=adapter, features=features, limits=limits, queue=queue)
+
+ def create_buffer_with_data_(self, *, label="", data, usage: flags.BufferUsageFlags) -> "GPUBuffer":
+ data = memoryview(data).cast("B") # uint8
+ data_size = (data.nbytes + 3) & ~3 # align to 4 bytes
+
+ # if it's a Descriptor you need the keywords
+ # do we also need to modify the usages?
+ js_buf = self._internal.createBuffer(label=label, size=data_size, usage=usage, mappedAtCreation=True)
+ # print("created buffer", js_buf, dir(js_buf), js_buf.size)
+ array_buf = js_buf.getMappedRange(0, data_size)
+ Uint8Array.new(array_buf).assign(data)
+ # print(array_buf.to_py().tolist())
+ js_buf.unmap()
+ # print("created buffer", js_buf, dir(js_buf), js_buf.size)
+ return GPUBuffer(label, js_buf, self, data_size, usage, enums.BufferMapState.unmapped)
+
+ def create_compute_pipeline_async(self, **kwargs):
+ descriptor = structs.ComputePipelineDescriptor(**kwargs)
+ js_descriptor = to_js(descriptor, eager_converter=simple_js_accessor)
+ js_promise = self._internal.createComputePipelineAsync(js_descriptor)
+
+ label = kwargs.get("label", "")
+
+ def construct_compute_pipeline(js_cp):
+ return classes.GPUComputePipeline(label, js_cp, self)
+
+ promise = GPUPromise("create_compute_pipeline", construct_compute_pipeline, loop=self._loop)
+ js_promise.then(promise._set_input)
+
+ return promise
+
+ def create_render_pipeline_async(self, **kwargs):
+ descriptor = structs.RenderPipelineDescriptor(**kwargs)
+ js_descriptor = to_js(descriptor, eager_converter=simple_js_accessor)
+ js_promise = self._internal.createRenderPipelineAsync(js_descriptor)
+
+ label = kwargs.get("label", "")
+
+ def construct_render_pipeline(js_rp):
+ return classes.GPURenderPipeline(label, js_rp, self)
+
+ promise = GPUPromise("create_render_pipeline", construct_render_pipeline, loop=self._loop)
+ js_promise.then(promise._set_input)
+
+ return promise
+
+ @property
+ def adapter(self) -> GPUAdapter:
+ return self._adapter
+
+
+
+class GPUBuffer(classes.GPUBuffer, GPUObjectBase):
+
+ # TODO: mapAsync sync variant likely taken from _classes.py directly!
+ def get_mapped_range(self, offset: int = 0, size: Union[int, None] = None) -> ArrayLike:
+
+ self._internal.getMappedRange(offset, size)
+
+ def unmap(self) -> None:
+ self._internal.unmap()
+
+ def destroy(self) -> None:
+ self._internal.destroy()
+
+ # Additional custom methods from _implementation.py:
+ def __init__(self, label, internal, device):
+ # can we just fill the _classes constructor with properties?
+ super().__init__(internal.label, internal, device, internal.size, internal.usage, internal.mapState)
+
+ def write_mapped(self, data, buffer_offset: int | None = None):
+ if self.map_state != enums.BufferMapState.mapped:
+ raise RuntimeError(f"Can only write to a buffer if its mapped: {self.map_state=}")
+
+ # make sure it's in a known datatype???
+ data = memoryview(data).cast("B")
+ size = (data.nbytes + 3) & ~3
+
+ # None default values become undefined in js, which should still work as the function can be overloaded.
+ # TODO: try without this line
+ if buffer_offset is None:
+ buffer_offset = 0
+
+ # these can't be passed as keyword arguments I guess...
+ array_buf = self._internal.getMappedRange(buffer_offset, size)
+ Uint8Array.new(array_buf).assign(data)
+
+ def map_async(self, mode: flags.MapModeFlags | None, offset: int = 0, size: int | None = None) -> GPUPromise[None]:
+ map_promise = self._internal.mapAsync(mode, offset, size)
+
+ promise = GPUPromise("buffer.map_async", None, loop=self._device._loop)
+ map_promise.then(promise._set_input) # presumably this signals via a none callback to nothing?
+ return promise
+
+ @property
+ def map_state(self) -> enums.BufferMapState:
+ return self._internal.mapState
+
+ @property
+ def size(self) -> int:
+ js_size = self._internal.size
+ # print("GPUBuffer.size", js_size, type(js_size))
+ return js_size
+
+ @property
+ def usage(self) -> flags.BufferUsageFlags:
+ return self._internal.usage
+
+
+
+class GPUTexture(classes.GPUTexture, GPUObjectBase):
+
+ # Custom implementation for createView from _implementation.py:
+ def create_view(self, **kwargs):
+ descriptor = structs.TextureViewDescriptor(**kwargs)
+ js_descriptor = to_js(descriptor, eager_converter=simple_js_accessor)
+ js_obj = self._internal.createView(js_descriptor)
+
+ label = kwargs.pop("label", "")
+ return classes.GPUTextureView(label, js_obj, device=self._device, texture=self, size=self._tex_info["size"])
+
+ def destroy(self) -> None:
+ self._internal.destroy()
+
+ # Additional custom methods from _implementation.py:
+ def __init__(self, label: str, internal, device):
+ # here we create the cached _tex_info dict
+
+ tex_info = {
+ "size": (internal.width, internal.height, internal.depthOrArrayLayers),
+ "mip_level_count": internal.mipLevelCount,
+ "sample_count": internal.sampleCount,
+ "dimension": internal.dimension,
+ "format": internal.format,
+ "usage": internal.usage,
+ }
+ super().__init__(internal.label, internal, device, tex_info)
+
+
+
+class GPUTextureView(classes.GPUTextureView, GPUObjectBase):
+
+ pass
+
+class GPUSampler(classes.GPUSampler, GPUObjectBase):
+
+ pass
+
+class GPUBindGroupLayout(classes.GPUBindGroupLayout, GPUObjectBase):
+
+ pass
+
+class GPUBindGroup(classes.GPUBindGroup, GPUObjectBase):
+
+ pass
+
+class GPUPipelineLayout(classes.GPUPipelineLayout, GPUObjectBase):
+
+ pass
+
+class GPUShaderModule(classes.GPUShaderModule, GPUObjectBase):
+
+ # TODO: getCompilationInfo sync variant likely taken from _classes.py directly!
+ pass
+
+class GPUCompilationMessage(classes.GPUCompilationMessage, ):
+
+ pass
+
+class GPUCompilationInfo(classes.GPUCompilationInfo, ):
+
+ pass
+
+class GPUPipelineError(classes.GPUPipelineError, ):
+
+ pass
+
+class GPUPipelineBase(classes.GPUPipelineBase, ):
+
+ # Custom implementation for getBindGroupLayout from _implementation.py:
+ def get_bind_group_layout(self, index: int) -> classes.GPUBindGroupLayout:
+ res = self._internal.getBindGroupLayout(index)
+ # returns the js object... so we call the constructor here manually - for now.
+ label = res.label
+ return classes.GPUBindGroupLayout(label, res, self._device)
+
+
+class GPUComputePipeline(classes.GPUComputePipeline, GPUObjectBase, GPUPipelineBase):
+
+ pass
+
+class GPURenderPipeline(classes.GPURenderPipeline, GPUObjectBase, GPUPipelineBase):
+
+ pass
+
+class GPUCommandBuffer(classes.GPUCommandBuffer, GPUObjectBase):
+
+ pass
+
+class GPUCommandEncoder(classes.GPUCommandEncoder, GPUObjectBase, GPUCommandsMixin, GPUDebugCommandsMixin):
+
+ def begin_render_pass(self, **kwargs):
+ descriptor = structs.RenderPassDescriptor(**kwargs)
+ js_descriptor = to_js(descriptor, eager_converter=simple_js_accessor)
+ js_obj = self._internal.beginRenderPass(js_descriptor)
+
+ label = kwargs.pop("label", "")
+ return GPURenderPassEncoder(label, js_obj, device=self)
+
+ def begin_compute_pass(self, **kwargs):
+ descriptor = structs.ComputePassDescriptor(**kwargs)
+ js_descriptor = to_js(descriptor, eager_converter=simple_js_accessor)
+ js_obj = self._internal.beginComputePass(js_descriptor)
+
+ label = kwargs.pop("label", "")
+ return GPUComputePassEncoder(label, js_obj, device=self)
+
+ def copy_buffer_to_buffer(self, source: Union["GPUBuffer", None] = None, source_offset: Union[int, None] = None, destination: Union["GPUBuffer", None] = None, destination_offset: Union[int, None] = None, size: Union[int, None] = None) -> None:
+ js_source = source._internal
+ js_destination = destination._internal
+ self._internal.copyBufferToBuffer(js_source, source_offset, js_destination, destination_offset, size)
+
+ def copy_buffer_to_texture(self, source: structs.TexelCopyBufferInfoStruct | None = None, destination: structs.TexelCopyTextureInfoStruct | None = None, copy_size: tuple[int, int, int] | structs.Extent3DStruct | None = None) -> None:
+ source_desc = structs.TexelCopyBufferInfo(**source)
+ js_source = to_js(source_desc, eager_converter=simple_js_accessor)
+ destination_desc = structs.TexelCopyTextureInfo(**destination)
+ js_destination = to_js(destination_desc, eager_converter=simple_js_accessor)
+ # TODO: argument copy_size of JS type GPUExtent3D, py type tuple[int, int, int] | structs.Extent3DStruct might need conversion
+ self._internal.copyBufferToTexture(js_source, js_destination, copy_size)
+
+ def copy_texture_to_buffer(self, source: structs.TexelCopyTextureInfoStruct | None = None, destination: structs.TexelCopyBufferInfoStruct | None = None, copy_size: tuple[int, int, int] | structs.Extent3DStruct | None = None) -> None:
+ source_desc = structs.TexelCopyTextureInfo(**source)
+ js_source = to_js(source_desc, eager_converter=simple_js_accessor)
+ destination_desc = structs.TexelCopyBufferInfo(**destination)
+ js_destination = to_js(destination_desc, eager_converter=simple_js_accessor)
+ # TODO: argument copy_size of JS type GPUExtent3D, py type tuple[int, int, int] | structs.Extent3DStruct might need conversion
+ self._internal.copyTextureToBuffer(js_source, js_destination, copy_size)
+
+ def copy_texture_to_texture(self, source: structs.TexelCopyTextureInfoStruct | None = None, destination: structs.TexelCopyTextureInfoStruct | None = None, copy_size: tuple[int, int, int] | structs.Extent3DStruct | None = None) -> None:
+ source_desc = structs.TexelCopyTextureInfo(**source)
+ js_source = to_js(source_desc, eager_converter=simple_js_accessor)
+ destination_desc = structs.TexelCopyTextureInfo(**destination)
+ js_destination = to_js(destination_desc, eager_converter=simple_js_accessor)
+ # TODO: argument copy_size of JS type GPUExtent3D, py type tuple[int, int, int] | structs.Extent3DStruct might need conversion
+ self._internal.copyTextureToTexture(js_source, js_destination, copy_size)
+
+ def clear_buffer(self, buffer: Union["GPUBuffer", None] = None, offset: int = 0, size: Union[int, None] = None) -> None:
+ js_buffer = buffer._internal
+ self._internal.clearBuffer(js_buffer, offset, size)
+
+ def resolve_query_set(self, query_set: Union["GPUQuerySet", None] = None, first_query: Union[int, None] = None, query_count: Union[int, None] = None, destination: Union["GPUBuffer", None] = None, destination_offset: Union[int, None] = None) -> None:
+ js_querySet = query_set._internal
+ js_destination = destination._internal
+ self._internal.resolveQuerySet(js_querySet, first_query, query_count, js_destination, destination_offset)
+
+ def finish(self, **kwargs):
+ descriptor = structs.CommandBufferDescriptor(**kwargs)
+ js_descriptor = to_js(descriptor, eager_converter=simple_js_accessor)
+ js_obj = self._internal.finish(js_descriptor)
+
+ label = kwargs.pop("label", "")
+ return GPUCommandBuffer(label, js_obj, device=self)
+
+
+class GPUComputePassEncoder(classes.GPUComputePassEncoder, GPUObjectBase, GPUCommandsMixin, GPUDebugCommandsMixin, GPUBindingCommandsMixin):
+
+ def set_pipeline(self, pipeline: Union["GPUComputePipeline", None] = None) -> None:
+ js_pipeline = pipeline._internal
+ self._internal.setPipeline(js_pipeline)
+
+ def dispatch_workgroups(self, workgroup_count_x: Union[int, None] = None, workgroup_count_y: int = 1, workgroup_count_z: int = 1) -> None:
+
+ self._internal.dispatchWorkgroups(workgroup_count_x, workgroup_count_y, workgroup_count_z)
+
+ def dispatch_workgroups_indirect(self, indirect_buffer: Union["GPUBuffer", None] = None, indirect_offset: Union[int, None] = None) -> None:
+ js_indirectBuffer = indirect_buffer._internal
+ self._internal.dispatchWorkgroupsIndirect(js_indirectBuffer, indirect_offset)
+
+ def end(self) -> None:
+ self._internal.end()
+
+
+class GPURenderPassEncoder(classes.GPURenderPassEncoder, GPUObjectBase, GPUCommandsMixin, GPUDebugCommandsMixin, GPUBindingCommandsMixin, GPURenderCommandsMixin):
+
+ def set_viewport(self, x: Union[float, None] = None, y: Union[float, None] = None, width: Union[float, None] = None, height: Union[float, None] = None, min_depth: Union[float, None] = None, max_depth: Union[float, None] = None) -> None:
+
+ self._internal.setViewport(x, y, width, height, min_depth, max_depth)
+
+ def set_scissor_rect(self, x: Union[int, None] = None, y: Union[int, None] = None, width: Union[int, None] = None, height: Union[int, None] = None) -> None:
+
+ self._internal.setScissorRect(x, y, width, height)
+
+ def set_blend_constant(self, color: tuple[float, float, float, float] | structs.ColorStruct | None = None) -> None:
+ # TODO: argument color of JS type GPUColor, py type tuple[float, float, float, float] | structs.ColorStruct might need conversion
+ self._internal.setBlendConstant(color)
+
+ def set_stencil_reference(self, reference: Union[int, None] = None) -> None:
+
+ self._internal.setStencilReference(reference)
+
+ def begin_occlusion_query(self, query_index: Union[int, None] = None) -> None:
+
+ self._internal.beginOcclusionQuery(query_index)
+
+ def end_occlusion_query(self) -> None:
+ self._internal.endOcclusionQuery()
+
+ def execute_bundles(self, bundles: Sequence["GPURenderBundle"] | None = None) -> None:
+ # TODO: argument bundles of JS type sequence, py type list[GPURenderBundle] might need conversion
+ self._internal.executeBundles(bundles)
+
+ def end(self) -> None:
+ self._internal.end()
+
+
+class GPURenderBundle(classes.GPURenderBundle, GPUObjectBase):
+
+ pass
+
+class GPURenderBundleEncoder(classes.GPURenderBundleEncoder, GPUObjectBase, GPUCommandsMixin, GPUDebugCommandsMixin, GPUBindingCommandsMixin, GPURenderCommandsMixin):
+
+ def finish(self, **kwargs):
+ descriptor = structs.RenderBundleDescriptor(**kwargs)
+ js_descriptor = to_js(descriptor, eager_converter=simple_js_accessor)
+ js_obj = self._internal.finish(js_descriptor)
+
+ label = kwargs.pop("label", "")
+ return GPURenderBundle(label, js_obj, device=self)
+
+
+class GPUQueue(classes.GPUQueue, GPUObjectBase):
+
+ # Custom implementation for submit from _implementation.py:
+ def submit(self, command_buffers: structs.Sequence["GPUCommandBuffer"]) -> None:
+ js_command_buffers = [cb._internal for cb in command_buffers]
+ self._internal.submit(js_command_buffers)
+
+ # TODO: onSubmittedWorkDone sync variant likely taken from _classes.py directly!
+ def write_buffer(self, buffer: Union["GPUBuffer", None] = None, buffer_offset: Union[int, None] = None, data: Union[ArrayLike, None] = None, data_offset: int = 0, size: Union[int, None] = None) -> None:
+ js_buffer = buffer._internal
+
+ if data is not None:
+ data = memoryview(data).cast("B")
+ data_size = (data.nbytes + 3) & ~3 # align to 4 bytes
+ js_data = Uint8Array.new(data_size)
+ js_data.assign(data)
+ else:
+ js_data = None
+
+ self._internal.writeBuffer(js_buffer, buffer_offset, js_data, data_offset, size)
+
+ def write_texture(self, destination: structs.TexelCopyTextureInfoStruct | None = None, data: Union[ArrayLike, None] = None, data_layout: structs.TexelCopyBufferLayoutStruct | None = None, size: tuple[int, int, int] | structs.Extent3DStruct | None = None) -> None:
+ destination_desc = structs.TexelCopyTextureInfo(**destination)
+ js_destination = to_js(destination_desc, eager_converter=simple_js_accessor)
+
+ if data is not None:
+ data = memoryview(data).cast("B")
+ data_size = (data.nbytes + 3) & ~3 # align to 4 bytes
+ js_data = Uint8Array.new(data_size)
+ js_data.assign(data)
+ else:
+ js_data = None
+
+ data_layout_desc = structs.TexelCopyBufferLayout(**data_layout)
+ js_dataLayout = to_js(data_layout_desc, eager_converter=simple_js_accessor)
+ # TODO: argument size of JS type GPUExtent3D, py type tuple[int, int, int] | structs.Extent3DStruct might need conversion
+ self._internal.writeTexture(js_destination, js_data, js_dataLayout, size)
+
+ def copy_external_image_to_texture(self, source: structs.CopyExternalImageSourceInfoStruct | None = None, destination: structs.CopyExternalImageDestInfoStruct | None = None, copy_size: tuple[int, int, int] | structs.Extent3DStruct | None = None) -> None:
+ source_desc = structs.CopyExternalImageSourceInfo(**source)
+ js_source = to_js(source_desc, eager_converter=simple_js_accessor)
+ destination_desc = structs.CopyExternalImageDestInfo(**destination)
+ js_destination = to_js(destination_desc, eager_converter=simple_js_accessor)
+ # TODO: argument copy_size of JS type GPUExtent3D, py type tuple[int, int, int] | structs.Extent3DStruct might need conversion
+ self._internal.copyExternalImageToTexture(js_source, js_destination, copy_size)
+
+ # Additional custom methods from _implementation.py:
+ def read_buffer(self, buffer: GPUBuffer, buffer_offset: int = 0, size: int | None = None) -> memoryview:
+ # largely copied from wgpu-native/_api.py
+ # print(dir(self))
+ device = self._device
+
+ if not size:
+ data_length = buffer.size - buffer_offset
+ else:
+ data_length = int(size)
+ if not (0 <= buffer_offset < buffer.size): # pragma: no cover
+ raise ValueError("Invalid buffer_offset")
+ if not (data_length <= buffer.size - buffer_offset): # pragma: no cover
+ raise ValueError("Invalid data_length")
+ data_length = (data_length + 3) & ~3 # align to 4 bytes
+
+ js_temp_buffer = device._internal.createBuffer(size=data_length, usage=flags.BufferUsage.COPY_DST | flags.BufferUsage.MAP_READ, mappedAtCreation=False, label="output buffer temp")
+
+ js_encoder = device._internal.createCommandEncoder()
+ # TODO: somehow test if all the offset math is correct
+ js_encoder.copyBufferToBuffer(buffer._internal, buffer_offset, js_temp_buffer, buffer_offset, data_length)
+ self._internal.submit([js_encoder.finish()])
+
+ # best way to await the promise directly?
+ # TODO: can we do more steps async before waiting?
+ run_sync(js_temp_buffer.mapAsync(flags.MapMode.READ, 0, data_length))
+ array_buf = js_temp_buffer.getMappedRange()
+ res = array_buf.slice(0)
+ js_temp_buffer.unmap()
+ return res.to_py()
+
+
+
+class GPUQuerySet(classes.GPUQuerySet, GPUObjectBase):
+
+ def destroy(self) -> None:
+ self._internal.destroy()
+
+
+class GPUCanvasContext(classes.GPUCanvasContext, ):
+
+ # Custom implementation for configure from _implementation.py:
+ def configure(self, **kwargs):
+ descriptor = structs.CanvasConfiguration(**kwargs)
+ js_descriptor = to_js(descriptor, eager_converter=simple_js_accessor)
+
+ self._internal.configure(js_descriptor)
+ self._config = {
+ "device": kwargs.get("device"),
+ "format": kwargs.get("format"),
+ "usage": kwargs.get("usage", 0x10),
+ "view_formats": kwargs.get("view_formats", ()),
+ "color_space": kwargs.get("color_space", "srgb"),
+ "tone_mapping": kwargs.get("tone_mapping", None),
+ "alpha_mode": kwargs.get("alpha_mode", "opaque"),
+ }
+
+ def unconfigure(self) -> None:
+ self._internal.unconfigure()
+
+ # TODO: implement codegen for getConfiguration with args [] or return type GPUCanvasConfiguration?
+ # Custom implementation for getCurrentTexture from _implementation.py:
+ def get_current_texture(self) -> GPUTexture:
+ js_texture = self._internal.getCurrentTexture()
+
+ label = "" # always empty?
+ return GPUTexture(label, js_texture, self._config["device"])
+
+ # Additional custom methods from _implementation.py:
+ def __init__(self, canvas, present_methods):
+ super().__init__(canvas, present_methods)
+ if self._present_method == "screen":
+ # rendercanvas.pyodide provides exactly this. Maybe we can also take an HTML canvas directly?
+ # for potential interop with webgpu applications that already run in the browser.
+ assert present_methods["screen"]["platform"] == "browser"
+ canvas_attribute = present_methods["screen"]["native_canvas_attribute"]
+ else:
+ # likely bitmap... but this could still work... might just try it
+ raise NotImplementedError(f"Unsupported present method: {self._present_method}")
+
+ self._internal = getattr(canvas, canvas_attribute).getContext("webgpu")
+
+ def get_preferred_format(self, adapter: GPUAdapter | None) -> enums.TextureFormat:
+ return gpu._internal.getPreferredCanvasFormat()
+
+
+
+class GPUDeviceLostInfo(classes.GPUDeviceLostInfo, ):
+
+ pass
+
+class GPUError(classes.GPUError, ):
+
+ pass
+
+class GPUValidationError(classes.GPUValidationError, GPUError):
+
+ pass
+
+class GPUOutOfMemoryError(classes.GPUOutOfMemoryError, GPUError):
+
+ pass
+
+class GPUInternalError(classes.GPUInternalError, GPUError):
+
+ pass
+
+
+gpu = GPU()
diff --git a/wgpu/backends/js_webgpu/_helpers.py b/wgpu/backends/js_webgpu/_helpers.py
new file mode 100644
index 00000000..e4f7501e
--- /dev/null
+++ b/wgpu/backends/js_webgpu/_helpers.py
@@ -0,0 +1,94 @@
+"""
+Helper functions for dealing with pyodide for the js webgpu backend.
+"""
+
+from ... import classes, structs
+from pyodide.ffi import to_js
+
+
+def to_camel_case(snake_str):
+ components = snake_str.split('_')
+ res = components[0] + ''.join(x.title() for x in components[1:])
+ # maybe keywords are a problem?
+ # https://pyodide.org/en/stable/usage/faq.html#how-can-i-access-javascript-objects-attributes-in-python-if-their-names-are-python-keywords
+ # if res in ["type", "format"]:
+ # res += "_"
+ return res
+
+
+# TODO: clean this up before readying for merge!
+
+# for use in to_js() https://pyodide.org/en/stable/usage/api/python-api/ffi.html#pyodide.ffi.ToJsConverter
+# you have to do the recursion yourself...
+def simple_js_accessor(value, convert, cache=None):
+ # print("simple_js_accessor", value, type(value), dir(value))
+ if isinstance(value, classes.GPUObjectBase):
+ # print("GPUObjectBase detected", value)
+ return value._internal # type : JsProxy
+ elif isinstance(value, structs.Struct):
+ result = {}
+ for k, v in value.items():
+ camel_key = to_camel_case(k)
+ # if there is a dict further down... we still need to fix those keys
+ if isinstance(v, dict):
+ if(k == "resource"): # this one is a more complex type.... https://www.w3.org/TR/webgpu/#typedefdef-gpubindingresource
+ # print("struct with resource dict detected", k, v)
+ v = structs.BufferBinding(**v)
+ # print("RESOURCE AS A STRUCT:", v)
+ down_convert = to_js(v, eager_converter=simple_js_accessor)
+ down_convert = to_js(down_convert.to_py(depth=1), depth=1) if hasattr(down_convert, "to_py") else down_convert
+ result[camel_key] = down_convert
+ # print("called convert(v) on RESOURCE STRUCT", result[camel_key])
+ continue
+ # print("struct with dict detected", value, k, v)
+ # print(dir(value))
+ v_struct_type_name = value.__annotations__[k].partition("Struct")[0] # will not work if there is more than two options -.-
+ # print("likely v struct type_name", v_struct_type_name)
+ v_struct_type = structs.__dict__[v_struct_type_name] # because the annotation is just a string... doesn't feel great
+ # print("likely v struct type", v_struct_type)
+ v = v_struct_type(**v)
+ # print("converted to struct", v)
+
+        # if there is a list of dicts... it will still call the default sequence converter and then dict converter...
+ elif isinstance(v, (list)): #maybe tuple too?
+ if v and isinstance(v[0], dict): # assume all elements are the same type too and non empty?
+ # print("struct with list detected", value, k, v)
+ v_struct_type_name = value.__annotations__[k].removeprefix("Sequence[").partition("Struct")[0]
+ # print("likely v struct type_name", v_struct_type_name)
+ v_struct_type = structs.__dict__[v_struct_type_name]
+ # print("likely v struct type", v_struct_type)
+ v = [v_struct_type(**item) for item in v]
+ # print("converted to list of struct", v)
+ else:
+ # could be a list of other objects like GPUBindGroupLayout for example.
+ pass
+ # print("initial call to down_convert", v)
+ down_convert = to_js(v, eager_converter=simple_js_accessor)
+ # print("first result of down_convert", down_convert, dir(down_convert))
+ down_convert = to_js(down_convert.to_py(depth=1), depth=1) if hasattr(down_convert, "to_py") else down_convert
+ # print("final result of down_convert", down_convert)
+ result[camel_key] = down_convert
+ # print("struct conversion result: ", type(result), result)
+ return result
+
+ elif isinstance(value, (list, tuple)):
+ result = [to_js(v, eager_converter=simple_js_accessor) for v in value]
+ return to_js(result, depth=1) # to make sure it's like an ArrayList?
+ # this might recursively call itself...
+ # maybe use a map? or do a dict_converted?
+ # elif isinstance(value, dict):
+ # result = {}
+ # # cache(value, result)
+ # for k, v in value.items():
+ # camel_key = to_camel_case(k) if isinstance(k, str) else k
+ # result[camel_key] = convert(v)
+ # if len(result) == 0:
+ # return Object.new() # maybe this?
+ # let's hope this is only ever reached when all the contents are already converted.
+ # map = Map.new(result.items())
+ # return Object.fromEntries(map)
+ # print("simple_js_accessor default", value, type(value))
+ return convert(value) # or to_js(value)?
+
+# TODO: can we implement our own variant of JsProxy and PyProxy, to_js and to_py? to work with pyodide and not around it?
+# https://pyodide.org/en/stable/usage/type-conversions.html#type-translations
diff --git a/wgpu/backends/js_webgpu/_implementation.py b/wgpu/backends/js_webgpu/_implementation.py
new file mode 100644
index 00000000..81f730a0
--- /dev/null
+++ b/wgpu/backends/js_webgpu/_implementation.py
@@ -0,0 +1,358 @@
+"""
+This provides the pyodide implementation for the js_webgpu backend.
+Constructors and Methods defined here are picked over auto-generated methods for the backend in _api.py
+"""
+
+from ... import classes, structs, flags, enums
+from ._helpers import simple_js_accessor
+
+from pyodide.ffi import to_js, run_sync, JsProxy
+from js import window, Uint8Array
+
+class GPUPromise(classes.GPUPromise):
+ # TODO: can we resolve the js promises and then call our constructors?
+ # should loop be globally the webloop? or will rendercanvas give us that in the future?
+
+ def sync_wait(self):
+ # pyodide way that hopefully works?
+ # explanation: https://blog.pyodide.org/posts/jspi/
+ result = run_sync(self)
+ return result
+
+
+class GPU(classes.GPU):
+ def __init__(self):
+ self._internal = window.navigator.gpu # noqa: F821
+
+ # TODO: maybe autogenerate async?
+ def request_adapter_async(self, loop=None, canvas=None, **options) -> GPUPromise["GPUAdapter"]:
+ options = structs.RequestAdapterOptions(**options)
+ js_options = to_js(options, eager_converter=simple_js_accessor)
+ js_adapter_promise = self._internal.requestAdapter(js_options)
+
+ if loop is None:
+ # can we use this instead?
+ webloop = js_adapter_promise.get_loop()
+ loop = webloop
+
+ def adapter_constructor(js_adapter):
+ return GPUAdapter(js_adapter, loop=loop)
+ promise = GPUPromise("request_adapter", adapter_constructor, loop=loop)
+
+ js_adapter_promise.then(promise._set_input) # we chain the js resolution to our promise
+ return promise
+
+ def enumerate_adapters_async(self, loop=None) -> GPUPromise[list["GPUAdapter"]]:
+ adapter_hp = self.request_adapter_sync(power_preference="high-performance")
+ adapter_lp = self.request_adapter_sync(power_preference="low-power")
+
+ promise = GPUPromise("enumerate_adapters", None, loop=loop)
+ promise._set_input([adapter_hp, adapter_lp])
+ return promise
+
+ # TODO: autogenerate properties!
+ @property
+ def wgsl_language_features(self):
+ return self._internal.wgslLanguageFeatures
+
+
+class GPUAdapter(classes.GPUAdapter):
+ def __init__(self, js_adapter, loop):
+ internal = js_adapter
+ # manually turn these into useful python objects
+ features = set(js_adapter.features)
+
+ # TODO: _get_limits()?
+ limits = js_adapter.limits
+ py_limits = {}
+ for limit in dir(limits):
+            # we don't have the GPUSupportedLimits as a struct or list anywhere in the code right now, maybe we unskip it in the codegen?
+ if isinstance(getattr(limits, limit), int) and "_" not in limit:
+ py_limits[limit] = getattr(limits, limit)
+
+ infos = ["vendor", "architecture", "device", "description", "subgroupMinSize", "subgroupMaxSize", "isFallbackAdapter"]
+ adapter_info = js_adapter.info
+ py_adapter_info = {}
+ for info in infos:
+ if hasattr(adapter_info, info):
+ py_adapter_info[info] = getattr(adapter_info, info)
+
+ #for compatibility, we fill the native-extra infos too:
+ py_adapter_info["vendor_id"] = 0
+ py_adapter_info["device_id"] = 0
+ py_adapter_info["adapter_type"] = "browser"
+ py_adapter_info["backend_type"] = "WebGPU"
+
+ adapter_info = classes.GPUAdapterInfo(**py_adapter_info)
+
+ super().__init__(internal=internal, features=features, limits=py_limits, adapter_info=adapter_info, loop=loop)
+
+    # TODO: we should autogenerate this async method (see request_adapter_async)
+ def request_device_async(self, **kwargs) -> GPUPromise["GPUDevice"]:
+ descriptor = structs.DeviceDescriptor(**kwargs)
+ js_descriptor = to_js(descriptor, eager_converter=simple_js_accessor)
+ js_device_promise = self._internal.requestDevice(js_descriptor)
+
+ label = kwargs.get("label", "")
+ def device_constructor(js_device):
+ # TODO: do we need to hand down a default_queue here?
+ return GPUDevice(label, js_device, adapter=self)
+
+ promise = GPUPromise("request_device", device_constructor, loop=self._loop)
+ js_device_promise.then(promise._set_input)
+ return promise
+
+
+class GPUDevice(classes.GPUDevice):
+ def __init__(self, label:str, js_device, adapter:GPUAdapter):
+ features = set(js_device.features)
+
+ js_limits = js_device.limits
+ limits = {}
+ for limit in dir(js_limits):
+ if isinstance(getattr(js_limits, limit), int) and "_" not in limit:
+ limits[limit] = getattr(js_limits, limit)
+
+ queue = GPUQueue(label="default queue", internal=js_device.queue, device=self)
+ super().__init__(label, internal=js_device, adapter=adapter, features=features, limits=limits, queue=queue)
+
+ # API diff: useful to have?
+ @property
+ def adapter(self) -> GPUAdapter:
+ return self._adapter
+
+ # TODO: currently unused, rewrite and test!
+ # TODO: apidiff rewritten so we avoid the buggy mess in map_write for a bit.
+ def create_buffer_with_data_(self, *, label="", data, usage: flags.BufferUsageFlags) -> "GPUBuffer":
+ data = memoryview(data).cast("B") # unit8
+ data_size = (data.nbytes + 3) & ~3 # align to 4 bytes
+
+ # if it's a Descriptor you need the keywords
+ # do we need to also need to modify the usages?
+ js_buf = self._internal.createBuffer(label=label, size=data_size, usage=usage, mappedAtCreation=True)
+ # print("created buffer", js_buf, dir(js_buf), js_buf.size)
+ array_buf = js_buf.getMappedRange(0, data_size)
+ Uint8Array.new(array_buf).assign(data)
+ # print(array_buf.to_py().tolist())
+ js_buf.unmap()
+ # print("created buffer", js_buf, dir(js_buf), js_buf.size)
+ return GPUBuffer(label, js_buf, self, data_size, usage, enums.BufferMapState.unmapped)
+
+ # TODO: no example tests this!
+ # TODO: this exists fake-sync and async in webgpu already. Needs to be handled in the generation correctly!
+ def create_compute_pipeline_async(self, **kwargs):
+ descriptor = structs.ComputePipelineDescriptor(**kwargs)
+ js_descriptor = to_js(descriptor, eager_converter=simple_js_accessor)
+ js_promise = self._internal.createComputePipelineAsync(js_descriptor)
+
+ label = kwargs.get("label", "")
+ def construct_compute_pipeline(js_cp):
+ return classes.GPUComputePipeline(label, js_cp, self)
+ promise = GPUPromise("create_compute_pipeline", construct_compute_pipeline, loop=self._loop)
+ js_promise.then(promise._set_input)
+
+ return promise
+
+ # TODO: same as above
+ def create_render_pipeline_async(self, **kwargs):
+ descriptor = structs.RenderPipelineDescriptor(**kwargs)
+ js_descriptor = to_js(descriptor, eager_converter=simple_js_accessor)
+ js_promise = self._internal.createRenderPipelineAsync(js_descriptor)
+
+ label = kwargs.get("label", "")
+ def construct_render_pipeline(js_rp):
+ return classes.GPURenderPipeline(label, js_rp, self)
+ promise = GPUPromise("create_render_pipeline", construct_render_pipeline, loop=self._loop)
+ js_promise.then(promise._set_input)
+
+ return promise
+
+ # this one needs additional parameters in the constructor
+ def create_query_set(self, **kwargs):
+ descriptor = structs.QuerySetDescriptor(**kwargs)
+ js_descriptor = to_js(descriptor, eager_converter=simple_js_accessor)
+ js_obj = self._internal.createQuerySet(js_descriptor)
+
+ label = kwargs.pop("label", "")
+ type = descriptor.get("type")
+ count = descriptor.get("count")
+ return GPUQuerySet(label, js_obj, device=self, type=type, count=count)
+
+class GPUBuffer(classes.GPUBuffer):
+ # TODO: remove label from the constructors!
+ def __init__(self, label, internal, device):
+ # can we just fill the _classes constructor with properties?
+ super().__init__(internal.label, internal, device, internal.size, internal.usage, internal.mapState)
+
+ @property
+ def map_state(self) -> enums.BufferMapState:
+ return self._internal.mapState
+
+ @property
+ def size(self) -> int:
+ js_size = self._internal.size
+ # print("GPUBuffer.size", js_size, type(js_size))
+ return js_size
+
+ @property
+ def usage(self) -> flags.BufferUsageFlags:
+ return self._internal.usage
+
+ # TODO apidiff
+ def write_mapped(self, data, buffer_offset: int | None = None):
+ if self.map_state != enums.BufferMapState.mapped:
+ raise RuntimeError(f"Can only write to a buffer if its mapped: {self.map_state=}")
+
+ # make sure it's in a known datatype???
+ data = memoryview(data).cast("B")
+ size = (data.nbytes + 3) & ~3
+
+ # None default values become undefined in js, which should still work as the function can be overloaded.
+ # TODO: try without this line
+ if buffer_offset is None:
+ buffer_offset = 0
+
+ # these can't be passed as keyword arguments I guess...
+ array_buf = self._internal.getMappedRange(buffer_offset, size)
+ Uint8Array.new(array_buf).assign(data)
+
+ def map_async(self, mode: flags.MapModeFlags | None, offset: int = 0, size: int | None = None) -> GPUPromise[None]:
+ map_promise = self._internal.mapAsync(mode, offset, size)
+
+ promise = GPUPromise("buffer.map_async", None, loop=self._device._loop)
+        map_promise.then(promise._set_input) # the JS mapAsync promise resolves with undefined, so this just signals completion
+ return promise
+
+# TODO: we can't overwrite mixins already inherited from....
+class GPUBindingCommandsMixin(classes.GPUBindingCommandsMixin):
+ # function has overloads! so this simple one works for now.
+ def set_bind_group(
+ self,
+ index:int,
+ bind_group: classes.GPUBindGroup,
+ dynamic_offsets_data: list[int] = (),
+ dynamic_offsets_data_start = None,
+ dynamic_offsets_data_length = None
+ ) -> None:
+
+ self._internal.setBindGroup(index, bind_group._internal, dynamic_offsets_data)
+
+
+class GPUPipelineBase(classes.GPUPipelineBase):
+ # TODO: can we build some kind of "get_constructor" for the codegen instead?
+ def get_bind_group_layout(self, index: int) -> classes.GPUBindGroupLayout:
+ res = self._internal.getBindGroupLayout(index)
+ # returns the js object... so we call the constructor here manually - for now.
+ label = res.label
+ return classes.GPUBindGroupLayout(label, res, self._device)
+
+
+class GPUQueue(classes.GPUQueue):
+
+ # TODO: fix the generation for sequence types!
+ def submit(self, command_buffers: structs.Sequence["GPUCommandBuffer"]) -> None:
+ js_command_buffers = [cb._internal for cb in command_buffers]
+ self._internal.submit(js_command_buffers)
+
+ # API diff
+ def read_buffer(self, buffer: GPUBuffer, buffer_offset: int=0, size: int | None = None) -> memoryview:
+ # largely copied from wgpu-native/_api.py
+ # print(dir(self))
+ device = self._device
+
+ if not size:
+ data_length = buffer.size - buffer_offset
+ else:
+ data_length = int(size)
+ if not (0 <= buffer_offset < buffer.size): # pragma: no cover
+ raise ValueError("Invalid buffer_offset")
+ if not (data_length <= buffer.size - buffer_offset): # pragma: no cover
+ raise ValueError("Invalid data_length")
+ data_length = (data_length + 3) & ~3 # align to 4 bytes
+
+ js_temp_buffer = device._internal.createBuffer(
+ size=data_length,
+ usage=flags.BufferUsage.COPY_DST | flags.BufferUsage.MAP_READ,
+ mappedAtCreation=False,
+ label="output buffer temp"
+ )
+
+ js_encoder = device._internal.createCommandEncoder()
+ # TODO: somehow test if all the offset math is correct
+ js_encoder.copyBufferToBuffer(buffer._internal, buffer_offset, js_temp_buffer, buffer_offset, data_length)
+ self._internal.submit([js_encoder.finish()])
+
+ # best way to await the promise directly?
+ # TODO: can we do more steps async before waiting?
+ run_sync(js_temp_buffer.mapAsync(flags.MapMode.READ, 0, data_length))
+ array_buf = js_temp_buffer.getMappedRange()
+ res = array_buf.slice(0)
+ js_temp_buffer.unmap()
+ return res.to_py()
+
+
+class GPUTexture(classes.GPUTexture):
+ def __init__(self, label: str, internal, device):
+ # here we create the cached _tex_info dict
+
+ tex_info = {
+ "size": (internal.width, internal.height, internal.depthOrArrayLayers),
+ "mip_level_count": internal.mipLevelCount,
+ "sample_count": internal.sampleCount,
+ "dimension": internal.dimension,
+ "format": internal.format,
+ "usage": internal.usage,
+ }
+ super().__init__(internal.label, internal, device, tex_info)
+
+ # has a more complex constructor...
+ def create_view(self, **kwargs):
+ descriptor = structs.TextureViewDescriptor(**kwargs)
+ js_descriptor = to_js(descriptor, eager_converter=simple_js_accessor)
+ js_obj = self._internal.createView(js_descriptor)
+
+ label = kwargs.pop("label", "")
+ return classes.GPUTextureView(label, js_obj, device=self._device, texture=self, size=self._tex_info["size"])
+
+class GPUCanvasContext(classes.GPUCanvasContext):
+ def __init__(self, canvas, present_methods):
+ super().__init__(canvas, present_methods)
+ if self._present_method == "screen":
+ # rendercanvas.pyodide provides exactly this. Maybe we can also take a HTML canvas directly?
+ # for potential interop with webgpu applications that already run in the browser.
+ assert present_methods["screen"]["platform"] == "browser"
+ canvas_attribute = present_methods["screen"]["native_canvas_attribute"]
+ else:
+ # likely bitmap... but this could still work... might just try it
+ raise NotImplementedError(f"Unsupported present method: {self._present_method}")
+
+ self._internal = getattr(canvas, canvas_attribute).getContext("webgpu")
+
+ # we can't really replace ._config by getConfiguration() because the device constructor is so complex?
+ def configure(self, **kwargs):
+ descriptor = structs.CanvasConfiguration(**kwargs)
+ js_descriptor = to_js(descriptor, eager_converter=simple_js_accessor)
+
+ self._internal.configure(js_descriptor)
+ self._config = {
+ "device": kwargs.get("device"),
+ "format": kwargs.get("format"),
+ "usage": kwargs.get("usage", 0x10),
+ "view_formats": kwargs.get("view_formats", ()),
+ "color_space": kwargs.get("color_space", "srgb"),
+ "tone_mapping": kwargs.get("tone_mapping", None),
+ "alpha_mode": kwargs.get("alpha_mode", "opaque"),
+ }
+
+ def get_current_texture(self) -> GPUTexture:
+ js_texture = self._internal.getCurrentTexture()
+
+ label = "" # always empty?
+ return GPUTexture(label, js_texture, self._config["device"])
+
+ # undo the api diff
+ def get_preferred_format(self, adapter: GPUAdapter | None) -> enums.TextureFormat:
+ return gpu._internal.getPreferredCanvasFormat()
+
+# needed here for the CanvasContext?
+gpu = GPU()
diff --git a/wgpu/resources/codegen_report.md b/wgpu/resources/codegen_report.md
index fcfb995e..4a611935 100644
--- a/wgpu/resources/codegen_report.md
+++ b/wgpu/resources/codegen_report.md
@@ -43,3 +43,4 @@
* Validated 153 C function calls
* Not using 68 C functions
* Validated 96 C structs
+## Writing backends/js_webgpu/_api.py
diff --git a/wgpu/utils/compute.py b/wgpu/utils/compute.py
index 705b1384..b6f9d7c3 100644
--- a/wgpu/utils/compute.py
+++ b/wgpu/utils/compute.py
@@ -163,7 +163,6 @@ def compute_with_buffers(input_arrays, output_arrays, shader, constants=None, n=
pipeline_layout = device.create_pipeline_layout(
bind_group_layouts=[bind_group_layout]
)
- bind_group = device.create_bind_group(layout=bind_group_layout, entries=bindings)
compute = {
"module": cshader,
@@ -178,6 +177,7 @@ def compute_with_buffers(input_arrays, output_arrays, shader, constants=None, n=
layout=pipeline_layout,
compute=compute,
)
+ bind_group = device.create_bind_group(layout=bind_group_layout, entries=bindings)
command_encoder = device.create_command_encoder()
compute_pass = command_encoder.begin_compute_pass()
compute_pass.set_pipeline(compute_pipeline)