From 655ce13c2819a33c530f4807b5382b08d930a941 Mon Sep 17 00:00:00 2001 From: Jonathan Lee Date: Fri, 4 Oct 2024 22:21:02 -0400 Subject: [PATCH 1/4] [wptrunner] Extract executor testdriver logic into mixin This no-op refactor will allow other WebDriver executors, not just the testharness executor, to perform testdriver actions. --- .../wptrunner/executors/executorwebdriver.py | 169 ++++++++++-------- 1 file changed, 91 insertions(+), 78 deletions(-) diff --git a/tools/wptrunner/wptrunner/executors/executorwebdriver.py b/tools/wptrunner/wptrunner/executors/executorwebdriver.py index 3c1bdfd7a726e7..c81b0e00a842b0 100644 --- a/tools/wptrunner/wptrunner/executors/executorwebdriver.py +++ b/tools/wptrunner/wptrunner/executors/executorwebdriver.py @@ -803,77 +803,17 @@ def run_func(self): self.result_flag.set() -class WebDriverTestharnessExecutor(TestharnessExecutor): - supports_testdriver = True - protocol_cls = WebDriverProtocol - _get_next_message = None - - def __init__(self, logger, browser, server_config, timeout_multiplier=1, - close_after_done=True, capabilities=None, debug_info=None, - cleanup_after_test=True, **kwargs): - """WebDriver-based executor for testharness.js tests""" - TestharnessExecutor.__init__(self, logger, browser, server_config, - timeout_multiplier=timeout_multiplier, - debug_info=debug_info) - self.protocol = self.protocol_cls(self, browser, capabilities) - with open(os.path.join(here, "testharness_webdriver_resume.js")) as f: - self.script_resume = f.read() - with open(os.path.join(here, "window-loaded.js")) as f: - self.window_loaded_script = f.read() - - if hasattr(self.protocol, 'bidi_script'): - # If `bidi_script` is available, the messages can be handled via BiDi. - self._get_next_message = self._get_next_message_bidi - else: - self._get_next_message = self._get_next_message_classic - - self.close_after_done = close_after_done - self.window_id = str(uuid.uuid4()) - self.cleanup_after_test = cleanup_after_test - - def is_alive(self): - return self.protocol.is_alive() - - def on_environment_change(self, new_environment): - if new_environment["protocol"] != self.last_environment["protocol"]: - self.protocol.testharness.load_runner(new_environment["protocol"]) - - def do_test(self, test): - url = self.test_url(test) - - success, data = WebDriverRun(self.logger, - self.do_testharness, - self.protocol, - url, - test.timeout * self.timeout_multiplier, - self.extra_timeout).run() - - if success: - data, extra = data - return self.convert_result(test, data, extra=extra) - - return (test.make_result(*data), []) - - def do_testharness(self, protocol, url, timeout): - # The previous test may not have closed its old windows (if something - # went wrong or if cleanup_after_test was False), so clean up here. - parent_window = protocol.testharness.close_old_windows() +class TestDriverExecutorMixin: + def __init__(self, script_resume: str): + self.script_resume = script_resume + def run_testdriver(self, protocol, url, timeout): # If protocol implements `bidi_events`, remove all the existing subscriptions. if hasattr(protocol, 'bidi_events'): # Use protocol loop to run the async cleanup. 
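            # Stale subscriptions left over from a previous test would
            # otherwise keep delivering BiDi events into this one.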
            protocol.loop.run_until_complete(protocol.bidi_events.unsubscribe_all())

-        # Now start the test harness
-        protocol.testharness.open_test_window(self.window_id)
-        test_window = protocol.testharness.get_test_window(self.window_id,
-                                                           parent_window,
-                                                           timeout=5*self.timeout_multiplier)
-        self.protocol.base.set_window(test_window)
-
-        # Wait until about:blank has been loaded
-        protocol.base.execute_script(self.window_loaded_script, asynchronous=True)
-
+        test_window = self.prepare_test_window(protocol)
         # Exceptions occurred outside the main loop.
         unexpected_exceptions = []

@@ -958,24 +898,22 @@ async def process_bidi_event(method, params):
             # Use protocol loop to run the async cleanup.
             protocol.loop.run_until_complete(protocol.bidi_events.unsubscribe_all())

-        extra = {}
-        if (leak_part := getattr(protocol, "leak", None)) and (counters := leak_part.check()):
-            extra["leak_counters"] = counters
-
-        # Attempt to clean up any leftover windows, if allowed. This is
-        # preferable as it will blame the correct test if something goes wrong
-        # closing windows, but if the user wants to see the test results we
-        # have to leave the window(s) open.
-        if self.cleanup_after_test:
-            protocol.testharness.close_old_windows()
-
         if len(unexpected_exceptions) > 0:
             # TODO: what to do if there is more than one unexpected exception?
             raise unexpected_exceptions[0]
+        return rv

-        return rv, extra
+    def prepare_test_window(self, protocol):
+        return protocol.base.current_window

-    def _get_next_message_classic(self, protocol, url, _):
+    def _get_next_message(self, protocol, url, test_window):
+        if hasattr(protocol, 'bidi_script'):
+            # If `bidi_script` is available, the messages can be handled via BiDi.
+            return self._get_next_message_bidi(protocol, url, test_window)
+        else:
+            return self._get_next_message_classic(protocol, url)
+
+    def _get_next_message_classic(self, protocol, url):
        """
        Get the next message from the test_driver using the classic WebDriver async
        script execution. This will block the event loop until the test_driver sends a message.
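
Taken together, these hunks leave run_testdriver() owning the testdriver
message loop, while prepare_test_window() becomes the hook that window-based
executors override. A minimal sketch of a consumer, assuming the
RefTestExecutor base already imported by this module; the subclass and its
method names are illustrative, not part of the patch:

    class SketchExecutor(RefTestExecutor, TestDriverExecutorMixin):
        # Illustrative subclass showing the intended division of labor.
        def __init__(self, logger, browser, server_config, **kwargs):
            RefTestExecutor.__init__(self, logger, browser, server_config, **kwargs)
            # The mixin only needs the script whose completion resumes the loop.
            TestDriverExecutorMixin.__init__(self, script_resume="/* wait script */")

        def run_one(self, protocol, url, timeout):
            # run_testdriver() loads the test URL, pumps testdriver actions,
            # and returns the payload of the page's "complete" message.
            return self.run_testdriver(protocol, url, timeout)
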
@@ -1017,6 +955,81 @@ def _get_next_message_bidi(self, protocol, url, test_window): return deserialized_message +class WebDriverTestharnessExecutor(TestharnessExecutor, TestDriverExecutorMixin): + protocol_cls = WebDriverProtocol + supports_testdriver = True + + def __init__(self, logger, browser, server_config, timeout_multiplier=1, + close_after_done=True, capabilities=None, debug_info=None, + cleanup_after_test=True, **kwargs): + """WebDriver-based executor for testharness.js tests""" + TestharnessExecutor.__init__(self, logger, browser, server_config, + timeout_multiplier=timeout_multiplier, + debug_info=debug_info) + self.protocol = self.protocol_cls(self, browser, capabilities) + self.close_after_done = close_after_done + self.window_id = str(uuid.uuid4()) + self.cleanup_after_test = cleanup_after_test + + with open(os.path.join(here, "testharness_webdriver_resume.js")) as f: + script_resume = f.read() + TestDriverExecutorMixin.__init__(self, script_resume) + with open(os.path.join(here, "window-loaded.js")) as f: + self.window_loaded_script = f.read() + + def is_alive(self): + return self.protocol.is_alive() + + def on_environment_change(self, new_environment): + if new_environment["protocol"] != self.last_environment["protocol"]: + self.protocol.testharness.load_runner(new_environment["protocol"]) + + def do_test(self, test): + url = self.test_url(test) + + success, data = WebDriverRun(self.logger, + self.do_testharness, + self.protocol, + url, + test.timeout * self.timeout_multiplier, + self.extra_timeout).run() + + if success: + data, extra = data + return self.convert_result(test, data, extra=extra) + + return (test.make_result(*data), []) + + def prepare_test_window(self, protocol): + # The previous test may not have closed its old windows (if something + # went wrong or if cleanup_after_test was False), so clean up here. + parent_window = protocol.testharness.close_old_windows() + # Now start the test harness + protocol.testharness.open_test_window(self.window_id) + test_window = protocol.testharness.get_test_window(self.window_id, + parent_window, + timeout=5*self.timeout_multiplier) + self.protocol.base.set_window(test_window) + # Wait until about:blank has been loaded + protocol.base.execute_script(self.window_loaded_script, asynchronous=True) + return test_window + + def do_testharness(self, protocol, url, timeout): + try: + raw_results = self.run_testdriver(protocol, url, timeout) + extra = {} + if (leak_part := getattr(protocol, "leak", None)) and (counters := leak_part.check()): + extra["leak_counters"] = counters + return raw_results, extra + finally: + # Attempt to clean up any leftover windows, if allowed. This is + # preferable as it will blame the correct test if something goes wrong + # closing windows, but if the user wants to see the test results we + # have to leave the window(s) open. + if self.cleanup_after_test: + protocol.testharness.close_old_windows() + + class WebDriverRefTestExecutor(RefTestExecutor): protocol_cls = WebDriverProtocol From e0532a50a79c082e4d347dcc1a07c179a5216ce5 Mon Sep 17 00:00:00 2001 From: Jonathan Lee Date: Fri, 4 Oct 2024 22:38:51 -0400 Subject: [PATCH 2/4] [wptrunner] Split `message-queue.js` from `testharnessreport.js` This will allow non-testharness tests to use `testdriver.js` without needing extra scripts. Evaluating `message-queue.js` is idempotent so that, when using testharness with testdriver, the second inclusion is a no-op. 
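
Concretely, a single URL can now be served from several concatenated files:
/resources/testdriver.js becomes testdriver.js + message-queue.js +
testdriver-extra.js, and /resources/testharnessreport.js gains a
message-queue.js prefix. A rough sketch of the concatenation, simplified from
the handlers.py hunk below (the function name is illustrative):

    def build_template_body(paths, format_args):
        # A single path or a sequence of paths is accepted, as in this patch.
        if isinstance(paths, str):
            paths = [paths]
        data = ""
        for path in paths:
            with open(path) as f:
                data += f.read()
        # Template arguments are interpolated once, over the concatenated text.
        if format_args:
            data = data % format_args
        return data
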
Because resource scripts are cached, the size increase should not meaningfully affect test performance. --- tools/wptrunner/wptrunner/environment.py | 38 +++------ .../wptrunner/executors/message-queue.js | 77 +++++++++++++++++++ tools/wptrunner/wptrunner/testdriver-extra.js | 4 +- .../wptrunner/wptrunner/testharnessreport.js | 74 +----------------- tools/wptserve/wptserve/handlers.py | 14 ++-- 5 files changed, 99 insertions(+), 108 deletions(-) create mode 100644 tools/wptrunner/wptrunner/executors/message-queue.js diff --git a/tools/wptrunner/wptrunner/environment.py b/tools/wptrunner/wptrunner/environment.py index f8538b7da1d7f4..7755053f6a06f8 100644 --- a/tools/wptrunner/wptrunner/environment.py +++ b/tools/wptrunner/wptrunner/environment.py @@ -23,8 +23,6 @@ sys.path.insert(0, repo_root) from tools import localpaths # noqa: F401 -from wptserve.handlers import StringHandler - serve = None @@ -232,22 +230,25 @@ def get_routes(self): "text/javascript", "/_pdf_js/pdf.js"), (os.path.join(here, "..", "..", "third_party", "pdf_js", "pdf.worker.js"), None, "text/javascript", "/_pdf_js/pdf.worker.js"), - (self.options.get("testharnessreport", "testharnessreport.js"), + (self.options.get("testharnessreport", [os.path.join("executors", "message-queue.js"), + "testharnessreport.js"]), {"output": self.pause_after_test, "timeout_multiplier": self.testharness_timeout_multipler, "explicit_timeout": "true" if self.debug_info is not None else "false", "debug": "true" if self.debug_test else "false"}, "text/javascript;charset=utf8", - "/resources/testharnessreport.js")]: - path = os.path.normpath(os.path.join(here, path)) + "/resources/testharnessreport.js"), + ([os.path.join(repo_root, "resources", "testdriver.js"), + os.path.join("executors", "message-queue.js"), + "testdriver-extra.js"], {}, "text/javascript", "/resources/testdriver.js")]: + paths = [path] if isinstance(path, str) else path + abs_paths = [os.path.normpath(os.path.join(here, path)) for path in paths] # Note that .headers. files don't apply to static routes, so we need to # readd any static headers here. headers = {"Cache-Control": "max-age=3600"} - route_builder.add_static(path, format_args, content_type, route, + route_builder.add_static(abs_paths, format_args, content_type, route, headers=headers) - route_builder.add_handler("GET", "/resources/testdriver.js", TestdriverLoader()) - for url_base, test_root in self.test_paths.items(): if url_base == "/": continue @@ -315,27 +316,6 @@ def test_servers(self): return failed, pending -class TestdriverLoader: - """A special static handler for serving `/resources/testdriver.js`. - - This handler lazily reads `testdriver{,-extra}.js` so that wptrunner doesn't - need to pass the entire file contents to child `wptserve` processes, which - can slow `wptserve` startup by several seconds (crbug.com/1479850). 
- """ - def __init__(self): - self._handler = None - - def __call__(self, request, response): - if not self._handler: - data = b"" - with open(os.path.join(repo_root, "resources", "testdriver.js"), "rb") as fp: - data += fp.read() - with open(os.path.join(here, "testdriver-extra.js"), "rb") as fp: - data += fp.read() - self._handler = StringHandler(data, "text/javascript") - return self._handler(request, response) - - def wait_for_service(logger: StructuredLogger, host: str, port: int, diff --git a/tools/wptrunner/wptrunner/executors/message-queue.js b/tools/wptrunner/wptrunner/executors/message-queue.js new file mode 100644 index 00000000000000..bbad17180e0a82 --- /dev/null +++ b/tools/wptrunner/wptrunner/executors/message-queue.js @@ -0,0 +1,77 @@ +(function() { + if (window.__wptrunner_message_queue && window.__wptrunner_process_next_event) { + // Another script already set up the testdriver infrastructure. + return; + } + + class MessageQueue { + constructor() { + this.item_id = 0; + this._queue = []; + } + + push(item) { + let cmd_id = this.item_id++; + item.id = cmd_id; + this._queue.push(item); + __wptrunner_process_next_event(); + return cmd_id; + } + + shift() { + return this._queue.shift(); + } + } + + window.__wptrunner_testdriver_callback = null; + window.__wptrunner_message_queue = new MessageQueue(); + window.__wptrunner_url = null; + + window.__wptrunner_process_next_event = function() { + /* This function handles the next testdriver event. The presence of + window.testdriver_callback is used as a switch; when that function + is present we are able to handle the next event and when is is not + present we must wait. Therefore to drive the event processing, this + function must be called in two circumstances: + * Every time there is a new event that we may be able to handle + * Every time we set the callback function + This function unsets the callback, so no further testdriver actions + will be run until it is reset, which wptrunner does after it has + completed handling the current action. 
+ */ + + if (!window.__wptrunner_testdriver_callback) { + return; + } + var data = window.__wptrunner_message_queue.shift(); + if (!data) { + return; + } + + var payload = undefined; + + switch(data.type) { + case "complete": + var tests = data.tests; + var status = data.status; + + var subtest_results = tests.map(function(x) { + return [x.name, x.status, x.message, x.stack]; + }); + payload = [status.status, + status.message, + status.stack, + subtest_results]; + clearTimeout(window.__wptrunner_timer); + break; + case "action": + payload = data; + break; + default: + return; + } + var callback = window.__wptrunner_testdriver_callback; + window.__wptrunner_testdriver_callback = null; + callback([__wptrunner_url, data.type, payload]); + }; +})(); diff --git a/tools/wptrunner/wptrunner/testdriver-extra.js b/tools/wptrunner/wptrunner/testdriver-extra.js index 87ae9e1f33d02f..079081c3a6d44d 100644 --- a/tools/wptrunner/wptrunner/testdriver-extra.js +++ b/tools/wptrunner/wptrunner/testdriver-extra.js @@ -59,7 +59,7 @@ }); function is_test_context() { - return window.__wptrunner_message_queue !== undefined; + return !!window.__wptrunner_is_test_context; } // Code copied from /common/utils.js @@ -226,7 +226,7 @@ }; window.test_driver_internal.set_test_context = function(context) { - if (window.__wptrunner_message_queue) { + if (is_test_context()) { throw new Error("Tried to set testharness context in a window containing testharness.js"); } testharness_context = context; diff --git a/tools/wptrunner/wptrunner/testharnessreport.js b/tools/wptrunner/wptrunner/testharnessreport.js index d385692445c508..326617f130376c 100644 --- a/tools/wptrunner/wptrunner/testharnessreport.js +++ b/tools/wptrunner/wptrunner/testharnessreport.js @@ -1,75 +1,6 @@ -class MessageQueue { - constructor() { - this.item_id = 0; - this._queue = []; - } - - push(item) { - let cmd_id = this.item_id++; - item.id = cmd_id; - this._queue.push(item); - __wptrunner_process_next_event(); - return cmd_id; - } - - shift() { - return this._queue.shift(); - } -} - -window.__wptrunner_testdriver_callback = null; -window.__wptrunner_message_queue = new MessageQueue(); -window.__wptrunner_url = null; - -window.__wptrunner_process_next_event = function() { - /* This function handles the next testdriver event. The presence of - window.testdriver_callback is used as a switch; when that function - is present we are able to handle the next event and when is is not - present we must wait. Therefore to drive the event processing, this - function must be called in two circumstances: - * Every time there is a new event that we may be able to handle - * Every time we set the callback function - This function unsets the callback, so no further testdriver actions - will be run until it is reset, which wptrunner does after it has - completed handling the current action. 
- */ - - if (!window.__wptrunner_testdriver_callback) { - return; - } - var data = window.__wptrunner_message_queue.shift(); - if (!data) { - return; - } - - var payload = undefined; - - switch(data.type) { - case "complete": - var tests = data.tests; - var status = data.status; - - var subtest_results = tests.map(function(x) { - return [x.name, x.status, x.message, x.stack]; - }); - payload = [status.status, - status.message, - status.stack, - subtest_results]; - clearTimeout(window.__wptrunner_timer); - break; - case "action": - payload = data; - break; - default: - return; - } - var callback = window.__wptrunner_testdriver_callback; - window.__wptrunner_testdriver_callback = null; - callback([__wptrunner_url, data.type, payload]); -}; - (function() { + window.__wptrunner_is_test_context = true; + var props = {output: %(output)d, timeout_multiplier: %(timeout_multiplier)s, explicit_timeout: %(explicit_timeout)s, @@ -85,4 +16,3 @@ window.__wptrunner_process_next_event = function() { }); setup(props); })(); - diff --git a/tools/wptserve/wptserve/handlers.py b/tools/wptserve/wptserve/handlers.py index 62faf47d645692..cde04b13cbb088 100644 --- a/tools/wptserve/wptserve/handlers.py +++ b/tools/wptserve/wptserve/handlers.py @@ -513,11 +513,13 @@ def __init__(self, path, format_args, content_type, **headers): Note that *.headers files have no effect in this handler. - :param path: Path to the template file to use + :param path: Path(s) to template files to use. If a sequence of paths is provided instead + of a single path, the contents of each file will be concatenated together before the + `format_args` are interpolated. :param format_args: Dictionary of values to substitute into the template file :param content_type: Content type header to server the response with :param headers: List of headers to send with responses""" - self._path = path + self._paths = [path] if isinstance(path, str) else path self._format_args = format_args self._content_type = content_type self._headers = headers @@ -525,7 +527,7 @@ def __init__(self, path, format_args, content_type, **headers): def __getnewargs_ex__(self): # Do not pickle `self._handler`, which can be arbitrarily large. - args = self._path, self._format_args, self._content_type + args = self._paths, self._format_args, self._content_type return args, self._headers def __call__(self, request, response): @@ -534,8 +536,10 @@ def __call__(self, request, response): # contents across processes can slow `wptserve` startup by several # seconds (crbug.com/1479850). if not self._handler: - with open(self._path) as f: - data = f.read() + data = "" + for path in self._paths: + with open(path) as f: + data += f.read() if self._format_args: data = data % self._format_args self._handler = StringHandler(data, self._content_type, **self._headers) From e049d6dbcfa5726d1c1e258b5988719e625e077b Mon Sep 17 00:00:00 2001 From: Jonathan Lee Date: Sat, 5 Oct 2024 00:33:56 -0400 Subject: [PATCH 3/4] [wptrunner] Add `testdriver` flag to non-testharness manifest items This will let wptrunner gracefully skip testdriver tests if the executor doesn't support them. 
--- tools/manifest/item.py | 18 +++++++++++++++++- tools/manifest/sourcefile.py | 7 +++++-- tools/manifest/tests/test_manifest.py | 26 ++++++++++++++++++++++++++ 3 files changed, 48 insertions(+), 3 deletions(-) diff --git a/tools/manifest/item.py b/tools/manifest/item.py index e25f7ca2c29adc..e1f509bbdab4db 100644 --- a/tools/manifest/item.py +++ b/tools/manifest/item.py @@ -166,7 +166,7 @@ def pac(self) -> Optional[Text]: return self._extras.get("pac") @property - def testdriver(self) -> Optional[Text]: + def testdriver(self) -> Optional[bool]: return self._extras.get("testdriver") @property @@ -240,6 +240,10 @@ def fuzzy(self) -> Fuzzy: rv[key] = v return rv + @property + def testdriver(self) -> Optional[bool]: + return self._extras.get("testdriver") + def to_json(self) -> Tuple[Optional[Text], List[Tuple[Text, Text]], Dict[Text, Any]]: # type: ignore rel_url = None if self._url == self.path else self._url rv: Tuple[Optional[Text], List[Tuple[Text, Text]], Dict[Text, Any]] = (rel_url, self.references, {}) @@ -252,6 +256,8 @@ def to_json(self) -> Tuple[Optional[Text], List[Tuple[Text, Text]], Dict[Text, A extras["dpi"] = self.dpi if self.fuzzy: extras["fuzzy"] = list(self.fuzzy.items()) + if self.testdriver: + extras["testdriver"] = self.testdriver return rv @classmethod @@ -315,6 +321,16 @@ class CrashTest(URLManifestItem): def timeout(self) -> Optional[Text]: return None + @property + def testdriver(self) -> Optional[bool]: + return self._extras.get("testdriver") + + def to_json(self): # type: ignore + rel_url, extras = super().to_json() + if self.testdriver: + extras["testdriver"] = self.testdriver + return rel_url, extras + class WebDriverSpecTest(URLManifestItem): __slots__ = () diff --git a/tools/manifest/sourcefile.py b/tools/manifest/sourcefile.py index 3563fb9e5e90d6..8b588bc983b62c 100644 --- a/tools/manifest/sourcefile.py +++ b/tools/manifest/sourcefile.py @@ -927,7 +927,8 @@ def manifest_items(self) -> Tuple[Text, List[ManifestItem]]: self.tests_root, self.rel_path, self.url_base, - self.rel_url + self.rel_url, + testdriver=self.has_testdriver, )] elif self.name_is_print_reftest: @@ -946,6 +947,7 @@ def manifest_items(self) -> Tuple[Text, List[ManifestItem]]: viewport_size=self.viewport_size, fuzzy=self.fuzzy, page_ranges=self.page_ranges, + testdriver=self.has_testdriver, )] elif self.name_is_multi_global: @@ -1046,7 +1048,8 @@ def manifest_items(self) -> Tuple[Text, List[ManifestItem]]: timeout=self.timeout, viewport_size=self.viewport_size, dpi=self.dpi, - fuzzy=self.fuzzy + fuzzy=self.fuzzy, + testdriver=self.has_testdriver, )) elif self.content_is_css_visual and not self.name_is_reference: diff --git a/tools/manifest/tests/test_manifest.py b/tools/manifest/tests/test_manifest.py index fc2314b8356a21..d9abc40588a059 100644 --- a/tools/manifest/tests/test_manifest.py +++ b/tools/manifest/tests/test_manifest.py @@ -335,3 +335,29 @@ def test_manifest_spec_to_json(): ]}}, } } + + +@pytest.mark.parametrize("testdriver,expected_extra", [ + (True, {"testdriver": True}), + # Don't bloat the manifest with the `testdriver=False` default. 
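+    # (This mirrors other optional extras, such as `fuzzy`, which are likewise
+    # omitted when unset.)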
+ (False, {}), +]) +def test_dump_testdriver(testdriver, expected_extra): + m = manifest.Manifest("") + source_file = SourceFileWithTest("a" + os.path.sep + "b", "0"*40, item.RefTest, + testdriver=testdriver) + + tree, sourcefile_mock = tree_and_sourcefile_mocks([(source_file, None, True)]) + with mock.patch("tools.manifest.manifest.SourceFile", side_effect=sourcefile_mock): + assert m.update(tree) is True + + assert m.to_json() == { + 'version': 8, + 'url_base': '/', + 'items': { + 'reftest': {'a': {'b': [ + '0000000000000000000000000000000000000000', + (mock.ANY, [], expected_extra) + ]}}, + } + } From 25d72653e0f188c4e6d34d2645418709c6860595 Mon Sep 17 00:00:00 2001 From: Jonathan Lee Date: Sat, 5 Oct 2024 02:23:19 -0400 Subject: [PATCH 4/4] [wptrunner] Support testdriver.js in {ref,print-ref,crash}tests Non-testharness executors that don't support testdriver simply skip those tests. --- docs/writing-tests/testdriver.md | 3 -- infrastructure/crashtests/testdriver.html | 13 +++++++ .../reftest/testdriver-in-ref.html.ini | 5 +++ infrastructure/reftest/testdriver-child.html | 19 +++++++++++ .../reftest/testdriver-iframe.sub.html | 29 ++++++++++++++++ infrastructure/reftest/testdriver-in-ref.html | 6 ++++ infrastructure/reftest/testdriver-print.html | 23 +++++++++++++ infrastructure/reftest/testdriver.html | 21 ++++++++++++ lint.ignore | 6 ++++ .../wptrunner/executors/executorwebdriver.py | 23 +++++++------ .../wptrunner/executors/message-queue.js | 22 +++++++----- .../wptrunner/executors/test-wait.js | 34 +++++++++++++------ tools/wptrunner/wptrunner/testdriver-extra.js | 8 +++++ tools/wptrunner/wptrunner/wptrunner.py | 33 ++++++++---------- tools/wptrunner/wptrunner/wpttest.py | 30 +++++++++++++--- 15 files changed, 220 insertions(+), 55 deletions(-) create mode 100644 infrastructure/crashtests/testdriver.html create mode 100644 infrastructure/metadata/infrastructure/reftest/testdriver-in-ref.html.ini create mode 100644 infrastructure/reftest/testdriver-child.html create mode 100644 infrastructure/reftest/testdriver-iframe.sub.html create mode 100644 infrastructure/reftest/testdriver-in-ref.html create mode 100644 infrastructure/reftest/testdriver-print.html create mode 100644 infrastructure/reftest/testdriver.html diff --git a/docs/writing-tests/testdriver.md b/docs/writing-tests/testdriver.md index a9e58c36d84724..a18c17696a6a26 100644 --- a/docs/writing-tests/testdriver.md +++ b/docs/writing-tests/testdriver.md @@ -13,9 +13,6 @@ written purely using web platform APIs. Outside of automation contexts, it allows human operators to provide expected input manually (for operations which may be described in simple terms). -It is currently supported only for [testharness.js](testharness) -tests. 
- ## Markup ## The `testdriver.js` and `testdriver-vendor.js` must both be included diff --git a/infrastructure/crashtests/testdriver.html b/infrastructure/crashtests/testdriver.html new file mode 100644 index 00000000000000..ecd7430281983f --- /dev/null +++ b/infrastructure/crashtests/testdriver.html @@ -0,0 +1,13 @@ + + +crashtests support testdriver.js + + + + diff --git a/infrastructure/metadata/infrastructure/reftest/testdriver-in-ref.html.ini b/infrastructure/metadata/infrastructure/reftest/testdriver-in-ref.html.ini new file mode 100644 index 00000000000000..0d2b2f902f2f85 --- /dev/null +++ b/infrastructure/metadata/infrastructure/reftest/testdriver-in-ref.html.ini @@ -0,0 +1,5 @@ +[testdriver-in-ref.html] + disabled: + # https://github.com/web-platform-tests/wpt/issues/13183 + if product == "firefox" or product == "firefox_android": + "marionette executor doesn't implement testdriver for reftests" diff --git a/infrastructure/reftest/testdriver-child.html b/infrastructure/reftest/testdriver-child.html new file mode 100644 index 00000000000000..bfd239b7b226ab --- /dev/null +++ b/infrastructure/reftest/testdriver-child.html @@ -0,0 +1,19 @@ + + + + + + diff --git a/infrastructure/reftest/testdriver-iframe.sub.html b/infrastructure/reftest/testdriver-iframe.sub.html new file mode 100644 index 00000000000000..6761c6b84bb5ee --- /dev/null +++ b/infrastructure/reftest/testdriver-iframe.sub.html @@ -0,0 +1,29 @@ + + +reftests support testdriver.js in iframes + + + + + + diff --git a/infrastructure/reftest/testdriver-in-ref.html b/infrastructure/reftest/testdriver-in-ref.html new file mode 100644 index 00000000000000..8a1af46db35462 --- /dev/null +++ b/infrastructure/reftest/testdriver-in-ref.html @@ -0,0 +1,6 @@ + +references support testdriver.js + + diff --git a/infrastructure/reftest/testdriver-print.html b/infrastructure/reftest/testdriver-print.html new file mode 100644 index 00000000000000..b4dc6c9760965f --- /dev/null +++ b/infrastructure/reftest/testdriver-print.html @@ -0,0 +1,23 @@ + + +print-reftests support testdriver.js + + + + + +
page 1
+ diff --git a/infrastructure/reftest/testdriver.html b/infrastructure/reftest/testdriver.html new file mode 100644 index 00000000000000..d890d8926273af --- /dev/null +++ b/infrastructure/reftest/testdriver.html @@ -0,0 +1,21 @@ + + +reftests support testdriver.js + + + + + + diff --git a/lint.ignore b/lint.ignore index 3e1d05be28de38..a4cdb2305e52da 100644 --- a/lint.ignore +++ b/lint.ignore @@ -764,6 +764,12 @@ HTML INVALID SYNTAX: quirks/percentage-height-calculation.html HTML INVALID SYNTAX: trusted-types/TrustedTypePolicyFactory-getAttributeType-namespace.html # Tests which include testdriver.js but aren't testharness.js tests +# TODO(web-platform-tests/wpt#13183): Dismantle this rule once support is added. +TESTDRIVER-IN-UNSUPPORTED-TYPE: infrastructure/crashtests/testdriver.html +TESTDRIVER-IN-UNSUPPORTED-TYPE: infrastructure/reftest/testdriver.html +TESTDRIVER-IN-UNSUPPORTED-TYPE: infrastructure/reftest/testdriver-child.html +TESTDRIVER-IN-UNSUPPORTED-TYPE: infrastructure/reftest/testdriver-iframe.sub.html +TESTDRIVER-IN-UNSUPPORTED-TYPE: infrastructure/reftest/testdriver-print.html TESTDRIVER-IN-UNSUPPORTED-TYPE: css/css-grid/grid-model/grid-layout-stale-001.html TESTDRIVER-IN-UNSUPPORTED-TYPE: css/css-grid/grid-model/grid-layout-stale-002.html TESTDRIVER-IN-UNSUPPORTED-TYPE: css/css-scroll-anchoring/fullscreen-crash.html diff --git a/tools/wptrunner/wptrunner/executors/executorwebdriver.py b/tools/wptrunner/wptrunner/executors/executorwebdriver.py index c81b0e00a842b0..3974b147137016 100644 --- a/tools/wptrunner/wptrunner/executors/executorwebdriver.py +++ b/tools/wptrunner/wptrunner/executors/executorwebdriver.py @@ -1030,8 +1030,9 @@ def do_testharness(self, protocol, url, timeout): protocol.testharness.close_old_windows() -class WebDriverRefTestExecutor(RefTestExecutor): +class WebDriverRefTestExecutor(RefTestExecutor, TestDriverExecutorMixin): protocol_cls = WebDriverProtocol + supports_testdriver = True def __init__(self, logger, browser, server_config, timeout_multiplier=1, screenshot_cache=None, close_after_done=True, @@ -1055,7 +1056,8 @@ def __init__(self, logger, browser, server_config, timeout_multiplier=1, self.debug_test = debug_test with open(os.path.join(here, "test-wait.js")) as f: - self.wait_script = f.read() % {"classname": "reftest-wait"} + wait_script = f.read() % {"classname": "reftest-wait"} + TestDriverExecutorMixin.__init__(self, wait_script) def reset(self): self.implementation.reset() @@ -1101,8 +1103,9 @@ def screenshot(self, test, viewport_size, dpi, page_ranges): self.extra_timeout).run() def _screenshot(self, protocol, url, timeout): - self.protocol.base.load(url) - self.protocol.base.execute_script(self.wait_script, True) + # There's nothing we want from the "complete" message, so discard the + # return value. 
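+        # (Reftests carry no subtest results; test-wait.js posts an empty
+        # "complete" payload purely to signal that rendering has finished.)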
+ self.run_testdriver(protocol, url, timeout) screenshot = self.protocol.webdriver.screenshot() if screenshot is None: @@ -1147,8 +1150,7 @@ def screenshot(self, test, viewport_size, dpi, page_ranges): self.extra_timeout).run() def _render(self, protocol, url, timeout): - protocol.webdriver.url = url - protocol.base.execute_script(self.wait_script, asynchronous=True) + self.run_testdriver(protocol, url, timeout) pdf = protocol.pdf_print.render_as_pdf(*self.viewport_size) screenshots = protocol.pdf_print.pdf_to_png(pdf, self.page_ranges) @@ -1160,8 +1162,9 @@ def _render(self, protocol, url, timeout): return screenshots -class WebDriverCrashtestExecutor(CrashtestExecutor): +class WebDriverCrashtestExecutor(CrashtestExecutor, TestDriverExecutorMixin): protocol_cls = WebDriverProtocol + supports_testdriver = True def __init__(self, logger, browser, server_config, timeout_multiplier=1, screenshot_cache=None, close_after_done=True, @@ -1179,7 +1182,8 @@ def __init__(self, logger, browser, server_config, timeout_multiplier=1, capabilities=capabilities) with open(os.path.join(here, "test-wait.js")) as f: - self.wait_script = f.read() % {"classname": "test-wait"} + wait_script = f.read() % {"classname": "test-wait"} + TestDriverExecutorMixin.__init__(self, wait_script) def do_test(self, test): timeout = (test.timeout * self.timeout_multiplier if self.debug_info is None @@ -1198,8 +1202,7 @@ def do_test(self, test): return (test.make_result(*data), []) def do_crashtest(self, protocol, url, timeout): - protocol.base.load(url) - protocol.base.execute_script(self.wait_script, asynchronous=True) + self.run_testdriver(protocol, url, timeout) result = {"status": "PASS", "message": None} if (leak_part := getattr(protocol, "leak", None)) and (counters := leak_part.check()): result["extra"] = {"leak_counters": counters} diff --git a/tools/wptrunner/wptrunner/executors/message-queue.js b/tools/wptrunner/wptrunner/executors/message-queue.js index bbad17180e0a82..c79b96aee29ec7 100644 --- a/tools/wptrunner/wptrunner/executors/message-queue.js +++ b/tools/wptrunner/wptrunner/executors/message-queue.js @@ -54,15 +54,19 @@ case "complete": var tests = data.tests; var status = data.status; - - var subtest_results = tests.map(function(x) { - return [x.name, x.status, x.message, x.stack]; - }); - payload = [status.status, - status.message, - status.stack, - subtest_results]; - clearTimeout(window.__wptrunner_timer); + if (tests && status) { + var subtest_results = tests.map(function(x) { + return [x.name, x.status, x.message, x.stack]; + }); + payload = [status.status, + status.message, + status.stack, + subtest_results]; + clearTimeout(window.__wptrunner_timer); + } else { + // Non-testharness test. + payload = []; + } break; case "action": payload = data; diff --git a/tools/wptrunner/wptrunner/executors/test-wait.js b/tools/wptrunner/wptrunner/executors/test-wait.js index ad08ad7d76fb02..25f9868290a2d8 100644 --- a/tools/wptrunner/wptrunner/executors/test-wait.js +++ b/tools/wptrunner/wptrunner/executors/test-wait.js @@ -1,6 +1,8 @@ -var callback = arguments[arguments.length - 1]; -var observer = null; -var root = document.documentElement; +window.__wptrunner_url = arguments.length > 1 ? 
arguments[0] : location.href; +window.__wptrunner_testdriver_callback = arguments[arguments.length - 1]; +if (window.__wptrunner_process_next_event) { + window.__wptrunner_process_next_event(); +} function wait_load() { if (Document.prototype.hasOwnProperty("fonts")) { @@ -13,7 +15,6 @@ function wait_load() { } } - function wait_paints() { // As of 2017-04-05, the Chromium web browser exhibits a rendering bug // (https://bugs.chromium.org/p/chromium/issues/detail?id=708757) that @@ -32,24 +33,35 @@ function wait_paints() { } function screenshot_if_ready() { + var root = document.documentElement; if (root && root.classList.contains("%(classname)s") && - observer === null) { - observer = new MutationObserver(wait_paints); - observer.observe(root, {attributes: true}); + !window.__wptrunner_observer) { + window.__wptrunner_observer = new MutationObserver(wait_paints); + __wptrunner_observer.observe(root, {attributes: true}); var event = new Event("TestRendered", {bubbles: true}); root.dispatchEvent(event); return; } - if (observer !== null) { - observer.disconnect(); + if (window.__wptrunner_observer) { + __wptrunner_observer.disconnect(); + } + if (window.__wptrunner_message_queue) { + __wptrunner_message_queue.push({type: "complete"}); + } else { + // Not using `testdriver.js`, so manually post a raw completion message + // that the executor understands. + __wptrunner_testdriver_callback([__wptrunner_url, "complete", []]); } - callback(); } if (document.readyState != "complete") { - addEventListener('load', wait_load); + if (!window.__wptrunner_wait_load) { + window.__wptrunner_wait_load = wait_load; + addEventListener('load', __wptrunner_wait_load); + } } else { wait_load(); } +// TODO: Should we do anything about unhandled rejections? diff --git a/tools/wptrunner/wptrunner/testdriver-extra.js b/tools/wptrunner/wptrunner/testdriver-extra.js index 079081c3a6d44d..199d0da0f87abe 100644 --- a/tools/wptrunner/wptrunner/testdriver-extra.js +++ b/tools/wptrunner/wptrunner/testdriver-extra.js @@ -58,6 +58,14 @@ event.stopImmediatePropagation(); }); + const rootClasses = document.documentElement.classList; + // For non-testharness tests, the presence of `(ref)test-wait` indicates + // it's the "main" browsing context through which testdriver actions are + // routed. Evaluate this eagerly before the test starts. 
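+    // (The check must happen eagerly: a finished test removes the wait class,
+    // after which a lazy check could no longer identify the test context.)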
+ if (rootClasses.contains("reftest-wait") || rootClasses.contains("test-wait")) { + self.__wptrunner_is_test_context = true; + } + function is_test_context() { return !!window.__wptrunner_is_test_context; } diff --git a/tools/wptrunner/wptrunner/wptrunner.py b/tools/wptrunner/wptrunner/wptrunner.py index f390f29c0794f0..b56ce47f6b7e65 100644 --- a/tools/wptrunner/wptrunner/wptrunner.py +++ b/tools/wptrunner/wptrunner/wptrunner.py @@ -258,24 +258,21 @@ def run_test_iteration(test_status, test_loader, test_queue_builder, logger.test_end(test.id, status="SKIP", subsuite=subsuite_name) test_status.skipped += 1 - if test_type == "testharness": - for test in test_loader.tests[subsuite_name][test_type]: - skip_reason = None - if test.testdriver and not executor_cls.supports_testdriver: - skip_reason = "Executor does not support testdriver.js" - elif test.jsshell and not executor_cls.supports_jsshell: - skip_reason = "Executor does not support jsshell" - if skip_reason: - logger.test_start(test.id, subsuite=subsuite_name) - logger.test_end(test.id, - status="SKIP", - subsuite=subsuite_name, - message=skip_reason) - test_status.skipped += 1 - else: - tests_to_run[(subsuite_name, test_type)].append(test) - else: - tests_to_run[(subsuite_name, test_type)] = test_loader.tests[subsuite_name][test_type] + for test in test_loader.tests[subsuite_name][test_type]: + skip_reason = None + if getattr(test, "testdriver", False) and not executor_cls.supports_testdriver: + skip_reason = "Executor does not support testdriver.js" + elif test_type == "testharness" and test.jsshell and not executor_cls.supports_jsshell: + skip_reason = "Executor does not support jsshell" + if skip_reason: + logger.test_start(test.id, subsuite=subsuite_name) + logger.test_end(test.id, + status="SKIP", + subsuite=subsuite_name, + message=skip_reason) + test_status.skipped += 1 + else: + tests_to_run[(subsuite_name, test_type)].append(test) unexpected_fail_tests = defaultdict(list) unexpected_pass_tests = defaultdict(list) diff --git a/tools/wptrunner/wptrunner/wpttest.py b/tools/wptrunner/wptrunner/wpttest.py index 2e3fd974d4d43e..42214f07e399f4 100644 --- a/tools/wptrunner/wptrunner/wpttest.py +++ b/tools/wptrunner/wptrunner/wpttest.py @@ -535,7 +535,7 @@ class ReftestTest(Test): def __init__(self, url_base, tests_root, url, inherit_metadata, test_metadata, references, timeout=None, path=None, viewport_size=None, dpi=None, fuzzy=None, - protocol="http", subdomain=False): + protocol="http", subdomain=False, testdriver=False): Test.__init__(self, url_base, tests_root, url, inherit_metadata, test_metadata, timeout, path, protocol, subdomain) @@ -546,6 +546,7 @@ def __init__(self, url_base, tests_root, url, inherit_metadata, test_metadata, r self.references = references self.viewport_size = self.get_viewport_size(viewport_size) self.dpi = dpi + self.testdriver = testdriver self._fuzzy = fuzzy or {} @classmethod @@ -553,7 +554,8 @@ def cls_kwargs(cls, manifest_test): return {"viewport_size": manifest_test.viewport_size, "dpi": manifest_test.dpi, "protocol": server_protocol(manifest_test), - "fuzzy": manifest_test.fuzzy} + "fuzzy": manifest_test.fuzzy, + "testdriver": bool(getattr(manifest_test, "testdriver", False))} @classmethod def from_manifest(cls, @@ -692,10 +694,10 @@ class PrintReftestTest(ReftestTest): def __init__(self, url_base, tests_root, url, inherit_metadata, test_metadata, references, timeout=None, path=None, viewport_size=None, dpi=None, fuzzy=None, - page_ranges=None, protocol="http", subdomain=False): + 
page_ranges=None, protocol="http", subdomain=False, testdriver=False): super().__init__(url_base, tests_root, url, inherit_metadata, test_metadata, references, timeout, path, viewport_size, dpi, - fuzzy, protocol, subdomain=subdomain) + fuzzy, protocol, subdomain=subdomain, testdriver=testdriver) self._page_ranges = page_ranges @classmethod @@ -726,6 +728,26 @@ class CrashTest(Test): result_cls = CrashtestResult test_type = "crashtest" + def __init__(self, url_base, tests_root, url, inherit_metadata, test_metadata, + timeout=None, path=None, protocol="http", subdomain=False, testdriver=False): + super().__init__(url_base, tests_root, url, inherit_metadata, test_metadata, + timeout, path, protocol, subdomain=subdomain) + self.testdriver = testdriver + + @classmethod + def from_manifest(cls, manifest_file, manifest_item, inherit_metadata, test_metadata): + timeout = cls.long_timeout if manifest_item.timeout == "long" else cls.default_timeout + return cls(manifest_file.url_base, + manifest_file.tests_root, + manifest_item.url, + inherit_metadata, + test_metadata, + timeout=timeout, + path=os.path.join(manifest_file.tests_root, manifest_item.path), + protocol=server_protocol(manifest_item), + subdomain=manifest_item.subdomain, + testdriver=bool(getattr(manifest_item, "testdriver", False))) + manifest_test_cls = {"reftest": ReftestTest, "print-reftest": PrintReftestTest,