diff --git a/infrastructure/crashtests/testdriver.html b/infrastructure/crashtests/testdriver.html
new file mode 100644
index 000000000000000..ecd7430281983f3
--- /dev/null
+++ b/infrastructure/crashtests/testdriver.html
@@ -0,0 +1,13 @@
+
+
+
crashtests support testdriver.js
+
+
+
+
diff --git a/infrastructure/metadata/infrastructure/reftest/testdriver-in-ref.html.ini b/infrastructure/metadata/infrastructure/reftest/testdriver-in-ref.html.ini
new file mode 100644
index 000000000000000..eac6a507b38848a
--- /dev/null
+++ b/infrastructure/metadata/infrastructure/reftest/testdriver-in-ref.html.ini
@@ -0,0 +1,4 @@
+[testdriver-in-ref.html]
+ disabled:
+ # https://github.com/web-platform-tests/wpt/issues/13183
+ if product == "firefox": "marionette executor doesn't implement testdriver for reftests"
diff --git a/infrastructure/reftest/testdriver-in-ref.html b/infrastructure/reftest/testdriver-in-ref.html
new file mode 100644
index 000000000000000..8a1af46db354624
--- /dev/null
+++ b/infrastructure/reftest/testdriver-in-ref.html
@@ -0,0 +1,6 @@
+
+references support testdriver.js
+
+
diff --git a/infrastructure/reftest/testdriver-print.html b/infrastructure/reftest/testdriver-print.html
new file mode 100644
index 000000000000000..b4dc6c9760965f8
--- /dev/null
+++ b/infrastructure/reftest/testdriver-print.html
@@ -0,0 +1,23 @@
+
+
+print-reftests support testdriver.js
+
+
+
+
+
+page 1
+
diff --git a/infrastructure/reftest/testdriver.html b/infrastructure/reftest/testdriver.html
new file mode 100644
index 000000000000000..d890d8926273af8
--- /dev/null
+++ b/infrastructure/reftest/testdriver.html
@@ -0,0 +1,21 @@
+
+
+reftests support testdriver.js
+
+
+
+
+
+
diff --git a/lint.ignore b/lint.ignore
index 3e1d05be28de383..239c1a3e862960e 100644
--- a/lint.ignore
+++ b/lint.ignore
@@ -764,6 +764,10 @@ HTML INVALID SYNTAX: quirks/percentage-height-calculation.html
HTML INVALID SYNTAX: trusted-types/TrustedTypePolicyFactory-getAttributeType-namespace.html
# Tests which include testdriver.js but aren't testharness.js tests
+# TODO(web-platform-tests/wpt#13183): Dismantle this rule once support is added.
+TESTDRIVER-IN-UNSUPPORTED-TYPE: infrastructure/crashtests/testdriver.html
+TESTDRIVER-IN-UNSUPPORTED-TYPE: infrastructure/reftest/testdriver.html
+TESTDRIVER-IN-UNSUPPORTED-TYPE: infrastructure/reftest/testdriver-print.html
TESTDRIVER-IN-UNSUPPORTED-TYPE: css/css-grid/grid-model/grid-layout-stale-001.html
TESTDRIVER-IN-UNSUPPORTED-TYPE: css/css-grid/grid-model/grid-layout-stale-002.html
TESTDRIVER-IN-UNSUPPORTED-TYPE: css/css-scroll-anchoring/fullscreen-crash.html
diff --git a/tools/wptrunner/wptrunner/executors/executorwebdriver.py b/tools/wptrunner/wptrunner/executors/executorwebdriver.py
index c81b0e00a842b04..3974b147137016f 100644
--- a/tools/wptrunner/wptrunner/executors/executorwebdriver.py
+++ b/tools/wptrunner/wptrunner/executors/executorwebdriver.py
@@ -1030,8 +1030,9 @@ def do_testharness(self, protocol, url, timeout):
protocol.testharness.close_old_windows()
-class WebDriverRefTestExecutor(RefTestExecutor):
+class WebDriverRefTestExecutor(RefTestExecutor, TestDriverExecutorMixin):
protocol_cls = WebDriverProtocol
+ supports_testdriver = True
def __init__(self, logger, browser, server_config, timeout_multiplier=1,
screenshot_cache=None, close_after_done=True,
@@ -1055,7 +1056,8 @@ def __init__(self, logger, browser, server_config, timeout_multiplier=1,
self.debug_test = debug_test
with open(os.path.join(here, "test-wait.js")) as f:
- self.wait_script = f.read() % {"classname": "reftest-wait"}
+ wait_script = f.read() % {"classname": "reftest-wait"}
+ TestDriverExecutorMixin.__init__(self, wait_script)
def reset(self):
self.implementation.reset()
@@ -1101,8 +1103,9 @@ def screenshot(self, test, viewport_size, dpi, page_ranges):
self.extra_timeout).run()
def _screenshot(self, protocol, url, timeout):
- self.protocol.base.load(url)
- self.protocol.base.execute_script(self.wait_script, True)
+ # There's nothing we want from the "complete" message, so discard the
+ # return value.
+ self.run_testdriver(protocol, url, timeout)
screenshot = self.protocol.webdriver.screenshot()
if screenshot is None:
@@ -1147,8 +1150,7 @@ def screenshot(self, test, viewport_size, dpi, page_ranges):
self.extra_timeout).run()
def _render(self, protocol, url, timeout):
- protocol.webdriver.url = url
- protocol.base.execute_script(self.wait_script, asynchronous=True)
+ self.run_testdriver(protocol, url, timeout)
pdf = protocol.pdf_print.render_as_pdf(*self.viewport_size)
screenshots = protocol.pdf_print.pdf_to_png(pdf, self.page_ranges)
@@ -1160,8 +1162,9 @@ def _render(self, protocol, url, timeout):
return screenshots
-class WebDriverCrashtestExecutor(CrashtestExecutor):
+class WebDriverCrashtestExecutor(CrashtestExecutor, TestDriverExecutorMixin):
protocol_cls = WebDriverProtocol
+ supports_testdriver = True
def __init__(self, logger, browser, server_config, timeout_multiplier=1,
screenshot_cache=None, close_after_done=True,
@@ -1179,7 +1182,8 @@ def __init__(self, logger, browser, server_config, timeout_multiplier=1,
capabilities=capabilities)
with open(os.path.join(here, "test-wait.js")) as f:
- self.wait_script = f.read() % {"classname": "test-wait"}
+ wait_script = f.read() % {"classname": "test-wait"}
+ TestDriverExecutorMixin.__init__(self, wait_script)
def do_test(self, test):
timeout = (test.timeout * self.timeout_multiplier if self.debug_info is None
@@ -1198,8 +1202,7 @@ def do_test(self, test):
return (test.make_result(*data), [])
def do_crashtest(self, protocol, url, timeout):
- protocol.base.load(url)
- protocol.base.execute_script(self.wait_script, asynchronous=True)
+ self.run_testdriver(protocol, url, timeout)
result = {"status": "PASS", "message": None}
if (leak_part := getattr(protocol, "leak", None)) and (counters := leak_part.check()):
result["extra"] = {"leak_counters": counters}
diff --git a/tools/wptrunner/wptrunner/executors/message-queue.js b/tools/wptrunner/wptrunner/executors/message-queue.js
index bbad17180e0a827..c79b96aee29ec7a 100644
--- a/tools/wptrunner/wptrunner/executors/message-queue.js
+++ b/tools/wptrunner/wptrunner/executors/message-queue.js
@@ -54,15 +54,19 @@
case "complete":
var tests = data.tests;
var status = data.status;
-
- var subtest_results = tests.map(function(x) {
- return [x.name, x.status, x.message, x.stack];
- });
- payload = [status.status,
- status.message,
- status.stack,
- subtest_results];
- clearTimeout(window.__wptrunner_timer);
+ if (tests && status) {
+ var subtest_results = tests.map(function(x) {
+ return [x.name, x.status, x.message, x.stack];
+ });
+ payload = [status.status,
+ status.message,
+ status.stack,
+ subtest_results];
+ clearTimeout(window.__wptrunner_timer);
+ } else {
+ // Non-testharness test.
+ payload = [];
+ }
break;
case "action":
payload = data;
diff --git a/tools/wptrunner/wptrunner/executors/test-wait.js b/tools/wptrunner/wptrunner/executors/test-wait.js
index ad08ad7d76fb029..25f9868290a2d81 100644
--- a/tools/wptrunner/wptrunner/executors/test-wait.js
+++ b/tools/wptrunner/wptrunner/executors/test-wait.js
@@ -1,6 +1,8 @@
-var callback = arguments[arguments.length - 1];
-var observer = null;
-var root = document.documentElement;
+window.__wptrunner_url = arguments.length > 1 ? arguments[0] : location.href;
+window.__wptrunner_testdriver_callback = arguments[arguments.length - 1];
+if (window.__wptrunner_process_next_event) {
+ window.__wptrunner_process_next_event();
+}
function wait_load() {
if (Document.prototype.hasOwnProperty("fonts")) {
@@ -13,7 +15,6 @@ function wait_load() {
}
}
-
function wait_paints() {
// As of 2017-04-05, the Chromium web browser exhibits a rendering bug
// (https://bugs.chromium.org/p/chromium/issues/detail?id=708757) that
@@ -32,24 +33,35 @@ function wait_paints() {
}
function screenshot_if_ready() {
+ var root = document.documentElement;
if (root &&
root.classList.contains("%(classname)s") &&
- observer === null) {
- observer = new MutationObserver(wait_paints);
- observer.observe(root, {attributes: true});
+ !window.__wptrunner_observer) {
+ window.__wptrunner_observer = new MutationObserver(wait_paints);
+ __wptrunner_observer.observe(root, {attributes: true});
var event = new Event("TestRendered", {bubbles: true});
root.dispatchEvent(event);
return;
}
- if (observer !== null) {
- observer.disconnect();
+ if (window.__wptrunner_observer) {
+ __wptrunner_observer.disconnect();
+ }
+ if (window.__wptrunner_message_queue) {
+ __wptrunner_message_queue.push({type: "complete"});
+ } else {
+ // Not using `testdriver.js`, so manually post a raw completion message
+ // that the executor understands.
+ __wptrunner_testdriver_callback([__wptrunner_url, "complete", []]);
}
- callback();
}
if (document.readyState != "complete") {
- addEventListener('load', wait_load);
+ if (!window.__wptrunner_wait_load) {
+ window.__wptrunner_wait_load = wait_load;
+ addEventListener('load', __wptrunner_wait_load);
+ }
} else {
wait_load();
}
+// TODO: Decide whether unhandled promise rejections should be surfaced
+// (e.g. as a test failure or logged warning) instead of being ignored.
diff --git a/tools/wptrunner/wptrunner/testdriver-extra.js b/tools/wptrunner/wptrunner/testdriver-extra.js
index 079081c3a6d44dd..3def602cecce78e 100644
--- a/tools/wptrunner/wptrunner/testdriver-extra.js
+++ b/tools/wptrunner/wptrunner/testdriver-extra.js
@@ -59,7 +59,13 @@
});
function is_test_context() {
- return !!window.__wptrunner_is_test_context;
+ let rootClasses = document.documentElement.classList;
+ // For non-testharness tests, the presence of `(ref)test-wait` indicates
+ // it's the "main" browsing context through which testdriver actions are
+ // routed.
+ return !!window.__wptrunner_is_test_context
+ || rootClasses.contains("reftest-wait")
+ || rootClasses.contains("test-wait");
}
// Code copied from /common/utils.js
diff --git a/tools/wptrunner/wptrunner/wptrunner.py b/tools/wptrunner/wptrunner/wptrunner.py
index f390f29c0794f0e..b56ce47f6b7e655 100644
--- a/tools/wptrunner/wptrunner/wptrunner.py
+++ b/tools/wptrunner/wptrunner/wptrunner.py
@@ -258,24 +258,21 @@ def run_test_iteration(test_status, test_loader, test_queue_builder,
logger.test_end(test.id, status="SKIP", subsuite=subsuite_name)
test_status.skipped += 1
- if test_type == "testharness":
- for test in test_loader.tests[subsuite_name][test_type]:
- skip_reason = None
- if test.testdriver and not executor_cls.supports_testdriver:
- skip_reason = "Executor does not support testdriver.js"
- elif test.jsshell and not executor_cls.supports_jsshell:
- skip_reason = "Executor does not support jsshell"
- if skip_reason:
- logger.test_start(test.id, subsuite=subsuite_name)
- logger.test_end(test.id,
- status="SKIP",
- subsuite=subsuite_name,
- message=skip_reason)
- test_status.skipped += 1
- else:
- tests_to_run[(subsuite_name, test_type)].append(test)
- else:
- tests_to_run[(subsuite_name, test_type)] = test_loader.tests[subsuite_name][test_type]
+ for test in test_loader.tests[subsuite_name][test_type]:
+ skip_reason = None
+ if getattr(test, "testdriver", False) and not executor_cls.supports_testdriver:
+ skip_reason = "Executor does not support testdriver.js"
+ elif test_type == "testharness" and test.jsshell and not executor_cls.supports_jsshell:
+ skip_reason = "Executor does not support jsshell"
+ if skip_reason:
+ logger.test_start(test.id, subsuite=subsuite_name)
+ logger.test_end(test.id,
+ status="SKIP",
+ subsuite=subsuite_name,
+ message=skip_reason)
+ test_status.skipped += 1
+ else:
+ tests_to_run[(subsuite_name, test_type)].append(test)
unexpected_fail_tests = defaultdict(list)
unexpected_pass_tests = defaultdict(list)
diff --git a/tools/wptrunner/wptrunner/wpttest.py b/tools/wptrunner/wptrunner/wpttest.py
index 2e3fd974d4d43ee..42214f07e399f4d 100644
--- a/tools/wptrunner/wptrunner/wpttest.py
+++ b/tools/wptrunner/wptrunner/wpttest.py
@@ -535,7 +535,7 @@ class ReftestTest(Test):
def __init__(self, url_base, tests_root, url, inherit_metadata, test_metadata, references,
timeout=None, path=None, viewport_size=None, dpi=None, fuzzy=None,
- protocol="http", subdomain=False):
+ protocol="http", subdomain=False, testdriver=False):
Test.__init__(self, url_base, tests_root, url, inherit_metadata, test_metadata, timeout,
path, protocol, subdomain)
@@ -546,6 +546,7 @@ def __init__(self, url_base, tests_root, url, inherit_metadata, test_metadata, r
self.references = references
self.viewport_size = self.get_viewport_size(viewport_size)
self.dpi = dpi
+ self.testdriver = testdriver
self._fuzzy = fuzzy or {}
@classmethod
@@ -553,7 +554,8 @@ def cls_kwargs(cls, manifest_test):
return {"viewport_size": manifest_test.viewport_size,
"dpi": manifest_test.dpi,
"protocol": server_protocol(manifest_test),
- "fuzzy": manifest_test.fuzzy}
+ "fuzzy": manifest_test.fuzzy,
+ "testdriver": bool(getattr(manifest_test, "testdriver", False))}
@classmethod
def from_manifest(cls,
@@ -692,10 +694,10 @@ class PrintReftestTest(ReftestTest):
def __init__(self, url_base, tests_root, url, inherit_metadata, test_metadata, references,
timeout=None, path=None, viewport_size=None, dpi=None, fuzzy=None,
- page_ranges=None, protocol="http", subdomain=False):
+ page_ranges=None, protocol="http", subdomain=False, testdriver=False):
super().__init__(url_base, tests_root, url, inherit_metadata, test_metadata,
references, timeout, path, viewport_size, dpi,
- fuzzy, protocol, subdomain=subdomain)
+ fuzzy, protocol, subdomain=subdomain, testdriver=testdriver)
self._page_ranges = page_ranges
@classmethod
@@ -726,6 +728,26 @@ class CrashTest(Test):
result_cls = CrashtestResult
test_type = "crashtest"
+ def __init__(self, url_base, tests_root, url, inherit_metadata, test_metadata,
+ timeout=None, path=None, protocol="http", subdomain=False, testdriver=False):
+ super().__init__(url_base, tests_root, url, inherit_metadata, test_metadata,
+ timeout, path, protocol, subdomain=subdomain)
+ self.testdriver = testdriver
+
+ @classmethod
+ def from_manifest(cls, manifest_file, manifest_item, inherit_metadata, test_metadata):
+ timeout = cls.long_timeout if manifest_item.timeout == "long" else cls.default_timeout
+ return cls(manifest_file.url_base,
+ manifest_file.tests_root,
+ manifest_item.url,
+ inherit_metadata,
+ test_metadata,
+ timeout=timeout,
+ path=os.path.join(manifest_file.tests_root, manifest_item.path),
+ protocol=server_protocol(manifest_item),
+ subdomain=manifest_item.subdomain,
+ testdriver=bool(getattr(manifest_item, "testdriver", False)))
+
manifest_test_cls = {"reftest": ReftestTest,
"print-reftest": PrintReftestTest,