diff --git a/.eslintrc.js b/.eslintrc.js index 0a6b96877ec0c2..8acba223d986eb 100644 --- a/.eslintrc.js +++ b/.eslintrc.js @@ -324,5 +324,6 @@ module.exports = { TextEncoder: 'readable', TextDecoder: 'readable', queueMicrotask: 'readable', + globalThis: 'readable', }, }; diff --git a/.github/ISSUE_TEMPLATE/config.yml b/.github/ISSUE_TEMPLATE/config.yml index d319d377249b8b..428bbf678a34c3 100644 --- a/.github/ISSUE_TEMPLATE/config.yml +++ b/.github/ISSUE_TEMPLATE/config.yml @@ -3,6 +3,6 @@ contact_links: - name: ⁉️ Need help with Node.js? url: https://github.com/nodejs/help about: Please file an issue in our help repo. - - name: 🌐 Found a problem with nodejs.org? + - name: 🌐 Found a problem with nodejs.org beyond the API reference docs? url: https://github.com/nodejs/nodejs.org/issues/new/choose about: Please file an issue in the Node.js website repo. diff --git a/.travis.yml b/.travis.yml index b8811073331932..e1069317ecc2c5 100644 --- a/.travis.yml +++ b/.travis.yml @@ -88,3 +88,4 @@ jobs: - flake8 . --count --select=E9,F63,F7,F82 --show-source --statistics allow_failures: # TODO (cclauss): remove this when dependencies are clean - name: "Find syntax errors in our Python dependencies" + - name: "First commit message adheres to guidelines at https://goo.gl/p2fr5Q" diff --git a/BUILDING.md b/BUILDING.md index b44604ecf07734..d188aaff5a3a27 100644 --- a/BUILDING.md +++ b/BUILDING.md @@ -23,7 +23,7 @@ file a new issue. * [Unix and macOS](#unix-and-macos) * [Unix prerequisites](#unix-prerequisites) * [macOS prerequisites](#macos-prerequisites) - * [Building Node.js](#building-nodejs) + * [Building Node.js](#building-nodejs-1) * [Running Tests](#running-tests) * [Running Coverage](#running-coverage) * [Building the documentation](#building-the-documentation) @@ -31,7 +31,7 @@ file a new issue. * [Windows](#windows) * [Prerequisites](#prerequisites) * [Option 1: Manual install](#option-1-manual-install) - * [Option 1: Automated install with Boxstarter](#option-1-automated-install-with-boxstarter) + * [Option 2: Automated install with Boxstarter](#option-2-automated-install-with-boxstarter) * [Building Node.js](#building-nodejs-1) * [Android/Android-based devices (e.g. Firefox OS)](#androidandroid-based-devices-eg-firefox-os) * [`Intl` (ECMA-402) support](#intl-ecma-402-support) @@ -67,30 +67,25 @@ There are three support tiers: * **Tier 1**: These platforms represent the majority of Node.js users. The Node.js Build Working Group maintains infrastructure for full test coverage. - All commits to the Node.js repository are tested on these platforms. Test - failures on tier 1 platforms will block releases. + Test failures on tier 1 platforms will block releases. * **Tier 2**: These platforms represent smaller segments of the Node.js user base. The Node.js Build Working Group maintains infrastructure for full test - coverage. All commits to the Node.js repository are tested on these platforms. - Test failures on tier 2 platforms will block releases. Delays in release of - binaries for these platforms are acceptable where necessary due to - infrastructure concerns. + coverage. Test failures on tier 2 platforms will block releases. + Infrastructure issues may delay the release of binaries for these platforms. * **Experimental**: May not compile or test suite may not pass. The core team does not create releases for these platforms. Test failures on experimental platforms do not block releases. Contributions to improve support for these platforms are welcome. 
Platforms may move between tiers between major release lines. The table below -will be updated to reflect those changes. +will reflect those changes. ### Platform list -Compiling and running Node.js is supported for a limited set of operating -systems, architectures and libc versions. The table below lists the -combinations that the core team has committed to supporting and the nature of -that support as per the support tiers above. A list of -[supported compile toolchains](#supported-toolchains) is also supplied for -tier 1 platforms. +Node.js compilation/execution support depends on operating system, architecture, +and libc version. The table below lists the support tier for each supported +combination. A list of [supported compile toolchains](#supported-toolchains) is +also supplied for tier 1 platforms. **For production applications, run Node.js on supported platforms only.** @@ -116,26 +111,26 @@ platforms. This is true regardless of entries in the table below. | AIX | ppc64be >=power7 | >= 7.2 TL02 | Tier 2 | | | FreeBSD | x64 | >= 11 | Experimental | Downgraded as of Node.js 12 [7](#fn7) | -1: GCC 6 is not provided on the base platform, users will +1: GCC 6 is not provided on the base platform. Users will need the [Toolchain test builds PPA](https://launchpad.net/~ubuntu-toolchain-r/+archive/ubuntu/test?field.series_filter=xenial) or similar to source a newer compiler. -2: GCC 6 is not provided on the base platform, users will +2: GCC 6 is not provided on the base platform. Users will need the [devtoolset-6](https://www.softwarecollections.org/en/scls/rhscl/devtoolset-6/) or later to source a newer compiler. -3: Older kernel versions may work for ARM64, however the +3: Older kernel versions may work for ARM64. However the Node.js test infrastructure only tests >= 4.5. 4: On Windows, running Node.js in Windows terminal emulators like `mintty` requires the usage of [winpty](https://github.com/rprichard/winpty) - for the tty channels to work correctly (e.g. `winpty node.exe script.js`). + for the tty channels to work (e.g. `winpty node.exe script.js`). In "Git bash" if you call the node shell alias (`node` without the `.exe` extension), `winpty` is used automatically. -5: The Windows Subsystem for Linux (WSL) is not directly +5: The Windows Subsystem for Linux (WSL) is not supported, but the GNU/Linux build process and binaries should work. The community will only address issues that reproduce on native GNU/Linux systems. Issues that only reproduce on WSL should be reported in the @@ -145,11 +140,11 @@ platforms. This is true regardless of entries in the table below. 6: Running Node.js on x86 Windows should work and binaries are provided. However, tests in our infrastructure only run on WoW64. -Furthermore, compiling on x86 Windows is currently considered Experimental and +Furthermore, compiling on x86 Windows is Experimental and may not be possible. 7: The default FreeBSD 12.0 compiler is Clang 6.0.1, but -FreeBSD 12.1 upgrades to 8.0.1. Other Clang/LLVM versions are provided +FreeBSD 12.1 upgrades to 8.0.1. Other Clang/LLVM versions are available via the system's package manager, including Clang 9.0. ### Supported toolchains diff --git a/CPP_STYLE_GUIDE.md b/CPP_STYLE_GUIDE.md index f29c8df3210caa..f3dcd4e647b04e 100644 --- a/CPP_STYLE_GUIDE.md +++ b/CPP_STYLE_GUIDE.md @@ -14,9 +14,9 @@ codebase not related to stylistic issues. 
* [Align function arguments vertically](#align-function-arguments-vertically) * [Initialization lists](#initialization-lists) * [CamelCase for methods, functions, and classes](#camelcase-for-methods-functions-and-classes) - * [snake\_case for local variables and parameters](#snake_case-for-local-variables-and-parameters) - * [snake\_case\_ for private class fields](#snake_case_-for-private-class-fields) - * [snake\_case for C-like structs](#snake_case-for-c-like-structs) + * [`snake_case` for local variables and parameters](#snake_case-for-local-variables-and-parameters) + * [`snake_case_` for private class fields](#snake_case_-for-private-class-fields) + * [`snake_case` for C-like structs](#snake_case-for-c-like-structs) * [Space after `template`](#space-after-template) * [Memory Management](#memory-management) * [Memory allocation](#memory-allocation) @@ -155,7 +155,7 @@ class FooBar { }; ``` -### snake\_case for local variables and parameters +### `snake_case` for local variables and parameters ```c++ int FunctionThatDoesSomething(const char* important_string) { @@ -163,7 +163,7 @@ int FunctionThatDoesSomething(const char* important_string) { } ``` -### snake\_case\_ for private class fields +### `snake_case_` for private class fields ```c++ class Foo { @@ -172,7 +172,8 @@ class Foo { }; ``` -### snake\_case for C-like structs +### `snake_case` for C-like structs + For plain C-like structs snake_case can be used. ```c++ diff --git a/LICENSE b/LICENSE index f57c3dc0c37194..db59472ba13151 100644 --- a/LICENSE +++ b/LICENSE @@ -1526,3 +1526,28 @@ The externally maintained libraries used by Node.js are: ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. """ + +- uvwasi, located at deps/uvwasi, is licensed as follows: + """ + MIT License + + Copyright (c) 2019 Colin Ihrig and Contributors + + Permission is hereby granted, free of charge, to any person obtaining a copy + of this software and associated documentation files (the "Software"), to deal + in the Software without restriction, including without limitation the rights + to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be included in all + copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + SOFTWARE. + """ diff --git a/Makefile b/Makefile index 56f53582d1f494..05eaa73abbe0c6 100644 --- a/Makefile +++ b/Makefile @@ -842,6 +842,7 @@ endif endif endif endif +endif ifeq ($(DESTCPU),x64) ARCH=x64 else @@ -871,7 +872,6 @@ endif endif endif endif -endif # node and v8 use different arch names (e.g. node 'x86' vs v8 'ia32'). # pass the proper v8 arch name to $V8_ARCH based on user-specified $DESTCPU. diff --git a/SECURITY.md b/SECURITY.md index 7e984b7ba36872..3196055ccb78e5 100644 --- a/SECURITY.md +++ b/SECURITY.md @@ -23,8 +23,8 @@ the HackerOne platform. See for further details. 
## Reporting a Bug in a third party module Security bugs in third party modules should be reported to their respective -maintainers and should also be coordinated through the Node Ecosystem Security -Team via [HackerOne](https://hackerone.com/nodejs-ecosystem). +maintainers and should also be coordinated through the Node.js Ecosystem +Security Team via [HackerOne](https://hackerone.com/nodejs-ecosystem). Details regarding this process can be found in the [Security Working Group repository](https://github.com/nodejs/security-wg/blob/master/processes/third_party_vuln_process.md). diff --git a/benchmark/README.md b/benchmark/README.md index 45256670fdd2aa..c5fdad093471b5 100644 --- a/benchmark/README.md +++ b/benchmark/README.md @@ -5,7 +5,7 @@ of different Node.js implementations and different ways of writing JavaScript run by the built-in JavaScript engine. For a detailed guide on how to write and run benchmarks in this -directory, see [the guide on benchmarks](../doc/guides/writing-and-running-benchmarks.md). +directory, see [the guide on benchmarks](writing-and-running-benchmarks.md). ## Table of Contents @@ -74,21 +74,21 @@ The common.js module is used by benchmarks for consistency across repeated tasks. It has a number of helpful functions and properties to help with writing benchmarks. -### createBenchmark(fn, configs\[, options\]) +### `createBenchmark(fn, configs[, options])` -See [the guide on writing benchmarks](../doc/guides/writing-and-running-benchmarks.md#basics-of-a-benchmark). +See [the guide on writing benchmarks](writing-and-running-benchmarks.md#basics-of-a-benchmark). -### default\_http\_benchmarker +### `default_http_benchmarker` The default benchmarker used to run HTTP benchmarks. -See [the guide on writing HTTP benchmarks](../doc/guides/writing-and-running-benchmarks.md#creating-an-http-benchmark). +See [the guide on writing HTTP benchmarks](writing-and-running-benchmarks.md#creating-an-http-benchmark). -### PORT +### `PORT` The default port used to run HTTP benchmarks. -See [the guide on writing HTTP benchmarks](../doc/guides/writing-and-running-benchmarks.md#creating-an-http-benchmark). +See [the guide on writing HTTP benchmarks](writing-and-running-benchmarks.md#creating-an-http-benchmark). -### sendResult(data) +### `sendResult(data)` Used in special benchmarks that can't use `createBenchmark` and the object it returns to accomplish what they need. 
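To make the `createBenchmark(fn, configs[, options])` entry above concrete, here is a minimal sketch written in the style of the buffer benchmarks elsewhere in this patch. The file name, configuration values, and the body of `main` are illustrative only, and it assumes the file lives in a subdirectory of `benchmark/` so that `../common.js` resolves.

```js
'use strict';
const common = require('../common.js');

// The harness runs main() once per combination of the config values below.
const bench = common.createBenchmark(main, {
  len: [256, 1024],
  n: [1e6]
});

function main({ len, n }) {
  const buf = Buffer.alloc(len, 'a');
  bench.start();                 // begin timing
  for (let i = 0; i < n; i++)
    buf.toString('hex');
  bench.end(n);                  // report n operations over the elapsed time
}
```

Benchmarks that cannot be structured this way use the `sendResult(data)` helper introduced above instead.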
This function reports timing diff --git a/benchmark/_cli.js b/benchmark/_cli.js index 8b9f22d0fb4b57..771cc72bff1964 100644 --- a/benchmark/_cli.js +++ b/benchmark/_cli.js @@ -34,7 +34,7 @@ function CLI(usage, settings) { if (arg === '--') { // Only items can follow -- mode = 'item'; - } else if (['both', 'option'].includes(mode) && arg[0] === '-') { + } else if ('both' === mode && arg[0] === '-') { // Optional arguments declaration if (arg[1] === '-') { @@ -82,13 +82,12 @@ CLI.prototype.abort = function(msg) { CLI.prototype.benchmarks = function() { const paths = []; - const filter = this.optional.filter || false; for (const category of this.items) { if (benchmarks[category] === undefined) continue; for (const scripts of benchmarks[category]) { - if (filter && scripts.lastIndexOf(filter) === -1) continue; + if (this.shouldSkip(scripts)) continue; paths.push(path.join(category, scripts)); } @@ -96,3 +95,23 @@ CLI.prototype.benchmarks = function() { return paths; }; + +CLI.prototype.shouldSkip = function(scripts) { + const filters = this.optional.filter || []; + const excludes = this.optional.exclude || []; + let skip = filters.length > 0; + + for (const filter of filters) { + if (scripts.lastIndexOf(filter) !== -1) { + skip = false; + } + } + + for (const exclude of excludes) { + if (scripts.lastIndexOf(exclude) !== -1) { + skip = true; + } + } + + return skip; +}; diff --git a/benchmark/_http-benchmarkers.js b/benchmark/_http-benchmarkers.js index a4d623003947eb..7dd06be2655744 100644 --- a/benchmark/_http-benchmarkers.js +++ b/benchmark/_http-benchmarkers.js @@ -5,7 +5,7 @@ const path = require('path'); const fs = require('fs'); const requirementsURL = - 'https://github.com/nodejs/node/blob/master/doc/guides/writing-and-running-benchmarks.md#http-benchmark-requirements'; + 'https://github.com/nodejs/node/blob/master/benchmark/writing-and-running-benchmarks.md#http-benchmark-requirements'; // The port used by servers and wrk exports.PORT = Number(process.env.PORT) || 12346; diff --git a/benchmark/async_hooks/http-server.js b/benchmark/async_hooks/http-server.js new file mode 100644 index 00000000000000..493500fd1f2d66 --- /dev/null +++ b/benchmark/async_hooks/http-server.js @@ -0,0 +1,40 @@ +'use strict'; +const common = require('../common.js'); + +const bench = common.createBenchmark(main, { + asyncHooks: ['init', 'before', 'after', 'all', 'disabled', 'none'], + connections: [50, 500] +}); + +function main({ asyncHooks, connections }) { + if (asyncHooks !== 'none') { + let hooks = { + init() {}, + before() {}, + after() {}, + destroy() {}, + promiseResolve() {} + }; + if (asyncHooks !== 'all' || asyncHooks !== 'disabled') { + hooks = { + [asyncHooks]: () => {} + }; + } + const hook = require('async_hooks').createHook(hooks); + if (asyncHooks !== 'disabled') { + hook.enable(); + } + } + const server = require('../fixtures/simple-http-server.js') + .listen(common.PORT) + .on('listening', () => { + const path = '/buffer/4/4/normal/1'; + + bench.http({ + connections, + path, + }, () => { + server.close(); + }); + }); +} diff --git a/benchmark/buffers/buffer-bytelength.js b/benchmark/buffers/buffer-bytelength.js index 3246a42d96b34c..1b324a49f89de8 100644 --- a/benchmark/buffers/buffer-bytelength.js +++ b/benchmark/buffers/buffer-bytelength.js @@ -16,8 +16,8 @@ const chars = [ ]; function main({ n, len, encoding }) { - var strings = []; - var results = [ len * 16 ]; + let strings = []; + let results = [ len * 16 ]; if (encoding === 'buffer') { strings = [ Buffer.alloc(len * 16, 'a') ]; } else 
{ diff --git a/benchmark/buffers/buffer-compare-instance-method.js b/benchmark/buffers/buffer-compare-instance-method.js index 284001c236d99d..eb18fa518037f4 100644 --- a/benchmark/buffers/buffer-compare-instance-method.js +++ b/benchmark/buffers/buffer-compare-instance-method.js @@ -12,7 +12,6 @@ function main({ n, size, args }) { const b1 = Buffer.alloc(size, 'a'); const b0Len = b0.length; const b1Len = b1.length; - var i; b1[size - 1] = 'b'.charCodeAt(0); @@ -20,7 +19,7 @@ function main({ n, size, args }) { case 2: b0.compare(b1, 0); bench.start(); - for (i = 0; i < n; i++) { + for (let i = 0; i < n; i++) { b0.compare(b1, 0); } bench.end(n); @@ -28,7 +27,7 @@ function main({ n, size, args }) { case 3: b0.compare(b1, 0, b1Len); bench.start(); - for (i = 0; i < n; i++) { + for (let i = 0; i < n; i++) { b0.compare(b1, 0, b1Len); } bench.end(n); @@ -36,7 +35,7 @@ function main({ n, size, args }) { case 4: b0.compare(b1, 0, b1Len, 0); bench.start(); - for (i = 0; i < n; i++) { + for (let i = 0; i < n; i++) { b0.compare(b1, 0, b1Len, 0); } bench.end(n); @@ -44,7 +43,7 @@ function main({ n, size, args }) { case 5: b0.compare(b1, 0, b1Len, 0, b0Len); bench.start(); - for (i = 0; i < n; i++) { + for (let i = 0; i < n; i++) { b0.compare(b1, 0, b1Len, 0, b0Len); } bench.end(n); @@ -52,7 +51,7 @@ function main({ n, size, args }) { default: b0.compare(b1); bench.start(); - for (i = 0; i < n; i++) { + for (let i = 0; i < n; i++) { b0.compare(b1); } bench.end(n); diff --git a/benchmark/buffers/buffer-hex.js b/benchmark/buffers/buffer-hex.js index 1b49ca745381da..33d2b44d6a5950 100644 --- a/benchmark/buffers/buffer-hex.js +++ b/benchmark/buffers/buffer-hex.js @@ -9,16 +9,15 @@ const bench = common.createBenchmark(main, { function main({ len, n }) { const buf = Buffer.alloc(len); - var i; - for (i = 0; i < buf.length; i++) + for (let i = 0; i < buf.length; i++) buf[i] = i & 0xff; const hex = buf.toString('hex'); bench.start(); - for (i = 0; i < n; i += 1) + for (let i = 0; i < n; i += 1) Buffer.from(hex, 'hex'); bench.end(n); diff --git a/benchmark/buffers/buffer-indexof.js b/benchmark/buffers/buffer-indexof.js index e635da2351c4a7..4c0993a1ef0ef9 100644 --- a/benchmark/buffers/buffer-indexof.js +++ b/benchmark/buffers/buffer-indexof.js @@ -25,7 +25,7 @@ const bench = common.createBenchmark(main, { }); function main({ n, search, encoding, type }) { - var aliceBuffer = fs.readFileSync( + let aliceBuffer = fs.readFileSync( path.resolve(__dirname, '../fixtures/alice.html') ); diff --git a/benchmark/buffers/buffer-iterate.js b/benchmark/buffers/buffer-iterate.js index e81ce2b3035d90..de002108a95d65 100644 --- a/benchmark/buffers/buffer-iterate.js +++ b/benchmark/buffers/buffer-iterate.js @@ -29,15 +29,15 @@ function main({ size, type, method, n }) { } function benchFor(buffer, n) { - for (var k = 0; k < n; k++) { - for (var i = 0; i < buffer.length; i++) { + for (let k = 0; k < n; k++) { + for (let i = 0; i < buffer.length; i++) { assert(buffer[i] === 0); } } } function benchForOf(buffer, n) { - for (var k = 0; k < n; k++) { + for (let k = 0; k < n; k++) { for (const b of buffer) { assert(b === 0); } @@ -45,7 +45,7 @@ function benchForOf(buffer, n) { } function benchIterator(buffer, n) { - for (var k = 0; k < n; k++) { + for (let k = 0; k < n; k++) { const iter = buffer[Symbol.iterator](); let cur = iter.next(); diff --git a/benchmark/buffers/buffer-swap.js b/benchmark/buffers/buffer-swap.js index d22bf2fd5504ea..a33bac4ae3ed78 100644 --- a/benchmark/buffers/buffer-swap.js +++ 
b/benchmark/buffers/buffer-swap.js @@ -36,7 +36,7 @@ Buffer.prototype.htons = function htons() { Buffer.prototype.htonl = function htonl() { if (this.length % 4 !== 0) throw new RangeError(); - for (var i = 0; i < this.length; i += 4) { + for (let i = 0; i < this.length; i += 4) { swap(this, i, i + 3); swap(this, i + 1, i + 2); } @@ -66,7 +66,7 @@ function createBuffer(len, aligned) { function genMethod(method) { const fnString = ` return function ${method}(n, buf) { - for (var i = 0; i <= n; i++) + for (let i = 0; i <= n; i++) buf.${method}(); }`; return (new Function(fnString))(); diff --git a/benchmark/buffers/buffer-tostring.js b/benchmark/buffers/buffer-tostring.js index 88543d8fd3af5a..98d6ab3fa08311 100644 --- a/benchmark/buffers/buffer-tostring.js +++ b/benchmark/buffers/buffer-tostring.js @@ -15,29 +15,28 @@ function main({ encoding, args, len, n }) { if (encoding.length === 0) encoding = undefined; - var i; switch (args) { case 1: bench.start(); - for (i = 0; i < n; i += 1) + for (let i = 0; i < n; i += 1) buf.toString(encoding); bench.end(n); break; case 2: bench.start(); - for (i = 0; i < n; i += 1) + for (let i = 0; i < n; i += 1) buf.toString(encoding, 0); bench.end(n); break; case 3: bench.start(); - for (i = 0; i < n; i += 1) + for (let i = 0; i < n; i += 1) buf.toString(encoding, 0, len); bench.end(n); break; default: bench.start(); - for (i = 0; i < n; i += 1) + for (let i = 0; i < n; i += 1) buf.toString(); bench.end(n); break; diff --git a/benchmark/buffers/buffer-write-string.js b/benchmark/buffers/buffer-write-string.js index b30edcf8867adf..cd24d379728cda 100644 --- a/benchmark/buffers/buffer-write-string.js +++ b/benchmark/buffers/buffer-write-string.js @@ -15,21 +15,19 @@ function main({ len, n, encoding, args }) { let start = 0; const buf = Buffer.allocUnsafe(len); - var i; - switch (args) { case 'offset': string = 'a'.repeat(Math.floor(len / 2)); start = len - string.length; if (encoding) { bench.start(); - for (i = 0; i < n; ++i) { + for (let i = 0; i < n; ++i) { buf.write(string, start, encoding); } bench.end(n); } else { bench.start(); - for (i = 0; i < n; ++i) { + for (let i = 0; i < n; ++i) { buf.write(string, start); } bench.end(n); @@ -39,13 +37,13 @@ function main({ len, n, encoding, args }) { string = 'a'.repeat(len); if (encoding) { bench.start(); - for (i = 0; i < n; ++i) { + for (let i = 0; i < n; ++i) { buf.write(string, 0, buf.length, encoding); } bench.end(n); } else { bench.start(); - for (i = 0; i < n; ++i) { + for (let i = 0; i < n; ++i) { buf.write(string, 0, buf.length); } bench.end(n); @@ -55,13 +53,13 @@ function main({ len, n, encoding, args }) { string = 'a'.repeat(len); if (encoding) { bench.start(); - for (i = 0; i < n; ++i) { + for (let i = 0; i < n; ++i) { buf.write(string, encoding); } bench.end(n); } else { bench.start(); - for (i = 0; i < n; ++i) { + for (let i = 0; i < n; ++i) { buf.write(string); } bench.end(n); diff --git a/benchmark/child_process/child-process-exec-stdout.js b/benchmark/child_process/child-process-exec-stdout.js index a1dc4aa04c72a9..f750e2cf3e68f2 100644 --- a/benchmark/child_process/child-process-exec-stdout.js +++ b/benchmark/child_process/child-process-exec-stdout.js @@ -19,7 +19,7 @@ function childProcessExecStdout({ dur, len }) { const cmd = `yes "${'.'.repeat(len)}"`; const child = exec(cmd, { 'stdio': ['ignore', 'pipe', 'ignore'] }); - var bytes = 0; + let bytes = 0; child.stdout.on('data', (msg) => { bytes += msg.length; }); diff --git a/benchmark/child_process/child-process-read-ipc.js 
b/benchmark/child_process/child-process-read-ipc.js index a9e9cdf7fd7914..827f75b1e54bd1 100644 --- a/benchmark/child_process/child-process-read-ipc.js +++ b/benchmark/child_process/child-process-read-ipc.js @@ -26,7 +26,7 @@ if (process.argv[2] === 'child') { const child = spawn(process.argv[0], [process.argv[1], 'child', len], options); - var bytes = 0; + let bytes = 0; child.on('message', (msg) => { bytes += msg.length; }); setTimeout(() => { diff --git a/benchmark/child_process/child-process-read.js b/benchmark/child_process/child-process-read.js index 3c0144116f589f..01e9846be22b56 100644 --- a/benchmark/child_process/child-process-read.js +++ b/benchmark/child_process/child-process-read.js @@ -24,7 +24,7 @@ function main({ dur, len }) { const options = { 'stdio': ['ignore', 'pipe', 'ignore'] }; const child = child_process.spawn('yes', [msg], options); - var bytes = 0; + let bytes = 0; child.stdout.on('data', (msg) => { bytes += msg.length; }); diff --git a/benchmark/cluster/echo.js b/benchmark/cluster/echo.js index 73c5971cd41eb2..9def87cf8b0e98 100644 --- a/benchmark/cluster/echo.js +++ b/benchmark/cluster/echo.js @@ -7,15 +7,24 @@ if (cluster.isMaster) { workers: [1], payload: ['string', 'object'], sendsPerBroadcast: [1, 10], + serialization: ['json', 'advanced'], n: [1e5] }); - function main({ n, workers, sendsPerBroadcast, payload }) { + function main({ + n, + workers, + sendsPerBroadcast, + payload, + serialization + }) { const expectedPerBroadcast = sendsPerBroadcast * workers; - var readies = 0; - var broadcasts = 0; - var msgCount = 0; - var data; + let readies = 0; + let broadcasts = 0; + let msgCount = 0; + let data; + + cluster.settings.serialization = serialization; switch (payload) { case 'string': @@ -28,7 +37,7 @@ if (cluster.isMaster) { throw new Error('Unsupported payload type'); } - for (var i = 0; i < workers; ++i) + for (let i = 0; i < workers; ++i) cluster.fork().on('online', onOnline).on('message', onMessage); function onOnline() { @@ -39,16 +48,15 @@ if (cluster.isMaster) { } function broadcast() { - var id; if (broadcasts++ === n) { bench.end(n); - for (id in cluster.workers) + for (const id in cluster.workers) cluster.workers[id].disconnect(); return; } - for (id in cluster.workers) { + for (const id in cluster.workers) { const worker = cluster.workers[id]; - for (var i = 0; i < sendsPerBroadcast; ++i) + for (let i = 0; i < sendsPerBroadcast; ++i) worker.send(data); } } diff --git a/benchmark/compare.js b/benchmark/compare.js index bd7c4a95cbb617..53f82bb4b9f1b9 100644 --- a/benchmark/compare.js +++ b/benchmark/compare.js @@ -18,10 +18,13 @@ const cli = CLI(`usage: ./node compare.js [options] [--] ... 
--new ./new-node-binary new node binary (required) --old ./old-node-binary old node binary (required) --runs 30 number of samples - --filter pattern string to filter benchmark scripts + --filter pattern includes only benchmark scripts matching + (can be repeated) + --exclude pattern excludes scripts matching (can be + repeated) --set variable=value set benchmark variable (can be repeated) --no-progress don't show benchmark progress indicator -`, { arrayArgs: ['set'], boolArgs: ['no-progress'] }); +`, { arrayArgs: ['set', 'filter', 'exclude'], boolArgs: ['no-progress'] }); if (!cli.optional.new || !cli.optional.old) { cli.abort(cli.usage); diff --git a/benchmark/crypto/aes-gcm-throughput.js b/benchmark/crypto/aes-gcm-throughput.js index cd8f29c8c7d7b2..b1b08c481700ea 100644 --- a/benchmark/crypto/aes-gcm-throughput.js +++ b/benchmark/crypto/aes-gcm-throughput.js @@ -25,7 +25,7 @@ function AEAD_Bench(cipher, message, associate_data, key, iv, n, len) { const bits = written * 8; const mbits = bits / (1024 * 1024); - for (var i = 0; i < n; i++) { + for (let i = 0; i < n; i++) { const alice = crypto.createCipheriv(cipher, key, iv); alice.setAAD(associate_data); const enc = alice.update(message); diff --git a/benchmark/crypto/cipher-stream.js b/benchmark/crypto/cipher-stream.js index f426a32769d0dd..4bb1695e2d20cc 100644 --- a/benchmark/crypto/cipher-stream.js +++ b/benchmark/crypto/cipher-stream.js @@ -38,8 +38,8 @@ function main({ api, cipher, type, len, writes }) { const alice_cipher = crypto.createCipher(cipher, alice_secret); const bob_cipher = crypto.createDecipher(cipher, bob_secret); - var message; - var encoding; + let message; + let encoding; switch (type) { case 'asc': message = 'a'.repeat(len); @@ -65,7 +65,7 @@ function main({ api, cipher, type, len, writes }) { } function streamWrite(alice, bob, message, encoding, writes) { - var written = 0; + let written = 0; bob.on('data', (c) => { written += c.length; }); @@ -86,9 +86,9 @@ function streamWrite(alice, bob, message, encoding, writes) { } function legacyWrite(alice, bob, message, encoding, writes) { - var written = 0; - var enc, dec; - for (var i = 0; i < writes; i++) { + let written = 0; + let enc, dec; + for (let i = 0; i < writes; i++) { enc = alice.update(message, encoding); dec = bob.update(enc); written += dec.length; diff --git a/benchmark/crypto/get-ciphers.js b/benchmark/crypto/get-ciphers.js index 5bbe0915311484..1bf910a6a80ba3 100644 --- a/benchmark/crypto/get-ciphers.js +++ b/benchmark/crypto/get-ciphers.js @@ -9,7 +9,7 @@ const bench = common.createBenchmark(main, { function main({ n, v }) { const method = require(v).getCiphers; - var i = 0; + let i = 0; // First call to getCiphers will dominate the results if (n > 1) { for (; i < n; i++) diff --git a/benchmark/crypto/hash-stream-creation.js b/benchmark/crypto/hash-stream-creation.js index 0a98eb1471c9d4..c21eb6eaaaed99 100644 --- a/benchmark/crypto/hash-stream-creation.js +++ b/benchmark/crypto/hash-stream-creation.js @@ -20,8 +20,8 @@ function main({ api, type, len, out, writes, algo }) { api = 'legacy'; } - var message; - var encoding; + let message; + let encoding; switch (type) { case 'asc': message = 'a'.repeat(len); @@ -52,7 +52,7 @@ function legacyWrite(algo, message, encoding, writes, len, outEnc) { while (writes-- > 0) { const h = crypto.createHash(algo); h.update(message, encoding); - var res = h.digest(outEnc); + let res = h.digest(outEnc); // Include buffer creation costs for older versions if (outEnc === 'buffer' && typeof res === 'string') diff --git 
a/benchmark/crypto/hash-stream-throughput.js b/benchmark/crypto/hash-stream-throughput.js index 52be3d4caf8501..b86d70604616b5 100644 --- a/benchmark/crypto/hash-stream-throughput.js +++ b/benchmark/crypto/hash-stream-throughput.js @@ -19,8 +19,8 @@ function main({ api, type, len, algo, writes }) { api = 'legacy'; } - var message; - var encoding; + let message; + let encoding; switch (type) { case 'asc': message = 'a'.repeat(len); diff --git a/benchmark/crypto/rsa-encrypt-decrypt-throughput.js b/benchmark/crypto/rsa-encrypt-decrypt-throughput.js index 13153c20cad569..9791160263c9ec 100644 --- a/benchmark/crypto/rsa-encrypt-decrypt-throughput.js +++ b/benchmark/crypto/rsa-encrypt-decrypt-throughput.js @@ -35,7 +35,7 @@ function StreamWrite(algo, keylen, message, n, len) { const privateKey = RSA_PrivatePem[keylen]; const publicKey = RSA_PublicPem[keylen]; - for (var i = 0; i < n; i++) { + for (let i = 0; i < n; i++) { const enc = crypto.privateEncrypt(privateKey, message); crypto.publicDecrypt(publicKey, enc); } diff --git a/benchmark/dgram/array-vs-concat.js b/benchmark/dgram/array-vs-concat.js index d260a48063d489..ae9a759a983c04 100644 --- a/benchmark/dgram/array-vs-concat.js +++ b/benchmark/dgram/array-vs-concat.js @@ -18,12 +18,12 @@ const bench = common.createBenchmark(main, { function main({ dur, len, num, type, chunks }) { const chunk = []; - for (var i = 0; i < chunks; i++) { + for (let i = 0; i < chunks; i++) { chunk.push(Buffer.allocUnsafe(Math.round(len / chunks))); } // Server - var sent = 0; + let sent = 0; const socket = dgram.createSocket('udp4'); const onsend = type === 'concat' ? onsendConcat : onsendMulti; @@ -32,7 +32,7 @@ function main({ dur, len, num, type, chunks }) { // The setImmediate() is necessary to have event loop progress on OSes // that only perform synchronous I/O on nonblocking UDP sockets. setImmediate(() => { - for (var i = 0; i < num; i++) { + for (let i = 0; i < num; i++) { socket.send(Buffer.concat(chunk), PORT, '127.0.0.1', onsend); } }); @@ -44,7 +44,7 @@ function main({ dur, len, num, type, chunks }) { // The setImmediate() is necessary to have event loop progress on OSes // that only perform synchronous I/O on nonblocking UDP sockets. setImmediate(() => { - for (var i = 0; i < num; i++) { + for (let i = 0; i < num; i++) { socket.send(chunk, PORT, '127.0.0.1', onsend); } }); diff --git a/benchmark/dgram/bind-params.js b/benchmark/dgram/bind-params.js index ea1f430eed929b..5f7999f7a39241 100644 --- a/benchmark/dgram/bind-params.js +++ b/benchmark/dgram/bind-params.js @@ -15,11 +15,10 @@ const noop = () => {}; function main({ n, port, address }) { port = port === 'true' ? 0 : undefined; address = address === 'true' ? 
'0.0.0.0' : undefined; - var i; if (port !== undefined && address !== undefined) { bench.start(); - for (i = 0; i < n; i++) { + for (let i = 0; i < n; i++) { dgram.createSocket('udp4').bind(port, address) .on('error', noop) .unref(); @@ -27,7 +26,7 @@ function main({ n, port, address }) { bench.end(n); } else if (port !== undefined) { bench.start(); - for (i = 0; i < n; i++) { + for (let i = 0; i < n; i++) { dgram.createSocket('udp4') .bind(port) .on('error', noop) @@ -36,7 +35,7 @@ function main({ n, port, address }) { bench.end(n); } else if (port === undefined && address === undefined) { bench.start(); - for (i = 0; i < n; i++) { + for (let i = 0; i < n; i++) { dgram.createSocket('udp4') .bind() .on('error', noop) diff --git a/benchmark/dgram/multi-buffer.js b/benchmark/dgram/multi-buffer.js index 7b69a82255ed4b..add77cd7845ce1 100644 --- a/benchmark/dgram/multi-buffer.js +++ b/benchmark/dgram/multi-buffer.js @@ -18,11 +18,11 @@ const bench = common.createBenchmark(main, { function main({ dur, len, num, type, chunks }) { const chunk = []; - for (var i = 0; i < chunks; i++) { + for (let i = 0; i < chunks; i++) { chunk.push(Buffer.allocUnsafe(Math.round(len / chunks))); } - var sent = 0; - var received = 0; + let sent = 0; + let received = 0; const socket = dgram.createSocket('udp4'); function onsend() { @@ -30,7 +30,7 @@ function main({ dur, len, num, type, chunks }) { // The setImmediate() is necessary to have event loop progress on OSes // that only perform synchronous I/O on nonblocking UDP sockets. setImmediate(() => { - for (var i = 0; i < num; i++) { + for (let i = 0; i < num; i++) { socket.send(chunk, PORT, '127.0.0.1', onsend); } }); diff --git a/benchmark/dgram/offset-length.js b/benchmark/dgram/offset-length.js index 696fa6a7a0c0bd..eea0b75b773ca8 100644 --- a/benchmark/dgram/offset-length.js +++ b/benchmark/dgram/offset-length.js @@ -17,8 +17,8 @@ const bench = common.createBenchmark(main, { function main({ dur, len, num, type }) { const chunk = Buffer.allocUnsafe(len); - var sent = 0; - var received = 0; + let sent = 0; + let received = 0; const socket = dgram.createSocket('udp4'); function onsend() { @@ -26,7 +26,7 @@ function main({ dur, len, num, type }) { // The setImmediate() is necessary to have event loop progress on OSes // that only perform synchronous I/O on nonblocking UDP sockets. setImmediate(() => { - for (var i = 0; i < num; i++) { + for (let i = 0; i < num; i++) { socket.send(chunk, 0, chunk.length, PORT, '127.0.0.1', onsend); } }); diff --git a/benchmark/dgram/single-buffer.js b/benchmark/dgram/single-buffer.js index 5c95b17887d37a..b02f785bb052f2 100644 --- a/benchmark/dgram/single-buffer.js +++ b/benchmark/dgram/single-buffer.js @@ -17,8 +17,8 @@ const bench = common.createBenchmark(main, { function main({ dur, len, num, type }) { const chunk = Buffer.allocUnsafe(len); - var sent = 0; - var received = 0; + let sent = 0; + let received = 0; const socket = dgram.createSocket('udp4'); function onsend() { @@ -26,7 +26,7 @@ function main({ dur, len, num, type }) { // The setImmediate() is necessary to have event loop progress on OSes // that only perform synchronous I/O on nonblocking UDP sockets. 
setImmediate(() => { - for (var i = 0; i < num; i++) { + for (let i = 0; i < num; i++) { socket.send(chunk, PORT, '127.0.0.1', onsend); } }); diff --git a/benchmark/fs/bench-opendir.js b/benchmark/fs/bench-opendir.js index 419c3a231a850b..20e178d9cc5236 100644 --- a/benchmark/fs/bench-opendir.js +++ b/benchmark/fs/bench-opendir.js @@ -7,10 +7,11 @@ const path = require('path'); const bench = common.createBenchmark(main, { n: [100], dir: [ 'lib', 'test/parallel'], - mode: [ 'async', 'sync', 'callback' ] + mode: [ 'async', 'sync', 'callback' ], + bufferSize: [ 4, 32, 1024 ] }); -async function main({ n, dir, mode }) { +async function main({ n, dir, mode, bufferSize }) { const fullPath = path.resolve(__dirname, '../../', dir); bench.start(); @@ -18,11 +19,12 @@ async function main({ n, dir, mode }) { let counter = 0; for (let i = 0; i < n; i++) { if (mode === 'async') { + const dir = await fs.promises.opendir(fullPath, { bufferSize }); // eslint-disable-next-line no-unused-vars - for await (const entry of await fs.promises.opendir(fullPath)) + for await (const entry of dir) counter++; } else if (mode === 'callback') { - const dir = await fs.promises.opendir(fullPath); + const dir = await fs.promises.opendir(fullPath, { bufferSize }); await new Promise((resolve, reject) => { function read() { dir.read((err, entry) => { @@ -40,7 +42,7 @@ async function main({ n, dir, mode }) { read(); }); } else { - const dir = fs.opendirSync(fullPath); + const dir = fs.opendirSync(fullPath, { bufferSize }); while (dir.readSync() !== null) counter++; dir.closeSync(); diff --git a/benchmark/net/net-pipe.js b/benchmark/net/net-pipe.js index f19e30b5450785..06426129f7f271 100644 --- a/benchmark/net/net-pipe.js +++ b/benchmark/net/net-pipe.js @@ -6,7 +6,7 @@ const net = require('net'); const PORT = common.PORT; const bench = common.createBenchmark(main, { - len: [64, 102400, 1024 * 1024 * 16], + len: [2, 64, 102400, 1024 * 1024 * 16], type: ['utf', 'asc', 'buf'], dur: [5], }); diff --git a/benchmark/run.js b/benchmark/run.js index 2eb1ab1a4b0905..8e81a2c5e16ab7 100644 --- a/benchmark/run.js +++ b/benchmark/run.js @@ -8,10 +8,13 @@ const cli = CLI(`usage: ./node run.js [options] [--] ... Run each benchmark in the directory a single time, more than one directory can be specified. 
- --filter pattern string to filter benchmark scripts + --filter pattern includes only benchmark scripts matching + (can be repeated) + --exclude pattern excludes scripts matching (can be + repeated) --set variable=value set benchmark variable (can be repeated) --format [simple|csv] optional value that specifies the output format -`, { arrayArgs: ['set'] }); +`, { arrayArgs: ['set', 'filter', 'exclude'] }); const benchmarks = cli.benchmarks(); if (benchmarks.length === 0) { diff --git a/benchmark/streams/writable-manywrites.js b/benchmark/streams/writable-manywrites.js index 0ed38d0357a438..049bf8eb281db2 100644 --- a/benchmark/streams/writable-manywrites.js +++ b/benchmark/streams/writable-manywrites.js @@ -5,23 +5,39 @@ const Writable = require('stream').Writable; const bench = common.createBenchmark(main, { n: [2e6], - sync: ['yes', 'no'] + sync: ['yes', 'no'], + writev: ['yes', 'no'], + callback: ['yes', 'no'] }); -function main({ n, sync }) { +function main({ n, sync, writev, callback }) { const b = Buffer.allocUnsafe(1024); const s = new Writable(); sync = sync === 'yes'; - s._write = function(chunk, encoding, cb) { + + const writecb = (cb) => { if (sync) cb(); else process.nextTick(cb); }; + if (writev === 'yes') { + s._writev = (chunks, cb) => writecb(cb); + } else { + s._write = (chunk, encoding, cb) => writecb(cb); + } + + const cb = callback === 'yes' ? () => {} : null; + bench.start(); - for (var k = 0; k < n; ++k) { - s.write(b); + + let k = 0; + function run() { + while (k++ < n && s.write(b, cb)); + if (k >= n) + bench.end(n); } - bench.end(n); + s.on('drain', run); + run(); } diff --git a/benchmark/tls/secure-pair.js b/benchmark/tls/secure-pair.js index c0409febacda00..c52f4cbf918a1d 100644 --- a/benchmark/tls/secure-pair.js +++ b/benchmark/tls/secure-pair.js @@ -2,8 +2,8 @@ const common = require('../common.js'); const bench = common.createBenchmark(main, { dur: [5], - securing: ['SecurePair', 'TLSSocket'], - size: [2, 1024, 1024 * 1024] + securing: ['SecurePair', 'TLSSocket', 'clear'], + size: [2, 100, 1024, 1024 * 1024] }); const fixtures = require('../../test/common/fixtures'); @@ -37,7 +37,8 @@ function main({ dur, size, securing }) { isServer: false, rejectUnauthorized: false, }; - const conn = tls.connect(clientOptions, () => { + const network = securing === 'clear' ? 
net : tls; + const conn = network.connect(clientOptions, () => { setTimeout(() => { const mbits = (received * 8) / (1024 * 1024); bench.end(mbits); @@ -69,6 +70,9 @@ function main({ dur, size, securing }) { case 'TLSSocket': secureTLSSocket(conn, client); break; + case 'clear': + conn.pipe(client); + break; default: throw new Error('Invalid securing method'); } diff --git a/doc/guides/writing-and-running-benchmarks.md b/benchmark/writing-and-running-benchmarks.md similarity index 83% rename from doc/guides/writing-and-running-benchmarks.md rename to benchmark/writing-and-running-benchmarks.md index 6097830aeeba1f..1db72d22de5324 100644 --- a/doc/guides/writing-and-running-benchmarks.md +++ b/benchmark/writing-and-running-benchmarks.md @@ -8,6 +8,7 @@ * [Running benchmarks](#running-benchmarks) * [Running individual benchmarks](#running-individual-benchmarks) * [Running all benchmarks](#running-all-benchmarks) + * [Filtering benchmarks](#filtering-benchmarks) * [Comparing Node.js versions](#comparing-nodejs-versions) * [Comparing parameters](#comparing-parameters) * [Running Benchmarks on the CI](#running-benchmarks-on-the-ci) @@ -149,6 +150,87 @@ It is possible to execute more groups by adding extra process arguments. $ node benchmark/run.js assert async_hooks ``` +#### Filtering benchmarks + +`benchmark/run.js` and `benchmark/compare.js` have `--filter pattern` and +`--exclude pattern` options, which can be used to run a subset of benchmarks or +to exclude specific benchmarks from the execution, respectively. + +```console +$ node benchmark/run.js --filter "deepequal-b" assert + +assert/deepequal-buffer.js +assert/deepequal-buffer.js method="deepEqual" strict=0 len=100 n=20000: 773,200.4995493788 +assert/deepequal-buffer.js method="notDeepEqual" strict=0 len=100 n=20000: 964,411.712953848 + +$ node benchmark/run.js --exclude "deepequal-b" assert + +assert/deepequal-map.js +assert/deepequal-map.js method="deepEqual_primitiveOnly" strict=0 len=500 n=500: 20,445.06368453332 +assert/deepequal-map.js method="deepEqual_objectOnly" strict=0 len=500 n=500: 1,393.3481642240833 +... + +assert/deepequal-object.js +assert/deepequal-object.js method="deepEqual" strict=0 size=100 n=5000: 1,053.1950937538475 +assert/deepequal-object.js method="notDeepEqual" strict=0 size=100 n=5000: 9,734.193251965213 +... +``` + +`--filter` and `--exclude` can be repeated to provide multiple patterns. + +```console +$ node benchmark/run.js --filter "deepequal-b" --filter "deepequal-m" assert + +assert/deepequal-buffer.js +assert/deepequal-buffer.js method="deepEqual" strict=0 len=100 n=20000: 773,200.4995493788 +assert/deepequal-buffer.js method="notDeepEqual" strict=0 len=100 n=20000: 964,411.712953848 + +assert/deepequal-map.js +assert/deepequal-map.js method="deepEqual_primitiveOnly" strict=0 len=500 n=500: 20,445.06368453332 +assert/deepequal-map.js method="deepEqual_objectOnly" strict=0 len=500 n=500: 1,393.3481642240833 + +$ node benchmark/run.js --exclude "deepequal-b" --exclude "deepequal-m" assert + +assert/deepequal-object.js +assert/deepequal-object.js method="deepEqual" strict=0 size=100 n=5000: 1,053.1950937538475 +assert/deepequal-object.js method="notDeepEqual" strict=0 size=100 n=5000: 9,734.193251965213 +... 
+ +assert/deepequal-prims-and-objs-big-array-set.js +assert/deepequal-prims-and-objs-big-array-set.js method="deepEqual_Array" strict=0 len=20000 n=25 primitive="string": 865.2977195251661 +assert/deepequal-prims-and-objs-big-array-set.js method="notDeepEqual_Array" strict=0 len=20000 n=25 primitive="string": 827.8297281403861 +assert/deepequal-prims-and-objs-big-array-set.js method="deepEqual_Set" strict=0 len=20000 n=25 primitive="string": 28,826.618268696366 +... +``` + +If `--filter` and `--exclude` are used together, `--filter` is applied first, +and `--exclude` is applied on the result of `--filter`: + +```console +$ node benchmark/run.js --filter "bench-" process + +process/bench-env.js +process/bench-env.js operation="get" n=1000000: 2,356,946.0770617095 +process/bench-env.js operation="set" n=1000000: 1,295,176.3266261867 +process/bench-env.js operation="enumerate" n=1000000: 24,592.32231990992 +process/bench-env.js operation="query" n=1000000: 3,625,787.2150573144 +process/bench-env.js operation="delete" n=1000000: 1,521,131.5742806569 + +process/bench-hrtime.js +process/bench-hrtime.js type="raw" n=1000000: 13,178,002.113936031 +process/bench-hrtime.js type="diff" n=1000000: 11,585,435.712423025 +process/bench-hrtime.js type="bigint" n=1000000: 13,342,884.703919787 + +$ node benchmark/run.js --filter "bench-" --exclude "hrtime" process + +process/bench-env.js +process/bench-env.js operation="get" n=1000000: 2,356,946.0770617095 +process/bench-env.js operation="set" n=1000000: 1,295,176.3266261867 +process/bench-env.js operation="enumerate" n=1000000: 24,592.32231990992 +process/bench-env.js operation="query" n=1000000: 3,625,787.2150573144 +process/bench-env.js operation="delete" n=1000000: 1,521,131.5742806569 +``` + ### Comparing Node.js versions To compare the effect of a new Node.js version use the `compare.js` tool. This diff --git a/common.gypi b/common.gypi index b86e5e05d7df9a..2b61a19a29cad0 100644 --- a/common.gypi +++ b/common.gypi @@ -38,7 +38,7 @@ # Reset this number to 0 on major V8 upgrades. # Increment by one for each non-official patch applied to deps/v8. - 'v8_embedder_string': '-node.16', + 'v8_embedder_string': '-node.30', ##### V8 defaults for Node.js ##### @@ -161,8 +161,8 @@ 'ldflags': [ '-Wl,-bbigtoc' ], }], ['OS == "android"', { - 'cflags': [ '-fPIE' ], - 'ldflags': [ '-fPIE', '-pie' ] + 'cflags': [ '-fPIC' ], + 'ldflags': [ '-fPIC' ] }], ], 'msvs_settings': { @@ -221,8 +221,8 @@ ], },], ['OS == "android"', { - 'cflags': [ '-fPIE' ], - 'ldflags': [ '-fPIE', '-pie' ] + 'cflags': [ '-fPIC' ], + 'ldflags': [ '-fPIC' ] }], ], 'msvs_settings': { @@ -361,6 +361,7 @@ [ 'OS in "linux freebsd openbsd solaris android aix cloudabi"', { 'cflags': [ '-Wall', '-Wextra', '-Wno-unused-parameter', ], 'cflags_cc': [ '-fno-rtti', '-fno-exceptions', '-std=gnu++1y' ], + 'defines': [ '__STDC_FORMAT_MACROS' ], 'ldflags': [ '-rdynamic' ], 'target_conditions': [ # The 1990s toolchain on SmartOS can't handle thin archives. diff --git a/configure.py b/configure.py index 20cce214dbb113..6537a62d3954d8 100755 --- a/configure.py +++ b/configure.py @@ -401,13 +401,12 @@ parser.add_option('--use-largepages', action='store_true', dest='node_use_large_pages', - help='build with Large Pages support. This feature is supported only on Linux kernel' + - '>= 2.6.38 with Transparent Huge pages enabled and FreeBSD') + help='This option has no effect. 
--use-largepages is now a runtime option.') parser.add_option('--use-largepages-script-lld', action='store_true', dest='node_use_large_pages_script_lld', - help='link against the LLVM ld linker script. Implies -fuse-ld=lld in the linker flags') + help='This option has no effect. --use-largepages is now a runtime option.') intl_optgroup.add_option('--with-intl', action='store', @@ -444,6 +443,14 @@ 'the icu4c source archive. ' 'v%d.x or later recommended.' % icu_versions['minimum_icu']) +intl_optgroup.add_option('--with-icu-default-data-dir', + action='store', + dest='with_icu_default_data_dir', + help='Path to the icuXXdt{lb}.dat file. If unspecified, ICU data will ' + 'only be read if the NODE_ICU_DATA environment variable or the ' + '--icu-data-dir runtime argument is used. This option has effect ' + 'only when Node.js is built with --with-intl=small-icu.') + parser.add_option('--with-ltcg', action='store_true', dest='with_ltcg', @@ -454,6 +461,11 @@ dest='without_node_snapshot', help='Turn off V8 snapshot integration. Currently experimental.') +parser.add_option('--without-node-code-cache', + action='store_true', + dest='without_node_code_cache', + help='Turn off V8 Code cache integration.') + intl_optgroup.add_option('--download', action='store', dest='download_list', @@ -975,10 +987,17 @@ def configure_node(o): if not options.without_node_snapshot: o['variables']['node_use_node_snapshot'] = b( - not cross_compiling and want_snapshots) + not cross_compiling and want_snapshots and not options.shared) else: o['variables']['node_use_node_snapshot'] = 'false' + if not options.without_node_code_cache: + # TODO(refack): fix this when implementing embedded code-cache when cross-compiling. + o['variables']['node_use_node_code_cache'] = b( + not cross_compiling and not options.shared) + else: + o['variables']['node_use_node_code_cache'] = 'false' + if target_arch == 'arm': configure_arm(o) elif target_arch in ('mips', 'mipsel', 'mips64el'): @@ -1041,27 +1060,12 @@ def configure_node(o): else: o['variables']['node_use_dtrace'] = 'false' - if options.node_use_large_pages and not flavor in ('linux', 'freebsd', 'mac'): - raise Exception( - 'Large pages are supported only on Linux, FreeBSD and MacOS Systems.') - if options.node_use_large_pages and flavor in ('linux', 'freebsd', 'mac'): - if options.shared or options.enable_static: - raise Exception( - 'Large pages are supported only while creating node executable.') - if target_arch!="x64": - raise Exception( - 'Large pages are supported only x64 platform.') - if flavor == 'mac': - info('macOS server with 32GB or more is recommended') - if flavor == 'linux': - # Example full version string: 2.6.32-696.28.1.el6.x86_64 - FULL_KERNEL_VERSION=os.uname()[2] - KERNEL_VERSION=FULL_KERNEL_VERSION.split('-')[0] - if KERNEL_VERSION < "2.6.38" and flavor == 'linux': - raise Exception( - 'Large pages need Linux kernel version >= 2.6.38') - o['variables']['node_use_large_pages'] = b(options.node_use_large_pages) - o['variables']['node_use_large_pages_script_lld'] = b(options.node_use_large_pages_script_lld) + if options.node_use_large_pages or options.node_use_large_pages_script_lld: + warn('''The `--use-largepages` and `--use-largepages-script-lld` options + have no effect during build time. Support for mapping to large pages is + now a runtime option of Node.js. 
Run `node --use-largepages` or add + `--use-largepages` to the `NODE_OPTIONS` environment variable once + Node.js is built to enable mapping to large pages.''') if options.no_ifaddrs: o['defines'] += ['SUNOS_NO_IFADDRS'] @@ -1099,20 +1103,21 @@ def configure_node(o): o['variables']['debug_nghttp2'] = 'false' o['variables']['node_no_browser_globals'] = b(options.no_browser_globals) - # TODO(refack): fix this when implementing embedded code-cache when cross-compiling. - if o['variables']['want_separate_host_toolset'] == 0: - o['variables']['node_code_cache'] = 'yes' # For testing + o['variables']['node_shared'] = b(options.shared) node_module_version = getmoduleversion.get_version() - if sys.platform == 'darwin': + if options.dest_os == 'android': + shlib_suffix = 'so' + elif sys.platform == 'darwin': shlib_suffix = '%s.dylib' elif sys.platform.startswith('aix'): shlib_suffix = '%s.a' else: shlib_suffix = 'so.%s' + if '%s' in shlib_suffix: + shlib_suffix %= node_module_version - shlib_suffix %= node_module_version o['variables']['node_module_version'] = int(node_module_version) o['variables']['shlib_suffix'] = shlib_suffix @@ -1373,6 +1378,7 @@ def write_config(data, name): locs.add('root') # must have root o['variables']['icu_locales'] = ','.join(str(loc) for loc in locs) # We will check a bit later if we can use the canned deps/icu-small + o['variables']['icu_default_data'] = options.with_icu_default_data_dir or '' elif with_intl == 'full-icu': # full ICU o['variables']['v8_enable_i18n_support'] = 1 diff --git a/deps/http_parser/Makefile b/deps/http_parser/Makefile index 6cf63bd35ea12b..23be2884ea81b3 100644 --- a/deps/http_parser/Makefile +++ b/deps/http_parser/Makefile @@ -23,8 +23,8 @@ HELPER ?= BINEXT ?= SOLIBNAME = libhttp_parser SOMAJOR = 2 -SOMINOR = 8 -SOREV = 0 +SOMINOR = 9 +SOREV = 1 ifeq (darwin,$(PLATFORM)) SOEXT ?= dylib SONAME ?= $(SOLIBNAME).$(SOMAJOR).$(SOMINOR).$(SOEXT) diff --git a/deps/http_parser/README.md b/deps/http_parser/README.md index 9c4c9999c2663f..b265d71715f771 100644 --- a/deps/http_parser/README.md +++ b/deps/http_parser/README.md @@ -148,7 +148,7 @@ callback in a threadsafe manner. This allows `http_parser` to be used in multi-threaded contexts. 
Example: -``` +```c typedef struct { socket_t sock; void* buffer; @@ -184,7 +184,7 @@ void http_parser_thread(socket_t sock) { parser supplied to callback functions */ parser->data = my_data; - http_parser_settings settings; / * set up callbacks */ + http_parser_settings settings; /* set up callbacks */ settings.on_url = my_url_callback; /* execute parser */ diff --git a/deps/http_parser/bench.c b/deps/http_parser/bench.c index 5b452fa1cdb6e6..678f5556c59880 100644 --- a/deps/http_parser/bench.c +++ b/deps/http_parser/bench.c @@ -20,10 +20,14 @@ */ #include "http_parser.h" #include +#include #include #include #include +/* 8 gb */ +static const int64_t kBytes = 8LL << 30; + static const char data[] = "POST /joyent/http-parser HTTP/1.1\r\n" "Host: github.com\r\n" @@ -38,7 +42,7 @@ static const char data[] = "Referer: https://github.com/joyent/http-parser\r\n" "Connection: keep-alive\r\n" "Transfer-Encoding: chunked\r\n" - "Cache-Control: max-age=0\r\n\r\nb\r\nhello world\r\n0\r\n\r\n"; + "Cache-Control: max-age=0\r\n\r\nb\r\nhello world\r\n0\r\n"; static const size_t data_len = sizeof(data) - 1; static int on_info(http_parser* p) { @@ -67,13 +71,13 @@ int bench(int iter_count, int silent) { int err; struct timeval start; struct timeval end; - float rps; if (!silent) { err = gettimeofday(&start, NULL); assert(err == 0); } + fprintf(stderr, "req_len=%d\n", (int) data_len); for (i = 0; i < iter_count; i++) { size_t parsed; http_parser_init(&parser, HTTP_REQUEST); @@ -83,17 +87,27 @@ int bench(int iter_count, int silent) { } if (!silent) { + double elapsed; + double bw; + double total; + err = gettimeofday(&end, NULL); assert(err == 0); fprintf(stdout, "Benchmark result:\n"); - rps = (float) (end.tv_sec - start.tv_sec) + - (end.tv_usec - start.tv_usec) * 1e-6f; - fprintf(stdout, "Took %f seconds to run\n", rps); + elapsed = (double) (end.tv_sec - start.tv_sec) + + (end.tv_usec - start.tv_usec) * 1e-6f; + + total = (double) iter_count * data_len; + bw = (double) total / elapsed; + + fprintf(stdout, "%.2f mb | %.2f mb/s | %.2f req/sec | %.2f s\n", + (double) total / (1024 * 1024), + bw / (1024 * 1024), + (double) iter_count / elapsed, + elapsed); - rps = (float) iter_count / rps; - fprintf(stdout, "%f req/sec\n", rps); fflush(stdout); } @@ -101,11 +115,14 @@ int bench(int iter_count, int silent) { } int main(int argc, char** argv) { + int64_t iterations; + + iterations = kBytes / (int64_t) data_len; if (argc == 2 && strcmp(argv[1], "infinite") == 0) { for (;;) - bench(5000000, 1); + bench(iterations, 1); return 0; } else { - return bench(5000000, 0); + return bench(iterations, 0); } } diff --git a/deps/http_parser/http_parser.c b/deps/http_parser/http_parser.c index 46764bced09478..2ea228eb0fdc77 100644 --- a/deps/http_parser/http_parser.c +++ b/deps/http_parser/http_parser.c @@ -51,6 +51,7 @@ static uint32_t max_header_size = HTTP_MAX_HEADER_SIZE; #define SET_ERRNO(e) \ do { \ + parser->nread = nread; \ parser->http_errno = (e); \ } while(0) @@ -58,6 +59,7 @@ do { \ #define UPDATE_STATE(V) p_state = (enum state) (V); #define RETURN(V) \ do { \ + parser->nread = nread; \ parser->state = CURRENT_STATE(); \ return (V); \ } while (0); @@ -151,8 +153,8 @@ do { \ */ #define COUNT_HEADER_SIZE(V) \ do { \ - parser->nread += (V); \ - if (UNLIKELY(parser->nread > max_header_size)) { \ + nread += (uint32_t)(V); \ + if (UNLIKELY(nread > max_header_size)) { \ SET_ERRNO(HPE_HEADER_OVERFLOW); \ goto error; \ } \ @@ -194,7 +196,7 @@ static const char tokens[256] = { /* 24 can 25 em 26 sub 27 esc 28 fs 29 gs 30 
rs 31 us */ 0, 0, 0, 0, 0, 0, 0, 0, /* 32 sp 33 ! 34 " 35 # 36 $ 37 % 38 & 39 ' */ - 0, '!', 0, '#', '$', '%', '&', '\'', + ' ', '!', 0, '#', '$', '%', '&', '\'', /* 40 ( 41 ) 42 * 43 + 44 , 45 - 46 . 47 / */ 0, 0, '*', '+', 0, '-', '.', 0, /* 48 0 49 1 50 2 51 3 52 4 53 5 54 6 55 7 */ @@ -314,6 +316,8 @@ enum state , s_req_http_HT , s_req_http_HTT , s_req_http_HTTP + , s_req_http_I + , s_req_http_IC , s_req_http_major , s_req_http_dot , s_req_http_minor @@ -421,14 +425,14 @@ enum http_host_state (c) == ';' || (c) == ':' || (c) == '&' || (c) == '=' || (c) == '+' || \ (c) == '$' || (c) == ',') -#define STRICT_TOKEN(c) (tokens[(unsigned char)c]) +#define STRICT_TOKEN(c) ((c == ' ') ? 0 : tokens[(unsigned char)c]) #if HTTP_PARSER_STRICT -#define TOKEN(c) (tokens[(unsigned char)c]) +#define TOKEN(c) STRICT_TOKEN(c) #define IS_URL_CHAR(c) (BIT_AT(normal_url_char, (unsigned char)c)) #define IS_HOST_CHAR(c) (IS_ALPHANUM(c) || (c) == '.' || (c) == '-') #else -#define TOKEN(c) ((c == ' ') ? ' ' : tokens[(unsigned char)c]) +#define TOKEN(c) tokens[(unsigned char)c] #define IS_URL_CHAR(c) \ (BIT_AT(normal_url_char, (unsigned char)c) || ((c) & 0x80)) #define IS_HOST_CHAR(c) \ @@ -542,7 +546,7 @@ parse_url_char(enum state s, const char ch) return s_dead; } - /* FALLTHROUGH */ + /* fall through */ case s_req_server_start: case s_req_server: if (ch == '/') { @@ -646,6 +650,7 @@ size_t http_parser_execute (http_parser *parser, const char *status_mark = 0; enum state p_state = (enum state) parser->state; const unsigned int lenient = parser->lenient_http_headers; + uint32_t nread = parser->nread; /* We're in an error state. Don't bother doing anything. */ if (HTTP_PARSER_ERRNO(parser) != HPE_OK) { @@ -757,21 +762,16 @@ size_t http_parser_execute (http_parser *parser, case s_start_res: { + if (ch == CR || ch == LF) + break; parser->flags = 0; parser->content_length = ULLONG_MAX; - switch (ch) { - case 'H': - UPDATE_STATE(s_res_H); - break; - - case CR: - case LF: - break; - - default: - SET_ERRNO(HPE_INVALID_CONSTANT); - goto error; + if (ch == 'H') { + UPDATE_STATE(s_res_H); + } else { + SET_ERRNO(HPE_INVALID_CONSTANT); + goto error; } CALLBACK_NOTIFY(message_begin); @@ -1088,11 +1088,17 @@ size_t http_parser_execute (http_parser *parser, case s_req_http_start: switch (ch) { + case ' ': + break; case 'H': UPDATE_STATE(s_req_http_H); break; - case ' ': - break; + case 'I': + if (parser->method == HTTP_SOURCE) { + UPDATE_STATE(s_req_http_I); + break; + } + /* fall through */ default: SET_ERRNO(HPE_INVALID_CONSTANT); goto error; @@ -1114,6 +1120,16 @@ size_t http_parser_execute (http_parser *parser, UPDATE_STATE(s_req_http_HTTP); break; + case s_req_http_I: + STRICT_CHECK(ch != 'C'); + UPDATE_STATE(s_req_http_IC); + break; + + case s_req_http_IC: + STRICT_CHECK(ch != 'E'); + UPDATE_STATE(s_req_http_HTTP); /* Treat "ICE" as "HTTP". 
*/ + break; + case s_req_http_HTTP: STRICT_CHECK(ch != '/'); UPDATE_STATE(s_req_http_major); @@ -1240,8 +1256,14 @@ size_t http_parser_execute (http_parser *parser, break; switch (parser->header_state) { - case h_general: + case h_general: { + size_t limit = data + len - p; + limit = MIN(limit, max_header_size); + while (p+1 < data + limit && TOKEN(p[1])) { + p++; + } break; + } case h_C: parser->index++; @@ -1341,13 +1363,14 @@ size_t http_parser_execute (http_parser *parser, } } - COUNT_HEADER_SIZE(p - start); - if (p == data + len) { --p; + COUNT_HEADER_SIZE(p - start); break; } + COUNT_HEADER_SIZE(p - start); + if (ch == ':') { UPDATE_STATE(s_header_value_discard_ws); CALLBACK_DATA(header_field); @@ -1371,7 +1394,7 @@ size_t http_parser_execute (http_parser *parser, break; } - /* FALLTHROUGH */ + /* fall through */ case s_header_value_start: { @@ -1413,6 +1436,11 @@ size_t http_parser_execute (http_parser *parser, parser->header_state = h_content_length_num; break; + /* when obsolete line folding is encountered for content length + * continue to the s_header_value state */ + case h_content_length_ws: + break; + case h_connection: /* looking for 'Connection: keep-alive' */ if (c == 'k') { @@ -1468,29 +1496,24 @@ size_t http_parser_execute (http_parser *parser, switch (h_state) { case h_general: - { - const char* p_cr; - const char* p_lf; - size_t limit = data + len - p; - - limit = MIN(limit, max_header_size); - - p_cr = (const char*) memchr(p, CR, limit); - p_lf = (const char*) memchr(p, LF, limit); - if (p_cr != NULL) { - if (p_lf != NULL && p_cr >= p_lf) - p = p_lf; - else - p = p_cr; - } else if (UNLIKELY(p_lf != NULL)) { - p = p_lf; - } else { - p = data + len; + { + const char* limit = p + MIN(data + len - p, max_header_size); + + for (; p != limit; p++) { + ch = *p; + if (ch == CR || ch == LF) { + --p; + break; + } + if (!lenient && !IS_HEADER_CHAR(ch)) { + SET_ERRNO(HPE_INVALID_HEADER_TOKEN); + goto error; + } + } + if (p == data + len) + --p; + break; } - --p; - - break; - } case h_connection: case h_transfer_encoding: @@ -1500,7 +1523,7 @@ size_t http_parser_execute (http_parser *parser, case h_content_length: if (ch == ' ') break; h_state = h_content_length_num; - /* FALLTHROUGH */ + /* fall through */ case h_content_length_num: { @@ -1636,10 +1659,10 @@ size_t http_parser_execute (http_parser *parser, } parser->header_state = h_state; - COUNT_HEADER_SIZE(p - start); - if (p == data + len) --p; + + COUNT_HEADER_SIZE(p - start); break; } @@ -1657,6 +1680,10 @@ size_t http_parser_execute (http_parser *parser, case s_header_value_lws: { if (ch == ' ' || ch == '\t') { + if (parser->header_state == h_content_length_num) { + /* treat obsolete line folding as space */ + parser->header_state = h_content_length_ws; + } UPDATE_STATE(s_header_value_start); REEXECUTE(); } @@ -1709,6 +1736,11 @@ size_t http_parser_execute (http_parser *parser, case h_transfer_encoding_chunked: parser->flags |= F_CHUNKED; break; + case h_content_length: + /* do not allow empty content length */ + SET_ERRNO(HPE_INVALID_CONTENT_LENGTH); + goto error; + break; default: break; } @@ -1772,7 +1804,7 @@ size_t http_parser_execute (http_parser *parser, case 2: parser->upgrade = 1; - /* FALLTHROUGH */ + /* fall through */ case 1: parser->flags |= F_SKIPBODY; break; @@ -1796,6 +1828,7 @@ size_t http_parser_execute (http_parser *parser, STRICT_CHECK(ch != LF); parser->nread = 0; + nread = 0; hasBody = parser->flags & F_CHUNKED || (parser->content_length > 0 && parser->content_length != ULLONG_MAX); @@ -1890,7 +1923,7 
@@ size_t http_parser_execute (http_parser *parser, case s_chunk_size_start: { - assert(parser->nread == 1); + assert(nread == 1); assert(parser->flags & F_CHUNKED); unhex_val = unhex[(unsigned char)ch]; @@ -1958,6 +1991,7 @@ size_t http_parser_execute (http_parser *parser, STRICT_CHECK(ch != LF); parser->nread = 0; + nread = 0; if (parser->content_length == 0) { parser->flags |= F_TRAILING; @@ -2004,6 +2038,7 @@ size_t http_parser_execute (http_parser *parser, assert(parser->flags & F_CHUNKED); STRICT_CHECK(ch != LF); parser->nread = 0; + nread = 0; UPDATE_STATE(s_chunk_size_start); CALLBACK_NOTIFY(chunk_complete); break; @@ -2015,7 +2050,7 @@ size_t http_parser_execute (http_parser *parser, } } - /* Run callbacks for any marks that we have leftover after we ran our of + /* Run callbacks for any marks that we have leftover after we ran out of * bytes. There should be at most one of these set, so it's OK to invoke * them in series (unset marks will not result in callbacks). * @@ -2097,6 +2132,16 @@ http_method_str (enum http_method m) return ELEM_AT(method_strings, m, ""); } +const char * +http_status_str (enum http_status s) +{ + switch (s) { +#define XX(num, name, string) case HTTP_STATUS_##name: return #string; + HTTP_STATUS_MAP(XX) +#undef XX + default: return ""; + } +} void http_parser_init (http_parser *parser, enum http_parser_type t) @@ -2157,7 +2202,7 @@ http_parse_host_char(enum http_host_state s, const char ch) { return s_http_host; } - /* FALLTHROUGH */ + /* fall through */ case s_http_host_v6_end: if (ch == ':') { return s_http_host_port_start; @@ -2170,7 +2215,7 @@ http_parse_host_char(enum http_host_state s, const char ch) { return s_http_host_v6_end; } - /* FALLTHROUGH */ + /* fall through */ case s_http_host_v6_start: if (IS_HEX(ch) || ch == ':' || ch == '.') { return s_http_host_v6; @@ -2186,7 +2231,7 @@ http_parse_host_char(enum http_host_state s, const char ch) { return s_http_host_v6_end; } - /* FALLTHROUGH */ + /* fall through */ case s_http_host_v6_zone_start: /* RFC 6874 Zone ID consists of 1*( unreserved / pct-encoded) */ if (IS_ALPHANUM(ch) || ch == '%' || ch == '.' || ch == '-' || ch == '_' || @@ -2211,12 +2256,13 @@ http_parse_host_char(enum http_host_state s, const char ch) { static int http_parse_host(const char * buf, struct http_parser_url *u, int found_at) { - assert(u->field_set & (1 << UF_HOST)); enum http_host_state s; const char *p; size_t buflen = u->field_data[UF_HOST].off + u->field_data[UF_HOST].len; + assert(u->field_set & (1 << UF_HOST)); + u->field_data[UF_HOST].len = 0; s = found_at ? 
s_http_userinfo_start : s_http_host_start; @@ -2231,14 +2277,14 @@ http_parse_host(const char * buf, struct http_parser_url *u, int found_at) { switch(new_s) { case s_http_host: if (s != s_http_host) { - u->field_data[UF_HOST].off = p - buf; + u->field_data[UF_HOST].off = (uint16_t)(p - buf); } u->field_data[UF_HOST].len++; break; case s_http_host_v6: if (s != s_http_host_v6) { - u->field_data[UF_HOST].off = p - buf; + u->field_data[UF_HOST].off = (uint16_t)(p - buf); } u->field_data[UF_HOST].len++; break; @@ -2250,7 +2296,7 @@ http_parse_host(const char * buf, struct http_parser_url *u, int found_at) { case s_http_host_port: if (s != s_http_host_port) { - u->field_data[UF_PORT].off = p - buf; + u->field_data[UF_PORT].off = (uint16_t)(p - buf); u->field_data[UF_PORT].len = 0; u->field_set |= (1 << UF_PORT); } @@ -2259,7 +2305,7 @@ http_parse_host(const char * buf, struct http_parser_url *u, int found_at) { case s_http_userinfo: if (s != s_http_userinfo) { - u->field_data[UF_USERINFO].off = p - buf ; + u->field_data[UF_USERINFO].off = (uint16_t)(p - buf); u->field_data[UF_USERINFO].len = 0; u->field_set |= (1 << UF_USERINFO); } @@ -2304,6 +2350,10 @@ http_parser_parse_url(const char *buf, size_t buflen, int is_connect, enum http_parser_url_fields uf, old_uf; int found_at = 0; + if (buflen == 0) { + return 1; + } + u->port = u->field_set = 0; s = is_connect ? s_req_server_start : s_req_spaces_before_url; old_uf = UF_MAX; @@ -2331,7 +2381,7 @@ http_parser_parse_url(const char *buf, size_t buflen, int is_connect, case s_req_server_with_at: found_at = 1; - /* FALLTHROUGH */ + /* fall through */ case s_req_server: uf = UF_HOST; break; @@ -2359,7 +2409,7 @@ http_parser_parse_url(const char *buf, size_t buflen, int is_connect, continue; } - u->field_data[uf].off = p - buf; + u->field_data[uf].off = (uint16_t)(p - buf); u->field_data[uf].len = 1; u->field_set |= (1 << uf); @@ -2422,6 +2472,7 @@ http_parser_pause(http_parser *parser, int paused) { */ if (HTTP_PARSER_ERRNO(parser) == HPE_OK || HTTP_PARSER_ERRNO(parser) == HPE_PAUSED) { + uint32_t nread = parser->nread; /* used by the SET_ERRNO macro */ SET_ERRNO((paused) ? HPE_PAUSED : HPE_OK); } else { assert(0 && "Attempting to pause parser in error state"); diff --git a/deps/http_parser/http_parser.h b/deps/http_parser/http_parser.h index ea7bafef2c3178..471250bc798cdc 100644 --- a/deps/http_parser/http_parser.h +++ b/deps/http_parser/http_parser.h @@ -26,8 +26,8 @@ extern "C" { /* Also update SONAME in the Makefile whenever you change these. */ #define HTTP_PARSER_VERSION_MAJOR 2 -#define HTTP_PARSER_VERSION_MINOR 8 -#define HTTP_PARSER_VERSION_PATCH 0 +#define HTTP_PARSER_VERSION_MINOR 9 +#define HTTP_PARSER_VERSION_PATCH 1 #include #if defined(_WIN32) && !defined(__MINGW32__) && \ @@ -407,6 +407,9 @@ int http_should_keep_alive(const http_parser *parser); /* Returns a string version of the HTTP method. */ const char *http_method_str(enum http_method m); +/* Returns a string version of the HTTP status code. */ +const char *http_status_str(enum http_status s); + /* Return a string name of the given error */ const char *http_errno_name(enum http_errno err); diff --git a/deps/http_parser/test.c b/deps/http_parser/test.c index cb445cea860701..0140a18b7a2c66 100644 --- a/deps/http_parser/test.c +++ b/deps/http_parser/test.c @@ -27,9 +27,7 @@ #include #if defined(__APPLE__) -# undef strlcat # undef strlncpy -# undef strlcpy #endif /* defined(__APPLE__) */ #undef TRUE @@ -43,7 +41,9 @@ #define MIN(a,b) ((a) < (b) ? 
(a) : (b)) -static http_parser *parser; +#define ARRAY_SIZE(x) (sizeof(x) / sizeof(*x)) + +static http_parser parser; struct message { const char *name; // for debugging purposes @@ -153,10 +153,10 @@ const struct message requests[] = ,.body= "" } -#define DUMBFUCK 2 -, {.name= "dumbfuck" +#define DUMBLUCK 2 +, {.name= "dumbluck" ,.type= HTTP_REQUEST - ,.raw= "GET /dumbfuck HTTP/1.1\r\n" + ,.raw= "GET /dumbluck HTTP/1.1\r\n" "aaaaaaaaaaaaa:++++++++++\r\n" "\r\n" ,.should_keep_alive= TRUE @@ -166,8 +166,8 @@ const struct message requests[] = ,.method= HTTP_GET ,.query_string= "" ,.fragment= "" - ,.request_path= "/dumbfuck" - ,.request_url= "/dumbfuck" + ,.request_path= "/dumbluck" + ,.request_url= "/dumbluck" ,.num_headers= 1 ,.headers= { { "aaaaaaaaaaaaa", "++++++++++" } @@ -371,13 +371,13 @@ const struct message requests[] = ,.chunk_lengths= { 5, 6 } } -#define CHUNKED_W_BULLSHIT_AFTER_LENGTH 11 -, {.name= "with bullshit after the length" +#define CHUNKED_W_NONSENSE_AFTER_LENGTH 11 +, {.name= "with nonsense after the length" ,.type= HTTP_REQUEST - ,.raw= "POST /chunked_w_bullshit_after_length HTTP/1.1\r\n" + ,.raw= "POST /chunked_w_nonsense_after_length HTTP/1.1\r\n" "Transfer-Encoding: chunked\r\n" "\r\n" - "5; ihatew3;whatthefuck=aretheseparametersfor\r\nhello\r\n" + "5; ilovew3;whattheluck=aretheseparametersfor\r\nhello\r\n" "6; blahblah; blah\r\n world\r\n" "0\r\n" "\r\n" @@ -388,8 +388,8 @@ const struct message requests[] = ,.method= HTTP_POST ,.query_string= "" ,.fragment= "" - ,.request_path= "/chunked_w_bullshit_after_length" - ,.request_url= "/chunked_w_bullshit_after_length" + ,.request_path= "/chunked_w_nonsense_after_length" + ,.request_url= "/chunked_w_nonsense_after_length" ,.num_headers= 1 ,.headers= { { "Transfer-Encoding", "chunked" } @@ -1174,7 +1174,25 @@ const struct message requests[] = ,.body= "" } -, {.name= NULL } /* sentinel */ +#define SOURCE_ICE_REQUEST 42 +, {.name = "source request" + ,.type= HTTP_REQUEST + ,.raw= "SOURCE /music/sweet/music ICE/1.0\r\n" + "Host: example.com\r\n" + "\r\n" + ,.should_keep_alive= FALSE + ,.message_complete_on_eof= FALSE + ,.http_major= 1 + ,.http_minor= 0 + ,.method= HTTP_SOURCE + ,.request_path= "/music/sweet/music" + ,.request_url= "/music/sweet/music" + ,.query_string= "" + ,.fragment= "" + ,.num_headers= 1 + ,.headers= { { "Host", "example.com" } } + ,.body= "" + } }; /* * R E S P O N S E S * */ @@ -1952,8 +1970,6 @@ const struct message responses[] = ,.num_chunks_complete= 3 ,.chunk_lengths= { 2, 2 } } - -, {.name= NULL } /* sentinel */ }; /* strnlen() is a POSIX.2008 addition. 
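The test harness now sizes its fixture tables with the `ARRAY_SIZE` macro instead of walking to a `NULL` sentinel entry, which is why the sentinel rows are dropped below. The pattern in isolation (an illustrative sketch, not test.c itself):

```c
#include <stdio.h>
#include <stddef.h>

#define ARRAY_SIZE(x) (sizeof(x) / sizeof(*x))

static const char* fixtures[] = { "curl get", "firefox get", "dumbluck" };

int main(void) {
  size_t i;

  /* The element count comes from the array type, so no sentinel entry is
   * needed and none can be forgotten when new fixtures are appended. */
  for (i = 0; i < ARRAY_SIZE(fixtures); i++)
    printf("%zu: %s\n", i, fixtures[i]);
  return 0;
}
```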
Can't rely on it being available so @@ -1993,12 +2009,6 @@ strlncat(char *dst, size_t len, const char *src, size_t n) return slen + dlen; } -size_t -strlcat(char *dst, const char *src, size_t len) -{ - return strlncat(dst, len, src, (size_t) -1); -} - size_t strlncpy(char *dst, size_t len, const char *src, size_t n) { @@ -2017,16 +2027,10 @@ strlncpy(char *dst, size_t len, const char *src, size_t n) return slen; } -size_t -strlcpy(char *dst, const char *src, size_t len) -{ - return strlncpy(dst, len, src, (size_t) -1); -} - int request_url_cb (http_parser *p, const char *buf, size_t len) { - assert(p == parser); + assert(p == &parser); strlncat(messages[num_messages].request_url, sizeof(messages[num_messages].request_url), buf, @@ -2037,7 +2041,7 @@ request_url_cb (http_parser *p, const char *buf, size_t len) int header_field_cb (http_parser *p, const char *buf, size_t len) { - assert(p == parser); + assert(p == &parser); struct message *m = &messages[num_messages]; if (m->last_header_element != FIELD) @@ -2056,7 +2060,7 @@ header_field_cb (http_parser *p, const char *buf, size_t len) int header_value_cb (http_parser *p, const char *buf, size_t len) { - assert(p == parser); + assert(p == &parser); struct message *m = &messages[num_messages]; strlncat(m->headers[m->num_headers-1][1], @@ -2085,7 +2089,7 @@ check_body_is_final (const http_parser *p) int body_cb (http_parser *p, const char *buf, size_t len) { - assert(p == parser); + assert(p == &parser); strlncat(messages[num_messages].body, sizeof(messages[num_messages].body), buf, @@ -2099,7 +2103,7 @@ body_cb (http_parser *p, const char *buf, size_t len) int count_body_cb (http_parser *p, const char *buf, size_t len) { - assert(p == parser); + assert(p == &parser); assert(buf); messages[num_messages].body_size += len; check_body_is_final(p); @@ -2109,7 +2113,8 @@ count_body_cb (http_parser *p, const char *buf, size_t len) int message_begin_cb (http_parser *p) { - assert(p == parser); + assert(p == &parser); + assert(!messages[num_messages].message_begin_cb_called); messages[num_messages].message_begin_cb_called = TRUE; return 0; } @@ -2117,21 +2122,22 @@ message_begin_cb (http_parser *p) int headers_complete_cb (http_parser *p) { - assert(p == parser); - messages[num_messages].method = parser->method; - messages[num_messages].status_code = parser->status_code; - messages[num_messages].http_major = parser->http_major; - messages[num_messages].http_minor = parser->http_minor; + assert(p == &parser); + messages[num_messages].method = parser.method; + messages[num_messages].status_code = parser.status_code; + messages[num_messages].http_major = parser.http_major; + messages[num_messages].http_minor = parser.http_minor; messages[num_messages].headers_complete_cb_called = TRUE; - messages[num_messages].should_keep_alive = http_should_keep_alive(parser); + messages[num_messages].should_keep_alive = http_should_keep_alive(&parser); return 0; } int message_complete_cb (http_parser *p) { - assert(p == parser); - if (messages[num_messages].should_keep_alive != http_should_keep_alive(parser)) + assert(p == &parser); + if (messages[num_messages].should_keep_alive != + http_should_keep_alive(&parser)) { fprintf(stderr, "\n\n *** Error http_should_keep_alive() should have same " "value in both on_message_complete and on_headers_complete " @@ -2162,7 +2168,7 @@ message_complete_cb (http_parser *p) int response_status_cb (http_parser *p, const char *buf, size_t len) { - assert(p == parser); + assert(p == &parser); messages[num_messages].status_cb_called = 
TRUE; @@ -2176,7 +2182,7 @@ response_status_cb (http_parser *p, const char *buf, size_t len) int chunk_header_cb (http_parser *p) { - assert(p == parser); + assert(p == &parser); int chunk_idx = messages[num_messages].num_chunks; messages[num_messages].num_chunks++; if (chunk_idx < MAX_CHUNKS) { @@ -2189,7 +2195,7 @@ chunk_header_cb (http_parser *p) int chunk_complete_cb (http_parser *p) { - assert(p == parser); + assert(p == &parser); /* Here we want to verify that each chunk_header_cb is matched by a * chunk_complete_cb, so not only should the total number of calls to @@ -2394,7 +2400,7 @@ connect_headers_complete_cb (http_parser *p) int connect_message_complete_cb (http_parser *p) { - messages[num_messages].should_keep_alive = http_should_keep_alive(parser); + messages[num_messages].should_keep_alive = http_should_keep_alive(&parser); return message_complete_cb(p); } @@ -2467,30 +2473,15 @@ void parser_init (enum http_parser_type type) { num_messages = 0; - - assert(parser == NULL); - - parser = malloc(sizeof(http_parser)); - - http_parser_init(parser, type); - + http_parser_init(&parser, type); memset(&messages, 0, sizeof messages); - -} - -void -parser_free () -{ - assert(parser); - free(parser); - parser = NULL; } size_t parse (const char *buf, size_t len) { size_t nparsed; currently_parsing_eof = (len == 0); - nparsed = http_parser_execute(parser, &settings, buf, len); + nparsed = http_parser_execute(&parser, &settings, buf, len); return nparsed; } @@ -2498,7 +2489,7 @@ size_t parse_count_body (const char *buf, size_t len) { size_t nparsed; currently_parsing_eof = (len == 0); - nparsed = http_parser_execute(parser, &settings_count_body, buf, len); + nparsed = http_parser_execute(&parser, &settings_count_body, buf, len); return nparsed; } @@ -2509,7 +2500,7 @@ size_t parse_pause (const char *buf, size_t len) currently_parsing_eof = (len == 0); current_pause_parser = &s; - nparsed = http_parser_execute(parser, current_pause_parser, buf, len); + nparsed = http_parser_execute(&parser, current_pause_parser, buf, len); return nparsed; } @@ -2517,7 +2508,7 @@ size_t parse_connect (const char *buf, size_t len) { size_t nparsed; currently_parsing_eof = (len == 0); - nparsed = http_parser_execute(parser, &settings_connect, buf, len); + nparsed = http_parser_execute(&parser, &settings_connect, buf, len); return nparsed; } @@ -2737,7 +2728,7 @@ static void print_error (const char *raw, size_t error_location) { fprintf(stderr, "\n*** %s ***\n\n", - http_errno_description(HTTP_PARSER_ERRNO(parser))); + http_errno_description(HTTP_PARSER_ERRNO(&parser))); int this_line = 0, char_len = 0; size_t i, j, len = strlen(raw), error_location_line = 0; @@ -3280,6 +3271,24 @@ const struct url_test url_tests[] = ,.rv=1 /* s_dead */ } +, {.name="empty url" + ,.url="" + ,.is_connect=0 + ,.rv=1 + } + +, {.name="NULL url" + ,.url=NULL + ,.is_connect=0 + ,.rv=1 + } + +, {.name="full of spaces url" + ,.url=" " + ,.is_connect=0 + ,.rv=1 + } + #if HTTP_PARSER_STRICT , {.name="tab in URL" @@ -3364,7 +3373,7 @@ test_parse_url (void) memset(&u, 0, sizeof(u)); rv = http_parser_parse_url(test->url, - strlen(test->url), + test->url ? 
strlen(test->url) : 0, test->is_connect, &u); @@ -3404,6 +3413,14 @@ test_method_str (void) assert(0 == strcmp("", http_method_str(1337))); } +void +test_status_str (void) +{ + assert(0 == strcmp("OK", http_status_str(HTTP_STATUS_OK))); + assert(0 == strcmp("Not Found", http_status_str(HTTP_STATUS_NOT_FOUND))); + assert(0 == strcmp("", http_status_str(1337))); +} + void test_message (const struct message *message) { @@ -3418,9 +3435,18 @@ test_message (const struct message *message) size_t msg2len = raw_len - msg1len; if (msg1len) { + assert(num_messages == 0); + messages[0].headers_complete_cb_called = FALSE; + read = parse(msg1, msg1len); - if (message->upgrade && parser->upgrade && num_messages > 0) { + if (!messages[0].headers_complete_cb_called && parser.nread != read) { + assert(parser.nread == read); + print_error(msg1, read); + abort(); + } + + if (message->upgrade && parser.upgrade && num_messages > 0) { messages[num_messages - 1].upgrade = msg1 + read; goto test; } @@ -3434,7 +3460,7 @@ test_message (const struct message *message) read = parse(msg2, msg2len); - if (message->upgrade && parser->upgrade) { + if (message->upgrade && parser.upgrade) { messages[num_messages - 1].upgrade = msg2 + read; goto test; } @@ -3459,8 +3485,6 @@ test_message (const struct message *message) } if(!message_eq(0, 0, message)) abort(); - - parser_free(); } } @@ -3496,8 +3520,6 @@ test_message_count_body (const struct message *message) } if(!message_eq(0, 0, message)) abort(); - - parser_free(); } void @@ -3510,11 +3532,9 @@ test_simple_type (const char *buf, enum http_errno err; parse(buf, strlen(buf)); - err = HTTP_PARSER_ERRNO(parser); + err = HTTP_PARSER_ERRNO(&parser); parse(NULL, 0); - parser_free(); - /* In strict mode, allow us to pass with an unexpected HPE_STRICT as * long as the caller isn't expecting success. 
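`http_status_str()` is the status-code counterpart of `http_method_str()`: known codes map to their reason phrases and unknown values fall back to an empty string, which is exactly what the new `test_status_str()` asserts. A minimal use (illustrative sketch):

```c
#include <stdio.h>
#include "http_parser.h"

int main(void) {
  printf("%s\n", http_status_str(HTTP_STATUS_OK));            /* "OK" */
  printf("%s\n", http_status_str(HTTP_STATUS_NOT_FOUND));     /* "Not Found" */
  printf("[%s]\n", http_status_str((enum http_status) 1337)); /* empty string for unknown codes */
  return 0;
}
```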
*/ @@ -3854,7 +3874,7 @@ test_multiple3 (const struct message *r1, const struct message *r2, const struct read = parse(total, strlen(total)); - if (parser->upgrade) { + if (parser.upgrade) { upgrade_message_fix(total, read, 3, r1, r2, r3); goto test; } @@ -3881,8 +3901,6 @@ test_multiple3 (const struct message *r1, const struct message *r2, const struct if (!message_eq(0, 0, r1)) abort(); if (message_count > 1 && !message_eq(1, 0, r2)) abort(); if (message_count > 2 && !message_eq(2, 0, r3)) abort(); - - parser_free(); } /* SCAN through every possible breaking to make sure the @@ -3936,9 +3954,17 @@ test_scan (const struct message *r1, const struct message *r2, const struct mess strlncpy(buf3, sizeof(buf1), total+j, buf3_len); buf3[buf3_len] = 0; + assert(num_messages == 0); + messages[0].headers_complete_cb_called = FALSE; + read = parse(buf1, buf1_len); - if (parser->upgrade) goto test; + if (!messages[0].headers_complete_cb_called && parser.nread != read) { + print_error(buf1, read); + goto error; + } + + if (parser.upgrade) goto test; if (read != buf1_len) { print_error(buf1, read); @@ -3947,7 +3973,7 @@ test_scan (const struct message *r1, const struct message *r2, const struct mess read += parse(buf2, buf2_len); - if (parser->upgrade) goto test; + if (parser.upgrade) goto test; if (read != buf1_len + buf2_len) { print_error(buf2, read); @@ -3956,7 +3982,7 @@ test_scan (const struct message *r1, const struct message *r2, const struct mess read += parse(buf3, buf3_len); - if (parser->upgrade) goto test; + if (parser.upgrade) goto test; if (read != buf1_len + buf2_len + buf3_len) { print_error(buf3, read); @@ -3966,7 +3992,7 @@ test_scan (const struct message *r1, const struct message *r2, const struct mess parse(NULL, 0); test: - if (parser->upgrade) { + if (parser.upgrade) { upgrade_message_fix(total, read, 3, r1, r2, r3); } @@ -3990,8 +4016,6 @@ test_scan (const struct message *r1, const struct message *r2, const struct mess fprintf(stderr, "\n\nError matching messages[2] in test_scan.\n"); goto error; } - - parser_free(); } } } @@ -4055,7 +4079,7 @@ test_message_pause (const struct message *msg) // completion callback. 
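The new assertions above lean on a property of the reworked `nread` bookkeeping: as long as `on_headers_complete` has not fired, `parser.nread` matches the number of bytes `http_parser_execute()` has consumed. A standalone sketch of that check, mirroring what the test asserts (assumes no callbacks and a request deliberately split mid-header):

```c
#include <assert.h>
#include <string.h>
#include "http_parser.h"

int main(void) {
  /* Truncated mid-header, so the header section is not complete yet. */
  static const char partial[] = "GET / HTTP/1.1\r\nHost: examp";
  http_parser parser;
  http_parser_settings settings;
  size_t nparsed;

  memset(&settings, 0, sizeof(settings));
  http_parser_init(&parser, HTTP_REQUEST);

  nparsed = http_parser_execute(&parser, &settings, partial, sizeof(partial) - 1);

  assert(nparsed == sizeof(partial) - 1);  /* nothing rejected yet */
  assert(parser.nread == nparsed);         /* header byte count stays in sync */
  return 0;
}
```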
if (messages[0].message_complete_cb_called && msg->upgrade && - parser->upgrade) { + parser.upgrade) { messages[0].upgrade = buf + nread; goto test; } @@ -4063,17 +4087,16 @@ test_message_pause (const struct message *msg) if (nread < buflen) { // Not much do to if we failed a strict-mode check - if (HTTP_PARSER_ERRNO(parser) == HPE_STRICT) { - parser_free(); + if (HTTP_PARSER_ERRNO(&parser) == HPE_STRICT) { return; } - assert (HTTP_PARSER_ERRNO(parser) == HPE_PAUSED); + assert (HTTP_PARSER_ERRNO(&parser) == HPE_PAUSED); } buf += nread; buflen -= nread; - http_parser_pause(parser, 0); + http_parser_pause(&parser, 0); } while (buflen > 0); nread = parse_pause(NULL, 0); @@ -4086,8 +4109,6 @@ test_message_pause (const struct message *msg) } if(!message_eq(0, 0, msg)) abort(); - - parser_free(); } /* Verify that body and next message won't be parsed in responses to CONNECT */ @@ -4107,17 +4128,12 @@ test_message_connect (const struct message *msg) } if(!message_eq(0, 1, msg)) abort(); - - parser_free(); } int main (void) { - parser = NULL; - int i, j, k; - int request_count; - int response_count; + unsigned i, j, k; unsigned long version; unsigned major; unsigned minor; @@ -4131,13 +4147,11 @@ main (void) printf("sizeof(http_parser) = %u\n", (unsigned int)sizeof(http_parser)); - for (request_count = 0; requests[request_count].name; request_count++); - for (response_count = 0; responses[response_count].name; response_count++); - //// API test_preserve_data(); test_parse_url(); test_method_str(); + test_status_str(); //// NREAD test_header_nread_value(); @@ -4168,6 +4182,13 @@ main (void) test_invalid_header_field_token_error(HTTP_RESPONSE); test_invalid_header_field_content_error(HTTP_RESPONSE); + test_simple_type( + "POST / HTTP/1.1\r\n" + "Content-Length:\r\n" // empty + "\r\n", + HPE_INVALID_CONTENT_LENGTH, + HTTP_REQUEST); + test_simple_type( "POST / HTTP/1.1\r\n" "Content-Length: 42 \r\n" // Note the surrounding whitespace. 
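The first of the new cases below exercises the header-value change: an empty `Content-Length` value is now rejected outright with `HPE_INVALID_CONTENT_LENGTH`. In isolation the behavior looks like this (illustrative sketch):

```c
#include <assert.h>
#include <string.h>
#include "http_parser.h"

int main(void) {
  static const char msg[] =
      "POST / HTTP/1.1\r\n"
      "Content-Length:\r\n"   /* empty value */
      "\r\n";
  http_parser parser;
  http_parser_settings settings;

  memset(&settings, 0, sizeof(settings));
  http_parser_init(&parser, HTTP_REQUEST);
  http_parser_execute(&parser, &settings, msg, sizeof(msg) - 1);

  /* An empty Content-Length value is a hard parse error. */
  assert(HTTP_PARSER_ERRNO(&parser) == HPE_INVALID_CONTENT_LENGTH);
  return 0;
}
```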
@@ -4189,6 +4210,20 @@ main (void) HPE_INVALID_CONTENT_LENGTH, HTTP_REQUEST); + test_simple_type( + "POST / HTTP/1.1\r\n" + "Content-Length: 42\r\n" + " Hello world!\r\n", + HPE_INVALID_CONTENT_LENGTH, + HTTP_REQUEST); + + test_simple_type( + "POST / HTTP/1.1\r\n" + "Content-Length: 42\r\n" + " \r\n", + HPE_OK, + HTTP_REQUEST); + //// RESPONSES test_simple_type("HTP/1.1 200 OK\r\n\r\n", HPE_INVALID_VERSION, HTTP_RESPONSE); @@ -4196,24 +4231,25 @@ main (void) test_simple_type("HTTP/11.1 200 OK\r\n\r\n", HPE_INVALID_VERSION, HTTP_RESPONSE); test_simple_type("HTTP/1.01 200 OK\r\n\r\n", HPE_INVALID_VERSION, HTTP_RESPONSE); test_simple_type("HTTP/1.1\t200 OK\r\n\r\n", HPE_INVALID_VERSION, HTTP_RESPONSE); + test_simple_type("\rHTTP/1.1\t200 OK\r\n\r\n", HPE_INVALID_VERSION, HTTP_RESPONSE); - for (i = 0; i < response_count; i++) { + for (i = 0; i < ARRAY_SIZE(responses); i++) { test_message(&responses[i]); } - for (i = 0; i < response_count; i++) { + for (i = 0; i < ARRAY_SIZE(responses); i++) { test_message_pause(&responses[i]); } - for (i = 0; i < response_count; i++) { + for (i = 0; i < ARRAY_SIZE(responses); i++) { test_message_connect(&responses[i]); } - for (i = 0; i < response_count; i++) { + for (i = 0; i < ARRAY_SIZE(responses); i++) { if (!responses[i].should_keep_alive) continue; - for (j = 0; j < response_count; j++) { + for (j = 0; j < ARRAY_SIZE(responses); j++) { if (!responses[j].should_keep_alive) continue; - for (k = 0; k < response_count; k++) { + for (k = 0; k < ARRAY_SIZE(responses); k++) { test_multiple3(&responses[i], &responses[j], &responses[k]); } } @@ -4273,11 +4309,16 @@ main (void) /// REQUESTS + test_simple("GET / IHTTP/1.0\r\n\r\n", HPE_INVALID_CONSTANT); + test_simple("GET / ICE/1.0\r\n\r\n", HPE_INVALID_CONSTANT); test_simple("GET / HTP/1.1\r\n\r\n", HPE_INVALID_VERSION); test_simple("GET / HTTP/01.1\r\n\r\n", HPE_INVALID_VERSION); test_simple("GET / HTTP/11.1\r\n\r\n", HPE_INVALID_VERSION); test_simple("GET / HTTP/1.01\r\n\r\n", HPE_INVALID_VERSION); + test_simple("GET / HTTP/1.0\r\nHello: w\1rld\r\n\r\n", HPE_INVALID_HEADER_TOKEN); + test_simple("GET / HTTP/1.0\r\nHello: woooo\2rld\r\n\r\n", HPE_INVALID_HEADER_TOKEN); + // Extended characters - see nodejs/test/parallel/test-http-headers-obstext.js test_simple("GET / HTTP/1.1\r\n" "Test: Düsseldorf\r\n", @@ -4360,9 +4401,9 @@ main (void) "\r\n", HPE_INVALID_HEADER_TOKEN); - const char *dumbfuck2 = + const char *dumbluck2 = "GET / HTTP/1.1\r\n" - "X-SSL-Bullshit: -----BEGIN CERTIFICATE-----\r\n" + "X-SSL-Nonsense: -----BEGIN CERTIFICATE-----\r\n" "\tMIIFbTCCBFWgAwIBAgICH4cwDQYJKoZIhvcNAQEFBQAwcDELMAkGA1UEBhMCVUsx\r\n" "\tETAPBgNVBAoTCGVTY2llbmNlMRIwEAYDVQQLEwlBdXRob3JpdHkxCzAJBgNVBAMT\r\n" "\tAkNBMS0wKwYJKoZIhvcNAQkBFh5jYS1vcGVyYXRvckBncmlkLXN1cHBvcnQuYWMu\r\n" @@ -4395,7 +4436,7 @@ main (void) "\tRA==\r\n" "\t-----END CERTIFICATE-----\r\n" "\r\n"; - test_simple(dumbfuck2, HPE_OK); + test_simple(dumbluck2, HPE_OK); const char *corrupted_connection = "GET / HTTP/1.1\r\n" @@ -4429,19 +4470,19 @@ main (void) /* check to make sure our predefined requests are okay */ - for (i = 0; requests[i].name; i++) { + for (i = 0; i < ARRAY_SIZE(requests); i++) { test_message(&requests[i]); } - for (i = 0; i < request_count; i++) { + for (i = 0; i < ARRAY_SIZE(requests); i++) { test_message_pause(&requests[i]); } - for (i = 0; i < request_count; i++) { + for (i = 0; i < ARRAY_SIZE(requests); i++) { if (!requests[i].should_keep_alive) continue; - for (j = 0; j < request_count; j++) { + for (j = 0; j < ARRAY_SIZE(requests); j++) { 
if (!requests[j].should_keep_alive) continue; - for (k = 0; k < request_count; k++) { + for (k = 0; k < ARRAY_SIZE(requests); k++) { test_multiple3(&requests[i], &requests[j], &requests[k]); } } @@ -4462,7 +4503,7 @@ main (void) printf("request scan 3/4 "); test_scan( &requests[TWO_CHUNKS_MULT_ZERO_END] , &requests[CHUNKED_W_TRAILING_HEADERS] - , &requests[CHUNKED_W_BULLSHIT_AFTER_LENGTH] + , &requests[CHUNKED_W_NONSENSE_AFTER_LENGTH] ); printf("request scan 4/4 "); diff --git a/deps/openssl/openssl_common.gypi b/deps/openssl/openssl_common.gypi index 67640a6325eb52..93b87064b3fb3e 100644 --- a/deps/openssl/openssl_common.gypi +++ b/deps/openssl/openssl_common.gypi @@ -24,7 +24,7 @@ }, 'OS=="win"', { 'defines': [ ## default of Win. See INSTALL in openssl repo. - 'OPENSSLDIR="C:\Program Files\Common Files\SSL"', + 'OPENSSLDIR="C:\\\Program\ Files\\\Common\ Files\\\SSL"', 'ENGINESDIR="NUL"', 'OPENSSL_SYS_WIN32', 'WIN32_LEAN_AND_MEAN', 'L_ENDIAN', '_CRT_SECURE_NO_DEPRECATE', 'UNICODE', '_UNICODE', diff --git a/deps/uv/AUTHORS b/deps/uv/AUTHORS index 408cfd6541a339..dcc36e8c17f929 100644 --- a/deps/uv/AUTHORS +++ b/deps/uv/AUTHORS @@ -411,3 +411,4 @@ Ouyang Yadong ZYSzys Carl Lei Stefan Bender +nia diff --git a/deps/uv/CMakeLists.txt b/deps/uv/CMakeLists.txt index 7da5e688166c04..2ab6d17edddd72 100644 --- a/deps/uv/CMakeLists.txt +++ b/deps/uv/CMakeLists.txt @@ -275,6 +275,8 @@ if(CMAKE_SYSTEM_NAME STREQUAL "Android") src/unix/linux-syscalls.c src/unix/procfs-exepath.c src/unix/pthread-fixes.c + src/unix/random-getrandom.c + src/unix/random-sysctl-linux.c src/unix/sysinfo-loadavg.c) endif() @@ -320,7 +322,7 @@ if(CMAKE_SYSTEM_NAME STREQUAL "Linux") src/unix/linux-syscalls.c src/unix/procfs-exepath.c src/unix/random-getrandom.c - src/unix/random-sysctl.c + src/unix/random-sysctl-linux.c src/unix/sysinfo-loadavg.c) endif() diff --git a/deps/uv/ChangeLog b/deps/uv/ChangeLog index cd4451ae69c0c1..a0509e6e153f65 100644 --- a/deps/uv/ChangeLog +++ b/deps/uv/ChangeLog @@ -1,3 +1,40 @@ +2019.12.05, Version 1.34.0 (Stable), 15ae750151ac9341e5945eb38f8982d59fb99201 + +Changes since version 1.33.1: + +* unix: move random-sysctl to random-sysctl-linux (nia) + +* netbsd: use KERN_ARND sysctl to get entropy (nia) + +* unix: refactor uv__fs_copyfile() logic (cjihrig) + +* build: fix android build, add missing sources (Ben Noordhuis) + +* build: fix android build, fix symbol redefinition (Ben Noordhuis) + +* build: fix android autotools build (Ben Noordhuis) + +* fs: handle non-functional statx system call (Milad Farazmand) + +* unix,win: add uv_sleep() (cjihrig) + +* doc: add richardlau to maintainers (Richard Lau) + +* aix: fix netmask for IPv6 (Richard Lau) + +* aix: clean up after errors in uv_interface_addresses() (Richard Lau) + +* aix: fix setting of physical addresses (Richard Lau) + +* fs: add uv_fs_mkstemp (Saúl Ibarra Corretgé) + +* unix: switch uv_sleep() to nanosleep() (Ben Noordhuis) + +* unix: retry on EINTR in uv_sleep() (Ben Noordhuis) + +* zos: fix nanosleep() emulation (Ben Noordhuis) + + 2019.10.20, Version 1.33.1 (Stable), 07ad32138f4d2285ba2226b5e20462b27b091a59 Changes since version 1.33.0: diff --git a/deps/uv/MAINTAINERS.md b/deps/uv/MAINTAINERS.md index a5a11c8dfff16c..0870b88eb6896b 100644 --- a/deps/uv/MAINTAINERS.md +++ b/deps/uv/MAINTAINERS.md @@ -17,6 +17,8 @@ libuv is currently managed by the following individuals: - GPG key: 9DFE AA5F 481B BF77 2D90 03CE D592 4925 2F8E C41A (pubkey-iwuzhere) * **Jameson Nash** ([@vtjnash](https://github.com/vtjnash)) * **John Barboza** 
([@jbarz](https://github.com/jbarz)) +* **Richard Lau** ([@richardlau](https://github.com/richardlau)) + - GPG key: C82F A3AE 1CBE DC6B E46B 9360 C43C EC45 C17A B93C (pubkey-richardlau) * **Santiago Gimeno** ([@santigimeno](https://github.com/santigimeno)) - GPG key: 612F 0EAD 9401 6223 79DF 4402 F28C 3C8D A33C 03BE (pubkey-santigimeno) * **Saúl Ibarra Corretgé** ([@saghul](https://github.com/saghul)) diff --git a/deps/uv/Makefile.am b/deps/uv/Makefile.am index ce4ca274b217ee..088b4bbd76f375 100644 --- a/deps/uv/Makefile.am +++ b/deps/uv/Makefile.am @@ -387,7 +387,14 @@ endif if ANDROID uvinclude_HEADERS += include/uv/android-ifaddrs.h libuv_la_SOURCES += src/unix/android-ifaddrs.c \ - src/unix/pthread-fixes.c + src/unix/linux-core.c \ + src/unix/linux-inotify.c \ + src/unix/linux-syscalls.c \ + src/unix/procfs-exepath.c \ + src/unix/pthread-fixes.c \ + src/unix/random-getrandom.c \ + src/unix/random-sysctl-linux.c \ + src/unix/sysinfo-loadavg.c endif if CYGWIN @@ -467,7 +474,7 @@ libuv_la_SOURCES += src/unix/linux-core.c \ src/unix/procfs-exepath.c \ src/unix/proctitle.c \ src/unix/random-getrandom.c \ - src/unix/random-sysctl.c \ + src/unix/random-sysctl-linux.c \ src/unix/sysinfo-loadavg.c test_run_tests_LDFLAGS += -lutil endif diff --git a/deps/uv/configure.ac b/deps/uv/configure.ac index 07ad0cde81a656..6ea6b6a06cd1db 100644 --- a/deps/uv/configure.ac +++ b/deps/uv/configure.ac @@ -13,7 +13,7 @@ # OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. AC_PREREQ(2.57) -AC_INIT([libuv], [1.33.1], [https://github.com/libuv/libuv/issues]) +AC_INIT([libuv], [1.34.0], [https://github.com/libuv/libuv/issues]) AC_CONFIG_MACRO_DIR([m4]) m4_include([m4/libuv-extra-automake-flags.m4]) m4_include([m4/as_case.m4]) diff --git a/deps/uv/docs/src/fs.rst b/deps/uv/docs/src/fs.rst index dc16ff08e65a93..28356c2d442e43 100644 --- a/deps/uv/docs/src/fs.rst +++ b/deps/uv/docs/src/fs.rst @@ -99,7 +99,8 @@ Data types UV_FS_LCHOWN, UV_FS_OPENDIR, UV_FS_READDIR, - UV_FS_CLOSEDIR + UV_FS_CLOSEDIR, + UV_FS_MKSTEMP } uv_fs_type; .. c:type:: uv_statfs_t @@ -245,10 +246,14 @@ API .. c:function:: int uv_fs_mkdtemp(uv_loop_t* loop, uv_fs_t* req, const char* tpl, uv_fs_cb cb) - Equivalent to :man:`mkdtemp(3)`. + Equivalent to :man:`mkdtemp(3)`. The result can be found as a null terminated string at `req->path`. - .. note:: - The result can be found as a null terminated string at `req->path`. +.. c:function:: int uv_fs_mkstemp(uv_loop_t* loop, uv_fs_t* req, const char* tpl, uv_fs_cb cb) + + Equivalent to :man:`mkstemp(3)`. The created file path can be found as a null terminated string at `req->path`. + The file descriptor can be found as an integer at `req->result`. + + .. versionadded:: 1.34.0 .. c:function:: int uv_fs_rmdir(uv_loop_t* loop, uv_fs_t* req, const char* path, uv_fs_cb cb) diff --git a/deps/uv/docs/src/misc.rst b/deps/uv/docs/src/misc.rst index 8e167e3ec4ec1d..3264973bcce151 100644 --- a/deps/uv/docs/src/misc.rst +++ b/deps/uv/docs/src/misc.rst @@ -679,6 +679,7 @@ API :man:`sysctl(2)`. - FreeBSD: `getrandom(2) _`, or `/dev/urandom` after reading from `/dev/random` once. + - NetBSD: `KERN_ARND` `sysctl(3) _` - macOS, OpenBSD: `getentropy(2) _` if available, or `/dev/urandom` after reading from `/dev/random` once. - AIX: `/dev/random`. @@ -693,3 +694,9 @@ API are not used and can be set to `NULL`. .. versionadded:: 1.33.0 + +.. c:function:: void uv_sleep(unsigned int msec) + + Causes the calling thread to sleep for `msec` milliseconds. + + .. 
versionadded:: 1.34.0 diff --git a/deps/uv/include/uv.h b/deps/uv/include/uv.h index 0e8132e4384be0..626cebabd8c9ea 100644 --- a/deps/uv/include/uv.h +++ b/deps/uv/include/uv.h @@ -1258,7 +1258,8 @@ typedef enum { UV_FS_OPENDIR, UV_FS_READDIR, UV_FS_CLOSEDIR, - UV_FS_STATFS + UV_FS_STATFS, + UV_FS_MKSTEMP } uv_fs_type; struct uv_dir_s { @@ -1349,6 +1350,10 @@ UV_EXTERN int uv_fs_mkdtemp(uv_loop_t* loop, uv_fs_t* req, const char* tpl, uv_fs_cb cb); +UV_EXTERN int uv_fs_mkstemp(uv_loop_t* loop, + uv_fs_t* req, + const char* tpl, + uv_fs_cb cb); UV_EXTERN int uv_fs_rmdir(uv_loop_t* loop, uv_fs_t* req, const char* path, @@ -1641,6 +1646,7 @@ UV_EXTERN uint64_t uv_get_total_memory(void); UV_EXTERN uint64_t uv_get_constrained_memory(void); UV_EXTERN uint64_t uv_hrtime(void); +UV_EXTERN void uv_sleep(unsigned int msec); UV_EXTERN void uv_disable_stdio_inheritance(void); diff --git a/deps/uv/include/uv/version.h b/deps/uv/include/uv/version.h index ca94be6dd4fba6..8017302600d9f3 100644 --- a/deps/uv/include/uv/version.h +++ b/deps/uv/include/uv/version.h @@ -31,8 +31,8 @@ */ #define UV_VERSION_MAJOR 1 -#define UV_VERSION_MINOR 33 -#define UV_VERSION_PATCH 1 +#define UV_VERSION_MINOR 34 +#define UV_VERSION_PATCH 0 #define UV_VERSION_IS_RELEASE 1 #define UV_VERSION_SUFFIX "" diff --git a/deps/uv/src/random.c b/deps/uv/src/random.c index 8c4fe32013dc9a..491bf703309955 100644 --- a/deps/uv/src/random.c +++ b/deps/uv/src/random.c @@ -40,6 +40,8 @@ static int uv__random(void* buf, size_t buflen) { rc = uv__random_getentropy(buf, buflen); if (rc == UV_ENOSYS) rc = uv__random_devurandom(buf, buflen); +#elif defined(__NetBSD__) + rc = uv__random_sysctl(buf, buflen); #elif defined(__FreeBSD__) || defined(__linux__) rc = uv__random_getrandom(buf, buflen); if (rc == UV_ENOSYS) diff --git a/deps/uv/src/unix/aix-common.c b/deps/uv/src/unix/aix-common.c index b9d313c0c5d7cb..e96e34c46373bb 100644 --- a/deps/uv/src/unix/aix-common.c +++ b/deps/uv/src/unix/aix-common.c @@ -34,6 +34,7 @@ #include #include #include +#include #include #include @@ -158,28 +159,42 @@ int uv_exepath(char* buffer, size_t* size) { int uv_interface_addresses(uv_interface_address_t** addresses, int* count) { uv_interface_address_t* address; - int sockfd, inet6, size = 1; + int sockfd, sock6fd, inet6, i, r, size = 1; struct ifconf ifc; struct ifreq *ifr, *p, flg; + struct in6_ifreq if6; struct sockaddr_dl* sa_addr; + ifc.ifc_req = NULL; + sock6fd = -1; + r = 0; *count = 0; *addresses = NULL; if (0 > (sockfd = socket(AF_INET, SOCK_DGRAM, IPPROTO_IP))) { - return UV__ERR(errno); + r = UV__ERR(errno); + goto cleanup; + } + + if (0 > (sock6fd = socket(AF_INET6, SOCK_DGRAM, IPPROTO_IP))) { + r = UV__ERR(errno); + goto cleanup; } if (ioctl(sockfd, SIOCGSIZIFCONF, &size) == -1) { - uv__close(sockfd); - return UV__ERR(errno); + r = UV__ERR(errno); + goto cleanup; } ifc.ifc_req = (struct ifreq*)uv__malloc(size); + if (ifc.ifc_req == NULL) { + r = UV_ENOMEM; + goto cleanup; + } ifc.ifc_len = size; if (ioctl(sockfd, SIOCGIFCONF, &ifc) == -1) { - uv__close(sockfd); - return UV__ERR(errno); + r = UV__ERR(errno); + goto cleanup; } #define ADDR_SIZE(p) MAX((p).sa_len, sizeof(p)) @@ -197,8 +212,8 @@ int uv_interface_addresses(uv_interface_address_t** addresses, int* count) { memcpy(flg.ifr_name, p->ifr_name, sizeof(flg.ifr_name)); if (ioctl(sockfd, SIOCGIFFLAGS, &flg) == -1) { - uv__close(sockfd); - return UV__ERR(errno); + r = UV__ERR(errno); + goto cleanup; } if (!(flg.ifr_flags & IFF_UP && flg.ifr_flags & IFF_RUNNING)) @@ -207,16 +222,14 @@ int 
uv_interface_addresses(uv_interface_address_t** addresses, int* count) { (*count)++; } - if (*count == 0) { - uv__close(sockfd); - return 0; - } + if (*count == 0) + goto cleanup; /* Alloc the return interface structs */ - *addresses = uv__malloc(*count * sizeof(uv_interface_address_t)); + *addresses = uv__calloc(*count, sizeof(**addresses)); if (!(*addresses)) { - uv__close(sockfd); - return UV_ENOMEM; + r = UV_ENOMEM; + goto cleanup; } address = *addresses; @@ -233,10 +246,8 @@ int uv_interface_addresses(uv_interface_address_t** addresses, int* count) { inet6 = (p->ifr_addr.sa_family == AF_INET6); memcpy(flg.ifr_name, p->ifr_name, sizeof(flg.ifr_name)); - if (ioctl(sockfd, SIOCGIFFLAGS, &flg) == -1) { - uv__close(sockfd); - return UV_ENOSYS; - } + if (ioctl(sockfd, SIOCGIFFLAGS, &flg) == -1) + goto syserror; if (!(flg.ifr_flags & IFF_UP && flg.ifr_flags & IFF_RUNNING)) continue; @@ -250,28 +261,67 @@ int uv_interface_addresses(uv_interface_address_t** addresses, int* count) { else address->address.address4 = *((struct sockaddr_in*) &p->ifr_addr); - sa_addr = (struct sockaddr_dl*) &p->ifr_addr; - memcpy(address->phys_addr, LLADDR(sa_addr), sizeof(address->phys_addr)); - - if (ioctl(sockfd, SIOCGIFNETMASK, p) == -1) { - uv__close(sockfd); - return UV_ENOSYS; - } - - if (inet6) - address->netmask.netmask6 = *((struct sockaddr_in6*) &p->ifr_addr); - else + if (inet6) { + memset(&if6, 0, sizeof(if6)); + r = uv__strscpy(if6.ifr_name, p->ifr_name, sizeof(if6.ifr_name)); + if (r == UV_E2BIG) + goto cleanup; + r = 0; + memcpy(&if6.ifr_Addr, &p->ifr_addr, sizeof(if6.ifr_Addr)); + if (ioctl(sock6fd, SIOCGIFNETMASK6, &if6) == -1) + goto syserror; + address->netmask.netmask6 = *((struct sockaddr_in6*) &if6.ifr_Addr); + /* Explicitly set family as the ioctl call appears to return it as 0. */ + address->netmask.netmask6.sin6_family = AF_INET6; + } else { + if (ioctl(sockfd, SIOCGIFNETMASK, p) == -1) + goto syserror; address->netmask.netmask4 = *((struct sockaddr_in*) &p->ifr_addr); + /* Explicitly set family as the ioctl call appears to return it as 0. */ + address->netmask.netmask4.sin_family = AF_INET; + } address->is_internal = flg.ifr_flags & IFF_LOOPBACK ? 1 : 0; address++; } + /* Fill in physical addresses. 
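From the caller's side, the AIX fix above is what makes the returned `netmask` entries carry a usable address family. A minimal consumer of `uv_interface_addresses()` that relies on that family field might look like this (illustrative sketch):

```c
#include <stdio.h>
#include <uv.h>

int main(void) {
  uv_interface_address_t* addrs;
  int count;
  int i;
  char netmask[64];

  if (uv_interface_addresses(&addrs, &count) != 0)
    return 1;

  for (i = 0; i < count; i++) {
    /* The family on the netmask tells us which union member to format. */
    if (addrs[i].netmask.netmask4.sin_family == AF_INET)
      uv_ip4_name(&addrs[i].netmask.netmask4, netmask, sizeof(netmask));
    else
      uv_ip6_name(&addrs[i].netmask.netmask6, netmask, sizeof(netmask));

    printf("%-12s %s netmask %s\n",
           addrs[i].name,
           addrs[i].is_internal ? "(internal)" : "(external)",
           netmask);
  }

  uv_free_interface_addresses(addrs, count);
  return 0;
}
```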
*/ + ifr = ifc.ifc_req; + while ((char*)ifr < (char*)ifc.ifc_req + ifc.ifc_len) { + p = ifr; + ifr = (struct ifreq*) + ((char*)ifr + sizeof(ifr->ifr_name) + ADDR_SIZE(ifr->ifr_addr)); + + if (p->ifr_addr.sa_family != AF_LINK) + continue; + + address = *addresses; + for (i = 0; i < *count; i++) { + if (strcmp(address->name, p->ifr_name) == 0) { + sa_addr = (struct sockaddr_dl*) &p->ifr_addr; + memcpy(address->phys_addr, LLADDR(sa_addr), sizeof(address->phys_addr)); + } + address++; + } + } + #undef ADDR_SIZE + goto cleanup; - uv__close(sockfd); - return 0; +syserror: + uv_free_interface_addresses(*addresses, *count); + *addresses = NULL; + *count = 0; + r = UV_ENOSYS; + +cleanup: + if (sockfd != -1) + uv__close(sockfd); + if (sock6fd != -1) + uv__close(sock6fd); + uv__free(ifc.ifc_req); + return r; } diff --git a/deps/uv/src/unix/core.c b/deps/uv/src/unix/core.c index ffce948c957403..04999dce36d193 100644 --- a/deps/uv/src/unix/core.c +++ b/deps/uv/src/unix/core.c @@ -1555,3 +1555,17 @@ int uv_gettimeofday(uv_timeval64_t* tv) { tv->tv_usec = (int32_t) time.tv_usec; return 0; } + +void uv_sleep(unsigned int msec) { + struct timespec timeout; + int rc; + + timeout.tv_sec = msec / 1000; + timeout.tv_nsec = (msec % 1000) * 1000 * 1000; + + do + rc = nanosleep(&timeout, &timeout); + while (rc == -1 && errno == EINTR); + + assert(rc == 0); +} diff --git a/deps/uv/src/unix/fs.c b/deps/uv/src/unix/fs.c index b37cfbbc7a04ee..be256bfca6c58a 100644 --- a/deps/uv/src/unix/fs.c +++ b/deps/uv/src/unix/fs.c @@ -30,6 +30,7 @@ #include "internal.h" #include +#include #include #include #include @@ -258,6 +259,80 @@ static ssize_t uv__fs_mkdtemp(uv_fs_t* req) { } +static int uv__fs_mkstemp(uv_fs_t* req) { + int r; +#ifdef O_CLOEXEC + int (*mkostemp_function)(char*, int); + static int no_cloexec_support; +#endif + static const char pattern[] = "XXXXXX"; + static const size_t pattern_size = sizeof(pattern) - 1; + char* path; + size_t path_length; + + path = (char*) req->path; + path_length = strlen(path); + + /* EINVAL can be returned for 2 reasons: + 1. The template's last 6 characters were not XXXXXX + 2. open() didn't support O_CLOEXEC + We want to avoid going to the fallback path in case + of 1, so it's manually checked before. */ + if (path_length < pattern_size || + strcmp(path + path_length - pattern_size, pattern)) { + errno = EINVAL; + return -1; + } + +#ifdef O_CLOEXEC + if (no_cloexec_support == 0) { + *(int**)(&mkostemp_function) = dlsym(RTLD_DEFAULT, "mkostemp"); + + /* We don't care about errors, but we do want to clean them up. + If there has been no error, then dlerror() will just return + NULL. */ + dlerror(); + + if (mkostemp_function != NULL) { + r = mkostemp_function(path, O_CLOEXEC); + + if (r >= 0) + return r; + + /* If mkostemp() returns EINVAL, it means the kernel doesn't + support O_CLOEXEC, so we just fallback to mkstemp() below. */ + if (errno != EINVAL) + return r; + + /* We set the static variable so that next calls don't even + try to use mkostemp. */ + no_cloexec_support = 1; + } + } +#endif /* O_CLOEXEC */ + + if (req->cb != NULL) + uv_rwlock_rdlock(&req->loop->cloexec_lock); + + r = mkstemp(path); + + /* In case of failure `uv__cloexec` will leave error in `errno`, + * so it is enough to just set `r` to `-1`. 
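The function above backs the new `uv_fs_mkstemp()` request. Used synchronously, the file descriptor comes back as the result and the expanded path stays on the request until it is cleaned up; a sketch (the template must end in `XXXXXX`, as with `mkstemp(3)`, and the name here is a placeholder):

```c
#include <stdio.h>
#include <uv.h>

int main(void) {
  uv_fs_t req;
  uv_fs_t close_req;
  int fd;

  fd = uv_fs_mkstemp(NULL, &req, "example_XXXXXX", NULL);
  if (fd < 0) {
    fprintf(stderr, "uv_fs_mkstemp: %s\n", uv_strerror(fd));
    return 1;
  }

  printf("created %s (fd %d)\n", req.path, fd);  /* null terminated path */

  uv_fs_close(NULL, &close_req, fd, NULL);
  uv_fs_req_cleanup(&close_req);
  uv_fs_req_cleanup(&req);  /* mkstemp requests always own a copy of the path */
  return 0;
}
```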
+ */ + if (r >= 0 && uv__cloexec(r, 1) != 0) { + r = uv__close(r); + if (r != 0) + abort(); + r = -1; + } + + if (req->cb != NULL) + uv_rwlock_rdunlock(&req->loop->cloexec_lock); + + return r; +} + + static ssize_t uv__fs_open(uv_fs_t* req) { #ifdef O_CLOEXEC return open(req->path, req->flags | O_CLOEXEC, req->mode); @@ -999,6 +1074,7 @@ static ssize_t uv__fs_copyfile(uv_fs_t* req) { int err; size_t bytes_to_send; int64_t in_offset; + ssize_t bytes_written; dstfd = -1; err = 0; @@ -1076,18 +1152,17 @@ static ssize_t uv__fs_copyfile(uv_fs_t* req) { bytes_to_send = src_statsbuf.st_size; in_offset = 0; while (bytes_to_send != 0) { - err = uv_fs_sendfile(NULL, - &fs_req, - dstfd, - srcfd, - in_offset, - bytes_to_send, - NULL); + uv_fs_sendfile(NULL, &fs_req, dstfd, srcfd, in_offset, bytes_to_send, NULL); + bytes_written = fs_req.result; uv_fs_req_cleanup(&fs_req); - if (err < 0) + + if (bytes_written < 0) { + err = bytes_written; break; - bytes_to_send -= fs_req.result; - in_offset += fs_req.result; + } + + bytes_to_send -= bytes_written; + in_offset += bytes_written; } out: @@ -1234,13 +1309,22 @@ static int uv__fs_statx(int fd, rc = uv__statx(dirfd, path, flags, mode, &statxbuf); - if (rc == -1) { + switch (rc) { + case 0: + break; + case -1: /* EPERM happens when a seccomp filter rejects the system call. * Has been observed with libseccomp < 2.3.3 and docker < 18.04. */ if (errno != EINVAL && errno != EPERM && errno != ENOSYS) return -1; - + /* Fall through. */ + default: + /* Normally on success, zero is returned and On error, -1 is returned. + * Observed on S390 RHEL running in a docker container with statx not + * implemented, rc might return 1 with 0 set as the error code in which + * case we return ENOSYS. + */ no_statx = 1; return UV_ENOSYS; } @@ -1415,6 +1499,7 @@ static void uv__fs_work(struct uv__work* w) { X(LINK, link(req->path, req->new_path)); X(MKDIR, mkdir(req->path, req->mode)); X(MKDTEMP, uv__fs_mkdtemp(req)); + X(MKSTEMP, uv__fs_mkstemp(req)); X(OPEN, uv__fs_open(req)); X(READ, uv__fs_read(req)); X(SCANDIR, uv__fs_scandir(req)); @@ -1639,6 +1724,18 @@ int uv_fs_mkdtemp(uv_loop_t* loop, } +int uv_fs_mkstemp(uv_loop_t* loop, + uv_fs_t* req, + const char* tpl, + uv_fs_cb cb) { + INIT(MKSTEMP); + req->path = uv__strdup(tpl); + if (req->path == NULL) + return UV_ENOMEM; + POST; +} + + int uv_fs_open(uv_loop_t* loop, uv_fs_t* req, const char* path, @@ -1857,10 +1954,12 @@ void uv_fs_req_cleanup(uv_fs_t* req) { /* Only necessary for asychronous requests, i.e., requests with a callback. * Synchronous ones don't copy their arguments and have req->path and - * req->new_path pointing to user-owned memory. UV_FS_MKDTEMP is the - * exception to the rule, it always allocates memory. + * req->new_path pointing to user-owned memory. UV_FS_MKDTEMP and + * UV_FS_MKSTEMP are the exception to the rule, they always allocate memory. */ - if (req->path != NULL && (req->cb != NULL || req->fs_type == UV_FS_MKDTEMP)) + if (req->path != NULL && + (req->cb != NULL || + req->fs_type == UV_FS_MKDTEMP || req->fs_type == UV_FS_MKSTEMP)) uv__free((void*) req->path); /* Memory is shared with req->new_path. 
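The reworked sendfile loop above is one of the paths that services the public `uv_fs_copyfile()` call; from user code the refactor is invisible. A synchronous use looks roughly like this (sketch; the file names are placeholders):

```c
#include <stdio.h>
#include <uv.h>

int main(void) {
  uv_fs_t req;
  int r;

  /* Flags of 0: overwrite the destination if it already exists. */
  r = uv_fs_copyfile(NULL, &req, "source.txt", "dest.txt", 0, NULL);
  if (r != 0)
    fprintf(stderr, "uv_fs_copyfile: %s\n", uv_strerror(r));

  uv_fs_req_cleanup(&req);
  return r == 0 ? 0 : 1;
}
```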
*/ req->path = NULL; diff --git a/deps/uv/src/unix/netbsd.c b/deps/uv/src/unix/netbsd.c index cfe2c6a49dc3c7..690bd79ef91a36 100644 --- a/deps/uv/src/unix/netbsd.c +++ b/deps/uv/src/unix/netbsd.c @@ -234,3 +234,26 @@ int uv_cpu_info(uv_cpu_info_t** cpu_infos, int* count) { uv__free(cp_times); return 0; } + +int uv__random_sysctl(void* buf, size_t len) { + static int name[] = {CTL_KERN, KERN_ARND}; + size_t count, req; + unsigned char* p; + + p = buf; + while (len) { + req = len < 32 ? len : 32; + count = req; + + if (sysctl(name, ARRAY_SIZE(name), p, &count, NULL, 0) == -1) + return UV__ERR(errno); + + if (count != req) + return UV_EIO; /* Can't happen. */ + + p += count; + len -= count; + } + + return 0; +} diff --git a/deps/uv/src/unix/os390-syscalls.c b/deps/uv/src/unix/os390-syscalls.c index 1040d66979da04..d9abdebaeeda59 100644 --- a/deps/uv/src/unix/os390-syscalls.c +++ b/deps/uv/src/unix/os390-syscalls.c @@ -23,11 +23,11 @@ #include "os390-syscalls.h" #include #include -#include #include #include #include +#define CW_INTRPT 1 #define CW_CONDVAR 32 #pragma linkage(BPX4CTW, OS) @@ -350,27 +350,34 @@ int nanosleep(const struct timespec* req, struct timespec* rem) { unsigned secrem; unsigned nanorem; int rv; - int rc; + int err; int rsn; nano = (int)req->tv_nsec; seconds = req->tv_sec; - events = CW_CONDVAR; + events = CW_CONDVAR | CW_INTRPT; + secrem = 0; + nanorem = 0; #if defined(_LP64) - BPX4CTW(&seconds, &nano, &events, &secrem, &nanorem, &rv, &rc, &rsn); + BPX4CTW(&seconds, &nano, &events, &secrem, &nanorem, &rv, &err, &rsn); #else - BPX1CTW(&seconds, &nano, &events, &secrem, &nanorem, &rv, &rc, &rsn); + BPX1CTW(&seconds, &nano, &events, &secrem, &nanorem, &rv, &err, &rsn); #endif - assert(rv == -1 && errno == EAGAIN); + /* Don't clobber errno unless BPX1CTW/BPX4CTW errored. + * Don't leak EAGAIN, that just means the timeout expired. + */ + if (rv == -1) + if (err != EAGAIN) + errno = err; - if(rem != NULL) { + if (rem != NULL && (rv == 0 || err == EINTR || err == EAGAIN)) { rem->tv_nsec = nanorem; rem->tv_sec = secrem; } - return 0; + return rv; } diff --git a/deps/uv/src/unix/random-devurandom.c b/deps/uv/src/unix/random-devurandom.c index bfc40d20f88cb7..9aa762e372ea3f 100644 --- a/deps/uv/src/unix/random-devurandom.c +++ b/deps/uv/src/unix/random-devurandom.c @@ -74,10 +74,10 @@ int uv__random_readpath(const char* path, void* buf, size_t buflen) { static void uv__random_devurandom_init(void) { char c; - /* Linux's and NetBSD's random(4) man page suggests applications should read - * at least once from /dev/random before switching to /dev/urandom in order - * to seed the system RNG. Reads from /dev/random can of course block - * indefinitely until entropy is available but that's the point. + /* Linux's random(4) man page suggests applications should read at least + * once from /dev/random before switching to /dev/urandom in order to seed + * the system RNG. Reads from /dev/random can of course block indefinitely + * until entropy is available but that's the point. */ status = uv__random_readpath("/dev/random", &c, 1); } diff --git a/deps/uv/src/unix/random-sysctl.c b/deps/uv/src/unix/random-sysctl-linux.c similarity index 97% rename from deps/uv/src/unix/random-sysctl.c rename to deps/uv/src/unix/random-sysctl-linux.c index fb182ded09296b..66ba8d74ec22b7 100644 --- a/deps/uv/src/unix/random-sysctl.c +++ b/deps/uv/src/unix/random-sysctl-linux.c @@ -40,7 +40,6 @@ struct uv__sysctl_args { }; -/* TODO(bnoordhuis) Use {CTL_KERN, KERN_ARND} on FreeBSD (and NetBSD?) 
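With this change NetBSD draws its entropy from the `KERN_ARND` sysctl, but the caller-facing interface is still `uv_random()`. In its synchronous form (per the docs quoted earlier, the loop, request and callback arguments are not used and may be `NULL`), a sketch looks like:

```c
#include <stdio.h>
#include <uv.h>

int main(void) {
  unsigned char buf[32];
  size_t i;
  int r;

  r = uv_random(NULL, NULL, buf, sizeof(buf), 0, NULL);  /* synchronous: cb == NULL */
  if (r != 0) {
    fprintf(stderr, "uv_random: %s\n", uv_strerror(r));
    return 1;
  }

  for (i = 0; i < sizeof(buf); i++)
    printf("%02x", buf[i]);
  printf("\n");
  return 0;
}
```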
*/ int uv__random_sysctl(void* buf, size_t buflen) { static int name[] = {1 /*CTL_KERN*/, 40 /*KERN_RANDOM*/, 6 /*RANDOM_UUID*/}; struct uv__sysctl_args args; diff --git a/deps/uv/src/win/fs.c b/deps/uv/src/win/fs.c index 3ab486080cdfa7..8502b072021a9e 100644 --- a/deps/uv/src/win/fs.c +++ b/deps/uv/src/win/fs.c @@ -1195,9 +1195,10 @@ void fs__mkdir(uv_fs_t* req) { } } +typedef int (*uv__fs_mktemp_func)(uv_fs_t* req); /* OpenBSD original: lib/libc/stdio/mktemp.c */ -void fs__mkdtemp(uv_fs_t* req) { +void fs__mktemp(uv_fs_t* req, uv__fs_mktemp_func func) { static const WCHAR *tempchars = L"abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789"; static const size_t num_chars = 62; @@ -1227,13 +1228,11 @@ void fs__mkdtemp(uv_fs_t* req) { v /= num_chars; } - if (_wmkdir(req->file.pathw) == 0) { - len = strlen(req->path); - wcstombs((char*) req->path + len - num_x, ep - num_x, num_x); - SET_REQ_RESULT(req, 0); - break; - } else if (errno != EEXIST) { - SET_REQ_RESULT(req, -1); + if (func(req)) { + if (req->result >= 0) { + len = strlen(req->path); + wcstombs((char*) req->path + len - num_x, ep - num_x, num_x); + } break; } } while (--tries); @@ -1244,6 +1243,77 @@ void fs__mkdtemp(uv_fs_t* req) { } +static int fs__mkdtemp_func(uv_fs_t* req) { + if (_wmkdir(req->file.pathw) == 0) { + SET_REQ_RESULT(req, 0); + return 1; + } else if (errno != EEXIST) { + SET_REQ_RESULT(req, -1); + return 1; + } + + return 0; +} + + +void fs__mkdtemp(uv_fs_t* req) { + fs__mktemp(req, fs__mkdtemp_func); +} + + +static int fs__mkstemp_func(uv_fs_t* req) { + HANDLE file; + int fd; + + file = CreateFileW(req->file.pathw, + GENERIC_READ | GENERIC_WRITE, + FILE_SHARE_READ | FILE_SHARE_WRITE | FILE_SHARE_DELETE, + NULL, + CREATE_NEW, + FILE_ATTRIBUTE_NORMAL, + NULL); + + if (file == INVALID_HANDLE_VALUE) { + DWORD error; + error = GetLastError(); + + /* If the file exists, the main fs__mktemp() function + will retry. If it's another error, we want to stop. */ + if (error != ERROR_FILE_EXISTS) { + SET_REQ_WIN32_ERROR(req, error); + return 1; + } + + return 0; + } + + fd = _open_osfhandle((intptr_t) file, 0); + if (fd < 0) { + /* The only known failure mode for _open_osfhandle() is EMFILE, in which + * case GetLastError() will return zero. However we'll try to handle other + * errors as well, should they ever occur. 
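The Windows change above folds `fs__mkdtemp` and the new `fs__mkstemp` into a single template-retry loop that delegates each creation attempt to a function pointer. The shape of that design, reduced to a standalone sketch (all names here are hypothetical, not the libuv internals):

```c
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* The attempt callback returns 1 when it has settled the request (success or
 * a fatal error) and 0 when the candidate name collided and another random
 * suffix should be tried. */
typedef int (*mktemp_attempt)(const char* candidate);

static void randomize_suffix(char* tpl) {
  static const char chars[] =
      "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789";
  size_t len = strlen(tpl);
  size_t i;

  for (i = len - 6; i < len; i++)  /* template is assumed to end in XXXXXX */
    tpl[i] = chars[rand() % (sizeof(chars) - 1)];
}

static int make_temp(char* tpl, mktemp_attempt attempt, unsigned tries) {
  while (tries--) {
    randomize_suffix(tpl);
    if (attempt(tpl))
      return 0;   /* the callback recorded the outcome on the request */
  }
  return -1;      /* ran out of candidate names */
}

/* Toy attempt function: pretend every candidate name is free. */
static int always_free(const char* candidate) {
  printf("would create %s\n", candidate);
  return 1;
}

int main(void) {
  char tpl[] = "demo_XXXXXX";
  return make_temp(tpl, always_free, 3);
}
```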
+ */ + if (errno == EMFILE) + SET_REQ_UV_ERROR(req, UV_EMFILE, ERROR_TOO_MANY_OPEN_FILES); + else if (GetLastError() != ERROR_SUCCESS) + SET_REQ_WIN32_ERROR(req, GetLastError()); + else + SET_REQ_WIN32_ERROR(req, UV_UNKNOWN); + CloseHandle(file); + return 1; + } + + SET_REQ_RESULT(req, fd); + + return 1; +} + + +void fs__mkstemp(uv_fs_t* req) { + fs__mktemp(req, fs__mkstemp_func); +} + + void fs__scandir(uv_fs_t* req) { static const size_t dirents_initial_size = 32; @@ -2609,6 +2679,7 @@ static void uv__fs_work(struct uv__work* w) { XX(RMDIR, rmdir) XX(MKDIR, mkdir) XX(MKDTEMP, mkdtemp) + XX(MKSTEMP, mkstemp) XX(RENAME, rename) XX(SCANDIR, scandir) XX(READDIR, readdir) @@ -2785,8 +2856,10 @@ int uv_fs_mkdir(uv_loop_t* loop, uv_fs_t* req, const char* path, int mode, } -int uv_fs_mkdtemp(uv_loop_t* loop, uv_fs_t* req, const char* tpl, - uv_fs_cb cb) { +int uv_fs_mkdtemp(uv_loop_t* loop, + uv_fs_t* req, + const char* tpl, + uv_fs_cb cb) { int err; INIT(UV_FS_MKDTEMP); @@ -2798,6 +2871,21 @@ int uv_fs_mkdtemp(uv_loop_t* loop, uv_fs_t* req, const char* tpl, } +int uv_fs_mkstemp(uv_loop_t* loop, + uv_fs_t* req, + const char* tpl, + uv_fs_cb cb) { + int err; + + INIT(UV_FS_MKSTEMP); + err = fs__capture_path(req, tpl, NULL, TRUE); + if (err) + return uv_translate_sys_error(err); + + POST; +} + + int uv_fs_rmdir(uv_loop_t* loop, uv_fs_t* req, const char* path, uv_fs_cb cb) { int err; diff --git a/deps/uv/src/win/util.c b/deps/uv/src/win/util.c index 4bbeb3154123dd..4de638f5971c35 100644 --- a/deps/uv/src/win/util.c +++ b/deps/uv/src/win/util.c @@ -1873,3 +1873,7 @@ int uv__random_rtlgenrandom(void* buf, size_t buflen) { return 0; } + +void uv_sleep(unsigned int msec) { + Sleep(msec); +} diff --git a/deps/uv/test/runner-unix.c b/deps/uv/test/runner-unix.c index 432cf33d482cd9..716c15ff088fa9 100644 --- a/deps/uv/test/runner-unix.c +++ b/deps/uv/test/runner-unix.c @@ -448,17 +448,3 @@ void rewind_cursor(void) { fprintf(stderr, "\033[2K\r"); #endif } - - -/* Pause the calling thread for a number of milliseconds. */ -void uv_sleep(int msec) { - int sec; - int usec; - - sec = msec / 1000; - usec = (msec % 1000) * 1000; - if (sec > 0) - sleep(sec); - if (usec > 0) - usleep(usec); -} diff --git a/deps/uv/test/runner-win.c b/deps/uv/test/runner-win.c index 6bb41a5d0629c8..4167a386921ec7 100644 --- a/deps/uv/test/runner-win.c +++ b/deps/uv/test/runner-win.c @@ -355,9 +355,3 @@ void rewind_cursor() { fprintf(stderr, "\n"); } } - - -/* Pause the calling thread for a number of milliseconds. */ -void uv_sleep(int msec) { - Sleep(msec); -} diff --git a/deps/uv/test/task.h b/deps/uv/test/task.h index e763f89f09dcd1..bc7b53369b538a 100644 --- a/deps/uv/test/task.h +++ b/deps/uv/test/task.h @@ -131,9 +131,6 @@ typedef enum { int run_helper_##name(void); \ int run_helper_##name(void) -/* Pause the calling thread for a number of milliseconds. */ -void uv_sleep(int msec); - /* Format big numbers nicely. WARNING: leaks memory. 
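With `uv_sleep()` promoted to the public API (`Sleep()` on Windows, the `nanosleep()` retry loop shown earlier on Unix), the per-platform helpers in the test runners become redundant. Typical use is a plain millisecond pause, for example between retries (sketch):

```c
#include <stdio.h>
#include <uv.h>

int main(void) {
  int attempt;

  for (attempt = 1; attempt <= 3; attempt++) {
    printf("attempt %d...\n", attempt);
    uv_sleep(100 * attempt);  /* back off 100 ms, 200 ms, 300 ms */
  }
  return 0;
}
```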
*/ const char* fmt(double d); diff --git a/deps/uv/test/test-fs.c b/deps/uv/test/test-fs.c index 9326c6bc2753f0..ded1eec8dc632c 100644 --- a/deps/uv/test/test-fs.c +++ b/deps/uv/test/test-fs.c @@ -73,6 +73,7 @@ static int write_cb_count; static int unlink_cb_count; static int mkdir_cb_count; static int mkdtemp_cb_count; +static int mkstemp_cb_count; static int rmdir_cb_count; static int scandir_cb_count; static int stat_cb_count; @@ -107,6 +108,9 @@ static uv_fs_t close_req; static uv_fs_t mkdir_req; static uv_fs_t mkdtemp_req1; static uv_fs_t mkdtemp_req2; +static uv_fs_t mkstemp_req1; +static uv_fs_t mkstemp_req2; +static uv_fs_t mkstemp_req3; static uv_fs_t rmdir_req; static uv_fs_t scandir_req; static uv_fs_t stat_req; @@ -538,6 +542,32 @@ static void mkdtemp_cb(uv_fs_t* req) { } +static void check_mkstemp_result(uv_fs_t* req) { + int r; + + ASSERT(req->fs_type == UV_FS_MKSTEMP); + ASSERT(req->result >= 0); + ASSERT(req->path); + ASSERT(strlen(req->path) == 16); + ASSERT(memcmp(req->path, "test_file_", 10) == 0); + ASSERT(memcmp(req->path + 10, "XXXXXX", 6) != 0); + check_permission(req->path, 0600); + + /* Check if req->path is actually a file */ + r = uv_fs_stat(NULL, &stat_req, req->path, NULL); + ASSERT(r == 0); + ASSERT(stat_req.statbuf.st_mode & S_IFREG); + uv_fs_req_cleanup(&stat_req); +} + + +static void mkstemp_cb(uv_fs_t* req) { + ASSERT(req == &mkstemp_req1); + check_mkstemp_result(req); + mkstemp_cb_count++; +} + + static void rmdir_cb(uv_fs_t* req) { ASSERT(req == &rmdir_req); ASSERT(req->fs_type == UV_FS_RMDIR); @@ -1208,6 +1238,69 @@ TEST_IMPL(fs_mkdtemp) { } +TEST_IMPL(fs_mkstemp) { + int r; + int fd; + const char path_template[] = "test_file_XXXXXX"; + uv_fs_t req; + + loop = uv_default_loop(); + + r = uv_fs_mkstemp(loop, &mkstemp_req1, path_template, mkstemp_cb); + ASSERT(r == 0); + + uv_run(loop, UV_RUN_DEFAULT); + ASSERT(mkstemp_cb_count == 1); + + /* sync mkstemp */ + r = uv_fs_mkstemp(NULL, &mkstemp_req2, path_template, NULL); + ASSERT(r >= 0); + check_mkstemp_result(&mkstemp_req2); + + /* mkstemp return different values on subsequent calls */ + ASSERT(strcmp(mkstemp_req1.path, mkstemp_req2.path) != 0); + + /* invalid template returns EINVAL */ + ASSERT(uv_fs_mkstemp(NULL, &mkstemp_req3, "test_file", NULL) == UV_EINVAL); + + /* We can write to the opened file */ + iov = uv_buf_init(test_buf, sizeof(test_buf)); + r = uv_fs_write(NULL, &req, mkstemp_req1.result, &iov, 1, -1, NULL); + ASSERT(r == sizeof(test_buf)); + ASSERT(req.result == sizeof(test_buf)); + uv_fs_req_cleanup(&req); + + /* Cleanup */ + uv_fs_close(NULL, &req, mkstemp_req1.result, NULL); + uv_fs_req_cleanup(&req); + uv_fs_close(NULL, &req, mkstemp_req2.result, NULL); + uv_fs_req_cleanup(&req); + + fd = uv_fs_open(NULL, &req, mkstemp_req1.path , O_RDONLY, 0, NULL); + ASSERT(fd >= 0); + uv_fs_req_cleanup(&req); + + memset(buf, 0, sizeof(buf)); + iov = uv_buf_init(buf, sizeof(buf)); + r = uv_fs_read(NULL, &req, fd, &iov, 1, -1, NULL); + ASSERT(r >= 0); + ASSERT(req.result >= 0); + ASSERT(strcmp(buf, test_buf) == 0); + uv_fs_req_cleanup(&req); + + uv_fs_close(NULL, &req, fd, NULL); + uv_fs_req_cleanup(&req); + + unlink(mkstemp_req1.path); + unlink(mkstemp_req2.path); + uv_fs_req_cleanup(&mkstemp_req1); + uv_fs_req_cleanup(&mkstemp_req2); + + MAKE_VALGRIND_HAPPY(); + return 0; +} + + TEST_IMPL(fs_fstat) { int r; uv_fs_t req; @@ -3784,6 +3877,9 @@ TEST_IMPL(fs_null_req) { r = uv_fs_mkdtemp(NULL, NULL, NULL, NULL); ASSERT(r == UV_EINVAL); + r = uv_fs_mkstemp(NULL, NULL, NULL, NULL); + ASSERT(r == UV_EINVAL); 
+ r = uv_fs_rmdir(NULL, NULL, NULL, NULL); ASSERT(r == UV_EINVAL); diff --git a/deps/uv/test/test-list.h b/deps/uv/test/test-list.h index ad94c52d0c5866..a6cfc6bb9284bd 100644 --- a/deps/uv/test/test-list.h +++ b/deps/uv/test/test-list.h @@ -310,6 +310,7 @@ TEST_DECLARE (fs_async_dir) TEST_DECLARE (fs_async_sendfile) TEST_DECLARE (fs_async_sendfile_nodata) TEST_DECLARE (fs_mkdtemp) +TEST_DECLARE (fs_mkstemp) TEST_DECLARE (fs_fstat) TEST_DECLARE (fs_access) TEST_DECLARE (fs_chmod) @@ -920,6 +921,7 @@ TASK_LIST_START TEST_ENTRY (fs_async_sendfile) TEST_ENTRY (fs_async_sendfile_nodata) TEST_ENTRY (fs_mkdtemp) + TEST_ENTRY (fs_mkstemp) TEST_ENTRY (fs_fstat) TEST_ENTRY (fs_access) TEST_ENTRY (fs_chmod) diff --git a/deps/uv/uv.gyp b/deps/uv/uv.gyp index 051bdc937c9fd3..c4564c04086bc0 100644 --- a/deps/uv/uv.gyp +++ b/deps/uv/uv.gyp @@ -245,7 +245,7 @@ 'src/unix/linux-syscalls.h', 'src/unix/procfs-exepath.c', 'src/unix/random-getrandom.c', - 'src/unix/random-sysctl.c', + 'src/unix/random-sysctl-linux.c', 'src/unix/sysinfo-loadavg.c', ], 'link_settings': { @@ -261,8 +261,9 @@ 'src/unix/pthread-fixes.c', 'src/unix/android-ifaddrs.c', 'src/unix/procfs-exepath.c', + 'src/unix/random-getrandom.c', + 'src/unix/random-sysctl-linux.c', 'src/unix/sysinfo-loadavg.c', - 'src/unix/sysinfo-memory.c', ], 'link_settings': { 'libraries': [ '-ldl' ], diff --git a/deps/uvwasi/LICENSE b/deps/uvwasi/LICENSE new file mode 100644 index 00000000000000..dfb8546af52b03 --- /dev/null +++ b/deps/uvwasi/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2019 Colin Ihrig and Contributors + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
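Not part of the patch: the test additions above exercise the new uv_fs_mkstemp() request. As a usage note, here is a minimal standalone sketch of the synchronous calling pattern those tests rely on (NULL loop and NULL callback run the request synchronously; req.path holds the generated name and req.result the open file descriptor). It assumes a libuv build that already ships uv_fs_mkstemp(); the "example_XXXXXX" template is an illustrative choice, not a value from this patch.

/* Sketch only, not part of the patch. Assumes libuv with uv_fs_mkstemp(). */
#include <stdio.h>
#include <uv.h>

int main(void) {
  uv_fs_t req;
  uv_fs_t close_req;
  int r;

  /* The template must end in "XXXXXX"; an invalid template yields UV_EINVAL. */
  r = uv_fs_mkstemp(NULL, &req, "example_XXXXXX", NULL);
  if (r < 0) {
    fprintf(stderr, "uv_fs_mkstemp: %s\n", uv_strerror(r));
    return 1;
  }

  /* req.path is the created file's name; req.result is the open fd. */
  printf("created %s (fd %d)\n", req.path, (int) req.result);

  /* Close the returned descriptor and release both requests. */
  uv_fs_close(NULL, &close_req, (uv_file) req.result, NULL);
  uv_fs_req_cleanup(&close_req);
  uv_fs_req_cleanup(&req);
  return 0;
}
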
diff --git a/deps/uvwasi/include/clocks.h b/deps/uvwasi/include/clocks.h new file mode 100644 index 00000000000000..7437d03df6bfa7 --- /dev/null +++ b/deps/uvwasi/include/clocks.h @@ -0,0 +1,13 @@ +#ifndef __UVWASI_CLOCKS_H__ +#define __UVWASI_CLOCKS_H__ + +#include "wasi_types.h" + +uvwasi_errno_t uvwasi__clock_gettime_realtime(uvwasi_timestamp_t* time); +uvwasi_errno_t uvwasi__clock_gettime_process_cputime(uvwasi_timestamp_t* time); +uvwasi_errno_t uvwasi__clock_gettime_thread_cputime(uvwasi_timestamp_t* time); + +uvwasi_errno_t uvwasi__clock_getres_process_cputime(uvwasi_timestamp_t* time); +uvwasi_errno_t uvwasi__clock_getres_thread_cputime(uvwasi_timestamp_t* time); + +#endif /* __UVWASI_CLOCKS_H__ */ diff --git a/deps/uvwasi/include/fd_table.h b/deps/uvwasi/include/fd_table.h new file mode 100644 index 00000000000000..639ff9abc8d34f --- /dev/null +++ b/deps/uvwasi/include/fd_table.h @@ -0,0 +1,57 @@ +#ifndef __UVWASI_FD_TABLE_H__ +#define __UVWASI_FD_TABLE_H__ + +#include +#include "uv.h" +#include "wasi_types.h" +#include "uv_mapping.h" + +struct uvwasi_s; + +struct uvwasi_fd_wrap_t { + uvwasi_fd_t id; + uv_file fd; + char* path; + char* real_path; + uvwasi_filetype_t type; + uvwasi_rights_t rights_base; + uvwasi_rights_t rights_inheriting; + int preopen; + uv_mutex_t mutex; +}; + +struct uvwasi_fd_table_t { + struct uvwasi_fd_wrap_t** fds; + uint32_t size; + uint32_t used; + uv_rwlock_t rwlock; +}; + +uvwasi_errno_t uvwasi_fd_table_init(struct uvwasi_s* uvwasi, + struct uvwasi_fd_table_t* table, + uint32_t init_size); +void uvwasi_fd_table_free(struct uvwasi_s* uvwasi, + struct uvwasi_fd_table_t* table); +uvwasi_errno_t uvwasi_fd_table_insert_preopen(struct uvwasi_s* uvwasi, + struct uvwasi_fd_table_t* table, + const uv_file fd, + const char* path, + const char* real_path); +uvwasi_errno_t uvwasi_fd_table_insert_fd(struct uvwasi_s* uvwasi, + struct uvwasi_fd_table_t* table, + const uv_file fd, + const int flags, + const char* path, + uvwasi_rights_t rights_base, + uvwasi_rights_t rights_inheriting, + struct uvwasi_fd_wrap_t* wrap); +uvwasi_errno_t uvwasi_fd_table_get(const struct uvwasi_fd_table_t* table, + const uvwasi_fd_t id, + struct uvwasi_fd_wrap_t** wrap, + uvwasi_rights_t rights_base, + uvwasi_rights_t rights_inheriting); +uvwasi_errno_t uvwasi_fd_table_remove(struct uvwasi_s* uvwasi, + struct uvwasi_fd_table_t* table, + const uvwasi_fd_t id); + +#endif /* __UVWASI_FD_TABLE_H__ */ diff --git a/deps/uvwasi/include/uv_mapping.h b/deps/uvwasi/include/uv_mapping.h new file mode 100644 index 00000000000000..d835ca507a4856 --- /dev/null +++ b/deps/uvwasi/include/uv_mapping.h @@ -0,0 +1,15 @@ +#ifndef __UVWASI_UV_MAPPING_H__ +#define __UVWASI_UV_MAPPING_H__ + +#include "uv.h" +#include "wasi_types.h" + +#define NANOS_PER_SEC 1000000000 + +uvwasi_errno_t uvwasi__translate_uv_error(int err); +int uvwasi__translate_to_uv_signal(uvwasi_signal_t sig); +uvwasi_timestamp_t uvwasi__timespec_to_timestamp(const uv_timespec_t* ts); +uvwasi_filetype_t uvwasi__stat_to_filetype(const uv_stat_t* stat); +void uvwasi__stat_to_filestat(const uv_stat_t* stat, uvwasi_filestat_t* fs); + +#endif /* __UVWASI_UV_MAPPING_H__ */ diff --git a/deps/uvwasi/include/uvwasi.h b/deps/uvwasi/include/uvwasi.h new file mode 100644 index 00000000000000..2fbcbc583dcbfb --- /dev/null +++ b/deps/uvwasi/include/uvwasi.h @@ -0,0 +1,267 @@ +#ifndef __UVWASI_H__ +#define __UVWASI_H__ + +#ifdef __cplusplus +extern "C" { +#endif + +#include "wasi_types.h" +#include "uv_mapping.h" +#include "fd_table.h" + +#define 
UVWASI_VERSION_MAJOR 0 +#define UVWASI_VERSION_MINOR 0 +#define UVWASI_VERSION_PATCH 3 +#define UVWASI_VERSION_HEX ((UVWASI_VERSION_MAJOR << 16) | \ + (UVWASI_VERSION_MINOR << 8) | \ + (UVWASI_VERSION_PATCH)) +#define UVWASI_STRINGIFY(v) UVWASI_STRINGIFY_HELPER(v) +#define UVWASI_STRINGIFY_HELPER(v) #v +#define UVWASI_VERSION_STRING UVWASI_STRINGIFY(UVWASI_VERSION_MAJOR) "." \ + UVWASI_STRINGIFY(UVWASI_VERSION_MINOR) "." \ + UVWASI_STRINGIFY(UVWASI_VERSION_PATCH) +#define UVWASI_VERSION_WASI "snapshot_1" + +typedef void* (*uvwasi_malloc)(size_t size, void* mem_user_data); +typedef void (*uvwasi_free)(void* ptr, void* mem_user_data); +typedef void* (*uvwasi_calloc)(size_t nmemb, size_t size, void* mem_user_data); +typedef void* (*uvwasi_realloc)(void* ptr, size_t size, void* mem_user_data); + +typedef struct uvwasi_mem_s { + void* mem_user_data; + uvwasi_malloc malloc; + uvwasi_free free; + uvwasi_calloc calloc; + uvwasi_realloc realloc; +} uvwasi_mem_t; + +typedef struct uvwasi_s { + struct uvwasi_fd_table_t fds; + size_t argc; + char** argv; + char* argv_buf; + size_t argv_buf_size; + size_t envc; + char** env; + char* env_buf; + size_t env_buf_size; + const uvwasi_mem_t* allocator; +} uvwasi_t; + +typedef struct uvwasi_preopen_s { + char* mapped_path; + char* real_path; +} uvwasi_preopen_t; + +typedef struct uvwasi_options_s { + size_t fd_table_size; + size_t preopenc; + uvwasi_preopen_t* preopens; + size_t argc; + char** argv; + char** envp; + const uvwasi_mem_t* allocator; +} uvwasi_options_t; + +// Embedder API. +uvwasi_errno_t uvwasi_init(uvwasi_t* uvwasi, uvwasi_options_t* options); +void uvwasi_destroy(uvwasi_t* uvwasi); +uvwasi_errno_t uvwasi_embedder_remap_fd(uvwasi_t* uvwasi, + const uvwasi_fd_t fd, + uv_file new_host_fd); +const char* uvwasi_embedder_err_code_to_string(uvwasi_errno_t code); + + +// WASI system call API. 
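Not part of the patch: before the per-syscall declarations that follow, a minimal sketch of how an embedder might use the embedder API declared above (uvwasi_init(), uvwasi_destroy(), uvwasi_options_t). The "./sandbox" preopen path and the table size are illustrative assumptions, not values taken from this patch; passing a NULL allocator selects the built-in default allocator.

/* Sketch only, not part of the patch. Illustrative values are marked below. */
#include <stdio.h>
#include "uvwasi.h"

int main(void) {
  uvwasi_t uvwasi;
  uvwasi_options_t init_options;
  uvwasi_preopen_t preopen;
  uvwasi_errno_t err;

  preopen.mapped_path = "/";        /* Path as seen by the WASI application. */
  preopen.real_path = "./sandbox";  /* Host directory backing the preopen (illustrative). */

  init_options.fd_table_size = 3;   /* Must be at least 3 (stdio); the table grows as needed. */
  init_options.preopenc = 1;
  init_options.preopens = &preopen;
  init_options.argc = 0;
  init_options.argv = NULL;
  init_options.envp = NULL;
  init_options.allocator = NULL;    /* NULL selects the default allocator. */

  err = uvwasi_init(&uvwasi, &init_options);
  if (err != UVWASI_ESUCCESS) {
    fprintf(stderr, "uvwasi_init: %s\n",
            uvwasi_embedder_err_code_to_string(err));
    return 1;
  }

  /* ... route the guest's WASI imports to the uvwasi_* calls declared below ... */

  uvwasi_destroy(&uvwasi);
  return 0;
}
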
+uvwasi_errno_t uvwasi_args_get(uvwasi_t* uvwasi, char** argv, char* argv_buf); +uvwasi_errno_t uvwasi_args_sizes_get(uvwasi_t* uvwasi, + size_t* argc, + size_t* argv_buf_size); +uvwasi_errno_t uvwasi_clock_res_get(uvwasi_t* uvwasi, + uvwasi_clockid_t clock_id, + uvwasi_timestamp_t* resolution); +uvwasi_errno_t uvwasi_clock_time_get(uvwasi_t* uvwasi, + uvwasi_clockid_t clock_id, + uvwasi_timestamp_t precision, + uvwasi_timestamp_t* time); +uvwasi_errno_t uvwasi_environ_get(uvwasi_t* uvwasi, + char** environment, + char* environ_buf); +uvwasi_errno_t uvwasi_environ_sizes_get(uvwasi_t* uvwasi, + size_t* environ_count, + size_t* environ_buf_size); +uvwasi_errno_t uvwasi_fd_advise(uvwasi_t* uvwasi, + uvwasi_fd_t fd, + uvwasi_filesize_t offset, + uvwasi_filesize_t len, + uvwasi_advice_t advice); +uvwasi_errno_t uvwasi_fd_allocate(uvwasi_t* uvwasi, + uvwasi_fd_t fd, + uvwasi_filesize_t offset, + uvwasi_filesize_t len); +uvwasi_errno_t uvwasi_fd_close(uvwasi_t* uvwasi, uvwasi_fd_t fd); +uvwasi_errno_t uvwasi_fd_datasync(uvwasi_t* uvwasi, uvwasi_fd_t fd); +uvwasi_errno_t uvwasi_fd_fdstat_get(uvwasi_t* uvwasi, + uvwasi_fd_t fd, + uvwasi_fdstat_t* buf); +uvwasi_errno_t uvwasi_fd_fdstat_set_flags(uvwasi_t* uvwasi, + uvwasi_fd_t fd, + uvwasi_fdflags_t flags); +uvwasi_errno_t uvwasi_fd_fdstat_set_rights(uvwasi_t* uvwasi, + uvwasi_fd_t fd, + uvwasi_rights_t fs_rights_base, + uvwasi_rights_t fs_rights_inheriting + ); +uvwasi_errno_t uvwasi_fd_filestat_get(uvwasi_t* uvwasi, + uvwasi_fd_t fd, + uvwasi_filestat_t* buf); +uvwasi_errno_t uvwasi_fd_filestat_set_size(uvwasi_t* uvwasi, + uvwasi_fd_t fd, + uvwasi_filesize_t st_size); +uvwasi_errno_t uvwasi_fd_filestat_set_times(uvwasi_t* uvwasi, + uvwasi_fd_t fd, + uvwasi_timestamp_t st_atim, + uvwasi_timestamp_t st_mtim, + uvwasi_fstflags_t fst_flags); +uvwasi_errno_t uvwasi_fd_pread(uvwasi_t* uvwasi, + uvwasi_fd_t fd, + const uvwasi_iovec_t* iovs, + size_t iovs_len, + uvwasi_filesize_t offset, + size_t* nread); +uvwasi_errno_t uvwasi_fd_prestat_get(uvwasi_t* uvwasi, + uvwasi_fd_t fd, + uvwasi_prestat_t* buf); +uvwasi_errno_t uvwasi_fd_prestat_dir_name(uvwasi_t* uvwasi, + uvwasi_fd_t fd, + char* path, + size_t path_len); +uvwasi_errno_t uvwasi_fd_pwrite(uvwasi_t* uvwasi, + uvwasi_fd_t fd, + const uvwasi_ciovec_t* iovs, + size_t iovs_len, + uvwasi_filesize_t offset, + size_t* nwritten); +uvwasi_errno_t uvwasi_fd_read(uvwasi_t* uvwasi, + uvwasi_fd_t fd, + const uvwasi_iovec_t* iovs, + size_t iovs_len, + size_t* nread); +uvwasi_errno_t uvwasi_fd_readdir(uvwasi_t* uvwasi, + uvwasi_fd_t fd, + void* buf, + size_t buf_len, + uvwasi_dircookie_t cookie, + size_t* bufused); +uvwasi_errno_t uvwasi_fd_renumber(uvwasi_t* uvwasi, + uvwasi_fd_t from, + uvwasi_fd_t to); +uvwasi_errno_t uvwasi_fd_seek(uvwasi_t* uvwasi, + uvwasi_fd_t fd, + uvwasi_filedelta_t offset, + uvwasi_whence_t whence, + uvwasi_filesize_t* newoffset); +uvwasi_errno_t uvwasi_fd_sync(uvwasi_t* uvwasi, uvwasi_fd_t fd); +uvwasi_errno_t uvwasi_fd_tell(uvwasi_t* uvwasi, + uvwasi_fd_t fd, + uvwasi_filesize_t* offset); +uvwasi_errno_t uvwasi_fd_write(uvwasi_t* uvwasi, + uvwasi_fd_t fd, + const uvwasi_ciovec_t* iovs, + size_t iovs_len, + size_t* nwritten); +uvwasi_errno_t uvwasi_path_create_directory(uvwasi_t* uvwasi, + uvwasi_fd_t fd, + const char* path, + size_t path_len); +uvwasi_errno_t uvwasi_path_filestat_get(uvwasi_t* uvwasi, + uvwasi_fd_t fd, + uvwasi_lookupflags_t flags, + const char* path, + size_t path_len, + uvwasi_filestat_t* buf); +uvwasi_errno_t uvwasi_path_filestat_set_times(uvwasi_t* uvwasi, + 
uvwasi_fd_t fd, + uvwasi_lookupflags_t flags, + const char* path, + size_t path_len, + uvwasi_timestamp_t st_atim, + uvwasi_timestamp_t st_mtim, + uvwasi_fstflags_t fst_flags); +uvwasi_errno_t uvwasi_path_link(uvwasi_t* uvwasi, + uvwasi_fd_t old_fd, + uvwasi_lookupflags_t old_flags, + const char* old_path, + size_t old_path_len, + uvwasi_fd_t new_fd, + const char* new_path, + size_t new_path_len); +uvwasi_errno_t uvwasi_path_open(uvwasi_t* uvwasi, + uvwasi_fd_t dirfd, + uvwasi_lookupflags_t dirflags, + const char* path, + size_t path_len, + uvwasi_oflags_t o_flags, + uvwasi_rights_t fs_rights_base, + uvwasi_rights_t fs_rights_inheriting, + uvwasi_fdflags_t fs_flags, + uvwasi_fd_t* fd); +uvwasi_errno_t uvwasi_path_readlink(uvwasi_t* uvwasi, + uvwasi_fd_t fd, + const char* path, + size_t path_len, + char* buf, + size_t buf_len, + size_t* bufused); +uvwasi_errno_t uvwasi_path_remove_directory(uvwasi_t* uvwasi, + uvwasi_fd_t fd, + const char* path, + size_t path_len); +uvwasi_errno_t uvwasi_path_rename(uvwasi_t* uvwasi, + uvwasi_fd_t old_fd, + const char* old_path, + size_t old_path_len, + uvwasi_fd_t new_fd, + const char* new_path, + size_t new_path_len); +uvwasi_errno_t uvwasi_path_symlink(uvwasi_t* uvwasi, + const char* old_path, + size_t old_path_len, + uvwasi_fd_t fd, + const char* new_path, + size_t new_path_len); +uvwasi_errno_t uvwasi_path_unlink_file(uvwasi_t* uvwasi, + uvwasi_fd_t fd, + const char* path, + size_t path_len); +uvwasi_errno_t uvwasi_poll_oneoff(uvwasi_t* uvwasi, + const uvwasi_subscription_t* in, + uvwasi_event_t* out, + size_t nsubscriptions, + size_t* nevents); +uvwasi_errno_t uvwasi_proc_exit(uvwasi_t* uvwasi, uvwasi_exitcode_t rval); +uvwasi_errno_t uvwasi_proc_raise(uvwasi_t* uvwasi, uvwasi_signal_t sig); +uvwasi_errno_t uvwasi_random_get(uvwasi_t* uvwasi, void* buf, size_t buf_len); +uvwasi_errno_t uvwasi_sched_yield(uvwasi_t* uvwasi); +uvwasi_errno_t uvwasi_sock_recv(uvwasi_t* uvwasi, + uvwasi_fd_t sock, + const uvwasi_iovec_t* ri_data, + size_t ri_data_len, + uvwasi_riflags_t ri_flags, + size_t* ro_datalen, + uvwasi_roflags_t* ro_flags); +uvwasi_errno_t uvwasi_sock_send(uvwasi_t* uvwasi, + uvwasi_fd_t sock, + const uvwasi_ciovec_t* si_data, + size_t si_data_len, + uvwasi_siflags_t si_flags, + size_t* so_datalen); +uvwasi_errno_t uvwasi_sock_shutdown(uvwasi_t* uvwasi, + uvwasi_fd_t sock, + uvwasi_sdflags_t how); + +#ifdef __cplusplus +} +#endif + +#endif /* __UVWASI_H__ */ diff --git a/deps/uvwasi/include/wasi_types.h b/deps/uvwasi/include/wasi_types.h new file mode 100644 index 00000000000000..ec1013663f6a76 --- /dev/null +++ b/deps/uvwasi/include/wasi_types.h @@ -0,0 +1,322 @@ +#ifndef __UVWASI_WASI_TYPES_H__ +#define __UVWASI_WASI_TYPES_H__ + +#include +#include + +/* API: https://github.com/WebAssembly/WASI/blob/master/phases/unstable/docs/wasi_unstable_preview0.md */ + +typedef uint8_t uvwasi_advice_t; +#define UVWASI_ADVICE_NORMAL 0 +#define UVWASI_ADVICE_SEQUENTIAL 1 +#define UVWASI_ADVICE_RANDOM 2 +#define UVWASI_ADVICE_WILLNEED 3 +#define UVWASI_ADVICE_DONTNEED 4 +#define UVWASI_ADVICE_NOREUSE 5 + +typedef struct uvwasi_ciovec_s { + const void* buf; + size_t buf_len; +} uvwasi_ciovec_t; + +typedef uint32_t uvwasi_clockid_t; +#define UVWASI_CLOCK_REALTIME 0 +#define UVWASI_CLOCK_MONOTONIC 1 +#define UVWASI_CLOCK_PROCESS_CPUTIME_ID 2 +#define UVWASI_CLOCK_THREAD_CPUTIME_ID 3 + +typedef uint64_t uvwasi_device_t; + +typedef uint64_t uvwasi_dircookie_t; +#define UVWASI_DIRCOOKIE_START 0 + +typedef uint16_t uvwasi_errno_t; +#define UVWASI_ESUCCESS 0 +#define 
UVWASI_E2BIG 1 +#define UVWASI_EACCES 2 +#define UVWASI_EADDRINUSE 3 +#define UVWASI_EADDRNOTAVAIL 4 +#define UVWASI_EAFNOSUPPORT 5 +#define UVWASI_EAGAIN 6 +#define UVWASI_EALREADY 7 +#define UVWASI_EBADF 8 +#define UVWASI_EBADMSG 9 +#define UVWASI_EBUSY 10 +#define UVWASI_ECANCELED 11 +#define UVWASI_ECHILD 12 +#define UVWASI_ECONNABORTED 13 +#define UVWASI_ECONNREFUSED 14 +#define UVWASI_ECONNRESET 15 +#define UVWASI_EDEADLK 16 +#define UVWASI_EDESTADDRREQ 17 +#define UVWASI_EDOM 18 +#define UVWASI_EDQUOT 19 +#define UVWASI_EEXIST 20 +#define UVWASI_EFAULT 21 +#define UVWASI_EFBIG 22 +#define UVWASI_EHOSTUNREACH 23 +#define UVWASI_EIDRM 24 +#define UVWASI_EILSEQ 25 +#define UVWASI_EINPROGRESS 26 +#define UVWASI_EINTR 27 +#define UVWASI_EINVAL 28 +#define UVWASI_EIO 29 +#define UVWASI_EISCONN 30 +#define UVWASI_EISDIR 31 +#define UVWASI_ELOOP 32 +#define UVWASI_EMFILE 33 +#define UVWASI_EMLINK 34 +#define UVWASI_EMSGSIZE 35 +#define UVWASI_EMULTIHOP 36 +#define UVWASI_ENAMETOOLONG 37 +#define UVWASI_ENETDOWN 38 +#define UVWASI_ENETRESET 39 +#define UVWASI_ENETUNREACH 40 +#define UVWASI_ENFILE 41 +#define UVWASI_ENOBUFS 42 +#define UVWASI_ENODEV 43 +#define UVWASI_ENOENT 44 +#define UVWASI_ENOEXEC 45 +#define UVWASI_ENOLCK 46 +#define UVWASI_ENOLINK 47 +#define UVWASI_ENOMEM 48 +#define UVWASI_ENOMSG 49 +#define UVWASI_ENOPROTOOPT 50 +#define UVWASI_ENOSPC 51 +#define UVWASI_ENOSYS 52 +#define UVWASI_ENOTCONN 53 +#define UVWASI_ENOTDIR 54 +#define UVWASI_ENOTEMPTY 55 +#define UVWASI_ENOTRECOVERABLE 56 +#define UVWASI_ENOTSOCK 57 +#define UVWASI_ENOTSUP 58 +#define UVWASI_ENOTTY 59 +#define UVWASI_ENXIO 60 +#define UVWASI_EOVERFLOW 61 +#define UVWASI_EOWNERDEAD 62 +#define UVWASI_EPERM 63 +#define UVWASI_EPIPE 64 +#define UVWASI_EPROTO 65 +#define UVWASI_EPROTONOSUPPORT 66 +#define UVWASI_EPROTOTYPE 67 +#define UVWASI_ERANGE 68 +#define UVWASI_EROFS 69 +#define UVWASI_ESPIPE 70 +#define UVWASI_ESRCH 71 +#define UVWASI_ESTALE 72 +#define UVWASI_ETIMEDOUT 73 +#define UVWASI_ETXTBSY 74 +#define UVWASI_EXDEV 75 +#define UVWASI_ENOTCAPABLE 76 + +typedef uint16_t uvwasi_eventrwflags_t; /* Bitfield */ +#define UVWASI_EVENT_FD_READWRITE_HANGUP (1 << 0) + +typedef uint8_t uvwasi_eventtype_t; +#define UVWASI_EVENTTYPE_CLOCK 0 +#define UVWASI_EVENTTYPE_FD_READ 1 +#define UVWASI_EVENTTYPE_FD_WRITE 2 + +typedef uint32_t uvwasi_exitcode_t; + +typedef uint32_t uvwasi_fd_t; + +typedef uint16_t uvwasi_fdflags_t; /* Bitfield */ +#define UVWASI_FDFLAG_APPEND (1 << 0) +#define UVWASI_FDFLAG_DSYNC (1 << 1) +#define UVWASI_FDFLAG_NONBLOCK (1 << 2) +#define UVWASI_FDFLAG_RSYNC (1 << 3) +#define UVWASI_FDFLAG_SYNC (1 << 4) + +typedef int64_t uvwasi_filedelta_t; + +typedef uint64_t uvwasi_filesize_t; + +typedef uint8_t uvwasi_filetype_t; +#define UVWASI_FILETYPE_UNKNOWN 0 +#define UVWASI_FILETYPE_BLOCK_DEVICE 1 +#define UVWASI_FILETYPE_CHARACTER_DEVICE 2 +#define UVWASI_FILETYPE_DIRECTORY 3 +#define UVWASI_FILETYPE_REGULAR_FILE 4 +#define UVWASI_FILETYPE_SOCKET_DGRAM 5 +#define UVWASI_FILETYPE_SOCKET_STREAM 6 +#define UVWASI_FILETYPE_SYMBOLIC_LINK 7 + +typedef uint16_t uvwasi_fstflags_t; /* Bitfield */ +#define UVWASI_FILESTAT_SET_ATIM (1 << 0) +#define UVWASI_FILESTAT_SET_ATIM_NOW (1 << 1) +#define UVWASI_FILESTAT_SET_MTIM (1 << 2) +#define UVWASI_FILESTAT_SET_MTIM_NOW (1 << 3) + +typedef uint64_t uvwasi_inode_t; + +typedef struct uvwasi_iovec_s { + void* buf; + size_t buf_len; +} uvwasi_iovec_t; + +typedef uint64_t uvwasi_linkcount_t; + +typedef uint32_t uvwasi_lookupflags_t; /* Bitfield */ +#define 
UVWASI_LOOKUP_SYMLINK_FOLLOW (1 << 0) + +typedef uint16_t uvwasi_oflags_t; /* Bitfield */ +#define UVWASI_O_CREAT (1 << 0) +#define UVWASI_O_DIRECTORY (1 << 1) +#define UVWASI_O_EXCL (1 << 2) +#define UVWASI_O_TRUNC (1 << 3) + +typedef uint8_t uvwasi_preopentype_t; +#define UVWASI_PREOPENTYPE_DIR 0 + +typedef struct uvwasi_prestat_s { + uvwasi_preopentype_t pr_type; + union uvwasi_prestat_u { + struct uvwasi_prestat_dir_t { + size_t pr_name_len; + } dir; + } u; +} uvwasi_prestat_t; + +typedef uint16_t uvwasi_riflags_t; /* Bitfield */ +#define UVWASI_SOCK_RECV_PEEK (1 << 0) +#define UVWASI_SOCK_RECV_WAITALL (1 << 1) + +typedef uint64_t uvwasi_rights_t; /* Bitfield */ +#define UVWASI_RIGHT_FD_DATASYNC (1 << 0) +#define UVWASI_RIGHT_FD_READ (1 << 1) +#define UVWASI_RIGHT_FD_SEEK (1 << 2) +#define UVWASI_RIGHT_FD_FDSTAT_SET_FLAGS (1 << 3) +#define UVWASI_RIGHT_FD_SYNC (1 << 4) +#define UVWASI_RIGHT_FD_TELL (1 << 5) +#define UVWASI_RIGHT_FD_WRITE (1 << 6) +#define UVWASI_RIGHT_FD_ADVISE (1 << 7) +#define UVWASI_RIGHT_FD_ALLOCATE (1 << 8) +#define UVWASI_RIGHT_PATH_CREATE_DIRECTORY (1 << 9) +#define UVWASI_RIGHT_PATH_CREATE_FILE (1 << 10) +#define UVWASI_RIGHT_PATH_LINK_SOURCE (1 << 11) +#define UVWASI_RIGHT_PATH_LINK_TARGET (1 << 12) +#define UVWASI_RIGHT_PATH_OPEN (1 << 13) +#define UVWASI_RIGHT_FD_READDIR (1 << 14) +#define UVWASI_RIGHT_PATH_READLINK (1 << 15) +#define UVWASI_RIGHT_PATH_RENAME_SOURCE (1 << 16) +#define UVWASI_RIGHT_PATH_RENAME_TARGET (1 << 17) +#define UVWASI_RIGHT_PATH_FILESTAT_GET (1 << 18) +#define UVWASI_RIGHT_PATH_FILESTAT_SET_SIZE (1 << 19) +#define UVWASI_RIGHT_PATH_FILESTAT_SET_TIMES (1 << 20) +#define UVWASI_RIGHT_FD_FILESTAT_GET (1 << 21) +#define UVWASI_RIGHT_FD_FILESTAT_SET_SIZE (1 << 22) +#define UVWASI_RIGHT_FD_FILESTAT_SET_TIMES (1 << 23) +#define UVWASI_RIGHT_PATH_SYMLINK (1 << 24) +#define UVWASI_RIGHT_PATH_REMOVE_DIRECTORY (1 << 25) +#define UVWASI_RIGHT_PATH_UNLINK_FILE (1 << 26) +#define UVWASI_RIGHT_POLL_FD_READWRITE (1 << 27) +#define UVWASI_RIGHT_SOCK_SHUTDOWN (1 << 28) + +typedef uint16_t uvwasi_roflags_t; /* Bitfield */ +#define UVWASI_SOCK_RECV_DATA_TRUNCATED (1 << 0) + +typedef uint8_t uvwasi_sdflags_t; /* Bitfield */ +#define UVWASI_SHUT_RD (1 << 0) +#define UVWASI_SHUT_WR (1 << 1) + +typedef uint16_t uvwasi_siflags_t; /* Bitfield */ + +typedef uint8_t uvwasi_signal_t; +#define UVWASI_SIGHUP 1 +#define UVWASI_SIGINT 2 +#define UVWASI_SIGQUIT 3 +#define UVWASI_SIGILL 4 +#define UVWASI_SIGTRAP 5 +#define UVWASI_SIGABRT 6 +#define UVWASI_SIGBUS 7 +#define UVWASI_SIGFPE 8 +#define UVWASI_SIGKILL 9 +#define UVWASI_SIGUSR1 10 +#define UVWASI_SIGSEGV 11 +#define UVWASI_SIGUSR2 12 +#define UVWASI_SIGPIPE 13 +#define UVWASI_SIGALRM 14 +#define UVWASI_SIGTERM 15 +#define UVWASI_SIGCHLD 16 +#define UVWASI_SIGCONT 17 +#define UVWASI_SIGSTOP 18 +#define UVWASI_SIGTSTP 19 +#define UVWASI_SIGTTIN 20 +#define UVWASI_SIGTTOU 21 +#define UVWASI_SIGURG 22 +#define UVWASI_SIGXCPU 23 +#define UVWASI_SIGXFSZ 24 +#define UVWASI_SIGVTALRM 25 +#define UVWASI_SIGPROF 26 +#define UVWASI_SIGWINCH 27 +#define UVWASI_SIGPOLL 28 +#define UVWASI_SIGPWR 29 +#define UVWASI_SIGSYS 30 + +typedef uint16_t uvwasi_subclockflags_t; /* Bitfield */ +#define UVWASI_SUBSCRIPTION_CLOCK_ABSTIME (1 << 0) + +typedef uint64_t uvwasi_timestamp_t; + +typedef uint64_t uvwasi_userdata_t; + +typedef struct uvwasi_subscription_s { + uvwasi_userdata_t userdata; + uvwasi_eventtype_t type; + union { + struct { + uvwasi_clockid_t clock_id; + uvwasi_timestamp_t timeout; + uvwasi_timestamp_t precision; + 
uvwasi_subclockflags_t flags; + } clock; + struct { + uvwasi_fd_t fd; + } fd_readwrite; + } u; +} uvwasi_subscription_t; + +typedef struct uvwasi_dirent_s { + uvwasi_dircookie_t d_next; + uvwasi_inode_t d_ino; + uint32_t d_namlen; + uvwasi_filetype_t d_type; +} uvwasi_dirent_t; + +typedef struct uvwasi_fdstat_s { + uvwasi_filetype_t fs_filetype; + uvwasi_fdflags_t fs_flags; + uvwasi_rights_t fs_rights_base; + uvwasi_rights_t fs_rights_inheriting; +} uvwasi_fdstat_t; + +typedef struct uvwasi_filestat_s { + uvwasi_device_t st_dev; + uvwasi_inode_t st_ino; + uvwasi_filetype_t st_filetype; + uvwasi_linkcount_t st_nlink; + uvwasi_filesize_t st_size; + uvwasi_timestamp_t st_atim; + uvwasi_timestamp_t st_mtim; + uvwasi_timestamp_t st_ctim; +} uvwasi_filestat_t; + +typedef struct uvwasi_event_s { + uvwasi_userdata_t userdata; + uvwasi_errno_t error; + uvwasi_eventtype_t type; + union { + struct { + uvwasi_filesize_t nbytes; + uvwasi_eventrwflags_t flags; + } fd_readwrite; + } u; +} uvwasi_event_t; + +typedef uint8_t uvwasi_whence_t; +#define UVWASI_WHENCE_SET 0 +#define UVWASI_WHENCE_CUR 1 +#define UVWASI_WHENCE_END 2 + +#endif /* __UVWASI_WASI_TYPES_H__ */ diff --git a/deps/uvwasi/src/clocks.c b/deps/uvwasi/src/clocks.c new file mode 100644 index 00000000000000..e1fbc696b62f05 --- /dev/null +++ b/deps/uvwasi/src/clocks.c @@ -0,0 +1,194 @@ +#ifndef _WIN32 +# include +# include +# include +# include +#endif /* _WIN32 */ + +#include "uv.h" +#include "wasi_types.h" +#include "uv_mapping.h" + + +#define UVWASI__WIN_TIME_AND_RETURN(handle, time) \ + do { \ + FILETIME create; \ + FILETIME exit; \ + FILETIME system; \ + FILETIME user; \ + SYSTEMTIME sys_system; \ + SYSTEMTIME sys_user; \ + if (0 == GetProcessTimes((handle), &create, &exit, &system, &user)) { \ + return uvwasi__translate_uv_error( \ + uv_translate_sys_error(GetLastError()) \ + ); \ + } \ + \ + if (0 == FileTimeToSystemTime(&system, &sys_system)) { \ + return uvwasi__translate_uv_error( \ + uv_translate_sys_error(GetLastError()) \ + ); \ + } \ + \ + if (0 == FileTimeToSystemTime(&user, &sys_user)) { \ + return uvwasi__translate_uv_error( \ + uv_translate_sys_error(GetLastError()) \ + ); \ + } \ + \ + (time) = (((sys_system.wHour * 3600) + (sys_system.wMinute * 60) + \ + sys_system.wSecond) * NANOS_PER_SEC) + \ + (sys_system.wMilliseconds * 1000000) + \ + (((sys_user.wHour * 3600) + (sys_user.wMinute * 60) + \ + sys_user.wSecond) * NANOS_PER_SEC) + \ + (sys_user.wMilliseconds * 1000000); \ + return UVWASI_ESUCCESS; \ + } while (0) + + +#define UVWASI__CLOCK_GETTIME_AND_RETURN(clk, time) \ + do { \ + struct timespec ts; \ + if (0 != clock_gettime((clk), &ts)) \ + return uvwasi__translate_uv_error(uv_translate_sys_error(errno)); \ + (time) = (ts.tv_sec * NANOS_PER_SEC) + ts.tv_nsec; \ + return UVWASI_ESUCCESS; \ + } while (0) + + +#define UVWASI__GETRUSAGE_AND_RETURN(who, time) \ + do { \ + struct rusage ru; \ + if (0 != getrusage((who), &ru)) \ + return uvwasi__translate_uv_error(uv_translate_sys_error(errno)); \ + (time) = (ru.ru_utime.tv_sec * NANOS_PER_SEC) + \ + (ru.ru_utime.tv_usec * 1000) + \ + (ru.ru_stime.tv_sec * NANOS_PER_SEC) + \ + (ru.ru_stime.tv_usec * 1000); \ + return UVWASI_ESUCCESS; \ + } while (0) + + +#define UVWASI__OSX_THREADTIME_AND_RETURN(time) \ + do { \ + mach_port_t thread; \ + thread_basic_info_data_t info; \ + mach_msg_type_number_t count; \ + count = THREAD_BASIC_INFO_COUNT; \ + thread = pthread_mach_thread_np(pthread_self()); \ + if (KERN_SUCCESS != thread_info(thread, \ + THREAD_BASIC_INFO, \ + (thread_info_t) 
&info, \ + &count)) { \ + return UVWASI_ENOSYS; \ + } \ + (time) = (info.user_time.seconds * NANOS_PER_SEC) + \ + (info.user_time.microseconds * 1000) + \ + (info.system_time.seconds * NANOS_PER_SEC) + \ + (info.system_time.microseconds * 1000); \ + return UVWASI_ESUCCESS; \ + } while (0) + + +#define UVWASI__WIN_GETRES_AND_RETURN(time) \ + do { \ + /* The GetProcessTimes() docs claim a resolution of 100 ns. */ \ + (time) = 100; \ + return UVWASI_ESUCCESS; \ + } while (0) + + +#define UVWASI__CLOCK_GETRES_AND_RETURN(clk, time) \ + do { \ + struct timespec ts; \ + /* Try calling clock_getres(). If it doesn't succeed, then default to \ + 1000000. We implement all of the clocks, and some platforms (such as \ + SmartOS) don't support all of the clocks, even though they define \ + the constants for them. */ \ + if (0 != clock_getres((clk), &ts)) \ + (time) = 1000000; \ + else \ + (time) = (ts.tv_sec * NANOS_PER_SEC) + ts.tv_nsec; \ + return UVWASI_ESUCCESS; \ + } while (0) + + +#define UVWASI__SLOW_GETRES_AND_RETURN(time) \ + do { \ + /* Assume a "worst case" of 1000000 ns resolution. */ \ + (time) = 1000000; \ + return UVWASI_ESUCCESS; \ + } while (0) + + +uvwasi_errno_t uvwasi__clock_gettime_realtime(uvwasi_timestamp_t* time) { + uv_timeval64_t tv; + int r; + + r = uv_gettimeofday(&tv); + if (r != 0) + return uvwasi__translate_uv_error(r); + + *time = (tv.tv_sec * NANOS_PER_SEC) + (tv.tv_usec * 1000); + return UVWASI_ESUCCESS; +} + + +uvwasi_errno_t uvwasi__clock_gettime_process_cputime(uvwasi_timestamp_t* time) { +#if defined(_WIN32) + UVWASI__WIN_TIME_AND_RETURN(GetCurrentProcess(), *time); +#elif defined(CLOCK_PROCESS_CPUTIME_ID) && \ + !defined(__APPLE__) && \ + !defined(__sun) + UVWASI__CLOCK_GETTIME_AND_RETURN(CLOCK_PROCESS_CPUTIME_ID, *time); +#else + UVWASI__GETRUSAGE_AND_RETURN(RUSAGE_SELF, *time); +#endif +} + + +uvwasi_errno_t uvwasi__clock_gettime_thread_cputime(uvwasi_timestamp_t* time) { +#if defined(_WIN32) + UVWASI__WIN_TIME_AND_RETURN(GetCurrentThread(), *time); +#elif defined(__APPLE__) + UVWASI__OSX_THREADTIME_AND_RETURN(*time); +#elif defined(CLOCK_THREAD_CPUTIME_ID) && !defined(__sun) + UVWASI__CLOCK_GETTIME_AND_RETURN(CLOCK_THREAD_CPUTIME_ID, *time); +#else +# if defined(RUSAGE_LWP) + UVWASI__GETRUSAGE_AND_RETURN(RUSAGE_LWP, *time); +# elif defined(RUSAGE_THREAD) + UVWASI__GETRUSAGE_AND_RETURN(RUSAGE_THREAD, *time); +# else + return UVWASI_ENOSYS; +# endif /* RUSAGE_LWP */ +#endif +} + + +uvwasi_errno_t uvwasi__clock_getres_process_cputime(uvwasi_timestamp_t* time) { +#if defined(_WIN32) + UVWASI__WIN_GETRES_AND_RETURN(*time); +#elif defined(CLOCK_PROCESS_CPUTIME_ID) && \ + !defined(__APPLE__) && \ + !defined(__sun) + UVWASI__CLOCK_GETRES_AND_RETURN(CLOCK_PROCESS_CPUTIME_ID, *time); +#else + UVWASI__SLOW_GETRES_AND_RETURN(*time); +#endif +} + + +uvwasi_errno_t uvwasi__clock_getres_thread_cputime(uvwasi_timestamp_t* time) { +#if defined(_WIN32) + UVWASI__WIN_GETRES_AND_RETURN(*time); +#elif defined(__APPLE__) + UVWASI__SLOW_GETRES_AND_RETURN(*time); +#elif defined(CLOCK_THREAD_CPUTIME_ID) && !defined(__sun) + UVWASI__CLOCK_GETTIME_AND_RETURN(CLOCK_THREAD_CPUTIME_ID, *time); +#elif defined(RUSAGE_THREAD) || defined(RUSAGE_LWP) + UVWASI__SLOW_GETRES_AND_RETURN(*time); +#else + return UVWASI_ENOSYS; +#endif +} diff --git a/deps/uvwasi/src/fd_table.c b/deps/uvwasi/src/fd_table.c new file mode 100644 index 00000000000000..f3855d4da54dbe --- /dev/null +++ b/deps/uvwasi/src/fd_table.c @@ -0,0 +1,524 @@ +#include +#include +#include + +#ifndef _WIN32 +# include +#endif /* _WIN32 
*/ + +#include "uv.h" +#include "fd_table.h" +#include "wasi_types.h" +#include "uv_mapping.h" +#include "uvwasi_alloc.h" + + +#define UVWASI__RIGHTS_ALL (UVWASI_RIGHT_FD_DATASYNC | \ + UVWASI_RIGHT_FD_READ | \ + UVWASI_RIGHT_FD_SEEK | \ + UVWASI_RIGHT_FD_FDSTAT_SET_FLAGS | \ + UVWASI_RIGHT_FD_SYNC | \ + UVWASI_RIGHT_FD_TELL | \ + UVWASI_RIGHT_FD_WRITE | \ + UVWASI_RIGHT_FD_ADVISE | \ + UVWASI_RIGHT_FD_ALLOCATE | \ + UVWASI_RIGHT_PATH_CREATE_DIRECTORY | \ + UVWASI_RIGHT_PATH_CREATE_FILE | \ + UVWASI_RIGHT_PATH_LINK_SOURCE | \ + UVWASI_RIGHT_PATH_LINK_TARGET | \ + UVWASI_RIGHT_PATH_OPEN | \ + UVWASI_RIGHT_FD_READDIR | \ + UVWASI_RIGHT_PATH_READLINK | \ + UVWASI_RIGHT_PATH_RENAME_SOURCE | \ + UVWASI_RIGHT_PATH_RENAME_TARGET | \ + UVWASI_RIGHT_PATH_FILESTAT_GET | \ + UVWASI_RIGHT_PATH_FILESTAT_SET_SIZE | \ + UVWASI_RIGHT_PATH_FILESTAT_SET_TIMES | \ + UVWASI_RIGHT_FD_FILESTAT_GET | \ + UVWASI_RIGHT_FD_FILESTAT_SET_TIMES | \ + UVWASI_RIGHT_FD_FILESTAT_SET_SIZE | \ + UVWASI_RIGHT_PATH_SYMLINK | \ + UVWASI_RIGHT_PATH_UNLINK_FILE | \ + UVWASI_RIGHT_PATH_REMOVE_DIRECTORY | \ + UVWASI_RIGHT_POLL_FD_READWRITE | \ + UVWASI_RIGHT_SOCK_SHUTDOWN) + +#define UVWASI__RIGHTS_BLOCK_DEVICE_BASE UVWASI__RIGHTS_ALL +#define UVWASI__RIGHTS_BLOCK_DEVICE_INHERITING UVWASI__RIGHTS_ALL + +#define UVWASI__RIGHTS_CHARACTER_DEVICE_BASE UVWASI__RIGHTS_ALL +#define UVWASI__RIGHTS_CHARACTER_DEVICE_INHERITING UVWASI__RIGHTS_ALL + +#define UVWASI__RIGHTS_REGULAR_FILE_BASE (UVWASI_RIGHT_FD_DATASYNC | \ + UVWASI_RIGHT_FD_READ | \ + UVWASI_RIGHT_FD_SEEK | \ + UVWASI_RIGHT_FD_FDSTAT_SET_FLAGS | \ + UVWASI_RIGHT_FD_SYNC | \ + UVWASI_RIGHT_FD_TELL | \ + UVWASI_RIGHT_FD_WRITE | \ + UVWASI_RIGHT_FD_ADVISE | \ + UVWASI_RIGHT_FD_ALLOCATE | \ + UVWASI_RIGHT_FD_FILESTAT_GET | \ + UVWASI_RIGHT_FD_FILESTAT_SET_SIZE | \ + UVWASI_RIGHT_FD_FILESTAT_SET_TIMES |\ + UVWASI_RIGHT_POLL_FD_READWRITE) +#define UVWASI__RIGHTS_REGULAR_FILE_INHERITING 0 + +#define UVWASI__RIGHTS_DIRECTORY_BASE (UVWASI_RIGHT_FD_FDSTAT_SET_FLAGS | \ + UVWASI_RIGHT_FD_SYNC | \ + UVWASI_RIGHT_FD_ADVISE | \ + UVWASI_RIGHT_PATH_CREATE_DIRECTORY | \ + UVWASI_RIGHT_PATH_CREATE_FILE | \ + UVWASI_RIGHT_PATH_LINK_SOURCE | \ + UVWASI_RIGHT_PATH_LINK_TARGET | \ + UVWASI_RIGHT_PATH_OPEN | \ + UVWASI_RIGHT_FD_READDIR | \ + UVWASI_RIGHT_PATH_READLINK | \ + UVWASI_RIGHT_PATH_RENAME_SOURCE | \ + UVWASI_RIGHT_PATH_RENAME_TARGET | \ + UVWASI_RIGHT_PATH_FILESTAT_GET | \ + UVWASI_RIGHT_PATH_FILESTAT_SET_SIZE | \ + UVWASI_RIGHT_PATH_FILESTAT_SET_TIMES | \ + UVWASI_RIGHT_FD_FILESTAT_GET | \ + UVWASI_RIGHT_FD_FILESTAT_SET_TIMES | \ + UVWASI_RIGHT_PATH_SYMLINK | \ + UVWASI_RIGHT_PATH_UNLINK_FILE | \ + UVWASI_RIGHT_PATH_REMOVE_DIRECTORY | \ + UVWASI_RIGHT_POLL_FD_READWRITE) +#define UVWASI__RIGHTS_DIRECTORY_INHERITING (UVWASI__RIGHTS_DIRECTORY_BASE | \ + UVWASI__RIGHTS_REGULAR_FILE_BASE) + +#define UVWASI__RIGHTS_SOCKET_BASE (UVWASI_RIGHT_FD_READ | \ + UVWASI_RIGHT_FD_FDSTAT_SET_FLAGS | \ + UVWASI_RIGHT_FD_WRITE | \ + UVWASI_RIGHT_FD_FILESTAT_GET | \ + UVWASI_RIGHT_POLL_FD_READWRITE | \ + UVWASI_RIGHT_SOCK_SHUTDOWN) +#define UVWASI__RIGHTS_SOCKET_INHERITING UVWASI__RIGHTS_ALL; + +#define UVWASI__RIGHTS_TTY_BASE (UVWASI_RIGHT_FD_READ | \ + UVWASI_RIGHT_FD_FDSTAT_SET_FLAGS | \ + UVWASI_RIGHT_FD_WRITE | \ + UVWASI_RIGHT_FD_FILESTAT_GET | \ + UVWASI_RIGHT_POLL_FD_READWRITE) +#define UVWASI__RIGHTS_TTY_INHERITING 0 + +static uvwasi_errno_t uvwasi__get_type_and_rights(uv_file fd, + int flags, + uvwasi_filetype_t* type, + uvwasi_rights_t* rights_base, + uvwasi_rights_t* rights_inheriting) { + uv_fs_t 
req; + uvwasi_filetype_t filetype; + int read_or_write_only; + int r; + + r = uv_fs_fstat(NULL, &req, fd, NULL); + filetype = uvwasi__stat_to_filetype(&req.statbuf); + uv_fs_req_cleanup(&req); + if (r != 0) + return uvwasi__translate_uv_error(r); + + *type = filetype; + switch (filetype) { + case UVWASI_FILETYPE_REGULAR_FILE: + *rights_base = UVWASI__RIGHTS_REGULAR_FILE_BASE; + *rights_inheriting = UVWASI__RIGHTS_REGULAR_FILE_INHERITING; + break; + + case UVWASI_FILETYPE_DIRECTORY: + *rights_base = UVWASI__RIGHTS_DIRECTORY_BASE; + *rights_inheriting = UVWASI__RIGHTS_DIRECTORY_INHERITING; + break; + + /* uvwasi__stat_to_filetype() cannot differentiate socket types. It only + returns UVWASI_FILETYPE_SOCKET_STREAM. */ + case UVWASI_FILETYPE_SOCKET_STREAM: + if (uv_guess_handle(fd) == UV_UDP) + *type = UVWASI_FILETYPE_SOCKET_DGRAM; + + *rights_base = UVWASI__RIGHTS_SOCKET_BASE; + *rights_inheriting = UVWASI__RIGHTS_SOCKET_INHERITING; + break; + + case UVWASI_FILETYPE_CHARACTER_DEVICE: + if (uv_guess_handle(fd) == UV_TTY) { + *rights_base = UVWASI__RIGHTS_TTY_BASE; + *rights_inheriting = UVWASI__RIGHTS_TTY_INHERITING; + } else { + *rights_base = UVWASI__RIGHTS_CHARACTER_DEVICE_BASE; + *rights_inheriting = UVWASI__RIGHTS_CHARACTER_DEVICE_INHERITING; + } + break; + + case UVWASI_FILETYPE_BLOCK_DEVICE: + *rights_base = UVWASI__RIGHTS_BLOCK_DEVICE_BASE; + *rights_inheriting = UVWASI__RIGHTS_BLOCK_DEVICE_INHERITING; + break; + + default: + *rights_base = 0; + *rights_inheriting = 0; + } + + if (*type == UVWASI_FILETYPE_UNKNOWN) + return UVWASI_EINVAL; + + /* Disable read/write bits depending on access mode. */ + read_or_write_only = flags & (UV_FS_O_RDONLY | UV_FS_O_WRONLY | UV_FS_O_RDWR); + + if (read_or_write_only == UV_FS_O_RDONLY) + *rights_base &= ~UVWASI_RIGHT_FD_WRITE; + else if (read_or_write_only == UV_FS_O_WRONLY) + *rights_base &= ~UVWASI_RIGHT_FD_READ; + + return UVWASI_ESUCCESS; +} + + +static uvwasi_errno_t uvwasi__fd_table_insert(uvwasi_t* uvwasi, + struct uvwasi_fd_table_t* table, + uv_file fd, + const char* mapped_path, + const char* real_path, + uvwasi_filetype_t type, + uvwasi_rights_t rights_base, + uvwasi_rights_t rights_inheriting, + int preopen, + struct uvwasi_fd_wrap_t** wrap) { + struct uvwasi_fd_wrap_t* entry; + struct uvwasi_fd_wrap_t** new_fds; + uvwasi_errno_t err; + uint32_t new_size; + int index; + uint32_t i; + int r; + size_t mp_len; + char* mp_copy; + size_t rp_len; + char* rp_copy; + + mp_len = strlen(mapped_path); + rp_len = strlen(real_path); + entry = (struct uvwasi_fd_wrap_t*) + uvwasi__malloc(uvwasi, sizeof(*entry) + mp_len + rp_len + 2); + if (entry == NULL) return UVWASI_ENOMEM; + + mp_copy = (char*)(entry + 1); + rp_copy = mp_copy + mp_len + 1; + memcpy(mp_copy, mapped_path, mp_len); + mp_copy[mp_len] = '\0'; + memcpy(rp_copy, real_path, rp_len); + rp_copy[rp_len] = '\0'; + + uv_rwlock_wrlock(&table->rwlock); + + /* Check that there is room for a new item. If there isn't, grow the table. */ + if (table->used >= table->size) { + new_size = table->size * 2; + new_fds = uvwasi__realloc(uvwasi, table->fds, new_size * sizeof(*new_fds)); + if (new_fds == NULL) { + uvwasi__free(uvwasi, entry); + err = UVWASI_ENOMEM; + goto exit; + } + + for (i = table->size; i < new_size; ++i) + new_fds[i] = NULL; + + index = table->size; + table->fds = new_fds; + table->size = new_size; + } else { + /* The table is big enough, so find an empty slot for the new data. 
*/ + index = -1; + for (i = 0; i < table->size; ++i) { + if (table->fds[i] == NULL) { + index = i; + break; + } + } + + /* index should never be -1. */ + if (index == -1) { + uvwasi__free(uvwasi, entry); + err = UVWASI_ENOSPC; + goto exit; + } + } + + table->fds[index] = entry; + + r = uv_mutex_init(&entry->mutex); + if (r != 0) { + err = uvwasi__translate_uv_error(r); + goto exit; + } + + entry->id = index; + entry->fd = fd; + entry->path = mp_copy; + entry->real_path = rp_copy; + entry->type = type; + entry->rights_base = rights_base; + entry->rights_inheriting = rights_inheriting; + entry->preopen = preopen; + table->used++; + + if (wrap != NULL) + *wrap = entry; + + err = UVWASI_ESUCCESS; +exit: + uv_rwlock_wrunlock(&table->rwlock); + return err; +} + + +uvwasi_errno_t uvwasi_fd_table_init(uvwasi_t* uvwasi, + struct uvwasi_fd_table_t* table, + uint32_t init_size) { + struct uvwasi_fd_wrap_t* wrap; + uvwasi_filetype_t type; + uvwasi_rights_t base; + uvwasi_rights_t inheriting; + uvwasi_errno_t err; + uvwasi_fd_t i; + int r; + + /* Require an initial size of at least three to store the stdio FDs. */ + if (table == NULL || init_size < 3) + return UVWASI_EINVAL; + + table->fds = NULL; + table->used = 0; + table->size = init_size; + table->fds = uvwasi__calloc(uvwasi, + init_size, + sizeof(struct uvwasi_fd_wrap_t*)); + + if (table->fds == NULL) + return UVWASI_ENOMEM; + + r = uv_rwlock_init(&table->rwlock); + if (r != 0) { + err = uvwasi__translate_uv_error(r); + /* Free table->fds and set it to NULL here. This is done explicitly instead + of jumping to error_exit because uvwasi_fd_table_free() relies on fds + being NULL to know whether or not to destroy the rwlock. + */ + uvwasi__free(uvwasi, table->fds); + table->fds = NULL; + return err; + } + + /* Create the stdio FDs. 
*/ + for (i = 0; i < 3; ++i) { + err = uvwasi__get_type_and_rights(i, + UV_FS_O_RDWR, + &type, + &base, + &inheriting); + if (err != UVWASI_ESUCCESS) + goto error_exit; + + err = uvwasi__fd_table_insert(uvwasi, + table, + i, + "", + "", + type, + base, + inheriting, + 0, + &wrap); + if (err != UVWASI_ESUCCESS) + goto error_exit; + + if (wrap->id != i || wrap->id != (uvwasi_fd_t) wrap->fd) { + err = UVWASI_EBADF; + goto error_exit; + } + } + + return UVWASI_ESUCCESS; +error_exit: + uvwasi_fd_table_free(uvwasi, table); + return err; +} + + +void uvwasi_fd_table_free(uvwasi_t* uvwasi, struct uvwasi_fd_table_t* table) { + struct uvwasi_fd_wrap_t* entry; + uint32_t i; + + if (table == NULL) + return; + + for (i = 0; i < table->size; i++) { + entry = table->fds[i]; + if (entry == NULL) continue; + + uv_mutex_destroy(&entry->mutex); + uvwasi__free(uvwasi, entry); + } + + if (table->fds != NULL) { + uvwasi__free(uvwasi, table->fds); + table->fds = NULL; + table->size = 0; + table->used = 0; + uv_rwlock_destroy(&table->rwlock); + } +} + + +uvwasi_errno_t uvwasi_fd_table_insert_preopen(uvwasi_t* uvwasi, + struct uvwasi_fd_table_t* table, + const uv_file fd, + const char* path, + const char* real_path) { + uvwasi_filetype_t type; + uvwasi_rights_t base; + uvwasi_rights_t inheriting; + uvwasi_errno_t err; + + if (table == NULL || path == NULL || real_path == NULL) + return UVWASI_EINVAL; + + err = uvwasi__get_type_and_rights(fd, 0, &type, &base, &inheriting); + if (err != UVWASI_ESUCCESS) + return err; + + if (type != UVWASI_FILETYPE_DIRECTORY) + return UVWASI_ENOTDIR; + + err = uvwasi__fd_table_insert(uvwasi, + table, + fd, + path, + real_path, + UVWASI_FILETYPE_DIRECTORY, + UVWASI__RIGHTS_DIRECTORY_BASE, + UVWASI__RIGHTS_DIRECTORY_INHERITING, + 1, + NULL); + if (err != UVWASI_ESUCCESS) + return err; + + return UVWASI_ESUCCESS; +} + + +uvwasi_errno_t uvwasi_fd_table_insert_fd(uvwasi_t* uvwasi, + struct uvwasi_fd_table_t* table, + const uv_file fd, + const int flags, + const char* path, + uvwasi_rights_t rights_base, + uvwasi_rights_t rights_inheriting, + struct uvwasi_fd_wrap_t* wrap) { + struct uvwasi_fd_wrap_t* fd_wrap; + uvwasi_filetype_t type; + uvwasi_rights_t max_base; + uvwasi_rights_t max_inheriting; + uvwasi_errno_t r; + + if (table == NULL || path == NULL || wrap == NULL) + return UVWASI_EINVAL; + + r = uvwasi__get_type_and_rights(fd, flags, &type, &max_base, &max_inheriting); + if (r != UVWASI_ESUCCESS) + return r; + + r = uvwasi__fd_table_insert(uvwasi, + table, + fd, + path, + path, + type, + rights_base & max_base, + rights_inheriting & max_inheriting, + 0, + &fd_wrap); + if (r != UVWASI_ESUCCESS) + return r; + + *wrap = *fd_wrap; + return UVWASI_ESUCCESS; +} + + +uvwasi_errno_t uvwasi_fd_table_get(const struct uvwasi_fd_table_t* table, + const uvwasi_fd_t id, + struct uvwasi_fd_wrap_t** wrap, + uvwasi_rights_t rights_base, + uvwasi_rights_t rights_inheriting) { + struct uvwasi_fd_wrap_t* entry; + uvwasi_errno_t err; + + if (table == NULL || wrap == NULL) + return UVWASI_EINVAL; + + uv_rwlock_rdlock((uv_rwlock_t *)&table->rwlock); + + if (id >= table->size) { + err = UVWASI_EBADF; + goto exit; + } + + entry = table->fds[id]; + + if (entry == NULL || entry->id != id) { + err = UVWASI_EBADF; + goto exit; + } + + /* Validate that the fd has the necessary rights. 
*/ + if ((~entry->rights_base & rights_base) != 0 || + (~entry->rights_inheriting & rights_inheriting) != 0) { + err = UVWASI_ENOTCAPABLE; + goto exit; + } + + uv_mutex_lock(&entry->mutex); + *wrap = entry; + err = UVWASI_ESUCCESS; +exit: + uv_rwlock_rdunlock((uv_rwlock_t *)&table->rwlock); + return err; +} + + +uvwasi_errno_t uvwasi_fd_table_remove(uvwasi_t* uvwasi, + struct uvwasi_fd_table_t* table, + const uvwasi_fd_t id) { + struct uvwasi_fd_wrap_t* entry; + uvwasi_errno_t err; + + if (table == NULL) + return UVWASI_EINVAL; + + uv_rwlock_wrlock(&table->rwlock); + + if (id >= table->size) { + err = UVWASI_EBADF; + goto exit; + } + + entry = table->fds[id]; + + if (entry == NULL || entry->id != id) { + err = UVWASI_EBADF; + goto exit; + } + + uv_mutex_destroy(&entry->mutex); + uvwasi__free(uvwasi, entry); + table->fds[id] = NULL; + table->used--; + err = UVWASI_ESUCCESS; +exit: + uv_rwlock_wrunlock(&table->rwlock); + return err; +} diff --git a/deps/uvwasi/src/uv_mapping.c b/deps/uvwasi/src/uv_mapping.c new file mode 100644 index 00000000000000..846dcedbeb6b4f --- /dev/null +++ b/deps/uvwasi/src/uv_mapping.c @@ -0,0 +1,243 @@ +#include + +#ifndef _WIN32 +# include +#endif /* _WIN32 */ + +#include "uv.h" +#include "wasi_types.h" +#include "uv_mapping.h" + +#if !defined(S_ISREG) && defined(S_IFMT) && defined(S_IFREG) +# define S_ISREG(m) (((m) & S_IFMT) == S_IFREG) +#endif + +#if !defined(S_ISDIR) && defined(S_IFMT) && defined(S_IFDIR) +# define S_ISDIR(m) (((m) & S_IFMT) == S_IFDIR) +#endif + +#if !defined(S_ISCHR) && defined(S_IFMT) && defined(S_IFCHR) +# define S_ISCHR(m) (((m) & S_IFMT) == S_IFCHR) +#endif + +#if !defined(S_ISLNK) && defined(S_IFMT) && defined(S_IFLNK) +# define S_ISLNK(m) (((m) & S_IFMT) == S_IFLNK) +#endif + + +uvwasi_errno_t uvwasi__translate_uv_error(int err) { + switch (err) { + case UV_E2BIG: return UVWASI_E2BIG; + case UV_EACCES: return UVWASI_EACCES; + case UV_EADDRINUSE: return UVWASI_EADDRINUSE; + case UV_EADDRNOTAVAIL: return UVWASI_EADDRNOTAVAIL; + case UV_EAFNOSUPPORT: return UVWASI_EAFNOSUPPORT; + case UV_EAGAIN: return UVWASI_EAGAIN; + case UV_EALREADY: return UVWASI_EALREADY; + case UV_EBADF: return UVWASI_EBADF; + case UV_EBUSY: return UVWASI_EBUSY; + case UV_ECANCELED: return UVWASI_ECANCELED; + case UV_ECONNABORTED: return UVWASI_ECONNABORTED; + case UV_ECONNREFUSED: return UVWASI_ECONNREFUSED; + case UV_ECONNRESET: return UVWASI_ECONNRESET; + case UV_EDESTADDRREQ: return UVWASI_EDESTADDRREQ; + case UV_EEXIST: return UVWASI_EEXIST; + case UV_EFAULT: return UVWASI_EFAULT; + case UV_EFBIG: return UVWASI_EFBIG; + case UV_EHOSTUNREACH: return UVWASI_EHOSTUNREACH; + case UV_EINTR: return UVWASI_EINTR; + case UV_EINVAL: return UVWASI_EINVAL; + case UV_EIO: return UVWASI_EIO; + case UV_EISCONN: return UVWASI_EISCONN; + case UV_EISDIR: return UVWASI_EISDIR; + case UV_ELOOP: return UVWASI_ELOOP; + case UV_EMFILE: return UVWASI_EMFILE; + case UV_EMLINK: return UVWASI_EMLINK; + case UV_EMSGSIZE: return UVWASI_EMSGSIZE; + case UV_ENAMETOOLONG: return UVWASI_ENAMETOOLONG; + case UV_ENETDOWN: return UVWASI_ENETDOWN; + case UV_ENETUNREACH: return UVWASI_ENETUNREACH; + case UV_ENFILE: return UVWASI_ENFILE; + case UV_ENOBUFS: return UVWASI_ENOBUFS; + case UV_ENODEV: return UVWASI_ENODEV; + case UV_ENOENT: return UVWASI_ENOENT; + case UV_ENOMEM: return UVWASI_ENOMEM; + case UV_ENOPROTOOPT: return UVWASI_ENOPROTOOPT; + case UV_ENOSPC: return UVWASI_ENOSPC; + case UV_ENOSYS: return UVWASI_ENOSYS; + case UV_ENOTCONN: return UVWASI_ENOTCONN; + case UV_ENOTDIR: return 
UVWASI_ENOTDIR; + /* On at least some AIX machines, ENOTEMPTY and EEXIST are equivalent. */ +#if ENOTEMPTY != EEXIST + case UV_ENOTEMPTY: return UVWASI_ENOTEMPTY; +#endif /* ENOTEMPTY != EEXIST */ + case UV_ENOTSOCK: return UVWASI_ENOTSOCK; + case UV_ENOTSUP: return UVWASI_ENOTSUP; + case UV_ENXIO: return UVWASI_ENXIO; + case UV_EPERM: return UVWASI_EPERM; + case UV_EPIPE: return UVWASI_EPIPE; + case UV_EPROTO: return UVWASI_EPROTO; + case UV_EPROTONOSUPPORT: return UVWASI_EPROTONOSUPPORT; + case UV_EPROTOTYPE: return UVWASI_EPROTOTYPE; + case UV_ERANGE: return UVWASI_ERANGE; + case UV_EROFS: return UVWASI_EROFS; + case UV_ESPIPE: return UVWASI_ESPIPE; + case UV_ESRCH: return UVWASI_ESRCH; + case UV_ETIMEDOUT: return UVWASI_ETIMEDOUT; + case UV_ETXTBSY: return UVWASI_ETXTBSY; + case UV_EXDEV: return UVWASI_EXDEV; + case 0: return UVWASI_ESUCCESS; + /* The following libuv error codes have no corresponding WASI error code: + UV_EAI_ADDRFAMILY, UV_EAI_AGAIN, UV_EAI_BADFLAGS, UV_EAI_BADHINTS, + UV_EAI_CANCELED, UV_EAI_FAIL, UV_EAI_FAMILY, UV_EAI_MEMORY, + UV_EAI_NODATA, UV_EAI_NONAME, UV_EAI_OVERFLOW, UV_EAI_PROTOCOL, + UV_EAI_SERVICE, UV_EAI_SOCKTYPE, UV_ECHARSET, UV_ENONET, UV_EOF, + UV_ESHUTDOWN, UV_UNKNOWN + */ + default: + /* libuv errors are < 0 */ + if (err > 0) + return err; + + return UVWASI_ENOSYS; + } +} + + +int uvwasi__translate_to_uv_signal(uvwasi_signal_t sig) { + switch (sig) { +#ifdef SIGABRT + case UVWASI_SIGABRT: return SIGABRT; +#endif +#ifdef SIGALRM + case UVWASI_SIGALRM: return SIGALRM; +#endif +#ifdef SIGBUS + case UVWASI_SIGBUS: return SIGBUS; +#endif +#ifdef SIGCHLD + case UVWASI_SIGCHLD: return SIGCHLD; +#endif +#ifdef SIGCONT + case UVWASI_SIGCONT: return SIGCONT; +#endif +#ifdef SIGFPE + case UVWASI_SIGFPE: return SIGFPE; +#endif +#ifdef SIGHUP + case UVWASI_SIGHUP: return SIGHUP; +#endif +#ifdef SIGILL + case UVWASI_SIGILL: return SIGILL; +#endif +#ifdef SIGINT + case UVWASI_SIGINT: return SIGINT; +#endif +#ifdef SIGKILL + case UVWASI_SIGKILL: return SIGKILL; +#endif +#ifdef SIGPIPE + case UVWASI_SIGPIPE: return SIGPIPE; +#endif +#ifdef SIGQUIT + case UVWASI_SIGQUIT: return SIGQUIT; +#endif +#ifdef SIGSEGV + case UVWASI_SIGSEGV: return SIGSEGV; +#endif +#ifdef SIGSTOP + case UVWASI_SIGSTOP: return SIGSTOP; +#endif +#ifdef SIGSYS + case UVWASI_SIGSYS: return SIGSYS; +#endif +#ifdef SIGTERM + case UVWASI_SIGTERM: return SIGTERM; +#endif +#ifdef SIGTRAP + case UVWASI_SIGTRAP: return SIGTRAP; +#endif +#ifdef SIGTSTP + case UVWASI_SIGTSTP: return SIGTSTP; +#endif +#ifdef SIGTTIN + case UVWASI_SIGTTIN: return SIGTTIN; +#endif +#ifdef SIGTTOU + case UVWASI_SIGTTOU: return SIGTTOU; +#endif +#ifdef SIGURG + case UVWASI_SIGURG: return SIGURG; +#endif +#ifdef SIGUSR1 + case UVWASI_SIGUSR1: return SIGUSR1; +#endif +#ifdef SIGUSR2 + case UVWASI_SIGUSR2: return SIGUSR2; +#endif +#ifdef SIGVTALRM + case UVWASI_SIGVTALRM: return SIGVTALRM; +#endif +#ifdef SIGXCPU + case UVWASI_SIGXCPU: return SIGXCPU; +#endif +#ifdef SIGXFSZ + case UVWASI_SIGXFSZ: return SIGXFSZ; +#endif + default: return -1; + } +} + + +uvwasi_timestamp_t uvwasi__timespec_to_timestamp(const uv_timespec_t* ts) { + /* TODO(cjihrig): Handle overflow. 
*/ + return (uvwasi_timestamp_t) ts->tv_sec * NANOS_PER_SEC + ts->tv_nsec; +} + + +uvwasi_filetype_t uvwasi__stat_to_filetype(const uv_stat_t* stat) { + uint64_t mode; + + mode = stat->st_mode; + + if (S_ISREG(mode)) + return UVWASI_FILETYPE_REGULAR_FILE; + + if (S_ISDIR(mode)) + return UVWASI_FILETYPE_DIRECTORY; + + if (S_ISCHR(mode)) + return UVWASI_FILETYPE_CHARACTER_DEVICE; + + if (S_ISLNK(mode)) + return UVWASI_FILETYPE_SYMBOLIC_LINK; + +#ifdef S_ISSOCK + if (S_ISSOCK(mode)) + return UVWASI_FILETYPE_SOCKET_STREAM; +#endif /* S_ISSOCK */ + +#ifdef S_ISFIFO + if (S_ISFIFO(mode)) + return UVWASI_FILETYPE_SOCKET_STREAM; +#endif /* S_ISFIFO */ + +#ifdef S_ISBLK + if (S_ISBLK(mode)) + return UVWASI_FILETYPE_BLOCK_DEVICE; +#endif /* S_ISBLK */ + + return UVWASI_FILETYPE_UNKNOWN; +} + + +void uvwasi__stat_to_filestat(const uv_stat_t* stat, uvwasi_filestat_t* fs) { + fs->st_dev = stat->st_dev; + fs->st_ino = stat->st_ino; + fs->st_nlink = stat->st_nlink; + fs->st_size = stat->st_size; + fs->st_filetype = uvwasi__stat_to_filetype(stat); + fs->st_atim = uvwasi__timespec_to_timestamp(&stat->st_atim); + fs->st_mtim = uvwasi__timespec_to_timestamp(&stat->st_mtim); + fs->st_ctim = uvwasi__timespec_to_timestamp(&stat->st_ctim); +} diff --git a/deps/uvwasi/src/uvwasi.c b/deps/uvwasi/src/uvwasi.c new file mode 100644 index 00000000000000..28c6dcc26104c9 --- /dev/null +++ b/deps/uvwasi/src/uvwasi.c @@ -0,0 +1,2239 @@ +#include +#include + +#ifndef _WIN32 +# include +# include +# include +# include +# include +# define SLASH '/' +# define SLASH_STR "/" +# define IS_SLASH(c) ((c) == '/') +#else +# define SLASH '\\' +# define SLASH_STR "\\" +# define IS_SLASH(c) ((c) == '/' || (c) == '\\') +#endif /* _WIN32 */ + +#define UVWASI__READDIR_NUM_ENTRIES 1 + +#include "uvwasi.h" +#include "uvwasi_alloc.h" +#include "uv.h" +#include "uv_mapping.h" +#include "fd_table.h" +#include "clocks.h" + +/* TODO(cjihrig): PATH_MAX_BYTES shouldn't be stack allocated. On Windows, paths + can be 32k long, and this PATH_MAX_BYTES is an artificial limitation. */ +#ifdef _WIN32 +/* MAX_PATH is in characters, not bytes. Make sure we have enough headroom. */ +# define PATH_MAX_BYTES (MAX_PATH * 4) +#else +# include +# define PATH_MAX_BYTES (PATH_MAX) +#endif + +static void* default_malloc(size_t size, void* mem_user_data) { + return malloc(size); +} + +static void default_free(void* ptr, void* mem_user_data) { + free(ptr); +} + +static void* default_calloc(size_t nmemb, size_t size, void* mem_user_data) { + return calloc(nmemb, size); +} + +static void* default_realloc(void* ptr, size_t size, void* mem_user_data) { + return realloc(ptr, size); +} + +void* uvwasi__malloc(const uvwasi_t* uvwasi, size_t size) { + return uvwasi->allocator->malloc(size, uvwasi->allocator->mem_user_data); +} + +void uvwasi__free(const uvwasi_t* uvwasi, void* ptr) { + uvwasi->allocator->free(ptr, uvwasi->allocator->mem_user_data); +} + +void* uvwasi__calloc(const uvwasi_t* uvwasi, size_t nmemb, size_t size) { + return uvwasi->allocator->calloc(nmemb, + size, + uvwasi->allocator->mem_user_data); +} + +void* uvwasi__realloc(const uvwasi_t* uvwasi, void* ptr, size_t size) { + return uvwasi->allocator->realloc(ptr, + size, + uvwasi->allocator->mem_user_data); +} + +static const uvwasi_mem_t default_allocator = { + NULL, + default_malloc, + default_free, + default_calloc, + default_realloc, +}; + + +static int uvwasi__is_absolute_path(const char* path, size_t path_len) { + /* It's expected that only Unix style paths will be generated by WASI. 
*/ + return path != NULL && path_len > 0 && path[0] == '/'; +} + + +static uvwasi_errno_t uvwasi__resolve_path(const uvwasi_t* uvwasi, + const struct uvwasi_fd_wrap_t* fd, + const char* path, + size_t path_len, + char* resolved_path, + uvwasi_lookupflags_t flags) { + uv_fs_t realpath_req; + uvwasi_errno_t err; + char* abs_path; + char* tok; + char* ptr; + int realpath_size; + int abs_size; + int input_is_absolute; + int r; +#ifdef _WIN32 + int i; +#endif /* _WIN32 */ + + err = UVWASI_ESUCCESS; + input_is_absolute = uvwasi__is_absolute_path(path, path_len); + + if (1 == input_is_absolute) { + /* TODO(cjihrig): Revisit this. Copying is probably not necessary here. */ + abs_size = path_len; + abs_path = uvwasi__malloc(uvwasi, abs_size); + if (abs_path == NULL) { + err = UVWASI_ENOMEM; + goto exit; + } + + memcpy(abs_path, path, abs_size); + } else { + /* Resolve the relative path to fd's real path. */ + abs_size = path_len + strlen(fd->real_path) + 2; + abs_path = uvwasi__malloc(uvwasi, abs_size); + if (abs_path == NULL) { + err = UVWASI_ENOMEM; + goto exit; + } + + r = snprintf(abs_path, abs_size, "%s/%s", fd->real_path, path); + if (r <= 0) { + err = uvwasi__translate_uv_error(uv_translate_sys_error(errno)); + goto exit; + } + } + +#ifdef _WIN32 + /* On Windows, convert slashes to backslashes. */ + for (i = 0; i < abs_size; ++i) { + if (abs_path[i] == '/') + abs_path[i] = SLASH; + } +#endif /* _WIN32 */ + + ptr = resolved_path; + tok = strtok(abs_path, SLASH_STR); + for (; tok != NULL; tok = strtok(NULL, SLASH_STR)) { + if (0 == strcmp(tok, ".")) + continue; + + if (0 == strcmp(tok, "..")) { + while (*ptr != SLASH && ptr != resolved_path) + ptr--; + *ptr = '\0'; + continue; + } + +#ifdef _WIN32 + /* On Windows, prevent a leading slash in the path. */ + if (ptr == resolved_path) + r = sprintf(ptr, "%s", tok); + else +#endif /* _WIN32 */ + r = sprintf(ptr, "%c%s", SLASH, tok); + + if (r < 1) { /* At least one character should have been written. */ + err = uvwasi__translate_uv_error(uv_translate_sys_error(errno)); + goto exit; + } + + ptr += r; + } + + if ((flags & UVWASI_LOOKUP_SYMLINK_FOLLOW) == UVWASI_LOOKUP_SYMLINK_FOLLOW) { + r = uv_fs_realpath(NULL, &realpath_req, resolved_path, NULL); + if (r == 0) { + realpath_size = strlen(realpath_req.ptr) + 1; + if (realpath_size > PATH_MAX_BYTES) { + err = UVWASI_ENOBUFS; + uv_fs_req_cleanup(&realpath_req); + goto exit; + } + + memcpy(resolved_path, realpath_req.ptr, realpath_size); + } else if (r != UV_ENOENT) { + /* Report errors except ENOENT. */ + err = uvwasi__translate_uv_error(r); + uv_fs_req_cleanup(&realpath_req); + goto exit; + } + + uv_fs_req_cleanup(&realpath_req); + } + + /* Verify that the resolved path is still in the sandbox. 
*/ + if (resolved_path != strstr(resolved_path, fd->real_path)) { + err = UVWASI_ENOTCAPABLE; + goto exit; + } + +exit: + uvwasi__free(uvwasi, abs_path); + return err; +} + + +static uvwasi_errno_t uvwasi__lseek(uv_file fd, + uvwasi_filedelta_t offset, + uvwasi_whence_t whence, + uvwasi_filesize_t* newoffset) { + int real_whence; + + if (whence == UVWASI_WHENCE_CUR) + real_whence = SEEK_CUR; + else if (whence == UVWASI_WHENCE_END) + real_whence = SEEK_END; + else if (whence == UVWASI_WHENCE_SET) + real_whence = SEEK_SET; + else + return UVWASI_EINVAL; + +#ifdef _WIN32 + int64_t r; + + r = _lseeki64(fd, offset, real_whence); + if (-1L == r) + return uvwasi__translate_uv_error(uv_translate_sys_error(errno)); +#else + off_t r; + + r = lseek(fd, offset, real_whence); + if ((off_t) -1 == r) + return uvwasi__translate_uv_error(uv_translate_sys_error(errno)); +#endif /* _WIN32 */ + + *newoffset = r; + return UVWASI_ESUCCESS; +} + + +static uvwasi_errno_t uvwasi__setup_iovs(const uvwasi_t* uvwasi, + uv_buf_t** buffers, + const uvwasi_iovec_t* iovs, + size_t iovs_len) { + uv_buf_t* bufs; + size_t i; + + if ((iovs_len * sizeof(*bufs)) / (sizeof(*bufs)) != iovs_len) + return UVWASI_ENOMEM; + + bufs = uvwasi__malloc(uvwasi, iovs_len * sizeof(*bufs)); + if (bufs == NULL) + return UVWASI_ENOMEM; + + for (i = 0; i < iovs_len; ++i) + bufs[i] = uv_buf_init(iovs[i].buf, iovs[i].buf_len); + + *buffers = bufs; + return UVWASI_ESUCCESS; +} + + +static uvwasi_errno_t uvwasi__setup_ciovs(const uvwasi_t* uvwasi, + uv_buf_t** buffers, + const uvwasi_ciovec_t* iovs, + size_t iovs_len) { + uv_buf_t* bufs; + size_t i; + + if ((iovs_len * sizeof(*bufs)) / (sizeof(*bufs)) != iovs_len) + return UVWASI_ENOMEM; + + bufs = uvwasi__malloc(uvwasi, iovs_len * sizeof(*bufs)); + if (bufs == NULL) + return UVWASI_ENOMEM; + + for (i = 0; i < iovs_len; ++i) + bufs[i] = uv_buf_init((char*)iovs[i].buf, iovs[i].buf_len); + + *buffers = bufs; + return UVWASI_ESUCCESS; +} + + +uvwasi_errno_t uvwasi_init(uvwasi_t* uvwasi, uvwasi_options_t* options) { + uv_fs_t realpath_req; + uv_fs_t open_req; + uvwasi_errno_t err; + size_t args_size; + size_t size; + size_t offset; + size_t env_count; + size_t env_buf_size; + size_t i; + int r; + + if (uvwasi == NULL || options == NULL || options->fd_table_size == 0) + return UVWASI_EINVAL; + + uvwasi->allocator = options->allocator; + if (uvwasi->allocator == NULL) + uvwasi->allocator = &default_allocator; + + uvwasi->argv_buf = NULL; + uvwasi->argv = NULL; + uvwasi->env_buf = NULL; + uvwasi->env = NULL; + uvwasi->fds.fds = NULL; + + args_size = 0; + for (i = 0; i < options->argc; ++i) + args_size += strlen(options->argv[i]) + 1; + + uvwasi->argc = options->argc; + uvwasi->argv_buf_size = args_size; + + if (args_size > 0) { + uvwasi->argv_buf = uvwasi__malloc(uvwasi, args_size); + if (uvwasi->argv_buf == NULL) { + err = UVWASI_ENOMEM; + goto exit; + } + + uvwasi->argv = uvwasi__calloc(uvwasi, options->argc, sizeof(char*)); + if (uvwasi->argv == NULL) { + err = UVWASI_ENOMEM; + goto exit; + } + + offset = 0; + for (i = 0; i < options->argc; ++i) { + size = strlen(options->argv[i]) + 1; + memcpy(uvwasi->argv_buf + offset, options->argv[i], size); + uvwasi->argv[i] = uvwasi->argv_buf + offset; + offset += size; + } + } + + env_count = 0; + env_buf_size = 0; + if (options->envp != NULL) { + while (options->envp[env_count] != NULL) { + env_buf_size += strlen(options->envp[env_count]) + 1; + env_count++; + } + } + + uvwasi->envc = env_count; + uvwasi->env_buf_size = env_buf_size; + + if (env_buf_size > 0) { 
+ uvwasi->env_buf = uvwasi__malloc(uvwasi, env_buf_size); + if (uvwasi->env_buf == NULL) { + err = UVWASI_ENOMEM; + goto exit; + } + + uvwasi->env = uvwasi__calloc(uvwasi, env_count, sizeof(char*)); + if (uvwasi->env == NULL) { + err = UVWASI_ENOMEM; + goto exit; + } + + offset = 0; + for (i = 0; i < env_count; ++i) { + size = strlen(options->envp[i]) + 1; + memcpy(uvwasi->env_buf + offset, options->envp[i], size); + uvwasi->env[i] = uvwasi->env_buf + offset; + offset += size; + } + } + + for (i = 0; i < options->preopenc; ++i) { + if (options->preopens[i].real_path == NULL || + options->preopens[i].mapped_path == NULL) { + err = UVWASI_EINVAL; + goto exit; + } + } + + err = uvwasi_fd_table_init(uvwasi, &uvwasi->fds, options->fd_table_size); + if (err != UVWASI_ESUCCESS) + goto exit; + + for (i = 0; i < options->preopenc; ++i) { + r = uv_fs_realpath(NULL, + &realpath_req, + options->preopens[i].real_path, + NULL); + if (r != 0) { + err = uvwasi__translate_uv_error(r); + uv_fs_req_cleanup(&realpath_req); + goto exit; + } + + r = uv_fs_open(NULL, &open_req, realpath_req.ptr, 0, 0666, NULL); + if (r < 0) { + err = uvwasi__translate_uv_error(r); + uv_fs_req_cleanup(&realpath_req); + uv_fs_req_cleanup(&open_req); + goto exit; + } + + err = uvwasi_fd_table_insert_preopen(uvwasi, + &uvwasi->fds, + open_req.result, + options->preopens[i].mapped_path, + realpath_req.ptr); + uv_fs_req_cleanup(&realpath_req); + uv_fs_req_cleanup(&open_req); + + if (err != UVWASI_ESUCCESS) + goto exit; + } + + return UVWASI_ESUCCESS; + +exit: + uvwasi_destroy(uvwasi); + return err; +} + + +void uvwasi_destroy(uvwasi_t* uvwasi) { + if (uvwasi == NULL) + return; + + uvwasi_fd_table_free(uvwasi, &uvwasi->fds); + uvwasi__free(uvwasi, uvwasi->argv_buf); + uvwasi__free(uvwasi, uvwasi->argv); + uvwasi__free(uvwasi, uvwasi->env_buf); + uvwasi__free(uvwasi, uvwasi->env); + uvwasi->argv_buf = NULL; + uvwasi->argv = NULL; + uvwasi->env_buf = NULL; + uvwasi->env = NULL; +} + + +uvwasi_errno_t uvwasi_embedder_remap_fd(uvwasi_t* uvwasi, + const uvwasi_fd_t fd, + uv_file new_host_fd) { + struct uvwasi_fd_wrap_t* wrap; + uvwasi_errno_t err; + + if (uvwasi == NULL) + return UVWASI_EINVAL; + + err = uvwasi_fd_table_get(&uvwasi->fds, fd, &wrap, 0, 0); + if (err != UVWASI_ESUCCESS) + return err; + + wrap->fd = new_host_fd; + uv_mutex_unlock(&wrap->mutex); + return UVWASI_ESUCCESS; +} + + +uvwasi_errno_t uvwasi_args_get(uvwasi_t* uvwasi, char** argv, char* argv_buf) { + size_t i; + + if (uvwasi == NULL || argv == NULL || argv_buf == NULL) + return UVWASI_EINVAL; + + for (i = 0; i < uvwasi->argc; ++i) { + argv[i] = argv_buf + (uvwasi->argv[i] - uvwasi->argv_buf); + } + + memcpy(argv_buf, uvwasi->argv_buf, uvwasi->argv_buf_size); + return UVWASI_ESUCCESS; +} + + +uvwasi_errno_t uvwasi_args_sizes_get(uvwasi_t* uvwasi, + size_t* argc, + size_t* argv_buf_size) { + if (uvwasi == NULL || argc == NULL || argv_buf_size == NULL) + return UVWASI_EINVAL; + + *argc = uvwasi->argc; + *argv_buf_size = uvwasi->argv_buf_size; + return UVWASI_ESUCCESS; +} + + +uvwasi_errno_t uvwasi_clock_res_get(uvwasi_t* uvwasi, + uvwasi_clockid_t clock_id, + uvwasi_timestamp_t* resolution) { + if (uvwasi == NULL || resolution == NULL) + return UVWASI_EINVAL; + + switch (clock_id) { + case UVWASI_CLOCK_MONOTONIC: + case UVWASI_CLOCK_REALTIME: + *resolution = 1; /* Nanosecond precision. 
*/ + return UVWASI_ESUCCESS; + case UVWASI_CLOCK_PROCESS_CPUTIME_ID: + return uvwasi__clock_getres_process_cputime(resolution); + case UVWASI_CLOCK_THREAD_CPUTIME_ID: + return uvwasi__clock_getres_thread_cputime(resolution); + default: + return UVWASI_EINVAL; + } +} + + +uvwasi_errno_t uvwasi_clock_time_get(uvwasi_t* uvwasi, + uvwasi_clockid_t clock_id, + uvwasi_timestamp_t precision, + uvwasi_timestamp_t* time) { + if (uvwasi == NULL || time == NULL) + return UVWASI_EINVAL; + + switch (clock_id) { + case UVWASI_CLOCK_MONOTONIC: + *time = uv_hrtime(); + return UVWASI_ESUCCESS; + case UVWASI_CLOCK_REALTIME: + return uvwasi__clock_gettime_realtime(time); + case UVWASI_CLOCK_PROCESS_CPUTIME_ID: + return uvwasi__clock_gettime_process_cputime(time); + case UVWASI_CLOCK_THREAD_CPUTIME_ID: + return uvwasi__clock_gettime_thread_cputime(time); + default: + return UVWASI_EINVAL; + } +} + + +uvwasi_errno_t uvwasi_environ_get(uvwasi_t* uvwasi, + char** environment, + char* environ_buf) { + size_t i; + + if (uvwasi == NULL || environment == NULL || environ_buf == NULL) + return UVWASI_EINVAL; + + for (i = 0; i < uvwasi->envc; ++i) { + environment[i] = environ_buf + (uvwasi->env[i] - uvwasi->env_buf); + } + + memcpy(environ_buf, uvwasi->env_buf, uvwasi->env_buf_size); + return UVWASI_ESUCCESS; +} + + +uvwasi_errno_t uvwasi_environ_sizes_get(uvwasi_t* uvwasi, + size_t* environ_count, + size_t* environ_buf_size) { + if (uvwasi == NULL || environ_count == NULL || environ_buf_size == NULL) + return UVWASI_EINVAL; + + *environ_count = uvwasi->envc; + *environ_buf_size = uvwasi->env_buf_size; + return UVWASI_ESUCCESS; +} + + +uvwasi_errno_t uvwasi_fd_advise(uvwasi_t* uvwasi, + uvwasi_fd_t fd, + uvwasi_filesize_t offset, + uvwasi_filesize_t len, + uvwasi_advice_t advice) { + struct uvwasi_fd_wrap_t* wrap; + uvwasi_errno_t err; +#ifdef POSIX_FADV_NORMAL + int mapped_advice; + int r; +#endif /* POSIX_FADV_NORMAL */ + + if (uvwasi == NULL) + return UVWASI_EINVAL; + + switch (advice) { + case UVWASI_ADVICE_DONTNEED: +#ifdef POSIX_FADV_NORMAL + mapped_advice = POSIX_FADV_DONTNEED; +#endif /* POSIX_FADV_NORMAL */ + break; + case UVWASI_ADVICE_NOREUSE: +#ifdef POSIX_FADV_NORMAL + mapped_advice = POSIX_FADV_NOREUSE; +#endif /* POSIX_FADV_NORMAL */ + break; + case UVWASI_ADVICE_NORMAL: +#ifdef POSIX_FADV_NORMAL + mapped_advice = POSIX_FADV_NORMAL; +#endif /* POSIX_FADV_NORMAL */ + break; + case UVWASI_ADVICE_RANDOM: +#ifdef POSIX_FADV_NORMAL + mapped_advice = POSIX_FADV_RANDOM; +#endif /* POSIX_FADV_NORMAL */ + break; + case UVWASI_ADVICE_SEQUENTIAL: +#ifdef POSIX_FADV_NORMAL + mapped_advice = POSIX_FADV_SEQUENTIAL; +#endif /* POSIX_FADV_NORMAL */ + break; + case UVWASI_ADVICE_WILLNEED: +#ifdef POSIX_FADV_NORMAL + mapped_advice = POSIX_FADV_WILLNEED; +#endif /* POSIX_FADV_NORMAL */ + break; + default: + return UVWASI_EINVAL; + } + + err = uvwasi_fd_table_get(&uvwasi->fds, fd, &wrap, UVWASI_RIGHT_FD_ADVISE, 0); + if (err != UVWASI_ESUCCESS) + return err; + + err = UVWASI_ESUCCESS; + +#ifdef POSIX_FADV_NORMAL + r = posix_fadvise(wrap->fd, offset, len, mapped_advice); + if (r != 0) + err = uvwasi__translate_uv_error(uv_translate_sys_error(r)); +#endif /* POSIX_FADV_NORMAL */ + uv_mutex_unlock(&wrap->mutex); + return err; +} + + +uvwasi_errno_t uvwasi_fd_allocate(uvwasi_t* uvwasi, + uvwasi_fd_t fd, + uvwasi_filesize_t offset, + uvwasi_filesize_t len) { +#if !defined(__POSIX__) + uv_fs_t req; + uint64_t st_size; +#endif /* !__POSIX__ */ + struct uvwasi_fd_wrap_t* wrap; + uvwasi_errno_t err; + int r; + + if (uvwasi == NULL) 
+ return UVWASI_EINVAL; + + err = uvwasi_fd_table_get(&uvwasi->fds, + fd, + &wrap, + UVWASI_RIGHT_FD_ALLOCATE, + 0); + if (err != UVWASI_ESUCCESS) + return err; + + /* Try to use posix_fallocate(). If that's not an option, fall back to the + race condition prone combination of fstat() + ftruncate(). */ +#if defined(__POSIX__) + r = posix_fallocate(wrap->fd, offset, len); + if (r != 0) { + err = uvwasi__translate_uv_error(uv_translate_sys_error(r)); + goto exit; + } +#else + r = uv_fs_fstat(NULL, &req, wrap->fd, NULL); + st_size = req.statbuf.st_size; + uv_fs_req_cleanup(&req); + if (r != 0) { + err = uvwasi__translate_uv_error(r); + goto exit; + } + + if (st_size < offset + len) { + r = uv_fs_ftruncate(NULL, &req, wrap->fd, offset + len, NULL); + if (r != 0) { + err = uvwasi__translate_uv_error(r); + goto exit; + } + } +#endif /* __POSIX__ */ + + err = UVWASI_ESUCCESS; +exit: + uv_mutex_unlock(&wrap->mutex); + return err; +} + + +uvwasi_errno_t uvwasi_fd_close(uvwasi_t* uvwasi, uvwasi_fd_t fd) { + struct uvwasi_fd_wrap_t* wrap; + uvwasi_errno_t err; + uv_fs_t req; + int r; + + if (uvwasi == NULL) + return UVWASI_EINVAL; + + err = uvwasi_fd_table_get(&uvwasi->fds, fd, &wrap, 0, 0); + if (err != UVWASI_ESUCCESS) + return err; + + r = uv_fs_close(NULL, &req, wrap->fd, NULL); + uv_mutex_unlock(&wrap->mutex); + uv_fs_req_cleanup(&req); + + if (r != 0) + return uvwasi__translate_uv_error(r); + + return uvwasi_fd_table_remove(uvwasi, &uvwasi->fds, fd); +} + + +uvwasi_errno_t uvwasi_fd_datasync(uvwasi_t* uvwasi, uvwasi_fd_t fd) { + struct uvwasi_fd_wrap_t* wrap; + uvwasi_errno_t err; + uv_fs_t req; + int r; + + if (uvwasi == NULL) + return UVWASI_EINVAL; + + err = uvwasi_fd_table_get(&uvwasi->fds, + fd, + &wrap, + UVWASI_RIGHT_FD_DATASYNC, + 0); + if (err != UVWASI_ESUCCESS) + return err; + + r = uv_fs_fdatasync(NULL, &req, wrap->fd, NULL); + uv_mutex_unlock(&wrap->mutex); + uv_fs_req_cleanup(&req); + + if (r != 0) + return uvwasi__translate_uv_error(r); + + return UVWASI_ESUCCESS; +} + + +uvwasi_errno_t uvwasi_fd_fdstat_get(uvwasi_t* uvwasi, + uvwasi_fd_t fd, + uvwasi_fdstat_t* buf) { + struct uvwasi_fd_wrap_t* wrap; + uvwasi_errno_t err; +#ifndef _WIN32 + int r; +#endif + + if (uvwasi == NULL || buf == NULL) + return UVWASI_EINVAL; + + err = uvwasi_fd_table_get(&uvwasi->fds, fd, &wrap, 0, 0); + if (err != UVWASI_ESUCCESS) + return err; + + buf->fs_filetype = wrap->type; + buf->fs_rights_base = wrap->rights_base; + buf->fs_rights_inheriting = wrap->rights_inheriting; +#ifdef _WIN32 + buf->fs_flags = 0; /* TODO(cjihrig): Missing Windows support. */ +#else + r = fcntl(wrap->fd, F_GETFL); + if (r < 0) { + err = uvwasi__translate_uv_error(uv_translate_sys_error(errno)); + uv_mutex_unlock(&wrap->mutex); + return err; + } + buf->fs_flags = r; +#endif /* _WIN32 */ + + uv_mutex_unlock(&wrap->mutex); + return UVWASI_ESUCCESS; +} + + +uvwasi_errno_t uvwasi_fd_fdstat_set_flags(uvwasi_t* uvwasi, + uvwasi_fd_t fd, + uvwasi_fdflags_t flags) { +#ifdef _WIN32 + /* TODO(cjihrig): Missing Windows support. 
*/ + return UVWASI_ENOSYS; +#else + struct uvwasi_fd_wrap_t* wrap; + uvwasi_errno_t err; + int mapped_flags; + int r; + + if (uvwasi == NULL) + return UVWASI_EINVAL; + + err = uvwasi_fd_table_get(&uvwasi->fds, + fd, + &wrap, + UVWASI_RIGHT_FD_FDSTAT_SET_FLAGS, + 0); + if (err != UVWASI_ESUCCESS) + return err; + + mapped_flags = 0; + + if ((flags & UVWASI_FDFLAG_APPEND) == UVWASI_FDFLAG_APPEND) + mapped_flags |= O_APPEND; + + if ((flags & UVWASI_FDFLAG_DSYNC) == UVWASI_FDFLAG_DSYNC) +#ifdef O_DSYNC + mapped_flags |= O_DSYNC; +#else + mapped_flags |= O_SYNC; +#endif /* O_DSYNC */ + + if ((flags & UVWASI_FDFLAG_NONBLOCK) == UVWASI_FDFLAG_NONBLOCK) + mapped_flags |= O_NONBLOCK; + + if ((flags & UVWASI_FDFLAG_RSYNC) == UVWASI_FDFLAG_RSYNC) +#ifdef O_RSYNC + mapped_flags |= O_RSYNC; +#else + mapped_flags |= O_SYNC; +#endif /* O_RSYNC */ + + if ((flags & UVWASI_FDFLAG_SYNC) == UVWASI_FDFLAG_SYNC) + mapped_flags |= O_SYNC; + + r = fcntl(wrap->fd, F_SETFL, mapped_flags); + if (r < 0) + err = uvwasi__translate_uv_error(uv_translate_sys_error(errno)); + else + err = UVWASI_ESUCCESS; + + uv_mutex_unlock(&wrap->mutex); + return err; +#endif /* _WIN32 */ +} + + +uvwasi_errno_t uvwasi_fd_fdstat_set_rights(uvwasi_t* uvwasi, + uvwasi_fd_t fd, + uvwasi_rights_t fs_rights_base, + uvwasi_rights_t fs_rights_inheriting + ) { + struct uvwasi_fd_wrap_t* wrap; + uvwasi_errno_t err; + + if (uvwasi == NULL) + return UVWASI_EINVAL; + + err = uvwasi_fd_table_get(&uvwasi->fds, fd, &wrap, 0, 0); + if (err != UVWASI_ESUCCESS) + return err; + + /* Check for attempts to add new permissions. */ + if ((fs_rights_base | wrap->rights_base) > wrap->rights_base) { + err = UVWASI_ENOTCAPABLE; + goto exit; + } + + if ((fs_rights_inheriting | wrap->rights_inheriting) > + wrap->rights_inheriting) { + err = UVWASI_ENOTCAPABLE; + goto exit; + } + + wrap->rights_base = fs_rights_base; + wrap->rights_inheriting = fs_rights_inheriting; + err = UVWASI_ESUCCESS; +exit: + uv_mutex_unlock(&wrap->mutex); + return err; +} + + +uvwasi_errno_t uvwasi_fd_filestat_get(uvwasi_t* uvwasi, + uvwasi_fd_t fd, + uvwasi_filestat_t* buf) { + struct uvwasi_fd_wrap_t* wrap; + uv_fs_t req; + uvwasi_errno_t err; + int r; + + if (uvwasi == NULL || buf == NULL) + return UVWASI_EINVAL; + + err = uvwasi_fd_table_get(&uvwasi->fds, + fd, + &wrap, + UVWASI_RIGHT_FD_FILESTAT_GET, + 0); + if (err != UVWASI_ESUCCESS) + return err; + + r = uv_fs_fstat(NULL, &req, wrap->fd, NULL); + if (r != 0) { + err = uvwasi__translate_uv_error(r); + goto exit; + } + + uvwasi__stat_to_filestat(&req.statbuf, buf); + err = UVWASI_ESUCCESS; +exit: + uv_mutex_unlock(&wrap->mutex); + uv_fs_req_cleanup(&req); + return err; +} + + +uvwasi_errno_t uvwasi_fd_filestat_set_size(uvwasi_t* uvwasi, + uvwasi_fd_t fd, + uvwasi_filesize_t st_size) { + /* TODO(cjihrig): uv_fs_ftruncate() takes an int64_t. st_size is uint64_t. 
*/ + struct uvwasi_fd_wrap_t* wrap; + uv_fs_t req; + uvwasi_errno_t err; + int r; + + if (uvwasi == NULL) + return UVWASI_EINVAL; + + err = uvwasi_fd_table_get(&uvwasi->fds, + fd, + &wrap, + UVWASI_RIGHT_FD_FILESTAT_SET_SIZE, + 0); + if (err != UVWASI_ESUCCESS) + return err; + + r = uv_fs_ftruncate(NULL, &req, wrap->fd, st_size, NULL); + uv_mutex_unlock(&wrap->mutex); + uv_fs_req_cleanup(&req); + + if (r != 0) + return uvwasi__translate_uv_error(r); + + return UVWASI_ESUCCESS; +} + + +uvwasi_errno_t uvwasi_fd_filestat_set_times(uvwasi_t* uvwasi, + uvwasi_fd_t fd, + uvwasi_timestamp_t st_atim, + uvwasi_timestamp_t st_mtim, + uvwasi_fstflags_t fst_flags) { + /* TODO(cjihrig): libuv does not currently support nanosecond precision. */ + struct uvwasi_fd_wrap_t* wrap; + uv_fs_t req; + uvwasi_errno_t err; + int r; + + if (uvwasi == NULL) + return UVWASI_EINVAL; + + if (fst_flags & ~(UVWASI_FILESTAT_SET_ATIM | UVWASI_FILESTAT_SET_ATIM_NOW | + UVWASI_FILESTAT_SET_MTIM | UVWASI_FILESTAT_SET_MTIM_NOW)) { + return UVWASI_EINVAL; + } + + err = uvwasi_fd_table_get(&uvwasi->fds, + fd, + &wrap, + UVWASI_RIGHT_FD_FILESTAT_SET_TIMES, + 0); + if (err != UVWASI_ESUCCESS) + return err; + + /* TODO(cjihrig): st_atim and st_mtim should not be unconditionally passed. */ + r = uv_fs_futime(NULL, &req, wrap->fd, st_atim, st_mtim, NULL); + uv_mutex_unlock(&wrap->mutex); + uv_fs_req_cleanup(&req); + + if (r != 0) + return uvwasi__translate_uv_error(r); + + return UVWASI_ESUCCESS; +} + + +uvwasi_errno_t uvwasi_fd_pread(uvwasi_t* uvwasi, + uvwasi_fd_t fd, + const uvwasi_iovec_t* iovs, + size_t iovs_len, + uvwasi_filesize_t offset, + size_t* nread) { + struct uvwasi_fd_wrap_t* wrap; + uv_buf_t* bufs; + uv_fs_t req; + uvwasi_errno_t err; + size_t uvread; + int r; + + if (uvwasi == NULL || iovs == NULL || nread == NULL) + return UVWASI_EINVAL; + + err = uvwasi_fd_table_get(&uvwasi->fds, + fd, + &wrap, + UVWASI_RIGHT_FD_READ | UVWASI_RIGHT_FD_SEEK, + 0); + if (err != UVWASI_ESUCCESS) + return err; + + err = uvwasi__setup_iovs(uvwasi, &bufs, iovs, iovs_len); + if (err != UVWASI_ESUCCESS) { + uv_mutex_unlock(&wrap->mutex); + return err; + } + + r = uv_fs_read(NULL, &req, wrap->fd, bufs, iovs_len, offset, NULL); + uv_mutex_unlock(&wrap->mutex); + uvread = req.result; + uv_fs_req_cleanup(&req); + uvwasi__free(uvwasi, bufs); + + if (r < 0) + return uvwasi__translate_uv_error(r); + + *nread = uvread; + return UVWASI_ESUCCESS; +} + + +uvwasi_errno_t uvwasi_fd_prestat_get(uvwasi_t* uvwasi, + uvwasi_fd_t fd, + uvwasi_prestat_t* buf) { + struct uvwasi_fd_wrap_t* wrap; + uvwasi_errno_t err; + + if (uvwasi == NULL || buf == NULL) + return UVWASI_EINVAL; + + err = uvwasi_fd_table_get(&uvwasi->fds, fd, &wrap, 0, 0); + if (err != UVWASI_ESUCCESS) + return err; + if (wrap->preopen != 1) { + err = UVWASI_EINVAL; + goto exit; + } + + buf->pr_type = UVWASI_PREOPENTYPE_DIR; + buf->u.dir.pr_name_len = strlen(wrap->path) + 1; + err = UVWASI_ESUCCESS; +exit: + uv_mutex_unlock(&wrap->mutex); + return err; +} + + +uvwasi_errno_t uvwasi_fd_prestat_dir_name(uvwasi_t* uvwasi, + uvwasi_fd_t fd, + char* path, + size_t path_len) { + struct uvwasi_fd_wrap_t* wrap; + uvwasi_errno_t err; + size_t size; + + if (uvwasi == NULL || path == NULL) + return UVWASI_EINVAL; + + err = uvwasi_fd_table_get(&uvwasi->fds, fd, &wrap, 0, 0); + if (err != UVWASI_ESUCCESS) + return err; + if (wrap->preopen != 1) { + err = UVWASI_EBADF; + goto exit; + } + + size = strlen(wrap->path) + 1; + if (size > path_len) { + err = UVWASI_ENOBUFS; + goto exit; + } + + memcpy(path, 
wrap->path, size); + err = UVWASI_ESUCCESS; +exit: + uv_mutex_unlock(&wrap->mutex); + return err; +} + + +uvwasi_errno_t uvwasi_fd_pwrite(uvwasi_t* uvwasi, + uvwasi_fd_t fd, + const uvwasi_ciovec_t* iovs, + size_t iovs_len, + uvwasi_filesize_t offset, + size_t* nwritten) { + struct uvwasi_fd_wrap_t* wrap; + uv_buf_t* bufs; + uv_fs_t req; + uvwasi_errno_t err; + size_t uvwritten; + int r; + + if (uvwasi == NULL || iovs == NULL || nwritten == NULL) + return UVWASI_EINVAL; + + err = uvwasi_fd_table_get(&uvwasi->fds, + fd, + &wrap, + UVWASI_RIGHT_FD_WRITE | UVWASI_RIGHT_FD_SEEK, + 0); + if (err != UVWASI_ESUCCESS) + return err; + + err = uvwasi__setup_ciovs(uvwasi, &bufs, iovs, iovs_len); + if (err != UVWASI_ESUCCESS) { + uv_mutex_unlock(&wrap->mutex); + return err; + } + + r = uv_fs_write(NULL, &req, wrap->fd, bufs, iovs_len, offset, NULL); + uv_mutex_unlock(&wrap->mutex); + uvwritten = req.result; + uv_fs_req_cleanup(&req); + uvwasi__free(uvwasi, bufs); + + if (r < 0) + return uvwasi__translate_uv_error(r); + + *nwritten = uvwritten; + return UVWASI_ESUCCESS; +} + + +uvwasi_errno_t uvwasi_fd_read(uvwasi_t* uvwasi, + uvwasi_fd_t fd, + const uvwasi_iovec_t* iovs, + size_t iovs_len, + size_t* nread) { + struct uvwasi_fd_wrap_t* wrap; + uv_buf_t* bufs; + uv_fs_t req; + uvwasi_errno_t err; + size_t uvread; + int r; + + if (uvwasi == NULL || iovs == NULL || nread == NULL) + return UVWASI_EINVAL; + + err = uvwasi_fd_table_get(&uvwasi->fds, fd, &wrap, UVWASI_RIGHT_FD_READ, 0); + if (err != UVWASI_ESUCCESS) + return err; + + err = uvwasi__setup_iovs(uvwasi, &bufs, iovs, iovs_len); + if (err != UVWASI_ESUCCESS) { + uv_mutex_unlock(&wrap->mutex); + return err; + } + + r = uv_fs_read(NULL, &req, wrap->fd, bufs, iovs_len, -1, NULL); + uv_mutex_unlock(&wrap->mutex); + uvread = req.result; + uv_fs_req_cleanup(&req); + uvwasi__free(uvwasi, bufs); + + if (r < 0) + return uvwasi__translate_uv_error(r); + + *nread = uvread; + return UVWASI_ESUCCESS; +} + + +uvwasi_errno_t uvwasi_fd_readdir(uvwasi_t* uvwasi, + uvwasi_fd_t fd, + void* buf, + size_t buf_len, + uvwasi_dircookie_t cookie, + size_t* bufused) { + /* TODO(cjihrig): Support Windows where seekdir() and telldir() are used. */ + /* TODO(cjihrig): Avoid opening and closing the directory on each call. */ + struct uvwasi_fd_wrap_t* wrap; + uvwasi_dirent_t dirent; + uv_dirent_t dirents[UVWASI__READDIR_NUM_ENTRIES]; + uv_dir_t* dir; + uv_fs_t req; + uvwasi_errno_t err; + size_t name_len; + size_t available; + size_t size_to_cp; + long tell; + int i; + int r; + + if (uvwasi == NULL || buf == NULL || bufused == NULL) + return UVWASI_EINVAL; + + err = uvwasi_fd_table_get(&uvwasi->fds, + fd, + &wrap, + UVWASI_RIGHT_FD_READDIR, + 0); + if (err != UVWASI_ESUCCESS) + return err; + + /* Open the directory. */ + r = uv_fs_opendir(NULL, &req, wrap->real_path, NULL); + if (r != 0) { + uv_mutex_unlock(&wrap->mutex); + return uvwasi__translate_uv_error(r); + } + + /* Setup for reading the directory. */ + dir = req.ptr; + dir->dirents = dirents; + dir->nentries = UVWASI__READDIR_NUM_ENTRIES; + uv_fs_req_cleanup(&req); + +#ifndef _WIN32 + /* TODO(cjihrig): Need a Windows equivalent of this logic. */ + /* Seek to the proper location in the directory. */ + if (cookie != UVWASI_DIRCOOKIE_START) + seekdir(dir->dir, cookie); +#endif + + /* Read the directory entries into the provided buffer. 
*/ + err = UVWASI_ESUCCESS; + *bufused = 0; + while (0 != (r = uv_fs_readdir(NULL, &req, dir, NULL))) { + if (r < 0) { + err = uvwasi__translate_uv_error(r); + uv_fs_req_cleanup(&req); + goto exit; + } + + for (i = 0; i < r; i++) { + /* TODO(cjihrig): This should probably be serialized to the buffer + consistently across platforms. In other words, d_next should always + be 8 bytes, d_ino should always be 8 bytes, d_namlen should always be + 4 bytes, and d_type should always be 1 byte. */ +#ifndef _WIN32 + tell = telldir(dir->dir); + if (tell < 0) { + err = uvwasi__translate_uv_error(uv_translate_sys_error(errno)); + uv_fs_req_cleanup(&req); + goto exit; + } +#else + tell = 0; /* TODO(cjihrig): Need to support Windows. */ +#endif /* _WIN32 */ + + name_len = strlen(dirents[i].name); + dirent.d_next = (uvwasi_dircookie_t) tell; + /* TODO(cjihrig): Missing ino libuv (and Windows) support. fstat()? */ + dirent.d_ino = 0; + dirent.d_namlen = name_len; + + switch (dirents[i].type) { + case UV_DIRENT_FILE: + dirent.d_type = UVWASI_FILETYPE_REGULAR_FILE; + break; + case UV_DIRENT_DIR: + dirent.d_type = UVWASI_FILETYPE_DIRECTORY; + break; + case UV_DIRENT_SOCKET: + dirent.d_type = UVWASI_FILETYPE_SOCKET_STREAM; + break; + case UV_DIRENT_LINK: + dirent.d_type = UVWASI_FILETYPE_SYMBOLIC_LINK; + break; + case UV_DIRENT_CHAR: + dirent.d_type = UVWASI_FILETYPE_CHARACTER_DEVICE; + break; + case UV_DIRENT_BLOCK: + dirent.d_type = UVWASI_FILETYPE_BLOCK_DEVICE; + break; + case UV_DIRENT_FIFO: + case UV_DIRENT_UNKNOWN: + default: + dirent.d_type = UVWASI_FILETYPE_UNKNOWN; + break; + } + + /* Write dirent to the buffer. */ + available = buf_len - *bufused; + size_to_cp = sizeof(dirent) > available ? available : sizeof(dirent); + memcpy((char*)buf + *bufused, &dirent, size_to_cp); + *bufused += size_to_cp; + /* Write the entry name to the buffer. */ + available = buf_len - *bufused; + size_to_cp = name_len > available ? available : name_len; + memcpy((char*)buf + *bufused, &dirents[i].name, size_to_cp); + *bufused += size_to_cp; + } + + uv_fs_req_cleanup(&req); + + if (*bufused >= buf_len) + break; + } + +exit: + /* Close the directory. 
*/ + r = uv_fs_closedir(NULL, &req, dir, NULL); + uv_mutex_unlock(&wrap->mutex); + uv_fs_req_cleanup(&req); + if (r != 0) + return uvwasi__translate_uv_error(r); + + return err; +} + + +uvwasi_errno_t uvwasi_fd_renumber(uvwasi_t* uvwasi, + uvwasi_fd_t from, + uvwasi_fd_t to) { + struct uvwasi_fd_wrap_t* to_wrap; + struct uvwasi_fd_wrap_t* from_wrap; + uv_fs_t req; + uvwasi_errno_t err; + int r; + + if (uvwasi == NULL) + return UVWASI_EINVAL; + + if (from == to) + return UVWASI_ESUCCESS; + + err = uvwasi_fd_table_get(&uvwasi->fds, from, &from_wrap, 0, 0); + if (err != UVWASI_ESUCCESS) + return err; + + err = uvwasi_fd_table_get(&uvwasi->fds, to, &to_wrap, 0, 0); + if (err != UVWASI_ESUCCESS) { + uv_mutex_unlock(&from_wrap->mutex); + return err; + } + + r = uv_fs_close(NULL, &req, to_wrap->fd, NULL); + uv_fs_req_cleanup(&req); + if (r != 0) { + uv_mutex_unlock(&from_wrap->mutex); + uv_mutex_unlock(&to_wrap->mutex); + return uvwasi__translate_uv_error(r); + } + + memcpy(to_wrap, from_wrap, sizeof(*to_wrap)); + to_wrap->id = to; + uv_mutex_unlock(&from_wrap->mutex); + uv_mutex_unlock(&to_wrap->mutex); + return uvwasi_fd_table_remove(uvwasi, &uvwasi->fds, from); +} + + +uvwasi_errno_t uvwasi_fd_seek(uvwasi_t* uvwasi, + uvwasi_fd_t fd, + uvwasi_filedelta_t offset, + uvwasi_whence_t whence, + uvwasi_filesize_t* newoffset) { + struct uvwasi_fd_wrap_t* wrap; + uvwasi_errno_t err; + + if (uvwasi == NULL || newoffset == NULL) + return UVWASI_EINVAL; + + err = uvwasi_fd_table_get(&uvwasi->fds, fd, &wrap, UVWASI_RIGHT_FD_SEEK, 0); + if (err != UVWASI_ESUCCESS) + return err; + + err = uvwasi__lseek(wrap->fd, offset, whence, newoffset); + uv_mutex_unlock(&wrap->mutex); + return err; +} + + +uvwasi_errno_t uvwasi_fd_sync(uvwasi_t* uvwasi, uvwasi_fd_t fd) { + struct uvwasi_fd_wrap_t* wrap; + uv_fs_t req; + uvwasi_errno_t err; + int r; + + if (uvwasi == NULL) + return UVWASI_EINVAL; + + err = uvwasi_fd_table_get(&uvwasi->fds, + fd, + &wrap, + UVWASI_RIGHT_FD_SYNC, + 0); + if (err != UVWASI_ESUCCESS) + return err; + + r = uv_fs_fsync(NULL, &req, wrap->fd, NULL); + uv_mutex_unlock(&wrap->mutex); + uv_fs_req_cleanup(&req); + + if (r != 0) + return uvwasi__translate_uv_error(r); + + return UVWASI_ESUCCESS; +} + + +uvwasi_errno_t uvwasi_fd_tell(uvwasi_t* uvwasi, + uvwasi_fd_t fd, + uvwasi_filesize_t* offset) { + struct uvwasi_fd_wrap_t* wrap; + uvwasi_errno_t err; + + if (uvwasi == NULL || offset == NULL) + return UVWASI_EINVAL; + + err = uvwasi_fd_table_get(&uvwasi->fds, fd, &wrap, UVWASI_RIGHT_FD_TELL, 0); + if (err != UVWASI_ESUCCESS) + return err; + + err = uvwasi__lseek(wrap->fd, 0, UVWASI_WHENCE_CUR, offset); + uv_mutex_unlock(&wrap->mutex); + return err; +} + + +uvwasi_errno_t uvwasi_fd_write(uvwasi_t* uvwasi, + uvwasi_fd_t fd, + const uvwasi_ciovec_t* iovs, + size_t iovs_len, + size_t* nwritten) { + struct uvwasi_fd_wrap_t* wrap; + uv_buf_t* bufs; + uv_fs_t req; + uvwasi_errno_t err; + size_t uvwritten; + int r; + + if (uvwasi == NULL || iovs == NULL || nwritten == NULL) + return UVWASI_EINVAL; + + err = uvwasi_fd_table_get(&uvwasi->fds, fd, &wrap, UVWASI_RIGHT_FD_WRITE, 0); + if (err != UVWASI_ESUCCESS) + return err; + + err = uvwasi__setup_ciovs(uvwasi, &bufs, iovs, iovs_len); + if (err != UVWASI_ESUCCESS) { + uv_mutex_unlock(&wrap->mutex); + return err; + } + + r = uv_fs_write(NULL, &req, wrap->fd, bufs, iovs_len, -1, NULL); + uv_mutex_unlock(&wrap->mutex); + uvwritten = req.result; + uv_fs_req_cleanup(&req); + uvwasi__free(uvwasi, bufs); + + if (r < 0) + return uvwasi__translate_uv_error(r); + + 
*nwritten = uvwritten; + return UVWASI_ESUCCESS; +} + + +uvwasi_errno_t uvwasi_path_create_directory(uvwasi_t* uvwasi, + uvwasi_fd_t fd, + const char* path, + size_t path_len) { + char resolved_path[PATH_MAX_BYTES]; + struct uvwasi_fd_wrap_t* wrap; + uv_fs_t req; + uvwasi_errno_t err; + int r; + + if (uvwasi == NULL || path == NULL) + return UVWASI_EINVAL; + + err = uvwasi_fd_table_get(&uvwasi->fds, + fd, + &wrap, + UVWASI_RIGHT_PATH_CREATE_DIRECTORY, + 0); + if (err != UVWASI_ESUCCESS) + return err; + + err = uvwasi__resolve_path(uvwasi, wrap, path, path_len, resolved_path, 0); + if (err != UVWASI_ESUCCESS) + goto exit; + + r = uv_fs_mkdir(NULL, &req, resolved_path, 0777, NULL); + uv_fs_req_cleanup(&req); + + if (r != 0) { + err = uvwasi__translate_uv_error(r); + goto exit; + } + + err = UVWASI_ESUCCESS; +exit: + uv_mutex_unlock(&wrap->mutex); + return err; +} + + +uvwasi_errno_t uvwasi_path_filestat_get(uvwasi_t* uvwasi, + uvwasi_fd_t fd, + uvwasi_lookupflags_t flags, + const char* path, + size_t path_len, + uvwasi_filestat_t* buf) { + char resolved_path[PATH_MAX_BYTES]; + struct uvwasi_fd_wrap_t* wrap; + uv_fs_t req; + uvwasi_errno_t err; + int r; + + if (uvwasi == NULL || path == NULL || buf == NULL) + return UVWASI_EINVAL; + + err = uvwasi_fd_table_get(&uvwasi->fds, + fd, + &wrap, + UVWASI_RIGHT_PATH_FILESTAT_GET, + 0); + if (err != UVWASI_ESUCCESS) + return err; + + err = uvwasi__resolve_path(uvwasi, + wrap, + path, + path_len, + resolved_path, + flags); + if (err != UVWASI_ESUCCESS) + goto exit; + + r = uv_fs_stat(NULL, &req, resolved_path, NULL); + if (r != 0) { + uv_fs_req_cleanup(&req); + err = uvwasi__translate_uv_error(r); + goto exit; + } + + uvwasi__stat_to_filestat(&req.statbuf, buf); + uv_fs_req_cleanup(&req); + err = UVWASI_ESUCCESS; +exit: + uv_mutex_unlock(&wrap->mutex); + return err; +} + + +uvwasi_errno_t uvwasi_path_filestat_set_times(uvwasi_t* uvwasi, + uvwasi_fd_t fd, + uvwasi_lookupflags_t flags, + const char* path, + size_t path_len, + uvwasi_timestamp_t st_atim, + uvwasi_timestamp_t st_mtim, + uvwasi_fstflags_t fst_flags) { + /* TODO(cjihrig): libuv does not currently support nanosecond precision. */ + char resolved_path[PATH_MAX_BYTES]; + struct uvwasi_fd_wrap_t* wrap; + uv_fs_t req; + uvwasi_errno_t err; + int r; + + if (uvwasi == NULL || path == NULL) + return UVWASI_EINVAL; + + if (fst_flags & ~(UVWASI_FILESTAT_SET_ATIM | UVWASI_FILESTAT_SET_ATIM_NOW | + UVWASI_FILESTAT_SET_MTIM | UVWASI_FILESTAT_SET_MTIM_NOW)) { + return UVWASI_EINVAL; + } + + err = uvwasi_fd_table_get(&uvwasi->fds, + fd, + &wrap, + UVWASI_RIGHT_PATH_FILESTAT_SET_TIMES, + 0); + if (err != UVWASI_ESUCCESS) + return err; + + err = uvwasi__resolve_path(uvwasi, + wrap, + path, + path_len, + resolved_path, + flags); + if (err != UVWASI_ESUCCESS) + goto exit; + + /* TODO(cjihrig): st_atim and st_mtim should not be unconditionally passed. 
*/ + r = uv_fs_utime(NULL, &req, resolved_path, st_atim, st_mtim, NULL); + uv_fs_req_cleanup(&req); + + if (r != 0) { + err = uvwasi__translate_uv_error(r); + goto exit; + } + + err = UVWASI_ESUCCESS; +exit: + uv_mutex_unlock(&wrap->mutex); + return err; +} + + +uvwasi_errno_t uvwasi_path_link(uvwasi_t* uvwasi, + uvwasi_fd_t old_fd, + uvwasi_lookupflags_t old_flags, + const char* old_path, + size_t old_path_len, + uvwasi_fd_t new_fd, + const char* new_path, + size_t new_path_len) { + char resolved_old_path[PATH_MAX_BYTES]; + char resolved_new_path[PATH_MAX_BYTES]; + struct uvwasi_fd_wrap_t* old_wrap; + struct uvwasi_fd_wrap_t* new_wrap; + uvwasi_errno_t err; + uv_fs_t req; + int r; + + if (uvwasi == NULL || old_path == NULL || new_path == NULL) + return UVWASI_EINVAL; + + if (old_fd == new_fd) { + err = uvwasi_fd_table_get(&uvwasi->fds, + old_fd, + &old_wrap, + UVWASI_RIGHT_PATH_LINK_SOURCE | + UVWASI_RIGHT_PATH_LINK_TARGET, + 0); + if (err != UVWASI_ESUCCESS) + return err; + + new_wrap = old_wrap; + } else { + err = uvwasi_fd_table_get(&uvwasi->fds, + old_fd, + &old_wrap, + UVWASI_RIGHT_PATH_LINK_SOURCE, + 0); + if (err != UVWASI_ESUCCESS) + return err; + + err = uvwasi_fd_table_get(&uvwasi->fds, + new_fd, + &new_wrap, + UVWASI_RIGHT_PATH_LINK_TARGET, + 0); + if (err != UVWASI_ESUCCESS) { + uv_mutex_unlock(&old_wrap->mutex); + return err; + } + } + + err = uvwasi__resolve_path(uvwasi, + old_wrap, + old_path, + old_path_len, + resolved_old_path, + old_flags); + if (err != UVWASI_ESUCCESS) + goto exit; + + err = uvwasi__resolve_path(uvwasi, + new_wrap, + new_path, + new_path_len, + resolved_new_path, + 0); + if (err != UVWASI_ESUCCESS) + goto exit; + + r = uv_fs_link(NULL, &req, resolved_old_path, resolved_new_path, NULL); + uv_fs_req_cleanup(&req); + if (r != 0) { + err = uvwasi__translate_uv_error(r); + goto exit; + } + + err = UVWASI_ESUCCESS; +exit: + uv_mutex_unlock(&old_wrap->mutex); + if (old_fd != new_fd) + uv_mutex_unlock(&new_wrap->mutex); + return err; +} + + +uvwasi_errno_t uvwasi_path_open(uvwasi_t* uvwasi, + uvwasi_fd_t dirfd, + uvwasi_lookupflags_t dirflags, + const char* path, + size_t path_len, + uvwasi_oflags_t o_flags, + uvwasi_rights_t fs_rights_base, + uvwasi_rights_t fs_rights_inheriting, + uvwasi_fdflags_t fs_flags, + uvwasi_fd_t* fd) { + char resolved_path[PATH_MAX_BYTES]; + uvwasi_rights_t needed_inheriting; + uvwasi_rights_t needed_base; + struct uvwasi_fd_wrap_t* dirfd_wrap; + struct uvwasi_fd_wrap_t wrap; + uvwasi_errno_t err; + uv_fs_t req; + int flags; + int read; + int write; + int r; + + if (uvwasi == NULL || path == NULL || fd == NULL) + return UVWASI_EINVAL; + + read = 0 != (fs_rights_base & (UVWASI_RIGHT_FD_READ | + UVWASI_RIGHT_FD_READDIR)); + write = 0 != (fs_rights_base & (UVWASI_RIGHT_FD_DATASYNC | + UVWASI_RIGHT_FD_WRITE | + UVWASI_RIGHT_FD_ALLOCATE | + UVWASI_RIGHT_FD_FILESTAT_SET_SIZE)); + flags = write ? read ? 
UV_FS_O_RDWR : UV_FS_O_WRONLY : UV_FS_O_RDONLY; + needed_base = UVWASI_RIGHT_PATH_OPEN; + needed_inheriting = fs_rights_base | fs_rights_inheriting; + + if ((o_flags & UVWASI_O_CREAT) != 0) { + flags |= UV_FS_O_CREAT; + needed_base |= UVWASI_RIGHT_PATH_CREATE_FILE; + } + if ((o_flags & UVWASI_O_DIRECTORY) != 0) + flags |= UV_FS_O_DIRECTORY; + if ((o_flags & UVWASI_O_EXCL) != 0) + flags |= UV_FS_O_EXCL; + if ((o_flags & UVWASI_O_TRUNC) != 0) { + flags |= UV_FS_O_TRUNC; + needed_base |= UVWASI_RIGHT_PATH_FILESTAT_SET_SIZE; + } + + if ((fs_flags & UVWASI_FDFLAG_APPEND) != 0) + flags |= UV_FS_O_APPEND; + if ((fs_flags & UVWASI_FDFLAG_DSYNC) != 0) { + flags |= UV_FS_O_DSYNC; + needed_inheriting |= UVWASI_RIGHT_FD_DATASYNC; + } + if ((fs_flags & UVWASI_FDFLAG_NONBLOCK) != 0) + flags |= UV_FS_O_NONBLOCK; + if ((fs_flags & UVWASI_FDFLAG_RSYNC) != 0) { +#ifdef O_RSYNC + flags |= O_RSYNC; /* libuv has no UV_FS_O_RSYNC. */ +#else + flags |= UV_FS_O_SYNC; +#endif + needed_inheriting |= UVWASI_RIGHT_FD_SYNC; + } + if ((fs_flags & UVWASI_FDFLAG_SYNC) != 0) { + flags |= UV_FS_O_SYNC; + needed_inheriting |= UVWASI_RIGHT_FD_SYNC; + } + if (write && (flags & (UV_FS_O_APPEND | UV_FS_O_TRUNC)) == 0) + needed_inheriting |= UVWASI_RIGHT_FD_SEEK; + + err = uvwasi_fd_table_get(&uvwasi->fds, + dirfd, + &dirfd_wrap, + needed_base, + needed_inheriting); + if (err != UVWASI_ESUCCESS) + return err; + + err = uvwasi__resolve_path(uvwasi, + dirfd_wrap, + path, + path_len, + resolved_path, + dirflags); + if (err != UVWASI_ESUCCESS) { + uv_mutex_unlock(&dirfd_wrap->mutex); + return err; + } + + r = uv_fs_open(NULL, &req, resolved_path, flags, 0666, NULL); + uv_fs_req_cleanup(&req); + + if (r < 0) { + uv_mutex_unlock(&dirfd_wrap->mutex); + return uvwasi__translate_uv_error(r); + } + + err = uvwasi_fd_table_insert_fd(uvwasi, + &uvwasi->fds, + r, + flags, + resolved_path, + fs_rights_base, + fs_rights_inheriting, + &wrap); + if (err != UVWASI_ESUCCESS) { + uv_mutex_unlock(&dirfd_wrap->mutex); + goto close_file_and_error_exit; + } + + /* Not all platforms support UV_FS_O_DIRECTORY, so enforce it here as well. 
*/ + if ((o_flags & UVWASI_O_DIRECTORY) != 0 && + wrap.type != UVWASI_FILETYPE_DIRECTORY) { + uv_mutex_unlock(&dirfd_wrap->mutex); + uvwasi_fd_table_remove(uvwasi, &uvwasi->fds, wrap.id); + err = UVWASI_ENOTDIR; + goto close_file_and_error_exit; + } + + *fd = wrap.id; + uv_mutex_unlock(&dirfd_wrap->mutex); + return UVWASI_ESUCCESS; + +close_file_and_error_exit: + uv_fs_close(NULL, &req, r, NULL); + uv_fs_req_cleanup(&req); + return err; +} + + +uvwasi_errno_t uvwasi_path_readlink(uvwasi_t* uvwasi, + uvwasi_fd_t fd, + const char* path, + size_t path_len, + char* buf, + size_t buf_len, + size_t* bufused) { + char resolved_path[PATH_MAX_BYTES]; + struct uvwasi_fd_wrap_t* wrap; + uvwasi_errno_t err; + uv_fs_t req; + size_t len; + int r; + + if (uvwasi == NULL || path == NULL || buf == NULL || bufused == NULL) + return UVWASI_EINVAL; + + err = uvwasi_fd_table_get(&uvwasi->fds, + fd, + &wrap, + UVWASI_RIGHT_PATH_READLINK, + 0); + if (err != UVWASI_ESUCCESS) + return err; + + err = uvwasi__resolve_path(uvwasi, wrap, path, path_len, resolved_path, 0); + if (err != UVWASI_ESUCCESS) { + uv_mutex_unlock(&wrap->mutex); + return err; + } + + r = uv_fs_readlink(NULL, &req, resolved_path, NULL); + uv_mutex_unlock(&wrap->mutex); + if (r != 0) { + uv_fs_req_cleanup(&req); + return uvwasi__translate_uv_error(r); + } + + len = strnlen(req.ptr, buf_len); + if (len >= buf_len) { + uv_fs_req_cleanup(&req); + return UVWASI_ENOBUFS; + } + + memcpy(buf, req.ptr, len); + buf[len] = '\0'; + *bufused = len + 1; + uv_fs_req_cleanup(&req); + return UVWASI_ESUCCESS; +} + + +uvwasi_errno_t uvwasi_path_remove_directory(uvwasi_t* uvwasi, + uvwasi_fd_t fd, + const char* path, + size_t path_len) { + char resolved_path[PATH_MAX_BYTES]; + struct uvwasi_fd_wrap_t* wrap; + uv_fs_t req; + uvwasi_errno_t err; + int r; + + if (uvwasi == NULL || path == NULL) + return UVWASI_EINVAL; + + err = uvwasi_fd_table_get(&uvwasi->fds, + fd, + &wrap, + UVWASI_RIGHT_PATH_REMOVE_DIRECTORY, + 0); + if (err != UVWASI_ESUCCESS) + return err; + + err = uvwasi__resolve_path(uvwasi, wrap, path, path_len, resolved_path, 0); + if (err != UVWASI_ESUCCESS) { + uv_mutex_unlock(&wrap->mutex); + return err; + } + + r = uv_fs_rmdir(NULL, &req, resolved_path, NULL); + uv_mutex_unlock(&wrap->mutex); + uv_fs_req_cleanup(&req); + + if (r != 0) + return uvwasi__translate_uv_error(r); + + return UVWASI_ESUCCESS; +} + + +uvwasi_errno_t uvwasi_path_rename(uvwasi_t* uvwasi, + uvwasi_fd_t old_fd, + const char* old_path, + size_t old_path_len, + uvwasi_fd_t new_fd, + const char* new_path, + size_t new_path_len) { + char resolved_old_path[PATH_MAX_BYTES]; + char resolved_new_path[PATH_MAX_BYTES]; + struct uvwasi_fd_wrap_t* old_wrap; + struct uvwasi_fd_wrap_t* new_wrap; + uvwasi_errno_t err; + uv_fs_t req; + int r; + + if (uvwasi == NULL || old_path == NULL || new_path == NULL) + return UVWASI_EINVAL; + + if (old_fd == new_fd) { + err = uvwasi_fd_table_get(&uvwasi->fds, + old_fd, + &old_wrap, + UVWASI_RIGHT_PATH_RENAME_SOURCE | + UVWASI_RIGHT_PATH_RENAME_TARGET, + 0); + if (err != UVWASI_ESUCCESS) + return err; + new_wrap = old_wrap; + } else { + err = uvwasi_fd_table_get(&uvwasi->fds, + old_fd, + &old_wrap, + UVWASI_RIGHT_PATH_RENAME_SOURCE, + 0); + if (err != UVWASI_ESUCCESS) + return err; + + err = uvwasi_fd_table_get(&uvwasi->fds, + new_fd, + &new_wrap, + UVWASI_RIGHT_PATH_RENAME_TARGET, + 0); + if (err != UVWASI_ESUCCESS) { + uv_mutex_unlock(&old_wrap->mutex); + return err; + } + } + + err = uvwasi__resolve_path(uvwasi, + old_wrap, + old_path, + old_path_len, + 
resolved_old_path, + 0); + if (err != UVWASI_ESUCCESS) + goto exit; + + err = uvwasi__resolve_path(uvwasi, + new_wrap, + new_path, + new_path_len, + resolved_new_path, + 0); + if (err != UVWASI_ESUCCESS) + goto exit; + + r = uv_fs_rename(NULL, &req, resolved_old_path, resolved_new_path, NULL); + uv_fs_req_cleanup(&req); + if (r != 0) { + err = uvwasi__translate_uv_error(r); + goto exit; + } + + err = UVWASI_ESUCCESS; +exit: + uv_mutex_unlock(&old_wrap->mutex); + if (old_fd != new_fd) + uv_mutex_unlock(&new_wrap->mutex); + + return err; +} + + +uvwasi_errno_t uvwasi_path_symlink(uvwasi_t* uvwasi, + const char* old_path, + size_t old_path_len, + uvwasi_fd_t fd, + const char* new_path, + size_t new_path_len) { + char resolved_new_path[PATH_MAX_BYTES]; + struct uvwasi_fd_wrap_t* wrap; + uvwasi_errno_t err; + uv_fs_t req; + int r; + + if (uvwasi == NULL || old_path == NULL || new_path == NULL) + return UVWASI_EINVAL; + + err = uvwasi_fd_table_get(&uvwasi->fds, + fd, + &wrap, + UVWASI_RIGHT_PATH_SYMLINK, + 0); + if (err != UVWASI_ESUCCESS) + return err; + + err = uvwasi__resolve_path(uvwasi, + wrap, + new_path, + new_path_len, + resolved_new_path, + 0); + if (err != UVWASI_ESUCCESS) { + uv_mutex_unlock(&wrap->mutex); + return err; + } + + /* Windows support may require setting the flags option. */ + r = uv_fs_symlink(NULL, &req, old_path, resolved_new_path, 0, NULL); + uv_mutex_unlock(&wrap->mutex); + uv_fs_req_cleanup(&req); + if (r != 0) + return uvwasi__translate_uv_error(r); + + return UVWASI_ESUCCESS; +} + + +uvwasi_errno_t uvwasi_path_unlink_file(uvwasi_t* uvwasi, + uvwasi_fd_t fd, + const char* path, + size_t path_len) { + char resolved_path[PATH_MAX_BYTES]; + struct uvwasi_fd_wrap_t* wrap; + uv_fs_t req; + uvwasi_errno_t err; + int r; + + if (uvwasi == NULL || path == NULL) + return UVWASI_EINVAL; + + err = uvwasi_fd_table_get(&uvwasi->fds, + fd, + &wrap, + UVWASI_RIGHT_PATH_UNLINK_FILE, + 0); + if (err != UVWASI_ESUCCESS) + return err; + + err = uvwasi__resolve_path(uvwasi, wrap, path, path_len, resolved_path, 0); + if (err != UVWASI_ESUCCESS) { + uv_mutex_unlock(&wrap->mutex); + return err; + } + + r = uv_fs_unlink(NULL, &req, resolved_path, NULL); + uv_mutex_unlock(&wrap->mutex); + uv_fs_req_cleanup(&req); + + if (r != 0) + return uvwasi__translate_uv_error(r); + + return UVWASI_ESUCCESS; +} + + +uvwasi_errno_t uvwasi_poll_oneoff(uvwasi_t* uvwasi, + const uvwasi_subscription_t* in, + uvwasi_event_t* out, + size_t nsubscriptions, + size_t* nevents) { + /* TODO(cjihrig): Implement this. */ + return UVWASI_ENOTSUP; +} + + +uvwasi_errno_t uvwasi_proc_exit(uvwasi_t* uvwasi, uvwasi_exitcode_t rval) { + exit(rval); + return UVWASI_ESUCCESS; /* This doesn't happen. 
*/ +} + + +uvwasi_errno_t uvwasi_proc_raise(uvwasi_t* uvwasi, uvwasi_signal_t sig) { + int r; + + if (uvwasi == NULL) + return UVWASI_EINVAL; + + r = uvwasi__translate_to_uv_signal(sig); + if (r == -1) + return UVWASI_ENOSYS; + + r = uv_kill(uv_os_getpid(), r); + if (r != 0) + return uvwasi__translate_uv_error(r); + + return UVWASI_ESUCCESS; +} + + +uvwasi_errno_t uvwasi_random_get(uvwasi_t* uvwasi, void* buf, size_t buf_len) { + int r; + + if (uvwasi == NULL || buf == NULL) + return UVWASI_EINVAL; + + r = uv_random(NULL, NULL, buf, buf_len, 0, NULL); + if (r != 0) + return uvwasi__translate_uv_error(r); + + return UVWASI_ESUCCESS; +} + + +uvwasi_errno_t uvwasi_sched_yield(uvwasi_t* uvwasi) { + if (uvwasi == NULL) + return UVWASI_EINVAL; + +#ifndef _WIN32 + if (0 != sched_yield()) + return uvwasi__translate_uv_error(uv_translate_sys_error(errno)); +#else + SwitchToThread(); +#endif /* _WIN32 */ + + return UVWASI_ESUCCESS; +} + + +uvwasi_errno_t uvwasi_sock_recv(uvwasi_t* uvwasi, + uvwasi_fd_t sock, + const uvwasi_iovec_t* ri_data, + size_t ri_data_len, + uvwasi_riflags_t ri_flags, + size_t* ro_datalen, + uvwasi_roflags_t* ro_flags) { + /* TODO(cjihrig): Waiting to implement, pending + https://github.com/WebAssembly/WASI/issues/4 */ + return UVWASI_ENOTSUP; +} + + +uvwasi_errno_t uvwasi_sock_send(uvwasi_t* uvwasi, + uvwasi_fd_t sock, + const uvwasi_ciovec_t* si_data, + size_t si_data_len, + uvwasi_siflags_t si_flags, + size_t* so_datalen) { + /* TODO(cjihrig): Waiting to implement, pending + https://github.com/WebAssembly/WASI/issues/4 */ + return UVWASI_ENOTSUP; +} + + +uvwasi_errno_t uvwasi_sock_shutdown(uvwasi_t* uvwasi, + uvwasi_fd_t sock, + uvwasi_sdflags_t how) { + /* TODO(cjihrig): Waiting to implement, pending + https://github.com/WebAssembly/WASI/issues/4 */ + return UVWASI_ENOTSUP; +} + + +const char* uvwasi_embedder_err_code_to_string(uvwasi_errno_t code) { + switch (code) { +#define V(errcode) case errcode: return #errcode; + V(UVWASI_E2BIG) + V(UVWASI_EACCES) + V(UVWASI_EADDRINUSE) + V(UVWASI_EADDRNOTAVAIL) + V(UVWASI_EAFNOSUPPORT) + V(UVWASI_EAGAIN) + V(UVWASI_EALREADY) + V(UVWASI_EBADF) + V(UVWASI_EBADMSG) + V(UVWASI_EBUSY) + V(UVWASI_ECANCELED) + V(UVWASI_ECHILD) + V(UVWASI_ECONNABORTED) + V(UVWASI_ECONNREFUSED) + V(UVWASI_ECONNRESET) + V(UVWASI_EDEADLK) + V(UVWASI_EDESTADDRREQ) + V(UVWASI_EDOM) + V(UVWASI_EDQUOT) + V(UVWASI_EEXIST) + V(UVWASI_EFAULT) + V(UVWASI_EFBIG) + V(UVWASI_EHOSTUNREACH) + V(UVWASI_EIDRM) + V(UVWASI_EILSEQ) + V(UVWASI_EINPROGRESS) + V(UVWASI_EINTR) + V(UVWASI_EINVAL) + V(UVWASI_EIO) + V(UVWASI_EISCONN) + V(UVWASI_EISDIR) + V(UVWASI_ELOOP) + V(UVWASI_EMFILE) + V(UVWASI_EMLINK) + V(UVWASI_EMSGSIZE) + V(UVWASI_EMULTIHOP) + V(UVWASI_ENAMETOOLONG) + V(UVWASI_ENETDOWN) + V(UVWASI_ENETRESET) + V(UVWASI_ENETUNREACH) + V(UVWASI_ENFILE) + V(UVWASI_ENOBUFS) + V(UVWASI_ENODEV) + V(UVWASI_ENOENT) + V(UVWASI_ENOEXEC) + V(UVWASI_ENOLCK) + V(UVWASI_ENOLINK) + V(UVWASI_ENOMEM) + V(UVWASI_ENOMSG) + V(UVWASI_ENOPROTOOPT) + V(UVWASI_ENOSPC) + V(UVWASI_ENOSYS) + V(UVWASI_ENOTCONN) + V(UVWASI_ENOTDIR) + V(UVWASI_ENOTEMPTY) + V(UVWASI_ENOTRECOVERABLE) + V(UVWASI_ENOTSOCK) + V(UVWASI_ENOTSUP) + V(UVWASI_ENOTTY) + V(UVWASI_ENXIO) + V(UVWASI_EOVERFLOW) + V(UVWASI_EOWNERDEAD) + V(UVWASI_EPERM) + V(UVWASI_EPIPE) + V(UVWASI_EPROTO) + V(UVWASI_EPROTONOSUPPORT) + V(UVWASI_EPROTOTYPE) + V(UVWASI_ERANGE) + V(UVWASI_EROFS) + V(UVWASI_ESPIPE) + V(UVWASI_ESRCH) + V(UVWASI_ESTALE) + V(UVWASI_ETIMEDOUT) + V(UVWASI_ETXTBSY) + V(UVWASI_EXDEV) + V(UVWASI_ENOTCAPABLE) + V(UVWASI_ESUCCESS) 
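The error-name table above closes out the public surface added in deps/uvwasi/src/uvwasi.c. As a rough, hypothetical embedder-side sketch (not taken from this patch), the new API can be exercised as follows; the uvwasi_preopen_t element type name, the assumption that the embedder helpers are declared in uvwasi.h, the zero-initialization of unused uvwasi_options_t fields, and the concrete values ("app.wasm", an fd_table_size of 16, a "/sandbox" preopen) are illustration-only assumptions, not part of the diff:

#include <stdio.h>
#include <string.h>
#include "uvwasi.h"

int main(void) {
  uvwasi_t uvw;
  uvwasi_options_t opts;
  uvwasi_preopen_t preopen;        /* assumed name of the preopens element type */
  uvwasi_errno_t err;
  char* args[] = { "app.wasm" };   /* hypothetical guest argv */

  /* Zero unused fields, then fill in the ones uvwasi_init() reads above. */
  memset(&opts, 0, sizeof(opts));
  opts.fd_table_size = 16;         /* uvwasi_init() rejects a value of 0 */
  opts.argc = 1;
  opts.argv = args;
  opts.envp = NULL;                /* no environment entries to copy */
  opts.allocator = NULL;           /* NULL selects the built-in default_allocator */

  /* Each preopen must supply both mapped_path and real_path, or
     uvwasi_init() fails with UVWASI_EINVAL. */
  preopen.mapped_path = "/sandbox";  /* path as seen by the WASI guest */
  preopen.real_path = ".";           /* host directory backing it */
  opts.preopenc = 1;
  opts.preopens = &preopen;

  err = uvwasi_init(&uvw, &opts);
  if (err != UVWASI_ESUCCESS) {
    fprintf(stderr, "uvwasi_init: %s\n",
            uvwasi_embedder_err_code_to_string(err));
    return 1;
  }

  /* ...route WASI syscalls from the guest module to the uvwasi_* calls... */

  uvwasi_destroy(&uvw);
  return 0;
}

Note that uvwasi_init() above already calls uvwasi_destroy() on its own failure path before returning the error, so the sketch only destroys an instance that initialized successfully.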
+#undef V + default: + return "UVWASI_UNKNOWN_ERROR"; + } +} diff --git a/deps/uvwasi/src/uvwasi_alloc.h b/deps/uvwasi/src/uvwasi_alloc.h new file mode 100644 index 00000000000000..1486f313911b01 --- /dev/null +++ b/deps/uvwasi/src/uvwasi_alloc.h @@ -0,0 +1,11 @@ +#ifndef __UVWASI_ALLOC_H__ +#define __UVWASI_ALLOC_H__ + +#include "uvwasi.h" + +void* uvwasi__malloc(const uvwasi_t* uvwasi, size_t size); +void uvwasi__free(const uvwasi_t* uvwasi, void* ptr); +void* uvwasi__calloc(const uvwasi_t* uvwasi, size_t nmemb, size_t size); +void* uvwasi__realloc(const uvwasi_t* uvwasi, void* ptr, size_t size); + +#endif diff --git a/deps/uvwasi/uvwasi.gyp b/deps/uvwasi/uvwasi.gyp new file mode 100644 index 00000000000000..c07b07b608b724 --- /dev/null +++ b/deps/uvwasi/uvwasi.gyp @@ -0,0 +1,25 @@ +{ + 'targets': [ + { + 'target_name': 'uvwasi', + 'type': 'static_library', + 'cflags': ['-fvisibility=hidden'], + 'xcode_settings': { + 'GCC_SYMBOLS_PRIVATE_EXTERN': 'YES', # -fvisibility=hidden + }, + 'include_dirs': ['include'], + 'sources': [ + 'src/clocks.c', + 'src/fd_table.c', + 'src/uv_mapping.c', + 'src/uvwasi.c', + ], + 'dependencies': [ + '../uv/uv.gyp:libuv', + ], + 'direct_dependent_settings': { + 'include_dirs': ['include'] + }, + } + ] +} diff --git a/deps/v8/.gitignore b/deps/v8/.gitignore index ce47fa36103f45..6a9bbd67997311 100644 --- a/deps/v8/.gitignore +++ b/deps/v8/.gitignore @@ -52,6 +52,8 @@ /test/test262/data /test/test262/harness /test/wasm-js/data +/test/wasm-js/tests +/test/wasm-js/tests.tar.gz /test/wasm-spec-tests/tests /test/wasm-spec-tests/tests.tar.gz /third_party/* diff --git a/deps/v8/.gn b/deps/v8/.gn index 328778fb46bcdf..d4ad959954845f 100644 --- a/deps/v8/.gn +++ b/deps/v8/.gn @@ -16,4 +16,5 @@ check_targets = [] # These are the list of GN files that run exec_script. This whitelist exists # to force additional review for new uses of exec_script, which is strongly # discouraged except for gypi_to_gn calls. -exec_script_whitelist = build_dotfile_settings.exec_script_whitelist + [] +exec_script_whitelist = build_dotfile_settings.exec_script_whitelist + + [ "//build_overrides/build.gni" ] diff --git a/deps/v8/AUTHORS b/deps/v8/AUTHORS index 827d124b0dcf0d..1198de8f358fbc 100644 --- a/deps/v8/AUTHORS +++ b/deps/v8/AUTHORS @@ -39,6 +39,7 @@ Vewd Software AS <*@vewd.com> Groupon <*@groupon.com> Meteor Development Group <*@meteor.com> Cloudflare, Inc. <*@cloudflare.com> +Julia Computing, Inc. <*@juliacomputing.com> Aaron Bieber Abdulla Kamar @@ -74,6 +75,7 @@ Colin Ihrig Daniel Andersson Daniel Bevenius Daniel James +David Carlier Deepak Mohan Deon Dior Dominic Farolini @@ -163,6 +165,7 @@ Rob Wu Robert Meijer Robert Mustacchi Robert Nagy +Rong Wang Ross Kirsling Ruben Bridgewater Ryan Dahl diff --git a/deps/v8/BUILD.gn b/deps/v8/BUILD.gn index efca4a626f1633..90ec4097d7a2b8 100644 --- a/deps/v8/BUILD.gn +++ b/deps/v8/BUILD.gn @@ -91,7 +91,7 @@ declare_args() { # Enable embedded builtins. v8_enable_embedded_builtins = true - # Enable the registration of unwinding info for Windows/x64. + # Enable the registration of unwinding info for Windows x64 and ARM64. v8_win64_unwinding_info = true # Enable code comments for builtins in the snapshot (impacts performance). @@ -187,15 +187,21 @@ declare_args() { # Enable sharing read-only space across isolates. # Sets -DV8_SHARED_RO_HEAP. v8_enable_shared_ro_heap = "" -} -# We reuse the snapshot toolchain for building torque and other generators to -# avoid building v8_libbase on the host more than once. 
On mips with big endian, -# the snapshot toolchain is the target toolchain and, hence, can't be used. -v8_generator_toolchain = v8_snapshot_toolchain -if (host_cpu == "x64" && - (v8_current_cpu == "mips" || v8_current_cpu == "mips64")) { - v8_generator_toolchain = "//build/toolchain/linux:clang_x64" + # Enable lazy source positions by default. + v8_enable_lazy_source_positions = true + + # Disable write barriers when GCs are non-incremental and + # heap has single generation. + v8_disable_write_barriers = false + + # Redirect allocation in young generation so that there will be + # only one single generation. + v8_enable_single_generation = "" + + # Use token threaded dispatch for the regular expression interpreter. + # Use switch-based dispatch if this is false + v8_enable_regexp_interpreter_threaded_dispatch = true } # Derived defaults. @@ -231,6 +237,13 @@ if (v8_enable_fast_torque == "") { v8_enable_fast_torque = v8_enable_fast_mksnapshot } +if (v8_enable_single_generation == "") { + v8_enable_single_generation = v8_disable_write_barriers +} + +assert(!v8_disable_write_barriers || v8_enable_single_generation, + "Disabling write barriers works only with single generation") + assert(v8_current_cpu != "x86" || !v8_untrusted_code_mitigations, "Untrusted code mitigations are unsupported on ia32") @@ -424,12 +437,21 @@ config("features") { defines += [ "V8_SNAPSHOT_NATIVE_CODE_COUNTERS" ] } } + if (v8_enable_single_generation) { + defines += [ "V8_ENABLE_SINGLE_GENERATION" ] + } + if (v8_disable_write_barriers) { + defines += [ "V8_DISABLE_WRITE_BARRIERS" ] + } if (v8_use_external_startup_data) { defines += [ "V8_USE_EXTERNAL_STARTUP_DATA" ] } if (v8_enable_concurrent_marking) { defines += [ "V8_CONCURRENT_MARKING" ] } + if (v8_enable_lazy_source_positions) { + defines += [ "V8_ENABLE_LAZY_SOURCE_POSITIONS" ] + } if (v8_check_microtasks_scopes_consistency) { defines += [ "V8_CHECK_MICROTASKS_SCOPES_CONSISTENCY" ] } @@ -451,6 +473,9 @@ config("features") { if (v8_win64_unwinding_info) { defines += [ "V8_WIN64_UNWINDING_INFO" ] } + if (v8_enable_regexp_interpreter_threaded_dispatch) { + defines += [ "V8_ENABLE_REGEXP_INTERPRETER_THREADED_DISPATCH" ] + } } config("toolchain") { @@ -870,6 +895,8 @@ action("postmortem-metadata") { "src/objects/code.h", "src/objects/data-handler.h", "src/objects/data-handler-inl.h", + "src/objects/descriptor-array.h", + "src/objects/descriptor-array-inl.h", "src/objects/feedback-cell.h", "src/objects/feedback-cell-inl.h", "src/objects/fixed-array-inl.h", @@ -968,16 +995,21 @@ torque_files = [ "src/builtins/proxy-set-prototype-of.tq", "src/builtins/proxy.tq", "src/builtins/reflect.tq", + "src/builtins/regexp-match.tq", "src/builtins/regexp-replace.tq", + "src/builtins/regexp-source.tq", + "src/builtins/regexp-test.tq", "src/builtins/regexp.tq", "src/builtins/string.tq", "src/builtins/string-endswith.tq", "src/builtins/string-html.tq", "src/builtins/string-iterator.tq", + "src/builtins/string-pad.tq", "src/builtins/string-repeat.tq", "src/builtins/string-slice.tq", "src/builtins/string-startswith.tq", "src/builtins/string-substring.tq", + "src/builtins/torque-internal.tq", "src/builtins/typed-array-createtypedarray.tq", "src/builtins/typed-array-every.tq", "src/builtins/typed-array-filter.tq", @@ -1002,6 +1034,7 @@ if (!v8_enable_i18n_support) { action("run_torque") { visibility = [ ":*", + "tools/debug_helper/:*", "tools/gcmole/:*", "test/cctest/:*", ] @@ -1023,6 +1056,8 @@ action("run_torque") { "$target_gen_dir/torque-generated/class-definitions-tq.cc", 
"$target_gen_dir/torque-generated/class-definitions-tq-inl.h", "$target_gen_dir/torque-generated/class-definitions-tq.h", + "$target_gen_dir/torque-generated/class-debug-readers-tq.cc", + "$target_gen_dir/torque-generated/class-debug-readers-tq.h", "$target_gen_dir/torque-generated/exported-macros-assembler-tq.cc", "$target_gen_dir/torque-generated/exported-macros-assembler-tq.h", "$target_gen_dir/torque-generated/csa-types-tq.h", @@ -1752,6 +1787,8 @@ v8_compiler_sources = [ "src/compiler/escape-analysis-reducer.h", "src/compiler/escape-analysis.cc", "src/compiler/escape-analysis.h", + "src/compiler/feedback-source.cc", + "src/compiler/feedback-source.h", "src/compiler/frame-states.cc", "src/compiler/frame-states.h", "src/compiler/frame.cc", @@ -1892,8 +1929,6 @@ v8_compiler_sources = [ "src/compiler/types.h", "src/compiler/value-numbering-reducer.cc", "src/compiler/value-numbering-reducer.h", - "src/compiler/vector-slot-pair.cc", - "src/compiler/vector-slot-pair.h", "src/compiler/verifier.cc", "src/compiler/verifier.h", "src/compiler/wasm-compiler.cc", @@ -2031,7 +2066,6 @@ v8_source_set("v8_base_without_compiler") { "src/builtins/builtins-internal.cc", "src/builtins/builtins-intl.cc", "src/builtins/builtins-json.cc", - "src/builtins/builtins-math.cc", "src/builtins/builtins-number.cc", "src/builtins/builtins-object.cc", "src/builtins/builtins-promise.cc", @@ -2095,6 +2129,7 @@ v8_source_set("v8_base_without_compiler") { "src/codegen/register-arch.h", "src/codegen/register-configuration.cc", "src/codegen/register-configuration.h", + "src/codegen/register.cc", "src/codegen/register.h", "src/codegen/reglist.h", "src/codegen/reloc-info.cc", @@ -2117,6 +2152,7 @@ v8_source_set("v8_base_without_compiler") { "src/common/assert-scope.cc", "src/common/assert-scope.h", "src/common/checks.h", + "src/common/message-template.h", "src/common/ptr-compr-inl.h", "src/common/ptr-compr.h", "src/compiler-dispatcher/compiler-dispatcher.cc", @@ -2189,11 +2225,13 @@ v8_source_set("v8_base_without_compiler") { "src/execution/isolate-utils.h", "src/execution/isolate.cc", "src/execution/isolate.h", - "src/execution/message-template.h", "src/execution/messages.cc", "src/execution/messages.h", "src/execution/microtask-queue.cc", "src/execution/microtask-queue.h", + "src/execution/protectors-inl.h", + "src/execution/protectors.cc", + "src/execution/protectors.h", "src/execution/runtime-profiler.cc", "src/execution/runtime-profiler.h", "src/execution/simulator-base.cc", @@ -2758,7 +2796,6 @@ v8_source_set("v8_base_without_compiler") { "src/runtime/runtime-typedarray.cc", "src/runtime/runtime-utils.h", "src/runtime/runtime-wasm.cc", - "src/runtime/runtime-weak-refs.cc", "src/runtime/runtime.cc", "src/runtime/runtime.h", "src/sanitizer/asan.h", @@ -2922,6 +2959,8 @@ v8_source_set("v8_base_without_compiler") { "src/wasm/wasm-memory.h", "src/wasm/wasm-module-builder.cc", "src/wasm/wasm-module-builder.h", + "src/wasm/wasm-module-sourcemap.cc", + "src/wasm/wasm-module-sourcemap.h", "src/wasm/wasm-module.cc", "src/wasm/wasm-module.h", "src/wasm/wasm-objects-inl.h", @@ -3109,6 +3148,7 @@ v8_source_set("v8_base_without_compiler") { "src/diagnostics/arm64/eh-frame-arm64.cc", "src/execution/arm64/frame-constants-arm64.cc", "src/execution/arm64/frame-constants-arm64.h", + "src/execution/arm64/pointer-auth-arm64.cc", "src/execution/arm64/simulator-arm64.cc", "src/execution/arm64/simulator-arm64.h", "src/execution/arm64/simulator-logic-arm64.cc", @@ -3116,6 +3156,12 @@ v8_source_set("v8_base_without_compiler") { 
"src/regexp/arm64/regexp-macro-assembler-arm64.h", "src/wasm/baseline/arm64/liftoff-assembler-arm64.h", ] + if (is_win) { + sources += [ + "src/diagnostics/unwinding-info-win64.cc", + "src/diagnostics/unwinding-info-win64.h", + ] + } jumbo_excluded_sources += [ # TODO(mostynb@vewd.com): fix this code so it doesn't need # to be excluded, see the comments inside. @@ -3325,6 +3371,7 @@ v8_source_set("torque_base") { "src/torque/ast.h", "src/torque/cfg.cc", "src/torque/cfg.h", + "src/torque/class-debug-reader-generator.cc", "src/torque/constants.h", "src/torque/contextual.h", "src/torque/csa-generator.cc", @@ -3351,6 +3398,8 @@ v8_source_set("torque_base") { "src/torque/torque-compiler.h", "src/torque/torque-parser.cc", "src/torque/torque-parser.h", + "src/torque/type-inference.cc", + "src/torque/type-inference.h", "src/torque/type-oracle.cc", "src/torque/type-oracle.h", "src/torque/type-visitor.cc", diff --git a/deps/v8/ChangeLog b/deps/v8/ChangeLog index 27afc18a5117cd..be6a58859c5394 100644 --- a/deps/v8/ChangeLog +++ b/deps/v8/ChangeLog @@ -1,3 +1,1453 @@ +2019-09-04: Version 7.8.279 + + Performance and stability improvements on all platforms. + + +2019-09-04: Version 7.8.278 + + Performance and stability improvements on all platforms. + + +2019-09-04: Version 7.8.277 + + Performance and stability improvements on all platforms. + + +2019-09-04: Version 7.8.276 + + Performance and stability improvements on all platforms. + + +2019-09-04: Version 7.8.275 + + Performance and stability improvements on all platforms. + + +2019-09-04: Version 7.8.274 + + Performance and stability improvements on all platforms. + + +2019-09-04: Version 7.8.273 + + Performance and stability improvements on all platforms. + + +2019-09-04: Version 7.8.272 + + Performance and stability improvements on all platforms. + + +2019-09-04: Version 7.8.271 + + Performance and stability improvements on all platforms. + + +2019-09-04: Version 7.8.270 + + Performance and stability improvements on all platforms. + + +2019-09-03: Version 7.8.269 + + Performance and stability improvements on all platforms. + + +2019-09-03: Version 7.8.268 + + Performance and stability improvements on all platforms. + + +2019-09-03: Version 7.8.267 + + Performance and stability improvements on all platforms. + + +2019-09-03: Version 7.8.266 + + Performance and stability improvements on all platforms. + + +2019-09-03: Version 7.8.265 + + Performance and stability improvements on all platforms. + + +2019-09-03: Version 7.8.264 + + Performance and stability improvements on all platforms. + + +2019-09-03: Version 7.8.263 + + Performance and stability improvements on all platforms. + + +2019-09-03: Version 7.8.262 + + Performance and stability improvements on all platforms. + + +2019-09-03: Version 7.8.261 + + Performance and stability improvements on all platforms. + + +2019-09-02: Version 7.8.260 + + Performance and stability improvements on all platforms. + + +2019-09-02: Version 7.8.259 + + Performance and stability improvements on all platforms. + + +2019-09-02: Version 7.8.258 + + Performance and stability improvements on all platforms. + + +2019-09-02: Version 7.8.257 + + Performance and stability improvements on all platforms. + + +2019-09-02: Version 7.8.256 + + Performance and stability improvements on all platforms. + + +2019-09-02: Version 7.8.255 + + Performance and stability improvements on all platforms. + + +2019-09-02: Version 7.8.254 + + Performance and stability improvements on all platforms. 
+ + +2019-09-02: Version 7.8.253 + + Performance and stability improvements on all platforms. + + +2019-09-02: Version 7.8.252 + + Performance and stability improvements on all platforms. + + +2019-09-02: Version 7.8.251 + + Performance and stability improvements on all platforms. + + +2019-09-02: Version 7.8.250 + + Performance and stability improvements on all platforms. + + +2019-09-02: Version 7.8.249 + + Performance and stability improvements on all platforms. + + +2019-09-02: Version 7.8.248 + + Performance and stability improvements on all platforms. + + +2019-09-02: Version 7.8.247 + + Performance and stability improvements on all platforms. + + +2019-09-02: Version 7.8.246 + + Performance and stability improvements on all platforms. + + +2019-09-02: Version 7.8.245 + + Performance and stability improvements on all platforms. + + +2019-09-01: Version 7.8.244 + + Performance and stability improvements on all platforms. + + +2019-08-31: Version 7.8.243 + + Performance and stability improvements on all platforms. + + +2019-08-31: Version 7.8.242 + + Performance and stability improvements on all platforms. + + +2019-08-30: Version 7.8.241 + + Performance and stability improvements on all platforms. + + +2019-08-30: Version 7.8.240 + + Performance and stability improvements on all platforms. + + +2019-08-30: Version 7.8.239 + + Performance and stability improvements on all platforms. + + +2019-08-30: Version 7.8.238 + + Performance and stability improvements on all platforms. + + +2019-08-30: Version 7.8.237 + + Performance and stability improvements on all platforms. + + +2019-08-30: Version 7.8.236 + + Performance and stability improvements on all platforms. + + +2019-08-30: Version 7.8.235 + + Performance and stability improvements on all platforms. + + +2019-08-30: Version 7.8.234 + + Performance and stability improvements on all platforms. + + +2019-08-30: Version 7.8.233 + + Performance and stability improvements on all platforms. + + +2019-08-30: Version 7.8.232 + + Performance and stability improvements on all platforms. + + +2019-08-29: Version 7.8.231 + + Performance and stability improvements on all platforms. + + +2019-08-29: Version 7.8.230 + + Performance and stability improvements on all platforms. + + +2019-08-29: Version 7.8.229 + + Performance and stability improvements on all platforms. + + +2019-08-29: Version 7.8.228 + + Performance and stability improvements on all platforms. + + +2019-08-29: Version 7.8.227 + + Performance and stability improvements on all platforms. + + +2019-08-29: Version 7.8.226 + + Performance and stability improvements on all platforms. + + +2019-08-29: Version 7.8.225 + + Performance and stability improvements on all platforms. + + +2019-08-29: Version 7.8.224 + + Performance and stability improvements on all platforms. + + +2019-08-29: Version 7.8.223 + + Performance and stability improvements on all platforms. + + +2019-08-29: Version 7.8.222 + + Performance and stability improvements on all platforms. + + +2019-08-29: Version 7.8.221 + + Performance and stability improvements on all platforms. + + +2019-08-28: Version 7.8.220 + + Performance and stability improvements on all platforms. + + +2019-08-28: Version 7.8.219 + + Performance and stability improvements on all platforms. + + +2019-08-28: Version 7.8.218 + + Performance and stability improvements on all platforms. + + +2019-08-28: Version 7.8.217 + + Performance and stability improvements on all platforms. 
+ + +2019-08-28: Version 7.8.216 + + Performance and stability improvements on all platforms. + + +2019-08-28: Version 7.8.215 + + Performance and stability improvements on all platforms. + + +2019-08-28: Version 7.8.214 + + Performance and stability improvements on all platforms. + + +2019-08-28: Version 7.8.213 + + Performance and stability improvements on all platforms. + + +2019-08-28: Version 7.8.212 + + Performance and stability improvements on all platforms. + + +2019-08-28: Version 7.8.211 + + Performance and stability improvements on all platforms. + + +2019-08-28: Version 7.8.210 + + Performance and stability improvements on all platforms. + + +2019-08-28: Version 7.8.209 + + Performance and stability improvements on all platforms. + + +2019-08-28: Version 7.8.208 + + Performance and stability improvements on all platforms. + + +2019-08-27: Version 7.8.207 + + Performance and stability improvements on all platforms. + + +2019-08-27: Version 7.8.206 + + Performance and stability improvements on all platforms. + + +2019-08-27: Version 7.8.205 + + Performance and stability improvements on all platforms. + + +2019-08-27: Version 7.8.204 + + Performance and stability improvements on all platforms. + + +2019-08-27: Version 7.8.203 + + Performance and stability improvements on all platforms. + + +2019-08-27: Version 7.8.202 + + Performance and stability improvements on all platforms. + + +2019-08-27: Version 7.8.201 + + Performance and stability improvements on all platforms. + + +2019-08-26: Version 7.8.200 + + Performance and stability improvements on all platforms. + + +2019-08-26: Version 7.8.199 + + Performance and stability improvements on all platforms. + + +2019-08-26: Version 7.8.198 + + Performance and stability improvements on all platforms. + + +2019-08-26: Version 7.8.197 + + Performance and stability improvements on all platforms. + + +2019-08-26: Version 7.8.196 + + Performance and stability improvements on all platforms. + + +2019-08-26: Version 7.8.195 + + Performance and stability improvements on all platforms. + + +2019-08-26: Version 7.8.194 + + Performance and stability improvements on all platforms. + + +2019-08-26: Version 7.8.193 + + Performance and stability improvements on all platforms. + + +2019-08-26: Version 7.8.192 + + Performance and stability improvements on all platforms. + + +2019-08-26: Version 7.8.191 + + Performance and stability improvements on all platforms. + + +2019-08-26: Version 7.8.190 + + Performance and stability improvements on all platforms. + + +2019-08-25: Version 7.8.189 + + Performance and stability improvements on all platforms. + + +2019-08-23: Version 7.8.188 + + Performance and stability improvements on all platforms. + + +2019-08-23: Version 7.8.187 + + Performance and stability improvements on all platforms. + + +2019-08-23: Version 7.8.186 + + Performance and stability improvements on all platforms. + + +2019-08-23: Version 7.8.185 + + Performance and stability improvements on all platforms. + + +2019-08-23: Version 7.8.184 + + Performance and stability improvements on all platforms. + + +2019-08-23: Version 7.8.183 + + Performance and stability improvements on all platforms. + + +2019-08-23: Version 7.8.182 + + Performance and stability improvements on all platforms. + + +2019-08-23: Version 7.8.181 + + Performance and stability improvements on all platforms. + + +2019-08-23: Version 7.8.180 + + Performance and stability improvements on all platforms. 
+ + +2019-08-23: Version 7.8.179 + + Performance and stability improvements on all platforms. + + +2019-08-23: Version 7.8.178 + + Performance and stability improvements on all platforms. + + +2019-08-22: Version 7.8.177 + + Performance and stability improvements on all platforms. + + +2019-08-22: Version 7.8.176 + + Performance and stability improvements on all platforms. + + +2019-08-22: Version 7.8.175 + + Performance and stability improvements on all platforms. + + +2019-08-22: Version 7.8.174 + + Performance and stability improvements on all platforms. + + +2019-08-22: Version 7.8.173 + + Performance and stability improvements on all platforms. + + +2019-08-22: Version 7.8.172 + + Performance and stability improvements on all platforms. + + +2019-08-22: Version 7.8.171 + + Performance and stability improvements on all platforms. + + +2019-08-22: Version 7.8.170 + + Performance and stability improvements on all platforms. + + +2019-08-22: Version 7.8.169 + + Performance and stability improvements on all platforms. + + +2019-08-22: Version 7.8.168 + + Performance and stability improvements on all platforms. + + +2019-08-22: Version 7.8.167 + + Performance and stability improvements on all platforms. + + +2019-08-22: Version 7.8.166 + + Performance and stability improvements on all platforms. + + +2019-08-22: Version 7.8.165 + + Performance and stability improvements on all platforms. + + +2019-08-22: Version 7.8.164 + + Performance and stability improvements on all platforms. + + +2019-08-22: Version 7.8.163 + + Performance and stability improvements on all platforms. + + +2019-08-22: Version 7.8.162 + + Performance and stability improvements on all platforms. + + +2019-08-22: Version 7.8.161 + + Performance and stability improvements on all platforms. + + +2019-08-22: Version 7.8.160 + + Performance and stability improvements on all platforms. + + +2019-08-22: Version 7.8.159 + + Performance and stability improvements on all platforms. + + +2019-08-22: Version 7.8.158 + + Performance and stability improvements on all platforms. + + +2019-08-22: Version 7.8.157 + + Performance and stability improvements on all platforms. + + +2019-08-21: Version 7.8.156 + + Performance and stability improvements on all platforms. + + +2019-08-21: Version 7.8.155 + + Performance and stability improvements on all platforms. + + +2019-08-21: Version 7.8.154 + + Performance and stability improvements on all platforms. + + +2019-08-21: Version 7.8.153 + + Performance and stability improvements on all platforms. + + +2019-08-21: Version 7.8.152 + + Performance and stability improvements on all platforms. + + +2019-08-21: Version 7.8.151 + + Performance and stability improvements on all platforms. + + +2019-08-21: Version 7.8.150 + + Performance and stability improvements on all platforms. + + +2019-08-21: Version 7.8.149 + + Performance and stability improvements on all platforms. + + +2019-08-21: Version 7.8.148 + + Performance and stability improvements on all platforms. + + +2019-08-21: Version 7.8.147 + + Performance and stability improvements on all platforms. + + +2019-08-21: Version 7.8.146 + + Performance and stability improvements on all platforms. + + +2019-08-21: Version 7.8.145 + + Performance and stability improvements on all platforms. + + +2019-08-21: Version 7.8.144 + + Performance and stability improvements on all platforms. + + +2019-08-21: Version 7.8.143 + + Performance and stability improvements on all platforms. 
+ + +2019-08-21: Version 7.8.142 + + Performance and stability improvements on all platforms. + + +2019-08-21: Version 7.8.141 + + Performance and stability improvements on all platforms. + + +2019-08-21: Version 7.8.140 + + Performance and stability improvements on all platforms. + + +2019-08-21: Version 7.8.139 + + Performance and stability improvements on all platforms. + + +2019-08-20: Version 7.8.138 + + Performance and stability improvements on all platforms. + + +2019-08-20: Version 7.8.137 + + Performance and stability improvements on all platforms. + + +2019-08-20: Version 7.8.136 + + Performance and stability improvements on all platforms. + + +2019-08-20: Version 7.8.135 + + Performance and stability improvements on all platforms. + + +2019-08-20: Version 7.8.134 + + Performance and stability improvements on all platforms. + + +2019-08-20: Version 7.8.133 + + Performance and stability improvements on all platforms. + + +2019-08-20: Version 7.8.132 + + Performance and stability improvements on all platforms. + + +2019-08-20: Version 7.8.131 + + Performance and stability improvements on all platforms. + + +2019-08-20: Version 7.8.130 + + Performance and stability improvements on all platforms. + + +2019-08-20: Version 7.8.129 + + Performance and stability improvements on all platforms. + + +2019-08-20: Version 7.8.128 + + Performance and stability improvements on all platforms. + + +2019-08-20: Version 7.8.127 + + Performance and stability improvements on all platforms. + + +2019-08-20: Version 7.8.126 + + Performance and stability improvements on all platforms. + + +2019-08-20: Version 7.8.125 + + Performance and stability improvements on all platforms. + + +2019-08-20: Version 7.8.124 + + Performance and stability improvements on all platforms. + + +2019-08-20: Version 7.8.123 + + Performance and stability improvements on all platforms. + + +2019-08-20: Version 7.8.122 + + Performance and stability improvements on all platforms. + + +2019-08-20: Version 7.8.121 + + Performance and stability improvements on all platforms. + + +2019-08-20: Version 7.8.120 + + Performance and stability improvements on all platforms. + + +2019-08-19: Version 7.8.119 + + Performance and stability improvements on all platforms. + + +2019-08-19: Version 7.8.118 + + Performance and stability improvements on all platforms. + + +2019-08-19: Version 7.8.117 + + Performance and stability improvements on all platforms. + + +2019-08-19: Version 7.8.116 + + Performance and stability improvements on all platforms. + + +2019-08-19: Version 7.8.115 + + Performance and stability improvements on all platforms. + + +2019-08-19: Version 7.8.114 + + Performance and stability improvements on all platforms. + + +2019-08-16: Version 7.8.113 + + Performance and stability improvements on all platforms. + + +2019-08-15: Version 7.8.112 + + Performance and stability improvements on all platforms. + + +2019-08-14: Version 7.8.111 + + Performance and stability improvements on all platforms. + + +2019-08-14: Version 7.8.110 + + Performance and stability improvements on all platforms. + + +2019-08-14: Version 7.8.109 + + Performance and stability improvements on all platforms. + + +2019-08-14: Version 7.8.108 + + Performance and stability improvements on all platforms. + + +2019-08-14: Version 7.8.107 + + Performance and stability improvements on all platforms. + + +2019-08-14: Version 7.8.106 + + Performance and stability improvements on all platforms. 
+ + +2019-08-13: Version 7.8.105 + + Performance and stability improvements on all platforms. + + +2019-08-13: Version 7.8.104 + + Performance and stability improvements on all platforms. + + +2019-08-13: Version 7.8.103 + + Performance and stability improvements on all platforms. + + +2019-08-13: Version 7.8.102 + + Performance and stability improvements on all platforms. + + +2019-08-13: Version 7.8.101 + + Performance and stability improvements on all platforms. + + +2019-08-13: Version 7.8.100 + + Performance and stability improvements on all platforms. + + +2019-08-13: Version 7.8.99 + + Performance and stability improvements on all platforms. + + +2019-08-13: Version 7.8.98 + + Performance and stability improvements on all platforms. + + +2019-08-13: Version 7.8.97 + + Performance and stability improvements on all platforms. + + +2019-08-13: Version 7.8.96 + + Performance and stability improvements on all platforms. + + +2019-08-13: Version 7.8.95 + + Performance and stability improvements on all platforms. + + +2019-08-13: Version 7.8.94 + + Performance and stability improvements on all platforms. + + +2019-08-13: Version 7.8.93 + + Performance and stability improvements on all platforms. + + +2019-08-12: Version 7.8.92 + + Performance and stability improvements on all platforms. + + +2019-08-12: Version 7.8.91 + + Performance and stability improvements on all platforms. + + +2019-08-12: Version 7.8.90 + + Performance and stability improvements on all platforms. + + +2019-08-12: Version 7.8.89 + + Performance and stability improvements on all platforms. + + +2019-08-12: Version 7.8.88 + + Performance and stability improvements on all platforms. + + +2019-08-12: Version 7.8.87 + + Performance and stability improvements on all platforms. + + +2019-08-12: Version 7.8.86 + + Performance and stability improvements on all platforms. + + +2019-08-12: Version 7.8.85 + + Performance and stability improvements on all platforms. + + +2019-08-12: Version 7.8.84 + + Performance and stability improvements on all platforms. + + +2019-08-12: Version 7.8.83 + + Performance and stability improvements on all platforms. + + +2019-08-12: Version 7.8.82 + + Performance and stability improvements on all platforms. + + +2019-08-12: Version 7.8.81 + + Performance and stability improvements on all platforms. + + +2019-08-12: Version 7.8.80 + + Performance and stability improvements on all platforms. + + +2019-08-12: Version 7.8.79 + + Performance and stability improvements on all platforms. + + +2019-08-12: Version 7.8.78 + + Performance and stability improvements on all platforms. + + +2019-08-09: Version 7.8.77 + + Performance and stability improvements on all platforms. + + +2019-08-09: Version 7.8.76 + + Performance and stability improvements on all platforms. + + +2019-08-09: Version 7.8.75 + + Performance and stability improvements on all platforms. + + +2019-08-09: Version 7.8.74 + + Performance and stability improvements on all platforms. + + +2019-08-09: Version 7.8.73 + + Performance and stability improvements on all platforms. + + +2019-08-09: Version 7.8.72 + + Performance and stability improvements on all platforms. + + +2019-08-08: Version 7.8.71 + + Performance and stability improvements on all platforms. + + +2019-08-08: Version 7.8.70 + + Performance and stability improvements on all platforms. + + +2019-08-08: Version 7.8.69 + + Performance and stability improvements on all platforms. + + +2019-08-08: Version 7.8.68 + + Performance and stability improvements on all platforms. 
+ + +2019-08-07: Version 7.8.67 + + Performance and stability improvements on all platforms. + + +2019-08-07: Version 7.8.66 + + Performance and stability improvements on all platforms. + + +2019-08-07: Version 7.8.65 + + Performance and stability improvements on all platforms. + + +2019-08-06: Version 7.8.64 + + Performance and stability improvements on all platforms. + + +2019-08-06: Version 7.8.63 + + Performance and stability improvements on all platforms. + + +2019-08-06: Version 7.8.62 + + Performance and stability improvements on all platforms. + + +2019-08-05: Version 7.8.61 + + Performance and stability improvements on all platforms. + + +2019-08-05: Version 7.8.60 + + Performance and stability improvements on all platforms. + + +2019-08-04: Version 7.8.59 + + Performance and stability improvements on all platforms. + + +2019-08-02: Version 7.8.58 + + Performance and stability improvements on all platforms. + + +2019-08-02: Version 7.8.57 + + Performance and stability improvements on all platforms. + + +2019-08-02: Version 7.8.56 + + Performance and stability improvements on all platforms. + + +2019-08-02: Version 7.8.55 + + Performance and stability improvements on all platforms. + + +2019-08-02: Version 7.8.54 + + Performance and stability improvements on all platforms. + + +2019-08-02: Version 7.8.53 + + Performance and stability improvements on all platforms. + + +2019-08-01: Version 7.8.52 + + Performance and stability improvements on all platforms. + + +2019-08-01: Version 7.8.51 + + Performance and stability improvements on all platforms. + + +2019-08-01: Version 7.8.50 + + Performance and stability improvements on all platforms. + + +2019-08-01: Version 7.8.49 + + Performance and stability improvements on all platforms. + + +2019-08-01: Version 7.8.48 + + Performance and stability improvements on all platforms. + + +2019-08-01: Version 7.8.47 + + Performance and stability improvements on all platforms. + + +2019-07-31: Version 7.8.46 + + Performance and stability improvements on all platforms. + + +2019-07-31: Version 7.8.45 + + Performance and stability improvements on all platforms. + + +2019-07-31: Version 7.8.44 + + Performance and stability improvements on all platforms. + + +2019-07-31: Version 7.8.43 + + Performance and stability improvements on all platforms. + + +2019-07-31: Version 7.8.42 + + Performance and stability improvements on all platforms. + + +2019-07-31: Version 7.8.41 + + Performance and stability improvements on all platforms. + + +2019-07-31: Version 7.8.40 + + Performance and stability improvements on all platforms. + + +2019-07-31: Version 7.8.39 + + Performance and stability improvements on all platforms. + + +2019-07-31: Version 7.8.38 + + Performance and stability improvements on all platforms. + + +2019-07-31: Version 7.8.37 + + Performance and stability improvements on all platforms. + + +2019-07-30: Version 7.8.36 + + Performance and stability improvements on all platforms. + + +2019-07-30: Version 7.8.35 + + Performance and stability improvements on all platforms. + + +2019-07-30: Version 7.8.34 + + Performance and stability improvements on all platforms. + + +2019-07-30: Version 7.8.33 + + Performance and stability improvements on all platforms. + + +2019-07-30: Version 7.8.32 + + Performance and stability improvements on all platforms. + + +2019-07-30: Version 7.8.31 + + Performance and stability improvements on all platforms. + + +2019-07-30: Version 7.8.30 + + Performance and stability improvements on all platforms. 
+ + +2019-07-30: Version 7.8.29 + + Performance and stability improvements on all platforms. + + +2019-07-30: Version 7.8.28 + + Performance and stability improvements on all platforms. + + +2019-07-30: Version 7.8.27 + + Performance and stability improvements on all platforms. + + +2019-07-30: Version 7.8.26 + + Performance and stability improvements on all platforms. + + +2019-07-30: Version 7.8.25 + + Performance and stability improvements on all platforms. + + +2019-07-30: Version 7.8.24 + + Performance and stability improvements on all platforms. + + +2019-07-30: Version 7.8.23 + + Performance and stability improvements on all platforms. + + +2019-07-30: Version 7.8.22 + + Performance and stability improvements on all platforms. + + +2019-07-30: Version 7.8.21 + + Performance and stability improvements on all platforms. + + +2019-07-30: Version 7.8.20 + + Performance and stability improvements on all platforms. + + +2019-07-30: Version 7.8.19 + + Performance and stability improvements on all platforms. + + +2019-07-30: Version 7.8.18 + + Performance and stability improvements on all platforms. + + +2019-07-30: Version 7.8.17 + + Performance and stability improvements on all platforms. + + +2019-07-30: Version 7.8.16 + + Performance and stability improvements on all platforms. + + +2019-07-29: Version 7.8.15 + + Performance and stability improvements on all platforms. + + +2019-07-29: Version 7.8.14 + + Performance and stability improvements on all platforms. + + +2019-07-29: Version 7.8.13 + + Performance and stability improvements on all platforms. + + +2019-07-29: Version 7.8.12 + + Performance and stability improvements on all platforms. + + +2019-07-29: Version 7.8.11 + + Performance and stability improvements on all platforms. + + +2019-07-29: Version 7.8.10 + + Performance and stability improvements on all platforms. + + +2019-07-29: Version 7.8.9 + + Performance and stability improvements on all platforms. + + +2019-07-29: Version 7.8.8 + + Performance and stability improvements on all platforms. + + +2019-07-29: Version 7.8.7 + + Performance and stability improvements on all platforms. + + +2019-07-29: Version 7.8.6 + + Performance and stability improvements on all platforms. + + +2019-07-29: Version 7.8.5 + + Performance and stability improvements on all platforms. + + +2019-07-29: Version 7.8.4 + + Performance and stability improvements on all platforms. + + +2019-07-29: Version 7.8.3 + + Performance and stability improvements on all platforms. + + +2019-07-29: Version 7.8.2 + + Performance and stability improvements on all platforms. + + +2019-07-28: Version 7.8.1 + + Performance and stability improvements on all platforms. + + +2019-07-24: Version 7.7.310 + + Performance and stability improvements on all platforms. + + +2019-07-24: Version 7.7.309 + + Performance and stability improvements on all platforms. + + +2019-07-24: Version 7.7.308 + + Performance and stability improvements on all platforms. + + +2019-07-24: Version 7.7.307 + + Performance and stability improvements on all platforms. + + +2019-07-24: Version 7.7.306 + + Performance and stability improvements on all platforms. + + +2019-07-24: Version 7.7.305 + + Performance and stability improvements on all platforms. + + +2019-07-24: Version 7.7.304 + + Performance and stability improvements on all platforms. + + +2019-07-24: Version 7.7.303 + + Performance and stability improvements on all platforms. + + +2019-07-24: Version 7.7.302 + + Performance and stability improvements on all platforms. 
+ + +2019-07-24: Version 7.7.301 + + Performance and stability improvements on all platforms. + + +2019-07-24: Version 7.7.300 + + Performance and stability improvements on all platforms. + + 2019-07-23: Version 7.7.299 Performance and stability improvements on all platforms. diff --git a/deps/v8/DEPS b/deps/v8/DEPS index 986264356f99cc..a7d4081edb856c 100644 --- a/deps/v8/DEPS +++ b/deps/v8/DEPS @@ -3,6 +3,21 @@ # all paths in here must match this assumption. vars = { + # Fetches only the SDK boot images which match at least one of the whitelist + # entries in a comma-separated list. + # + # Only the X64 and ARM64 QEMU images are downloaded by default. Developers + # that need to boot on other target architectures or devices can opt to + # download more boot images. Example of images include: + # + # Emulation: + # qemu.x64, qemu.arm64 + # Hardware: + # generic.x64, generic.arm64 + # + # Wildcards are supported (e.g. "qemu.*"). + 'checkout_fuchsia_boot_images': "qemu.x64,qemu.arm64", + 'checkout_instrumented_libraries': False, 'chromium_url': 'https://chromium.googlesource.com', 'android_url': 'https://android.googlesource.com', @@ -12,7 +27,7 @@ vars = { 'check_v8_header_includes': False, # GN CIPD package version. - 'gn_version': 'git_revision:972ed755f8e6d31cae9ba15fcd08136ae1a7886f', + 'gn_version': 'git_revision:152c5144ceed9592c20f0c8fd55769646077569b', # luci-go CIPD package version. 'luci_go': 'git_revision:7d11fd9e66407c49cb6c8546a2ae45ea993a240c', @@ -20,7 +35,7 @@ vars = { # Three lines of non-changing comments so that # the commit queue can handle CLs rolling android_sdk_build-tools_version # and whatever else without interference from each other. - 'android_sdk_build-tools_version': 'DLK621q5_Bga5EsOr7cp6bHWWxFKx6UHLu_Ix_m3AckC', + 'android_sdk_build-tools_version': '5DL7LQQjVMLClXLzLgmGysccPGsGcjJdvH9z5-uetiIC', # Three lines of non-changing comments so that # the commit queue can handle CLs rolling android_sdk_emulator_version # and whatever else without interference from each other. 
@@ -57,15 +72,15 @@ vars = { deps = { 'v8/build': - Var('chromium_url') + '/chromium/src/build.git' + '@' + '1e5d7d692f816af8136c738b79fe9e8dde8057f6', + Var('chromium_url') + '/chromium/src/build.git' + '@' + '693faeda4ee025796c7e473d953a5a7b6ad64c93', 'v8/third_party/depot_tools': - Var('chromium_url') + '/chromium/tools/depot_tools.git' + '@' + 'ee7b9dda90e409fb92031d511151debe5db7db9f', + Var('chromium_url') + '/chromium/tools/depot_tools.git' + '@' + 'f38bc1796282c61087dcf15abc61b8fd18a68402', 'v8/third_party/icu': - Var('chromium_url') + '/chromium/deps/icu.git' + '@' + 'fd97d4326fac6da84452b2d5fe75ff0949368dab', + Var('chromium_url') + '/chromium/deps/icu.git' + '@' + '53f6b233a41ec982d8445996247093f7aaf41639', 'v8/third_party/instrumented_libraries': Var('chromium_url') + '/chromium/src/third_party/instrumented_libraries.git' + '@' + 'b1c3ca20848c117eb935b02c25d441f03e6fbc5e', 'v8/buildtools': - Var('chromium_url') + '/chromium/src/buildtools.git' + '@' + '67b293ca1316d06f7f00160ce35c92b8849a9dc9', + Var('chromium_url') + '/chromium/src/buildtools.git' + '@' + '74cfb57006f83cfe050817526db359d5c8a11628', 'v8/buildtools/clang_format/script': Var('chromium_url') + '/chromium/llvm-project/cfe/tools/clang-format.git' + '@' + '96636aa0e9f047f17447f2d45a094d0b59ed7917', 'v8/buildtools/linux64': { @@ -105,9 +120,9 @@ deps = { 'condition': 'host_os == "win"', }, 'v8/base/trace_event/common': - Var('chromium_url') + '/chromium/src/base/trace_event/common.git' + '@' + 'cfe8887fa6ac3170e23a68949930e28d4705a16f', + Var('chromium_url') + '/chromium/src/base/trace_event/common.git' + '@' + '5e4fce17a9d2439c44a7b57ceecef6df9287ec2f', 'v8/third_party/android_ndk': { - 'url': Var('chromium_url') + '/android_ndk.git' + '@' + '4e2cea441bfd43f0863d14f57b1e1844260b9884', + 'url': Var('chromium_url') + '/android_ndk.git' + '@' + '62582753e869484bf0cc7f7e8d184ce0077033c2', 'condition': 'checkout_android', }, 'v8/third_party/android_sdk/public': { @@ -153,7 +168,7 @@ deps = { 'dep_type': 'cipd', }, 'v8/third_party/catapult': { - 'url': Var('chromium_url') + '/catapult.git' + '@' + '53913cecb11a3ef993f6496b9110964e2e2aeec3', + 'url': Var('chromium_url') + '/catapult.git' + '@' + 'e7c719c3e85f76938bf4fef0ba37c27f89246f71', 'condition': 'checkout_android', }, 'v8/third_party/colorama/src': { @@ -161,11 +176,11 @@ deps = { 'condition': 'checkout_android', }, 'v8/third_party/fuchsia-sdk': { - 'url': Var('chromium_url') + '/chromium/src/third_party/fuchsia-sdk.git' + '@' + '5fd29151cf35c0813c33cc368a7c78389e3f5caa', + 'url': Var('chromium_url') + '/chromium/src/third_party/fuchsia-sdk.git' + '@' + '1785f0ac8e1fe81cb25e260acbe7de8f62fa3e44', 'condition': 'checkout_fuchsia', }, 'v8/third_party/googletest/src': - Var('chromium_url') + '/external/github.com/google/googletest.git' + '@' + '6077f444da944d96d311d358d761164261f1cdd0', + Var('chromium_url') + '/external/github.com/google/googletest.git' + '@' + '565f1b848215b77c3732bca345fe76a0431d8b34', 'v8/third_party/jinja2': Var('chromium_url') + '/chromium/src/third_party/jinja2.git' + '@' + 'b41863e42637544c2941b574c7877d3e1f663e25', 'v8/third_party/markupsafe': @@ -177,7 +192,7 @@ deps = { 'v8/test/mozilla/data': Var('chromium_url') + '/v8/deps/third_party/mozilla-tests.git' + '@' + 'f6c578a10ea707b1a8ab0b88943fe5115ce2b9be', 'v8/test/test262/data': - Var('chromium_url') + '/external/github.com/tc39/test262.git' + '@' + '26a2268436f28f64c4539d9aab9ebd0f0b7c99c5', + Var('chromium_url') + '/external/github.com/tc39/test262.git' + '@' + 
'59a1a016b7cf5cf43f66b274c7d1db4ec6066935', 'v8/test/test262/harness': Var('chromium_url') + '/external/github.com/test262-utils/test262-harness-py.git' + '@' + '4555345a943d0c99a9461182705543fb171dda4b', 'v8/third_party/qemu-linux-x64': { @@ -201,7 +216,7 @@ deps = { 'dep_type': 'cipd', }, 'v8/tools/clang': - Var('chromium_url') + '/chromium/src/tools/clang.git' + '@' + 'f485a21a9cb05494161d97d545c3b29447610ffb', + Var('chromium_url') + '/chromium/src/tools/clang.git' + '@' + '2fef805e5b05b26a8c87c47865590b5f43218611', 'v8/tools/luci-go': { 'packages': [ { @@ -230,10 +245,8 @@ deps = { 'condition': 'checkout_mac', 'dep_type': 'cipd', }, - 'v8/test/wasm-js/data': - Var('chromium_url') + '/external/github.com/WebAssembly/spec.git' + '@' + '1a411f713d9850ce7da24719aba5bb80c535f562', 'v8/third_party/perfetto': - Var('android_url') + '/platform/external/perfetto.git' + '@' + '0e8281399fd854de13461f2c1c9f2fb0b8e9c3ae', + Var('android_url') + '/platform/external/perfetto.git' + '@' + '01615892494a9a8dc84414962d0a817bf97de2c2', 'v8/third_party/protobuf': Var('chromium_url') + '/external/github.com/google/protobuf'+ '@' + 'b68a347f56137b4b1a746e8c7438495a6ac1bd91', } @@ -346,6 +359,17 @@ hooks = [ '-s', 'v8/test/wasm-spec-tests/tests.tar.gz.sha1', ], }, + { + 'name': 'wasm_js', + 'pattern': '.', + 'action': [ 'download_from_google_storage', + '--no_resume', + '--no_auth', + '-u', + '--bucket', 'v8-wasm-spec-tests', + '-s', 'v8/test/wasm-js/tests.tar.gz.sha1', + ], + }, { 'name': 'sysroot_arm', 'pattern': '.', @@ -410,6 +434,13 @@ hooks = [ 'condition': 'checkout_win', 'action': ['python', 'v8/build/vs_toolchain.py', 'update'], }, + { + # Update the Mac toolchain if necessary. + 'name': 'mac_toolchain', + 'pattern': '.', + 'condition': 'checkout_mac', + 'action': ['python', 'v8/build/mac_toolchain.py'], + }, # Pull binutils for linux, enabled debug fission for faster linking / # debugging when used with clang on Ubuntu Precise. # https://code.google.com/p/chromium/issues/detail?id=352046 @@ -444,6 +475,7 @@ hooks = [ 'action': [ 'python', 'v8/build/fuchsia/update_sdk.py', + '--boot-images={checkout_fuchsia_boot_images}', ], }, { diff --git a/deps/v8/OWNERS b/deps/v8/OWNERS index be360966665b38..9ab84b1e2759de 100644 --- a/deps/v8/OWNERS +++ b/deps/v8/OWNERS @@ -1,31 +1,31 @@ # Eng reviewer. Please reach out before adding new top-level directories. # Disagreement among owners should be escalated to eng reviewers. -file://ENG_REVIEW_OWNERS +file:ENG_REVIEW_OWNERS -per-file .clang-format=file://INFRA_OWNERS -per-file .clang-tidy=file://INFRA_OWNERS -per-file .editorconfig=file://INFRA_OWNERS -per-file .flake8=file://INFRA_OWNERS -per-file .git-blame-ignore-revs=file://INFRA_OWNERS -per-file .gitattributes=file://INFRA_OWNERS -per-file .gitignore=file://INFRA_OWNERS -per-file .gn=file://INFRA_OWNERS -per-file .vpython=file://INFRA_OWNERS -per-file .ycm_extra_conf.py=file://INFRA_OWNERS -per-file BUILD.gn=file://COMMON_OWNERS -per-file DEPS=file://INFRA_OWNERS +per-file .clang-format=file:INFRA_OWNERS +per-file .clang-tidy=file:INFRA_OWNERS +per-file .editorconfig=file:INFRA_OWNERS +per-file .flake8=file:INFRA_OWNERS +per-file .git-blame-ignore-revs=file:INFRA_OWNERS +per-file .gitattributes=file:INFRA_OWNERS +per-file .gitignore=file:INFRA_OWNERS +per-file .gn=file:INFRA_OWNERS +per-file .vpython=file:INFRA_OWNERS +per-file .ycm_extra_conf.py=file:INFRA_OWNERS +per-file BUILD.gn=file:COMMON_OWNERS +per-file DEPS=file:INFRA_OWNERS # For Test262 rolls. 
per-file DEPS=mathias@chromium.org -per-file PRESUBMIT=file://INFRA_OWNERS -per-file codereview.settings=file://INFRA_OWNERS +per-file PRESUBMIT=file:INFRA_OWNERS +per-file codereview.settings=file:INFRA_OWNERS -per-file AUTHORS=file://COMMON_OWNERS -per-file WATCHLIST=file://COMMON_OWNERS +per-file AUTHORS=file:COMMON_OWNERS +per-file WATCHLIST=file:COMMON_OWNERS -per-file *-mips*=file://MIPS_OWNERS -per-file *-mips64*=file://MIPS_OWNERS -per-file *-ppc*=file://PPC_OWNERS -per-file *-s390*=file://S390_OWNERS +per-file *-mips*=file:MIPS_OWNERS +per-file *-mips64*=file:MIPS_OWNERS +per-file *-ppc*=file:PPC_OWNERS +per-file *-s390*=file:S390_OWNERS # TEAM: v8-dev@googlegroups.com # COMPONENT: Blink>JavaScript diff --git a/deps/v8/base/trace_event/common/trace_event_common.h b/deps/v8/base/trace_event/common/trace_event_common.h index f1878a18da91c6..57ac0254d96376 100644 --- a/deps/v8/base/trace_event/common/trace_event_common.h +++ b/deps/v8/base/trace_event/common/trace_event_common.h @@ -256,6 +256,13 @@ INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_INSTANT, category_group, name, \ TRACE_EVENT_FLAG_COPY | scope, arg1_name, arg1_val, \ arg2_name, arg2_val) +#define TRACE_EVENT_INSTANT_WITH_FLAGS0(category_group, name, scope_and_flags) \ + INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_INSTANT, category_group, name, \ + scope_and_flags) +#define TRACE_EVENT_INSTANT_WITH_FLAGS1(category_group, name, scope_and_flags, \ + arg1_name, arg1_val) \ + INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_INSTANT, category_group, name, \ + scope_and_flags, arg1_name, arg1_val) #define TRACE_EVENT_INSTANT_WITH_TIMESTAMP0(category_group, name, scope, \ timestamp) \ @@ -285,12 +292,12 @@ INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_BEGIN, category_group, name, \ TRACE_EVENT_FLAG_NONE, arg1_name, arg1_val, \ arg2_name, arg2_val) -#define TRACE_EVENT_COPY_BEGIN0(category_group, name) \ - INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_BEGIN, category_group, name, \ - TRACE_EVENT_FLAG_COPY) -#define TRACE_EVENT_COPY_BEGIN1(category_group, name, arg1_name, arg1_val) \ - INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_BEGIN, category_group, name, \ - TRACE_EVENT_FLAG_COPY, arg1_name, arg1_val) +#define TRACE_EVENT_BEGIN_WITH_FLAGS0(category_group, name, flags) \ + INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_BEGIN, category_group, name, flags) +#define TRACE_EVENT_BEGIN_WITH_FLAGS1(category_group, name, flags, arg1_name, \ + arg1_val) \ + INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_BEGIN, category_group, name, \ + flags, arg1_name, arg1_val) #define TRACE_EVENT_COPY_BEGIN2(category_group, name, arg1_name, arg1_val, \ arg2_name, arg2_val) \ INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_BEGIN, category_group, name, \ @@ -341,12 +348,12 @@ INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_END, category_group, name, \ TRACE_EVENT_FLAG_NONE, arg1_name, arg1_val, \ arg2_name, arg2_val) -#define TRACE_EVENT_COPY_END0(category_group, name) \ - INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_END, category_group, name, \ - TRACE_EVENT_FLAG_COPY) -#define TRACE_EVENT_COPY_END1(category_group, name, arg1_name, arg1_val) \ - INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_END, category_group, name, \ - TRACE_EVENT_FLAG_COPY, arg1_name, arg1_val) +#define TRACE_EVENT_END_WITH_FLAGS0(category_group, name, flags) \ + INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_END, category_group, name, flags) +#define TRACE_EVENT_END_WITH_FLAGS1(category_group, name, flags, arg1_name, \ + arg1_val) \ + INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_END, category_group, name, flags, \ + 
arg1_name, arg1_val) #define TRACE_EVENT_COPY_END2(category_group, name, arg1_name, arg1_val, \ arg2_name, arg2_val) \ INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_END, category_group, name, \ @@ -580,6 +587,9 @@ INTERNAL_TRACE_EVENT_ADD_WITH_ID( \ TRACE_EVENT_PHASE_ASYNC_BEGIN, category_group, name, id, \ TRACE_EVENT_FLAG_COPY, arg1_name, arg1_val, arg2_name, arg2_val) +#define TRACE_EVENT_ASYNC_BEGIN_WITH_FLAGS0(category_group, name, id, flags) \ + INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_ASYNC_BEGIN, \ + category_group, name, id, flags) // Similar to TRACE_EVENT_ASYNC_BEGINx but with a custom |at| timestamp // provided. @@ -606,6 +616,11 @@ INTERNAL_TRACE_EVENT_ADD_WITH_ID_TID_AND_TIMESTAMP( \ TRACE_EVENT_PHASE_ASYNC_BEGIN, category_group, name, id, \ TRACE_EVENT_API_CURRENT_THREAD_ID, timestamp, TRACE_EVENT_FLAG_COPY) +#define TRACE_EVENT_ASYNC_BEGIN_WITH_TIMESTAMP_AND_FLAGS0( \ + category_group, name, id, timestamp, flags) \ + INTERNAL_TRACE_EVENT_ADD_WITH_ID_TID_AND_TIMESTAMP( \ + TRACE_EVENT_PHASE_ASYNC_BEGIN, category_group, name, id, \ + TRACE_EVENT_API_CURRENT_THREAD_ID, timestamp, flags) // Records a single ASYNC_STEP_INTO event for |step| immediately. If the // category is not enabled, then this does nothing. The |name| and |id| must @@ -677,6 +692,9 @@ INTERNAL_TRACE_EVENT_ADD_WITH_ID( \ TRACE_EVENT_PHASE_ASYNC_END, category_group, name, id, \ TRACE_EVENT_FLAG_COPY, arg1_name, arg1_val, arg2_name, arg2_val) +#define TRACE_EVENT_ASYNC_END_WITH_FLAGS0(category_group, name, id, flags) \ + INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_ASYNC_END, \ + category_group, name, id, flags) // Similar to TRACE_EVENT_ASYNC_ENDx but with a custom |at| timestamp provided. #define TRACE_EVENT_ASYNC_END_WITH_TIMESTAMP0(category_group, name, id, \ @@ -702,6 +720,11 @@ INTERNAL_TRACE_EVENT_ADD_WITH_ID_TID_AND_TIMESTAMP( \ TRACE_EVENT_PHASE_ASYNC_END, category_group, name, id, \ TRACE_EVENT_API_CURRENT_THREAD_ID, timestamp, TRACE_EVENT_FLAG_COPY) +#define TRACE_EVENT_ASYNC_END_WITH_TIMESTAMP_AND_FLAGS0(category_group, name, \ + id, timestamp, flags) \ + INTERNAL_TRACE_EVENT_ADD_WITH_ID_TID_AND_TIMESTAMP( \ + TRACE_EVENT_PHASE_ASYNC_END, category_group, name, id, \ + TRACE_EVENT_API_CURRENT_THREAD_ID, timestamp, flags) // NESTABLE_ASYNC_* APIs are used to describe an async operation, which can // be nested within a NESTABLE_ASYNC event and/or have inner NESTABLE_ASYNC @@ -935,6 +958,9 @@ #define TRACE_TASK_EXECUTION(run_function, task) \ INTERNAL_TRACE_TASK_EXECUTION(run_function, task) +#define TRACE_LOG_MESSAGE(file, message, line) \ + INTERNAL_TRACE_LOG_MESSAGE(file, message, line) + // TRACE_EVENT_METADATA* events are information related to other // injected events, not events in their own right. #define TRACE_EVENT_METADATA1(category_group, name, arg1_name, arg1_val) \ @@ -1075,6 +1101,8 @@ // TODO(eseckler): Remove once we have native support for typed proto events in // TRACE_EVENT macros. 
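// --- Illustrative sketch, not part of the diff ---
// A minimal example of how an embedder might use the new *_WITH_FLAGS trace
// macros added above. It assumes trace_event_common.h and the usual
// INTERNAL_TRACE_EVENT_* plumbing are already provided by the embedder; the
// category and event names are hypothetical.
void EmitSampleTraceEvents() {
  TRACE_EVENT_INSTANT_WITH_FLAGS0(
      "v8.example", "Tick", TRACE_EVENT_SCOPE_THREAD | TRACE_EVENT_FLAG_COPY);
  TRACE_EVENT_BEGIN_WITH_FLAGS0("v8.example", "Work", TRACE_EVENT_FLAG_COPY);
  // ... the traced work would happen here ...
  TRACE_EVENT_END_WITH_FLAGS0("v8.example", "Work", TRACE_EVENT_FLAG_COPY);
}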
#define TRACE_EVENT_FLAG_TYPED_PROTO_ARGS (static_cast(1 << 15)) +#define TRACE_EVENT_FLAG_JAVA_STRING_LITERALS \ + (static_cast(1 << 16)) #define TRACE_EVENT_FLAG_SCOPE_MASK \ (static_cast(TRACE_EVENT_FLAG_SCOPE_OFFSET | \ diff --git a/deps/v8/benchmarks/OWNERS b/deps/v8/benchmarks/OWNERS index 852d438bb0a884..3c70cea2fd5e6a 100644 --- a/deps/v8/benchmarks/OWNERS +++ b/deps/v8/benchmarks/OWNERS @@ -1 +1 @@ -file://COMMON_OWNERS +file:../COMMON_OWNERS diff --git a/deps/v8/build_overrides/OWNERS b/deps/v8/build_overrides/OWNERS index bdb1d555a4fb98..cb04fa0838fbb5 100644 --- a/deps/v8/build_overrides/OWNERS +++ b/deps/v8/build_overrides/OWNERS @@ -1 +1 @@ -file://INFRA_OWNERS +file:../INFRA_OWNERS diff --git a/deps/v8/build_overrides/build.gni b/deps/v8/build_overrides/build.gni index 12ef8b28d6b612..5b99eb94022596 100644 --- a/deps/v8/build_overrides/build.gni +++ b/deps/v8/build_overrides/build.gni @@ -35,5 +35,16 @@ tsan_suppressions_file = "//build/sanitizers/tsan_suppressions.cc" # Skip assertions about 4GiB file size limit. ignore_elf32_limitations = true -# Use the system install of Xcode for tools like ibtool, libtool, etc. -use_system_xcode = true +if (host_os == "mac") { + _result = exec_script("//build/mac/should_use_hermetic_xcode.py", + [ target_os ], + "value") + assert(_result != 2, + "Do not allow building targets with the default" + + "hermetic toolchain if the minimum OS version is not met.") + assert(_result != 3, + "iOS does not support building with a hermetic toolchain. " + + "Please install Xcode.") + + use_system_xcode = _result == 0 +} diff --git a/deps/v8/custom_deps/OWNERS b/deps/v8/custom_deps/OWNERS index bdb1d555a4fb98..cb04fa0838fbb5 100644 --- a/deps/v8/custom_deps/OWNERS +++ b/deps/v8/custom_deps/OWNERS @@ -1 +1 @@ -file://INFRA_OWNERS +file:../INFRA_OWNERS diff --git a/deps/v8/gni/OWNERS b/deps/v8/gni/OWNERS index bdb1d555a4fb98..cb04fa0838fbb5 100644 --- a/deps/v8/gni/OWNERS +++ b/deps/v8/gni/OWNERS @@ -1 +1 @@ -file://INFRA_OWNERS +file:../INFRA_OWNERS diff --git a/deps/v8/gni/snapshot_toolchain.gni b/deps/v8/gni/snapshot_toolchain.gni index f4f1f1d88e258d..b5fb1823b382e0 100644 --- a/deps/v8/gni/snapshot_toolchain.gni +++ b/deps/v8/gni/snapshot_toolchain.gni @@ -107,3 +107,12 @@ if (v8_snapshot_toolchain == "") { assert(v8_snapshot_toolchain != "", "Do not know how to build a snapshot for $current_toolchain " + "on $host_os $host_cpu") + +# We reuse the snapshot toolchain for building torque and other generators to +# avoid building v8_libbase on the host more than once. On mips with big endian, +# the snapshot toolchain is the target toolchain and, hence, can't be used. 
+v8_generator_toolchain = v8_snapshot_toolchain +if (host_cpu == "x64" && + (v8_current_cpu == "mips" || v8_current_cpu == "mips64")) { + v8_generator_toolchain = "//build/toolchain/linux:clang_x64" +} diff --git a/deps/v8/include/OWNERS b/deps/v8/include/OWNERS index 7ffbf74ce94d90..b64069847bc1cc 100644 --- a/deps/v8/include/OWNERS +++ b/deps/v8/include/OWNERS @@ -3,8 +3,8 @@ danno@chromium.org ulan@chromium.org yangguo@chromium.org -per-file *DEPS=file://COMMON_OWNERS -per-file v8-internal.h=file://COMMON_OWNERS +per-file *DEPS=file:../COMMON_OWNERS +per-file v8-internal.h=file:../COMMON_OWNERS per-file v8-inspector.h=dgozman@chromium.org per-file v8-inspector.h=pfeldman@chromium.org per-file v8-inspector.h=kozyatinskiy@chromium.org diff --git a/deps/v8/include/libplatform/v8-tracing.h b/deps/v8/include/libplatform/v8-tracing.h index e7cd8bfcdb66d0..df145e95bf723b 100644 --- a/deps/v8/include/libplatform/v8-tracing.h +++ b/deps/v8/include/libplatform/v8-tracing.h @@ -244,6 +244,8 @@ class V8_PLATFORM_EXPORT TracingController TracingController(); ~TracingController() override; + + // Takes ownership of |trace_buffer|. void Initialize(TraceBuffer* trace_buffer); #ifdef V8_USE_PERFETTO // Must be called before StartTracing() if V8_USE_PERFETTO is true. Provides diff --git a/deps/v8/include/v8-internal.h b/deps/v8/include/v8-internal.h index fe2ce67e0df04e..8ce88fb3efcba8 100644 --- a/deps/v8/include/v8-internal.h +++ b/deps/v8/include/v8-internal.h @@ -63,8 +63,8 @@ struct SmiTagging<4> { V8_INLINE static int SmiToInt(const internal::Address value) { int shift_bits = kSmiTagSize + kSmiShiftSize; - // Shift down (requires >> to be sign extending). - return static_cast(static_cast(value)) >> shift_bits; + // Truncate and shift down (requires >> to be sign extending). + return static_cast(static_cast(value)) >> shift_bits; } V8_INLINE static constexpr bool IsValidSmi(intptr_t value) { // Is value in range [kSmiMinValue, kSmiMaxValue]. diff --git a/deps/v8/include/v8-profiler.h b/deps/v8/include/v8-profiler.h index 645920d9c1b357..2c7d4b12e29123 100644 --- a/deps/v8/include/v8-profiler.h +++ b/deps/v8/include/v8-profiler.h @@ -26,6 +26,10 @@ struct CpuProfileDeoptFrame { size_t position; }; +namespace internal { +class CpuProfile; +} // namespace internal + } // namespace v8 #ifdef V8_OS_WIN @@ -48,75 +52,6 @@ template class V8_EXPORT std::vector; namespace v8 { -// TickSample captures the information collected for each sample. -struct V8_EXPORT TickSample { - // Internal profiling (with --prof + tools/$OS-tick-processor) wants to - // include the runtime function we're calling. Externally exposed tick - // samples don't care. - enum RecordCEntryFrame { kIncludeCEntryFrame, kSkipCEntryFrame }; - - TickSample() - : state(OTHER), - pc(nullptr), - external_callback_entry(nullptr), - frames_count(0), - has_external_callback(false), - update_stats(true) {} - - /** - * Initialize a tick sample from the isolate. - * \param isolate The isolate. - * \param state Execution state. - * \param record_c_entry_frame Include or skip the runtime function. - * \param update_stats Whether update the sample to the aggregated stats. - * \param use_simulator_reg_state When set to true and V8 is running under a - * simulator, the method will use the simulator - * register state rather than the one provided - * with |state| argument. Otherwise the method - * will use provided register |state| as is. 
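// --- Illustrative sketch, not part of the diff ---
// Worked example of the "truncate and shift" Smi decoding introduced in the
// SmiTagging<4> change above, assuming the 31-bit Smi layout where
// kSmiTagSize == 1 and kSmiShiftSize == 0. With pointer compression the upper
// 32 bits of the word may hold unrelated data, so the value is truncated to
// 32 bits before the sign-extending shift.
#include <cstdint>

int DecodeSmi31(uintptr_t raw) {
  const int shift_bits = 1 /* kSmiTagSize */ + 0 /* kSmiShiftSize */;
  return static_cast<int32_t>(static_cast<uint32_t>(raw)) >> shift_bits;
}

// e.g. DecodeSmi31 of a word whose low half is 0xFFFFFFFE yields -1: the
// upper half is discarded, 0xFFFFFFFE reinterprets as -2, and the arithmetic
// shift by one halves it while preserving the sign.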
- */ - void Init(Isolate* isolate, const v8::RegisterState& state, - RecordCEntryFrame record_c_entry_frame, bool update_stats, - bool use_simulator_reg_state = true); - /** - * Get a call stack sample from the isolate. - * \param isolate The isolate. - * \param state Register state. - * \param record_c_entry_frame Include or skip the runtime function. - * \param frames Caller allocated buffer to store stack frames. - * \param frames_limit Maximum number of frames to capture. The buffer must - * be large enough to hold the number of frames. - * \param sample_info The sample info is filled up by the function - * provides number of actual captured stack frames and - * the current VM state. - * \param use_simulator_reg_state When set to true and V8 is running under a - * simulator, the method will use the simulator - * register state rather than the one provided - * with |state| argument. Otherwise the method - * will use provided register |state| as is. - * \note GetStackSample is thread and signal safe and should only be called - * when the JS thread is paused or interrupted. - * Otherwise the behavior is undefined. - */ - static bool GetStackSample(Isolate* isolate, v8::RegisterState* state, - RecordCEntryFrame record_c_entry_frame, - void** frames, size_t frames_limit, - v8::SampleInfo* sample_info, - bool use_simulator_reg_state = true); - StateTag state; // The state of the VM. - void* pc; // Instruction pointer. - union { - void* tos; // Top stack value (*sp). - void* external_callback_entry; - }; - static const unsigned kMaxFramesCountLog2 = 8; - static const unsigned kMaxFramesCount = (1 << kMaxFramesCountLog2) - 1; - void* stack[kMaxFramesCount]; // Call stack. - unsigned frames_count : kMaxFramesCountLog2; // Number of captured frames. - bool has_external_callback : 1; - bool update_stats : 1; // Whether the sample should update aggregated stats. -}; - /** * CpuProfileNode represents a node in a call graph. */ @@ -307,6 +242,15 @@ enum CpuProfilingNamingMode { kDebugNaming, }; +enum CpuProfilingLoggingMode { + // Enables logging when a profile is active, and disables logging when all + // profiles are detached. + kLazyLogging, + // Enables logging for the lifetime of the CpuProfiler. Calls to + // StartRecording are faster, at the expense of runtime overhead. + kEagerLogging, +}; + /** * Optional profiling attributes. */ @@ -340,6 +284,11 @@ class V8_EXPORT CpuProfilingOptions { int sampling_interval_us() const { return sampling_interval_us_; } private: + friend class internal::CpuProfile; + + bool has_filter_context() const; + void* raw_filter_context() const; + CpuProfilingMode mode_; unsigned max_samples_; int sampling_interval_us_; @@ -359,6 +308,9 @@ class V8_EXPORT CpuProfiler { static CpuProfiler* New(Isolate* isolate); static CpuProfiler* New(Isolate* isolate, CpuProfilingNamingMode mode); + static CpuProfiler* New(Isolate* isolate, + CpuProfilingNamingMode namingMode, + CpuProfilingLoggingMode loggingMode); /** * Synchronously collect current stack sample in all profilers attached to diff --git a/deps/v8/include/v8-version.h b/deps/v8/include/v8-version.h index a87655058a058d..38881eb38fcbbf 100644 --- a/deps/v8/include/v8-version.h +++ b/deps/v8/include/v8-version.h @@ -9,9 +9,9 @@ // NOTE these macros are used by some of the tool scripts and the build // system so their names cannot be changed without changing the scripts. 
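// --- Illustrative sketch, not part of the diff ---
// How an embedder might pick the new CpuProfilingLoggingMode when creating a
// profiler, using the three-argument CpuProfiler::New overload added above.
// |isolate| is assumed to be a live v8::Isolate*.
#include "v8-profiler.h"

v8::CpuProfiler* CreateEagerProfiler(v8::Isolate* isolate) {
  // kEagerLogging keeps logging enabled for the profiler's lifetime, trading
  // steady-state overhead for faster calls when recording starts.
  return v8::CpuProfiler::New(isolate, v8::kDebugNaming, v8::kEagerLogging);
}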
#define V8_MAJOR_VERSION 7 -#define V8_MINOR_VERSION 7 -#define V8_BUILD_NUMBER 299 -#define V8_PATCH_LEVEL 13 +#define V8_MINOR_VERSION 8 +#define V8_BUILD_NUMBER 279 +#define V8_PATCH_LEVEL 23 // Use 1 for candidates and 0 otherwise. // (Boolean macro values are not supported by all preprocessors.) diff --git a/deps/v8/include/v8.h b/deps/v8/include/v8.h index f3fbdc696294e4..aa19cf8caadf79 100644 --- a/deps/v8/include/v8.h +++ b/deps/v8/include/v8.h @@ -19,6 +19,8 @@ #include #include #include +#include +#include #include #include @@ -129,6 +131,7 @@ class PropertyCallbackArguments; class FunctionCallbackArguments; class GlobalHandles; class ScopedExternalStringLock; +class ThreadLocalTop; namespace wasm { class NativeModule; @@ -823,24 +826,43 @@ template using UniquePersistent = Global; /** - * A traced handle with move semantics, similar to std::unique_ptr. The handle - * is to be used together with |v8::EmbedderHeapTracer| and specifies edges from - * the embedder into V8's heap. + * Trait specifying behavior of |TracedGlobal|. + */ +template +struct TracedGlobalTrait { + /** + * Specifies whether |TracedGlobal| should clear its handle on destruction. + * + * V8 will *not* clear the embedder-side memory of the handle. The embedder is + * expected to report all |TracedGlobal| handles through + * |EmbedderHeapTracer| upon garabge collection. + * + * See |EmbedderHeapTracer::IsRootForNonTracingGC| for handling with + * non-tracing GCs in V8. + */ + static constexpr bool kRequiresExplicitDestruction = true; +}; + +/** + * A traced handle with copy and move semantics. The handle is to be used + * together with |v8::EmbedderHeapTracer| and specifies edges from the embedder + * into V8's heap. * * The exact semantics are: * - Tracing garbage collections use |v8::EmbedderHeapTracer|. * - Non-tracing garbage collections refer to * |v8::EmbedderHeapTracer::IsRootForNonTracingGC()| whether the handle should * be treated as root or not. + * + * For destruction semantics see |TracedGlobalTrait|. */ template -class V8_EXPORT TracedGlobal { +class TracedGlobal { public: /** * An empty TracedGlobal without storage cell. */ TracedGlobal() = default; - ~TracedGlobal() { Reset(); } /** * Construct a TracedGlobal from a Local. @@ -857,7 +879,41 @@ class V8_EXPORT TracedGlobal { /** * Move constructor initializing TracedGlobal from an existing one. */ - V8_INLINE TracedGlobal(TracedGlobal&& other); + V8_INLINE TracedGlobal(TracedGlobal&& other) { + // Forward to operator=. + *this = std::move(other); + } + + /** + * Move constructor initializing TracedGlobal from an existing one. + */ + template + V8_INLINE TracedGlobal(TracedGlobal&& other) { + // Forward to operator=. + *this = std::move(other); + } + + /** + * Copy constructor initializing TracedGlobal from an existing one. + */ + V8_INLINE TracedGlobal(const TracedGlobal& other) { + // Forward to operator=; + *this = other; + } + + /** + * Copy constructor initializing TracedGlobal from an existing one. + */ + template + V8_INLINE TracedGlobal(const TracedGlobal& other) { + // Forward to operator=; + *this = other; + } + + /** + * Move assignment operator initializing TracedGlobal from an existing one. + */ + V8_INLINE TracedGlobal& operator=(TracedGlobal&& rhs); /** * Move assignment operator initializing TracedGlobal from an existing one. @@ -866,10 +922,21 @@ class V8_EXPORT TracedGlobal { V8_INLINE TracedGlobal& operator=(TracedGlobal&& rhs); /** - * TracedGlobal only supports move semantics and forbids copying. 
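// --- Illustrative sketch, not part of the diff ---
// With the copy support added to TracedGlobal in this revision, traced
// handles can be duplicated and stored in embedder-side structures. |isolate|
// and |object| are assumed to be valid; the Wrapper type is hypothetical.
#include <utility>
#include "v8.h"

struct Wrapper {
  v8::TracedGlobal<v8::Object> handle;
};

void StoreHandles(v8::Isolate* isolate, v8::Local<v8::Object> object) {
  v8::TracedGlobal<v8::Object> traced(isolate, object);
  Wrapper copied{traced};            // copy: both handles now refer to |object|
  Wrapper moved{std::move(traced)};  // move: |traced| is empty afterwards
  // By default a TracedGlobal clears its handle on destruction; specializing
  // TracedGlobalTrait<...>::kRequiresExplicitDestruction to false (see the
  // trait above) opts out of that behavior.
}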
+ * Copy assignment operator initializing TracedGlobal from an existing one. + * + * Note: Prohibited when |other| has a finalization callback set through + * |SetFinalizationCallback|. + */ + V8_INLINE TracedGlobal& operator=(const TracedGlobal& rhs); + + /** + * Copy assignment operator initializing TracedGlobal from an existing one. + * + * Note: Prohibited when |other| has a finalization callback set through + * |SetFinalizationCallback|. */ - TracedGlobal(const TracedGlobal&) = delete; - void operator=(const TracedGlobal&) = delete; + template + V8_INLINE TracedGlobal& operator=(const TracedGlobal& rhs); /** * Returns true if this TracedGlobal is empty, i.e., has not been assigned an @@ -903,8 +970,8 @@ class V8_EXPORT TracedGlobal { template V8_INLINE bool operator==(const TracedGlobal& that) const { - internal::Address* a = reinterpret_cast(this->val_); - internal::Address* b = reinterpret_cast(that.val_); + internal::Address* a = reinterpret_cast(**this); + internal::Address* b = reinterpret_cast(*that); if (a == nullptr) return b == nullptr; if (b == nullptr) return false; return *a == *b; @@ -912,8 +979,8 @@ class V8_EXPORT TracedGlobal { template V8_INLINE bool operator==(const Local& that) const { - internal::Address* a = reinterpret_cast(this->val_); - internal::Address* b = reinterpret_cast(that.val_); + internal::Address* a = reinterpret_cast(**this); + internal::Address* b = reinterpret_cast(*that); if (a == nullptr) return b == nullptr; if (b == nullptr) return false; return *a == *b; @@ -954,11 +1021,32 @@ class V8_EXPORT TracedGlobal { void* parameter, WeakCallbackInfo::Callback callback); private: - V8_INLINE static T* New(Isolate* isolate, T* that, T** slot); + // Wrapping type used when clearing on destruction is required. + struct WrappedForDestruction { + T* value; + + explicit WrappedForDestruction(T* val) : value(val) {} + ~WrappedForDestruction(); + operator T*() const { return value; } + T* operator*() const { return value; } + T* operator->() const { return value; } + WrappedForDestruction& operator=(const WrappedForDestruction& other) { + value = other.value; + return *this; + } + WrappedForDestruction& operator=(T* val) { + value = val; + return *this; + } + }; + + V8_INLINE static T* New(Isolate* isolate, T* that, void* slot); T* operator*() const { return this->val_; } - T* val_ = nullptr; + typename std::conditional< + TracedGlobalTrait>::kRequiresExplicitDestruction, + WrappedForDestruction, T*>::type val_{nullptr}; friend class EmbedderHeapTracer; template @@ -1385,11 +1473,19 @@ class V8_EXPORT Module { /** * Set this module's exported value for the name export_name to the specified * export_value. This method must be called only on Modules created via - * CreateSyntheticModule. export_name must be one of the export_names that - * were passed in that CreateSyntheticModule call. + * CreateSyntheticModule. An error will be thrown if export_name is not one + * of the export_names that were passed in that CreateSyntheticModule call. + * Returns Just(true) on success, Nothing() if an error was thrown. */ - void SetSyntheticModuleExport(Local export_name, - Local export_value); + V8_WARN_UNUSED_RESULT Maybe SetSyntheticModuleExport( + Isolate* isolate, Local export_name, Local export_value); + V8_DEPRECATE_SOON( + "Use the preceding SetSyntheticModuleExport with an Isolate parameter, " + "instead of the one that follows. 
The former will throw a runtime " + "error if called for an export that doesn't exist (as per spec); " + "the latter will crash with a failed CHECK().", + void SetSyntheticModuleExport(Local export_name, + Local export_value)); }; /** @@ -3782,6 +3878,15 @@ class V8_EXPORT Object : public Value { */ bool IsConstructor(); + /** + * True if this object can carry information relevant to the embedder in its + * embedder fields, false otherwise. This is generally true for objects + * constructed through function templates but also holds for other types where + * V8 automatically adds internal fields at compile time, such as e.g. + * v8::ArrayBuffer. + */ + bool IsApiWrapper(); + /** * Call an Object as a function if a callback is set by the * ObjectTemplate::SetCallAsFunctionHandler method. @@ -4850,8 +4955,8 @@ class V8_EXPORT ArrayBuffer : public Object { bool IsDetachable() const; // TODO(913887): fix the use of 'neuter' in the API. - V8_DEPRECATE_SOON("Use IsDetachable() instead.", - inline bool IsNeuterable() const) { + V8_DEPRECATED("Use IsDetachable() instead.", + inline bool IsNeuterable() const) { return IsDetachable(); } @@ -4864,7 +4969,7 @@ class V8_EXPORT ArrayBuffer : public Object { void Detach(); // TODO(913887): fix the use of 'neuter' in the API. - V8_DEPRECATE_SOON("Use Detach() instead.", inline void Neuter()) { Detach(); } + V8_DEPRECATED("Use Detach() instead.", inline void Neuter()) { Detach(); } /** * Make this ArrayBuffer external. The pointer to underlying memory block @@ -5502,6 +5607,32 @@ class V8_EXPORT RegExp : public Object { static void CheckCast(Value* obj); }; +/** + * An instance of the built-in FinalizationGroup constructor. + * + * This API is experimental and may change significantly. + */ +class V8_EXPORT FinalizationGroup : public Object { + public: + /** + * Runs the cleanup callback of the given FinalizationGroup. + * + * V8 will inform the embedder that there are finalizer callbacks be + * called through HostCleanupFinalizationGroupCallback. + * + * HostCleanupFinalizationGroupCallback should schedule a task to + * call FinalizationGroup::Cleanup() at some point in the + * future. It's the embedders responsiblity to make this call at a + * time which does not interrupt synchronous ECMAScript code + * execution. + * + * If the result is Nothing then an exception has + * occurred. Otherwise the result is |true| if the cleanup callback + * was called successfully. The result is never |false|. + */ + static V8_WARN_UNUSED_RESULT Maybe Cleanup( + Local finalization_group); +}; /** * A JavaScript value that wraps a C++ void*. This type of value is mainly used @@ -6743,10 +6874,34 @@ typedef void* (*CreateHistogramCallback)(const char* name, typedef void (*AddHistogramSampleCallback)(void* histogram, int sample); +// --- Crashkeys Callback --- +enum class CrashKeyId { + kIsolateAddress, + kReadonlySpaceFirstPageAddress, + kMapSpaceFirstPageAddress, + kCodeSpaceFirstPageAddress, +}; + +typedef void (*AddCrashKeyCallback)(CrashKeyId id, const std::string& value); + // --- Enter/Leave Script Callback --- typedef void (*BeforeCallEnteredCallback)(Isolate*); typedef void (*CallCompletedCallback)(Isolate*); +/** + * HostCleanupFinalizationGroupCallback is called when we require the + * embedder to enqueue a task that would call + * FinalizationGroup::Cleanup(). + * + * The FinalizationGroup is the one for which the embedder needs to + * call FinalizationGroup::Cleanup() on. + * + * The context provided is the one in which the FinalizationGroup was + * created in. 
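// --- Illustrative sketch, not part of the diff ---
// Using the new Maybe-returning SetSyntheticModuleExport overload described
// above. |module| is assumed to have been created with CreateSyntheticModule
// and to declare "answer" among its export names; the surrounding module
// setup is omitted.
#include "v8.h"

bool SetAnswerExport(v8::Isolate* isolate, v8::Local<v8::Module> module) {
  v8::Local<v8::String> name =
      v8::String::NewFromUtf8(isolate, "answer").ToLocalChecked();
  v8::Maybe<bool> result = module->SetSyntheticModuleExport(
      isolate, name, v8::Number::New(isolate, 42));
  // IsNothing() means the call threw, e.g. because "answer" was not one of
  // the declared export names.
  return !result.IsNothing();
}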
+ */ +typedef void (*HostCleanupFinalizationGroupCallback)( + Local context, Local fg); + /** * HostImportModuleDynamicallyCallback is called when we require the * embedder to load a module. This is used as part of the dynamic @@ -7006,6 +7161,10 @@ typedef void (*WasmStreamingCallback)(const FunctionCallbackInfo&); // --- Callback for checking if WebAssembly threads are enabled --- typedef bool (*WasmThreadsEnabledCallback)(Local context); +// --- Callback for loading source map file for WASM profiling support +typedef Local (*WasmLoadSourceMapCallback)(Isolate* isolate, + const char* name); + // --- Garbage Collection Callbacks --- /** @@ -7382,7 +7541,7 @@ class V8_EXPORT EmbedderHeapTracer { /** * Called at the beginning of a GC cycle. */ - V8_DEPRECATE_SOON("Use version with flags.", virtual void TracePrologue()) {} + V8_DEPRECATED("Use version with flags.", virtual void TracePrologue()) {} virtual void TracePrologue(TraceFlags flags); /** @@ -7433,14 +7592,37 @@ class V8_EXPORT EmbedderHeapTracer { /** * Returns true if the TracedGlobal handle should be considered as root for * the currently running non-tracing garbage collection and false otherwise. + * The default implementation will keep all TracedGlobal references as roots. * - * Default implementation will keep all TracedGlobal references as roots. + * If this returns false, then V8 may decide that the object referred to by + * such a handle is reclaimed. In that case: + * - No action is required if handles are used with destructors. + * - When run without destructors (by specializing + * |TracedGlobalTrait::kRequiresExplicitDestruction|) V8 calls + * |ResetHandleInNonTracingGC|. + * + * Note that the |handle| is different from the |TracedGlobal| handle that + * the embedder holds for retaining the object. The embedder may use + * |TracedGlobal::WrapperClassId()| to distinguish cases where it wants + * handles to be treated as roots from not being treated as roots. */ virtual bool IsRootForNonTracingGC( const v8::TracedGlobal& handle) { return true; } + /** + * Used in combination with |IsRootForNonTracingGC|. Called by V8 when an + * object that is backed by a handle is reclaimed by a non-tracing garbage + * collection. It is up to the embedder to reset the original handle. + * + * Note that the |handle| is different from the |TracedGlobal| handle that + * the embedder holds for retaining the object. It is up to the embedder to + * find the orignal |TracedGlobal| handle via the object or class id. + */ + virtual void ResetHandleInNonTracingGC( + const v8::TracedGlobal& handle) {} + /* * Called by the embedder to immediately perform a full garbage collection. * @@ -7660,7 +7842,6 @@ class V8_EXPORT Isolate { class V8_EXPORT SuppressMicrotaskExecutionScope { public: explicit SuppressMicrotaskExecutionScope(Isolate* isolate); - explicit SuppressMicrotaskExecutionScope(MicrotaskQueue* microtask_queue); ~SuppressMicrotaskExecutionScope(); // Prevent copying of Scope objects. 
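// Illustrative sketch (not part of this patch): one way an embedder could use
// the IsRootForNonTracingGC()/ResetHandleInNonTracingGC() pair added to
// EmbedderHeapTracer above. EmbedderWrapper, LookupWrapper() and
// kDroppableClassId are hypothetical embedder-side names; only the v8::
// signatures come from the header. The remaining pure-virtual tracing methods
// of EmbedderHeapTracer are omitted for brevity.
#include "v8.h"

struct EmbedderWrapper {                // hypothetical embedder-side object
  v8::TracedGlobal<v8::Object> handle;  // keeps the wrapped V8 object alive
};

// Hypothetical: map a TracedGlobal back to the embedder object that owns it,
// e.g. via an internal field or a side table.
EmbedderWrapper* LookupWrapper(const v8::TracedGlobal<v8::Value>& handle);

class ExampleTracer : public v8::EmbedderHeapTracer {
 public:
  static constexpr uint16_t kDroppableClassId = 1;

  bool IsRootForNonTracingGC(const v8::TracedGlobal<v8::Value>& handle) override {
    // Handles tagged as droppable may be reclaimed by a non-tracing GC;
    // every other TracedGlobal keeps acting as a root (the default).
    return handle.WrapperClassId() != kDroppableClassId;
  }

  void ResetHandleInNonTracingGC(const v8::TracedGlobal<v8::Value>& handle) override {
    // V8 reclaimed the object; the embedder must locate and reset the
    // original handle it holds for it.
    if (EmbedderWrapper* wrapper = LookupWrapper(handle)) wrapper->handle.Reset();
  }

  // TracePrologue(TraceFlags), AdvanceTracing(), TraceEpilogue(), ... omitted.
};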
@@ -7671,7 +7852,15 @@ class V8_EXPORT Isolate { private: internal::Isolate* const isolate_; - internal::MicrotaskQueue* const microtask_queue_; + internal::Address previous_stack_height_; + static_assert(sizeof(internal::Address) == + sizeof(internal::MicrotaskQueue*) && + alignof(internal::Address) == + alignof(internal::MicrotaskQueue*), + "The previous_stack_height_ field can replace the " + "microtask_queue_ field ABI-wise"); + + friend class internal::ThreadLocalTop; }; /** @@ -7785,9 +7974,10 @@ class V8_EXPORT Isolate { kStringNormalize = 75, kCallSiteAPIGetFunctionSloppyCall = 76, kCallSiteAPIGetThisSloppyCall = 77, + kRegExpMatchAllWithNonGlobalRegExp = 78, // If you add new values here, you'll also need to update Chromium's: - // web_feature.mojom, UseCounterCallback.cpp, and enums.xml. V8 changes to + // web_feature.mojom, use_counter_callback.cc, and enums.xml. V8 changes to // this list need to be landed first, then changes on the Chromium side. kUseCounterFeatureCount // This enum value must be last. }; @@ -7845,6 +8035,18 @@ class V8_EXPORT Isolate { */ static Isolate* GetCurrent(); + /** + * Clears the set of objects held strongly by the heap. This set of + * objects are originally built when a WeakRef is created or + * successfully dereferenced. + * + * The embedder is expected to call this when a synchronous sequence + * of ECMAScript execution completes. It's the embedders + * responsiblity to make this call at a time which does not + * interrupt synchronous ECMAScript code execution. + */ + void ClearKeptObjects(); + /** * Custom callback used by embedders to help V8 determine if it should abort * when it throws and no internal handler is predicted to catch the @@ -7858,6 +8060,14 @@ class V8_EXPORT Isolate { void SetAbortOnUncaughtExceptionCallback( AbortOnUncaughtExceptionCallback callback); + /** + * This specifies the callback to be called when finalization groups + * are ready to be cleaned up and require FinalizationGroup::Cleanup() + * to be called in a future task. + */ + void SetHostCleanupFinalizationGroupCallback( + HostCleanupFinalizationGroupCallback callback); + /** * This specifies the callback called by the upcoming dynamic * import() language feature to load modules. @@ -8411,6 +8621,13 @@ class V8_EXPORT Isolate { void SetCreateHistogramFunction(CreateHistogramCallback); void SetAddHistogramSampleFunction(AddHistogramSampleCallback); + /** + * Enables the host application to provide a mechanism for recording a + * predefined set of data as crash keys to be used in postmortem debugging in + * case of a crash. + */ + void SetAddCrashKeyCallback(AddCrashKeyCallback); + /** * Optional notification that the embedder is idle. * V8 uses the notification to perform garbage collection. @@ -8610,6 +8827,8 @@ class V8_EXPORT Isolate { void SetWasmThreadsEnabledCallback(WasmThreadsEnabledCallback callback); + void SetWasmLoadSourceMapCallback(WasmLoadSourceMapCallback callback); + /** * Check if V8 is dead and therefore unusable. This is the case after * fatal errors such as out-of-memory situations. 
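// Illustrative sketch (not part of this patch): the embedder contract behind
// SetHostCleanupFinalizationGroupCallback() and ClearKeptObjects() declared
// above. The pending-group queue and AfterSynchronousScriptRun() are
// hypothetical embedder plumbing; the v8::Isolate and v8::FinalizationGroup
// calls are the ones added by this change.
#include <vector>
#include "v8.h"

static std::vector<v8::Global<v8::FinalizationGroup>> g_pending_groups;

static void OnCleanupRequested(v8::Local<v8::Context> context,
                               v8::Local<v8::FinalizationGroup> fg) {
  // Do not run the cleanup callback synchronously here; just remember the
  // group so it can be processed after script execution returns.
  g_pending_groups.emplace_back(context->GetIsolate(), fg);
}

void AfterSynchronousScriptRun(v8::Isolate* isolate) {
  // Called by the embedder once a synchronous run of ECMAScript completes.
  isolate->ClearKeptObjects();  // drop objects held strongly for WeakRef
  v8::HandleScope scope(isolate);
  for (auto& pending : g_pending_groups) {
    v8::Local<v8::FinalizationGroup> fg = pending.Get(isolate);
    if (v8::FinalizationGroup::Cleanup(fg).IsNothing()) {
      // An exception occurred while running the cleanup callback.
    }
  }
  g_pending_groups.clear();
}

void SetUpIsolate(v8::Isolate* isolate) {
  isolate->SetHostCleanupFinalizationGroupCallback(OnCleanupRequested);
}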
@@ -8970,11 +9189,14 @@ class V8_EXPORT V8 { internal::Address* handle); static internal::Address* GlobalizeTracedReference(internal::Isolate* isolate, internal::Address* handle, - internal::Address* slot); + internal::Address* slot, + bool has_destructor); static void MoveGlobalReference(internal::Address** from, internal::Address** to); static void MoveTracedGlobalReference(internal::Address** from, internal::Address** to); + static void CopyTracedGlobalReference(const internal::Address* const* from, + internal::Address** to); static internal::Address* CopyGlobalReference(internal::Address* from); static void DisposeGlobal(internal::Address* global_handle); static void DisposeTracedGlobal(internal::Address* global_handle); @@ -10093,18 +10315,26 @@ Global& Global::operator=(Global&& rhs) { } template -T* TracedGlobal::New(Isolate* isolate, T* that, T** slot) { +TracedGlobal::WrappedForDestruction::~WrappedForDestruction() { + if (value == nullptr) return; + V8::DisposeTracedGlobal(reinterpret_cast(value)); + value = nullptr; +} + +template +T* TracedGlobal::New(Isolate* isolate, T* that, void* slot) { if (that == nullptr) return nullptr; internal::Address* p = reinterpret_cast(that); return reinterpret_cast(V8::GlobalizeTracedReference( reinterpret_cast(isolate), p, - reinterpret_cast(slot))); + reinterpret_cast(slot), + TracedGlobalTrait>::kRequiresExplicitDestruction)); } template void TracedGlobal::Reset() { if (IsEmpty()) return; - V8::DisposeTracedGlobal(reinterpret_cast(val_)); + V8::DisposeTracedGlobal(reinterpret_cast(**this)); val_ = nullptr; } @@ -10118,19 +10348,23 @@ void TracedGlobal::Reset(Isolate* isolate, const Local& other) { } template -TracedGlobal::TracedGlobal(TracedGlobal&& other) : val_(other.val_) { - if (other.val_ != nullptr) { - V8::MoveTracedGlobalReference( - reinterpret_cast(&other.val_), - reinterpret_cast(&this->val_)); - other.val_ = nullptr; - } +template +TracedGlobal& TracedGlobal::operator=(TracedGlobal&& rhs) { + TYPE_CHECK(T, S); + *this = std::move(rhs.template As()); + return *this; } template template -TracedGlobal& TracedGlobal::operator=(TracedGlobal&& rhs) { +TracedGlobal& TracedGlobal::operator=(const TracedGlobal& rhs) { TYPE_CHECK(T, S); + *this = rhs.template As(); + return *this; +} + +template +TracedGlobal& TracedGlobal::operator=(TracedGlobal&& rhs) { if (this != &rhs) { this->Reset(); if (rhs.val_ != nullptr) { @@ -10144,11 +10378,24 @@ TracedGlobal& TracedGlobal::operator=(TracedGlobal&& rhs) { return *this; } +template +TracedGlobal& TracedGlobal::operator=(const TracedGlobal& rhs) { + if (this != &rhs) { + this->Reset(); + if (rhs.val_ != nullptr) { + V8::CopyTracedGlobalReference( + reinterpret_cast(&rhs.val_), + reinterpret_cast(&this->val_)); + } + } + return *this; +} + template void TracedGlobal::SetWrapperClassId(uint16_t class_id) { typedef internal::Internals I; if (IsEmpty()) return; - internal::Address* obj = reinterpret_cast(this->val_); + internal::Address* obj = reinterpret_cast(**this); uint8_t* addr = reinterpret_cast(obj) + I::kNodeClassIdOffset; *reinterpret_cast(addr) = class_id; } @@ -10157,7 +10404,7 @@ template uint16_t TracedGlobal::WrapperClassId() const { typedef internal::Internals I; if (IsEmpty()) return 0; - internal::Address* obj = reinterpret_cast(this->val_); + internal::Address* obj = reinterpret_cast(**this); uint8_t* addr = reinterpret_cast(obj) + I::kNodeClassIdOffset; return *reinterpret_cast(addr); } @@ -10166,7 +10413,7 @@ template void TracedGlobal::SetFinalizationCallback( void* parameter, 
typename WeakCallbackInfo::Callback callback) { V8::SetFinalizationCallbackTraced( - reinterpret_cast(this->val_), parameter, callback); + reinterpret_cast(**this), parameter, callback); } template @@ -11111,9 +11358,12 @@ int64_t Isolate::AdjustAmountOfExternalAllocatedMemory( } if (change_in_bytes < 0) { - const int64_t lower_limit = *external_memory_limit + change_in_bytes; - if (lower_limit > I::kExternalAllocationSoftLimit) + const int64_t lower_limit = + static_cast(static_cast(*external_memory_limit) + + static_cast(change_in_bytes)); + if (lower_limit > I::kExternalAllocationSoftLimit) { *external_memory_limit = lower_limit; + } } else if (change_in_bytes > 0 && amount > *external_memory_limit) { ReportExternalAllocationLimitReached(); } diff --git a/deps/v8/include/v8config.h b/deps/v8/include/v8config.h index 7bd2938225bc74..7670c0e449c7fd 100644 --- a/deps/v8/include/v8config.h +++ b/deps/v8/include/v8config.h @@ -186,6 +186,8 @@ // V8_HAS_BUILTIN_SADD_OVERFLOW - __builtin_sadd_overflow() supported // V8_HAS_BUILTIN_SSUB_OVERFLOW - __builtin_ssub_overflow() supported // V8_HAS_BUILTIN_UADD_OVERFLOW - __builtin_uadd_overflow() supported +// V8_HAS_COMPUTED_GOTO - computed goto/labels as values +// supported // V8_HAS_DECLSPEC_DEPRECATED - __declspec(deprecated) supported // V8_HAS_DECLSPEC_NOINLINE - __declspec(noinline) supported // V8_HAS_DECLSPEC_SELECTANY - __declspec(selectany) supported @@ -214,6 +216,7 @@ # define V8_HAS_ATTRIBUTE_WARN_UNUSED_RESULT \ (__has_attribute(warn_unused_result)) +# define V8_HAS_BUILTIN_ASSUME_ALIGNED (__has_builtin(__builtin_assume_aligned)) # define V8_HAS_BUILTIN_BSWAP16 (__has_builtin(__builtin_bswap16)) # define V8_HAS_BUILTIN_BSWAP32 (__has_builtin(__builtin_bswap32)) # define V8_HAS_BUILTIN_BSWAP64 (__has_builtin(__builtin_bswap64)) @@ -226,6 +229,10 @@ # define V8_HAS_BUILTIN_SSUB_OVERFLOW (__has_builtin(__builtin_ssub_overflow)) # define V8_HAS_BUILTIN_UADD_OVERFLOW (__has_builtin(__builtin_uadd_overflow)) +// Clang has no __has_feature for computed gotos. +// GCC doc: https://gcc.gnu.org/onlinedocs/gcc/Labels-as-Values.html +# define V8_HAS_COMPUTED_GOTO 1 + # if __cplusplus >= 201402L # define V8_CAN_HAVE_DCHECK_IN_CONSTEXPR 1 # endif @@ -256,12 +263,16 @@ # define V8_HAS_ATTRIBUTE_WARN_UNUSED_RESULT \ (!V8_CC_INTEL && V8_GNUC_PREREQ(4, 1, 0)) +# define V8_HAS_BUILTIN_ASSUME_ALIGNED (V8_GNUC_PREREQ(4, 7, 0)) # define V8_HAS_BUILTIN_CLZ (V8_GNUC_PREREQ(3, 4, 0)) # define V8_HAS_BUILTIN_CTZ (V8_GNUC_PREREQ(3, 4, 0)) # define V8_HAS_BUILTIN_EXPECT (V8_GNUC_PREREQ(2, 96, 0)) # define V8_HAS_BUILTIN_FRAME_ADDRESS (V8_GNUC_PREREQ(2, 96, 0)) # define V8_HAS_BUILTIN_POPCOUNT (V8_GNUC_PREREQ(3, 4, 0)) +// GCC doc: https://gcc.gnu.org/onlinedocs/gcc/Labels-as-Values.html +#define V8_HAS_COMPUTED_GOTO (V8_GNUC_PREREQ(2, 0, 0)) + #endif #if defined(_MSC_VER) @@ -291,6 +302,12 @@ # define V8_INLINE inline #endif +#if V8_HAS_BUILTIN_ASSUME_ALIGNED +# define V8_ASSUME_ALIGNED(ptr, alignment) \ + __builtin_assume_aligned((ptr), (alignment)) +#else +# define V8_ASSUME_ALIGNED(ptr) (ptr) +#endif // A macro used to tell the compiler to never inline a particular function. // Don't bother for debug builds. 
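// Illustrative sketch (not part of this patch): the kind of dispatch loop the
// new V8_HAS_COMPUTED_GOTO macro is meant to guard (V8_ASSUME_ALIGNED simply
// wraps __builtin_assume_aligned where available). This is not V8's actual
// interpreter; it is a self-contained example of the labels-as-values pattern
// with a portable switch fallback.
#include <cstdint>
#include "v8config.h"  // assumed to be on the include path

// Bytecode: opcode 0 increments the accumulator, opcode 1 halts.
int Run(const uint8_t* code, int acc) {
#if V8_HAS_COMPUTED_GOTO
  // Labels-as-values: every handler jumps straight to the next one.
  static const void* kDispatch[] = {&&op_inc, &&op_halt};
  const uint8_t* pc = code;
  goto* kDispatch[*pc];
op_inc:
  ++acc;
  ++pc;
  goto* kDispatch[*pc];
op_halt:
  return acc;
#else
  // Portable fallback: a plain switch-based loop.
  for (const uint8_t* pc = code;; ++pc) {
    switch (*pc) {
      case 0: ++acc; break;
      case 1: return acc;
    }
  }
#endif
}

// Usage: given uint8_t program[] = {0, 0, 1}, Run(program, 0) returns 2.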
diff --git a/deps/v8/infra/OWNERS b/deps/v8/infra/OWNERS index a75a43666efa57..a33a8ba8ed96fa 100644 --- a/deps/v8/infra/OWNERS +++ b/deps/v8/infra/OWNERS @@ -1,3 +1,3 @@ -file://INFRA_OWNERS +file:../INFRA_OWNERS tandrii@chromium.org diff --git a/deps/v8/infra/testing/builders.pyl b/deps/v8/infra/testing/builders.pyl index 965300da7735f2..305007354d7509 100644 --- a/deps/v8/infra/testing/builders.pyl +++ b/deps/v8/infra/testing/builders.pyl @@ -485,13 +485,8 @@ 'swarming_dimensions' : { 'os': 'Ubuntu-16.04', }, - 'swarming_task_attrs': { - 'expiration': 14400, - 'hard_timeout': 3600, - 'priority': 35, - }, 'tests': [ - {'name': 'v8testing', 'shards': 7}, + {'name': 'v8testing', 'shards': 12}, ], }, ############################################################################## @@ -548,7 +543,7 @@ # Win64 'v8_win64_asan_rel_ng_triggered': { 'swarming_dimensions' : { - 'os': 'Windows-10', + 'os': 'Windows-10-15063', }, 'tests': [ {'name': 'v8testing', 'shards': 5}, @@ -834,7 +829,7 @@ }, 'tests': [ {'name': 'mozilla', 'variant': 'default'}, - {'name': 'test262', 'variant': 'default', 'shards': 4}, + {'name': 'test262', 'variant': 'default', 'shards': 6}, {'name': 'v8testing', 'variant': 'default', 'shards': 3}, ], }, @@ -1260,7 +1255,7 @@ }, 'V8 Win64 ASAN': { 'swarming_dimensions': { - 'os': 'Windows-10', + 'os': 'Windows-10-15063', }, 'tests': [ {'name': 'v8testing', 'shards': 5}, @@ -1412,8 +1407,8 @@ 'os': 'Ubuntu-16.04', }, 'tests': [ - {'name': 'mjsunit_sp_frame_access'}, - {'name': 'mozilla'}, + {'name': 'mjsunit_sp_frame_access', 'shards': 4}, + {'name': 'mozilla', 'shards': 4}, {'name': 'test262', 'variant': 'default'}, {'name': 'v8testing', 'shards': 8}, {'name': 'v8testing', 'variant': 'extra', 'shards': 4}, @@ -1423,14 +1418,14 @@ 'name': 'mozilla', 'suffix': 'armv8-a', 'test_args': ['--extra-flags', '--enable-armv8'], - 'shards': 2, + 'shards': 3, }, { 'name': 'test262', 'suffix': 'armv8-a', 'variant': 'default', 'test_args': ['--extra-flags', '--enable-armv8'], - 'shards': 2, + 'shards': 3, }, { 'name': 'v8testing', @@ -1822,9 +1817,9 @@ 'os': 'Ubuntu-16.04', }, 'tests': [ - {'name': 'mozilla'}, - {'name': 'test262', 'variant': 'default'}, - {'name': 'v8testing', 'shards': 8}, + {'name': 'mozilla', 'shards': 2}, + {'name': 'test262', 'variant': 'default', 'shards': 2}, + {'name': 'v8testing', 'shards': 10}, ], }, 'V8 arm - sim - stable branch': { @@ -1842,9 +1837,9 @@ 'os': 'Ubuntu-16.04', }, 'tests': [ - {'name': 'mozilla'}, - {'name': 'test262', 'variant': 'default'}, - {'name': 'v8testing', 'shards': 8}, + {'name': 'mozilla', 'shards': 2}, + {'name': 'test262', 'variant': 'default', 'shards': 2}, + {'name': 'v8testing', 'shards': 10}, ], }, 'V8 mips64el - sim - beta branch': { diff --git a/deps/v8/src/OWNERS b/deps/v8/src/OWNERS index c6881f232117b2..3e21b6ea369970 100644 --- a/deps/v8/src/OWNERS +++ b/deps/v8/src/OWNERS @@ -1,5 +1,5 @@ -per-file *DEPS=file://COMMON_OWNERS -per-file intl-*=file://INTL_OWNERS -per-file *-intl*=file://INTL_OWNERS +per-file *DEPS=file:../COMMON_OWNERS +per-file intl-*=file:../INTL_OWNERS +per-file *-intl*=file:../INTL_OWNERS # COMPONENT: Blink>JavaScript diff --git a/deps/v8/src/api/OWNERS b/deps/v8/src/api/OWNERS index ce6fb20af84d38..ef5a56dbfcecf3 100644 --- a/deps/v8/src/api/OWNERS +++ b/deps/v8/src/api/OWNERS @@ -1,4 +1,4 @@ -file://include/OWNERS +file:../../include/OWNERS clemensh@chromium.org ishell@chromium.org jkummerow@chromium.org diff --git a/deps/v8/src/api/api-inl.h b/deps/v8/src/api/api-inl.h index d152412b474f95..1a6b512e83a651 
100644 --- a/deps/v8/src/api/api-inl.h +++ b/deps/v8/src/api/api-inl.h @@ -8,6 +8,7 @@ #include "src/api/api.h" #include "src/handles/handles-inl.h" #include "src/objects/foreign-inl.h" +#include "src/objects/js-weak-refs.h" #include "src/objects/objects-inl.h" #include "src/objects/stack-frame-info.h" @@ -84,6 +85,7 @@ MAKE_TO_LOCAL(ToLocal, JSArrayBufferView, ArrayBufferView) MAKE_TO_LOCAL(ToLocal, JSDataView, DataView) MAKE_TO_LOCAL(ToLocal, JSTypedArray, TypedArray) MAKE_TO_LOCAL(ToLocalShared, JSArrayBuffer, SharedArrayBuffer) +MAKE_TO_LOCAL(ToLocal, JSFinalizationGroup, FinalizationGroup) TYPED_ARRAYS(MAKE_TO_LOCAL_TYPED_ARRAY) diff --git a/deps/v8/src/api/api-natives.cc b/deps/v8/src/api/api-natives.cc index cd380d3cda1aa2..b96b6fc4f62b2e 100644 --- a/deps/v8/src/api/api-natives.cc +++ b/deps/v8/src/api/api-natives.cc @@ -42,9 +42,17 @@ MaybeHandle InstantiateObject(Isolate* isolate, bool is_prototype); MaybeHandle InstantiateFunction( - Isolate* isolate, Handle data, + Isolate* isolate, Handle native_context, + Handle data, MaybeHandle maybe_name = MaybeHandle()); +MaybeHandle InstantiateFunction( + Isolate* isolate, Handle data, + MaybeHandle maybe_name = MaybeHandle()) { + return InstantiateFunction(isolate, isolate->native_context(), data, + maybe_name); +} + MaybeHandle Instantiate( Isolate* isolate, Handle data, MaybeHandle maybe_name = MaybeHandle()) { @@ -277,73 +285,73 @@ MaybeHandle ConfigureInstance(Isolate* isolate, Handle obj, // the cache for those cases. enum class CachingMode { kLimited, kUnlimited }; -MaybeHandle ProbeInstantiationsCache(Isolate* isolate, - int serial_number, - CachingMode caching_mode) { +MaybeHandle ProbeInstantiationsCache( + Isolate* isolate, Handle native_context, int serial_number, + CachingMode caching_mode) { DCHECK_LE(1, serial_number); if (serial_number <= TemplateInfo::kFastTemplateInstantiationsCacheSize) { - Handle fast_cache = - isolate->fast_template_instantiations_cache(); - Handle object{fast_cache->get(serial_number - 1), isolate}; + FixedArray fast_cache = + native_context->fast_template_instantiations_cache(); + Handle object{fast_cache.get(serial_number - 1), isolate}; if (object->IsUndefined(isolate)) return {}; return Handle::cast(object); } if (caching_mode == CachingMode::kUnlimited || (serial_number <= TemplateInfo::kSlowTemplateInstantiationsCacheSize)) { - Handle slow_cache = - isolate->slow_template_instantiations_cache(); - int entry = slow_cache->FindEntry(isolate, serial_number); + SimpleNumberDictionary slow_cache = + native_context->slow_template_instantiations_cache(); + int entry = slow_cache.FindEntry(isolate, serial_number); if (entry != SimpleNumberDictionary::kNotFound) { - return handle(JSObject::cast(slow_cache->ValueAt(entry)), isolate); + return handle(JSObject::cast(slow_cache.ValueAt(entry)), isolate); } } return {}; } -void CacheTemplateInstantiation(Isolate* isolate, int serial_number, - CachingMode caching_mode, +void CacheTemplateInstantiation(Isolate* isolate, + Handle native_context, + int serial_number, CachingMode caching_mode, Handle object) { DCHECK_LE(1, serial_number); if (serial_number <= TemplateInfo::kFastTemplateInstantiationsCacheSize) { Handle fast_cache = - isolate->fast_template_instantiations_cache(); + handle(native_context->fast_template_instantiations_cache(), isolate); Handle new_cache = FixedArray::SetAndGrow(isolate, fast_cache, serial_number - 1, object); if (*new_cache != *fast_cache) { - isolate->native_context()->set_fast_template_instantiations_cache( - *new_cache); + 
native_context->set_fast_template_instantiations_cache(*new_cache); } } else if (caching_mode == CachingMode::kUnlimited || (serial_number <= TemplateInfo::kSlowTemplateInstantiationsCacheSize)) { Handle cache = - isolate->slow_template_instantiations_cache(); + handle(native_context->slow_template_instantiations_cache(), isolate); auto new_cache = SimpleNumberDictionary::Set(isolate, cache, serial_number, object); if (*new_cache != *cache) { - isolate->native_context()->set_slow_template_instantiations_cache( - *new_cache); + native_context->set_slow_template_instantiations_cache(*new_cache); } } } -void UncacheTemplateInstantiation(Isolate* isolate, int serial_number, - CachingMode caching_mode) { +void UncacheTemplateInstantiation(Isolate* isolate, + Handle native_context, + int serial_number, CachingMode caching_mode) { DCHECK_LE(1, serial_number); if (serial_number <= TemplateInfo::kFastTemplateInstantiationsCacheSize) { - Handle fast_cache = - isolate->fast_template_instantiations_cache(); - DCHECK(!fast_cache->get(serial_number - 1).IsUndefined(isolate)); - fast_cache->set_undefined(serial_number - 1); + FixedArray fast_cache = + native_context->fast_template_instantiations_cache(); + DCHECK(!fast_cache.get(serial_number - 1).IsUndefined(isolate)); + fast_cache.set_undefined(serial_number - 1); } else if (caching_mode == CachingMode::kUnlimited || (serial_number <= TemplateInfo::kSlowTemplateInstantiationsCacheSize)) { Handle cache = - isolate->slow_template_instantiations_cache(); + handle(native_context->slow_template_instantiations_cache(), isolate); int entry = cache->FindEntry(isolate, serial_number); DCHECK_NE(SimpleNumberDictionary::kNotFound, entry); cache = SimpleNumberDictionary::DeleteEntry(isolate, cache, entry); - isolate->native_context()->set_slow_template_instantiations_cache(*cache); + native_context->set_slow_template_instantiations_cache(*cache); } } @@ -375,7 +383,8 @@ MaybeHandle InstantiateObject(Isolate* isolate, // Fast path. Handle result; if (serial_number) { - if (ProbeInstantiationsCache(isolate, serial_number, CachingMode::kLimited) + if (ProbeInstantiationsCache(isolate, isolate->native_context(), + serial_number, CachingMode::kLimited) .ToHandle(&result)) { return isolate->factory()->CopyJSObject(result); } @@ -419,8 +428,8 @@ MaybeHandle InstantiateObject(Isolate* isolate, JSObject::MigrateSlowToFast(result, 0, "ApiNatives::InstantiateObject"); // Don't cache prototypes. 
if (serial_number) { - CacheTemplateInstantiation(isolate, serial_number, CachingMode::kLimited, - result); + CacheTemplateInstantiation(isolate, isolate->native_context(), + serial_number, CachingMode::kLimited, result); result = isolate->factory()->CopyJSObject(result); } } @@ -451,13 +460,13 @@ MaybeHandle GetInstancePrototype(Isolate* isolate, } } // namespace -MaybeHandle InstantiateFunction(Isolate* isolate, - Handle data, - MaybeHandle maybe_name) { +MaybeHandle InstantiateFunction( + Isolate* isolate, Handle native_context, + Handle data, MaybeHandle maybe_name) { int serial_number = Smi::ToInt(data->serial_number()); if (serial_number) { Handle result; - if (ProbeInstantiationsCache(isolate, serial_number, + if (ProbeInstantiationsCache(isolate, native_context, serial_number, CachingMode::kUnlimited) .ToHandle(&result)) { return Handle::cast(result); @@ -503,17 +512,17 @@ MaybeHandle InstantiateFunction(Isolate* isolate, : JS_SPECIAL_API_OBJECT_TYPE; Handle function = ApiNatives::CreateApiFunction( - isolate, data, prototype, function_type, maybe_name); + isolate, native_context, data, prototype, function_type, maybe_name); if (serial_number) { // Cache the function. - CacheTemplateInstantiation(isolate, serial_number, CachingMode::kUnlimited, - function); + CacheTemplateInstantiation(isolate, native_context, serial_number, + CachingMode::kUnlimited, function); } MaybeHandle result = ConfigureInstance(isolate, function, data); if (result.is_null()) { // Uncache on error. if (serial_number) { - UncacheTemplateInstantiation(isolate, serial_number, + UncacheTemplateInstantiation(isolate, native_context, serial_number, CachingMode::kUnlimited); } return MaybeHandle(); @@ -543,6 +552,14 @@ void AddPropertyToPropertyList(Isolate* isolate, Handle templ, } // namespace +MaybeHandle ApiNatives::InstantiateFunction( + Isolate* isolate, Handle native_context, + Handle data, MaybeHandle maybe_name) { + InvokeScope invoke_scope(isolate); + return ::v8::internal::InstantiateFunction(isolate, native_context, data, + maybe_name); +} + MaybeHandle ApiNatives::InstantiateFunction( Handle data, MaybeHandle maybe_name) { Isolate* isolate = data->GetIsolate(); @@ -626,8 +643,9 @@ void ApiNatives::AddNativeDataProperty(Isolate* isolate, } Handle ApiNatives::CreateApiFunction( - Isolate* isolate, Handle obj, - Handle prototype, InstanceType type, MaybeHandle maybe_name) { + Isolate* isolate, Handle native_context, + Handle obj, Handle prototype, + InstanceType type, MaybeHandle maybe_name) { Handle shared = FunctionTemplateInfo::GetOrCreateSharedFunctionInfo(isolate, obj, maybe_name); @@ -635,8 +653,8 @@ Handle ApiNatives::CreateApiFunction( DCHECK(shared->HasSharedName()); Handle result = - isolate->factory()->NewFunctionFromSharedFunctionInfo( - shared, isolate->native_context()); + isolate->factory()->NewFunctionFromSharedFunctionInfo(shared, + native_context); if (obj->remove_prototype()) { DCHECK(prototype.is_null()); diff --git a/deps/v8/src/api/api-natives.h b/deps/v8/src/api/api-natives.h index 153212cc6c43d7..fb59eb6cfcdb3f 100644 --- a/deps/v8/src/api/api-natives.h +++ b/deps/v8/src/api/api-natives.h @@ -24,6 +24,11 @@ class ApiNatives { public: static const int kInitialFunctionCacheSize = 256; + V8_WARN_UNUSED_RESULT static MaybeHandle InstantiateFunction( + Isolate* isolate, Handle native_context, + Handle data, + MaybeHandle maybe_name = MaybeHandle()); + V8_WARN_UNUSED_RESULT static MaybeHandle InstantiateFunction( Handle data, MaybeHandle maybe_name = MaybeHandle()); @@ -36,9 +41,9 @@ 
class ApiNatives { Handle data); static Handle CreateApiFunction( - Isolate* isolate, Handle obj, - Handle prototype, InstanceType type, - MaybeHandle name = MaybeHandle()); + Isolate* isolate, Handle native_context, + Handle obj, Handle prototype, + InstanceType type, MaybeHandle name = MaybeHandle()); static void AddDataProperty(Isolate* isolate, Handle info, Handle name, Handle value, diff --git a/deps/v8/src/api/api.cc b/deps/v8/src/api/api.cc index ca5d9ce5d26cd7..a0e25505a1ace0 100644 --- a/deps/v8/src/api/api.cc +++ b/deps/v8/src/api/api.cc @@ -68,6 +68,7 @@ #include "src/objects/js-generator-inl.h" #include "src/objects/js-promise-inl.h" #include "src/objects/js-regexp-inl.h" +#include "src/objects/js-weak-refs-inl.h" #include "src/objects/module-inl.h" #include "src/objects/objects-inl.h" #include "src/objects/oddball.h" @@ -121,9 +122,9 @@ #include #include "include/v8-wasm-trap-handler-win.h" #include "src/trap-handler/handler-inside-win.h" -#if V8_TARGET_ARCH_X64 +#if defined(V8_OS_WIN64) #include "src/diagnostics/unwinding-info-win64.h" -#endif // V8_TARGET_ARCH_X64 +#endif // V8_OS_WIN64 #endif // V8_OS_WIN namespace v8 { @@ -269,7 +270,7 @@ void CheckMicrotasksScopesConsistency(i::MicrotaskQueue* microtask_queue) { template class CallDepthScope { public: - explicit CallDepthScope(i::Isolate* isolate, Local context) + CallDepthScope(i::Isolate* isolate, Local context) : isolate_(isolate), context_(context), escaped_(false), @@ -280,7 +281,7 @@ class CallDepthScope { ? i::InterruptsScope::kRunInterrupts : i::InterruptsScope::kPostponeInterrupts) : i::InterruptsScope::kNoop) { - isolate_->handle_scope_implementer()->IncrementCallDepth(); + isolate_->thread_local_top()->IncrementCallDepth(this); isolate_->set_next_v8_call_is_safe_for_termination(false); if (!context.IsEmpty()) { i::Handle env = Utils::OpenHandle(*context); @@ -304,7 +305,7 @@ class CallDepthScope { i::Handle env = Utils::OpenHandle(*context_); microtask_queue = env->native_context().microtask_queue(); } - if (!escaped_) isolate_->handle_scope_implementer()->DecrementCallDepth(); + if (!escaped_) isolate_->thread_local_top()->DecrementCallDepth(this); if (do_callback) isolate_->FireCallCompletedCallback(microtask_queue); // TODO(jochen): This should be #ifdef DEBUG #ifdef V8_CHECK_MICROTASKS_SCOPES_CONSISTENCY @@ -316,11 +317,10 @@ class CallDepthScope { void Escape() { DCHECK(!escaped_); escaped_ = true; - auto handle_scope_implementer = isolate_->handle_scope_implementer(); - handle_scope_implementer->DecrementCallDepth(); - bool clear_exception = - handle_scope_implementer->CallDepthIsZero() && - isolate_->thread_local_top()->try_catch_handler_ == nullptr; + auto thread_local_top = isolate_->thread_local_top(); + thread_local_top->DecrementCallDepth(this); + bool clear_exception = thread_local_top->CallDepthIsZero() && + thread_local_top->try_catch_handler_ == nullptr; isolate_->OptionalRescheduleException(clear_exception); } @@ -331,6 +331,12 @@ class CallDepthScope { bool do_callback_; bool safe_for_termination_; i::InterruptsScope interrupts_scope_; + i::Address previous_stack_height_; + + friend class i::ThreadLocalTop; + + DISALLOW_NEW_AND_DELETE() + DISALLOW_COPY_AND_ASSIGN(CallDepthScope); }; } // namespace @@ -819,10 +825,15 @@ StartupData SnapshotCreator::CreateBlob( // Complete in-object slack tracking for all functions. fun.CompleteInobjectSlackTrackingIfActive(); - fun.ResetIfBytecodeFlushed(); - // Also, clear out feedback vectors, or any optimized code. 
- if (fun.IsOptimized() || fun.IsInterpreted()) { + // Note that checking for fun.IsOptimized() || fun.IsInterpreted() is not + // sufficient because the function can have a feedback vector even if it + // is not compiled (e.g. when the bytecode was flushed). On the other + // hand, only checking for the feedback vector is not sufficient because + // there can be multiple functions sharing the same feedback vector. So we + // need all these checks. + if (fun.IsOptimized() || fun.IsInterpreted() || + !fun.raw_feedback_cell().value().IsUndefined()) { fun.raw_feedback_cell().set_value( i::ReadOnlyRoots(isolate).undefined_value()); fun.set_code(isolate->builtins()->builtin(i::Builtins::kCompileLazy)); @@ -1001,10 +1012,11 @@ i::Address* V8::GlobalizeReference(i::Isolate* isolate, i::Address* obj) { } i::Address* V8::GlobalizeTracedReference(i::Isolate* isolate, i::Address* obj, - internal::Address* slot) { + internal::Address* slot, + bool has_destructor) { LOG_API(isolate, TracedGlobal, New); i::Handle result = - isolate->global_handles()->CreateTraced(*obj, slot); + isolate->global_handles()->CreateTraced(*obj, slot, has_destructor); #ifdef VERIFY_HEAP if (i::FLAG_verify_heap) { i::Object(*obj).ObjectVerify(isolate); @@ -1027,6 +1039,11 @@ void V8::MoveTracedGlobalReference(internal::Address** from, i::GlobalHandles::MoveTracedGlobal(from, to); } +void V8::CopyTracedGlobalReference(const internal::Address* const* from, + internal::Address** to) { + i::GlobalHandles::CopyTracedGlobal(from, to); +} + void V8::RegisterExternallyReferencedObject(i::Address* location, i::Isolate* isolate) { isolate->heap()->RegisterExternallyReferencedObject(location); @@ -2325,7 +2342,8 @@ Local Module::CreateSyntheticModule( i::Handle i_export_names = i_isolate->factory()->NewFixedArray( static_cast(export_names.size())); for (int i = 0; i < i_export_names->length(); ++i) { - i::Handle str = Utils::OpenHandle(*export_names[i]); + i::Handle str = i_isolate->factory()->InternalizeString( + Utils::OpenHandle(*export_names[i])); i_export_names->set(i, *str); } return v8::Utils::ToLocal( @@ -2333,6 +2351,28 @@ Local Module::CreateSyntheticModule( i_module_name, i_export_names, evaluation_steps))); } +Maybe Module::SetSyntheticModuleExport(Isolate* isolate, + Local export_name, + Local export_value) { + auto i_isolate = reinterpret_cast(isolate); + i::Handle i_export_name = Utils::OpenHandle(*export_name); + i::Handle i_export_value = Utils::OpenHandle(*export_value); + i::Handle self = Utils::OpenHandle(this); + Utils::ApiCheck(self->IsSyntheticModule(), + "v8::Module::SyntheticModuleSetExport", + "v8::Module::SyntheticModuleSetExport must only be called on " + "a SyntheticModule"); + ENTER_V8_NO_SCRIPT(i_isolate, isolate->GetCurrentContext(), Module, + SetSyntheticModuleExport, Nothing(), i::HandleScope); + has_pending_exception = + i::SyntheticModule::SetExport(i_isolate, + i::Handle::cast(self), + i_export_name, i_export_value) + .IsNothing(); + RETURN_ON_FAILED_EXECUTION_PRIMITIVE(bool); + return Just(true); +} + void Module::SetSyntheticModuleExport(Local export_name, Local export_value) { i::Handle i_export_name = Utils::OpenHandle(*export_name); @@ -2342,9 +2382,9 @@ void Module::SetSyntheticModuleExport(Local export_name, "v8::Module::SetSyntheticModuleExport", "v8::Module::SetSyntheticModuleExport must only be called on " "a SyntheticModule"); - i::SyntheticModule::SetExport(self->GetIsolate(), - i::Handle::cast(self), - i_export_name, i_export_value); + 
i::SyntheticModule::SetExportStrict(self->GetIsolate(), + i::Handle::cast(self), + i_export_name, i_export_value); } namespace { @@ -4750,6 +4790,11 @@ bool v8::Object::IsConstructor() { return self->IsConstructor(); } +bool v8::Object::IsApiWrapper() { + auto self = i::Handle::cast(Utils::OpenHandle(this)); + return self->IsApiWrapper(); +} + MaybeLocal Object::CallAsFunction(Local context, Local recv, int argc, Local argv[]) { @@ -4930,7 +4975,7 @@ Local Function::GetDisplayName() const { } auto func = i::Handle::cast(self); i::Handle property_name = - isolate->factory()->NewStringFromStaticChars("displayName"); + isolate->factory()->display_name_string(); i::Handle value = i::JSReceiver::GetDataProperty(func, property_name); if (value->IsString()) { @@ -5642,14 +5687,14 @@ bool V8::EnableWebAssemblyTrapHandler(bool use_v8_signal_handler) { #if defined(V8_OS_WIN) void V8::SetUnhandledExceptionCallback( UnhandledExceptionCallback unhandled_exception_callback) { -#if defined(V8_TARGET_ARCH_X64) +#if defined(V8_OS_WIN64) v8::internal::win64_unwindinfo::SetUnhandledExceptionCallback( unhandled_exception_callback); #else - // Not implemented on ARM64. -#endif + // Not implemented, port needed. +#endif // V8_OS_WIN64 } -#endif +#endif // V8_OS_WIN void v8::V8::SetEntropySource(EntropySource entropy_source) { base::RandomNumberGenerator::SetEntropySource(entropy_source); @@ -6311,7 +6356,7 @@ MaybeLocal v8::String::NewExternalTwoByte( MaybeLocal v8::String::NewExternalOneByte( Isolate* isolate, v8::String::ExternalOneByteStringResource* resource) { - CHECK(resource && resource->data()); + CHECK_NOT_NULL(resource); // TODO(dcarney): throw a context free exception. if (resource->length() > static_cast(i::String::kMaxLength)) { return MaybeLocal(); @@ -6319,16 +6364,16 @@ MaybeLocal v8::String::NewExternalOneByte( i::Isolate* i_isolate = reinterpret_cast(isolate); ENTER_V8_NO_SCRIPT_NO_EXCEPTION(i_isolate); LOG_API(i_isolate, String, NewExternalOneByte); - if (resource->length() > 0) { - i::Handle string = i_isolate->factory() - ->NewExternalStringFromOneByte(resource) - .ToHandleChecked(); - return Utils::ToLocal(string); - } else { + if (resource->length() == 0) { // The resource isn't going to be used, free it immediately. 
resource->Dispose(); return Utils::ToLocal(i_isolate->factory()->empty_string()); } + CHECK_NOT_NULL(resource->data()); + i::Handle string = i_isolate->factory() + ->NewExternalStringFromOneByte(resource) + .ToHandleChecked(); + return Utils::ToLocal(string); } @@ -7794,6 +7839,11 @@ bool Isolate::InContext() { return !isolate->context().is_null(); } +void Isolate::ClearKeptObjects() { + i::Isolate* isolate = reinterpret_cast(this); + isolate->ClearKeptObjects(); +} + v8::Local Isolate::GetCurrentContext() { i::Isolate* isolate = reinterpret_cast(this); i::Context context = isolate->context(); @@ -8057,6 +8107,28 @@ void Isolate::SetAbortOnUncaughtExceptionCallback( isolate->SetAbortOnUncaughtExceptionCallback(callback); } +void Isolate::SetHostCleanupFinalizationGroupCallback( + HostCleanupFinalizationGroupCallback callback) { + i::Isolate* isolate = reinterpret_cast(this); + isolate->SetHostCleanupFinalizationGroupCallback(callback); +} + +Maybe FinalizationGroup::Cleanup( + Local finalization_group) { + i::Handle fg = Utils::OpenHandle(*finalization_group); + i::Isolate* isolate = fg->native_context().GetIsolate(); + i::Handle i_context(fg->native_context(), isolate); + Local context = Utils::ToLocal(i_context); + ENTER_V8(isolate, context, FinalizationGroup, Cleanup, Nothing(), + i::HandleScope); + i::Handle callback(fg->cleanup(), isolate); + fg->set_scheduled_for_cleanup(false); + has_pending_exception = + i::JSFinalizationGroup::Cleanup(isolate, fg, callback).IsNothing(); + RETURN_ON_FAILED_EXECUTION_PRIMITIVE(bool); + return Just(true); +} + void Isolate::SetHostImportModuleDynamicallyCallback( HostImportModuleDynamicallyCallback callback) { i::Isolate* isolate = reinterpret_cast(this); @@ -8133,15 +8205,14 @@ Isolate::AllowJavascriptExecutionScope::~AllowJavascriptExecutionScope() { Isolate::SuppressMicrotaskExecutionScope::SuppressMicrotaskExecutionScope( Isolate* isolate) - : isolate_(reinterpret_cast(isolate)), - microtask_queue_(isolate_->default_microtask_queue()) { - isolate_->handle_scope_implementer()->IncrementCallDepth(); - microtask_queue_->IncrementMicrotasksSuppressions(); + : isolate_(reinterpret_cast(isolate)) { + isolate_->thread_local_top()->IncrementCallDepth(this); + isolate_->default_microtask_queue()->IncrementMicrotasksSuppressions(); } Isolate::SuppressMicrotaskExecutionScope::~SuppressMicrotaskExecutionScope() { - microtask_queue_->DecrementMicrotasksSuppressions(); - isolate_->handle_scope_implementer()->DecrementCallDepth(); + isolate_->default_microtask_queue()->DecrementMicrotasksSuppressions(); + isolate_->thread_local_top()->DecrementCallDepth(this); } Isolate::SafeForTerminationScope::SafeForTerminationScope(v8::Isolate* isolate) @@ -8266,8 +8337,10 @@ bool Isolate::GetHeapCodeAndMetadataStatistics( void Isolate::GetStackSample(const RegisterState& state, void** frames, size_t frames_limit, SampleInfo* sample_info) { RegisterState regs = state; - if (TickSample::GetStackSample(this, ®s, TickSample::kSkipCEntryFrame, - frames, frames_limit, sample_info)) { + i::Isolate* isolate = reinterpret_cast(this); + if (i::TickSample::GetStackSample(isolate, ®s, + i::TickSample::kSkipCEntryFrame, frames, + frames_limit, sample_info)) { return; } sample_info->frames_count = 0; @@ -8427,6 +8500,11 @@ void Isolate::SetAddHistogramSampleFunction( ->SetAddHistogramSampleFunction(callback); } +void Isolate::SetAddCrashKeyCallback(AddCrashKeyCallback callback) { + i::Isolate* isolate = reinterpret_cast(this); + isolate->SetAddCrashKeyCallback(callback); +} + bool 
Isolate::IdleNotificationDeadline(double deadline_in_seconds) { // Returning true tells the caller that it need not // continue to call IdleNotification. @@ -8582,6 +8660,9 @@ CALLBACK_SETTER(WasmStreamingCallback, WasmStreamingCallback, CALLBACK_SETTER(WasmThreadsEnabledCallback, WasmThreadsEnabledCallback, wasm_threads_enabled_callback) +CALLBACK_SETTER(WasmLoadSourceMapCallback, WasmLoadSourceMapCallback, + wasm_load_source_map_callback) + void Isolate::AddNearHeapLimitCallback(v8::NearHeapLimitCallback callback, void* data) { i::Isolate* isolate = reinterpret_cast(this); @@ -8737,7 +8818,11 @@ MicrotasksScope::MicrotasksScope(Isolate* isolate, MicrotasksScope::~MicrotasksScope() { if (run_) { microtask_queue_->DecrementMicrotasksScopeDepth(); - if (MicrotasksPolicy::kScoped == microtask_queue_->microtasks_policy()) { + if (MicrotasksPolicy::kScoped == microtask_queue_->microtasks_policy() && + !isolate_->has_scheduled_exception()) { + DCHECK_IMPLIES(isolate_->has_scheduled_exception(), + isolate_->scheduled_exception() == + i::ReadOnlyRoots(isolate_).termination_exception()); microtask_queue_->PerformCheckpoint(reinterpret_cast(isolate_)); } } @@ -9896,13 +9981,27 @@ int CpuProfile::GetSamplesCount() const { return reinterpret_cast(this)->samples_count(); } +CpuProfiler* CpuProfiler::New(Isolate* isolate, + CpuProfilingNamingMode naming_mode, + CpuProfilingLoggingMode logging_mode) { + return reinterpret_cast(new i::CpuProfiler( + reinterpret_cast(isolate), naming_mode, logging_mode)); +} + CpuProfiler* CpuProfiler::New(Isolate* isolate) { - return New(isolate, kDebugNaming); + return New(isolate, kDebugNaming, kLazyLogging); } CpuProfiler* CpuProfiler::New(Isolate* isolate, CpuProfilingNamingMode mode) { - return reinterpret_cast( - new i::CpuProfiler(reinterpret_cast(isolate), mode)); + return New(isolate, mode, kLazyLogging); +} + +bool CpuProfilingOptions::has_filter_context() const { + return false; +} + +void* CpuProfilingOptions::raw_filter_context() const { + return nullptr; } void CpuProfiler::Dispose() { delete reinterpret_cast(this); } @@ -10354,8 +10453,7 @@ void EmbedderHeapTracer::RegisterEmbedderReference( if (ref.IsEmpty()) return; i::Heap* const heap = reinterpret_cast(isolate_)->heap(); - heap->RegisterExternallyReferencedObject( - reinterpret_cast(ref.val_)); + heap->RegisterExternallyReferencedObject(reinterpret_cast(*ref)); } void EmbedderHeapTracer::IterateTracedGlobalHandles( diff --git a/deps/v8/src/api/api.h b/deps/v8/src/api/api.h index 6135a7dfc62024..21bbb3a101549d 100644 --- a/deps/v8/src/api/api.h +++ b/deps/v8/src/api/api.h @@ -90,6 +90,7 @@ class RegisteredExtension { V(Data, Object) \ V(RegExp, JSRegExp) \ V(Object, JSReceiver) \ + V(FinalizationGroup, JSFinalizationGroup) \ V(Array, JSArray) \ V(Map, JSMap) \ V(Set, JSSet) \ @@ -198,6 +199,8 @@ class Utils { v8::internal::Handle obj); static inline Local ToLocalBigUint64Array( v8::internal::Handle obj); + static inline Local ToLocal( + v8::internal::Handle obj); static inline Local ToLocalShared( v8::internal::Handle obj); @@ -248,9 +251,9 @@ class Utils { template static inline Local Convert(v8::internal::Handle obj); - template + template static inline v8::internal::Handle OpenPersistent( - const v8::Persistent& persistent) { + const v8::Persistent& persistent) { return v8::internal::Handle( reinterpret_cast(persistent.val_)); } @@ -354,7 +357,6 @@ class HandleScopeImplementer { explicit HandleScopeImplementer(Isolate* isolate) : isolate_(isolate), spare_(nullptr), - call_depth_(0), 
last_handle_before_deferred_block_(nullptr) {} ~HandleScopeImplementer() { DeleteArray(spare_); } @@ -373,11 +375,6 @@ class HandleScopeImplementer { inline internal::Address* GetSpareOrNewBlock(); inline void DeleteExtensions(internal::Address* prev_limit); - // Call depth represents nested v8 api calls. - inline void IncrementCallDepth() { call_depth_++; } - inline void DecrementCallDepth() { call_depth_--; } - inline bool CallDepthIsZero() { return call_depth_ == 0; } - inline void EnterContext(Context context); inline void LeaveContext(); inline bool LastEnteredContextWas(Context context); @@ -414,7 +411,6 @@ class HandleScopeImplementer { saved_contexts_.detach(); spare_ = nullptr; last_handle_before_deferred_block_ = nullptr; - call_depth_ = 0; } void Free() { @@ -431,7 +427,7 @@ class HandleScopeImplementer { DeleteArray(spare_); spare_ = nullptr; } - DCHECK_EQ(call_depth_, 0); + DCHECK(isolate_->thread_local_top()->CallDepthIsZero()); } void BeginDeferredScope(); @@ -451,8 +447,6 @@ class HandleScopeImplementer { // Used as a stack to keep track of saved contexts. DetachableVector saved_contexts_; Address* spare_; - int call_depth_; - Address* last_handle_before_deferred_block_; // This is only used for threading support. HandleScopeData handle_scope_data_; diff --git a/deps/v8/src/ast/ast-traversal-visitor.h b/deps/v8/src/ast/ast-traversal-visitor.h index b4836ff7847488..2796e59a8dbb90 100644 --- a/deps/v8/src/ast/ast-traversal-visitor.h +++ b/deps/v8/src/ast/ast-traversal-visitor.h @@ -382,6 +382,12 @@ void AstTraversalVisitor::VisitThrow(Throw* expr) { RECURSE_EXPRESSION(Visit(expr->exception())); } +template +void AstTraversalVisitor::VisitOptionalChain(OptionalChain* expr) { + PROCESS_EXPRESSION(expr); + RECURSE_EXPRESSION(Visit(expr->expression())); +} + template void AstTraversalVisitor::VisitProperty(Property* expr) { PROCESS_EXPRESSION(expr); diff --git a/deps/v8/src/ast/ast.cc b/deps/v8/src/ast/ast.cc index 9987eb28449a21..4b6c4805dedc16 100644 --- a/deps/v8/src/ast/ast.cc +++ b/deps/v8/src/ast/ast.cc @@ -122,6 +122,10 @@ bool Expression::IsUndefinedLiteral() const { var_proxy->raw_name()->IsOneByteEqualTo("undefined"); } +bool Expression::IsLiteralButNotNullOrUndefined() const { + return IsLiteral() && !IsNullOrUndefinedLiteral(); +} + bool Expression::ToBooleanIsTrue() const { return IsLiteral() && AsLiteral()->ToBooleanIsTrue(); } @@ -217,13 +221,7 @@ bool FunctionLiteral::AllowsLazyCompilation() { } bool FunctionLiteral::SafeToSkipArgumentsAdaptor() const { - // TODO(bmeurer,verwaest): The --fast_calls_with_arguments_mismatches - // is mostly here for checking the real-world impact of the calling - // convention. There's not really a point in turning off this flag - // otherwise, so we should remove it at some point, when we're done - // with the experiments (https://crbug.com/v8/8895). 
- return FLAG_fast_calls_with_arguments_mismatches && - language_mode() == LanguageMode::kStrict && + return language_mode() == LanguageMode::kStrict && scope()->arguments() == nullptr && scope()->rest_parameter() == nullptr; } diff --git a/deps/v8/src/ast/ast.h b/deps/v8/src/ast/ast.h index bd52d1b2c04065..ced9f775dd57bc 100644 --- a/deps/v8/src/ast/ast.h +++ b/deps/v8/src/ast/ast.h @@ -16,6 +16,7 @@ #include "src/common/globals.h" #include "src/execution/isolate.h" #include "src/heap/factory.h" +#include "src/objects/function-syntax-kind.h" #include "src/objects/literal-objects.h" #include "src/objects/smi.h" #include "src/parsing/token.h" @@ -94,6 +95,7 @@ namespace internal { V(ImportCallExpression) \ V(Literal) \ V(NativeFunctionLiteral) \ + V(OptionalChain) \ V(Property) \ V(ResolvedProperty) \ V(Spread) \ @@ -168,11 +170,13 @@ class AstNode: public ZoneObject { void* operator new(size_t size); int position_; - class NodeTypeField : public BitField {}; + using NodeTypeField = BitField; protected: uint32_t bit_field_; - static const uint8_t kNextBitFieldIndex = NodeTypeField::kNext; + + template + using NextBitField = NodeTypeField::Next; AstNode(int position, NodeType type) : position_(position), bit_field_(NodeTypeField::encode(type)) {} @@ -182,8 +186,6 @@ class AstNode: public ZoneObject { class Statement : public AstNode { protected: Statement(int position, NodeType type) : AstNode(position, type) {} - - static const uint8_t kNextBitFieldIndex = AstNode::kNextBitFieldIndex; }; @@ -245,6 +247,14 @@ class Expression : public AstNode { // that this also checks for loads of the global "undefined" variable. bool IsUndefinedLiteral() const; + // True if either null literal or undefined literal. + inline bool IsNullOrUndefinedLiteral() const { + return IsNullLiteral() || IsUndefinedLiteral(); + } + + // True if a literal and not null or undefined. 
+ bool IsLiteralButNotNullOrUndefined() const; + bool IsCompileTimeValue(); bool IsPattern() { @@ -265,15 +275,15 @@ class Expression : public AstNode { } private: - class IsParenthesizedField - : public BitField {}; + using IsParenthesizedField = AstNode::NextBitField; protected: Expression(int pos, NodeType type) : AstNode(pos, type) { DCHECK(!is_parenthesized()); } - static const uint8_t kNextBitFieldIndex = IsParenthesizedField::kNext; + template + using NextBitField = IsParenthesizedField::Next; }; class FailureExpression : public Expression { @@ -321,8 +331,7 @@ class BreakableStatement : public Statement { } private: - class BreakableTypeField - : public BitField {}; + using BreakableTypeField = Statement::NextBitField; protected: BreakableStatement(BreakableType breakable_type, int position, NodeType type) @@ -330,7 +339,8 @@ class BreakableStatement : public Statement { bit_field_ |= BreakableTypeField::encode(breakable_type); } - static const uint8_t kNextBitFieldIndex = BreakableTypeField::kNext; + template + using NextBitField = BreakableTypeField::Next; }; class Block : public BreakableStatement { @@ -357,10 +367,8 @@ class Block : public BreakableStatement { ZonePtrList statements_; Scope* scope_; - class IgnoreCompletionField - : public BitField {}; - class IsLabeledField - : public BitField {}; + using IgnoreCompletionField = BreakableStatement::NextBitField; + using IsLabeledField = IgnoreCompletionField::Next; protected: Block(Zone* zone, ZonePtrList* labels, int capacity, @@ -448,8 +456,7 @@ class VariableDeclaration : public Declaration { private: friend class AstNodeFactory; - class IsNestedField - : public BitField {}; + using IsNestedField = Declaration::NextBitField; protected: explicit VariableDeclaration(int pos, bool is_nested = false) @@ -457,7 +464,8 @@ class VariableDeclaration : public Declaration { bit_field_ = IsNestedField::update(bit_field_, is_nested); } - static const uint8_t kNextBitFieldIndex = IsNestedField::kNext; + template + using NextBitField = IsNestedField::Next; }; // For var declarations that appear in a block scope. 
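// Illustrative sketch (not part of this patch): the ast.h changes above and
// below replace hand-numbered `class Foo : public BitField<T, shift, size> {}`
// declarations with chained `using Foo = Bar::Next<T, size>` (or NextBitField)
// aliases. The stand-alone code below is a simplified stand-in for
// v8::internal::BitField that shows how the chaining derives each field's
// shift from the previous field, so no manual shift bookkeeping is needed.
#include <cassert>
#include <cstdint>

template <class T, int kShift, int kSize>
struct BitFieldSketch {
  static constexpr uint32_t kMask = ((1u << kSize) - 1u) << kShift;

  // The next field starts immediately after this one.
  template <class T2, int kSize2>
  using Next = BitFieldSketch<T2, kShift + kSize, kSize2>;

  static constexpr uint32_t encode(T value) {
    return static_cast<uint32_t>(value) << kShift;
  }
  static constexpr T decode(uint32_t packed) {
    return static_cast<T>((packed & kMask) >> kShift);
  }
  static constexpr uint32_t update(uint32_t packed, T value) {
    return (packed & ~kMask) | encode(value);
  }
};

// Mirrors the style of the new AST declarations: each flag is laid out right
// after the previous one inside a single uint32_t bit_field_.
using NodeTypeField = BitFieldSketch<int, 0, 6>;
using IsParenthesizedField = NodeTypeField::Next<bool, 1>;
using IsAssignedField = IsParenthesizedField::Next<bool, 1>;

int main() {
  uint32_t bits = NodeTypeField::encode(42) | IsAssignedField::encode(true);
  assert(NodeTypeField::decode(bits) == 42);
  assert(!IsParenthesizedField::decode(bits));
  bits = IsParenthesizedField::update(bits, true);
  assert(IsParenthesizedField::decode(bits));
  return 0;
}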
@@ -524,9 +532,6 @@ class IterationStatement : public BreakableStatement { body_(nullptr) {} void Initialize(Statement* body) { body_ = body; } - static const uint8_t kNextBitFieldIndex = - BreakableStatement::kNextBitFieldIndex; - private: ZonePtrList* labels_; ZonePtrList* own_labels_; @@ -740,8 +745,7 @@ class ReturnStatement final : public JumpStatement { Expression* expression_; int end_position_; - class TypeField - : public BitField {}; + using TypeField = JumpStatement::NextBitField; }; @@ -977,8 +981,7 @@ class SloppyBlockFunctionStatement final : public Statement { private: friend class AstNodeFactory; - class TokenField - : public BitField {}; + using TokenField = Statement::NextBitField; SloppyBlockFunctionStatement(int pos, Variable* var, Token::Value init, Statement* statement) @@ -1079,7 +1082,7 @@ class Literal final : public Expression { private: friend class AstNodeFactory; - class TypeField : public BitField {}; + using TypeField = Expression::NextBitField; Literal(int smi, int position) : Expression(position, kLiteral), smi_(smi) { bit_field_ = TypeField::update(bit_field_, kSmi); @@ -1210,10 +1213,9 @@ class AggregateLiteral : public MaterializedLiteral { private: int depth_ : 31; - class NeedsInitialAllocationSiteField - : public BitField {}; - class IsSimpleField - : public BitField {}; + using NeedsInitialAllocationSiteField = + MaterializedLiteral::NextBitField; + using IsSimpleField = NeedsInitialAllocationSiteField::Next; protected: friend class AstNodeFactory; @@ -1236,7 +1238,8 @@ class AggregateLiteral : public MaterializedLiteral { bit_field_ = NeedsInitialAllocationSiteField::update(bit_field_, required); } - static const uint8_t kNextBitFieldIndex = IsSimpleField::kNext; + template + using NextBitField = IsSimpleField::Next; }; // Common supertype for ObjectLiteralProperty and ClassLiteralProperty @@ -1375,12 +1378,6 @@ class ObjectLiteral final : public AggregateLiteral { static_cast(AggregateLiteral::kNeedsInitialAllocationSite) < static_cast(kFastElements)); - struct Accessors: public ZoneObject { - Accessors() : getter(nullptr), setter(nullptr) {} - ObjectLiteralProperty* getter; - ObjectLiteralProperty* setter; - }; - private: friend class AstNodeFactory; @@ -1408,19 +1405,14 @@ class ObjectLiteral final : public AggregateLiteral { void set_has_null_protoype(bool has_null_prototype) { bit_field_ = HasNullPrototypeField::update(bit_field_, has_null_prototype); } - uint32_t boilerplate_properties_; Handle boilerplate_description_; ZoneList properties_; - class HasElementsField - : public BitField {}; - class HasRestPropertyField - : public BitField {}; - class FastElementsField - : public BitField {}; - class HasNullPrototypeField - : public BitField {}; + using HasElementsField = AggregateLiteral::NextBitField; + using HasRestPropertyField = HasElementsField::Next; + using FastElementsField = HasRestPropertyField::Next; + using HasNullPrototypeField = FastElementsField::Next; }; // An array literal has a literals object that is used @@ -1512,6 +1504,9 @@ class VariableProxy final : public Expression { var()->SetMaybeAssigned(); } } + void clear_is_assigned() { + bit_field_ = IsAssignedField::update(bit_field_, false); + } bool is_resolved() const { return IsResolvedField::decode(bit_field_); } void set_is_resolved() { @@ -1586,15 +1581,11 @@ class VariableProxy final : public Expression { explicit VariableProxy(const VariableProxy* copy_from); - class IsAssignedField - : public BitField {}; - class IsResolvedField : public BitField {}; - class 
IsRemovedFromUnresolvedField - : public BitField {}; - class IsNewTargetField - : public BitField {}; - class HoleCheckModeField - : public BitField {}; + using IsAssignedField = Expression::NextBitField; + using IsResolvedField = IsAssignedField::Next; + using IsRemovedFromUnresolvedField = IsResolvedField::Next; + using IsNewTargetField = IsRemovedFromUnresolvedField::Next; + using HoleCheckModeField = IsNewTargetField::Next; union { const AstRawString* raw_name_; // if !is_resolved_ @@ -1607,20 +1598,41 @@ class VariableProxy final : public Expression { friend base::ThreadedListTraits; }; +// Wraps an optional chain to provide a wrapper for jump labels. +class OptionalChain final : public Expression { + public: + Expression* expression() const { return expression_; } + + private: + friend class AstNodeFactory; + + explicit OptionalChain(Expression* expression) + : Expression(0, kOptionalChain), expression_(expression) {} + + Expression* expression_; +}; + // Assignments to a property will use one of several types of property access. // Otherwise, the assignment is to a non-property (a global, a local slot, a // parameter slot, or a destructuring pattern). enum AssignType { - NON_PROPERTY, // destructuring - NAMED_PROPERTY, // obj.key - KEYED_PROPERTY, // obj[key] - NAMED_SUPER_PROPERTY, // super.key - KEYED_SUPER_PROPERTY, // super[key] - PRIVATE_METHOD // obj.#key: #key is a private method + NON_PROPERTY, // destructuring + NAMED_PROPERTY, // obj.key + KEYED_PROPERTY, // obj[key] + NAMED_SUPER_PROPERTY, // super.key + KEYED_SUPER_PROPERTY, // super[key] + PRIVATE_METHOD, // obj.#key: #key is a private method + PRIVATE_GETTER_ONLY, // obj.#key: #key only has a getter defined + PRIVATE_SETTER_ONLY, // obj.#key: #key only has a setter defined + PRIVATE_GETTER_AND_SETTER // obj.#key: #key has both accessors defined }; class Property final : public Expression { public: + bool is_optional_chain_link() const { + return IsOptionalChainLinkField::decode(bit_field_); + } + bool IsValidReferenceExpression() const { return true; } Expression* obj() const { return obj_; } @@ -1637,8 +1649,21 @@ class Property final : public Expression { VariableProxy* proxy = property->key()->AsVariableProxy(); DCHECK_NOT_NULL(proxy); Variable* var = proxy->var(); - // Use KEYED_PROPERTY for private fields. - return var->requires_brand_check() ? PRIVATE_METHOD : KEYED_PROPERTY; + + switch (var->mode()) { + case VariableMode::kPrivateMethod: + return PRIVATE_METHOD; + case VariableMode::kConst: + return KEYED_PROPERTY; // Use KEYED_PROPERTY for private fields. 
+ case VariableMode::kPrivateGetterOnly: + return PRIVATE_GETTER_ONLY; + case VariableMode::kPrivateSetterOnly: + return PRIVATE_SETTER_ONLY; + case VariableMode::kPrivateGetterAndSetter: + return PRIVATE_GETTER_AND_SETTER; + default: + UNREACHABLE(); + } } bool super_access = property->IsSuperAccess(); return (property->key()->IsPropertyName()) @@ -1649,10 +1674,13 @@ class Property final : public Expression { private: friend class AstNodeFactory; - Property(Expression* obj, Expression* key, int pos) + Property(Expression* obj, Expression* key, int pos, bool optional_chain) : Expression(pos, kProperty), obj_(obj), key_(key) { + bit_field_ |= IsOptionalChainLinkField::encode(optional_chain); } + using IsOptionalChainLinkField = Expression::NextBitField; + Expression* obj_; Expression* key_; }; @@ -1690,6 +1718,10 @@ class Call final : public Expression { return IsTaggedTemplateField::decode(bit_field_); } + bool is_optional_chain_link() const { + return IsOptionalChainLinkField::decode(bit_field_); + } + bool only_last_arg_is_spread() { return !arguments_.is_empty() && arguments_.last()->IsSpread(); } @@ -1722,13 +1754,14 @@ class Call final : public Expression { Call(Zone* zone, Expression* expression, const ScopedPtrList& arguments, int pos, - PossiblyEval possibly_eval) + PossiblyEval possibly_eval, bool optional_chain) : Expression(pos, kCall), expression_(expression), arguments_(0, nullptr) { bit_field_ |= IsPossiblyEvalField::encode(possibly_eval == IS_POSSIBLY_EVAL) | - IsTaggedTemplateField::encode(false); + IsTaggedTemplateField::encode(false) | + IsOptionalChainLinkField::encode(optional_chain); arguments.CopyTo(&arguments_, zone); } @@ -1739,14 +1772,14 @@ class Call final : public Expression { expression_(expression), arguments_(0, nullptr) { bit_field_ |= IsPossiblyEvalField::encode(false) | - IsTaggedTemplateField::encode(true); + IsTaggedTemplateField::encode(true) | + IsOptionalChainLinkField::encode(false); arguments.CopyTo(&arguments_, zone); } - class IsPossiblyEvalField - : public BitField {}; - class IsTaggedTemplateField - : public BitField {}; + using IsPossiblyEvalField = Expression::NextBitField; + using IsTaggedTemplateField = IsPossiblyEvalField::Next; + using IsOptionalChainLinkField = IsTaggedTemplateField::Next; Expression* expression_; ZonePtrList arguments_; @@ -1838,8 +1871,7 @@ class UnaryOperation final : public Expression { Expression* expression_; - class OperatorField - : public BitField {}; + using OperatorField = Expression::NextBitField; }; @@ -1865,8 +1897,7 @@ class BinaryOperation final : public Expression { Expression* left_; Expression* right_; - class OperatorField - : public BitField {}; + using OperatorField = Expression::NextBitField; }; class NaryOperation final : public Expression { @@ -1925,8 +1956,7 @@ class NaryOperation final : public Expression { }; ZoneVector subsequent_; - class OperatorField - : public BitField {}; + using OperatorField = Expression::NextBitField; }; class CountOperation final : public Expression { @@ -1946,9 +1976,8 @@ class CountOperation final : public Expression { bit_field_ |= IsPrefixField::encode(is_prefix) | TokenField::encode(op); } - class IsPrefixField - : public BitField {}; - class TokenField : public BitField {}; + using IsPrefixField = Expression::NextBitField; + using TokenField = IsPrefixField::Next; Expression* expression_; }; @@ -1978,8 +2007,7 @@ class CompareOperation final : public Expression { Expression* left_; Expression* right_; - class OperatorField - : public BitField {}; + using 
OperatorField = Expression::NextBitField; }; @@ -2071,10 +2099,8 @@ class Assignment : public Expression { private: friend class AstNodeFactory; - class TokenField - : public BitField {}; - class LookupHoistingModeField : public BitField { - }; + using TokenField = Expression::NextBitField; + using LookupHoistingModeField = TokenField::Next; Expression* target_; Expression* value_; @@ -2132,8 +2158,7 @@ class Suspend : public Expression { Expression* expression_; - class OnAbruptResumeField - : public BitField {}; + using OnAbruptResumeField = Expression::NextBitField; }; class Yield final : public Suspend { @@ -2175,14 +2200,6 @@ class Throw final : public Expression { class FunctionLiteral final : public Expression { public: - enum FunctionType { - kAnonymousExpression, - kNamedExpression, - kDeclaration, - kAccessorOrMethod, - kWrapped, - }; - enum ParameterFlag : uint8_t { kNoDuplicateParameters, kHasDuplicateParameters @@ -2204,12 +2221,8 @@ class FunctionLiteral final : public Expression { int function_token_position() const { return function_token_position_; } int start_position() const; int end_position() const; - bool is_declaration() const { return function_type() == kDeclaration; } - bool is_named_expression() const { - return function_type() == kNamedExpression; - } bool is_anonymous_expression() const { - return function_type() == kAnonymousExpression; + return syntax_kind() == FunctionSyntaxKind::kAnonymousExpression; } void mark_as_oneshot_iife() { @@ -2219,7 +2232,6 @@ class FunctionLiteral final : public Expression { bool is_toplevel() const { return function_literal_id() == kFunctionLiteralIdTopLevel; } - bool is_wrapped() const { return function_type() == kWrapped; } V8_EXPORT_PRIVATE LanguageMode language_mode() const; static bool NeedsHomeObject(Expression* expr); @@ -2289,8 +2301,8 @@ class FunctionLiteral final : public Expression { V8_EXPORT_PRIVATE bool ShouldEagerCompile() const; V8_EXPORT_PRIVATE void SetShouldEagerCompile(); - FunctionType function_type() const { - return FunctionTypeBits::decode(bit_field_); + FunctionSyntaxKind syntax_kind() const { + return FunctionSyntaxKindBits::decode(bit_field_); } FunctionKind kind() const; @@ -2342,7 +2354,7 @@ class FunctionLiteral final : public Expression { AstValueFactory* ast_value_factory, DeclarationScope* scope, const ScopedPtrList& body, int expected_property_count, int parameter_count, - int function_length, FunctionType function_type, + int function_length, FunctionSyntaxKind function_syntax_kind, ParameterFlag has_duplicate_parameters, EagerCompileHint eager_compile_hint, int position, bool has_braces, int function_literal_id, @@ -2359,28 +2371,28 @@ class FunctionLiteral final : public Expression { body_(0, nullptr), raw_inferred_name_(ast_value_factory->empty_cons_string()), produced_preparse_data_(produced_preparse_data) { - bit_field_ |= - FunctionTypeBits::encode(function_type) | Pretenure::encode(false) | - HasDuplicateParameters::encode(has_duplicate_parameters == - kHasDuplicateParameters) | - DontOptimizeReasonField::encode(BailoutReason::kNoReason) | - RequiresInstanceMembersInitializer::encode(false) | - HasBracesField::encode(has_braces) | OneshotIIFEBit::encode(false); + bit_field_ |= FunctionSyntaxKindBits::encode(function_syntax_kind) | + Pretenure::encode(false) | + HasDuplicateParameters::encode(has_duplicate_parameters == + kHasDuplicateParameters) | + DontOptimizeReasonField::encode(BailoutReason::kNoReason) | + RequiresInstanceMembersInitializer::encode(false) | + 
HasBracesField::encode(has_braces) | + OneshotIIFEBit::encode(false); if (eager_compile_hint == kShouldEagerCompile) SetShouldEagerCompile(); body.CopyTo(&body_, zone); } - class FunctionTypeBits - : public BitField {}; - class Pretenure : public BitField {}; - class HasDuplicateParameters : public BitField {}; - class DontOptimizeReasonField - : public BitField {}; - class RequiresInstanceMembersInitializer - : public BitField {}; - class HasBracesField - : public BitField {}; - class OneshotIIFEBit : public BitField {}; + using FunctionSyntaxKindBits = + Expression::NextBitField; + using Pretenure = FunctionSyntaxKindBits::Next; + using HasDuplicateParameters = Pretenure::Next; + using DontOptimizeReasonField = + HasDuplicateParameters::Next; + using RequiresInstanceMembersInitializer = + DontOptimizeReasonField::Next; + using HasBracesField = RequiresInstanceMembersInitializer::Next; + using OneshotIIFEBit = HasBracesField::Next; // expected_property_count_ is the sum of instance fields and properties. // It can vary depending on whether a function is lazily or eagerly parsed. @@ -2432,6 +2444,11 @@ class ClassLiteralProperty final : public LiteralProperty { return private_or_computed_name_var_; } + bool NeedsHomeObjectOnClassPrototype() const { + return is_private() && kind_ == METHOD && + FunctionLiteral::NeedsHomeObject(value_); + } + private: friend class AstNodeFactory; @@ -2525,12 +2542,9 @@ class ClassLiteral final : public Expression { ZonePtrList* properties_; FunctionLiteral* static_fields_initializer_; FunctionLiteral* instance_members_initializer_function_; - class HasNameStaticProperty - : public BitField {}; - class HasStaticComputedNames - : public BitField {}; - class IsAnonymousExpression - : public BitField {}; + using HasNameStaticProperty = Expression::NextBitField; + using HasStaticComputedNames = HasNameStaticProperty::Next; + using IsAnonymousExpression = HasStaticComputedNames::Next; }; @@ -3046,8 +3060,13 @@ class AstNodeFactory final { return new (zone_) Variable(variable); } - Property* NewProperty(Expression* obj, Expression* key, int pos) { - return new (zone_) Property(obj, key, pos); + OptionalChain* NewOptionalChain(Expression* expression) { + return new (zone_) OptionalChain(expression); + } + + Property* NewProperty(Expression* obj, Expression* key, int pos, + bool optional_chain = false) { + return new (zone_) Property(obj, key, pos, optional_chain); } ResolvedProperty* NewResolvedProperty(VariableProxy* obj, @@ -3058,8 +3077,10 @@ class AstNodeFactory final { Call* NewCall(Expression* expression, const ScopedPtrList& arguments, int pos, - Call::PossiblyEval possibly_eval = Call::NOT_EVAL) { - return new (zone_) Call(zone_, expression, arguments, pos, possibly_eval); + Call::PossiblyEval possibly_eval = Call::NOT_EVAL, + bool optional_chain = false) { + return new (zone_) + Call(zone_, expression, arguments, pos, possibly_eval, optional_chain); } Call* NewTaggedTemplate(Expression* expression, @@ -3189,13 +3210,13 @@ class AstNodeFactory final { const ScopedPtrList& body, int expected_property_count, int parameter_count, int function_length, FunctionLiteral::ParameterFlag has_duplicate_parameters, - FunctionLiteral::FunctionType function_type, + FunctionSyntaxKind function_syntax_kind, FunctionLiteral::EagerCompileHint eager_compile_hint, int position, bool has_braces, int function_literal_id, ProducedPreparseData* produced_preparse_data = nullptr) { return new (zone_) FunctionLiteral( zone_, name, ast_value_factory_, scope, body, 
expected_property_count, - parameter_count, function_length, function_type, + parameter_count, function_length, function_syntax_kind, has_duplicate_parameters, eager_compile_hint, position, has_braces, function_literal_id, produced_preparse_data); } @@ -3209,7 +3230,7 @@ class AstNodeFactory final { return new (zone_) FunctionLiteral( zone_, ast_value_factory_->empty_string(), ast_value_factory_, scope, body, expected_property_count, parameter_count, parameter_count, - FunctionLiteral::kAnonymousExpression, + FunctionSyntaxKind::kAnonymousExpression, FunctionLiteral::kNoDuplicateParameters, FunctionLiteral::kShouldLazyCompile, 0, /* has_braces */ false, kFunctionLiteralIdTopLevel); diff --git a/deps/v8/src/ast/modules.cc b/deps/v8/src/ast/modules.cc index 261b72c352a55d..dbd20f50a80869 100644 --- a/deps/v8/src/ast/modules.cc +++ b/deps/v8/src/ast/modules.cc @@ -84,10 +84,11 @@ void SourceTextModuleDescriptor::AddStarExport( } namespace { -Handle ToStringOrUndefined(Isolate* isolate, const AstRawString* s) { +Handle ToStringOrUndefined(Isolate* isolate, + const AstRawString* s) { return (s == nullptr) - ? Handle::cast(isolate->factory()->undefined_value()) - : Handle::cast(s->string()); + ? Handle::cast(isolate->factory()->undefined_value()) + : Handle::cast(s->string()); } } // namespace diff --git a/deps/v8/src/ast/prettyprinter.cc b/deps/v8/src/ast/prettyprinter.cc index c0fe3baff398bc..581517ee4ec34a 100644 --- a/deps/v8/src/ast/prettyprinter.cc +++ b/deps/v8/src/ast/prettyprinter.cc @@ -27,6 +27,8 @@ CallPrinter::CallPrinter(Isolate* isolate, bool is_user_js) is_call_error_ = false; is_iterator_error_ = false; is_async_iterator_error_ = false; + destructuring_prop_ = nullptr; + destructuring_assignment_ = nullptr; is_user_js_ = is_user_js; function_kind_ = kNormalFunction; InitializeAstVisitor(isolate); @@ -299,24 +301,50 @@ void CallPrinter::VisitVariableProxy(VariableProxy* node) { void CallPrinter::VisitAssignment(Assignment* node) { - Find(node->target()); - if (node->target()->IsArrayLiteral()) { - // Special case the visit for destructuring array assignment. - bool was_found = false; - if (node->value()->position() == position_) { - is_iterator_error_ = true; + bool was_found = false; + if (node->target()->IsObjectLiteral()) { + ObjectLiteral* target = node->target()->AsObjectLiteral(); + if (target->position() == position_) { was_found = !found_; - if (was_found) { - found_ = true; + found_ = true; + destructuring_assignment_ = node; + } else { + for (ObjectLiteralProperty* prop : *target->properties()) { + if (prop->value()->position() == position_) { + was_found = !found_; + found_ = true; + destructuring_prop_ = prop; + destructuring_assignment_ = node; + break; + } } } - Find(node->value(), true); - if (was_found) { - done_ = true; - found_ = false; + } + if (!was_found) { + Find(node->target()); + if (node->target()->IsArrayLiteral()) { + // Special case the visit for destructuring array assignment. 
+ bool was_found = false; + if (node->value()->position() == position_) { + is_iterator_error_ = true; + was_found = !found_; + found_ = true; + } + Find(node->value(), true); + if (was_found) { + done_ = true; + found_ = false; + } + } else { + Find(node->value()); } } else { - Find(node->value()); + Find(node->value(), true); + } + + if (was_found) { + done_ = true; + found_ = false; } } @@ -342,6 +370,9 @@ void CallPrinter::VisitAwait(Await* node) { Find(node->expression()); } void CallPrinter::VisitThrow(Throw* node) { Find(node->exception()); } +void CallPrinter::VisitOptionalChain(OptionalChain* node) { + Find(node->expression()); +} void CallPrinter::VisitProperty(Property* node) { Expression* key = node->key(); @@ -349,12 +380,18 @@ void CallPrinter::VisitProperty(Property* node) { if (literal != nullptr && literal->BuildValue(isolate_)->IsInternalizedString()) { Find(node->obj(), true); + if (node->is_optional_chain_link()) { + Print("?"); + } Print("."); // TODO(adamk): Teach Literal how to print its values without // allocating on the heap. PrintLiteral(literal->BuildValue(isolate_), false); } else { Find(node->obj(), true); + if (node->is_optional_chain_link()) { + Print("?."); + } Print("["); Find(key, true); Print("]"); @@ -1272,6 +1309,11 @@ void AstPrinter::VisitThrow(Throw* node) { Visit(node->exception()); } +void AstPrinter::VisitOptionalChain(OptionalChain* node) { + IndentedScope indent(this, "OPTIONAL_CHAIN", node->position()); + Visit(node->expression()); +} + void AstPrinter::VisitProperty(Property* node) { EmbeddedVector buf; SNPrintF(buf, "PROPERTY"); @@ -1289,6 +1331,18 @@ void AstPrinter::VisitProperty(Property* node) { PrintIndentedVisit("PRIVATE_METHOD", node->key()); break; } + case PRIVATE_GETTER_ONLY: { + PrintIndentedVisit("PRIVATE_GETTER_ONLY", node->key()); + break; + } + case PRIVATE_SETTER_ONLY: { + PrintIndentedVisit("PRIVATE_SETTER_ONLY", node->key()); + break; + } + case PRIVATE_GETTER_AND_SETTER: { + PrintIndentedVisit("PRIVATE_GETTER_AND_SETTER", node->key()); + break; + } case KEYED_PROPERTY: case KEYED_SUPER_PROPERTY: { PrintIndentedVisit("KEY", node->key()); diff --git a/deps/v8/src/ast/prettyprinter.h b/deps/v8/src/ast/prettyprinter.h index cceb5fc269b5ef..322fd9fb1437bf 100644 --- a/deps/v8/src/ast/prettyprinter.h +++ b/deps/v8/src/ast/prettyprinter.h @@ -31,6 +31,12 @@ class CallPrinter final : public AstVisitor { kCallAndAsyncIterator }; ErrorHint GetErrorHint() const; + ObjectLiteralProperty* destructuring_prop() const { + return destructuring_prop_; + } + Assignment* destructuring_assignment() const { + return destructuring_assignment_; + } // Individual nodes #define DECLARE_VISIT(type) void Visit##type(type* node); @@ -54,6 +60,8 @@ class CallPrinter final : public AstVisitor { bool is_iterator_error_; bool is_async_iterator_error_; bool is_call_error_; + ObjectLiteralProperty* destructuring_prop_; + Assignment* destructuring_assignment_; FunctionKind function_kind_; DEFINE_AST_VISITOR_SUBCLASS_MEMBERS(); diff --git a/deps/v8/src/ast/scopes.cc b/deps/v8/src/ast/scopes.cc index 237d98ec6047f2..c8002dd088c9c7 100644 --- a/deps/v8/src/ast/scopes.cc +++ b/deps/v8/src/ast/scopes.cc @@ -40,7 +40,6 @@ Variable* VariableMap::Declare(Zone* zone, Scope* scope, VariableKind kind, InitializationFlag initialization_flag, MaybeAssignedFlag maybe_assigned_flag, - RequiresBrandCheckFlag requires_brand_check, bool* was_added) { // AstRawStrings are unambiguous, i.e., the same string is always represented // by the same AstRawString*. 
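The hunks above fold the private-name kind (field, method, getter-only, setter-only, or getter-and-setter pair) into the variable mode itself and drop the separate "requires brand check" flag, so the printer and parser can derive the access kind from the mode alone. A minimal sketch of that classification with invented enums (not V8's real types):

#include <cassert>

enum class Mode {
  kConst,  // private field
  kPrivateMethod,
  kPrivateGetterOnly,
  kPrivateSetterOnly,
  kPrivateGetterAndSetter
};

enum class AssignKind {
  kKeyedProperty,
  kPrivateMethod,
  kPrivateGetterOnly,
  kPrivateSetterOnly,
  kPrivateGetterAndSetter
};

AssignKind AssignKindForPrivateName(Mode mode) {
  switch (mode) {
    case Mode::kConst:                  return AssignKind::kKeyedProperty;
    case Mode::kPrivateMethod:          return AssignKind::kPrivateMethod;
    case Mode::kPrivateGetterOnly:      return AssignKind::kPrivateGetterOnly;
    case Mode::kPrivateSetterOnly:      return AssignKind::kPrivateSetterOnly;
    case Mode::kPrivateGetterAndSetter: return AssignKind::kPrivateGetterAndSetter;
  }
  assert(false && "unhandled mode");
  return AssignKind::kKeyedProperty;
}

int main() {
  assert(AssignKindForPrivateName(Mode::kConst) == AssignKind::kKeyedProperty);
  assert(AssignKindForPrivateName(Mode::kPrivateGetterOnly) == AssignKind::kPrivateGetterOnly);
  return 0;
}

The switch mirrors the one added to Property::GetAssignType earlier in this patch; only a plain private field (declared kConst) still maps to an ordinary keyed property access.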
@@ -52,9 +51,8 @@ Variable* VariableMap::Declare(Zone* zone, Scope* scope, if (*was_added) { // The variable has not been declared yet -> insert it. DCHECK_EQ(name, p->key); - Variable* variable = - new (zone) Variable(scope, name, mode, kind, initialization_flag, - maybe_assigned_flag, requires_brand_check); + Variable* variable = new (zone) Variable( + scope, name, mode, kind, initialization_flag, maybe_assigned_flag); p->value = variable; } return reinterpret_cast(p->value); @@ -170,7 +168,6 @@ Scope::Scope(Zone* zone, ScopeType scope_type, Handle scope_info) #ifdef DEBUG already_resolved_ = true; #endif - if (scope_info->CallsSloppyEval()) scope_calls_eval_ = true; set_language_mode(scope_info->language_mode()); num_heap_slots_ = scope_info->ContextLength(); DCHECK_LE(Context::MIN_CONTEXT_SLOTS, num_heap_slots_); @@ -186,6 +183,10 @@ DeclarationScope::DeclarationScope(Zone* zone, ScopeType scope_type, params_(0, zone) { DCHECK_NE(scope_type, SCRIPT_SCOPE); SetDefaults(); + if (scope_info->SloppyEvalCanExtendVars()) { + DCHECK(!is_eval_scope()); + sloppy_eval_can_extend_vars_ = true; + } } Scope::Scope(Zone* zone, const AstRawString* catch_variable_name, @@ -258,7 +259,8 @@ void Scope::SetDefaults() { set_language_mode(LanguageMode::kSloppy); - scope_calls_eval_ = false; + calls_eval_ = false; + sloppy_eval_can_extend_vars_ = false; scope_nonlinear_ = false; is_hidden_ = false; is_debug_evaluate_scope_ = false; @@ -380,11 +382,8 @@ Scope* Scope::DeserializeScopeChain(Isolate* isolate, Zone* zone, if (deserialization_mode == DeserializationMode::kIncludingVariables && script_scope->scope_info_.is_null()) { - Handle table( - isolate->native_context()->script_context_table(), isolate); - Handle first = ScriptContextTable::GetContext(isolate, table, 0); - Handle scope_info(first->scope_info(), isolate); - script_scope->SetScriptScopeInfo(scope_info); + script_scope->SetScriptScopeInfo( + ReadOnlyRoots(isolate).global_this_binding_scope_info_handle()); } if (innermost_scope == nullptr) return script_scope; @@ -626,7 +625,7 @@ Variable* DeclarationScope::DeclareFunctionVar(const AstRawString* name, : NORMAL_VARIABLE; function_ = new (zone()) Variable(this, name, VariableMode::kConst, kind, kCreatedInitialized); - if (calls_sloppy_eval()) { + if (sloppy_eval_can_extend_vars()) { cache->NonLocal(name, VariableMode::kDynamic); } else { cache->variables_.Add(zone(), function_); @@ -652,7 +651,8 @@ Scope* Scope::FinalizeBlockScope() { #endif if (variables_.occupancy() > 0 || - (is_declaration_scope() && AsDeclarationScope()->calls_sloppy_eval())) { + (is_declaration_scope() && + AsDeclarationScope()->sloppy_eval_can_extend_vars())) { return this; } @@ -682,10 +682,10 @@ Scope* Scope::FinalizeBlockScope() { if (inner_scope_calls_eval_) outer_scope()->inner_scope_calls_eval_ = true; - // No need to propagate scope_calls_eval_, since if it was relevant to - // this scope we would have had to bail out at the top. - DCHECK(!scope_calls_eval_ || !is_declaration_scope() || - !is_sloppy(language_mode())); + // No need to propagate sloppy_eval_can_extend_vars_, since if it was relevant + // to this scope we would have had to bail out at the top. + DCHECK(!is_declaration_scope() || + !AsDeclarationScope()->sloppy_eval_can_extend_vars()); // This block does not need a context. num_heap_slots_ = 0; @@ -750,8 +750,8 @@ void Scope::Snapshot::Reparent(DeclarationScope* new_parent) { outer_closure->locals_.Rewind(top_local_); // Move eval calls since Snapshot's creation into new_parent. 
- if (outer_scope_and_calls_eval_->scope_calls_eval_) { - new_parent->scope_calls_eval_ = true; + if (outer_scope_and_calls_eval_->calls_eval_) { + new_parent->RecordDeclarationScopeEvalCall(); new_parent->inner_scope_calls_eval_ = true; } @@ -787,13 +787,11 @@ Variable* Scope::LookupInScopeInfo(const AstRawString* name, Scope* cache) { VariableMode mode; InitializationFlag init_flag; MaybeAssignedFlag maybe_assigned_flag; - RequiresBrandCheckFlag requires_brand_check = kNoBrandCheck; { location = VariableLocation::CONTEXT; index = ScopeInfo::ContextSlotIndex(*scope_info_, name_handle, &mode, - &init_flag, &maybe_assigned_flag, - &requires_brand_check); + &init_flag, &maybe_assigned_flag); found = index >= 0; } @@ -818,9 +816,9 @@ Variable* Scope::LookupInScopeInfo(const AstRawString* name, Scope* cache) { } bool was_added; - Variable* var = cache->variables_.Declare( - zone(), this, name, mode, NORMAL_VARIABLE, init_flag, maybe_assigned_flag, - requires_brand_check, &was_added); + Variable* var = + cache->variables_.Declare(zone(), this, name, mode, NORMAL_VARIABLE, + init_flag, maybe_assigned_flag, &was_added); DCHECK(was_added); var->AllocateTo(location, index); return var; @@ -873,6 +871,8 @@ Variable* Scope::DeclareLocal(const AstRawString* name, VariableMode mode, VariableKind kind, bool* was_added, InitializationFlag init_flag) { DCHECK(!already_resolved_); + // Private methods should be declared with ClassScope::DeclarePrivateName() + DCHECK(!IsPrivateMethodOrAccessorVariableMode(mode)); // This function handles VariableMode::kVar, VariableMode::kLet, and // VariableMode::kConst modes. VariableMode::kDynamic variables are // introduced during variable allocation, and VariableMode::kTemporary @@ -905,6 +905,8 @@ Variable* Scope::DeclareVariable( VariableMode mode, VariableKind kind, InitializationFlag init, bool* was_added, bool* sloppy_mode_block_scope_function_redefinition, bool* ok) { + // Private methods should be declared with ClassScope::DeclarePrivateName() + DCHECK(!IsPrivateMethodOrAccessorVariableMode(mode)); DCHECK(IsDeclaredVariableMode(mode)); DCHECK(!already_resolved_); DCHECK(!GetDeclarationScope()->is_being_lazily_parsed()); @@ -990,7 +992,8 @@ Variable* Scope::DeclareVariableName(const AstRawString* name, DCHECK(IsDeclaredVariableMode(mode)); DCHECK(!already_resolved_); DCHECK(GetDeclarationScope()->is_being_lazily_parsed()); - + // Private methods should be declared with ClassScope::DeclarePrivateName() + DCHECK(!IsPrivateMethodOrAccessorVariableMode(mode)); if (mode == VariableMode::kVar && !is_declaration_scope()) { return GetDeclarationScope()->DeclareVariableName(name, mode, was_added, kind); @@ -1044,7 +1047,7 @@ Variable* DeclarationScope::DeclareDynamicGlobal(const AstRawString* name, bool was_added; return cache->variables_.Declare( zone(), this, name, VariableMode::kDynamicGlobal, kind, - kCreatedInitialized, kNotAssigned, kNoBrandCheck, &was_added); + kCreatedInitialized, kNotAssigned, &was_added); // TODO(neis): Mark variable as maybe-assigned? 
} @@ -1243,7 +1246,7 @@ int Scope::ContextChainLengthUntilOutermostSloppyEval() const { if (!s->NeedsContext()) continue; length++; if (s->is_declaration_scope() && - s->AsDeclarationScope()->calls_sloppy_eval()) { + s->AsDeclarationScope()->sloppy_eval_can_extend_vars()) { result = length; } } @@ -1384,14 +1387,16 @@ void Scope::CollectNonLocals(DeclarationScope* max_outer_scope, void Scope::AnalyzePartially(DeclarationScope* max_outer_scope, AstNodeFactory* ast_node_factory, - UnresolvedList* new_unresolved_list) { - this->ForEach([max_outer_scope, ast_node_factory, - new_unresolved_list](Scope* scope) { + UnresolvedList* new_unresolved_list, + bool maybe_in_arrowhead) { + this->ForEach([max_outer_scope, ast_node_factory, new_unresolved_list, + maybe_in_arrowhead](Scope* scope) { DCHECK_IMPLIES(scope->is_declaration_scope(), !scope->AsDeclarationScope()->was_lazily_parsed()); for (VariableProxy* proxy = scope->unresolved_list_.first(); proxy != nullptr; proxy = proxy->next_unresolved()) { + if (proxy->is_removed_from_unresolved()) continue; DCHECK(!proxy->is_resolved()); Variable* var = Lookup(proxy, scope, max_outer_scope->outer_scope()); @@ -1399,7 +1404,8 @@ void Scope::AnalyzePartially(DeclarationScope* max_outer_scope, // Don't copy unresolved references to the script scope, unless it's a // reference to a private name or method. In that case keep it so we // can fail later. - if (!max_outer_scope->outer_scope()->is_script_scope()) { + if (!max_outer_scope->outer_scope()->is_script_scope() || + maybe_in_arrowhead) { VariableProxy* copy = ast_node_factory->CopyVariableProxy(proxy); new_unresolved_list->Add(copy); } @@ -1434,6 +1440,7 @@ void DeclarationScope::ResetAfterPreparsing(AstValueFactory* ast_value_factory, sloppy_block_functions_.Clear(); rare_data_ = nullptr; has_rest_ = false; + function_ = nullptr; DCHECK_NE(zone_, ast_value_factory->zone()); zone_->ReleaseMemory(); @@ -1487,17 +1494,19 @@ void DeclarationScope::SavePreparseDataForDeclarationScope(Parser* parser) { } void DeclarationScope::AnalyzePartially(Parser* parser, - AstNodeFactory* ast_node_factory) { + AstNodeFactory* ast_node_factory, + bool maybe_in_arrowhead) { DCHECK(!force_eager_compilation_); UnresolvedList new_unresolved_list; if (!IsArrowFunction(function_kind_) && - (!outer_scope_->is_script_scope() || + (!outer_scope_->is_script_scope() || maybe_in_arrowhead || (preparse_data_builder_ != nullptr && preparse_data_builder_->HasInnerFunctions()))) { // Try to resolve unresolved variables for this Scope and migrate those // which cannot be resolved inside. It doesn't make sense to try to resolve // them in the outer Scopes here, because they are incomplete. - Scope::AnalyzePartially(this, ast_node_factory, &new_unresolved_list); + Scope::AnalyzePartially(this, ast_node_factory, &new_unresolved_list, + maybe_in_arrowhead); // Migrate function_ to the right Zone. 
if (function_ != nullptr) { @@ -1596,10 +1605,6 @@ void PrintVar(int indent, Variable* var) { if (comma) PrintF(", "); PrintF("hole initialization elided"); } - if (var->requires_brand_check()) { - if (comma) PrintF(", "); - PrintF("requires brand check"); - } PrintF("\n"); } @@ -1676,7 +1681,8 @@ void Scope::Print(int n) { Indent(n1, "// strict mode scope\n"); } if (IsAsmModule()) Indent(n1, "// scope is an asm module\n"); - if (is_declaration_scope() && AsDeclarationScope()->calls_sloppy_eval()) { + if (is_declaration_scope() && + AsDeclarationScope()->sloppy_eval_can_extend_vars()) { Indent(n1, "// scope calls sloppy 'eval'\n"); } if (is_declaration_scope() && AsDeclarationScope()->NeedsHomeObject()) { @@ -1774,9 +1780,9 @@ Variable* Scope::NonLocal(const AstRawString* name, VariableMode mode) { // Declare a new non-local. DCHECK(IsDynamicVariableMode(mode)); bool was_added; - Variable* var = variables_.Declare(zone(), this, name, mode, NORMAL_VARIABLE, - kCreatedInitialized, kNotAssigned, - kNoBrandCheck, &was_added); + Variable* var = + variables_.Declare(zone(), this, name, mode, NORMAL_VARIABLE, + kCreatedInitialized, kNotAssigned, &was_added); // Allocate it by giving it a dynamic lookup. var->AllocateTo(VariableLocation::LOOKUP, -1); return var; @@ -1814,7 +1820,18 @@ Variable* Scope::Lookup(VariableProxy* proxy, Scope* scope, // We found a variable and we are done. (Even if there is an 'eval' in this // scope which introduces the same variable again, the resulting variable // remains the same.) - if (var != nullptr) { + // + // For sloppy eval though, we skip dynamic variable to avoid resolving to a + // variable when the variable and proxy are in the same eval execution. The + // variable is not available on subsequent lazy executions of functions in + // the eval, so this avoids inner functions from looking up different + // variables during eager and lazy compilation. + // + // TODO(leszeks): Maybe we want to restrict this to e.g. lookups of a proxy + // living in a different scope to the current one, or some other + // optimisation. + if (var != nullptr && + !(scope->is_eval_scope() && var->mode() == VariableMode::kDynamic)) { if (mode == kParsedScope && force_context_allocation && !var->is_dynamic()) { var->ForceContextAllocation(); @@ -1829,8 +1846,9 @@ Variable* Scope::Lookup(VariableProxy* proxy, Scope* scope, return LookupWith(proxy, scope, outer_scope_end, entry_point, force_context_allocation); } - if (V8_UNLIKELY(scope->is_declaration_scope() && - scope->AsDeclarationScope()->calls_sloppy_eval())) { + if (V8_UNLIKELY( + scope->is_declaration_scope() && + scope->AsDeclarationScope()->sloppy_eval_can_extend_vars())) { return LookupSloppyEval(proxy, scope, outer_scope_end, entry_point, force_context_allocation); } @@ -1901,7 +1919,7 @@ Variable* Scope::LookupSloppyEval(VariableProxy* proxy, Scope* scope, Scope* outer_scope_end, Scope* entry_point, bool force_context_allocation) { DCHECK(scope->is_declaration_scope() && - scope->AsDeclarationScope()->calls_sloppy_eval()); + scope->AsDeclarationScope()->sloppy_eval_can_extend_vars()); // If we're compiling eval, it's possible that the outer scope is the first // ScopeInfo-backed scope. 
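The Scope::Lookup change above skips a binding whose mode is kDynamic when it is found in an eval scope, so inner functions resolve to the same variable whether they are compiled eagerly or lazily. A minimal sketch of that walk, with invented Scope and Var types rather than V8's:

#include <cstring>
#include <vector>

struct Var {
  const char* name;
  bool is_dynamic;  // stands in for VariableMode::kDynamic
};

struct Scope {
  std::vector<Var> vars;
  Scope* outer = nullptr;
  bool is_eval_scope = false;

  const Var* Lookup(const char* name) const {
    for (const Scope* s = this; s != nullptr; s = s->outer) {
      for (const Var& v : s->vars) {
        if (std::strcmp(v.name, name) != 0) continue;
        // Ignore dynamic bindings introduced inside an eval scope and keep
        // walking outward instead of resolving to them.
        if (s->is_eval_scope && v.is_dynamic) continue;
        return &v;
      }
    }
    return nullptr;
  }
};

int main() {
  Scope outer;
  outer.vars = {{"x", false}};
  Scope eval_scope;
  eval_scope.outer = &outer;
  eval_scope.is_eval_scope = true;
  eval_scope.vars = {{"x", true}};  // dynamic binding created by the eval
  // Resolves to the outer, non-dynamic "x".
  return eval_scope.Lookup("x") == &outer.vars[0] ? 0 : 1;
}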
@@ -2065,7 +2083,7 @@ bool Scope::MustAllocate(Variable* var) { if (!var->raw_name()->IsEmpty() && (inner_scope_calls_eval_ || is_catch_scope() || is_script_scope())) { var->set_is_used(); - if (inner_scope_calls_eval_) var->SetMaybeAssigned(); + if (inner_scope_calls_eval_ && !var->is_this()) var->SetMaybeAssigned(); } DCHECK(!var->has_forced_context_allocation() || var->is_used()); // Global variables do not need to be allocated. @@ -2081,11 +2099,14 @@ bool Scope::MustAllocateInContext(Variable* var) { // // Temporary variables are always stack-allocated. Catch-bound variables are // always context-allocated. - if (var->mode() == VariableMode::kTemporary) return false; + VariableMode mode = var->mode(); + if (mode == VariableMode::kTemporary) return false; if (is_catch_scope()) return true; - if ((is_script_scope() || is_eval_scope()) && - IsLexicalVariableMode(var->mode())) { - return true; + if (is_script_scope() || is_eval_scope()) { + if (IsLexicalVariableMode(mode) || + IsPrivateMethodOrAccessorVariableMode(mode)) { + return true; + } } return var->has_forced_context_allocation() || inner_scope_calls_eval_; } @@ -2248,9 +2269,9 @@ void Scope::AllocateVariablesRecursively() { scope->is_with_scope() || scope->is_module_scope() || scope->IsAsmModule() || scope->ForceContextForLanguageMode() || (scope->is_function_scope() && - scope->AsDeclarationScope()->calls_sloppy_eval()) || + scope->AsDeclarationScope()->sloppy_eval_can_extend_vars()) || (scope->is_block_scope() && scope->is_declaration_scope() && - scope->AsDeclarationScope()->calls_sloppy_eval()); + scope->AsDeclarationScope()->sloppy_eval_can_extend_vars()); // If we didn't allocate any locals in the local context, then we only // need the minimal number of slots if we must have a context. @@ -2326,15 +2347,28 @@ int Scope::ContextLocalCount() const { (is_function_var_in_context ? 
1 : 0); } -Variable* ClassScope::DeclarePrivateName( - const AstRawString* name, RequiresBrandCheckFlag requires_brand_check, - bool* was_added) { +bool IsComplementaryAccessorPair(VariableMode a, VariableMode b) { + switch (a) { + case VariableMode::kPrivateGetterOnly: + return b == VariableMode::kPrivateSetterOnly; + case VariableMode::kPrivateSetterOnly: + return b == VariableMode::kPrivateGetterOnly; + default: + return false; + } +} + +Variable* ClassScope::DeclarePrivateName(const AstRawString* name, + VariableMode mode, bool* was_added) { Variable* result = EnsureRareData()->private_name_map.Declare( - zone(), this, name, VariableMode::kConst, NORMAL_VARIABLE, + zone(), this, name, mode, NORMAL_VARIABLE, InitializationFlag::kNeedsInitialization, - MaybeAssignedFlag::kMaybeAssigned, requires_brand_check, was_added); + MaybeAssignedFlag::kMaybeAssigned, was_added); if (*was_added) { locals_.Add(result); + } else if (IsComplementaryAccessorPair(result->mode(), mode)) { + *was_added = true; + result->set_mode(VariableMode::kPrivateGetterAndSetter); } result->ForceContextAllocation(); return result; @@ -2416,22 +2450,20 @@ Variable* ClassScope::LookupPrivateNameInScopeInfo(const AstRawString* name) { VariableMode mode; InitializationFlag init_flag; MaybeAssignedFlag maybe_assigned_flag; - RequiresBrandCheckFlag requires_brand_check; - int index = - ScopeInfo::ContextSlotIndex(*scope_info_, name_handle, &mode, &init_flag, - &maybe_assigned_flag, &requires_brand_check); + int index = ScopeInfo::ContextSlotIndex(*scope_info_, name_handle, &mode, + &init_flag, &maybe_assigned_flag); if (index < 0) { return nullptr; } - DCHECK_EQ(mode, VariableMode::kConst); + DCHECK(IsConstVariableMode(mode)); DCHECK_EQ(init_flag, InitializationFlag::kNeedsInitialization); DCHECK_EQ(maybe_assigned_flag, MaybeAssignedFlag::kMaybeAssigned); // Add the found private name to the map to speed up subsequent // lookups for the same name. 
bool was_added; - Variable* var = DeclarePrivateName(name, requires_brand_check, &was_added); + Variable* var = DeclarePrivateName(name, mode, &was_added); DCHECK(was_added); var->AllocateTo(VariableLocation::CONTEXT, index); return var; @@ -2450,7 +2482,9 @@ Variable* ClassScope::LookupPrivateName(VariableProxy* proxy) { if (var == nullptr && !class_scope->scope_info_.is_null()) { var = class_scope->LookupPrivateNameInScopeInfo(proxy->raw_name()); } - return var; + if (var != nullptr) { + return var; + } } return nullptr; } diff --git a/deps/v8/src/ast/scopes.h b/deps/v8/src/ast/scopes.h index 932d5c70b937b8..73e6e8fd89755f 100644 --- a/deps/v8/src/ast/scopes.h +++ b/deps/v8/src/ast/scopes.h @@ -5,6 +5,7 @@ #ifndef V8_AST_SCOPES_H_ #define V8_AST_SCOPES_H_ +#include #include "src/ast/ast.h" #include "src/base/compiler-specific.h" #include "src/base/hashmap.h" @@ -13,6 +14,7 @@ #include "src/objects/function-kind.h" #include "src/objects/objects.h" #include "src/utils/pointer-with-payload.h" +#include "src/utils/utils.h" #include "src/zone/zone.h" namespace v8 { @@ -42,7 +44,6 @@ class VariableMap : public ZoneHashMap { VariableMode mode, VariableKind kind, InitializationFlag initialization_flag, MaybeAssignedFlag maybe_assigned_flag, - RequiresBrandCheckFlag requires_brand_check, bool* was_added); V8_EXPORT_PRIVATE Variable* Lookup(const AstRawString* name); @@ -111,8 +112,10 @@ class V8_EXPORT_PRIVATE Scope : public NON_EXPORTED_BASE(ZoneObject) { } void RestoreEvalFlag() { - outer_scope_and_calls_eval_->scope_calls_eval_ = - outer_scope_and_calls_eval_.GetPayload(); + if (outer_scope_and_calls_eval_.GetPayload()) { + // This recreates both calls_eval and sloppy_eval_can_extend_vars. + outer_scope_and_calls_eval_.GetPointer()->RecordEvalCall(); + } } void Reparent(DeclarationScope* new_parent); @@ -265,9 +268,7 @@ class V8_EXPORT_PRIVATE Scope : public NON_EXPORTED_BASE(ZoneObject) { // Inform the scope and outer scopes that the corresponding code contains an // eval call. - void RecordEvalCall() { - scope_calls_eval_ = true; - } + inline void RecordEvalCall(); void RecordInnerScopeEvalCall() { inner_scope_calls_eval_ = true; @@ -460,7 +461,7 @@ class V8_EXPORT_PRIVATE Scope : public NON_EXPORTED_BASE(ZoneObject) { int ContextChainLength(Scope* scope) const; // The number of contexts between this and the outermost context that has a - // sloppy eval call. One if this->calls_sloppy_eval(). + // sloppy eval call. One if this->sloppy_eval_can_extend_vars(). int ContextChainLengthUntilOutermostSloppyEval() const; // Find the closest class scope in the current scope and outer scopes. If no @@ -558,7 +559,7 @@ class V8_EXPORT_PRIVATE Scope : public NON_EXPORTED_BASE(ZoneObject) { MaybeAssignedFlag maybe_assigned_flag, bool* was_added) { Variable* result = variables_.Declare(zone, this, name, mode, kind, initialization_flag, - maybe_assigned_flag, kNoBrandCheck, was_added); + maybe_assigned_flag, was_added); if (*was_added) locals_.Add(result); return result; } @@ -610,7 +611,8 @@ class V8_EXPORT_PRIVATE Scope : public NON_EXPORTED_BASE(ZoneObject) { // list along the way, so full resolution cannot be done afterwards. 
void AnalyzePartially(DeclarationScope* max_outer_scope, AstNodeFactory* ast_node_factory, - UnresolvedList* new_unresolved_list); + UnresolvedList* new_unresolved_list, + bool maybe_in_arrowhead); void CollectNonLocals(DeclarationScope* max_outer_scope, Isolate* isolate, ParseInfo* info, Handle* non_locals); @@ -703,9 +705,11 @@ class V8_EXPORT_PRIVATE Scope : public NON_EXPORTED_BASE(ZoneObject) { // The language mode of this scope. STATIC_ASSERT(LanguageModeSize == 2); bool is_strict_ : 1; - // This scope or a nested catch scope or with scope contain an 'eval' call. At - // the 'eval' call site this scope is the declaration scope. - bool scope_calls_eval_ : 1; + // This scope contains an 'eval' call. + bool calls_eval_ : 1; + // The context associated with this scope can be extended by a sloppy eval + // called inside of it. + bool sloppy_eval_can_extend_vars_ : 1; // This scope's declarations might not be executed in order (e.g., switch). bool scope_nonlinear_ : 1; bool is_hidden_ : 1; @@ -753,11 +757,50 @@ class V8_EXPORT_PRIVATE DeclarationScope : public Scope { IsClassConstructor(function_kind()))); } - bool calls_sloppy_eval() const { - // TODO(delphick): Calculate this when setting and change the name of - // scope_calls_eval_. - return !is_script_scope() && scope_calls_eval_ && - is_sloppy(language_mode()); + // Inform the scope and outer scopes that the corresponding code contains an + // eval call. + void RecordDeclarationScopeEvalCall() { + calls_eval_ = true; + + // If this isn't a sloppy eval, we don't care about it. + if (language_mode() != LanguageMode::kSloppy) return; + + // Sloppy eval in script scopes can only introduce global variables anyway, + // so we don't care that it calls sloppy eval. + if (is_script_scope()) return; + + // Sloppy eval in a eval scope can only introduce variables into the outer + // (non-eval) declaration scope, not into this eval scope. + if (is_eval_scope()) { +#ifdef DEBUG + // One of three things must be true: + // 1. The outer non-eval declaration scope should already be marked as + // being extendable by sloppy eval, by the current sloppy eval rather + // than the inner one, + // 2. The outer non-eval declaration scope is a script scope and thus + // isn't extendable anyway, or + // 3. This is a debug evaluate and all bets are off. + DeclarationScope* outer_decl_scope = outer_scope()->GetDeclarationScope(); + while (outer_decl_scope->is_eval_scope()) { + outer_decl_scope = outer_decl_scope->GetDeclarationScope(); + } + if (outer_decl_scope->is_debug_evaluate_scope()) { + // Don't check anything. + // TODO(9662): Figure out where variables declared by an eval inside a + // debug-evaluate actually go. + } else if (!outer_decl_scope->is_script_scope()) { + DCHECK(outer_decl_scope->sloppy_eval_can_extend_vars_); + } +#endif + + return; + } + + sloppy_eval_can_extend_vars_ = true; + } + + bool sloppy_eval_can_extend_vars() const { + return sloppy_eval_can_extend_vars_; } bool was_lazily_parsed() const { return was_lazily_parsed_; } @@ -972,7 +1015,8 @@ class V8_EXPORT_PRIVATE DeclarationScope : public Scope { // this records variables which cannot be resolved inside the Scope (we don't // yet know what they will resolve to since the outer Scopes are incomplete) // and recreates them with the correct Zone with ast_node_factory. 
- void AnalyzePartially(Parser* parser, AstNodeFactory* ast_node_factory); + void AnalyzePartially(Parser* parser, AstNodeFactory* ast_node_factory, + bool maybe_in_arrowhead); // Allocate ScopeInfos for top scope and any inner scopes that need them. // Does nothing if ScopeInfo is already allocated. @@ -1138,13 +1182,21 @@ class V8_EXPORT_PRIVATE DeclarationScope : public Scope { RareData* rare_data_ = nullptr; }; +void Scope::RecordEvalCall() { + calls_eval_ = true; + GetDeclarationScope()->RecordDeclarationScopeEvalCall(); + RecordInnerScopeEvalCall(); +} + Scope::Snapshot::Snapshot(Scope* scope) - : outer_scope_and_calls_eval_(scope, scope->scope_calls_eval_), + : outer_scope_and_calls_eval_(scope, scope->calls_eval_), top_inner_scope_(scope->inner_scope_), top_unresolved_(scope->unresolved_list_.end()), top_local_(scope->GetClosureScope()->locals_.end()) { // Reset in order to record eval calls during this Snapshot's lifetime. - outer_scope_and_calls_eval_.GetPointer()->scope_calls_eval_ = false; + outer_scope_and_calls_eval_.GetPointer()->calls_eval_ = false; + outer_scope_and_calls_eval_.GetPointer()->sloppy_eval_can_extend_vars_ = + false; } class ModuleScope final : public DeclarationScope { @@ -1175,8 +1227,7 @@ class V8_EXPORT_PRIVATE ClassScope : public Scope { // Declare a private name in the private name map and add it to the // local variables of this scope. - Variable* DeclarePrivateName(const AstRawString* name, - RequiresBrandCheckFlag requires_brand_check, + Variable* DeclarePrivateName(const AstRawString* name, VariableMode mode, bool* was_added); void AddUnresolvedPrivateName(VariableProxy* proxy); diff --git a/deps/v8/src/ast/variables.h b/deps/v8/src/ast/variables.h index 7805fa20c8c8f6..1ff6f9f4228375 100644 --- a/deps/v8/src/ast/variables.h +++ b/deps/v8/src/ast/variables.h @@ -21,8 +21,7 @@ class Variable final : public ZoneObject { public: Variable(Scope* scope, const AstRawString* name, VariableMode mode, VariableKind kind, InitializationFlag initialization_flag, - MaybeAssignedFlag maybe_assigned_flag = kNotAssigned, - RequiresBrandCheckFlag requires_brand_check = kNoBrandCheck) + MaybeAssignedFlag maybe_assigned_flag = kNotAssigned) : scope_(scope), name_(name), local_if_not_shadowed_(nullptr), @@ -32,7 +31,6 @@ class Variable final : public ZoneObject { bit_field_(MaybeAssignedFlagField::encode(maybe_assigned_flag) | InitializationFlagField::encode(initialization_flag) | VariableModeField::encode(mode) | - RequiresBrandCheckField::encode(requires_brand_check) | IsUsedField::encode(false) | ForceContextAllocationField::encode(false) | ForceHoleInitializationField::encode(false) | @@ -58,6 +56,9 @@ class Variable final : public ZoneObject { Handle name() const { return name_->string(); } const AstRawString* raw_name() const { return name_; } VariableMode mode() const { return VariableModeField::decode(bit_field_); } + void set_mode(VariableMode mode) { + bit_field_ = VariableModeField::update(bit_field_, mode); + } bool has_forced_context_allocation() const { return ForceContextAllocationField::decode(bit_field_); } @@ -72,6 +73,8 @@ class Variable final : public ZoneObject { return MaybeAssignedFlagField::decode(bit_field_); } void SetMaybeAssigned() { + if (mode() == VariableMode::kConst) return; + // If this variable is dynamically shadowing another variable, then that // variable could also be assigned (in the non-shadowing case). 
if (has_local_if_not_shadowed()) { @@ -80,22 +83,14 @@ class Variable final : public ZoneObject { if (!maybe_assigned()) { local_if_not_shadowed()->SetMaybeAssigned(); } - DCHECK(local_if_not_shadowed()->maybe_assigned()); + DCHECK_IMPLIES(local_if_not_shadowed()->mode() != VariableMode::kConst, + local_if_not_shadowed()->maybe_assigned()); } set_maybe_assigned(); } - RequiresBrandCheckFlag get_requires_brand_check_flag() const { - return RequiresBrandCheckField::decode(bit_field_); - } - bool requires_brand_check() const { - return get_requires_brand_check_flag() == kRequiresBrandCheck; - } - - void set_requires_brand_check() { - bit_field_ = - RequiresBrandCheckField::update(bit_field_, kRequiresBrandCheck); + return IsPrivateMethodOrAccessorVariableMode(mode()); } int initializer_position() { return initializer_position_; } @@ -125,7 +120,8 @@ class Variable final : public ZoneObject { // declaration time. Only returns valid results after scope analysis. bool binding_needs_init() const { DCHECK_IMPLIES(initialization_flag() == kNeedsInitialization, - IsLexicalVariableMode(mode())); + IsLexicalVariableMode(mode()) || + IsPrivateMethodOrAccessorVariableMode(mode())); DCHECK_IMPLIES(ForceHoleInitializationField::decode(bit_field_), initialization_flag() == kNeedsInitialization); @@ -149,7 +145,8 @@ class Variable final : public ZoneObject { // be required at runtime. void ForceHoleInitialization() { DCHECK_EQ(kNeedsInitialization, initialization_flag()); - DCHECK(IsLexicalVariableMode(mode())); + DCHECK(IsLexicalVariableMode(mode()) || + IsPrivateMethodOrAccessorVariableMode(mode())); bit_field_ = ForceHoleInitializationField::update(bit_field_, true); } @@ -243,25 +240,16 @@ class Variable final : public ZoneObject { bit_field_ = MaybeAssignedFlagField::update(bit_field_, kMaybeAssigned); } - class VariableModeField : public BitField16 {}; - class VariableKindField - : public BitField16 {}; - class LocationField - : public BitField16 {}; - class ForceContextAllocationField - : public BitField16 {}; - class IsUsedField - : public BitField16 {}; - class InitializationFlagField - : public BitField16 {}; - class ForceHoleInitializationField - : public BitField16 {}; - class MaybeAssignedFlagField - : public BitField16 {}; - class RequiresBrandCheckField - : public BitField16 {}; + using VariableModeField = BitField16; + using VariableKindField = VariableModeField::Next; + using LocationField = VariableKindField::Next; + using ForceContextAllocationField = LocationField::Next; + using IsUsedField = ForceContextAllocationField::Next; + using InitializationFlagField = IsUsedField::Next; + using ForceHoleInitializationField = InitializationFlagField::Next; + using MaybeAssignedFlagField = + ForceHoleInitializationField::Next; + Variable** next() { return &next_; } friend List; friend base::ThreadedListTraits; diff --git a/deps/v8/src/base/address-region.h b/deps/v8/src/base/address-region.h index 1fdc479f6f3e93..0f4809f9e81574 100644 --- a/deps/v8/src/base/address-region.h +++ b/deps/v8/src/base/address-region.h @@ -45,6 +45,13 @@ class AddressRegion { return contains(region.address_, region.size_); } + base::AddressRegion GetOverlap(AddressRegion region) const { + Address overlap_start = std::max(begin(), region.begin()); + Address overlap_end = + std::max(overlap_start, std::min(end(), region.end())); + return {overlap_start, overlap_end - overlap_start}; + } + bool operator==(AddressRegion other) const { return address_ == other.address_ && size_ == other.size_; } diff --git 
a/deps/v8/src/base/flags.h b/deps/v8/src/base/flags.h index 055f0ff498484f..c2b7952260a5b5 100644 --- a/deps/v8/src/base/flags.h +++ b/deps/v8/src/base/flags.h @@ -53,13 +53,13 @@ class Flags final { } constexpr Flags operator&(const Flags& flags) const { - return Flags(*this) &= flags; + return Flags(mask_ & flags.mask_); } constexpr Flags operator|(const Flags& flags) const { - return Flags(*this) |= flags; + return Flags(mask_ | flags.mask_); } constexpr Flags operator^(const Flags& flags) const { - return Flags(*this) ^= flags; + return Flags(mask_ ^ flags.mask_); } Flags& operator&=(flag_type flag) { return operator&=(Flags(flag)); } diff --git a/deps/v8/src/base/page-allocator.cc b/deps/v8/src/base/page-allocator.cc index b339f528d2b9d3..76a0aff39953a4 100644 --- a/deps/v8/src/base/page-allocator.cc +++ b/deps/v8/src/base/page-allocator.cc @@ -36,9 +36,9 @@ void* PageAllocator::GetRandomMmapAddr() { return base::OS::GetRandomMmapAddr(); } -void* PageAllocator::AllocatePages(void* address, size_t size, size_t alignment, +void* PageAllocator::AllocatePages(void* hint, size_t size, size_t alignment, PageAllocator::Permission access) { - return base::OS::Allocate(address, size, alignment, + return base::OS::Allocate(hint, size, alignment, static_cast(access)); } diff --git a/deps/v8/src/base/page-allocator.h b/deps/v8/src/base/page-allocator.h index ced1156ccaee38..2b8ee1a5e5e740 100644 --- a/deps/v8/src/base/page-allocator.h +++ b/deps/v8/src/base/page-allocator.h @@ -26,7 +26,7 @@ class V8_BASE_EXPORT PageAllocator void* GetRandomMmapAddr() override; - void* AllocatePages(void* address, size_t size, size_t alignment, + void* AllocatePages(void* hint, size_t size, size_t alignment, PageAllocator::Permission access) override; bool FreePages(void* address, size_t size) override; diff --git a/deps/v8/src/base/platform/mutex.h b/deps/v8/src/base/platform/mutex.h index 2b8b55eeb5f247..c48cf8d3393c12 100644 --- a/deps/v8/src/base/platform/mutex.h +++ b/deps/v8/src/base/platform/mutex.h @@ -67,6 +67,8 @@ class V8_BASE_EXPORT Mutex final { return native_handle_; } + V8_INLINE void AssertHeld() { DCHECK_EQ(1, level_); } + private: NativeHandle native_handle_; #ifdef DEBUG diff --git a/deps/v8/src/base/platform/platform-cygwin.cc b/deps/v8/src/base/platform/platform-cygwin.cc index 17f9aa3f17e5cb..92a5fbe490f4c3 100644 --- a/deps/v8/src/base/platform/platform-cygwin.cc +++ b/deps/v8/src/base/platform/platform-cygwin.cc @@ -95,13 +95,13 @@ double LocalTimeOffset(double time_ms, bool is_utc) { } // static -void* OS::Allocate(void* address, size_t size, size_t alignment, +void* OS::Allocate(void* hint, size_t size, size_t alignment, MemoryPermission access) { size_t page_size = AllocatePageSize(); DCHECK_EQ(0, size % page_size); DCHECK_EQ(0, alignment % page_size); DCHECK_LE(page_size, alignment); - address = AlignedAddress(address, alignment); + hint = AlignedAddress(hint, alignment); DWORD flags = (access == OS::MemoryPermission::kNoAccess) ? MEM_RESERVE @@ -109,7 +109,7 @@ void* OS::Allocate(void* address, size_t size, size_t alignment, DWORD protect = GetProtectionFromMemoryPermission(access); // First, try an exact size aligned allocation. - uint8_t* base = RandomizedVirtualAlloc(size, flags, protect, address); + uint8_t* base = RandomizedVirtualAlloc(size, flags, protect, hint); if (base == nullptr) return nullptr; // Can't allocate, we're OOM. // If address is suitably aligned, we're done. 
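The flags.h hunk above builds each result directly from the combined masks instead of returning Flags(*this) &= flags; the compound-assignment operators are not constexpr, so the old form could not be evaluated in a constant expression. A minimal sketch of the pattern with an invented mask type, not the real Flags template:

class BitFlags {
 public:
  constexpr explicit BitFlags(unsigned mask) : mask_(mask) {}
  constexpr BitFlags operator&(BitFlags other) const {
    return BitFlags(mask_ & other.mask_);
  }
  constexpr BitFlags operator|(BitFlags other) const {
    return BitFlags(mask_ | other.mask_);
  }
  constexpr BitFlags operator^(BitFlags other) const {
    return BitFlags(mask_ ^ other.mask_);
  }
  constexpr unsigned mask() const { return mask_; }

 private:
  unsigned mask_;
};

// Usable in constant expressions because each operator only constructs a
// new value from the combined masks.
static_assert((BitFlags(0x6) & BitFlags(0x3)).mask() == 0x2, "and");
static_assert((BitFlags(0x6) | BitFlags(0x3)).mask() == 0x7, "or");
static_assert((BitFlags(0x6) ^ BitFlags(0x3)).mask() == 0x5, "xor");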
@@ -120,7 +120,7 @@ void* OS::Allocate(void* address, size_t size, size_t alignment, CHECK(Free(base, size)); // Clear the hint. It's unlikely we can allocate at this address. - address = nullptr; + hint = nullptr; // Add the maximum misalignment so we are guaranteed an aligned base address // in the allocated region. @@ -128,7 +128,7 @@ void* OS::Allocate(void* address, size_t size, size_t alignment, const int kMaxAttempts = 3; aligned_base = nullptr; for (int i = 0; i < kMaxAttempts; ++i) { - base = RandomizedVirtualAlloc(padded_size, flags, protect, address); + base = RandomizedVirtualAlloc(padded_size, flags, protect, hint); if (base == nullptr) return nullptr; // Can't allocate, we're OOM. // Try to trim the allocation by freeing the padded allocation and then diff --git a/deps/v8/src/base/platform/platform-freebsd.cc b/deps/v8/src/base/platform/platform-freebsd.cc index d40452f3d48417..8d63dd2f84df34 100644 --- a/deps/v8/src/base/platform/platform-freebsd.cc +++ b/deps/v8/src/base/platform/platform-freebsd.cc @@ -13,10 +13,12 @@ #include #include #include +#include #include // open #include // mmap & munmap #include // open +#include #include // getpagesize // If you don't have execinfo.h then you need devel/libexecinfo from ports. #include @@ -46,41 +48,47 @@ static unsigned StringToLong(char* buffer) { std::vector OS::GetSharedLibraryAddresses() { std::vector result; - static const int MAP_LENGTH = 1024; - int fd = open("/proc/self/maps", O_RDONLY); - if (fd < 0) return result; - while (true) { - char addr_buffer[11]; - addr_buffer[0] = '0'; - addr_buffer[1] = 'x'; - addr_buffer[10] = 0; - ssize_t bytes_read = read(fd, addr_buffer + 2, 8); - if (bytes_read < 8) break; - unsigned start = StringToLong(addr_buffer); - bytes_read = read(fd, addr_buffer + 2, 1); - if (bytes_read < 1) break; - if (addr_buffer[2] != '-') break; - bytes_read = read(fd, addr_buffer + 2, 8); - if (bytes_read < 8) break; - unsigned end = StringToLong(addr_buffer); - char buffer[MAP_LENGTH]; - bytes_read = -1; - do { - bytes_read++; - if (bytes_read >= MAP_LENGTH - 1) break; - bytes_read = read(fd, buffer + bytes_read, 1); - if (bytes_read < 1) break; - } while (buffer[bytes_read] != '\n'); - buffer[bytes_read] = 0; - // Ignore mappings that are not executable. - if (buffer[3] != 'x') continue; - char* start_of_path = index(buffer, '/'); - // There may be no filename in this line. Skip to next. - if (start_of_path == nullptr) continue; - buffer[bytes_read] = 0; - result.push_back(SharedLibraryAddress(start_of_path, start, end)); + int mib[4] = {CTL_KERN, KERN_PROC, KERN_PROC_VMMAP, getpid()}; + size_t miblen = sizeof(mib) / sizeof(mib[0]); + size_t buffer_size; + if (sysctl(mib, miblen, nullptr, &buffer_size, nullptr, 0) == 0) { + // Overallocate the buffer by 1/3 to account for concurrent + // kinfo_vmentry change. 1/3 is an arbitrary constant that + // works in practice. 
+ buffer_size = buffer_size * 4 / 3; + std::vector buffer(buffer_size); + int ret = sysctl(mib, miblen, buffer.data(), &buffer_size, nullptr, 0); + + if (ret == 0 || (ret == -1 && errno == ENOMEM)) { + char* start = buffer.data(); + char* end = start + buffer_size; + + while (start < end) { + struct kinfo_vmentry* map = + reinterpret_cast(start); + const size_t ssize = map->kve_structsize; + char* path = map->kve_path; + + CHECK_NE(0, ssize); + + if ((map->kve_protection & KVME_PROT_READ) != 0 && + (map->kve_protection & KVME_PROT_EXEC) != 0 && path[0] != '\0') { + char* sep = strrchr(path, '/'); + std::string lib_name; + if (sep != nullptr) { + lib_name = std::string(++sep); + } else { + lib_name = std::string(path); + } + result.push_back(SharedLibraryAddress( + lib_name, reinterpret_cast(map->kve_start), + reinterpret_cast(map->kve_end))); + } + + start += ssize; + } + } } - close(fd); return result; } diff --git a/deps/v8/src/base/platform/platform-posix.cc b/deps/v8/src/base/platform/platform-posix.cc index 6da83d7e0208a3..c50cdd7a98eefd 100644 --- a/deps/v8/src/base/platform/platform-posix.cc +++ b/deps/v8/src/base/platform/platform-posix.cc @@ -137,10 +137,10 @@ int GetFlagsForMemoryPermission(OS::MemoryPermission access) { return flags; } -void* Allocate(void* address, size_t size, OS::MemoryPermission access) { +void* Allocate(void* hint, size_t size, OS::MemoryPermission access) { int prot = GetProtectionFromMemoryPermission(access); int flags = GetFlagsForMemoryPermission(access); - void* result = mmap(address, size, prot, flags, kMmapFd, kMmapFdOffset); + void* result = mmap(hint, size, prot, flags, kMmapFd, kMmapFdOffset); if (result == MAP_FAILED) return nullptr; return result; } @@ -278,16 +278,16 @@ void* OS::GetRandomMmapAddr() { // TODO(bbudge) Move Cygwin and Fuchsia stuff into platform-specific files. #if !V8_OS_CYGWIN && !V8_OS_FUCHSIA // static -void* OS::Allocate(void* address, size_t size, size_t alignment, +void* OS::Allocate(void* hint, size_t size, size_t alignment, MemoryPermission access) { size_t page_size = AllocatePageSize(); DCHECK_EQ(0, size % page_size); DCHECK_EQ(0, alignment % page_size); - address = AlignedAddress(address, alignment); + hint = AlignedAddress(hint, alignment); // Add the maximum misalignment so we are guaranteed an aligned base address. size_t request_size = size + (alignment - page_size); request_size = RoundUp(request_size, OS::AllocatePageSize()); - void* result = base::Allocate(address, request_size, access); + void* result = base::Allocate(hint, request_size, access); if (result == nullptr) return nullptr; // Unmap memory allocated before the aligned base address. 
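OS::Allocate above over-allocates by (alignment - page_size) and then trims the mapping around an aligned base address. A self-contained POSIX sketch of that technique, with illustrative names and the assumptions that size and alignment are multiples of the page size, that alignment is at least the page size, and that MAP_ANONYMOUS is available:

#include <sys/mman.h>
#include <cstddef>
#include <cstdint>

void* AllocateAlignedPages(size_t size, size_t alignment, size_t page_size) {
  // Pad the request so that some alignment-aligned address is guaranteed to
  // fall inside the mapping.
  size_t request = size + (alignment - page_size);
  void* raw = mmap(nullptr, request, PROT_READ | PROT_WRITE,
                   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  if (raw == MAP_FAILED) return nullptr;

  uintptr_t base = reinterpret_cast<uintptr_t>(raw);
  uintptr_t align = static_cast<uintptr_t>(alignment);
  uintptr_t aligned = (base + align - 1) & ~(align - 1);

  // Unmap the unaligned prefix and the unused suffix; POSIX allows
  // unmapping part of an existing mapping at page granularity.
  if (aligned != base) munmap(raw, aligned - base);
  size_t tail = request - (aligned - base) - size;
  if (tail != 0) munmap(reinterpret_cast<void*>(aligned + size), tail);
  return reinterpret_cast<void*>(aligned);
}

The Windows and Cygwin paths earlier in this patch cannot release part of a VirtualAlloc region the same way, which is why they free the padded allocation and retry up to a small number of attempts instead.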
@@ -761,13 +761,12 @@ void Thread::set_name(const char* name) { name_[sizeof(name_) - 1] = '\0'; } - -void Thread::Start() { +bool Thread::Start() { int result; pthread_attr_t attr; memset(&attr, 0, sizeof(attr)); result = pthread_attr_init(&attr); - DCHECK_EQ(0, result); + if (result != 0) return false; size_t stack_size = stack_size_; if (stack_size == 0) { #if V8_OS_MACOSX @@ -780,17 +779,17 @@ void Thread::Start() { } if (stack_size > 0) { result = pthread_attr_setstacksize(&attr, stack_size); - DCHECK_EQ(0, result); + if (result != 0) return pthread_attr_destroy(&attr), false; } { MutexGuard lock_guard(&data_->thread_creation_mutex_); result = pthread_create(&data_->thread_, &attr, ThreadEntry, this); + if (result != 0 || data_->thread_ == kNoThread) { + return pthread_attr_destroy(&attr), false; + } } - DCHECK_EQ(0, result); result = pthread_attr_destroy(&attr); - DCHECK_EQ(0, result); - DCHECK_NE(data_->thread_, kNoThread); - USE(result); + return result == 0; } void Thread::Join() { pthread_join(data_->thread_, nullptr); } diff --git a/deps/v8/src/base/platform/platform-win32.cc b/deps/v8/src/base/platform/platform-win32.cc index d01b1c07fe1926..04ef8a30f229bd 100644 --- a/deps/v8/src/base/platform/platform-win32.cc +++ b/deps/v8/src/base/platform/platform-win32.cc @@ -798,13 +798,13 @@ uint8_t* RandomizedVirtualAlloc(size_t size, DWORD flags, DWORD protect, } // namespace // static -void* OS::Allocate(void* address, size_t size, size_t alignment, +void* OS::Allocate(void* hint, size_t size, size_t alignment, MemoryPermission access) { size_t page_size = AllocatePageSize(); DCHECK_EQ(0, size % page_size); DCHECK_EQ(0, alignment % page_size); DCHECK_LE(page_size, alignment); - address = AlignedAddress(address, alignment); + hint = AlignedAddress(hint, alignment); DWORD flags = (access == OS::MemoryPermission::kNoAccess) ? MEM_RESERVE @@ -812,7 +812,7 @@ void* OS::Allocate(void* address, size_t size, size_t alignment, DWORD protect = GetProtectionFromMemoryPermission(access); // First, try an exact size aligned allocation. - uint8_t* base = RandomizedVirtualAlloc(size, flags, protect, address); + uint8_t* base = RandomizedVirtualAlloc(size, flags, protect, hint); if (base == nullptr) return nullptr; // Can't allocate, we're OOM. // If address is suitably aligned, we're done. @@ -824,7 +824,7 @@ void* OS::Allocate(void* address, size_t size, size_t alignment, CHECK(Free(base, size)); // Clear the hint. It's unlikely we can allocate at this address. - address = nullptr; + hint = nullptr; // Add the maximum misalignment so we are guaranteed an aligned base address // in the allocated region. @@ -832,7 +832,7 @@ void* OS::Allocate(void* address, size_t size, size_t alignment, const int kMaxAttempts = 3; aligned_base = nullptr; for (int i = 0; i < kMaxAttempts; ++i) { - base = RandomizedVirtualAlloc(padded_size, flags, protect, address); + base = RandomizedVirtualAlloc(padded_size, flags, protect, hint); if (base == nullptr) return nullptr; // Can't allocate, we're OOM. // Try to trim the allocation by freeing the padded allocation and then @@ -1352,13 +1352,13 @@ Thread::~Thread() { // Create a new thread. It is important to use _beginthreadex() instead of // the Win32 function CreateThread(), because the CreateThread() does not // initialize thread specific structures in the C runtime library. 
-void Thread::Start() { - data_->thread_ = reinterpret_cast( - _beginthreadex(nullptr, static_cast(stack_size_), ThreadEntry, - this, 0, &data_->thread_id_)); +bool Thread::Start() { + uintptr_t result = _beginthreadex(nullptr, static_cast(stack_size_), + ThreadEntry, this, 0, &data_->thread_id_); + data_->thread_ = reinterpret_cast(result); + return result != 0; } - // Wait for thread to terminate. void Thread::Join() { if (data_->thread_id_ != GetCurrentThreadId()) { diff --git a/deps/v8/src/base/platform/platform.h b/deps/v8/src/base/platform/platform.h index e073704b2c1ace..e1f84043eb8a73 100644 --- a/deps/v8/src/base/platform/platform.h +++ b/deps/v8/src/base/platform/platform.h @@ -333,15 +333,16 @@ class V8_BASE_EXPORT Thread { virtual ~Thread(); // Start new thread by calling the Run() method on the new thread. - void Start(); + V8_WARN_UNUSED_RESULT bool Start(); // Start new thread and wait until Run() method is called on the new thread. - void StartSynchronously() { + bool StartSynchronously() { start_semaphore_ = new Semaphore(0); - Start(); + if (!Start()) return false; start_semaphore_->Wait(); delete start_semaphore_; start_semaphore_ = nullptr; + return true; } // Wait until thread terminates. diff --git a/deps/v8/src/base/platform/time.cc b/deps/v8/src/base/platform/time.cc index ecbd70fd743387..f07fd8e595fc60 100644 --- a/deps/v8/src/base/platform/time.cc +++ b/deps/v8/src/base/platform/time.cc @@ -70,6 +70,9 @@ V8_INLINE int64_t ClockNow(clockid_t clk_id) { #if defined(V8_OS_AIX) thread_cputime_t tc; if (clk_id == CLOCK_THREAD_CPUTIME_ID) { +#if defined(__PASE__) // CLOCK_THREAD_CPUTIME_ID clock not supported on IBMi + return 0; +#endif if (thread_cputime(-1, &tc) != 0) { UNREACHABLE(); } diff --git a/deps/v8/src/base/utils/random-number-generator.cc b/deps/v8/src/base/utils/random-number-generator.cc index 3b38858192970e..17c2cced8a452b 100644 --- a/deps/v8/src/base/utils/random-number-generator.cc +++ b/deps/v8/src/base/utils/random-number-generator.cc @@ -51,6 +51,13 @@ RandomNumberGenerator::RandomNumberGenerator() { result = rand_s(&second_half); DCHECK_EQ(0, result); SetSeed((static_cast(first_half) << 32) + second_half); +#elif V8_OS_MACOSX + // Despite its prefix suggests it is not RC4 algorithm anymore. + // It always succeeds while having decent performance and + // no file descriptor involved. + int64_t seed; + arc4random_buf(&seed, sizeof(seed)); + SetSeed(seed); #else // Gather entropy from /dev/urandom if available. 
FILE* fp = fopen("/dev/urandom", "rb"); diff --git a/deps/v8/src/builtins/OWNERS b/deps/v8/src/builtins/OWNERS index 450423f87850ba..f52e1c9ca8effc 100644 --- a/deps/v8/src/builtins/OWNERS +++ b/deps/v8/src/builtins/OWNERS @@ -1,3 +1,3 @@ -file://COMMON_OWNERS +file:../../COMMON_OWNERS # COMPONENT: Blink>JavaScript>Runtime diff --git a/deps/v8/src/builtins/arguments.tq b/deps/v8/src/builtins/arguments.tq index 6df5f801a3945a..18d6c23b3d9a38 100644 --- a/deps/v8/src/builtins/arguments.tq +++ b/deps/v8/src/builtins/arguments.tq @@ -8,7 +8,7 @@ struct Arguments { const length: intptr; } -extern operator '[]' macro GetArgumentValue(Arguments, intptr): Object; +extern operator '[]' macro GetArgumentValue(Arguments, intptr): JSAny; extern macro GetFrameArguments(FrameWithArguments, intptr): Arguments; diff --git a/deps/v8/src/builtins/arm/builtins-arm.cc b/deps/v8/src/builtins/arm/builtins-arm.cc index 9b9956b0fbba0a..e9b562620fcee5 100644 --- a/deps/v8/src/builtins/arm/builtins-arm.cc +++ b/deps/v8/src/builtins/arm/builtins-arm.cc @@ -90,12 +90,24 @@ static void GenerateTailCallToReturnedCode(MacroAssembler* masm, namespace { +void LoadRealStackLimit(MacroAssembler* masm, Register destination) { + DCHECK(masm->root_array_available()); + Isolate* isolate = masm->isolate(); + ExternalReference limit = ExternalReference::address_of_real_jslimit(isolate); + DCHECK(TurboAssembler::IsAddressableThroughRootRegister(isolate, limit)); + + intptr_t offset = + TurboAssembler::RootRegisterOffsetForExternalReference(isolate, limit); + CHECK(is_int32(offset)); + __ ldr(destination, MemOperand(kRootRegister, offset)); +} + void Generate_StackOverflowCheck(MacroAssembler* masm, Register num_args, Register scratch, Label* stack_overflow) { // Check the stack for overflow. We are not trying to catch // interruptions (e.g. debug break and preemption) here, so the "real stack // limit" is checked. - __ LoadRoot(scratch, RootIndex::kRealStackLimit); + LoadRealStackLimit(masm, scratch); // Make scratch the space we have left. The stack might already be overflowed // here which will cause scratch to become negative. __ sub(scratch, sp, scratch); @@ -428,7 +440,8 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) { // Check the stack for overflow. We are not trying to catch interruptions // (i.e. debug break and preemption) here, so check the "real stack limit". Label stack_overflow; - __ CompareRoot(sp, RootIndex::kRealStackLimit); + LoadRealStackLimit(masm, scratch); + __ cmp(sp, scratch); __ b(lo, &stack_overflow); // Push receiver. @@ -1116,7 +1129,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) { // Do a stack check to ensure we don't go over the limit. Label ok; __ sub(r9, sp, Operand(r4)); - __ LoadRoot(r2, RootIndex::kRealStackLimit); + LoadRealStackLimit(masm, r2); __ cmp(r9, Operand(r2)); __ b(hs, &ok); __ CallRuntime(Runtime::kThrowStackOverflow); @@ -2089,7 +2102,7 @@ void Generate_PushBoundArguments(MacroAssembler* masm) { // Compute the space we have left. The stack might already be overflowed // here which will cause remaining_stack_size to become negative. - __ LoadRoot(remaining_stack_size, RootIndex::kRealStackLimit); + LoadRealStackLimit(masm, remaining_stack_size); __ sub(remaining_stack_size, sp, remaining_stack_size); // Check if the arguments will overflow the stack. 
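The builtins-arm.cc hunks above swap LoadRoot(kRealStackLimit) for LoadRealStackLimit and then check whether the requested number of argument slots still fits between sp and that limit. A plain C++ model of the arithmetic (not the generated assembly) is sketched below; the function and parameter names are illustrative.

#include <cstddef>
#include <cstdint>
#include <cstdio>

// Returns true if pushing `num_args` pointer-sized slots would cross the
// real (non-interrupt) stack limit. The stack may already be overflowed,
// i.e. sp may already be at or below the limit.
bool WouldOverflowStack(uintptr_t sp, uintptr_t real_limit, size_t num_args,
                        size_t slot_size = sizeof(void*)) {
  if (sp <= real_limit) return true;            // Already overflowed.
  const uintptr_t remaining = sp - real_limit;  // Space left on the stack.
  return remaining / slot_size < num_args;
}

int main() {
  const uintptr_t limit = 0x1000;
  std::printf("%d\n", WouldOverflowStack(0x2000, limit, 100));  // 0: fits
  std::printf("%d\n", WouldOverflowStack(0x1008, limit, 100));  // 1: overflow
  return 0;
}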
@@ -2517,7 +2530,10 @@ void Builtins::Generate_WasmCompileLazy(MacroAssembler* masm) { __ push(kWasmCompileLazyFuncIndexRegister); // Load the correct CEntry builtin from the instance object. __ ldr(r2, FieldMemOperand(kWasmInstanceRegister, - WasmInstanceObject::kCEntryStubOffset)); + WasmInstanceObject::kIsolateRootOffset)); + auto centry_id = + Builtins::kCEntry_Return1_DontSaveFPRegs_ArgvOnStack_NoBuiltinExit; + __ ldr(r2, MemOperand(r2, IsolateData::builtin_slot_offset(centry_id))); // Initialize the JavaScript context with 0. CEntry will use it to // set the current context on the isolate. __ Move(cp, Smi::zero()); diff --git a/deps/v8/src/builtins/arm64/builtins-arm64.cc b/deps/v8/src/builtins/arm64/builtins-arm64.cc index bcee8f0b5dcbbe..4e159a69b7ede8 100644 --- a/deps/v8/src/builtins/arm64/builtins-arm64.cc +++ b/deps/v8/src/builtins/arm64/builtins-arm64.cc @@ -24,6 +24,10 @@ #include "src/runtime/runtime.h" #include "src/wasm/wasm-objects.h" +#if defined(V8_OS_WIN) +#include "src/diagnostics/unwinding-info-win64.h" +#endif // V8_OS_WIN + namespace v8 { namespace internal { @@ -85,6 +89,17 @@ static void GenerateTailCallToReturnedCode(MacroAssembler* masm, namespace { +void LoadRealStackLimit(MacroAssembler* masm, Register destination) { + DCHECK(masm->root_array_available()); + Isolate* isolate = masm->isolate(); + ExternalReference limit = ExternalReference::address_of_real_jslimit(isolate); + DCHECK(TurboAssembler::IsAddressableThroughRootRegister(isolate, limit)); + + intptr_t offset = + TurboAssembler::RootRegisterOffsetForExternalReference(isolate, limit); + __ Ldr(destination, MemOperand(kRootRegister, offset)); +} + void Generate_StackOverflowCheck(MacroAssembler* masm, Register num_args, Label* stack_overflow) { UseScratchRegisterScope temps(masm); @@ -94,7 +109,7 @@ void Generate_StackOverflowCheck(MacroAssembler* masm, Register num_args, // We are not trying to catch interruptions (e.g. debug break and // preemption) here, so the "real stack limit" is checked. - __ LoadRoot(scratch, RootIndex::kRealStackLimit); + LoadRealStackLimit(masm, scratch); // Make scratch the space we have left. The stack might already be overflowed // here which will cause scratch to become negative. __ Sub(scratch, sp, scratch); @@ -476,7 +491,8 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) { // Check the stack for overflow. We are not trying to catch interruptions // (i.e. debug break and preemption) here, so check the "real stack limit". Label stack_overflow; - __ CompareRoot(sp, RootIndex::kRealStackLimit); + LoadRealStackLimit(masm, x10); + __ Cmp(sp, x10); __ B(lo, &stack_overflow); // Get number of arguments for generator function. @@ -623,6 +639,23 @@ void Generate_JSEntryVariant(MacroAssembler* masm, StackFrame::Type type, // will have no effect on the model or real hardware. __ EnableInstrumentation(); +#if defined(V8_OS_WIN) + // Windows ARM64 relies on a frame pointer (fp/x29 which are aliases to each + // other) chain to do stack unwinding, but JSEntry breaks that by setting fp + // to point to bad_frame_pointer below. To fix unwind information for this + // case, JSEntry registers the offset (from current fp to the caller's fp + // saved by PushCalleeSavedRegisters on stack) to xdata_encoder which then + // emits the offset value as part of result unwind data accordingly. The + // current offset is kFramePointerOffset which includes bad_frame_pointer + // saved below plus kFramePointerOffsetInPushCalleeSavedRegisters. 
+ const int kFramePointerOffset = + kFramePointerOffsetInPushCalleeSavedRegisters + kSystemPointerSize; + win64_unwindinfo::XdataEncoder* xdata_encoder = masm->GetXdataEncoder(); + if (xdata_encoder) { + xdata_encoder->onFramePointerAdjustment(kFramePointerOffset); + } +#endif + __ PushCalleeSavedRegisters(); // Set up the reserved register for 0.0. @@ -1223,7 +1256,12 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) { // Do a stack check to ensure we don't go over the limit. Label ok; __ Sub(x10, sp, Operand(x11)); - __ CompareRoot(x10, RootIndex::kRealStackLimit); + { + UseScratchRegisterScope temps(masm); + Register scratch = temps.AcquireX(); + LoadRealStackLimit(masm, scratch); + __ Cmp(x10, scratch); + } __ B(hs, &ok); __ CallRuntime(Runtime::kThrowStackOverflow); __ Bind(&ok); @@ -2469,7 +2507,7 @@ void Generate_PushBoundArguments(MacroAssembler* masm) { // (i.e. debug break and preemption) here, so check the "real stack // limit". Label done; - __ LoadRoot(x10, RootIndex::kRealStackLimit); + LoadRealStackLimit(masm, x10); // Make x10 the space we have left. The stack might already be overflowed // here which will cause x10 to become negative. __ Sub(x10, sp, x10); @@ -3031,9 +3069,12 @@ void Builtins::Generate_WasmCompileLazy(MacroAssembler* masm) { // function. __ Push(kWasmInstanceRegister, kWasmCompileLazyFuncIndexRegister); // Load the correct CEntry builtin from the instance object. + __ Ldr(x2, FieldMemOperand(kWasmInstanceRegister, + WasmInstanceObject::kIsolateRootOffset)); + auto centry_id = + Builtins::kCEntry_Return1_DontSaveFPRegs_ArgvOnStack_NoBuiltinExit; __ LoadTaggedPointerField( - x2, FieldMemOperand(kWasmInstanceRegister, - WasmInstanceObject::kCEntryStubOffset)); + x2, MemOperand(x2, IsolateData::builtin_slot_offset(centry_id))); // Initialize the JavaScript context with 0. CEntry will use it to // set the current context on the isolate. __ Mov(cp, Smi::zero()); diff --git a/deps/v8/src/builtins/array-copywithin.tq b/deps/v8/src/builtins/array-copywithin.tq index 94d871e8f74c13..574eaf9b9de4cc 100644 --- a/deps/v8/src/builtins/array-copywithin.tq +++ b/deps/v8/src/builtins/array-copywithin.tq @@ -9,7 +9,7 @@ namespace array_copywithin { // https://tc39.github.io/ecma262/#sec-array.prototype.copyWithin transitioning javascript builtin ArrayPrototypeCopyWithin( - js-implicit context: Context, receiver: Object)(...arguments): Object { + js-implicit context: Context, receiver: JSAny)(...arguments): JSAny { // 1. Let O be ? ToObject(this value). const object: JSReceiver = ToObject_Inline(context, receiver); @@ -68,7 +68,7 @@ namespace array_copywithin { // d. If fromPresent is true, then. if (fromPresent == True) { // i. Let fromVal be ? Get(O, fromKey). - const fromVal: Object = GetProperty(object, from); + const fromVal: JSAny = GetProperty(object, from); // ii. Perform ? Set(O, toKey, fromVal, true). 
SetProperty(object, to, fromVal); diff --git a/deps/v8/src/builtins/array-every.tq b/deps/v8/src/builtins/array-every.tq index 3451cd769b92e7..8f4c0e1f231dca 100644 --- a/deps/v8/src/builtins/array-every.tq +++ b/deps/v8/src/builtins/array-every.tq @@ -5,15 +5,14 @@ namespace array { transitioning javascript builtin ArrayEveryLoopEagerDeoptContinuation( - js-implicit context: Context, receiver: Object)( - callback: Object, thisArg: Object, initialK: Object, - length: Object): Object { + js-implicit context: Context, receiver: JSAny)( + callback: JSAny, thisArg: JSAny, initialK: JSAny, length: JSAny): JSAny { // All continuation points in the optimized every implementation are // after the ToObject(O) call that ensures we are dealing with a // JSReceiver. // // Also, this great mass of casts is necessary because the signature - // of Torque javascript builtins requires Object type for all parameters + // of Torque javascript builtins requires JSAny type for all parameters // other than {context}. const jsreceiver = Cast(receiver) otherwise unreachable; const callbackfn = Cast(callback) otherwise unreachable; @@ -27,9 +26,9 @@ namespace array { transitioning javascript builtin ArrayEveryLoopLazyDeoptContinuation( - js-implicit context: Context, receiver: Object)( - callback: Object, thisArg: Object, initialK: Object, length: Object, - result: Object): Object { + js-implicit context: Context, receiver: JSAny)( + callback: JSAny, thisArg: JSAny, initialK: JSAny, length: JSAny, + result: JSAny): JSAny { // All continuation points in the optimized every implementation are // after the ToObject(O) call that ensures we are dealing with a // JSReceiver. @@ -53,9 +52,9 @@ namespace array { } transitioning builtin ArrayEveryLoopContinuation(implicit context: Context)( - _receiver: JSReceiver, callbackfn: Callable, thisArg: Object, - _array: Object, o: JSReceiver, initialK: Number, length: Number, - _initialTo: Object): Object { + _receiver: JSReceiver, callbackfn: Callable, thisArg: JSAny, + _array: JSAny, o: JSReceiver, initialK: Number, length: Number, + _initialTo: JSAny): JSAny { // 5. Let k be 0. // 6. Repeat, while k < len for (let k: Number = initialK; k < length; k++) { @@ -69,10 +68,10 @@ namespace array { // 6c. If kPresent is true, then if (kPresent == True) { // 6c. i. Let kValue be ? Get(O, Pk). - const kValue: Object = GetProperty(o, k); + const kValue: JSAny = GetProperty(o, k); // 6c. ii. Perform ? Call(callbackfn, T, ). - const result: Object = Call(context, callbackfn, thisArg, kValue, k, o); + const result: JSAny = Call(context, callbackfn, thisArg, kValue, k, o); // iii. If selected is true, then... if (!ToBoolean(result)) { @@ -86,7 +85,7 @@ namespace array { } transitioning macro FastArrayEvery(implicit context: Context)( - o: JSReceiver, len: Number, callbackfn: Callable, thisArg: Object): Object + o: JSReceiver, len: Number, callbackfn: Callable, thisArg: JSAny): JSAny labels Bailout(Smi) { let k: Smi = 0; const smiLen = Cast(len) otherwise goto Bailout(k); @@ -99,8 +98,8 @@ namespace array { // Ensure that we haven't walked beyond a possibly updated length. 
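The FastArrayEvery macro above re-reads the array length on every iteration because the callback may shrink the array; a rough C++ model of that loop follows. It is only an illustration of the observable behaviour (the real fast path bails out to the generic continuation instead of simply stopping).

#include <cstdio>
#include <functional>
#include <vector>

bool EveryFast(std::vector<int>& elements,
               const std::function<bool(int, size_t)>& callback) {
  const size_t original_length = elements.size();
  for (size_t k = 0; k < original_length; ++k) {
    // "Ensure that we haven't walked beyond a possibly updated length":
    // the callback may have shrunk the array since the last iteration.
    if (k >= elements.size()) break;
    if (!callback(elements[k], k)) return false;
  }
  return true;
}

int main() {
  std::vector<int> values = {2, 4, 6, 7, 8};
  const bool all_even =
      EveryFast(values, [](int v, size_t) { return v % 2 == 0; });
  std::printf("all even: %s\n", all_even ? "true" : "false");  // false
  return 0;
}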
if (k >= fastOW.Get().length) goto Bailout(k); - const value: Object = fastOW.LoadElementNoHole(k) otherwise continue; - const result: Object = + const value: JSAny = fastOW.LoadElementNoHole(k) otherwise continue; + const result: JSAny = Call(context, callbackfn, thisArg, value, k, fastOW.Get()); if (!ToBoolean(result)) { return False; @@ -111,8 +110,8 @@ namespace array { // https://tc39.github.io/ecma262/#sec-array.prototype.every transitioning javascript builtin - ArrayEvery(js-implicit context: Context, receiver: Object)(...arguments): - Object { + ArrayEvery(js-implicit context: Context, receiver: JSAny)(...arguments): + JSAny { try { RequireObjectCoercible(receiver, 'Array.prototype.every'); @@ -129,7 +128,7 @@ namespace array { const callbackfn = Cast(arguments[0]) otherwise TypeError; // 4. If thisArg is present, let T be thisArg; else let T be undefined. - const thisArg: Object = arguments.length > 1 ? arguments[1] : Undefined; + const thisArg: JSAny = arguments.length > 1 ? arguments[1] : Undefined; // Special cases. try { diff --git a/deps/v8/src/builtins/array-filter.tq b/deps/v8/src/builtins/array-filter.tq index 9acd0d04ee3cd7..4d23144329ab47 100644 --- a/deps/v8/src/builtins/array-filter.tq +++ b/deps/v8/src/builtins/array-filter.tq @@ -5,15 +5,15 @@ namespace array_filter { transitioning javascript builtin ArrayFilterLoopEagerDeoptContinuation( - js-implicit context: Context, receiver: Object)( - callback: Object, thisArg: Object, array: Object, initialK: Object, - length: Object, initialTo: Object): Object { + js-implicit context: Context, receiver: JSAny)( + callback: JSAny, thisArg: JSAny, array: JSAny, initialK: JSAny, + length: JSAny, initialTo: JSAny): JSAny { // All continuation points in the optimized filter implementation are // after the ToObject(O) call that ensures we are dealing with a // JSReceiver. // // Also, this great mass of casts is necessary because the signature - // of Torque javascript builtins requires Object type for all parameters + // of Torque javascript builtins requires JSAny type for all parameters // other than {context}. const jsreceiver = Cast(receiver) otherwise unreachable; const callbackfn = Cast(callback) otherwise unreachable; @@ -29,10 +29,9 @@ namespace array_filter { transitioning javascript builtin ArrayFilterLoopLazyDeoptContinuation( - js-implicit context: Context, receiver: Object)( - callback: Object, thisArg: Object, array: Object, initialK: Object, - length: Object, valueK: Object, initialTo: Object, - result: Object): Object { + js-implicit context: Context, receiver: JSAny)( + callback: JSAny, thisArg: JSAny, array: JSAny, initialK: JSAny, + length: JSAny, valueK: JSAny, initialTo: JSAny, result: JSAny): JSAny { // All continuation points in the optimized filter implementation are // after the ToObject(O) call that ensures we are dealing with a // JSReceiver. @@ -60,9 +59,9 @@ namespace array_filter { } transitioning builtin ArrayFilterLoopContinuation(implicit context: Context)( - _receiver: JSReceiver, callbackfn: Callable, thisArg: Object, + _receiver: JSReceiver, callbackfn: Callable, thisArg: JSAny, array: JSReceiver, o: JSReceiver, initialK: Number, length: Number, - initialTo: Number): Object { + initialTo: Number): JSAny { let to: Number = initialTo; // 5. Let k be 0. // 6. Repeat, while k < len @@ -77,10 +76,10 @@ namespace array_filter { // 6c. If kPresent is true, then if (kPresent == True) { // 6c. i. Let kValue be ? Get(O, Pk). 
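The filter continuation above keeps two cursors: `k` walks the source while a separate `to` counter tracks the next slot in the result array. A simplified C++ model of that shape follows; it is illustrative only, not V8's code.

#include <cstdio>
#include <functional>
#include <vector>

std::vector<int> FilterLoop(const std::vector<int>& source,
                            const std::function<bool(int, size_t)>& callback) {
  std::vector<int> output;
  size_t to = 0;  // Kept explicit to mirror the Torque code's `to` counter.
  for (size_t k = 0; k < source.size(); ++k) {
    if (callback(source[k], k)) {
      output.push_back(source[k]);
      ++to;
    }
  }
  std::printf("kept %zu of %zu elements\n", to, source.size());
  return output;
}

int main() {
  std::vector<int> values = {1, 2, 3, 4, 5, 6};
  FilterLoop(values, [](int v, size_t) { return v % 2 == 0; });  // kept 3 of 6
  return 0;
}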
- const kValue: Object = GetProperty(o, k); + const kValue: JSAny = GetProperty(o, k); // 6c. ii. Perform ? Call(callbackfn, T, ). - const result: Object = Call(context, callbackfn, thisArg, kValue, k, o); + const result: JSAny = Call(context, callbackfn, thisArg, kValue, k, o); // iii. If selected is true, then... if (ToBoolean(result)) { @@ -97,7 +96,7 @@ namespace array_filter { } transitioning macro FastArrayFilter(implicit context: Context)( - fastO: FastJSArray, len: Smi, callbackfn: Callable, thisArg: Object, + fastO: FastJSArray, len: Smi, callbackfn: Callable, thisArg: JSAny, output: FastJSArray) labels Bailout(Number, Number) { let k: Smi = 0; let to: Smi = 0; @@ -112,8 +111,8 @@ namespace array_filter { // Ensure that we haven't walked beyond a possibly updated length. if (k >= fastOW.Get().length) goto Bailout(k, to); - const value: Object = fastOW.LoadElementNoHole(k) otherwise continue; - const result: Object = + const value: JSAny = fastOW.LoadElementNoHole(k) otherwise continue; + const result: JSAny = Call(context, callbackfn, thisArg, value, k, fastOW.Get()); if (ToBoolean(result)) { try { @@ -147,8 +146,8 @@ namespace array_filter { // https://tc39.github.io/ecma262/#sec-array.prototype.filter transitioning javascript builtin - ArrayFilter(js-implicit context: Context, receiver: Object)(...arguments): - Object { + ArrayFilter(js-implicit context: Context, receiver: JSAny)(...arguments): + JSAny { try { RequireObjectCoercible(receiver, 'Array.prototype.filter'); @@ -165,7 +164,7 @@ namespace array_filter { const callbackfn = Cast(arguments[0]) otherwise TypeError; // 4. If thisArg is present, let T be thisArg; else let T be undefined. - const thisArg: Object = arguments.length > 1 ? arguments[1] : Undefined; + const thisArg: JSAny = arguments.length > 1 ? arguments[1] : Undefined; let output: JSReceiver; // Special cases. diff --git a/deps/v8/src/builtins/array-find.tq b/deps/v8/src/builtins/array-find.tq index ef54dd4666ef72..ec840a4c98b09b 100644 --- a/deps/v8/src/builtins/array-find.tq +++ b/deps/v8/src/builtins/array-find.tq @@ -5,15 +5,14 @@ namespace array_find { transitioning javascript builtin ArrayFindLoopEagerDeoptContinuation( - js-implicit context: Context, receiver: Object)( - callback: Object, thisArg: Object, initialK: Object, - length: Object): Object { + js-implicit context: Context, receiver: JSAny)( + callback: JSAny, thisArg: JSAny, initialK: JSAny, length: JSAny): JSAny { // All continuation points in the optimized find implementation are // after the ToObject(O) call that ensures we are dealing with a // JSReceiver. // // Also, this great mass of casts is necessary because the signature - // of Torque javascript builtins requires Object type for all parameters + // of Torque javascript builtins requires JSAny type for all parameters // other than {context}. const jsreceiver = Cast(receiver) otherwise unreachable; const callbackfn = Cast(callback) otherwise unreachable; @@ -26,9 +25,9 @@ namespace array_find { transitioning javascript builtin ArrayFindLoopLazyDeoptContinuation( - js-implicit context: Context, receiver: Object)( - _callback: Object, _thisArg: Object, _initialK: Object, _length: Object, - _result: Object): Object { + js-implicit context: Context, receiver: JSAny)( + _callback: JSAny, _thisArg: JSAny, _initialK: JSAny, _length: JSAny, + _result: JSAny): JSAny { // This deopt continuation point is never actually called, it just // exists to make stack traces correct from a ThrowTypeError if the // callback was found to be non-callable. 
@@ -40,9 +39,9 @@ namespace array_find { // before iteration continues. transitioning javascript builtin ArrayFindLoopAfterCallbackLazyDeoptContinuation( - js-implicit context: Context, receiver: Object)( - callback: Object, thisArg: Object, initialK: Object, length: Object, - foundValue: Object, isFound: Object): Object { + js-implicit context: Context, receiver: JSAny)( + callback: JSAny, thisArg: JSAny, initialK: JSAny, length: JSAny, + foundValue: JSAny, isFound: JSAny): JSAny { // All continuation points in the optimized find implementation are // after the ToObject(O) call that ensures we are dealing with a // JSReceiver. @@ -65,8 +64,8 @@ namespace array_find { } transitioning builtin ArrayFindLoopContinuation(implicit context: Context)( - _receiver: JSReceiver, callbackfn: Callable, thisArg: Object, - o: JSReceiver, initialK: Number, length: Number): Object { + _receiver: JSReceiver, callbackfn: Callable, thisArg: JSAny, + o: JSReceiver, initialK: Number, length: Number): JSAny { // 5. Let k be 0. // 6. Repeat, while k < len for (let k: Number = initialK; k < length; k++) { @@ -75,12 +74,11 @@ namespace array_find { // side-effect free and HasProperty/GetProperty do the conversion inline. // 6b. i. Let kValue be ? Get(O, Pk). - const value: Object = GetProperty(o, k); + const value: JSAny = GetProperty(o, k); // 6c. Let testResult be ToBoolean(? Call(predicate, T, <>)). - const testResult: Object = - Call(context, callbackfn, thisArg, value, k, o); + const testResult: JSAny = Call(context, callbackfn, thisArg, value, k, o); // 6d. If testResult is true, return kValue. if (ToBoolean(testResult)) { @@ -93,7 +91,7 @@ namespace array_find { } transitioning macro FastArrayFind(implicit context: Context)( - o: JSReceiver, len: Number, callbackfn: Callable, thisArg: Object): Object + o: JSReceiver, len: Number, callbackfn: Callable, thisArg: JSAny): JSAny labels Bailout(Smi) { let k: Smi = 0; const smiLen = Cast(len) otherwise goto Bailout(k); @@ -107,8 +105,8 @@ namespace array_find { // Ensure that we haven't walked beyond a possibly updated length. if (k >= fastOW.Get().length) goto Bailout(k); - const value: Object = fastOW.LoadElementOrUndefined(k); - const testResult: Object = + const value: JSAny = fastOW.LoadElementOrUndefined(k); + const testResult: JSAny = Call(context, callbackfn, thisArg, value, k, fastOW.Get()); if (ToBoolean(testResult)) { return value; @@ -119,8 +117,8 @@ namespace array_find { // https://tc39.github.io/ecma262/#sec-array.prototype.find transitioning javascript builtin - ArrayPrototypeFind(js-implicit context: Context, receiver: Object)( - ...arguments): Object { + ArrayPrototypeFind(js-implicit context: Context, receiver: JSAny)( + ...arguments): JSAny { try { RequireObjectCoercible(receiver, 'Array.prototype.find'); @@ -138,7 +136,7 @@ namespace array_find { Cast(arguments[0]) otherwise NotCallableError; // 4. If thisArg is present, let T be thisArg; else let T be undefined. - const thisArg: Object = arguments.length > 1 ? arguments[1] : Undefined; + const thisArg: JSAny = arguments.length > 1 ? arguments[1] : Undefined; // Special cases. 
try { diff --git a/deps/v8/src/builtins/array-findindex.tq b/deps/v8/src/builtins/array-findindex.tq index 5a8bb85fbadd4c..6145c8146455b5 100644 --- a/deps/v8/src/builtins/array-findindex.tq +++ b/deps/v8/src/builtins/array-findindex.tq @@ -5,15 +5,14 @@ namespace array_findindex { transitioning javascript builtin ArrayFindIndexLoopEagerDeoptContinuation( - js-implicit context: Context, receiver: Object)( - callback: Object, thisArg: Object, initialK: Object, - length: Object): Object { + js-implicit context: Context, receiver: JSAny)( + callback: JSAny, thisArg: JSAny, initialK: JSAny, length: JSAny): JSAny { // All continuation points in the optimized findIndex implementation are // after the ToObject(O) call that ensures we are dealing with a // JSReceiver. // // Also, this great mass of casts is necessary because the signature - // of Torque javascript builtins requires Object type for all parameters + // of Torque javascript builtins requires JSAny type for all parameters // other than {context}. const jsreceiver = Cast(receiver) otherwise unreachable; const callbackfn = Cast(callback) otherwise unreachable; @@ -26,9 +25,9 @@ namespace array_findindex { transitioning javascript builtin ArrayFindIndexLoopLazyDeoptContinuation( - js-implicit context: Context, receiver: Object)( - _callback: Object, _thisArg: Object, _initialK: Object, _length: Object, - _result: Object): Object { + js-implicit context: Context, receiver: JSAny)( + _callback: JSAny, _thisArg: JSAny, _initialK: JSAny, _length: JSAny, + _result: JSAny): JSAny { // This deopt continuation point is never actually called, it just // exists to make stack traces correct from a ThrowTypeError if the // callback was found to be non-callable. @@ -40,9 +39,9 @@ namespace array_findindex { // before iteration continues. transitioning javascript builtin ArrayFindIndexLoopAfterCallbackLazyDeoptContinuation( - js-implicit context: Context, receiver: Object)( - callback: Object, thisArg: Object, initialK: Object, length: Object, - foundValue: Object, isFound: Object): Object { + js-implicit context: Context, receiver: JSAny)( + callback: JSAny, thisArg: JSAny, initialK: JSAny, length: JSAny, + foundValue: JSAny, isFound: JSAny): JSAny { // All continuation points in the optimized findIndex implementation are // after the ToObject(O) call that ensures we are dealing with a // JSReceiver. @@ -66,7 +65,7 @@ namespace array_findindex { transitioning builtin ArrayFindIndexLoopContinuation(implicit context: Context)( - _receiver: JSReceiver, callbackfn: Callable, thisArg: Object, + _receiver: JSReceiver, callbackfn: Callable, thisArg: JSAny, o: JSReceiver, initialK: Number, length: Number): Number { // 5. Let k be 0. // 6. Repeat, while k < len @@ -76,12 +75,11 @@ namespace array_findindex { // side-effect free and HasProperty/GetProperty do the conversion inline. // 6b. i. Let kValue be ? Get(O, Pk). - const value: Object = GetProperty(o, k); + const value: JSAny = GetProperty(o, k); // 6c. Let testResult be ToBoolean(? Call(predicate, T, <>)). - const testResult: Object = - Call(context, callbackfn, thisArg, value, k, o); + const testResult: JSAny = Call(context, callbackfn, thisArg, value, k, o); // 6d. If testResult is true, return k. 
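The find and findIndex continuations above run the same predicate loop and differ only in what they return: the matching value (or undefined) versus the matching index (or -1). A small side-by-side sketch, with std::optional standing in for undefined; illustrative only.

#include <cstdio>
#include <functional>
#include <optional>
#include <vector>

using Predicate = std::function<bool(int, size_t)>;

std::optional<int> FindValue(const std::vector<int>& v, const Predicate& p) {
  for (size_t k = 0; k < v.size(); ++k) {
    if (p(v[k], k)) return v[k];  // If testResult is true, return kValue.
  }
  return std::nullopt;  // Return undefined.
}

long FindIndex(const std::vector<int>& v, const Predicate& p) {
  for (size_t k = 0; k < v.size(); ++k) {
    if (p(v[k], k)) return static_cast<long>(k);  // ... return k.
  }
  return -1;  // Return -1.
}

int main() {
  std::vector<int> values = {3, 5, 8, 13};
  auto even = [](int x, size_t) { return x % 2 == 0; };
  if (auto found = FindValue(values, even)) std::printf("value %d\n", *found);
  std::printf("index %ld\n", FindIndex(values, even));  // index 2
  return 0;
}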
if (ToBoolean(testResult)) { @@ -94,7 +92,7 @@ namespace array_findindex { } transitioning macro FastArrayFindIndex(implicit context: Context)( - o: JSReceiver, len: Number, callbackfn: Callable, thisArg: Object): Number + o: JSReceiver, len: Number, callbackfn: Callable, thisArg: JSAny): Number labels Bailout(Smi) { let k: Smi = 0; const smiLen = Cast(len) otherwise goto Bailout(k); @@ -108,8 +106,8 @@ namespace array_findindex { // Ensure that we haven't walked beyond a possibly updated length. if (k >= fastOW.Get().length) goto Bailout(k); - const value: Object = fastOW.LoadElementOrUndefined(k); - const testResult: Object = + const value: JSAny = fastOW.LoadElementOrUndefined(k); + const testResult: JSAny = Call(context, callbackfn, thisArg, value, k, fastOW.Get()); if (ToBoolean(testResult)) { return k; @@ -120,8 +118,8 @@ namespace array_findindex { // https://tc39.github.io/ecma262/#sec-array.prototype.findIndex transitioning javascript builtin - ArrayPrototypeFindIndex(js-implicit context: Context, receiver: Object)( - ...arguments): Object { + ArrayPrototypeFindIndex(js-implicit context: Context, receiver: JSAny)( + ...arguments): JSAny { try { RequireObjectCoercible(receiver, 'Array.prototype.findIndex'); @@ -139,7 +137,7 @@ namespace array_findindex { Cast(arguments[0]) otherwise NotCallableError; // 4. If thisArg is present, let T be thisArg; else let T be undefined. - const thisArg: Object = arguments.length > 1 ? arguments[1] : Undefined; + const thisArg: JSAny = arguments.length > 1 ? arguments[1] : Undefined; // Special cases. try { diff --git a/deps/v8/src/builtins/array-foreach.tq b/deps/v8/src/builtins/array-foreach.tq index f52d944291ec7f..5b6e3926016c2c 100644 --- a/deps/v8/src/builtins/array-foreach.tq +++ b/deps/v8/src/builtins/array-foreach.tq @@ -5,9 +5,8 @@ namespace array_foreach { transitioning javascript builtin ArrayForEachLoopEagerDeoptContinuation( - js-implicit context: Context, receiver: Object)( - callback: Object, thisArg: Object, initialK: Object, - length: Object): Object { + js-implicit context: Context, receiver: JSAny)( + callback: JSAny, thisArg: JSAny, initialK: JSAny, length: JSAny): JSAny { // All continuation points in the optimized forEach implemntation are // after the ToObject(O) call that ensures we are dealing with a // JSReceiver. @@ -23,9 +22,9 @@ namespace array_foreach { transitioning javascript builtin ArrayForEachLoopLazyDeoptContinuation( - js-implicit context: Context, receiver: Object)( - callback: Object, thisArg: Object, initialK: Object, length: Object, - _result: Object): Object { + js-implicit context: Context, receiver: JSAny)( + callback: JSAny, thisArg: JSAny, initialK: JSAny, length: JSAny, + _result: JSAny): JSAny { // All continuation points in the optimized forEach implemntation are // after the ToObject(O) call that ensures we are dealing with a // JSReceiver. @@ -40,9 +39,9 @@ namespace array_foreach { } transitioning builtin ArrayForEachLoopContinuation(implicit context: Context)( - _receiver: JSReceiver, callbackfn: Callable, thisArg: Object, - _array: Object, o: JSReceiver, initialK: Number, len: Number, - _to: Object): Object { + _receiver: JSReceiver, callbackfn: Callable, thisArg: JSAny, + _array: JSAny, o: JSReceiver, initialK: Number, len: Number, + _to: JSAny): JSAny { // variables {array} and {to} are ignored. // 5. Let k be 0. @@ -58,7 +57,7 @@ namespace array_foreach { // 6c. If kPresent is true, then if (kPresent == True) { // 6c. i. Let kValue be ? Get(O, Pk). 
- const kValue: Object = GetProperty(o, k); + const kValue: JSAny = GetProperty(o, k); // 6c. ii. Perform ? Call(callbackfn, T, ). Call(context, callbackfn, thisArg, kValue, k, o); @@ -70,7 +69,7 @@ namespace array_foreach { } transitioning macro FastArrayForEach(implicit context: Context)( - o: JSReceiver, len: Number, callbackfn: Callable, thisArg: Object): Object + o: JSReceiver, len: Number, callbackfn: Callable, thisArg: JSAny): JSAny labels Bailout(Smi) { let k: Smi = 0; const smiLen = Cast(len) otherwise goto Bailout(k); @@ -83,7 +82,7 @@ namespace array_foreach { // Ensure that we haven't walked beyond a possibly updated length. if (k >= fastOW.Get().length) goto Bailout(k); - const value: Object = fastOW.LoadElementNoHole(k) + const value: JSAny = fastOW.LoadElementNoHole(k) otherwise continue; Call(context, callbackfn, thisArg, value, k, fastOW.Get()); } @@ -92,8 +91,8 @@ namespace array_foreach { // https://tc39.github.io/ecma262/#sec-array.prototype.foreach transitioning javascript builtin - ArrayForEach(js-implicit context: Context, receiver: Object)(...arguments): - Object { + ArrayForEach(js-implicit context: Context, receiver: JSAny)(...arguments): + JSAny { try { RequireObjectCoercible(receiver, 'Array.prototype.forEach'); @@ -110,7 +109,7 @@ namespace array_foreach { const callbackfn = Cast(arguments[0]) otherwise TypeError; // 4. If thisArg is present, let T be thisArg; else let T be undefined. - const thisArg: Object = arguments.length > 1 ? arguments[1] : Undefined; + const thisArg: JSAny = arguments.length > 1 ? arguments[1] : Undefined; // Special cases. let k: Number = 0; diff --git a/deps/v8/src/builtins/array-join.tq b/deps/v8/src/builtins/array-join.tq index c04233b22244ec..08b5221e099c6b 100644 --- a/deps/v8/src/builtins/array-join.tq +++ b/deps/v8/src/builtins/array-join.tq @@ -3,7 +3,7 @@ // found in the LICENSE file. namespace array_join { - type LoadJoinElementFn = builtin(Context, JSReceiver, Number) => Object; + type LoadJoinElementFn = builtin(Context, JSReceiver, Number) => JSAny; // Fast C call to write a fixed array (see Buffer.fixedArray) to a single // string. @@ -12,12 +12,12 @@ namespace array_join { FixedArray, intptr, String, String): String; transitioning builtin LoadJoinElement( - context: Context, receiver: JSReceiver, k: Number): Object { + context: Context, receiver: JSReceiver, k: Number): JSAny { return GetProperty(receiver, k); } - LoadJoinElement( - context: Context, receiver: JSReceiver, k: Number): Object { + transitioning LoadJoinElement( + context: Context, receiver: JSReceiver, k: Number): JSAny { const array: JSArray = UnsafeCast(receiver); const dict: NumberDictionary = UnsafeCast(array.elements); try { @@ -33,15 +33,15 @@ namespace array_join { } LoadJoinElement( - context: Context, receiver: JSReceiver, k: Number): Object { + context: Context, receiver: JSReceiver, k: Number): JSAny { const array: JSArray = UnsafeCast(receiver); const fixedArray: FixedArray = UnsafeCast(array.elements); const element: Object = fixedArray.objects[UnsafeCast(k)]; - return element == TheHole ? kEmptyString : element; + return element == TheHole ? 
kEmptyString : UnsafeCast(element); } LoadJoinElement( - context: Context, receiver: JSReceiver, k: Number): Object { + context: Context, receiver: JSReceiver, k: Number): JSAny { const array: JSArray = UnsafeCast(receiver); const fixedDoubleArray: FixedDoubleArray = UnsafeCast(array.elements); @@ -51,7 +51,7 @@ namespace array_join { } builtin LoadJoinTypedElement( - context: Context, receiver: JSReceiver, k: Number): Object { + context: Context, receiver: JSReceiver, k: Number): JSAny { const typedArray: JSTypedArray = UnsafeCast(receiver); assert(!IsDetachedBuffer(typedArray.buffer)); return typed_array::LoadFixedTypedArrayElementAsTagged( @@ -60,14 +60,14 @@ namespace array_join { } transitioning builtin ConvertToLocaleString( - context: Context, element: Object, locales: Object, - options: Object): String { + context: Context, element: JSAny, locales: JSAny, + options: JSAny): String { if (IsNullOrUndefined(element)) return kEmptyString; - const prop: Object = GetProperty(element, 'toLocaleString'); + const prop: JSAny = GetProperty(element, 'toLocaleString'); try { const callable: Callable = Cast(prop) otherwise TypeError; - let result: Object; + let result: JSAny; if (IsNullOrUndefined(locales)) { result = Call(context, callable, element); } else if (IsNullOrUndefined(options)) { @@ -86,29 +86,25 @@ namespace array_join { // (see LoadJoinElement). macro CannotUseSameArrayAccessor(implicit context: Context)( loadFn: LoadJoinElementFn, receiver: JSReceiver, originalMap: Map, - originalLen: Number): never - labels Cannot, Can; + originalLen: Number): bool; CannotUseSameArrayAccessor(implicit context: Context)( loadFn: LoadJoinElementFn, receiver: JSReceiver, originalMap: Map, - originalLen: Number): never - labels Cannot, Can { - if (loadFn == LoadJoinElement) goto Can; + originalLen: Number): bool { + if (loadFn == LoadJoinElement) return false; const array: JSArray = UnsafeCast(receiver); - if (originalMap != array.map) goto Cannot; - if (originalLen != array.length) goto Cannot; - if (IsNoElementsProtectorCellInvalid()) goto Cannot; - goto Can; + if (originalMap != array.map) return true; + if (originalLen != array.length) return true; + if (IsNoElementsProtectorCellInvalid()) return true; + return false; } CannotUseSameArrayAccessor(implicit context: Context)( _loadFn: LoadJoinElementFn, receiver: JSReceiver, _initialMap: Map, - _initialLen: Number): never - labels Cannot, Can { + _initialLen: Number): bool { const typedArray: JSTypedArray = UnsafeCast(receiver); - if (IsDetachedBuffer(typedArray.buffer)) goto Cannot; - goto Can; + return IsDetachedBuffer(typedArray.buffer); } // Calculates the running total length of the resulting string. If the @@ -261,7 +257,7 @@ namespace array_join { transitioning macro ArrayJoinImpl(implicit context: Context)( receiver: JSReceiver, sep: String, lengthNumber: Number, - useToLocaleString: constexpr bool, locales: Object, options: Object, + useToLocaleString: constexpr bool, locales: JSAny, options: JSAny, initialLoadFn: LoadJoinElementFn): String { const initialMap: Map = receiver.map; const len: uintptr = Convert(lengthNumber); @@ -287,7 +283,7 @@ namespace array_join { } // b. Let element be ? Get(O, ! ToString(k)). - const element: Object = loadFn(context, receiver, Convert(k++)); + const element: JSAny = loadFn(context, receiver, Convert(k++)); // c. If element is undefined or null, let next be the empty String; // otherwise, let next be ? ToString(element). 
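The ArrayJoinImpl hunk above converts each element to a string, with undefined or null contributing the empty string, and inserts the separator only between elements. A minimal C++ model follows, with std::nullopt standing in for undefined/null; illustrative only.

#include <cstdio>
#include <optional>
#include <string>
#include <vector>

std::string Join(const std::vector<std::optional<std::string>>& elements,
                 const std::string& sep) {
  std::string result;
  for (size_t k = 0; k < elements.size(); ++k) {
    if (k > 0) result += sep;  // Separator goes only between elements.
    // undefined/null -> empty string; otherwise ToString(element).
    if (elements[k]) result += *elements[k];
  }
  return result;
}

int main() {
  std::vector<std::optional<std::string>> parts = {"a", std::nullopt, "c"};
  std::printf("%s\n", Join(parts, ",").c_str());  // prints "a,,c"
  return 0;
}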
@@ -304,7 +300,7 @@ namespace array_join { case (num: Number): { next = NumberToString(num); } - case (obj: HeapObject): { + case (obj: JSAny): { if (IsNullOrUndefined(obj)) continue; next = ToString(context, obj); } @@ -325,11 +321,11 @@ namespace array_join { transitioning macro ArrayJoin(implicit context: Context)( useToLocaleString: constexpr bool, receiver: JSReceiver, sep: String, - lenNumber: Number, locales: Object, options: Object): Object; + lenNumber: Number, locales: JSAny, options: JSAny): JSAny; - ArrayJoin(implicit context: Context)( + transitioning ArrayJoin(implicit context: Context)( useToLocaleString: constexpr bool, receiver: JSReceiver, sep: String, - lenNumber: Number, locales: Object, options: Object): Object { + lenNumber: Number, locales: JSAny, options: JSAny): JSAny { const map: Map = receiver.map; const kind: ElementsKind = map.elements_kind; let loadFn: LoadJoinElementFn; @@ -374,9 +370,9 @@ namespace array_join { receiver, sep, lenNumber, useToLocaleString, locales, options, loadFn); } - ArrayJoin(implicit context: Context)( + transitioning ArrayJoin(implicit context: Context)( useToLocaleString: constexpr bool, receiver: JSReceiver, sep: String, - lenNumber: Number, locales: Object, options: Object): Object { + lenNumber: Number, locales: JSAny, options: JSAny): JSAny { const map: Map = receiver.map; const kind: ElementsKind = map.elements_kind; let loadFn: LoadJoinElementFn; @@ -465,11 +461,9 @@ namespace array_join { } // Fast path the common non-nested calls. If the receiver is not already on - // the stack, add it to the stack and go to ReceiverAdded. Otherwise go to - // ReceiverNotAdded. + // the stack, add it to the stack and return true. Otherwise return false. macro JoinStackPushInline(implicit context: Context)(receiver: JSReceiver): - never - labels ReceiverAdded, ReceiverNotAdded { + bool { try { const stack: FixedArray = LoadJoinStack() otherwise IfUninitialized; @@ -477,7 +471,7 @@ namespace array_join { stack.objects[0] = receiver; } else if (JoinStackPush(stack, receiver) == False) deferred { - goto ReceiverNotAdded; + return false; } } label IfUninitialized { @@ -486,13 +480,13 @@ namespace array_join { stack.objects[0] = receiver; SetJoinStack(stack); } - goto ReceiverAdded; + return true; } // Removes a receiver from the stack. The FixedArray will automatically shrink // to Heap::kMinJoinStackSize once the stack becomes empty. builtin JoinStackPop(implicit context: Context)( - stack: FixedArray, receiver: JSReceiver): Object { + stack: FixedArray, receiver: JSReceiver): JSAny { const len: intptr = stack.length_intptr; for (let i: intptr = 0; i < len; i++) { if (stack.objects[i] == receiver) { @@ -532,7 +526,7 @@ namespace array_join { transitioning macro CycleProtectedArrayJoin(implicit context: Context)( useToLocaleString: constexpr bool, o: JSReceiver, len: Number, - sepObj: Object, locales: Object, options: Object): Object { + sepObj: JSAny, locales: JSAny, options: JSAny): JSAny { // 3. If separator is undefined, let sep be the single-element String ",". // 4. Else, let sep be ? ToString(separator). const sep: String = @@ -542,7 +536,7 @@ namespace array_join { // the normal join algorithm. 
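The JoinStackPushInline/JoinStackPopInline changes above guard join against cyclic receivers: if the receiver is already on the per-isolate join stack, the join yields the empty string instead of recursing forever. The following sketch models that idea with a thread-local stack and a toy Node type; it is an illustration of the design, not V8's representation.

#include <algorithm>
#include <cstdio>
#include <string>
#include <vector>

struct Node {
  std::string label;
  std::vector<const Node*> children;  // May (indirectly) contain `this`.
};

thread_local std::vector<const Node*> join_stack;

std::string CycleProtectedJoin(const Node& node, const std::string& sep) {
  if (std::find(join_stack.begin(), join_stack.end(), &node) !=
      join_stack.end()) {
    return "";  // Already being joined: break the cycle.
  }
  join_stack.push_back(&node);
  std::string result = node.label;
  for (const Node* child : node.children) {
    result += sep + CycleProtectedJoin(*child, sep);
  }
  join_stack.pop_back();
  return result;
}

int main() {
  Node a{"a", {}};
  Node b{"b", {&a}};
  a.children.push_back(&b);  // a -> b -> a: a cycle.
  std::printf("%s\n", CycleProtectedJoin(a, ",").c_str());  // "a,b,"
  return 0;
}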
if (len > 0 && JoinStackPushInline(o)) { try { - const result: Object = + const result: JSAny = ArrayJoin(useToLocaleString, o, sep, len, locales, options); JoinStackPopInline(o); return result; @@ -557,9 +551,9 @@ namespace array_join { // https://tc39.github.io/ecma262/#sec-array.prototype.join transitioning javascript builtin - ArrayPrototypeJoin(js-implicit context: Context, receiver: Object)( - ...arguments): Object { - const separator: Object = arguments[0]; + ArrayPrototypeJoin(js-implicit context: Context, receiver: JSAny)( + ...arguments): JSAny { + const separator: JSAny = arguments[0]; // 1. Let O be ? ToObject(this value). const o: JSReceiver = ToObject_Inline(context, receiver); @@ -577,9 +571,9 @@ namespace array_join { // https://tc39.github.io/ecma262/#sec-array.prototype.tolocalestring transitioning javascript builtin ArrayPrototypeToLocaleString( - js-implicit context: Context, receiver: Object)(...arguments): Object { - const locales: Object = arguments[0]; - const options: Object = arguments[1]; + js-implicit context: Context, receiver: JSAny)(...arguments): JSAny { + const locales: JSAny = arguments[0]; + const options: JSAny = arguments[1]; // 1. Let O be ? ToObject(this value). const o: JSReceiver = ToObject_Inline(context, receiver); @@ -597,12 +591,12 @@ namespace array_join { // https://tc39.github.io/ecma262/#sec-array.prototype.tostring transitioning javascript builtin ArrayPrototypeToString( - js-implicit context: Context, receiver: Object)(...arguments): Object { + js-implicit context: Context, receiver: JSAny)(...arguments): JSAny { // 1. Let array be ? ToObject(this value). const array: JSReceiver = ToObject_Inline(context, receiver); // 2. Let func be ? Get(array, "join"). - const prop: Object = GetProperty(array, 'join'); + const prop: JSAny = GetProperty(array, 'join'); try { // 3. If IsCallable(func) is false, let func be the intrinsic function // %ObjProto_toString%. @@ -618,8 +612,8 @@ namespace array_join { // https://tc39.github.io/ecma262/#sec-%typedarray%.prototype.join transitioning javascript builtin TypedArrayPrototypeJoin( - js-implicit context: Context, receiver: Object)(...arguments): Object { - const separator: Object = arguments[0]; + js-implicit context: Context, receiver: JSAny)(...arguments): JSAny { + const separator: JSAny = arguments[0]; // Spec: ValidateTypedArray is applied to the this value prior to evaluating // the algorithm. @@ -633,9 +627,9 @@ namespace array_join { // https://tc39.github.io/ecma262/#sec-%typedarray%.prototype.tolocalestring transitioning javascript builtin TypedArrayPrototypeToLocaleString( - js-implicit context: Context, receiver: Object)(...arguments): Object { - const locales: Object = arguments[0]; - const options: Object = arguments[1]; + js-implicit context: Context, receiver: JSAny)(...arguments): JSAny { + const locales: JSAny = arguments[0]; + const options: JSAny = arguments[1]; // Spec: ValidateTypedArray is applied to the this value prior to evaluating // the algorithm. 
diff --git a/deps/v8/src/builtins/array-lastindexof.tq b/deps/v8/src/builtins/array-lastindexof.tq index 5ebc451e435117..7765eff6822f34 100644 --- a/deps/v8/src/builtins/array-lastindexof.tq +++ b/deps/v8/src/builtins/array-lastindexof.tq @@ -4,20 +4,20 @@ namespace array_lastindexof { macro LoadWithHoleCheck( - elements: FixedArrayBase, index: Smi): Object + elements: FixedArrayBase, index: Smi): JSAny labels IfHole; LoadWithHoleCheck(implicit context: Context)( - elements: FixedArrayBase, index: Smi): Object + elements: FixedArrayBase, index: Smi): JSAny labels IfHole { const elements: FixedArray = UnsafeCast(elements); const element: Object = elements.objects[index]; if (element == TheHole) goto IfHole; - return element; + return UnsafeCast(element); } LoadWithHoleCheck(implicit context: Context)( - elements: FixedArrayBase, index: Smi): Object + elements: FixedArrayBase, index: Smi): JSAny labels IfHole { const elements: FixedDoubleArray = UnsafeCast(elements); const element: float64 = LoadDoubleWithHoleCheck(elements, index) @@ -26,7 +26,7 @@ namespace array_lastindexof { } macro FastArrayLastIndexOf( - context: Context, array: JSArray, from: Smi, searchElement: Object): Smi { + context: Context, array: JSArray, from: Smi, searchElement: JSAny): Smi { const elements: FixedArrayBase = array.elements; let k: Smi = from; @@ -40,7 +40,7 @@ namespace array_lastindexof { while (k >= 0) { try { - const element: Object = LoadWithHoleCheck(elements, k) + const element: JSAny = LoadWithHoleCheck(elements, k) otherwise Hole; const same: Boolean = StrictEqual(searchElement, element); @@ -80,8 +80,8 @@ namespace array_lastindexof { } macro TryFastArrayLastIndexOf( - context: Context, receiver: JSReceiver, searchElement: Object, - from: Number): Object + context: Context, receiver: JSReceiver, searchElement: JSAny, + from: Number): JSAny labels Slow { const array: FastJSArray = Cast(receiver) otherwise Slow; const length: Smi = array.length; @@ -99,8 +99,8 @@ namespace array_lastindexof { } transitioning macro GenericArrayLastIndexOf( - context: Context, object: JSReceiver, searchElement: Object, - from: Number): Object { + context: Context, object: JSReceiver, searchElement: JSAny, + from: Number): JSAny { let k: Number = from; // 7. Repeat, while k >= 0. @@ -111,7 +111,7 @@ namespace array_lastindexof { // b. If kPresent is true, then. if (kPresent == True) { // i. Let elementK be ? Get(O, ! ToString(k)). - const element: Object = GetProperty(object, k); + const element: JSAny = GetProperty(object, k); // ii. Let same be the result of performing Strict Equality Comparison // searchElement === elementK. @@ -131,7 +131,7 @@ namespace array_lastindexof { // https://tc39.github.io/ecma262/#sec-array.prototype.lastIndexOf transitioning javascript builtin ArrayPrototypeLastIndexOf( - js-implicit context: Context, receiver: Object)(...arguments): Object { + js-implicit context: Context, receiver: JSAny)(...arguments): JSAny { // 1. Let O be ? ToObject(this value). const object: JSReceiver = ToObject_Inline(context, receiver); @@ -144,7 +144,7 @@ namespace array_lastindexof { // Step 4 - 6. 
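Steps 4-6 referenced in the lastIndexOf builtin above normalize the optional, possibly negative fromIndex against the length before the backwards scan with strict equality. A plain C++ model of that normalization and scan follows; illustrative only (V8's GetFromIndex additionally routes arbitrary inputs through ToInteger).

#include <cstdio>
#include <optional>
#include <vector>

long LastIndexOf(const std::vector<int>& v, int search,
                 std::optional<long> from_index = std::nullopt) {
  const long length = static_cast<long>(v.size());
  if (length == 0) return -1;
  // Steps 4-6: default is length - 1; negative values count from the end.
  long from = from_index ? *from_index : length - 1;
  if (from < 0) from += length;
  if (from >= length) from = length - 1;
  for (long k = from; k >= 0; --k) {
    if (v[static_cast<size_t>(k)] == search) return k;  // Strict equality.
  }
  return -1;
}

int main() {
  std::vector<int> values = {1, 2, 3, 2, 1};
  std::printf("%ld\n", LastIndexOf(values, 2));      // 3
  std::printf("%ld\n", LastIndexOf(values, 2, -3));  // 1
  return 0;
}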
const from: Number = GetFromIndex(context, length, arguments); - const searchElement: Object = arguments[0]; + const searchElement: JSAny = arguments[0]; try { return TryFastArrayLastIndexOf(context, object, searchElement, from) diff --git a/deps/v8/src/builtins/array-map.tq b/deps/v8/src/builtins/array-map.tq index dda569c68236b3..c4b0e8a358a20d 100644 --- a/deps/v8/src/builtins/array-map.tq +++ b/deps/v8/src/builtins/array-map.tq @@ -5,15 +5,15 @@ namespace array_map { transitioning javascript builtin ArrayMapLoopEagerDeoptContinuation( - js-implicit context: Context, receiver: Object)( - callback: Object, thisArg: Object, array: Object, initialK: Object, - length: Object): Object { + js-implicit context: Context, receiver: JSAny)( + callback: JSAny, thisArg: JSAny, array: JSAny, initialK: JSAny, + length: JSAny): JSAny { // All continuation points in the optimized filter implementation are // after the ToObject(O) call that ensures we are dealing with a // JSReceiver. // // Also, this great mass of casts is necessary because the signature - // of Torque javascript builtins requires Object type for all parameters + // of Torque javascript builtins requires JSAny type for all parameters // other than {context}. const jsreceiver = Cast(receiver) otherwise unreachable; const callbackfn = Cast(callback) otherwise unreachable; @@ -28,9 +28,9 @@ namespace array_map { transitioning javascript builtin ArrayMapLoopLazyDeoptContinuation( - js-implicit context: Context, receiver: Object)( - callback: Object, thisArg: Object, array: Object, initialK: Object, - length: Object, result: Object): Object { + js-implicit context: Context, receiver: JSAny)( + callback: JSAny, thisArg: JSAny, array: JSAny, initialK: JSAny, + length: JSAny, result: JSAny): JSAny { // All continuation points in the optimized filter implementation are // after the ToObject(O) call that ensures we are dealing with a // JSReceiver. @@ -57,9 +57,9 @@ namespace array_map { } transitioning builtin ArrayMapLoopContinuation(implicit context: Context)( - _receiver: JSReceiver, callbackfn: Callable, thisArg: Object, + _receiver: JSReceiver, callbackfn: Callable, thisArg: JSAny, array: JSReceiver, o: JSReceiver, initialK: Number, - length: Number): Object { + length: Number): JSAny { // 6. Let k be 0. // 7. Repeat, while k < len for (let k: Number = initialK; k < length; k++) { @@ -73,10 +73,10 @@ namespace array_map { // 7c. If kPresent is true, then: if (kPresent == True) { // i. Let kValue be ? Get(O, Pk). - const kValue: Object = GetProperty(o, k); + const kValue: JSAny = GetProperty(o, k); // ii. Let mapped_value be ? Call(callbackfn, T, kValue, k, O). - const mappedValue: Object = + const mappedValue: JSAny = Call(context, callbackfn, thisArg, kValue, k, o); // iii. Perform ? CreateDataPropertyOrThrow(A, Pk, mapped_value). 
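The array-map hunk just below changes the result vector's StoreResult typeswitch: it starts out assuming "only Smis", downgrades to "only Numbers" when a non-Smi number is stored, and to a generic kind otherwise, so the finished array can pick the cheapest element representation. A rough model of that classification follows; the variant type is only an illustration.

#include <cstdio>
#include <string>
#include <variant>
#include <vector>

using Value = std::variant<int, double, std::string>;  // Smi / Number / other

struct ResultVector {
  std::vector<Value> values;
  bool only_smis = true;
  bool only_numbers = true;

  void StoreResult(const Value& result) {
    if (std::holds_alternative<double>(result)) {
      only_smis = false;
    } else if (std::holds_alternative<std::string>(result)) {
      only_smis = false;
      only_numbers = false;
    }
    values.push_back(result);
  }

  const char* ElementsKind() const {
    if (only_smis) return "PACKED_SMI_ELEMENTS";
    if (only_numbers) return "PACKED_DOUBLE_ELEMENTS";
    return "PACKED_ELEMENTS";
  }
};

int main() {
  ResultVector results;
  results.StoreResult(1);
  results.StoreResult(2.5);
  std::printf("%s\n", results.ElementsKind());  // PACKED_DOUBLE_ELEMENTS
  return 0;
}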
@@ -127,12 +127,12 @@ namespace array_map { SmiUntag(length), kAllowLargeObjectAllocation); a = NewJSArray(map, this.fixedArray); for (let i: Smi = 0; i < validLength; i++) { - typeswitch (this.fixedArray.objects[i]) { + typeswitch ( + UnsafeCast<(Number | TheHole)>(this.fixedArray.objects[i])) { case (n: Number): { elements.floats[i] = Convert(n); } - case (h: HeapObject): { - assert(h == TheHole); + case (TheHole): { } } } @@ -147,7 +147,7 @@ namespace array_map { return a; } - StoreResult(implicit context: Context)(index: Smi, result: Object) { + StoreResult(implicit context: Context)(index: Smi, result: JSAny) { typeswitch (result) { case (s: Smi): { this.fixedArray.objects[index] = s; @@ -156,7 +156,7 @@ namespace array_map { this.onlySmis = false; this.fixedArray.objects[index] = s; } - case (s: HeapObject): { + case (s: JSAnyNotNumber): { this.onlySmis = false; this.onlyNumbers = false; this.fixedArray.objects[index] = s; @@ -185,7 +185,7 @@ namespace array_map { transitioning macro FastArrayMap(implicit context: Context)( fastO: FastJSArrayForRead, len: Smi, callbackfn: Callable, - thisArg: Object): JSArray + thisArg: JSAny): JSArray labels Bailout(JSArray, Smi) { let k: Smi = 0; let fastOW = NewFastJSArrayForReadWitness(fastO); @@ -201,9 +201,9 @@ namespace array_map { if (k >= fastOW.Get().length) goto PrepareBailout(k); try { - const value: Object = fastOW.LoadElementNoHole(k) + const value: JSAny = fastOW.LoadElementNoHole(k) otherwise FoundHole; - const result: Object = + const result: JSAny = Call(context, callbackfn, thisArg, value, k, fastOW.Get()); vector.StoreResult(k, result); } @@ -224,8 +224,7 @@ namespace array_map { // https://tc39.github.io/ecma262/#sec-array.prototype.map transitioning javascript builtin - ArrayMap(js-implicit context: Context, receiver: Object)(...arguments): - Object { + ArrayMap(js-implicit context: Context, receiver: JSAny)(...arguments): JSAny { try { RequireObjectCoercible(receiver, 'Array.prototype.map'); @@ -241,7 +240,7 @@ namespace array_map { const callbackfn = Cast(arguments[0]) otherwise TypeError; // 4. If thisArg is present, let T be thisArg; else let T be undefined. - const thisArg: Object = arguments.length > 1 ? arguments[1] : Undefined; + const thisArg: JSAny = arguments.length > 1 ? arguments[1] : Undefined; let array: JSReceiver; let k: Number = 0; diff --git a/deps/v8/src/builtins/array-of.tq b/deps/v8/src/builtins/array-of.tq index 72933186257231..ceb9edff63c5b8 100644 --- a/deps/v8/src/builtins/array-of.tq +++ b/deps/v8/src/builtins/array-of.tq @@ -5,8 +5,7 @@ namespace array_of { // https://tc39.github.io/ecma262/#sec-array.of transitioning javascript builtin - ArrayOf(js-implicit context: Context, receiver: Object)(...arguments): - Object { + ArrayOf(js-implicit context: Context, receiver: JSAny)(...arguments): JSAny { // 1. Let len be the actual number of arguments passed to this function. const len: Smi = Convert(arguments.length); @@ -14,7 +13,7 @@ namespace array_of { const items: Arguments = arguments; // 3. Let C be the this value. - const c: Object = receiver; + const c: JSAny = receiver; let a: JSReceiver; @@ -24,7 +23,7 @@ namespace array_of { // a. Let A be ? Construct(C, « len »). a = Construct(c, len); } - case (Object): { + case (JSAny): { // a. Let A be ? ArrayCreate(len). a = ArrayCreate(len); } @@ -36,7 +35,7 @@ namespace array_of { // 7. Repeat, while k < len while (k < len) { // a. Let kValue be items[k]. - const kValue: Object = items[Convert(k)]; + const kValue: JSAny = items[Convert(k)]; // b. 
Let Pk be ! ToString(k). // c. Perform ? CreateDataPropertyOrThrow(A, Pk, kValue). diff --git a/deps/v8/src/builtins/array-reduce-right.tq b/deps/v8/src/builtins/array-reduce-right.tq index b1aa71b85b4623..ae5ca99d3d5e14 100644 --- a/deps/v8/src/builtins/array-reduce-right.tq +++ b/deps/v8/src/builtins/array-reduce-right.tq @@ -6,13 +6,13 @@ namespace array { transitioning javascript builtin ArrayReduceRightPreLoopEagerDeoptContinuation( js-implicit context: Context, - receiver: Object)(callback: Object, length: Object): Object { + receiver: JSAny)(callback: JSAny, length: JSAny): JSAny { // All continuation points in the optimized every implementation are // after the ToObject(O) call that ensures we are dealing with a // JSReceiver. // // Also, this great mass of casts is necessary because the signature - // of Torque javascript builtins requires Object type for all parameters + // of Torque javascript builtins requires JSAny type for all parameters // other than {context}. const jsreceiver = Cast(receiver) otherwise unreachable; const callbackfn = Cast(callback) otherwise unreachable; @@ -27,15 +27,15 @@ namespace array { transitioning javascript builtin ArrayReduceRightLoopEagerDeoptContinuation( - js-implicit context: Context, receiver: Object)( - callback: Object, initialK: Object, length: Object, - accumulator: Object): Object { + js-implicit context: Context, receiver: JSAny)( + callback: JSAny, initialK: JSAny, length: JSAny, + accumulator: JSAny): JSAny { // All continuation points in the optimized every implementation are // after the ToObject(O) call that ensures we are dealing with a // JSReceiver. // // Also, this great mass of casts is necessary because the signature - // of Torque javascript builtins requires Object type for all parameters + // of Torque javascript builtins requires JSAny type for all parameters // other than {context}. const jsreceiver = Cast(receiver) otherwise unreachable; const callbackfn = Cast(callback) otherwise unreachable; @@ -48,9 +48,8 @@ namespace array { transitioning javascript builtin ArrayReduceRightLoopLazyDeoptContinuation( - js-implicit context: Context, receiver: Object)( - callback: Object, initialK: Object, length: Object, - result: Object): Object { + js-implicit context: Context, receiver: JSAny)( + callback: JSAny, initialK: JSAny, length: JSAny, result: JSAny): JSAny { // All continuation points in the optimized every implementation are // after the ToObject(O) call that ensures we are dealing with a // JSReceiver. @@ -67,8 +66,9 @@ namespace array { transitioning builtin ArrayReduceRightLoopContinuation(implicit context: Context)( - _receiver: JSReceiver, callbackfn: Callable, initialAccumulator: Object, - o: JSReceiver, initialK: Number, _length: Number): Object { + _receiver: JSReceiver, callbackfn: Callable, + initialAccumulator: JSAny | TheHole, o: JSReceiver, initialK: Number, + _length: Number): JSAny { let accumulator = initialAccumulator; // 8b and 9. Repeat, while k >= 0 @@ -83,16 +83,20 @@ namespace array { // 8b iii and 9c. If kPresent is true, then if (present == True) { // 8b iii and 9c i. Let kValue be ? Get(O, Pk). - const value: Object = GetProperty(o, k); - - if (accumulator == TheHole) { - // 8b iii 1. - accumulator = value; - } else { - // 9c. ii. Set accumulator to ? Call(callbackfn, undefined, - // ). - accumulator = - Call(context, callbackfn, Undefined, accumulator, value, k, o); + const value: JSAny = GetProperty(o, k); + + typeswitch (accumulator) { + case (TheHole): { + // 8b iii 1. 
+ accumulator = value; + } + case (accumulatorNotHole: JSAny): { + // 9c. ii. Set accumulator to ? Call(callbackfn, undefined, + // ). + accumulator = Call( + context, callbackfn, Undefined, accumulatorNotHole, value, k, + o); + } } } @@ -102,16 +106,20 @@ namespace array { // 8c. if kPresent is false, throw a TypeError exception. // If the accumulator is discovered with the sentinel hole value, // this means kPresent is false. - if (accumulator == TheHole) { - ThrowTypeError(kReduceNoInitial, 'Array.prototype.reduceRight'); + typeswitch (accumulator) { + case (TheHole): { + ThrowTypeError(kReduceNoInitial, 'Array.prototype.reduceRight'); + } + case (accumulator: JSAny): { + return accumulator; + } } - return accumulator; } transitioning macro FastArrayReduceRight(implicit context: Context)( o: JSReceiver, len: Number, callbackfn: Callable, - initialAccumulator: Object): Object - labels Bailout(Number, Object) { + initialAccumulator: JSAny | TheHole): JSAny + labels Bailout(Number, JSAny | TheHole) { let accumulator = initialAccumulator; const smiLen = Cast(len) otherwise goto Bailout(len - 1, accumulator); const fastO = Cast(o) @@ -125,25 +133,32 @@ namespace array { // Ensure that we haven't walked beyond a possibly updated length. if (k >= fastOW.Get().length) goto Bailout(k, accumulator); - const value: Object = fastOW.LoadElementNoHole(k) otherwise continue; - if (accumulator == TheHole) { - accumulator = value; - } else { - accumulator = Call( - context, callbackfn, Undefined, accumulator, value, k, - fastOW.Get()); + const value: JSAny = fastOW.LoadElementNoHole(k) otherwise continue; + typeswitch (accumulator) { + case (TheHole): { + accumulator = value; + } + case (accumulatorNotHole: JSAny): { + accumulator = Call( + context, callbackfn, Undefined, accumulatorNotHole, value, k, + fastOW.Get()); + } } } - if (accumulator == TheHole) { - ThrowTypeError(kReduceNoInitial, 'Array.prototype.reduceRight'); + typeswitch (accumulator) { + case (TheHole): { + ThrowTypeError(kReduceNoInitial, 'Array.prototype.reduceRight'); + } + case (accumulator: JSAny): { + return accumulator; + } } - return accumulator; } // https://tc39.github.io/ecma262/#sec-array.prototype.reduceRight transitioning javascript builtin - ArrayReduceRight(js-implicit context: Context, receiver: Object)( - ...arguments): Object { + ArrayReduceRight(js-implicit context: Context, receiver: JSAny)(...arguments): + JSAny { try { RequireObjectCoercible(receiver, 'Array.prototype.reduceRight'); @@ -163,14 +178,14 @@ namespace array { // exception. (This case is handled at the end of // ArrayReduceRightLoopContinuation). - const initialValue: Object = + const initialValue: JSAny | TheHole = arguments.length > 1 ? 
arguments[1] : TheHole; try { return FastArrayReduceRight(o, len, callbackfn, initialValue) otherwise Bailout; } - label Bailout(value: Number, accumulator: Object) { + label Bailout(value: Number, accumulator: JSAny | TheHole) { return ArrayReduceRightLoopContinuation( o, callbackfn, accumulator, o, value, len); } diff --git a/deps/v8/src/builtins/array-reduce.tq b/deps/v8/src/builtins/array-reduce.tq index a5f6feb9ccedf3..1021c4864281fe 100644 --- a/deps/v8/src/builtins/array-reduce.tq +++ b/deps/v8/src/builtins/array-reduce.tq @@ -6,13 +6,13 @@ namespace array { transitioning javascript builtin ArrayReducePreLoopEagerDeoptContinuation( js-implicit context: Context, - receiver: Object)(callback: Object, length: Object): Object { + receiver: JSAny)(callback: JSAny, length: JSAny): JSAny { // All continuation points in the optimized every implementation are // after the ToObject(O) call that ensures we are dealing with a // JSReceiver. // // Also, this great mass of casts is necessary because the signature - // of Torque javascript builtins requires Object type for all parameters + // of Torque javascript builtins requires JSAny type for all parameters // other than {context}. const jsreceiver = Cast(receiver) otherwise unreachable; const callbackfn = Cast(callback) otherwise unreachable; @@ -27,15 +27,15 @@ namespace array { transitioning javascript builtin ArrayReduceLoopEagerDeoptContinuation( - js-implicit context: Context, receiver: Object)( - callback: Object, initialK: Object, length: Object, - accumulator: Object): Object { + js-implicit context: Context, receiver: JSAny)( + callback: JSAny, initialK: JSAny, length: JSAny, + accumulator: JSAny): JSAny { // All continuation points in the optimized every implementation are // after the ToObject(O) call that ensures we are dealing with a // JSReceiver. // // Also, this great mass of casts is necessary because the signature - // of Torque javascript builtins requires Object type for all parameters + // of Torque javascript builtins requires JSAny type for all parameters // other than {context}. const jsreceiver = Cast(receiver) otherwise unreachable; const callbackfn = Cast(callback) otherwise unreachable; @@ -48,9 +48,8 @@ namespace array { transitioning javascript builtin ArrayReduceLoopLazyDeoptContinuation( - js-implicit context: Context, receiver: Object)( - callback: Object, initialK: Object, length: Object, - result: Object): Object { + js-implicit context: Context, receiver: JSAny)( + callback: JSAny, initialK: JSAny, length: JSAny, result: JSAny): JSAny { // All continuation points in the optimized every implementation are // after the ToObject(O) call that ensures we are dealing with a // JSReceiver. @@ -66,8 +65,9 @@ namespace array { } transitioning builtin ArrayReduceLoopContinuation(implicit context: Context)( - _receiver: JSReceiver, callbackfn: Callable, initialAccumulator: Object, - o: JSReceiver, initialK: Number, length: Number): Object { + _receiver: JSReceiver, callbackfn: Callable, + initialAccumulator: JSAny | TheHole, o: JSReceiver, initialK: Number, + length: Number): JSAny { let accumulator = initialAccumulator; // 8b and 9. Repeat, while k < len @@ -82,16 +82,20 @@ namespace array { // 6c. If kPresent is true, then if (present == True) { // 6c. i. Let kValue be ? Get(O, Pk). - const value: Object = GetProperty(o, k); - - if (accumulator == TheHole) { - // 8b. - accumulator = value; - } else { - // 9c. ii. Set accumulator to ? Call(callbackfn, undefined, - // ). 
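The `TheHole` default for `initialValue` in `ArrayReduceRight` above is what makes the missing-initial-value cases observable from JavaScript: the first present element seeds the accumulator, holes never do, and an array with no present elements at all makes `reduceRight` throw (reduce, changed next, behaves the same way). A quick TypeScript check:

  const seeded = [1, 2, 3].reduceRight((acc, x) => acc + x);                  // 6
  const holesSkipped = new Array<number>(3).reduceRight((a, x) => a + x, 0);  // 0

  let threw = false;
  try {
    new Array<number>(3).reduceRight((a, x) => a + x);  // only holes, no initial value
  } catch (e) {
    threw = e instanceof TypeError;                     // true (kReduceNoInitial)
  }
  console.log(seeded, holesSkipped, threw);             // 6 0 true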
- accumulator = - Call(context, callbackfn, Undefined, accumulator, value, k, o); + const value: JSAny = GetProperty(o, k); + + typeswitch (accumulator) { + case (TheHole): { + // 8b. + accumulator = value; + } + case (accumulatorNotHole: JSAny): { + // 9c. ii. Set accumulator to ? Call(callbackfn, undefined, + // ). + accumulator = Call( + context, callbackfn, Undefined, accumulatorNotHole, value, k, + o); + } } } @@ -101,16 +105,20 @@ namespace array { // 8c. if kPresent is false, throw a TypeError exception. // If the accumulator is discovered with the sentinel hole value, // this means kPresent is false. - if (accumulator == TheHole) { - ThrowTypeError(kReduceNoInitial, 'Array.prototype.reduce'); + typeswitch (accumulator) { + case (TheHole): { + ThrowTypeError(kReduceNoInitial, 'Array.prototype.reduce'); + } + case (accumulator: JSAny): { + return accumulator; + } } - return accumulator; } transitioning macro FastArrayReduce(implicit context: Context)( o: JSReceiver, len: Number, callbackfn: Callable, - initialAccumulator: Object): Object - labels Bailout(Number, Object) { + initialAccumulator: JSAny | TheHole): JSAny + labels Bailout(Number, JSAny | TheHole) { const k = 0; let accumulator = initialAccumulator; Cast(len) otherwise goto Bailout(k, accumulator); @@ -125,25 +133,32 @@ namespace array { // Ensure that we haven't walked beyond a possibly updated length. if (k >= fastOW.Get().length) goto Bailout(k, accumulator); - const value: Object = fastOW.LoadElementNoHole(k) otherwise continue; - if (accumulator == TheHole) { - accumulator = value; - } else { - accumulator = Call( - context, callbackfn, Undefined, accumulator, value, k, - fastOW.Get()); + const value: JSAny = fastOW.LoadElementNoHole(k) otherwise continue; + typeswitch (accumulator) { + case (TheHole): { + accumulator = value; + } + case (accumulatorNotHole: JSAny): { + accumulator = Call( + context, callbackfn, Undefined, accumulatorNotHole, value, k, + fastOW.Get()); + } } } - if (accumulator == TheHole) { - ThrowTypeError(kReduceNoInitial, 'Array.prototype.reduce'); + typeswitch (accumulator) { + case (TheHole): { + ThrowTypeError(kReduceNoInitial, 'Array.prototype.reduce'); + } + case (accumulator: JSAny): { + return accumulator; + } } - return accumulator; } // https://tc39.github.io/ecma262/#sec-array.prototype.reduce transitioning javascript builtin - ArrayReduce(js-implicit context: Context, receiver: Object)(...arguments): - Object { + ArrayReduce(js-implicit context: Context, receiver: JSAny)(...arguments): + JSAny { try { RequireObjectCoercible(receiver, 'Array.prototype.reduce'); @@ -163,14 +178,14 @@ namespace array { // exception. (This case is handled at the end of // ArrayReduceLoopContinuation). - const initialValue: Object = + const initialValue: JSAny | TheHole = arguments.length > 1 ? 
arguments[1] : TheHole; try { return FastArrayReduce(o, len, callbackfn, initialValue) otherwise Bailout; } - label Bailout(value: Number, accumulator: Object) { + label Bailout(value: Number, accumulator: JSAny | TheHole) { return ArrayReduceLoopContinuation( o, callbackfn, accumulator, o, value, len); } diff --git a/deps/v8/src/builtins/array-reverse.tq b/deps/v8/src/builtins/array-reverse.tq index 82d2e6b6058661..5e9d3a00f39d95 100644 --- a/deps/v8/src/builtins/array-reverse.tq +++ b/deps/v8/src/builtins/array-reverse.tq @@ -12,10 +12,10 @@ namespace array_reverse { return UnsafeCast(elements.objects[index]); } - LoadElement( - implicit context: Context)(elements: FixedArrayBase, index: Smi): Object { + LoadElement( + implicit context: Context)(elements: FixedArrayBase, index: Smi): JSAny { const elements: FixedArray = UnsafeCast(elements); - return elements.objects[index]; + return UnsafeCast(elements.objects[index]); } LoadElement( @@ -38,9 +38,9 @@ namespace array_reverse { StoreFixedArrayElement(elems, index, value, SKIP_WRITE_BARRIER); } - StoreElement( + StoreElement( implicit context: - Context)(elements: FixedArrayBase, index: Smi, value: Object) { + Context)(elements: FixedArrayBase, index: Smi, value: JSAny) { const elements: FixedArray = UnsafeCast(elements); elements.objects[index] = value; } @@ -70,8 +70,8 @@ namespace array_reverse { } } - transitioning macro GenericArrayReverse(context: Context, receiver: Object): - Object { + transitioning macro GenericArrayReverse(context: Context, receiver: JSAny): + JSAny { // 1. Let O be ? ToObject(this value). const object: JSReceiver = ToObject_Inline(context, receiver); @@ -89,8 +89,8 @@ namespace array_reverse { let upper: Number = length - 1; while (lower < upper) { - let lowerValue: Object = Undefined; - let upperValue: Object = Undefined; + let lowerValue: JSAny = Undefined; + let upperValue: JSAny = Undefined; // b. Let upperP be ! ToString(upper). // c. Let lowerP be ! ToString(lower). @@ -142,7 +142,7 @@ namespace array_reverse { return object; } - macro TryFastPackedArrayReverse(implicit context: Context)(receiver: Object) + macro TryFastPackedArrayReverse(implicit context: Context)(receiver: JSAny) labels Slow { const array: FastJSArray = Cast(receiver) otherwise Slow; @@ -153,7 +153,7 @@ namespace array_reverse { array.elements, array.length); } else if (kind == PACKED_ELEMENTS) { array::EnsureWriteableFastElements(array); - FastPackedArrayReverse( + FastPackedArrayReverse( array.elements, array.length); } else if (kind == PACKED_DOUBLE_ELEMENTS) { FastPackedArrayReverse( @@ -165,7 +165,7 @@ namespace array_reverse { // https://tc39.github.io/ecma262/#sec-array.prototype.reverse transitioning javascript builtin ArrayPrototypeReverse( - js-implicit context: Context, receiver: Object)(...arguments): Object { + js-implicit context: Context, receiver: JSAny)(...arguments): JSAny { try { TryFastPackedArrayReverse(receiver) otherwise Baseline; return receiver; diff --git a/deps/v8/src/builtins/array-shift.tq b/deps/v8/src/builtins/array-shift.tq index 4dd82d7b886d0a..48ffe3b4875805 100644 --- a/deps/v8/src/builtins/array-shift.tq +++ b/deps/v8/src/builtins/array-shift.tq @@ -3,11 +3,10 @@ // found in the LICENSE file. 
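The reverse changes above keep two routes with the same observable contract: the packed fast path and `GenericArrayReverse` both reverse in place and return the receiver, and the generic route also handles plain array-likes. For example:

  const arr = [1, 2, 3];
  console.log(arr.reverse() === arr, arr);          // true [3, 2, 1]

  // Roughly what the generic path does for non-array receivers:
  const arrayLike = { 0: "a", 2: "c", length: 3 };  // index 1 is absent
  Array.prototype.reverse.call(arrayLike);
  console.log(arrayLike);                           // { 0: "c", 2: "a", length: 3 }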
namespace array_shift { - extern builtin ArrayShift(Context, JSFunction, Object, int32); + extern builtin ArrayShift(Context, JSFunction, JSAny, int32): JSAny; - macro TryFastArrayShift(implicit context: Context)( - receiver: Object, arguments: Arguments): Object - labels Slow { + macro TryFastArrayShift(implicit context: Context)(receiver: JSAny): JSAny + labels Slow, Runtime { const array: FastJSArray = Cast(receiver) otherwise Slow; let witness = NewFastJSArrayWitness(array); @@ -17,35 +16,28 @@ namespace array_shift { return Undefined; } - try { - const newLength = array.length - 1; + const newLength = array.length - 1; - // Check that we're not supposed to right-trim the backing store, as - // implemented in elements.cc:ElementsAccessorBase::SetLengthImpl. - if ((newLength + newLength + kMinAddedElementsCapacity) < - array.elements.length) { - goto Runtime; - } + // Check that we're not supposed to right-trim the backing store, as + // implemented in elements.cc:ElementsAccessorBase::SetLengthImpl. + if ((newLength + newLength + kMinAddedElementsCapacity) < + array.elements.length) { + goto Runtime; + } - // Check that we're not supposed to left-trim the backing store, as - // implemented in elements.cc:FastElementsAccessor::MoveElements. - if (newLength > kMaxCopyElements) goto Runtime; + // Check that we're not supposed to left-trim the backing store, as + // implemented in elements.cc:FastElementsAccessor::MoveElements. + if (newLength > kMaxCopyElements) goto Runtime; - const result = witness.LoadElementOrUndefined(0); - witness.ChangeLength(newLength); - witness.MoveElements(0, 1, Convert(newLength)); - witness.StoreHole(newLength); - return result; - } - label Runtime { - tail ArrayShift( - context, LoadTargetFromFrame(), Undefined, - Convert(arguments.length)); - } + const result = witness.LoadElementOrUndefined(0); + witness.ChangeLength(newLength); + witness.MoveElements(0, 1, Convert(newLength)); + witness.StoreHole(newLength); + return result; } transitioning macro GenericArrayShift(implicit context: - Context)(receiver: Object): Object { + Context)(receiver: JSAny): JSAny { // 1. Let O be ? ToObject(this value). const object: JSReceiver = ToObject_Inline(context, receiver); @@ -78,7 +70,7 @@ namespace array_shift { // d. If fromPresent is true, then if (fromPresent == True) { // i. Let fromVal be ? Get(O, from). - const fromValue: Object = GetProperty(object, from); + const fromValue: JSAny = GetProperty(object, from); // ii. Perform ? Set(O, to, fromValue, true). 
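`GenericArrayShift` above walks the spec steps: read the first element, copy every later slot down, delete the last slot, shrink the length. A simplified TypeScript sketch of that loop over an array-like, with a hypothetical `genericShift` helper and ignoring proxies and the ToLength/HasProperty subtleties:

  function genericShift(o: { length: number; [k: number]: unknown }): unknown {
    const len = Math.max(0, Math.trunc(o.length) || 0);
    if (len === 0) {
      o.length = 0;
      return undefined;
    }
    const first = o[0];
    for (let from = 1; from < len; from++) {
      if (from in o) {
        o[from - 1] = o[from];   // fromValue = Get(O, from); Set(O, to, fromValue)
      } else {
        delete o[from - 1];      // an absent source slot clears the target slot
      }
    }
    delete o[len - 1];
    o.length = len - 1;
    return first;
  }

  console.log(genericShift({ 0: "a", 1: "b", length: 2 }));  // "a"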
SetProperty(object, to, fromValue); @@ -103,12 +95,17 @@ namespace array_shift { // https://tc39.github.io/ecma262/#sec-array.prototype.shift transitioning javascript builtin ArrayPrototypeShift( - js-implicit context: Context, receiver: Object)(...arguments): Object { + js-implicit context: Context, receiver: JSAny)(...arguments): JSAny { try { - return TryFastArrayShift(receiver, arguments) otherwise Slow; + return TryFastArrayShift(receiver) otherwise Slow, Runtime; } label Slow { return GenericArrayShift(receiver); } + label Runtime { + tail ArrayShift( + context, LoadTargetFromFrame(), Undefined, + Convert(arguments.length)); + } } } diff --git a/deps/v8/src/builtins/array-slice.tq b/deps/v8/src/builtins/array-slice.tq index c3a6ac75cb0ec7..57ddc8dea1c852 100644 --- a/deps/v8/src/builtins/array-slice.tq +++ b/deps/v8/src/builtins/array-slice.tq @@ -4,7 +4,7 @@ namespace array_slice { macro HandleSimpleArgumentsSlice( - context: Context, args: JSArgumentsObjectWithLength, start: Smi, + context: NativeContext, args: JSArgumentsObjectWithLength, start: Smi, count: Smi): JSArray labels Bailout { // If the resulting array doesn't fit in new space, use the slow path. @@ -27,7 +27,7 @@ namespace array_slice { } macro HandleFastAliasedSloppyArgumentsSlice( - context: Context, args: JSArgumentsObjectWithLength, start: Smi, + context: NativeContext, args: JSArgumentsObjectWithLength, start: Smi, count: Smi): JSArray labels Bailout { // If the resulting array doesn't fit in new space, use the slow path. @@ -63,9 +63,9 @@ namespace array_slice { for (let current: Smi = start; current < to; ++current) { const e: Object = sloppyElements.objects[current + kSloppyArgumentsParameterMapStart]; - const newElement: Object = e != TheHole ? - argumentsContext[UnsafeCast(e)] : - unmappedElements.objects[current]; + const newElement: JSAny = UnsafeCast( + e != TheHole ? argumentsContext[UnsafeCast(e)] : + unmappedElements.objects[current]); // It is safe to skip the write barrier here because resultElements was // allocated together with result in a folded allocation. // TODO(tebbi): The verification of this fails at the moment due to @@ -86,7 +86,7 @@ namespace array_slice { } macro HandleFastSlice( - context: Context, o: Object, startNumber: Number, + context: NativeContext, o: JSAny, startNumber: Number, countNumber: Number): JSArray labels Bailout { const start: Smi = Cast(startNumber) otherwise Bailout; @@ -114,7 +114,7 @@ namespace array_slice { otherwise Bailout; } } - case (Object): { + case (JSAny): { } } goto Bailout; @@ -122,15 +122,15 @@ namespace array_slice { // https://tc39.github.io/ecma262/#sec-array.prototype.slice transitioning javascript builtin - ArrayPrototypeSlice(js-implicit context: Context, receiver: Object)( - ...arguments): Object { + ArrayPrototypeSlice(js-implicit context: Context, receiver: JSAny)( + ...arguments): JSAny { // Handle array cloning case if the receiver is a fast array. if (arguments.length == 0) { typeswitch (receiver) { case (a: FastJSArrayForCopy): { return CloneFastJSArray(context, a); } - case (Object): { + case (JSAny): { } } } @@ -142,7 +142,7 @@ namespace array_slice { const len: Number = GetLengthProperty(o); // 3. Let relativeStart be ? ToInteger(start). - const start: Object = arguments[0]; + const start: JSAny = arguments[0]; const relativeStart: Number = ToInteger_Inline(context, start); // 4. If relativeStart < 0, let k be max((len + relativeStart), 0); @@ -152,7 +152,7 @@ namespace array_slice { // 5. 
If end is undefined, let relativeEnd be len; // else let relativeEnd be ? ToInteger(end). - const end: Object = arguments[1]; + const end: JSAny = arguments[1]; const relativeEnd: Number = end == Undefined ? len : ToInteger_Inline(context, end); @@ -172,7 +172,8 @@ namespace array_slice { assert(count <= len); try { - return HandleFastSlice(context, o, k, count) otherwise Slow; + return HandleFastSlice(UnsafeCast(context), o, k, count) + otherwise Slow; } label Slow {} @@ -193,7 +194,7 @@ namespace array_slice { // c. If kPresent is true, then if (fromPresent == True) { // i. Let kValue be ? Get(O, Pk). - const kValue: Object = GetProperty(o, pK); + const kValue: JSAny = GetProperty(o, pK); // ii. Perform ? CreateDataPropertyOrThrow(A, ! ToString(n), kValue). FastCreateDataProperty(a, n, kValue); diff --git a/deps/v8/src/builtins/array-some.tq b/deps/v8/src/builtins/array-some.tq index a30af4e47a42c4..5d93dd0b726017 100644 --- a/deps/v8/src/builtins/array-some.tq +++ b/deps/v8/src/builtins/array-some.tq @@ -5,15 +5,14 @@ namespace array { transitioning javascript builtin ArraySomeLoopEagerDeoptContinuation( - js-implicit context: Context, receiver: Object)( - callback: Object, thisArg: Object, initialK: Object, - length: Object): Object { + js-implicit context: Context, receiver: JSAny)( + callback: JSAny, thisArg: JSAny, initialK: JSAny, length: JSAny): JSAny { // All continuation points in the optimized some implementation are // after the ToObject(O) call that ensures we are dealing with a // JSReceiver. // // Also, this great mass of casts is necessary because the signature - // of Torque javascript builtins requires Object type for all parameters + // of Torque javascript builtins requires JSAny type for all parameters // other than {context}. const jsreceiver = Cast(receiver) otherwise unreachable; const callbackfn = Cast(callback) otherwise unreachable; @@ -27,9 +26,9 @@ namespace array { transitioning javascript builtin ArraySomeLoopLazyDeoptContinuation( - js-implicit context: Context, receiver: Object)( - callback: Object, thisArg: Object, initialK: Object, length: Object, - result: Object): Object { + js-implicit context: Context, receiver: JSAny)( + callback: JSAny, thisArg: JSAny, initialK: JSAny, length: JSAny, + result: JSAny): JSAny { // All continuation points in the optimized some implementation are // after the ToObject(O) call that ensures we are dealing with a // JSReceiver. @@ -53,9 +52,9 @@ namespace array { } transitioning builtin ArraySomeLoopContinuation(implicit context: Context)( - _receiver: JSReceiver, callbackfn: Callable, thisArg: Object, - _array: Object, o: JSReceiver, initialK: Number, length: Number, - _initialTo: Object): Object { + _receiver: JSReceiver, callbackfn: Callable, thisArg: JSAny, + _array: JSAny, o: JSReceiver, initialK: Number, length: Number, + _initialTo: JSAny): JSAny { // 5. Let k be 0. // 6. Repeat, while k < len for (let k: Number = initialK; k < length; k++) { @@ -69,10 +68,10 @@ namespace array { // 6c. If kPresent is true, then if (kPresent == True) { // 6c. i. Let kValue be ? Get(O, Pk). - const kValue: Object = GetProperty(o, k); + const kValue: JSAny = GetProperty(o, k); // 6c. ii. Perform ? Call(callbackfn, T, ). - const result: Object = Call(context, callbackfn, thisArg, kValue, k, o); + const result: JSAny = Call(context, callbackfn, thisArg, kValue, k, o); // iii. If selected is true, then... 
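The `some` loop above follows the spec steps: holes fail the HasProperty check and are never handed to the callback, and the callback's result only matters for truthiness. For instance:

  const visited: number[] = [];
  const sparse = [1, , 3];                  // index 1 is a hole
  const hit = sparse.some((x, i) => {
    visited.push(i);
    return (x ?? 0) > 2;                    // only truthiness matters
  });
  console.log(hit, visited);                // true [0, 2] (the hole is skipped)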
if (ToBoolean(result)) { @@ -86,7 +85,7 @@ namespace array { } transitioning macro FastArraySome(implicit context: Context)( - o: JSReceiver, len: Number, callbackfn: Callable, thisArg: Object): Object + o: JSReceiver, len: Number, callbackfn: Callable, thisArg: JSAny): JSAny labels Bailout(Smi) { let k: Smi = 0; const smiLen = Cast(len) otherwise goto Bailout(k); @@ -99,8 +98,8 @@ namespace array { // Ensure that we haven't walked beyond a possibly updated length. if (k >= fastOW.Get().length) goto Bailout(k); - const value: Object = fastOW.LoadElementNoHole(k) otherwise continue; - const result: Object = + const value: JSAny = fastOW.LoadElementNoHole(k) otherwise continue; + const result: JSAny = Call(context, callbackfn, thisArg, value, k, fastOW.Get()); if (ToBoolean(result)) { return True; @@ -111,8 +110,8 @@ namespace array { // https://tc39.github.io/ecma262/#sec-array.prototype.some transitioning javascript builtin - ArraySome(js-implicit context: Context, receiver: Object)(...arguments): - Object { + ArraySome(js-implicit context: Context, receiver: JSAny)(...arguments): + JSAny { try { RequireObjectCoercible(receiver, 'Array.prototype.some'); @@ -129,7 +128,7 @@ namespace array { const callbackfn = Cast(arguments[0]) otherwise TypeError; // 4. If thisArg is present, let T be thisArg; else let T be undefined. - const thisArg: Object = arguments.length > 1 ? arguments[1] : Undefined; + const thisArg: JSAny = arguments.length > 1 ? arguments[1] : Undefined; // Special cases. try { diff --git a/deps/v8/src/builtins/array-splice.tq b/deps/v8/src/builtins/array-splice.tq index 3b65bb03d48bac..04885dbb9c481a 100644 --- a/deps/v8/src/builtins/array-splice.tq +++ b/deps/v8/src/builtins/array-splice.tq @@ -95,7 +95,7 @@ namespace array_splice { const typedNewElements: FixedArrayType = UnsafeCast(a.elements); for (let i: intptr = 2; i < args.length; ++i) { - const e: Object = args[i]; + const e: JSAny = args[i]; // The argument elements were already validated to be an appropriate // {ElementType} to store in {FixedArrayType}. typedNewElements[k++] = UnsafeCast(e); @@ -109,7 +109,7 @@ namespace array_splice { transitioning macro FastArraySplice( context: Context, args: Arguments, o: JSReceiver, originalLengthNumber: Number, actualStartNumber: Number, insertCount: Smi, - actualDeleteCountNumber: Number): Object + actualDeleteCountNumber: Number): JSAny labels Bailout { const originalLength: Smi = Cast(originalLengthNumber) otherwise Bailout; @@ -132,7 +132,7 @@ namespace array_splice { const oldElementsKind: ElementsKind = elementsKind; for (let i: intptr = 2; i < args.length; ++i) { - const e: Object = args[i]; + const e: JSAny = args[i]; if (IsFastSmiElementsKind(elementsKind)) { if (TaggedIsNotSmi(e)) { const heapObject: HeapObject = UnsafeCast(e); @@ -166,7 +166,7 @@ namespace array_splice { } if (IsFastSmiOrTaggedElementsKind(elementsKind)) { - FastSplice( + FastSplice( args, a, length, newLength, actualStart, insertCount, actualDeleteCount); } else { @@ -180,7 +180,7 @@ namespace array_splice { transitioning macro FillDeletedElementsArray( context: Context, o: JSReceiver, actualStart: Number, - actualDeleteCount: Number, a: JSReceiver): Object { + actualDeleteCount: Number, a: JSReceiver): JSAny { // 10. Let k be 0. let k: Number = 0; @@ -195,7 +195,7 @@ namespace array_splice { // c. If fromPresent is true, then if (fromPresent == True) { // i. Let fromValue be ? Get(O, from). - const fromValue: Object = GetProperty(o, from); + const fromValue: JSAny = GetProperty(o, from); // ii. 
Perform ? CreateDataPropertyOrThrow(A, ! ToString(k), fromValue). FastCreateDataProperty(a, k, fromValue); @@ -231,7 +231,7 @@ namespace array_splice { // iv. If fromPresent is true, then if (fromPresent == True) { // 1. Let fromValue be ? Get(O, from). - const fromValue: Object = GetProperty(o, from); + const fromValue: JSAny = GetProperty(o, from); // 2. Perform ? Set(O, to, fromValue, true). SetProperty(o, to, fromValue); @@ -280,7 +280,7 @@ namespace array_splice { // iv. If fromPresent is true, then if (fromPresent == True) { // 1. Let fromValue be ? Get(O, from). - const fromValue: Object = GetProperty(o, from); + const fromValue: JSAny = GetProperty(o, from); // 2. Perform ? Set(O, to, fromValue, true). SetProperty(o, to, fromValue); @@ -298,8 +298,7 @@ namespace array_splice { transitioning macro SlowSplice( context: Context, arguments: Arguments, o: JSReceiver, len: Number, - actualStart: Number, insertCount: Smi, - actualDeleteCount: Number): Object { + actualStart: Number, insertCount: Smi, actualDeleteCount: Number): JSAny { // 9. Let A be ? ArraySpeciesCreate(O, actualDeleteCount). const a: JSReceiver = ArraySpeciesCreate(context, o, actualDeleteCount); const itemCount: Number = insertCount; @@ -332,7 +331,7 @@ namespace array_splice { // element. if (arguments.length > 2) { for (let i: intptr = 2; i < arguments.length; ++i) { - const e: Object = arguments[i]; + const e: JSAny = arguments[i]; // b. Perform ? Set(O, ! ToString(k), E, true). SetProperty(o, k, e); @@ -350,8 +349,8 @@ namespace array_splice { // https://tc39.github.io/ecma262/#sec-array.prototype.splice transitioning javascript builtin - ArrayPrototypeSplice(js-implicit context: Context, receiver: Object)( - ...arguments): Object { + ArrayPrototypeSplice(js-implicit context: Context, receiver: JSAny)( + ...arguments): JSAny { // 1. Let O be ? ToObject(this value). const o: JSReceiver = ToObject(context, receiver); @@ -359,7 +358,7 @@ namespace array_splice { const len: Number = GetLengthProperty(o); // 3. Let relativeStart be ? ToInteger(start). - const start: Object = arguments[0]; + const start: JSAny = arguments[0]; const relativeStart: Number = ToInteger_Inline(context, start); // 4. If relativeStart < 0, let actualStart be max((len + relativeStart), @@ -388,7 +387,7 @@ namespace array_splice { // a. Let insertCount be the Number of actual arguments minus 2. insertCount = Convert(arguments.length) - 2; // b. Let dc be ? ToInteger(deleteCount). - const deleteCount: Object = arguments[1]; + const deleteCount: JSAny = arguments[1]; const dc: Number = ToInteger_Inline(context, deleteCount); // c. Let actualDeleteCount be min(max(dc, 0), len - actualStart). actualDeleteCount = Min(Max(dc, 0), len - actualStart); diff --git a/deps/v8/src/builtins/array-unshift.tq b/deps/v8/src/builtins/array-unshift.tq index e685d520cd963a..422eee158de535 100644 --- a/deps/v8/src/builtins/array-unshift.tq +++ b/deps/v8/src/builtins/array-unshift.tq @@ -3,25 +3,10 @@ // found in the LICENSE file. 
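The splice path above collects the removed elements into a separate array (`FillDeletedElementsArray`) while shifting the remaining ones in place, which is exactly what the JavaScript result shows:

  const a: (number | string)[] = [1, 2, 3, 4, 5];
  const removed = a.splice(1, 2, "x");     // delete 2 starting at index 1, insert "x"
  console.log(removed, a);                 // [2, 3] [1, "x", 4, 5]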
namespace array_unshift { - extern builtin ArrayUnshift(Context, JSFunction, Object, int32); - - macro TryFastArrayUnshift( - context: Context, receiver: Object, arguments: Arguments): never - labels Slow { - const array: FastJSArray = Cast(receiver) otherwise Slow; - array::EnsureWriteableFastElements(array); - - const map: Map = array.map; - if (!IsExtensibleMap(map)) goto Slow; - EnsureArrayLengthWritable(map) otherwise Slow; - - tail ArrayUnshift( - context, LoadTargetFromFrame(), Undefined, - Convert(arguments.length)); - } + extern builtin ArrayUnshift(Context, JSFunction, JSAny, int32): JSAny; transitioning macro GenericArrayUnshift( - context: Context, receiver: Object, arguments: Arguments): Number { + context: Context, receiver: JSAny, arguments: Arguments): Number { // 1. Let O be ? ToObject(this value). const object: JSReceiver = ToObject_Inline(context, receiver); @@ -55,7 +40,7 @@ namespace array_unshift { // iv. If fromPresent is true, then if (fromPresent == True) { // 1. Let fromValue be ? Get(O, from). - const fromValue: Object = GetProperty(object, from); + const fromValue: JSAny = GetProperty(object, from); // 2. Perform ? Set(O, to, fromValue, true). SetProperty(object, to, fromValue); @@ -93,11 +78,20 @@ namespace array_unshift { // https://tc39.github.io/ecma262/#sec-array.prototype.unshift transitioning javascript builtin ArrayPrototypeUnshift( - js-implicit context: Context, receiver: Object)(...arguments): Object { + js-implicit context: Context, receiver: JSAny)(...arguments): JSAny { try { - TryFastArrayUnshift(context, receiver, arguments) otherwise Baseline; + const array: FastJSArray = Cast(receiver) otherwise Slow; + array::EnsureWriteableFastElements(array); + + const map: Map = array.map; + if (!IsExtensibleMap(map)) goto Slow; + EnsureArrayLengthWritable(map) otherwise Slow; + + tail ArrayUnshift( + context, LoadTargetFromFrame(), Undefined, + Convert(arguments.length)); } - label Baseline { + label Slow { return GenericArrayUnshift(context, receiver, arguments); } } diff --git a/deps/v8/src/builtins/array.tq b/deps/v8/src/builtins/array.tq index 7e044e086b89b3..b9ae314c08a297 100644 --- a/deps/v8/src/builtins/array.tq +++ b/deps/v8/src/builtins/array.tq @@ -32,30 +32,15 @@ namespace array { assert(array.elements.map != kCOWMap); } - macro IsJSArray(implicit context: Context)(o: Object): bool { - typeswitch (o) { - case (JSArray): { - return true; - } - case (Object): { - return false; - } - } - } - - macro LoadElementOrUndefined(a: FixedArray, i: Smi): Object { - const e: Object = a.objects[i]; - return e == TheHole ? 
Undefined : e; + macro LoadElementOrUndefined(implicit context: + Context)(a: FixedArray, i: Smi): JSAny { + const e = UnsafeCast<(JSAny | TheHole)>(a.objects[i]); + return ReplaceTheHoleWithUndefined(e); } macro LoadElementOrUndefined(a: FixedDoubleArray, i: Smi): NumberOrUndefined { - try { - const f: float64 = LoadDoubleWithHoleCheck(a, i) otherwise IfHole; - return AllocateHeapNumberWithValue(f); - } - label IfHole { - return Undefined; - } + const f: float64 = LoadDoubleWithHoleCheck(a, i) otherwise return Undefined; + return AllocateHeapNumberWithValue(f); } macro StoreArrayHole(elements: FixedDoubleArray, k: Smi): void { @@ -66,5 +51,5 @@ namespace array { elements.objects[k] = TheHole; } - extern macro SetPropertyLength(implicit context: Context)(Object, Number); + extern macro SetPropertyLength(implicit context: Context)(JSAny, Number); } diff --git a/deps/v8/src/builtins/base.tq b/deps/v8/src/builtins/base.tq index 07af1f441f8060..aa5d4cc50a731c 100644 --- a/deps/v8/src/builtins/base.tq +++ b/deps/v8/src/builtins/base.tq @@ -43,6 +43,29 @@ extern class HeapObject extends Tagged { type Object = Smi | HeapObject; +// Defined to coincide with https://tc39.es/ecma262/#sec-ispropertykey +// Doesn't include PrivateSymbol. +type PropertyKey = String | PublicSymbol; + +// TODO(tebbi): PrivateSymbol is only exposed to JavaScript through the debugger +// API. We should reconsider this and try not to expose it at all. Then JSAny +// would not need to contain it. + +// A JavaScript primitive value as defined in +// https://tc39.es/ecma262/#sec-primitive-value. +type JSPrimitive = Numeric | String | Symbol | Boolean | + Null | Undefined; + +// A user-exposed JavaScript value, as opposed to V8-internal values like +// TheHole or FixedArray. +type JSAny = JSReceiver | JSPrimitive; + +type JSAnyNotNumber = BigInt | String | Symbol | Boolean | + Null | Undefined | JSReceiver; + +// This is the intersection of JSAny and HeapObject. +type JSAnyNotSmi = JSAnyNotNumber | HeapNumber; + type int32 generates 'TNode' constexpr 'int32_t'; type uint32 generates 'TNode' constexpr 'uint32_t'; type int31 extends int32 @@ -56,6 +79,8 @@ type uint16 extends uint31 type int8 extends int16 generates 'TNode' constexpr 'int8_t'; type uint8 extends uint16 generates 'TNode' constexpr 'uint8_t'; +type char8 extends int8 constexpr 'char'; +type char16 extends uint16 constexpr 'char16_t'; type int64 generates 'TNode' constexpr 'int64_t'; type intptr generates 'TNode' constexpr 'intptr_t'; type uintptr generates 'TNode' constexpr 'uintptr_t'; @@ -77,7 +102,7 @@ extern class Context extends HeapObject { extension: Object; native_context: Object; } -type NativeContext extends Context; +type NativeContext extends Context generates 'TNode'; @generateCppClass extern class Oddball extends HeapObject { @@ -97,6 +122,9 @@ type Numeric = Number | BigInt; extern class Name extends HeapObject { hash_field: uint32; } +// This is the same as Name, but with the information that there are no other +// kinds of names. +type AnyName = PrivateSymbol | PublicSymbol | String; @generateCppClass extern class Symbol extends Name { @@ -104,6 +132,9 @@ extern class Symbol extends Name { name: Object; // The print name of a symbol, or undefined if none. 
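A loose TypeScript analogue of the new unions introduced above, just to show how they nest; the names mirror the Torque types, but the shapes are simplified stand-ins (Torque's Numeric, HeapNumber and friends have no direct TS equivalent):

  type Primitive = number | bigint | string | symbol | boolean | null | undefined;
  type Receiver = object;                  // stands in for JSReceiver
  type Any = Receiver | Primitive;         // JSAny = JSReceiver | JSPrimitive
  type PropertyKeyLike = string | symbol;  // PropertyKey = String | PublicSymbol

  function isReceiver(v: Any): v is Receiver {
    return typeof v === "function" || (typeof v === "object" && v !== null);
  }

  console.log(isReceiver({}), isReceiver("str"));  // true false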
} +type PublicSymbol extends Symbol; +type PrivateSymbol extends Symbol; + @abstract @generateCppClass extern class String extends Name { @@ -136,9 +167,11 @@ extern class SeqString extends String { } @generateCppClass extern class SeqOneByteString extends SeqString { + chars[length]: char8; } @generateCppClass extern class SeqTwoByteString extends SeqString { + chars[length]: char16; } @generateCppClass @@ -185,7 +218,6 @@ type DirectString extends String; type RootIndex generates 'TNode' constexpr 'RootIndex'; @abstract -@generateCppClass extern class FixedArrayBase extends HeapObject { length: Smi; } @@ -205,7 +237,7 @@ type LayoutDescriptor extends ByteArray type TransitionArray extends WeakFixedArray generates 'TNode'; -type InstanceType extends uint16 constexpr 'InstanceType'; +type InstanceType extends uint16 constexpr 'v8::internal::InstanceType'; extern class Map extends HeapObject { instance_size_in_words: uint8; @@ -388,8 +420,8 @@ extern class JSProxy extends JSReceiver { // Just a starting shape for JSObject; properties can move after initialization. @noVerifier extern class JSProxyRevocableResult extends JSObject { - proxy: Object; - revoke: Object; + proxy: JSAny; + revoke: JSAny; } macro NewJSProxyRevocableResult(implicit context: Context)( @@ -412,22 +444,24 @@ extern class JSGlobalProxy extends JSObject { @generateCppClass extern class JSPrimitiveWrapper extends JSObject { - value: Object; + value: JSAny; } -extern class JSArgumentsObject extends JSObject {} +@generateCppClass +extern class JSArgumentsObject extends JSObject { +} // Just a starting shape for JSObject; properties can move after initialization. @noVerifier @hasSameInstanceTypeAsParent extern class JSArgumentsObjectWithLength extends JSArgumentsObject { - length: Object; + length: JSAny; } // Just a starting shape for JSObject; properties can move after initialization. @hasSameInstanceTypeAsParent extern class JSSloppyArgumentsObject extends JSArgumentsObjectWithLength { - callee: Object; + callee: JSAny; } // Just a starting shape for JSObject; properties can move after initialization. @@ -492,8 +526,8 @@ type NoSharedNameSentinel extends Smi; @generateCppClass extern class CallHandlerInfo extends Struct { - callback: Foreign | Undefined; - js_callback: Foreign | Undefined; + callback: NonNullForeign | Undefined | Zero; + js_callback: NonNullForeign | Undefined | Zero; data: Object; } @@ -510,18 +544,37 @@ extern class Module extends HeapObject { type SourceTextModuleInfo extends FixedArray; +@generateCppClass extern class SourceTextModule extends Module { + // The code representing this module, or an abstraction thereof. code: SharedFunctionInfo | JSFunction | JSGeneratorObject | SourceTextModuleInfo; + + // Arrays of cells corresponding to regular exports and regular imports. + // A cell's position in the array is determined by the cell index of the + // associated module entry (which coincides with the variable index of the + // associated variable). regular_exports: FixedArray; regular_imports: FixedArray; + + // Modules imported or re-exported by this module. + // Corresponds 1-to-1 to the module specifier strings in + // SourceTextModuleInfo::module_requests. requested_modules: FixedArray; + + // Script from which the module originates. script: Script; + + // The value of import.meta inside of this module. + // Lazily initialized on first access. It's the hole before first access and + // a JSObject afterwards. 
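`JSProxyRevocableResult` above (now with `JSAny` fields) is the shape returned by `Proxy.revocable`: the proxy plus the function that neuters it, after which every operation on the proxy throws. For example:

  const target = { x: 1 };
  const { proxy, revoke } = Proxy.revocable(target, {});
  console.log(proxy.x);                    // 1
  revoke();
  try {
    console.log(proxy.x);                  // any trap on a revoked proxy throws
  } catch (e) {
    console.log(e instanceof TypeError);   // true
  }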
import_meta: TheHole | JSObject; + dfs_index: Smi; dfs_ancestor_index: Smi; } +@generateCppClass extern class SyntheticModule extends Module { name: String; export_names: FixedArray; @@ -529,6 +582,7 @@ extern class SyntheticModule extends Module { } @abstract +@generateCppClass extern class JSModuleNamespace extends JSObject { module: Module; } @@ -539,14 +593,23 @@ extern class TemplateList extends FixedArray { } @abstract +@generateCppClass extern class JSWeakCollection extends JSObject { + // The backing hash table mapping keys to values. table: Object; } -extern class JSWeakSet extends JSWeakCollection {} -extern class JSWeakMap extends JSWeakCollection {} +@generateCppClass +extern class JSWeakSet extends JSWeakCollection { +} +@generateCppClass +extern class JSWeakMap extends JSWeakCollection { +} +@generateCppClass extern class JSCollectionIterator extends JSObject { + // The backing hash table mapping keys to values. table: Object; + // The index into the data table. index: Object; } @@ -601,7 +664,10 @@ extern class Script extends Struct { host_defined_options: Object; } -extern class EmbedderDataArray extends HeapObject { length: Smi; } +@generateCppClass +extern class EmbedderDataArray extends HeapObject { + length: Smi; +} type ScopeInfo extends HeapObject generates 'TNode'; @@ -631,9 +697,15 @@ extern class SharedFunctionInfo extends HeapObject { @if(V8_SFI_HAS_UNIQUE_ID) unique_id: int32; } +@generateCppClass extern class JSBoundFunction extends JSObject { + // The wrapped function object. bound_target_function: Callable; - bound_this: Object; + // The value that is always passed as the this value when calling the wrapped + // function. + bound_this: JSAny; + // A list of values whose elements are used as the first arguments to any call + // to the wrapped function. bound_arguments: FixedArray; } @@ -644,7 +716,7 @@ extern class JSBoundFunction extends JSObject { type NonNullForeign extends Foreign; // A function built with InstantiateFunction for the public API. -type CallableApiObject extends HeapObject; +type CallableApiObject extends JSObject; // A JSProxy with the callable bit set. type CallableJSProxy extends JSProxy; @@ -729,14 +801,26 @@ extern class JSTypedArray extends JSArrayBufferView { } @abstract +@generateCppClass extern class JSCollection extends JSObject { + // The backing hash table. table: Object; } -extern class JSSet extends JSCollection {} -extern class JSMap extends JSCollection {} +@generateCppClass +extern class JSSet extends JSCollection { +} +@generateCppClass +extern class JSMap extends JSCollection { +} +@generateCppClass extern class JSDate extends JSObject { + // If one component is NaN, all of them are, indicating a NaN time value. + + // The time value. value: NumberOrUndefined; + + // Cached values: year: Undefined | Smi | NaN; month: Undefined | Smi | NaN; day: Undefined | Smi | NaN; @@ -744,6 +828,9 @@ extern class JSDate extends JSObject { hour: Undefined | Smi | NaN; min: Undefined | Smi | NaN; sec: Undefined | Smi | NaN; + + // Sample of the date cache stamp at the moment when chached fields were + // cached. cache_stamp: Undefined | Smi | NaN; } @@ -752,8 +839,11 @@ extern class JSGlobalObject extends JSObject { global_proxy: JSGlobalProxy; } +@generateCppClass extern class JSAsyncFromSyncIterator extends JSObject { sync_iterator: JSReceiver; + // The "next" method is loaded during GetIterator, and is not reloaded for + // subsequent "next" invocations. 
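The new comments on `JSBoundFunction` above describe exactly what `Function.prototype.bind` captures: `bound_this` is the receiver that is always used, and `bound_arguments` are prepended to every call. A small demonstration:

  function report(this: { name: string }, a: number, b: number): string {
    return `${this.name}: ${a}, ${b}`;
  }
  // bound_this is { name: "ctx" }, bound_arguments is [1]:
  const bound = report.bind({ name: "ctx" }, 1);
  console.log(bound(2));                   // "ctx: 1, 2"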
next: Object; } @@ -763,6 +853,7 @@ extern class JSStringIterator extends JSObject { } @abstract +@generateCppClass extern class TemplateInfo extends Struct { tag: Object; serial_number: Object; @@ -772,12 +863,15 @@ extern class TemplateInfo extends Struct { } @generatePrint +@generateCppClass extern class TemplateObjectDescription extends Struct { raw_strings: FixedArray; cooked_strings: FixedArray; } +@generateCppClass extern class FunctionTemplateRareData extends Struct { + // See DECL_RARE_ACCESSORS in FunctionTemplateInfo. prototype_template: Object; prototype_provider_template: Object; parent_template: Object; @@ -788,17 +882,31 @@ extern class FunctionTemplateRareData extends Struct { access_check_info: Object; } +@generateCppClass extern class FunctionTemplateInfo extends TemplateInfo { + // Handler invoked when calling an instance of this FunctionTemplateInfo. + // Either CallInfoHandler or Undefined. call_code: Object; class_name: Object; + // If the signature is a FunctionTemplateInfo it is used to check whether the + // receiver calling the associated JSFunction is a compatible receiver, i.e. + // it is an instance of the signature FunctionTemplateInfo or any of the + // receiver's prototypes are. signature: Object; - function_template_rare_data: Object; + // If any of the setters declared by DECL_RARE_ACCESSORS are used then a + // FunctionTemplateRareData will be stored here. Until then this contains + // undefined. + rare_data: HeapObject; shared_function_info: Object; flag: Smi; length: Smi; + // Either the_hole or a private symbol. Used to cache the result on + // the receiver under the the cached_property_name when this + // FunctionTemplateInfo is used as a getter. cached_property_name: Object; } +@generateCppClass extern class ObjectTemplateInfo extends TemplateInfo { constructor: Object; data: Object; @@ -809,7 +917,7 @@ extern class PropertyArray extends HeapObject { length_and_hash: Smi; } type DependentCode extends WeakFixedArray; extern class PropertyCell extends HeapObject { - name: Name; + name: AnyName; property_details_raw: Smi; value: Object; dependent_code: DependentCode; @@ -840,6 +948,7 @@ const UTF32: extern class Foreign extends HeapObject { foreign_address: RawPtr; } +@generateCppClass extern class InterceptorInfo extends Struct { getter: NonNullForeign | Zero | Undefined; setter: NonNullForeign | Zero | Undefined; @@ -852,6 +961,7 @@ extern class InterceptorInfo extends Struct { flags: Smi; } +@generateCppClass extern class AccessCheckInfo extends Struct { callback: Foreign | Zero | Undefined; named_interceptor: InterceptorInfo | Zero | Undefined; @@ -859,14 +969,21 @@ extern class AccessCheckInfo extends Struct { data: Object; } +@generateCppClass extern class ArrayBoilerplateDescription extends Struct { flags: Smi; constant_elements: FixedArrayBase; } -extern class AliasedArgumentsEntry extends Struct { aliased_context_slot: Smi; } +@generateCppClass +extern class AliasedArgumentsEntry extends Struct { + aliased_context_slot: Smi; +} -extern class Cell extends HeapObject { value: Object; } +@generateCppClass +extern class Cell extends HeapObject { + value: Object; +} extern class DataHandler extends Struct { smi_handler: Smi | Code; @@ -881,39 +998,58 @@ extern class DataHandler extends Struct { @abstract @dirtyInstantiatedAbstractClass +@generateCppClass extern class JSGeneratorObject extends JSObject { function: JSFunction; context: Context; - receiver: Object; + receiver: JSAny; + + // For executing generators: the most recent input value. 
+ // For suspended generators: debug information (bytecode offset). + // There is currently no need to remember the most recent input value for a + // suspended generator. input_or_debug_pos: Object; + resume_mode: Smi; continuation: Smi; + + // Saved interpreter register file. parameters_and_registers: FixedArray; } +@generateCppClass extern class JSAsyncFunctionObject extends JSGeneratorObject { promise: JSPromise; } +@generateCppClass extern class JSAsyncGeneratorObject extends JSGeneratorObject { + // Pointer to the head of a singly linked list of AsyncGeneratorRequest, or + // undefined. queue: HeapObject; is_awaiting: Smi; } +@generateCppClass extern class JSPromise extends JSObject { + // Smi 0 terminated list of PromiseReaction objects in case the JSPromise was + // not settled yet, otherwise the result. reactions_or_result: Object; flags: Smi; } @abstract +@generateCppClass extern class Microtask extends Struct { } +@generateCppClass extern class CallbackTask extends Microtask { callback: Foreign; data: Foreign; } +@generateCppClass extern class CallableTask extends Microtask { callable: JSReceiver; context: Context; @@ -931,11 +1067,13 @@ extern class StackFrameInfo extends Struct { type_name: String | Null | Undefined; eval_origin: String | Null | Undefined; wasm_module_name: String | Null | Undefined; + wasm_instance: WasmInstanceObject | Null | Undefined; flag: Smi; } type FrameArray extends FixedArray; +@generateCppClass extern class StackTraceFrame extends Struct { frame_array: FrameArray | Undefined; frame_index: Smi; @@ -943,6 +1081,7 @@ extern class StackTraceFrame extends Struct { id: Smi; } +@generateCppClass extern class ClassPositions extends Struct { start: Smi; end: Smi; @@ -958,7 +1097,7 @@ extern class WasmExportedFunctionData extends Struct { // The remaining fields are for fast calling from C++. The contract is // that they are lazily populated, and either all will be present or none. c_wrapper_code: Object; - wasm_call_target: Smi; // Pseudo-smi: one-bit shift on all platforms. 
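The `input_or_debug_pos` comment above ("the most recent input value") refers to the value fed back into a paused generator by `next()`:

  function* echo(): Generator<string, void, number> {
    const input = yield "ready";           // resumes with the value passed to next()
    yield `got ${input}`;
  }
  const gen = echo();
  console.log(gen.next().value);           // "ready"
  console.log(gen.next(42).value);         // "got 42"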
+ wasm_call_target: Smi | Foreign; packed_args_size: Smi; } @@ -972,7 +1111,7 @@ extern class WasmJSFunctionData extends Struct { extern class WasmCapiFunctionData extends Struct { call_target: RawPtr; - embedder_data: RawPtr; + embedder_data: Foreign; // Managed wrapper_code: Code; serialized_signature: ByteArray; // PodArray } @@ -995,7 +1134,13 @@ extern class WasmDebugInfo extends Struct { c_wasm_entry_map: Foreign | Undefined; // Managed } -extern class WasmExceptionTag extends Struct { index: Smi; } +@generateCppClass +extern class WasmExceptionTag extends Struct { + index: Smi; +} + +const kTaggedSize: constexpr int31 generates 'kTaggedSize'; +const kDoubleSize: constexpr int31 generates 'kDoubleSize'; const kSmiTagSize: constexpr int31 generates 'kSmiTagSize'; const V8_INFINITY: constexpr float64 generates 'V8_INFINITY'; @@ -1013,8 +1158,8 @@ const PACKED_DOUBLE_ELEMENTS: constexpr ElementsKind generates 'PACKED_DOUBLE_ELEMENTS'; const HOLEY_DOUBLE_ELEMENTS: constexpr ElementsKind generates 'HOLEY_DOUBLE_ELEMENTS'; -const LAST_FROZEN_ELEMENTS_KIND: - constexpr ElementsKind generates 'LAST_FROZEN_ELEMENTS_KIND'; +const LAST_ANY_NONEXTENSIBLE_ELEMENTS_KIND: constexpr ElementsKind + generates 'LAST_ANY_NONEXTENSIBLE_ELEMENTS_KIND'; const DICTIONARY_ELEMENTS: constexpr ElementsKind generates 'DICTIONARY_ELEMENTS'; @@ -1186,6 +1331,7 @@ extern macro Int32FalseConstant(): bool; extern macro EmptyStringConstant(): EmptyString; extern macro LengthStringConstant(): String; extern macro NanConstant(): NaN; +extern macro IteratorSymbolConstant(): Symbol; const TheHole: TheHole = TheHoleConstant(); const Null: Null = NullConstant(); @@ -1207,6 +1353,7 @@ const SKIP_WRITE_BARRIER: const UNSAFE_SKIP_WRITE_BARRIER: constexpr WriteBarrierMode generates 'UNSAFE_SKIP_WRITE_BARRIER'; +@generateCppClass extern class AsyncGeneratorRequest extends Struct { next: AsyncGeneratorRequest | Undefined; resume_mode: Smi; @@ -1214,6 +1361,7 @@ extern class AsyncGeneratorRequest extends Struct { promise: JSPromise; } +@generateCppClass extern class SourceTextModuleInfoEntry extends Struct { export_name: String | Undefined; local_name: String | Undefined; @@ -1224,31 +1372,43 @@ extern class SourceTextModuleInfoEntry extends Struct { end_pos: Smi; } +@generateCppClass extern class PromiseCapability extends Struct { promise: JSReceiver | Undefined; resolve: Object; reject: Object; } +@generateCppClass extern class PromiseReaction extends Struct { next: PromiseReaction | Zero; reject_handler: Callable | Undefined; fulfill_handler: Callable | Undefined; + // Either a JSPromise (in case of native promises), a PromiseCapability + // (general case), or undefined (in case of await). promise_or_capability: JSPromise | PromiseCapability | Undefined; } @abstract +@generateCppClass extern class PromiseReactionJobTask extends Microtask { argument: Object; context: Context; handler: Callable | Undefined; + // Either a JSPromise (in case of native promises), a PromiseCapability + // (general case), or undefined (in case of await). 
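The comment repeated above for `promise_or_capability` describes a three-state field. A TypeScript sketch of that union and how a consumer might branch on it; the capability shape here is an invented placeholder, and only the union structure mirrors the field:

  interface PromiseCapabilityLike<T> {
    promise: Promise<T>;
    resolve: (value: T) => void;
    reject: (reason?: unknown) => void;
  }
  type PromiseOrCapability<T> = Promise<T> | PromiseCapabilityLike<T> | undefined;

  function describe<T>(p: PromiseOrCapability<T>): string {
    if (p === undefined) return "await: nothing user-visible to settle";
    if (p instanceof Promise) return "native promise: settle it directly";
    return "capability: settle through resolve/reject";
  }

  console.log(describe<number>(undefined), describe(Promise.resolve(1)));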
promise_or_capability: JSPromise | PromiseCapability | Undefined; } -extern class PromiseFulfillReactionJobTask extends PromiseReactionJobTask {} +@generateCppClass +extern class PromiseFulfillReactionJobTask extends PromiseReactionJobTask { +} -extern class PromiseRejectReactionJobTask extends PromiseReactionJobTask {} +@generateCppClass +extern class PromiseRejectReactionJobTask extends PromiseReactionJobTask { +} +@generateCppClass extern class PromiseResolveThenableJobTask extends Microtask { context: Context; promise_to_resolve: JSPromise; @@ -1256,6 +1416,7 @@ extern class PromiseResolveThenableJobTask extends Microtask { thenable: JSReceiver; } +@generateCppClass extern class JSRegExp extends JSObject { data: FixedArray | Undefined; source: String | Undefined; @@ -1263,7 +1424,7 @@ extern class JSRegExp extends JSObject { } extern transitioning macro AllocateJSIteratorResult(implicit context: Context)( - Object, Boolean): JSObject; + JSAny, Boolean): JSObject; // Note: Although a condition for a FastJSRegExp is having a positive smi // lastIndex (see RegExpBuiltinsAssembler::BranchIfFastRegExp), it is possible @@ -1282,13 +1443,16 @@ RegExpBuiltinsAssembler::FastStoreLastIndex(FastJSRegExp, Smi): void; @hasSameInstanceTypeAsParent extern class JSRegExpResult extends JSArray { - index: Object; - input: Object; - groups: Object; + index: JSAny; + input: JSAny; + groups: JSAny; } +@generateCppClass extern class JSRegExpStringIterator extends JSObject { - iterating_reg_exp: Object; + // The [[IteratingRegExp]] internal property. + iterating_reg_exp: JSAny; + // The [[IteratedString]] internal property. iterated_string: String; flags: Smi; } @@ -1466,32 +1630,33 @@ extern macro Comment(constexpr string); extern macro StaticAssert(bool); extern macro Print(Object); extern macro DebugBreak(); -extern transitioning macro ToInteger_Inline(Context, Object): Number; +extern transitioning macro ToInteger_Inline(Context, JSAny): Number; extern transitioning macro ToInteger_Inline( - Context, Object, constexpr ToIntegerTruncationMode): Number; -extern transitioning macro ToLength_Inline(Context, Object): Number; -extern transitioning macro ToNumber_Inline(Context, Object): Number; -extern transitioning macro ToSmiIndex(implicit context: Context)(Object): + Context, JSAny, constexpr ToIntegerTruncationMode): Number; +extern transitioning macro ToLength_Inline(Context, JSAny): Number; +extern transitioning macro ToNumber_Inline(Context, JSAny): Number; +extern transitioning macro ToSmiIndex(implicit context: Context)(JSAny): PositiveSmi labels IfRangeError; -extern transitioning macro ToSmiLength(implicit context: Context)(Object): +extern transitioning macro ToSmiLength(implicit context: Context)(JSAny): PositiveSmi labels IfRangeError; -extern transitioning macro ToString_Inline(Context, Object): String; +extern transitioning macro ToString_Inline(Context, JSAny): String; extern transitioning macro ToThisString(implicit context: Context)( - Object, String): String; + JSAny, String): String; extern transitioning macro ToThisValue(implicit context: Context)( - Object, constexpr PrimitiveType, constexpr string): Object; + JSAny, constexpr PrimitiveType, constexpr string): JSAny; extern transitioning macro GetProperty(implicit context: Context)( - Object, Object): Object; + JSAny, JSAny): JSAny; extern transitioning builtin SetProperty(implicit context: Context)( - Object, Object, Object); + JSAny, JSAny, JSAny); extern transitioning builtin SetPropertyInLiteral(implicit context: Context)( - Object, 
Object, Object); + JSAny, JSAny, JSAny); extern transitioning builtin DeleteProperty(implicit context: Context)( - Object, Object, LanguageMode): Object; + JSAny, JSAny | PrivateSymbol, LanguageMode): Boolean; extern transitioning builtin HasProperty(implicit context: Context)( - Object, Object): Boolean; + JSAny, JSAny): Boolean; extern transitioning macro HasProperty_Inline(implicit context: Context)( - JSReceiver, Object): Boolean; + JSReceiver, JSAny): Boolean; +extern builtin LoadIC(Context, JSAny, JSAny, Smi, FeedbackVector): JSAny; extern macro ThrowRangeError(implicit context: Context)( constexpr MessageTemplate): never; @@ -1510,43 +1675,60 @@ extern macro ThrowTypeError(implicit context: Context)( extern transitioning runtime ThrowTypeErrorIfStrict(implicit context: Context)( Smi, Object, Object): void; -extern macro ArraySpeciesCreate(Context, Object, Number): JSReceiver; +extern transitioning macro ThrowIfNotJSReceiver(implicit context: Context)( + JSAny, constexpr MessageTemplate, constexpr string): void; + +extern macro ArraySpeciesCreate(Context, JSAny, Number): JSReceiver; extern macro ArrayCreate(implicit context: Context)(Number): JSArray; extern macro BuildAppendJSArray( - constexpr ElementsKind, FastJSArray, Object): void labels Bailout; + constexpr ElementsKind, FastJSArray, JSAny): void labels Bailout; extern macro EnsureArrayPushable(Map): ElementsKind labels Bailout; extern macro EnsureArrayLengthWritable(Map) labels Bailout; // TODO: Reduce duplication once varargs are supported in macros. extern macro Construct(implicit context: Context)( - Constructor, Object): JSReceiver; + Constructor, JSAny): JSReceiver; extern macro Construct(implicit context: Context)( - Constructor, Object, Object): JSReceiver; + Constructor, JSAny, JSAny): JSReceiver; extern macro Construct(implicit context: Context)( - Constructor, Object, Object, Object): JSReceiver; + Constructor, JSAny, JSAny, JSAny): JSReceiver; extern macro ConstructWithTarget(implicit context: Context)( Constructor, JSReceiver): JSReceiver; extern macro ConstructWithTarget(implicit context: Context)( - Constructor, JSReceiver, Object): JSReceiver; + Constructor, JSReceiver, JSAny): JSReceiver; extern macro SpeciesConstructor(implicit context: Context)( - Object, JSReceiver): JSReceiver; + JSAny, JSReceiver): JSReceiver; extern macro ConstructorBuiltinsAssembler::IsDictionaryMap(Map): bool; extern macro CodeStubAssembler::AllocateNameDictionary(constexpr int32): NameDictionary; -extern builtin ToObject(Context, Object): JSReceiver; -extern macro ToObject_Inline(Context, Object): JSReceiver; +extern builtin ToObject(Context, JSAny): JSReceiver; +extern macro ToObject_Inline(Context, JSAny): JSReceiver; extern macro IsNullOrUndefined(Object): bool; extern macro IsTheHole(Object): bool; extern macro IsString(HeapObject): bool; -transitioning builtin ToString(context: Context, o: Object): String { +transitioning builtin ToString(context: Context, o: JSAny): String { return ToStringImpl(context, o); } -extern transitioning runtime ToStringRT(Context, Object): String; +extern transitioning runtime ToStringRT(Context, JSAny): String; extern transitioning builtin NonPrimitiveToPrimitive_String( - Context, Object): Object; + Context, JSAny): JSPrimitive; +extern transitioning builtin NonPrimitiveToPrimitive_Default( + Context, JSAny): JSPrimitive; + +transitioning macro ToPrimitiveDefault(implicit context: Context)(v: JSAny): + JSPrimitive { + typeswitch (v) { + case (v: JSReceiver): { + return 
NonPrimitiveToPrimitive_Default(context, v); + } + case (v: JSPrimitive): { + return v; + } + } +} extern transitioning runtime NormalizeElements(Context, JSObject); extern transitioning runtime TransitionElementsKindWithKind( @@ -1556,18 +1738,15 @@ extern macro LoadBufferObject(RawPtr, constexpr int32): Object; extern macro LoadBufferPointer(RawPtr, constexpr int32): RawPtr; extern macro LoadBufferSmi(RawPtr, constexpr int32): Smi; -extern macro LoadRoot(constexpr RootIndex): Object; -extern macro StoreRoot(constexpr RootIndex, Object): Object; - extern runtime StringEqual(Context, String, String): Oddball; extern builtin StringLessThan(Context, String, String): Boolean; extern macro StringCharCodeAt(String, intptr): int32; extern runtime StringCompareSequence(Context, String, String, Number): Boolean; extern macro StringFromSingleCharCode(int32): String; -extern macro StrictEqual(Object, Object): Boolean; +extern macro StrictEqual(JSAny, JSAny): Boolean; extern macro SmiLexicographicCompare(Smi, Smi): Smi; -extern runtime ReThrow(Context, Object): never; +extern runtime ReThrow(Context, JSAny): never; extern runtime ThrowInvalidStringLength(Context): never; extern operator '==' macro WordEqual(RawPtr, RawPtr): bool; @@ -1638,38 +1817,51 @@ extern operator '<' macro Float64LessThan(float64, float64): bool; extern macro BranchIfNumberEqual(Number, Number): never labels Taken, NotTaken; operator '==' macro IsNumberEqual(a: Number, b: Number): bool { - return (BranchIfNumberEqual(a, b)) ? true : false; + BranchIfNumberEqual(a, b) otherwise return true, return false; } operator '!=' macro IsNumberNotEqual(a: Number, b: Number): bool { - return (BranchIfNumberEqual(a, b)) ? false : true; + return !(a == b); } -extern operator '<' macro BranchIfNumberLessThan(Number, Number): never +extern macro BranchIfNumberLessThan(Number, Number): never labels Taken, NotTaken; -extern operator '<=' macro BranchIfNumberLessThanOrEqual(Number, Number): never +operator '<' macro NumberIsLessThan(a: Number, b: Number): bool { + BranchIfNumberLessThan(a, b) otherwise return true, return false; +} +extern macro BranchIfNumberLessThanOrEqual(Number, Number): never labels Taken, NotTaken; +operator '<=' macro NumberIsLessThanOrEqual(a: Number, b: Number): bool { + BranchIfNumberLessThanOrEqual(a, b) otherwise return true, return false; +} -extern operator '>' macro BranchIfNumberGreaterThan(Number, Number): never - labels Taken, NotTaken; -extern operator '>=' macro BranchIfNumberGreaterThanOrEqual( - Number, Number): never +operator '>' macro NumberIsGreaterThan(a: Number, b: Number): bool { + return b < a; +} +operator '>=' macro NumberIsGreaterThanOrEqual(a: Number, b: Number): bool { + return b <= a; +} + +extern macro BranchIfFloat64IsNaN(float64): never labels Taken, NotTaken; +macro Float64IsNaN(n: float64): bool { + BranchIfFloat64IsNaN(n) otherwise return true, return false; +} -// The type of all tagged values that can safely be compared with WordEqual. +// The type of all tagged values that can safely be compared with TaggedEqual. 
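The operator changes above keep three primitive Number checks (`==`, `<`, `<=`, each built on a Branch macro) and derive the rest: `!=` is `!(a == b)`, `a > b` is `b < a`, and `a >= b` is `b <= a`. Deriving `>=` as `!(a < b)` instead would give the wrong answer for NaN, which is presumably why `<=` stays a primitive. An illustrative sketch of the same layering (the helper is invented):

  function makeComparisons(
      eq: (a: number, b: number) => boolean,
      lt: (a: number, b: number) => boolean,
      le: (a: number, b: number) => boolean) {
    return {
      ne: (a: number, b: number) => !eq(a, b),  // IsNumberNotEqual
      gt: (a: number, b: number) => lt(b, a),   // NumberIsGreaterThan
      ge: (a: number, b: number) => le(b, a),   // NumberIsGreaterThanOrEqual
    };
  }

  const cmp = makeComparisons((a, b) => a === b, (a, b) => a < b, (a, b) => a <= b);
  console.log(cmp.ge(NaN, 1), !(NaN < 1));      // false true (the formulations differ)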
type TaggedWithIdentity = JSReceiver | FixedArrayBase | Oddball | Map | EmptyString; -extern operator '==' macro WordEqual(TaggedWithIdentity, Object): bool; -extern operator '==' macro WordEqual(Object, TaggedWithIdentity): bool; -extern operator '==' macro WordEqual( +extern operator '==' macro TaggedEqual(TaggedWithIdentity, Object): bool; +extern operator '==' macro TaggedEqual(Object, TaggedWithIdentity): bool; +extern operator '==' macro TaggedEqual( TaggedWithIdentity, TaggedWithIdentity): bool; -extern operator '!=' macro WordNotEqual(TaggedWithIdentity, Object): bool; -extern operator '!=' macro WordNotEqual(Object, TaggedWithIdentity): bool; -extern operator '!=' macro WordNotEqual( +extern operator '!=' macro TaggedNotEqual(TaggedWithIdentity, Object): bool; +extern operator '!=' macro TaggedNotEqual(Object, TaggedWithIdentity): bool; +extern operator '!=' macro TaggedNotEqual( TaggedWithIdentity, TaggedWithIdentity): bool; // Do not overload == and != if it is unclear if object identity is the right // equality. -extern macro WordEqual(Object, Object): bool; -extern macro WordNotEqual(Object, Object): bool; +extern macro TaggedEqual(Object, Object): bool; +extern macro TaggedNotEqual(Object, Object): bool; extern operator '+' macro SmiAdd(Smi, Smi): Smi; extern operator '-' macro SmiSub(Smi, Smi): Smi; @@ -1707,11 +1899,14 @@ extern operator '*' macro ConstexprInt31Mul( constexpr int31, constexpr int31): constexpr int31; extern operator '-' macro Int32Sub(int32, int32): int32; extern operator '*' macro Int32Mul(int32, int32): int32; +extern operator '/' macro Int32Div(int32, int32): int32; extern operator '%' macro Int32Mod(int32, int32): int32; extern operator '&' macro Word32And(int32, int32): int32; extern operator '&' macro Word32And(uint32, uint32): uint32; extern operator '==' macro ConstexprInt31Equal(constexpr int31, constexpr int31): constexpr bool; +extern operator '!=' macro +ConstexprInt31NotEqual(constexpr int31, constexpr int31): constexpr bool; extern operator '>=' macro ConstexprInt31GreaterThanEqual( constexpr int31, constexpr int31): constexpr bool; @@ -1732,6 +1927,8 @@ extern operator '!=' macro Word32NotEqual(bool, bool): bool; extern operator '+' macro Float64Add(float64, float64): float64; extern operator '-' macro Float64Sub(float64, float64): float64; +extern operator '*' macro Float64Mul(float64, float64): float64; +extern operator '/' macro Float64Div(float64, float64): float64; extern operator '+' macro NumberAdd(Number, Number): Number; extern operator '-' macro NumberSub(Number, Number): Number; @@ -1845,6 +2042,146 @@ Cast(o: Object): Number return TaggedToNumber(o) otherwise CastError; } +Cast(o: Object): Undefined + labels CastError { + if (o != Undefined) goto CastError; + return %RawDownCast(o); +} + +Cast(o: Object): Numeric labels CastError { + typeswitch (o) { + case (o: Number): { + return o; + } + case (o: BigInt): { + return o; + } + case (HeapObject): { + goto CastError; + } + } +} + +Cast(o: Object): TheHole labels CastError { + if (o == TheHole) return %RawDownCast(o); + goto CastError; +} + +Cast(o: HeapObject): TheHole labels CastError { + const o: Object = o; + return Cast(o) otherwise CastError; +} + +Cast(o: Object): True labels CastError { + if (o == True) return %RawDownCast(o); + goto CastError; +} + +Cast(o: HeapObject): True labels CastError { + const o: Object = o; + return Cast(o) otherwise CastError; +} + +Cast(o: Object): False labels CastError { + if (o == False) return %RawDownCast(o); + goto CastError; +} + +Cast(o: 
HeapObject): False labels CastError { + const o: Object = o; + return Cast(o) otherwise CastError; +} + +Cast(o: Object): Boolean labels CastError { + typeswitch (o) { + case (o: True): { + return o; + } + case (o: False): { + return o; + } + case (Object): { + goto CastError; + } + } +} + +Cast(o: HeapObject): Boolean labels CastError { + const o: Object = o; + return Cast(o) otherwise CastError; +} + +// TODO(tebbi): These trivial casts for union types should be generated +// automatically. + +Cast(o: Object): JSPrimitive labels CastError { + typeswitch (o) { + case (o: Numeric): { + return o; + } + case (o: String): { + return o; + } + case (o: Symbol): { + return o; + } + case (o: Boolean): { + return o; + } + case (o: Undefined): { + return o; + } + case (o: Null): { + return o; + } + case (Object): { + goto CastError; + } + } +} + +Cast(o: Object): JSAny labels CastError { + typeswitch (o) { + case (o: JSPrimitive): { + return o; + } + case (o: JSReceiver): { + return o; + } + case (Object): { + goto CastError; + } + } +} + +Cast(o: Object): JSAny | TheHole labels CastError { + typeswitch (o) { + case (o: JSAny): { + return o; + } + case (o: TheHole): { + return o; + } + case (Object): { + goto CastError; + } + } +} + +Cast(o: Object): Number | TheHole labels CastError { + typeswitch (o) { + case (o: Number): { + return o; + } + case (o: TheHole): { + return o; + } + case (Object): { + goto CastError; + } + } +} + macro Cast(o: HeapObject): A labels CastError; @@ -1859,6 +2196,12 @@ Cast(o: HeapObject): Null return %RawDownCast(o); } +Cast(o: HeapObject): Undefined + labels CastError { + const o: Object = o; + return Cast(o) otherwise CastError; +} + Cast(o: HeapObject): FixedArray labels CastError { return HeapObjectToFixedArray(o) otherwise CastError; @@ -1928,6 +2271,12 @@ Cast(o: HeapObject): Context goto CastError; } +Cast(o: HeapObject): NativeContext + labels CastError { + if (IsNativeContext(o)) return %RawDownCast(o); + goto CastError; +} + Cast(o: HeapObject): JSObject labels CastError { if (IsJSObject(o)) return %RawDownCast(o); @@ -1957,6 +2306,27 @@ Cast(o: HeapObject): Symbol goto CastError; } +macro Cast(o: Symbol): T labels CastError; +Cast(o: Symbol): PublicSymbol labels CastError { + if (IsPrivateSymbol(o)) goto CastError; + return %RawDownCast(o); +} +Cast(o: Symbol): PrivateSymbol labels CastError { + if (IsPrivateSymbol(o)) { + return %RawDownCast(o); + } + goto CastError; +} + +Cast(o: HeapObject): PublicSymbol labels CastError { + const o = Cast(o) otherwise CastError; + return Cast(o) otherwise CastError; +} +Cast(o: HeapObject): PrivateSymbol labels CastError { + const o = Cast(o) otherwise CastError; + return Cast(o) otherwise CastError; +} + Cast(o: HeapObject): DirectString labels CastError { return TaggedToDirectString(o) otherwise CastError; @@ -2014,7 +2384,13 @@ Cast(implicit context: Context)(o: HeapObject): Cast(implicit context: Context)(o: HeapObject): FastJSRegExp labels CastError { - if (regexp::BranchIfFastRegExp(o)) return %RawDownCast(o); + // TODO(jgruber): Remove or redesign this. There is no single 'fast' regexp, + // the conditions to make a regexp object fast differ based on the callsite. + // For now, run the strict variant since replace (the only current callsite) + // accesses flag getters. + if (regexp::IsFastRegExpStrict(o)) { + return %RawDownCast(o); + } goto CastError; } @@ -2042,7 +2418,8 @@ Cast(implicit context: Context)(o: HeapObject): // Bailout if receiver has slow elements. 
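The Cast overloads above all share one shape: attempt to narrow Object (or HeapObject) to a more precise type and jump to the CastError label when the value does not match. As a loose analogy only (not V8 API), the TypeScript sketch below shows the same narrow-or-bail-out contract; castToPrimitive and JSPrimitiveLike are invented names.

```ts
// Loose analogy for `Cast<T>(o) otherwise CastError`: narrow a broad value or
// take an explicit bail-out path. Names are invented for illustration.
type JSPrimitiveLike = string | number | bigint | boolean | symbol | null | undefined;

function castToPrimitive(o: unknown): JSPrimitiveLike {
  if (o === null || (typeof o !== "object" && typeof o !== "function")) {
    return o as JSPrimitiveLike;      // success: the value really is primitive
  }
  throw new TypeError("CastError");   // corresponds to `goto CastError`
}

// Callers either get the narrowed value or land on the error path, which is
// what lets later code rely on the stricter type.
try {
  const p = castToPrimitive(Symbol("x"));
  console.log(typeof p);              // "symbol"
} catch {
  // the CastError path
}
```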
const elementsKind: ElementsKind = LoadMapElementsKind(map); - if (!IsElementsKindLessThanOrEqual(elementsKind, LAST_FROZEN_ELEMENTS_KIND)) + if (!IsElementsKindLessThanOrEqual( + elementsKind, LAST_ANY_NONEXTENSIBLE_ELEMENTS_KIND)) goto CastError; // Verify that our prototype is the initial array prototype. @@ -2076,7 +2453,7 @@ Cast(implicit context: Context)( return %RawDownCast(a); } -Cast(implicit context: Context)(o: HeapObject): JSReceiver +Cast(o: HeapObject): JSReceiver labels CastError { if (IsJSReceiver(o)) return %RawDownCast(o); goto CastError; @@ -2103,6 +2480,21 @@ Cast(implicit context: Context)(o: HeapObject): CoverageInfo goto CastError; } +Cast(o: HeapObject): JSReceiver | Null + labels CastError { + typeswitch (o) { + case (o: Null): { + return o; + } + case (o: JSReceiver): { + return o; + } + case (HeapObject): { + goto CastError; + } + } +} + extern macro AllocateHeapNumberWithValue(float64): HeapNumber; extern macro ChangeInt32ToTagged(int32): Number; extern macro ChangeUint32ToTagged(uint32): Number; @@ -2132,8 +2524,8 @@ extern macro ChangeUint32ToWord(uint32): uintptr; // Doesn't sign-extend. extern macro LoadNativeContext(Context): NativeContext; extern macro TruncateFloat64ToFloat32(float64): float32; extern macro TruncateHeapNumberValueToWord32(HeapNumber): int32; -extern macro LoadJSArrayElementsMap(constexpr ElementsKind, Context): Map; -extern macro LoadJSArrayElementsMap(ElementsKind, Context): Map; +extern macro LoadJSArrayElementsMap(constexpr ElementsKind, NativeContext): Map; +extern macro LoadJSArrayElementsMap(ElementsKind, NativeContext): Map; extern macro ChangeNonnegativeNumberToUintPtr(Number): uintptr; extern macro TryNumberToUintPtr(Number): uintptr labels IfNegative; extern macro NumberConstant(constexpr float64): Number; @@ -2157,6 +2549,7 @@ extern macro IntPtrConstant(constexpr ContextSlot): ContextSlot; extern macro IntPtrConstant(constexpr intptr): intptr; extern macro PointerConstant(constexpr RawPtr): RawPtr; extern macro SingleCharacterStringConstant(constexpr string): String; +extern macro Float64SilenceNaN(float64): float64; extern macro BitcastWordToTaggedSigned(intptr): Smi; extern macro BitcastWordToTaggedSigned(uintptr): Smi; @@ -2241,6 +2634,9 @@ FromConstexpr(e: constexpr ElementsKind): FromConstexpr(s: constexpr string): Object { return StringConstant(s); } +FromConstexpr(s: constexpr string): JSAny { + return StringConstant(s); +} FromConstexpr( c: constexpr NativeContextSlot): NativeContextSlot { return IntPtrConstant(c); @@ -2384,20 +2780,9 @@ Convert(v: Smi): bint { return SmiToBInt(v); } -macro BranchIf(implicit context: Context)(o: B): never - labels True, False { - Cast(o) otherwise False; - goto True; -} - -macro BranchIfNot(implicit context: Context)(o: B): never - labels True, False { - Cast(o) otherwise True; - goto False; -} - macro Is(implicit context: Context)(o: B): bool { - return (BranchIf(o)) ? 
true : false; + Cast(o) otherwise return false; + return true; } macro UnsafeCast(implicit context: Context)(o: Object): A { @@ -2405,17 +2790,15 @@ macro UnsafeCast(implicit context: Context)(o: Object): A { return %RawDownCast(o); } -UnsafeCast(o: Object): Object { - return o; -} +extern macro FixedArrayMapConstant(): Map; +extern macro FixedCOWArrayMapConstant(): Map; +extern macro EmptyByteArrayConstant(): ByteArray; +extern macro EmptyFixedArrayConstant(): FixedArray; -const kFixedArrayMap: Map = - %RawDownCast(LoadRoot(kFixedArrayMapRootIndex)); -const kCOWMap: Map = %RawDownCast(LoadRoot(kFixedCOWArrayMapRootIndex)); -const kEmptyByteArray: ByteArray = - %RawDownCast(LoadRoot(kEmptyByteArrayRootIndex)); -const kEmptyFixedArray: FixedArray = - %RawDownCast(LoadRoot(kEmptyFixedArrayRootIndex)); +const kFixedArrayMap: Map = FixedArrayMapConstant(); +const kCOWMap: Map = FixedCOWArrayMapConstant(); +const kEmptyByteArray: ByteArray = EmptyByteArrayConstant(); +const kEmptyFixedArray: FixedArray = EmptyFixedArrayConstant(); extern macro IsPrototypeInitialArrayPrototype(implicit context: Context)(Map): bool; @@ -2478,6 +2861,8 @@ extern operator '.floats[]=' macro StoreFixedDoubleArrayElement( FixedDoubleArray, intptr, float64): void; extern operator '.floats[]=' macro StoreFixedDoubleArrayElementSmi( FixedDoubleArray, Smi, float64): void; +extern operator '.floats[]' macro LoadFixedDoubleArrayElement( + FixedDoubleArray, intptr): float64; operator '[]=' macro StoreFixedDoubleArrayDirect( a: FixedDoubleArray, i: Smi, v: Number) { a.floats[i] = Convert(v); @@ -2487,14 +2872,14 @@ operator '[]=' macro StoreFixedArrayDirect(a: FixedArray, i: Smi, v: Object) { } extern macro GetNumberDictionaryNumberOfElements(NumberDictionary): Smi; -extern macro GetIteratorMethod(implicit context: Context)(HeapObject): Object +extern macro GetIteratorMethod(implicit context: Context)(HeapObject): JSAny labels IfIteratorUndefined; extern macro LoadConstructorOrBackPointer(Map): Object; -extern macro BasicLoadNumberDictionaryElement(NumberDictionary, intptr): Object +extern macro BasicLoadNumberDictionaryElement(NumberDictionary, intptr): JSAny labels NotData, IfHole; -extern macro BasicStoreNumberDictionaryElement(NumberDictionary, intptr, Object) +extern macro BasicStoreNumberDictionaryElement(NumberDictionary, intptr, JSAny) labels NotData, IfHole, ReadOnly; extern macro IsFastElementsKind(ElementsKind): bool; @@ -2607,16 +2992,15 @@ macro GetRegExpLastMatchInfo(implicit context: Context)(): RegExpMatchInfo { LoadNativeContext(context)[REGEXP_LAST_MATCH_INFO_INDEX]); } -extern transitioning macro Call(Context, Callable, Object): Object; -extern transitioning macro Call(Context, Callable, Object, Object): Object; -extern transitioning macro Call( - Context, Callable, Object, Object, Object): Object; +extern transitioning macro Call(Context, Callable, JSAny): JSAny; +extern transitioning macro Call(Context, Callable, JSAny, JSAny): JSAny; +extern transitioning macro Call(Context, Callable, JSAny, JSAny, JSAny): JSAny; extern transitioning macro Call( - Context, Callable, Object, Object, Object, Object): Object; + Context, Callable, JSAny, JSAny, JSAny, JSAny): JSAny; extern transitioning macro Call( - Context, Callable, Object, Object, Object, Object, Object): Object; + Context, Callable, JSAny, JSAny, JSAny, JSAny, JSAny): JSAny; extern transitioning macro Call( - Context, Callable, Object, Object, Object, Object, Object, Object): Object; + Context, Callable, JSAny, JSAny, JSAny, JSAny, JSAny, JSAny): 
JSAny; extern builtin CloneFastJSArray(Context, FastJSArrayForCopy): JSArray; extern macro ExtractFixedArray(FixedArrayBase, Smi, Smi, Smi): FixedArrayBase; @@ -2665,20 +3049,24 @@ macro TorqueCopyElements( count); } -macro LoadElementNoHole(a: JSArray, index: Smi): Object +macro LoadElementNoHole(a: JSArray, index: Smi): JSAny labels IfHole; LoadElementNoHole(implicit context: Context)( - a: JSArray, index: Smi): Object + a: JSArray, index: Smi): JSAny labels IfHole { try { const elements: FixedArray = Cast(a.elements) otherwise Unexpected; - const e: Object = elements.objects[index]; - if (e == TheHole) { - goto IfHole; + const e = UnsafeCast<(JSAny | TheHole)>(elements.objects[index]); + typeswitch (e) { + case (TheHole): { + goto IfHole; + } + case (e: JSAny): { + return e; + } } - return e; } label Unexpected { unreachable; @@ -2686,7 +3074,7 @@ LoadElementNoHole(implicit context: Context)( } LoadElementNoHole(implicit context: Context)( - a: JSArray, index: Smi): Object + a: JSArray, index: Smi): JSAny labels IfHole { try { const elements: FixedDoubleArray = @@ -2717,7 +3105,7 @@ struct FastJSArrayWitness { this.unstable = %RawDownCast(this.stable); } - LoadElementNoHole(implicit context: Context)(k: Smi): Object + LoadElementNoHole(implicit context: Context)(k: Smi): JSAny labels FoundHole { if (this.hasDoubles) { return LoadElementNoHole(this.unstable, k) @@ -2740,7 +3128,7 @@ struct FastJSArrayWitness { } } - LoadElementOrUndefined(implicit context: Context)(k: Smi): Object { + LoadElementOrUndefined(implicit context: Context)(k: Smi): JSAny { try { return this.LoadElementNoHole(k) otherwise FoundHole; } @@ -2760,7 +3148,7 @@ struct FastJSArrayWitness { this.unstable.length = newLength; } - Push(value: Object) labels Failed { + Push(value: JSAny) labels Failed { assert(this.arrayIsPushable); if (this.hasDoubles) { BuildAppendJSArray(HOLEY_DOUBLE_ELEMENTS, this.unstable, value) @@ -2832,7 +3220,7 @@ struct FastJSArrayForReadWitness { this.unstable = %RawDownCast(this.stable); } - LoadElementNoHole(implicit context: Context)(k: Smi): Object + LoadElementNoHole(implicit context: Context)(k: Smi): JSAny labels FoundHole { if (this.hasDoubles) { return LoadElementNoHole(this.unstable, k) @@ -2876,6 +3264,7 @@ extern macro IsJSObject(HeapObject): bool; extern macro IsJSTypedArray(HeapObject): bool; extern macro IsNumberDictionary(HeapObject): bool; extern macro IsContext(HeapObject): bool; +extern macro IsNativeContext(HeapObject): bool; extern macro IsJSReceiver(HeapObject): bool; extern macro TaggedIsCallable(Object): bool; extern macro IsDetachedBuffer(JSArrayBuffer): bool; @@ -2892,7 +3281,7 @@ extern macro IsJSArrayMap(Map): bool; extern macro IsExtensibleMap(Map): bool; extern macro IsJSPrimitiveWrapper(HeapObject): bool; extern macro IsCustomElementsReceiverInstanceType(int32): bool; -extern macro Typeof(Object): Object; +extern macro Typeof(JSAny): String; // Return true iff number is NaN. 
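LoadElementNoHole and the FastJSArrayWitness helpers above deal with one recurring detail: fast JS arrays may contain holes, and a hole must either divert the caller to a bail-out label or be observed as undefined (LoadElementOrUndefined). The TypeScript sketch below shows that JS-observable contract under invented names (loadElementNoHole, loadElementOrUndefined); it is not the Torque implementation.

```ts
// Sketch of the hole-handling contract: reading a missing ("hole") element
// either takes a bail-out path or is smoothed over as undefined.
function loadElementNoHole<T>(a: T[], index: number, ifHole: () => never): T {
  if (!(index in a)) ifHole();        // hole: no own property at this index
  return a[index];
}

function loadElementOrUndefined<T>(a: T[], index: number): T | undefined {
  return index in a ? a[index] : undefined;
}

const holey = [1, , 3];               // index 1 is a hole
loadElementOrUndefined(holey, 1);     // undefined
try {
  loadElementNoHole(holey, 1, () => { throw new RangeError("FoundHole"); });
} catch {
  // the IfHole label was taken
}
```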
macro NumberIsNaN(number: Number): bool { @@ -2908,31 +3297,35 @@ macro NumberIsNaN(number: Number): bool { } extern macro GotoIfForceSlowPath() labels Taken; -extern macro BranchIfToBooleanIsTrue(Object): never +macro IsForceSlowPath(): bool { + GotoIfForceSlowPath() otherwise return true; + return false; +} + +extern macro BranchIfToBooleanIsTrue(JSAny): never labels Taken, NotTaken; -extern macro BranchIfToBooleanIsFalse(Object): never +extern macro BranchIfToBooleanIsFalse(JSAny): never labels Taken, NotTaken; -macro ToBoolean(obj: Object): bool { - if (BranchIfToBooleanIsTrue(obj)) { - return true; - } else { - return false; - } +macro ToBoolean(obj: JSAny): bool { + BranchIfToBooleanIsTrue(obj) otherwise return true, return false; } @export macro RequireObjectCoercible(implicit context: Context)( - value: Object, name: constexpr string): Object { + value: JSAny, name: constexpr string): JSAny { if (IsNullOrUndefined(value)) { ThrowTypeError(kCalledOnNullOrUndefined, name); } return value; } -extern macro BranchIfSameValue(Object, Object): never labels Taken, NotTaken; +extern macro BranchIfSameValue(JSAny, JSAny): never labels Taken, NotTaken; +macro SameValue(a: JSAny, b: JSAny): bool { + BranchIfSameValue(a, b) otherwise return true, return false; +} -transitioning macro ToIndex(input: Object, context: Context): Number +transitioning macro ToIndex(input: JSAny, context: Context): Number labels RangeError { if (input == Undefined) { return 0; @@ -2946,7 +3339,7 @@ transitioning macro ToIndex(input: Object, context: Context): Number return value; } -transitioning macro GetLengthProperty(implicit context: Context)(o: Object): +transitioning macro GetLengthProperty(implicit context: Context)(o: JSAny): Number { try { typeswitch (o) { @@ -2956,18 +3349,18 @@ transitioning macro GetLengthProperty(implicit context: Context)(o: Object): case (a: JSArgumentsObjectWithLength): { goto ToLength(a.length); } - case (Object): deferred { + case (JSAny): deferred { goto ToLength(GetProperty(o, kLengthString)); } } } - label ToLength(length: Object) deferred { + label ToLength(length: JSAny) deferred { return ToLength_Inline(context, length); } } transitioning macro GetMethod(implicit context: Context)( - o: Object, name: constexpr string): Callable labels IfNullOrUndefined { + o: JSAny, name: constexpr string): Callable labels IfNullOrUndefined { const value = GetProperty(o, name); if (value == Undefined || value == Null) goto IfNullOrUndefined; return Cast(value) @@ -2976,44 +3369,37 @@ transitioning macro GetMethod(implicit context: Context)( extern macro NumberToString(Number): String; extern macro IsOneByteStringInstanceType(InstanceType): bool; -extern macro AllocateSeqOneByteString(implicit context: Context)(uint32): - String; -extern macro AllocateSeqTwoByteString(implicit context: Context)(uint32): - String; +extern macro AllocateSeqOneByteString(uint32): String; +extern macro AllocateSeqTwoByteString(uint32): String; extern macro ConvertToRelativeIndex(implicit context: Context)( - Object, intptr): intptr; + JSAny, intptr): intptr; -extern builtin ObjectToString(Context, Object): Object; +extern builtin ObjectToString(Context, JSAny): JSAny; extern builtin StringRepeat(Context, String, Number): String; struct KeyValuePair { - key: Object; - value: Object; + key: JSAny; + value: JSAny; } // Macro definitions for compatibility that expose functionality to the CSA // using "legacy" APIs. In Torque code, these should not be used. 
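GetLengthProperty above fast-paths JSArray and arguments objects and otherwise reads the length property and runs ToLength_Inline. The clamping ToLength performs is simple to state at the language level; the TypeScript sketch below shows that spec behaviour with invented names (toLength, getLengthProperty), not the Torque fast paths.

```ts
// Sketch of ToLength clamping used when GetLengthProperty falls back to the
// generic path. Spec semantics only; names invented for this example.
const MAX_SAFE_LENGTH = 2 ** 53 - 1;

function toLength(value: unknown): number {
  const len = Math.trunc(Number(value));        // ToIntegerOrInfinity
  if (Number.isNaN(len) || len <= 0) return 0;  // NaN and negatives clamp to 0
  return Math.min(len, MAX_SAFE_LENGTH);        // upper bound is 2**53 - 1
}

function getLengthProperty(o: { length?: unknown }): number {
  return toLength(o.length);
}

getLengthProperty([1, 2, 3]);         // 3
getLengthProperty({ length: "10" });  // 10 ("10" goes through ToLength)
getLengthProperty({ length: -5 });    // 0
```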
@export macro IsFastJSArray(o: Object, context: Context): bool { - try { - // Long-term, it's likely not a good idea to have this slow-path test here, - // since it fundamentally breaks the type system. - GotoIfForceSlowPath() otherwise ForceSlow; - } - label ForceSlow { - return false; - } - + // Long-term, it's likely not a good idea to have this slow-path test here, + // since it fundamentally breaks the type system. + if (IsForceSlowPath()) return false; return Is(o); } @export macro BranchIfFastJSArray(o: Object, context: Context): never labels True, False { - // Long-term, it's likely not a good idea to have this slow-path test here, - // since it fundamentally breaks the type system. - GotoIfForceSlowPath() otherwise False; - BranchIf(o) otherwise True, False; + if (IsFastJSArray(o, context)) { + goto True; + } else { + goto False; + } } @export @@ -3021,8 +3407,12 @@ macro BranchIfFastJSArrayForRead(o: Object, context: Context): never labels True, False { // Long-term, it's likely not a good idea to have this slow-path test here, // since it fundamentally breaks the type system. - GotoIfForceSlowPath() otherwise False; - BranchIf(o) otherwise True, False; + if (IsForceSlowPath()) goto False; + if (Is(o)) { + goto True; + } else { + goto False; + } } @export @@ -3037,7 +3427,7 @@ macro IsFastJSArrayForReadWithNoCustomIteration(context: Context, o: Object): } extern transitioning runtime -CreateDataProperty(implicit context: Context)(JSReceiver, Object, Object); +CreateDataProperty(implicit context: Context)(JSReceiver, JSAny, JSAny); namespace runtime { extern runtime @@ -3045,7 +3435,7 @@ namespace runtime { } transitioning builtin FastCreateDataProperty(implicit context: Context)( - receiver: JSReceiver, key: Object, value: Object): Object { + receiver: JSReceiver, key: JSAny, value: JSAny): Object { try { const array = Cast(receiver) otherwise Slow; const index: Smi = Cast(key) otherwise goto Slow; @@ -3090,8 +3480,8 @@ transitioning builtin FastCreateDataProperty(implicit context: Context)( } @export -transitioning macro ToStringImpl(context: Context, o: Object): String { - let result: Object = o; +transitioning macro ToStringImpl(context: Context, o: JSAny): String { + let result: JSAny = o; while (true) { typeswitch (result) { case (num: Number): { @@ -3110,7 +3500,7 @@ transitioning macro ToStringImpl(context: Context, o: Object): String { case (Symbol): { ThrowTypeError(kSymbolToString); } - case (Object): { + case (JSAny): { return ToStringRT(context, o); } } @@ -3160,3 +3550,14 @@ builtin CheckNumberInRange(implicit context: Context)( unreachable; } } + +macro ReplaceTheHoleWithUndefined(o: JSAny | TheHole): JSAny { + typeswitch (o) { + case (TheHole): { + return Undefined; + } + case (a: JSAny): { + return a; + } + } +} diff --git a/deps/v8/src/builtins/boolean.tq b/deps/v8/src/builtins/boolean.tq index 25f9ebd3961add..e8feaf1cf1f762 100644 --- a/deps/v8/src/builtins/boolean.tq +++ b/deps/v8/src/builtins/boolean.tq @@ -5,8 +5,8 @@ namespace boolean { javascript builtin BooleanConstructor( - js-implicit context: Context, receiver: Object, newTarget: Object, - target: JSFunction)(...arguments): Object { + js-implicit context: Context, receiver: JSAny, newTarget: JSAny, + target: JSFunction)(...arguments): JSAny { const value = SelectBooleanConstant(ToBoolean(arguments[0])); if (newTarget == Undefined) { diff --git a/deps/v8/src/builtins/builtins-arguments-gen.cc b/deps/v8/src/builtins/builtins-arguments-gen.cc index d65d57cc79b079..c4399175e9846d 100644 --- 
a/deps/v8/src/builtins/builtins-arguments-gen.cc +++ b/deps/v8/src/builtins/builtins-arguments-gen.cc @@ -40,20 +40,20 @@ ArgumentsBuiltinsAssembler::AllocateArgumentsObject(Node* map, empty ? IntPtrConstant(base_size) : ElementOffsetFromIndex(element_count, PACKED_ELEMENTS, mode, base_size + FixedArray::kHeaderSize); - TNode result = Allocate(size); + TNode result = Allocate(size); Comment("Initialize arguments object"); StoreMapNoWriteBarrier(result, map); - Node* empty_fixed_array = LoadRoot(RootIndex::kEmptyFixedArray); + TNode empty_fixed_array = EmptyFixedArrayConstant(); StoreObjectField(result, JSArray::kPropertiesOrHashOffset, empty_fixed_array); - Node* smi_arguments_count = ParameterToTagged(arguments_count, mode); + TNode smi_arguments_count = ParameterToTagged(arguments_count, mode); StoreObjectFieldNoWriteBarrier(result, JSArray::kLengthOffset, smi_arguments_count); Node* arguments = nullptr; if (!empty) { - arguments = InnerAllocate(CAST(result), elements_offset); + arguments = InnerAllocate(result, elements_offset); StoreObjectFieldNoWriteBarrier(arguments, FixedArray::kLengthOffset, smi_arguments_count); - Node* fixed_array_map = LoadRoot(RootIndex::kFixedArrayMap); + TNode fixed_array_map = FixedArrayMapConstant(); StoreMapNoWriteBarrier(arguments, fixed_array_map); } Node* parameter_map = nullptr; @@ -63,8 +63,7 @@ ArgumentsBuiltinsAssembler::AllocateArgumentsObject(Node* map, parameter_map = InnerAllocate(CAST(arguments), parameter_map_offset); StoreObjectFieldNoWriteBarrier(result, JSArray::kElementsOffset, parameter_map); - Node* sloppy_elements_map = - LoadRoot(RootIndex::kSloppyArgumentsElementsMap); + TNode sloppy_elements_map = SloppyArgumentsElementsMapConstant(); StoreMapNoWriteBarrier(parameter_map, sloppy_elements_map); parameter_map_count = ParameterToTagged(parameter_map_count, mode); StoreObjectFieldNoWriteBarrier(parameter_map, FixedArray::kLengthOffset, @@ -97,13 +96,14 @@ Node* ArgumentsBuiltinsAssembler::ConstructParametersObjectFromArgs( VARIABLE(offset, MachineType::PointerRepresentation()); offset.Bind(IntPtrConstant(FixedArrayBase::kHeaderSize - kHeapObjectTag)); VariableList list({&offset}, zone()); - arguments.ForEach(list, - [this, elements, &offset](Node* arg) { - StoreNoWriteBarrier(MachineRepresentation::kTagged, - elements, offset.value(), arg); - Increment(&offset, kTaggedSize); - }, - first_arg, nullptr, param_mode); + arguments.ForEach( + list, + [this, elements, &offset](Node* arg) { + StoreNoWriteBarrier(MachineRepresentation::kTagged, elements, + offset.value(), arg); + Increment(&offset, kTaggedSize); + }, + first_arg, nullptr, param_mode); return result; } @@ -121,8 +121,8 @@ Node* ArgumentsBuiltinsAssembler::EmitFastNewRestParameter(Node* context, Node* rest_count = IntPtrOrSmiSub(info.argument_count, info.formal_parameter_count, mode); - Node* const native_context = LoadNativeContext(context); - Node* const array_map = + TNode const native_context = LoadNativeContext(context); + TNode const array_map = LoadJSArrayElementsMap(PACKED_ELEMENTS, native_context); GotoIf(IntPtrOrSmiLessThanOrEqual(rest_count, zero, mode), &no_rest_parameters); @@ -164,7 +164,7 @@ Node* ArgumentsBuiltinsAssembler::EmitFastNewStrictArguments(Node* context, Label done(this, &result), empty(this), runtime(this, Label::kDeferred); ParameterMode mode = OptimalParameterMode(); - Node* zero = IntPtrOrSmiConstant(0, mode); + TNode zero = BIntConstant(0); TorqueStructArgumentsInfo info = GetArgumentsFrameAndCount( CAST(context), UncheckedCast(function)); @@ -173,10 
+173,10 @@ Node* ArgumentsBuiltinsAssembler::EmitFastNewStrictArguments(Node* context, info.argument_count, &runtime, JSStrictArgumentsObject::kSize + FixedArray::kHeaderSize, mode); - Node* const native_context = LoadNativeContext(context); - Node* const map = + TNode const native_context = LoadNativeContext(context); + TNode const map = LoadContextElement(native_context, Context::STRICT_ARGUMENTS_MAP_INDEX); - GotoIf(WordEqual(info.argument_count, zero), &empty); + GotoIf(BIntEqual(info.argument_count, zero), &empty); result.Bind(ConstructParametersObjectFromArgs( map, info.frame, info.argument_count, zero, info.argument_count, mode, @@ -209,7 +209,7 @@ Node* ArgumentsBuiltinsAssembler::EmitFastNewSloppyArguments(Node* context, VARIABLE(result, MachineRepresentation::kTagged); ParameterMode mode = OptimalParameterMode(); - Node* zero = IntPtrOrSmiConstant(0, mode); + TNode zero = BIntConstant(0); Label done(this, &result), empty(this), no_parameters(this), runtime(this, Label::kDeferred); @@ -217,9 +217,9 @@ Node* ArgumentsBuiltinsAssembler::EmitFastNewSloppyArguments(Node* context, TorqueStructArgumentsInfo info = GetArgumentsFrameAndCount( CAST(context), UncheckedCast(function)); - GotoIf(WordEqual(info.argument_count, zero), &empty); + GotoIf(BIntEqual(info.argument_count, zero), &empty); - GotoIf(WordEqual(info.formal_parameter_count, zero), &no_parameters); + GotoIf(BIntEqual(info.formal_parameter_count, zero), &no_parameters); { Comment("Mapped parameter JSSloppyArgumentsObject"); @@ -237,8 +237,8 @@ Node* ArgumentsBuiltinsAssembler::EmitFastNewSloppyArguments(Node* context, elements_allocated, &runtime, JSSloppyArgumentsObject::kSize + FixedArray::kHeaderSize * 2, mode); - Node* const native_context = LoadNativeContext(context); - Node* const map = LoadContextElement( + TNode const native_context = LoadNativeContext(context); + TNode const map = LoadContextElement( native_context, Context::FAST_ALIASED_ARGUMENTS_MAP_INDEX); Node* argument_object; Node* elements; @@ -252,26 +252,26 @@ Node* ArgumentsBuiltinsAssembler::EmitFastNewSloppyArguments(Node* context, StoreFixedArrayElement(CAST(map_array), 1, elements, SKIP_WRITE_BARRIER); Comment("Fill in non-mapped parameters"); - Node* argument_offset = + TNode argument_offset = ElementOffsetFromIndex(info.argument_count, PACKED_ELEMENTS, mode, FixedArray::kHeaderSize - kHeapObjectTag); - Node* mapped_offset = + TNode mapped_offset = ElementOffsetFromIndex(mapped_count, PACKED_ELEMENTS, mode, FixedArray::kHeaderSize - kHeapObjectTag); CodeStubArguments arguments(this, info.argument_count, info.frame, mode); VARIABLE(current_argument, MachineType::PointerRepresentation()); current_argument.Bind(arguments.AtIndexPtr(info.argument_count, mode)); VariableList var_list1({&current_argument}, zone()); - mapped_offset = BuildFastLoop( + mapped_offset = UncheckedCast(BuildFastLoop( var_list1, argument_offset, mapped_offset, [this, elements, &current_argument](Node* offset) { Increment(&current_argument, kSystemPointerSize); - Node* arg = LoadBufferObject( + TNode arg = LoadBufferObject( UncheckedCast(current_argument.value()), 0); StoreNoWriteBarrier(MachineRepresentation::kTagged, elements, offset, arg); }, - -kTaggedSize, INTPTR_PARAMETERS); + -kTaggedSize, INTPTR_PARAMETERS)); // Copy the parameter slots and the holes in the arguments. // We need to fill in mapped_count slots. 
They index the context, @@ -287,13 +287,13 @@ Node* ArgumentsBuiltinsAssembler::EmitFastNewSloppyArguments(Node* context, IntPtrOrSmiAdd(IntPtrOrSmiConstant(Context::MIN_CONTEXT_SLOTS, mode), info.formal_parameter_count, mode), mapped_count, mode)); - Node* the_hole = TheHoleConstant(); + TNode the_hole = TheHoleConstant(); VariableList var_list2({&context_index}, zone()); const int kParameterMapHeaderSize = FixedArray::OffsetOfElementAt(2); - Node* adjusted_map_array = IntPtrAdd( + TNode adjusted_map_array = IntPtrAdd( BitcastTaggedToWord(map_array), IntPtrConstant(kParameterMapHeaderSize - FixedArray::kHeaderSize)); - Node* zero_offset = ElementOffsetFromIndex( + TNode zero_offset = ElementOffsetFromIndex( zero, PACKED_ELEMENTS, mode, FixedArray::kHeaderSize - kHeapObjectTag); BuildFastLoop( var_list2, mapped_offset, zero_offset, @@ -317,8 +317,8 @@ Node* ArgumentsBuiltinsAssembler::EmitFastNewSloppyArguments(Node* context, GotoIfFixedArraySizeDoesntFitInNewSpace( info.argument_count, &runtime, JSSloppyArgumentsObject::kSize + FixedArray::kHeaderSize, mode); - Node* const native_context = LoadNativeContext(context); - Node* const map = + TNode const native_context = LoadNativeContext(context); + TNode const map = LoadContextElement(native_context, Context::SLOPPY_ARGUMENTS_MAP_INDEX); result.Bind(ConstructParametersObjectFromArgs( map, info.frame, info.argument_count, zero, info.argument_count, mode, @@ -331,8 +331,8 @@ Node* ArgumentsBuiltinsAssembler::EmitFastNewSloppyArguments(Node* context, BIND(&empty); { Comment("Empty JSSloppyArgumentsObject"); - Node* const native_context = LoadNativeContext(context); - Node* const map = + TNode const native_context = LoadNativeContext(context); + TNode const map = LoadContextElement(native_context, Context::SLOPPY_ARGUMENTS_MAP_INDEX); Node* arguments; Node* elements; diff --git a/deps/v8/src/builtins/builtins-array-gen.cc b/deps/v8/src/builtins/builtins-array-gen.cc index 07f74cb4298db9..c7d8eb009125da 100644 --- a/deps/v8/src/builtins/builtins-array-gen.cc +++ b/deps/v8/src/builtins/builtins-array-gen.cc @@ -30,272 +30,267 @@ ArrayBuiltinsAssembler::ArrayBuiltinsAssembler( to_(this, MachineRepresentation::kTagged, SmiConstant(0)), fully_spec_compliant_(this, {&k_, &a_, &to_}) {} - void ArrayBuiltinsAssembler::TypedArrayMapResultGenerator() { - // 6. Let A be ? TypedArraySpeciesCreate(O, len). - TNode original_array = CAST(o()); - TNode length = CAST(len_); - const char* method_name = "%TypedArray%.prototype.map"; - - TNode a = TypedArraySpeciesCreateByLength( - context(), method_name, original_array, length); - // In the Spec and our current implementation, the length check is already - // performed in TypedArraySpeciesCreate. - CSA_ASSERT(this, UintPtrLessThanOrEqual(SmiUntag(CAST(len_)), - LoadJSTypedArrayLength(a))); - fast_typed_array_target_ = - Word32Equal(LoadElementsKind(original_array), LoadElementsKind(a)); - a_.Bind(a); - } - - // See tc39.github.io/ecma262/#sec-%typedarray%.prototype.map. - Node* ArrayBuiltinsAssembler::TypedArrayMapProcessor(Node* k_value, Node* k) { - // 8. c. Let mapped_value be ? Call(callbackfn, T, « kValue, k, O »). - Node* mapped_value = CallJS(CodeFactory::Call(isolate()), context(), - callbackfn(), this_arg(), k_value, k, o()); - Label fast(this), slow(this), done(this), detached(this, Label::kDeferred); - - // 8. d. Perform ? Set(A, Pk, mapped_value, true). 
- // Since we know that A is a TypedArray, this always ends up in - // #sec-integer-indexed-exotic-objects-set-p-v-receiver and then - // tc39.github.io/ecma262/#sec-integerindexedelementset . - Branch(fast_typed_array_target_, &fast, &slow); - - BIND(&fast); - // #sec-integerindexedelementset - // 5. If arrayTypeName is "BigUint64Array" or "BigInt64Array", let - // numValue be ? ToBigInt(v). - // 6. Otherwise, let numValue be ? ToNumber(value). - Node* num_value; - if (source_elements_kind_ == BIGINT64_ELEMENTS || - source_elements_kind_ == BIGUINT64_ELEMENTS) { - num_value = ToBigInt(context(), mapped_value); - } else { - num_value = ToNumber_Inline(context(), mapped_value); - } - // The only way how this can bailout is because of a detached buffer. - EmitElementStore(a(), k, num_value, source_elements_kind_, - KeyedAccessStoreMode::STANDARD_STORE, &detached, - context()); - Goto(&done); +void ArrayBuiltinsAssembler::TypedArrayMapResultGenerator() { + // 6. Let A be ? TypedArraySpeciesCreate(O, len). + TNode original_array = CAST(o()); + TNode length = CAST(len_); + const char* method_name = "%TypedArray%.prototype.map"; + + TNode a = TypedArraySpeciesCreateByLength( + context(), method_name, original_array, length); + // In the Spec and our current implementation, the length check is already + // performed in TypedArraySpeciesCreate. + CSA_ASSERT(this, UintPtrLessThanOrEqual(SmiUntag(CAST(len_)), + LoadJSTypedArrayLength(a))); + fast_typed_array_target_ = + Word32Equal(LoadElementsKind(original_array), LoadElementsKind(a)); + a_.Bind(a); +} - BIND(&slow); - SetPropertyStrict(context(), CAST(a()), CAST(k), CAST(mapped_value)); - Goto(&done); +// See tc39.github.io/ecma262/#sec-%typedarray%.prototype.map. +Node* ArrayBuiltinsAssembler::TypedArrayMapProcessor(Node* k_value, Node* k) { + // 8. c. Let mapped_value be ? Call(callbackfn, T, « kValue, k, O »). + Node* mapped_value = CallJS(CodeFactory::Call(isolate()), context(), + callbackfn(), this_arg(), k_value, k, o()); + Label fast(this), slow(this), done(this), detached(this, Label::kDeferred); - BIND(&detached); - // tc39.github.io/ecma262/#sec-integerindexedelementset - // 8. If IsDetachedBuffer(buffer) is true, throw a TypeError exception. - ThrowTypeError(context_, MessageTemplate::kDetachedOperation, name_); + // 8. d. Perform ? Set(A, Pk, mapped_value, true). + // Since we know that A is a TypedArray, this always ends up in + // #sec-integer-indexed-exotic-objects-set-p-v-receiver and then + // tc39.github.io/ecma262/#sec-integerindexedelementset . + Branch(fast_typed_array_target_, &fast, &slow); - BIND(&done); - return a(); + BIND(&fast); + // #sec-integerindexedelementset + // 5. If arrayTypeName is "BigUint64Array" or "BigInt64Array", let + // numValue be ? ToBigInt(v). + // 6. Otherwise, let numValue be ? ToNumber(value). + Node* num_value; + if (source_elements_kind_ == BIGINT64_ELEMENTS || + source_elements_kind_ == BIGUINT64_ELEMENTS) { + num_value = ToBigInt(context(), mapped_value); + } else { + num_value = ToNumber_Inline(context(), mapped_value); } + // The only way how this can bailout is because of a detached buffer. 
+ EmitElementStore(a(), k, num_value, source_elements_kind_, + KeyedAccessStoreMode::STANDARD_STORE, &detached, context()); + Goto(&done); - void ArrayBuiltinsAssembler::NullPostLoopAction() {} - - void ArrayBuiltinsAssembler::FillFixedArrayWithSmiZero( - TNode array, TNode smi_length) { - CSA_ASSERT(this, Word32BinaryNot(IsFixedDoubleArray(array))); + BIND(&slow); + SetPropertyStrict(context(), CAST(a()), CAST(k), CAST(mapped_value)); + Goto(&done); - TNode length = SmiToIntPtr(smi_length); - TNode byte_length = TimesTaggedSize(length); - CSA_ASSERT(this, UintPtrLessThan(length, byte_length)); + BIND(&detached); + // tc39.github.io/ecma262/#sec-integerindexedelementset + // 8. If IsDetachedBuffer(buffer) is true, throw a TypeError exception. + ThrowTypeError(context_, MessageTemplate::kDetachedOperation, name_); - static const int32_t fa_base_data_offset = - FixedArray::kHeaderSize - kHeapObjectTag; - TNode backing_store = IntPtrAdd( - BitcastTaggedToWord(array), IntPtrConstant(fa_base_data_offset)); + BIND(&done); + return a(); +} - // Call out to memset to perform initialization. - TNode memset = - ExternalConstant(ExternalReference::libc_memset_function()); - STATIC_ASSERT(kSizetSize == kIntptrSize); - CallCFunction(memset, MachineType::Pointer(), - std::make_pair(MachineType::Pointer(), backing_store), - std::make_pair(MachineType::IntPtr(), IntPtrConstant(0)), - std::make_pair(MachineType::UintPtr(), byte_length)); - } +void ArrayBuiltinsAssembler::NullPostLoopAction() {} + +void ArrayBuiltinsAssembler::FillFixedArrayWithSmiZero(TNode array, + TNode smi_length) { + CSA_ASSERT(this, Word32BinaryNot(IsFixedDoubleArray(array))); + + TNode length = SmiToIntPtr(smi_length); + TNode byte_length = TimesTaggedSize(length); + CSA_ASSERT(this, UintPtrLessThan(length, byte_length)); + + static const int32_t fa_base_data_offset = + FixedArray::kHeaderSize - kHeapObjectTag; + TNode backing_store = IntPtrAdd(BitcastTaggedToWord(array), + IntPtrConstant(fa_base_data_offset)); + + // Call out to memset to perform initialization. + TNode memset = + ExternalConstant(ExternalReference::libc_memset_function()); + STATIC_ASSERT(kSizetSize == kIntptrSize); + CallCFunction(memset, MachineType::Pointer(), + std::make_pair(MachineType::Pointer(), backing_store), + std::make_pair(MachineType::IntPtr(), IntPtrConstant(0)), + std::make_pair(MachineType::UintPtr(), byte_length)); +} - void ArrayBuiltinsAssembler::ReturnFromBuiltin(Node* value) { - if (argc_ == nullptr) { - Return(value); - } else { - // argc_ doesn't include the receiver, so it has to be added back in - // manually. - PopAndReturn(IntPtrAdd(argc_, IntPtrConstant(1)), value); - } +void ArrayBuiltinsAssembler::ReturnFromBuiltin(Node* value) { + if (argc_ == nullptr) { + Return(value); + } else { + // argc_ doesn't include the receiver, so it has to be added back in + // manually. 
+ PopAndReturn(IntPtrAdd(argc_, IntPtrConstant(1)), value); } +} - void ArrayBuiltinsAssembler::InitIteratingArrayBuiltinBody( - TNode context, TNode receiver, Node* callbackfn, - Node* this_arg, TNode argc) { - context_ = context; - receiver_ = receiver; - callbackfn_ = callbackfn; - this_arg_ = this_arg; - argc_ = argc; - } +void ArrayBuiltinsAssembler::InitIteratingArrayBuiltinBody( + TNode context, TNode receiver, Node* callbackfn, + Node* this_arg, TNode argc) { + context_ = context; + receiver_ = receiver; + callbackfn_ = callbackfn; + this_arg_ = this_arg; + argc_ = argc; +} - void ArrayBuiltinsAssembler::GenerateIteratingTypedArrayBuiltinBody( - const char* name, const BuiltinResultGenerator& generator, - const CallResultProcessor& processor, const PostLoopAction& action, - ForEachDirection direction) { - name_ = name; +void ArrayBuiltinsAssembler::GenerateIteratingTypedArrayBuiltinBody( + const char* name, const BuiltinResultGenerator& generator, + const CallResultProcessor& processor, const PostLoopAction& action, + ForEachDirection direction) { + name_ = name; - // ValidateTypedArray: tc39.github.io/ecma262/#sec-validatetypedarray + // ValidateTypedArray: tc39.github.io/ecma262/#sec-validatetypedarray - Label throw_not_typed_array(this, Label::kDeferred); + Label throw_not_typed_array(this, Label::kDeferred); - GotoIf(TaggedIsSmi(receiver_), &throw_not_typed_array); - TNode typed_array_map = LoadMap(CAST(receiver_)); - GotoIfNot(IsJSTypedArrayMap(typed_array_map), &throw_not_typed_array); + GotoIf(TaggedIsSmi(receiver_), &throw_not_typed_array); + TNode typed_array_map = LoadMap(CAST(receiver_)); + GotoIfNot(IsJSTypedArrayMap(typed_array_map), &throw_not_typed_array); - TNode typed_array = CAST(receiver_); - o_ = typed_array; + TNode typed_array = CAST(receiver_); + o_ = typed_array; - TNode array_buffer = - LoadJSArrayBufferViewBuffer(typed_array); - ThrowIfArrayBufferIsDetached(context_, array_buffer, name_); + TNode array_buffer = LoadJSArrayBufferViewBuffer(typed_array); + ThrowIfArrayBufferIsDetached(context_, array_buffer, name_); - len_ = ChangeUintPtrToTagged(LoadJSTypedArrayLength(typed_array)); + len_ = ChangeUintPtrToTagged(LoadJSTypedArrayLength(typed_array)); - Label throw_not_callable(this, Label::kDeferred); - Label distinguish_types(this); - GotoIf(TaggedIsSmi(callbackfn_), &throw_not_callable); - Branch(IsCallableMap(LoadMap(callbackfn_)), &distinguish_types, - &throw_not_callable); + Label throw_not_callable(this, Label::kDeferred); + Label distinguish_types(this); + GotoIf(TaggedIsSmi(callbackfn_), &throw_not_callable); + Branch(IsCallableMap(LoadMap(callbackfn_)), &distinguish_types, + &throw_not_callable); - BIND(&throw_not_typed_array); - ThrowTypeError(context_, MessageTemplate::kNotTypedArray); + BIND(&throw_not_typed_array); + ThrowTypeError(context_, MessageTemplate::kNotTypedArray); - BIND(&throw_not_callable); - ThrowTypeError(context_, MessageTemplate::kCalledNonCallable, callbackfn_); + BIND(&throw_not_callable); + ThrowTypeError(context_, MessageTemplate::kCalledNonCallable, callbackfn_); - Label unexpected_instance_type(this); - BIND(&unexpected_instance_type); - Unreachable(); + Label unexpected_instance_type(this); + BIND(&unexpected_instance_type); + Unreachable(); - std::vector elements_kinds = { + std::vector elements_kinds = { #define ELEMENTS_KIND(Type, type, TYPE, ctype) TYPE##_ELEMENTS, - TYPED_ARRAYS(ELEMENTS_KIND) + TYPED_ARRAYS(ELEMENTS_KIND) #undef ELEMENTS_KIND - }; - std::list
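The GenerateIteratingTypedArrayBuiltinBody hunk above is largely re-indentation, but the validation sequence it performs is the interesting part: the receiver must be a JSTypedArray whose buffer is not detached, and the callback must be callable, before the builtin dispatches on the elements kind. A JS-level sketch of the observable checks follows (TypeScript, invented name validateIteratingTypedArrayCall; buffer detachment is only noted in a comment because it is not directly observable from script).

```ts
// Sketch of the observable validation performed by iterating %TypedArray%
// builtins: ValidateTypedArray on the receiver, then an IsCallable check on
// the callback. Invented helper name; not the CSA implementation.
function validateIteratingTypedArrayCall(
    receiver: unknown, callbackfn: unknown, methodName: string): number {
  // ValidateTypedArray: the receiver must be a typed array (not a DataView).
  if (!ArrayBuffer.isView(receiver) || receiver instanceof DataView) {
    throw new TypeError(`${methodName} called on a value that is not a typed array`);
  }
  // The real builtin also throws kDetachedOperation when the backing
  // ArrayBuffer has been detached; that state is not modelled here.
  if (typeof callbackfn !== "function") {
    // kCalledNonCallable
    throw new TypeError(`${String(callbackfn)} is not a function`);
  }
  return (receiver as Uint8Array).length;  // len used by the iteration loop
}

validateIteratingTypedArrayCall(new Uint8Array(4), (x: number) => x,
                                "%TypedArray%.prototype.map");  // 4
```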