diff --git a/.eslintrc b/.eslintrc index 532ac8b50cf07a..0bf34c83908c72 100644 --- a/.eslintrc +++ b/.eslintrc @@ -66,6 +66,8 @@ rules: eol-last: 2 ## no trailing spaces no-trailing-spaces: 2 + # require space after keywords, eg 'for (..)' + space-after-keywords: 2 # Strict Mode # list: https://github.com/eslint/eslint/tree/master/docs/rules#strict-mode diff --git a/.mailmap b/.mailmap index 38e5c1928a7102..0906e65c39f29f 100644 --- a/.mailmap +++ b/.mailmap @@ -77,6 +77,7 @@ Maciej Małecki Malte-Thorben Bruns Malte-Thorben Bruns Mathias Pettersson +Matthew Lye Michael Bernstein Michael Wilber Micheil Smith @@ -112,7 +113,8 @@ Siddharth Mahendraker Simon Willison Stanislav Opichal Stefan Bühler -Steven R. Loomis +Steven R. Loomis +Todd Kennedy TJ Holowaychuk TJ Holowaychuk Tadashi SAWADA diff --git a/AUTHORS b/AUTHORS index 2b2fe64309b79c..cfcf86132de0ea 100644 --- a/AUTHORS +++ b/AUTHORS @@ -631,6 +631,7 @@ dead-horse Luis Reis Jackson Tian sudodoki +Steven Loomis haoxin Artur Cistov MK Safi @@ -688,7 +689,7 @@ Roman Reiss Glen Keane Xiaowei Li <446240525@qq.com> toastynerd -Todd Kennedy +Todd Kennedy Icer Liang Stephen Belanger Jeremiah Senkpiel @@ -773,5 +774,17 @@ Pierre Inglebert Ivan Yan Sangmin Yoon Mark Plomer +Phillip Johnsen +Matteo Collina +jomo +Gireesh Punathil +Lucien Greathouse +Chad Johnston +Sam Stites +Matthew Lye +Matt Loring +P.S.V.R +Jacob Edelman +Mike Atkins # Generated by tools/update-authors.sh diff --git a/CHANGELOG.md b/CHANGELOG.md index 6b045eceb580a8..02fe537611b4d0 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,127 @@ # io.js ChangeLog +## 2015-08-24, Version 3.2.0, @rvagg + +### Notable changes + +TODO + +### Known issues + +See https://github.com/nodejs/io.js/labels/confirmed-bug for complete and current list of known issues. + +* Possible remaining post-3.0.0 buffer memory leak(s), details at [#2308](https://github.com/nodejs/node/issues/2308). Mostly fixed in [2781333...8841947](https://github.com/nodejs/node/compare/27813339cf713275fa7bbf2ae497d13b66aa78ce...88419479ccb6384473339f09e5c55f234f4ee194) - [#2352](https://github.com/nodejs/node/pull/2352), and also [6fff0f7...a40ae51](https://github.com/nodejs/node/compare/6fff0f73dc371153507d96ecd5f88c1ddc7780c9...a40ae513bb99ec3f85ea5b864a366bc7552d61f0) - [#2375](https://github.com/nodejs/node/pull/2375). +* Some problems with unreferenced timers running during `beforeExit` are still to be resolved. See [#1264](https://github.com/nodejs/io.js/issues/1264). +* Surrogate pair in REPL can freeze terminal. [#690](https://github.com/nodejs/io.js/issues/690) +* `process.send()` is not synchronous as the docs suggest, a regression introduced in 1.0.2, see [#760](https://github.com/nodejs/io.js/issues/760). +* Calling `dns.setServers()` while a DNS query is in progress can cause the process to crash on a failed assertion. [#894](https://github.com/nodejs/io.js/issues/894) +* `url.resolve` may transfer the auth portion of the url when resolving between two full hosts, see [#1435](https://github.com/nodejs/io.js/issues/1435). 
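As a quick illustration of the `space-after-keywords` ESLint rule enabled in the `.eslintrc` hunk at the top of this diff — a minimal sketch, not part of the patch; `ready` and `doWork` are placeholder names:

```js
// Fails space-after-keywords: no space between the keyword and '('
if(ready) {
  for(var i = 0; i < 3; i += 1) doWork(i);
}

// Passes: a single space follows keywords such as 'if', 'for', 'while'
if (ready) {
  for (var i = 0; i < 3; i += 1) doWork(i);
}
```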
+ +### Commits + +* [[`1cd794f129`](https://github.com/nodejs/node/commit/1cd794f129)] - **buffer**: reapply 07c0667 (Fedor Indutny) [#2487](https://github.com/nodejs/node/pull/2487) +* [[`77075ec906`](https://github.com/nodejs/node/commit/77075ec906)] - **crypto**: fix mem {de}allocation in ExportChallenge (Karl Skomski) [#2359](https://github.com/nodejs/node/pull/2359) +* [[`09437e0146`](https://github.com/nodejs/node/commit/09437e0146)] - **doc**: fix comments in tls_wrap.cc and _http_client.js (Minwoo Jung) [#2489](https://github.com/nodejs/node/pull/2489) +* [[`c9867fed29`](https://github.com/nodejs/node/commit/c9867fed29)] - **doc**: document response.finished in http.markdown (hackerjs) [#2414](https://github.com/nodejs/node/pull/2414) +* [[`7f23a83c42`](https://github.com/nodejs/node/commit/7f23a83c42)] - **doc**: update AUTHORS list (Rod Vagg) [#2505](https://github.com/nodejs/node/pull/2505) +* [[`cd0c362f67`](https://github.com/nodejs/node/commit/cd0c362f67)] - **doc**: update AUTHORS list (Rod Vagg) [#2318](https://github.com/nodejs/node/pull/2318) +* [[`2c7b9257ea`](https://github.com/nodejs/node/commit/2c7b9257ea)] - **doc**: add TSC meeting minutes 2015-07-29 (Rod Vagg) [#2437](https://github.com/nodejs/node/pull/2437) +* [[`aaefde793e`](https://github.com/nodejs/node/commit/aaefde793e)] - **doc**: add TSC meeting minutes 2015-08-19 (Rod Vagg) [#2460](https://github.com/nodejs/node/pull/2460) +* [[`51ef9106f5`](https://github.com/nodejs/node/commit/51ef9106f5)] - **doc**: add TSC meeting minutes 2015-06-03 (Rod Vagg) [#2453](https://github.com/nodejs/node/pull/2453) +* [[`7130b4cf1d`](https://github.com/nodejs/node/commit/7130b4cf1d)] - **doc**: fix links to original converged repo (Rod Vagg) [#2454](https://github.com/nodejs/node/pull/2454) +* [[`14f2aee1df`](https://github.com/nodejs/node/commit/14f2aee1df)] - **doc**: fix links to original gh issues for TSC meetings (Rod Vagg) [#2454](https://github.com/nodejs/node/pull/2454) +* [[`87a9ef0a40`](https://github.com/nodejs/node/commit/87a9ef0a40)] - **doc**: add audio recording links to TSC meeting minutes (Rod Vagg) [#2454](https://github.com/nodejs/node/pull/2454) +* [[`f5cf24afbc`](https://github.com/nodejs/node/commit/f5cf24afbc)] - **doc**: add TSC meeting minutes 2015-07-22 (Rod Vagg) [#2436](https://github.com/nodejs/node/pull/2436) +* [[`3f821b96eb`](https://github.com/nodejs/node/commit/3f821b96eb)] - **doc**: fix spelling mistake in node.js comment (Jacob Edelman) [#2391](https://github.com/nodejs/node/pull/2391) +* [[`3e6a6fcdd6`](https://github.com/nodejs/node/commit/3e6a6fcdd6)] - **(SEMVER-MINOR)** **events**: deprecate static listenerCount function (Sakthipriyan Vairamani) [#2349](https://github.com/nodejs/node/pull/2349) +* [[`023386c852`](https://github.com/nodejs/node/commit/023386c852)] - **fs**: replace bad_args macro with concrete error msg (Roman Klauke) [#2495](https://github.com/nodejs/node/pull/2495) +* [[`5d7486941b`](https://github.com/nodejs/node/commit/5d7486941b)] - **repl**: filter integer keys from repl tab complete list (James M Snell) [#2409](https://github.com/nodejs/node/pull/2409) +* [[`7f02443a9a`](https://github.com/nodejs/node/commit/7f02443a9a)] - **repl**: dont throw ENOENT on NODE_REPL_HISTORY_FILE (Todd Kennedy) [#2451](https://github.com/nodejs/node/pull/2451) +* [[`56a2ae9cef`](https://github.com/nodejs/node/commit/56a2ae9cef)] - **src**: improve startup time (Evan Lucas) [#2483](https://github.com/nodejs/node/pull/2483) +* 
[[`2d3f09bd76`](https://github.com/nodejs/node/commit/2d3f09bd76)] - **stream_base**: various improvements (Fedor Indutny) [#2351](https://github.com/nodejs/node/pull/2351) +* [[`c1ce423b35`](https://github.com/nodejs/node/commit/c1ce423b35)] - **string_bytes**: fix unaligned write in UCS2 (Fedor Indutny) [#2480](https://github.com/nodejs/node/pull/2480) +* [[`536c3d0537`](https://github.com/nodejs/node/commit/536c3d0537)] - **test**: use reserved IP in test-net-connect-timeout (Rich Trott) [#2257](https://github.com/nodejs/node/pull/2257) +* [[`5df06fd8df`](https://github.com/nodejs/node/commit/5df06fd8df)] - **test**: add spaces after keywords (Brendan Ashworth) +* [[`e714b5620e`](https://github.com/nodejs/node/commit/e714b5620e)] - **test**: remove unreachable code (Michaël Zasso) [#2289](https://github.com/nodejs/node/pull/2289) +* [[`3579f3a2a4`](https://github.com/nodejs/node/commit/3579f3a2a4)] - **test**: disallow unreachable code (Michaël Zasso) [#2289](https://github.com/nodejs/node/pull/2289) +* [[`3545e236fc`](https://github.com/nodejs/node/commit/3545e236fc)] - **test**: reduce timeouts in test-net-keepalive (Brendan Ashworth) [#2429](https://github.com/nodejs/node/pull/2429) +* [[`b60e690023`](https://github.com/nodejs/node/commit/b60e690023)] - **test**: improve test-net-server-pause-on-connect (Brendan Ashworth) [#2429](https://github.com/nodejs/node/pull/2429) +* [[`11d1b8fcaf`](https://github.com/nodejs/node/commit/11d1b8fcaf)] - **test**: improve test-net-pingpong (Brendan Ashworth) [#2429](https://github.com/nodejs/node/pull/2429) +* [[`5fef5c6562`](https://github.com/nodejs/node/commit/5fef5c6562)] - **(SEMVER-MINOR)** **tls**: add --tls-cipher-list command line switch (James M Snell) [#2412](https://github.com/nodejs/node/pull/2412) +* [[`d9b70f9cbf`](https://github.com/nodejs/node/commit/d9b70f9cbf)] - **tls**: handle empty cert in checkServerIndentity (Mike Atkins) [#2343](https://github.com/nodejs/node/pull/2343) +* [[`4f8e34c202`](https://github.com/nodejs/node/commit/4f8e34c202)] - **tools**: add license boilerplate to check-imports.sh (James M Snell) [#2386](https://github.com/nodejs/node/pull/2386) +* [[`b76b9197f9`](https://github.com/nodejs/node/commit/b76b9197f9)] - **tools**: enable space-after-keywords in eslint (Brendan Ashworth) +* [[`64a8f30a70`](https://github.com/nodejs/node/commit/64a8f30a70)] - **tools**: fix anchors in generated documents (Sakthipriyan Vairamani) [#2491](https://github.com/nodejs/node/pull/2491) + +## 2015-08-18, Version 3.1.0, @Fishrock123 + +### Notable changes + +* **buffer**: Fixed a couple large memory leaks (Ben Noordhuis) [#2352](https://github.com/nodejs/node/pull/2352). +* **crypto**: + - Fixed a couple of minor memory leaks (Karl Skomski) [#2375](https://github.com/nodejs/node/pull/2375). + - Signing now checks for OpenSSL errors (P.S.V.R) [#2342](https://github.com/nodejs/node/pull/2342). **Note that this may expose previously hidden errors in user code.** +* **intl**: Intl support using small-icu is now enabled by default in builds (Steven R. Loomis) [#2264](https://github.com/nodejs/node/pull/2264). + - [`String#normalize()`](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/String/normalize) can now be used for unicode normalization. + - The [`Intl`](https://developer.mozilla.org/en/docs/Web/JavaScript/Reference/Global_Objects/Intl) object and various `String` and `Number` methods are present, but only support the English locale. 
+ - For support of all locales, node must be built with [full-icu](https://github.com/nodejs/node#build-with-full-icu-support-all-locales-supported-by-icu). +* **tls**: Fixed tls throughput being much lower after an incorrect merge (Fedor Indutny) [#2381](https://github.com/nodejs/node/pull/2381). +* **tools**: The v8 tick processor now comes bundled with node (Matt Loring) [#2090](https://github.com/nodejs/node/pull/2090). + - This can be used by producing performance profiling output by running node with `--perf`, then running your appropriate platform's script on the output as found in [tools/v8-prof](https://github.com/nodejs/node/tree/master/tools/v8-prof). +* **util**: `util.inspect(obj)` now prints the constructor name of the object if there is one (Christopher Monsanto) [#1935](https://github.com/nodejs/io.js/pull/1935). + +### Known issues + +See https://github.com/nodejs/io.js/labels/confirmed-bug for complete and current list of known issues. + +* Possible remaining post-3.0.0 buffer memory leak(s), details at [#2308](https://github.com/nodejs/node/issues/2308). Mostly fixed in [2781333...8841947](https://github.com/nodejs/node/compare/27813339cf713275fa7bbf2ae497d13b66aa78ce...88419479ccb6384473339f09e5c55f234f4ee194) - [#2352](https://github.com/nodejs/node/pull/2352), and also [6fff0f7...a40ae51](https://github.com/nodejs/node/compare/6fff0f73dc371153507d96ecd5f88c1ddc7780c9...a40ae513bb99ec3f85ea5b864a366bc7552d61f0) - [#2375](https://github.com/nodejs/node/pull/2375). +* Some problems with unreferenced timers running during `beforeExit` are still to be resolved. See [#1264](https://github.com/nodejs/io.js/issues/1264). +* Surrogate pair in REPL can freeze terminal. [#690](https://github.com/nodejs/io.js/issues/690) +* `process.send()` is not synchronous as the docs suggest, a regression introduced in 1.0.2, see [#760](https://github.com/nodejs/io.js/issues/760). +* Calling `dns.setServers()` while a DNS query is in progress can cause the process to crash on a failed assertion. [#894](https://github.com/nodejs/io.js/issues/894) +* `url.resolve` may transfer the auth portion of the url when resolving between two full hosts, see [#1435](https://github.com/nodejs/io.js/issues/1435). + +### Commits + +* [[`3645dc62ed`](https://github.com/nodejs/node/commit/3645dc62ed)] - **build**: work around VS2015 issue in ICU <56 (Steven R. Loomis) [#2283](https://github.com/nodejs/node/pull/2283) +* [[`1f12e03266`](https://github.com/nodejs/node/commit/1f12e03266)] - **(SEMVER-MINOR)** **build**: intl: converge from joyent/node (Steven R. Loomis) [#2264](https://github.com/nodejs/node/pull/2264) +* [[`071640abdd`](https://github.com/nodejs/node/commit/071640abdd)] - **build**: Intl: bump ICU4C from 54 to 55 (Steven R. 
Loomis) [#2293](https://github.com/nodejs/node/pull/2293) +* [[`07a88b0c8b`](https://github.com/nodejs/node/commit/07a88b0c8b)] - **build**: update manifest to include Windows 10 (Lucien Greathouse) [#2332](https://github.com/nodejs/io.js/pull/2332) +* [[`0bb099f444`](https://github.com/nodejs/node/commit/0bb099f444)] - **build**: expand ~ in install prefix early (Ben Noordhuis) [#2307](https://github.com/nodejs/io.js/pull/2307) +* [[`7fe6dd8f5d`](https://github.com/nodejs/node/commit/7fe6dd8f5d)] - **crypto**: check for OpenSSL errors when signing (P.S.V.R) [#2342](https://github.com/nodejs/node/pull/2342) +* [[`605f6ee904`](https://github.com/nodejs/node/commit/605f6ee904)] - **crypto**: fix memory leak in PBKDF2Request (Karl Skomski) [#2375](https://github.com/nodejs/node/pull/2375) +* [[`ba6eb8af12`](https://github.com/nodejs/node/commit/ba6eb8af12)] - **crypto**: fix memory leak in ECDH::SetPrivateKey (Karl Skomski) [#2375](https://github.com/nodejs/node/pull/2375) +* [[`6a16368611`](https://github.com/nodejs/node/commit/6a16368611)] - **crypto**: fix memory leak in PublicKeyCipher::Cipher (Karl Skomski) [#2375](https://github.com/nodejs/node/pull/2375) +* [[`a760a87803`](https://github.com/nodejs/node/commit/a760a87803)] - **crypto**: fix memory leak in SafeX509ExtPrint (Karl Skomski) [#2375](https://github.com/nodejs/node/pull/2375) +* [[`f45487cd6e`](https://github.com/nodejs/node/commit/f45487cd6e)] - **crypto**: fix memory leak in SetDHParam (Karl Skomski) [#2375](https://github.com/nodejs/node/pull/2375) +* [[`2ff183dd86`](https://github.com/nodejs/node/commit/2ff183dd86)] - **doc**: Update FIPS instructions in README.md (Michael Dawson) [#2278](https://github.com/nodejs/node/pull/2278) +* [[`6483bc2e8f`](https://github.com/nodejs/node/commit/6483bc2e8f)] - **doc**: clarify options for fs.watchFile() (Rich Trott) [#2425](https://github.com/nodejs/node/pull/2425) +* [[`e76822f454`](https://github.com/nodejs/node/commit/e76822f454)] - **doc**: multiple documentation updates cherry picked from v0.12 (James M Snell) [#2302](https://github.com/nodejs/io.js/pull/2302) +* [[`1738c9680b`](https://github.com/nodejs/node/commit/1738c9680b)] - **net**: ensure Socket reported address is current (Ryan Graham) [#2095](https://github.com/nodejs/io.js/pull/2095) +* [[`844d3f0e3e`](https://github.com/nodejs/node/commit/844d3f0e3e)] - **path**: use '===' instead of '==' for comparison (Sam Stites) [#2388](https://github.com/nodejs/node/pull/2388) +* [[`7118b8a882`](https://github.com/nodejs/node/commit/7118b8a882)] - **path**: remove dead code in favor of unit tests (Nathan Woltman) [#2282](https://github.com/nodejs/io.js/pull/2282) +* [[`34f2cfa806`](https://github.com/nodejs/node/commit/34f2cfa806)] - **src**: better error message on failed Buffer malloc (Karl Skomski) [#2422](https://github.com/nodejs/node/pull/2422) +* [[`b196c1da3c`](https://github.com/nodejs/node/commit/b196c1da3c)] - **src**: fix memory leak in DLOpen (Karl Skomski) [#2375](https://github.com/nodejs/node/pull/2375) +* [[`d1307b2995`](https://github.com/nodejs/node/commit/d1307b2995)] - **src**: don't use fopen() in require() fast path (Ben Noordhuis) [#2377](https://github.com/nodejs/node/pull/2377) +* [[`455ec570d1`](https://github.com/nodejs/node/commit/455ec570d1)] - **src**: rename Buffer::Use() to Buffer::New() (Ben Noordhuis) [#2352](https://github.com/nodejs/node/pull/2352) +* [[`fd63e1ce2b`](https://github.com/nodejs/node/commit/fd63e1ce2b)] - **src**: introduce internal Buffer::Copy() function (Ben Noordhuis) 
[#2352](https://github.com/nodejs/node/pull/2352) +* [[`5586ceca13`](https://github.com/nodejs/node/commit/5586ceca13)] - **src**: move internal functions out of node_buffer.h (Ben Noordhuis) [#2352](https://github.com/nodejs/node/pull/2352) +* [[`bff9bcddb6`](https://github.com/nodejs/node/commit/bff9bcddb6)] - **src**: plug memory leaks (Ben Noordhuis) [#2352](https://github.com/nodejs/node/pull/2352) +* [[`ccf12df4f3`](https://github.com/nodejs/node/commit/ccf12df4f3)] - **(SEMVER-MINOR)** **src**: add total_available_size to v8 statistics (Roman Klauke) [#2348](https://github.com/nodejs/io.js/pull/2348) +* [[`194eeb841b`](https://github.com/nodejs/node/commit/194eeb841b)] - **test**: drop Isolate::GetCurrent() from addon tests (Ben Noordhuis) [#2427](https://github.com/nodejs/node/pull/2427) +* [[`46cdb2f6e2`](https://github.com/nodejs/node/commit/46cdb2f6e2)] - **test**: lint addon tests (Ben Noordhuis) [#2427](https://github.com/nodejs/node/pull/2427) +* [[`850c794882`](https://github.com/nodejs/node/commit/850c794882)] - **test**: refactor test-fs-watchfile.js (Rich Trott) [#2393](https://github.com/nodejs/node/pull/2393) +* [[`a3160c0a33`](https://github.com/nodejs/node/commit/a3160c0a33)] - **test**: correct spelling of 'childProcess' (muddletoes) [#2389](https://github.com/nodejs/node/pull/2389) +* [[`e51f90d747`](https://github.com/nodejs/node/commit/e51f90d747)] - **test**: option to run a subset of tests (João Reis) [#2260](https://github.com/nodejs/io.js/pull/2260) +* [[`cc46d3bca3`](https://github.com/nodejs/node/commit/cc46d3bca3)] - **test**: clarify dropMembership() call (Rich Trott) [#2062](https://github.com/nodejs/io.js/pull/2062) +* [[`0ee4df9c7a`](https://github.com/nodejs/node/commit/0ee4df9c7a)] - **test**: make listen-fd-cluster/server more robust (Sam Roberts) [#1944](https://github.com/nodejs/io.js/pull/1944) +* [[`cf9ba81398`](https://github.com/nodejs/node/commit/cf9ba81398)] - **test**: address timing issues in simple http tests (Gireesh Punathil) [#2294](https://github.com/nodejs/io.js/pull/2294) +* [[`cbb75c4f86`](https://github.com/nodejs/node/commit/cbb75c4f86)] - **tls**: fix throughput issues after incorrect merge (Fedor Indutny) [#2381](https://github.com/nodejs/node/pull/2381) +* [[`94b765f409`](https://github.com/nodejs/node/commit/94b765f409)] - **tls**: fix check for reused session (Fedor Indutny) [#2312](https://github.com/nodejs/io.js/pull/2312) +* [[`e83a41ad65`](https://github.com/nodejs/node/commit/e83a41ad65)] - **tls**: introduce internal `onticketkeycallback` (Fedor Indutny) [#2312](https://github.com/nodejs/io.js/pull/2312) +* [[`fb0f5d733f`](https://github.com/nodejs/node/commit/fb0f5d733f)] - **(SEMVER-MINOR)** **tools**: run the tick processor without building v8 (Matt Loring) [#2090](https://github.com/nodejs/node/pull/2090) +* [[`7606bdb897`](https://github.com/nodejs/node/commit/7606bdb897)] - **(SEMVER-MINOR)** **util**: display constructor when inspecting objects (Christopher Monsanto) [#1935](https://github.com/nodejs/io.js/pull/1935) + ## 2015-08-04, Version 3.0.0, @rvagg ### Notable changes diff --git a/LICENSE b/LICENSE index f493fde3613eb5..7d215a0717da22 100644 --- a/LICENSE +++ b/LICENSE @@ -657,3 +657,388 @@ The externally maintained libraries used by io.js are: ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
""" + +- ICU's license follows: + From http://source.icu-project.org/repos/icu/icu/trunk/license.html + """ + ICU License - ICU 1.8.1 and later + + COPYRIGHT AND PERMISSION NOTICE + + Copyright (c) 1995-2014 International Business Machines Corporation and others + + All rights reserved. + + Permission is hereby granted, free of charge, to any person + obtaining a copy of this software and associated documentation + files (the "Software"), to deal in the Software without + restriction, including without limitation the rights to use, copy, + modify, merge, publish, distribute, and/or sell copies of the + Software, and to permit persons to whom the Software is furnished + to do so, provided that the above copyright notice(s) and this + permission notice appear in all copies of the Software and that + both the above copyright notice(s) and this permission notice + appear in supporting documentation. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT OF THIRD PARTY RIGHTS. IN NO EVENT SHALL THE + COPYRIGHT HOLDER OR HOLDERS INCLUDED IN THIS NOTICE BE LIABLE FOR + ANY CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR + ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + PERFORMANCE OF THIS SOFTWARE. + + Except as contained in this notice, the name of a copyright holder + shall not be used in advertising or otherwise to promote the sale, + use or other dealings in this Software without prior written + authorization of the copyright holder. + + All trademarks and registered trademarks mentioned herein are the + property of their respective owners. + + Third-Party Software Licenses + + This section contains third-party software notices and/or + additional terms for licensed third-party software components + included within ICU libraries. + + 1. Unicode Data Files and Software + COPYRIGHT AND PERMISSION NOTICE + + Copyright © 1991-2014 Unicode, Inc. All rights reserved. + Distributed under the Terms of Use in + http://www.unicode.org/copyright.html. + + Permission is hereby granted, free of charge, to any person obtaining + a copy of the Unicode data files and any associated documentation + (the "Data Files") or Unicode software and any associated documentation + (the "Software") to deal in the Data Files or Software + without restriction, including without limitation the rights to use, + copy, modify, merge, publish, distribute, and/or sell copies of + the Data Files or Software, and to permit persons to whom the Data Files + or Software are furnished to do so, provided that + (a) this copyright and permission notice appear with all copies + of the Data Files or Software, + (b) this copyright and permission notice appear in associated + documentation, and + (c) there is clear notice in each modified Data File or in the Software + as well as in the documentation associated with the Data File(s) or + Software that the data or software has been modified. + + THE DATA FILES AND SOFTWARE ARE PROVIDED "AS IS", WITHOUT WARRANTY OF + ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE + WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT OF THIRD PARTY RIGHTS. 
+ IN NO EVENT SHALL THE COPYRIGHT HOLDER OR HOLDERS INCLUDED IN THIS + NOTICE BE LIABLE FOR ANY CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL + DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, + DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + PERFORMANCE OF THE DATA FILES OR SOFTWARE. + + Except as contained in this notice, the name of a copyright holder + shall not be used in advertising or otherwise to promote the sale, + use or other dealings in these Data Files or Software without prior + written authorization of the copyright holder. + + 2. Chinese/Japanese Word Break Dictionary Data (cjdict.txt) + # The Google Chrome software developed by Google is licensed + # under the BSD license. Other software included in this distribution + # is provided under other licenses, as set forth below. + # + # The BSD License + # http://opensource.org/licenses/bsd-license.php + # Copyright (C) 2006-2008, Google Inc. + # + # All rights reserved. + # + # Redistribution and use in source and binary forms, with or + # without modification, are permitted provided that the following + # conditions are met: + # + # Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + # Redistributions in binary form must reproduce the above + # copyright notice, this list of conditions and the following + # disclaimer in the documentation and/or other materials provided with + # the distribution. + # Neither the name of Google Inc. nor the names of its + # contributors may be used to endorse or promote products derived from + # this software without specific prior written permission. + # + # + # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND + CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, + INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR + BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF + LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING + NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + # + # + # The word list in cjdict.txt are generated by combining three + word lists listed + # below with further processing for compound word breaking. The + frequency is generated + # with an iterative training against Google web corpora. + # + # * Libtabe (Chinese) + # - https://sourceforge.net/project/?group_id=1519 + # - Its license terms and conditions are shown below. + # + # * IPADIC (Japanese) + # - http://chasen.aist-nara.ac.jp/chasen/distribution.html + # - Its license terms and conditions are shown below. + # + # ---------COPYING.libtabe ---- BEGIN-------------------- + # + # /* + # * Copyrighy (c) 1999 TaBE Project. + # * Copyright (c) 1999 Pai-Hsiang Hsiao. + # * All rights reserved. + # * + # * Redistribution and use in source and binary forms, with or without + # * modification, are permitted provided that the following conditions + # * are met: + # * + # * . 
Redistributions of source code must retain the above copyright + # * notice, this list of conditions and the following disclaimer. + # * . Redistributions in binary form must reproduce the above copyright + # * notice, this list of conditions and the following disclaimer in + # * the documentation and/or other materials provided with the + # * distribution. + # * . Neither the name of the TaBE Project nor the names of its + # * contributors may be used to endorse or promote products derived + # * from this software without specific prior written permission. + # * + # * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + # * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + # * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS + # * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE + # * REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, + # * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + # * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + # * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + # * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, + # * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + # * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED + # * OF THE POSSIBILITY OF SUCH DAMAGE. + # */ + # + # /* + # * Copyright (c) 1999 Computer Systems and Communication Lab, + # * Institute of Information Science, Academia Sinica. + # * All rights reserved. + # * + # * Redistribution and use in source and binary forms, with or without + # * modification, are permitted provided that the following conditions + # * are met: + # * + # * . Redistributions of source code must retain the above copyright + # * notice, this list of conditions and the following disclaimer. + # * . Redistributions in binary form must reproduce the above copyright + # * notice, this list of conditions and the following disclaimer in + # * the documentation and/or other materials provided with the + # * distribution. + # * . Neither the name of the Computer Systems and Communication Lab + # * nor the names of its contributors may be used to endorse or + # * promote products derived from this software without specific + # * prior written permission. + # * + # * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + # * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + # * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS + # * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE + # * REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, + # * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + # * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + # * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + # * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, + # * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + # * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED + # * OF THE POSSIBILITY OF SUCH DAMAGE. 
+ # */ + # + # Copyright 1996 Chih-Hao Tsai @ Beckman Institute, University of Illinois + # c-tsai4@uiuc.edu http://casper.beckman.uiuc.edu/~c-tsai4 + # + # ---------------COPYING.libtabe-----END------------------------------------ + # + # + # ---------------COPYING.ipadic-----BEGIN------------------------------------ + # + # Copyright 2000, 2001, 2002, 2003 Nara Institute of Science + # and Technology. All Rights Reserved. + # + # Use, reproduction, and distribution of this software is permitted. + # Any copy of this software, whether in its original form or modified, + # must include both the above copyright notice and the following + # paragraphs. + # + # Nara Institute of Science and Technology (NAIST), + # the copyright holders, disclaims all warranties with regard to this + # software, including all implied warranties of merchantability and + # fitness, in no event shall NAIST be liable for + # any special, indirect or consequential damages or any damages + # whatsoever resulting from loss of use, data or profits, whether in an + # action of contract, negligence or other tortuous action, arising out + # of or in connection with the use or performance of this software. + # + # A large portion of the dictionary entries + # originate from ICOT Free Software. The following conditions for ICOT + # Free Software applies to the current dictionary as well. + # + # Each User may also freely distribute the Program, whether in its + # original form or modified, to any third party or parties, PROVIDED + # that the provisions of Section 3 ("NO WARRANTY") will ALWAYS appear + # on, or be attached to, the Program, which is distributed substantially + # in the same form as set out herein and that such intended + # distribution, if actually made, will neither violate or otherwise + # contravene any of the laws and regulations of the countries having + # jurisdiction over the User or the intended distribution itself. + # + # NO WARRANTY + # + # The program was produced on an experimental basis in the course of the + # research and development conducted during the project and is provided + # to users as so produced on an experimental basis. Accordingly, the + # program is provided without any warranty whatsoever, whether express, + # implied, statutory or otherwise. The term "warranty" used herein + # includes, but is not limited to, any warranty of the quality, + # performance, merchantability and fitness for a particular purpose of + # the program and the nonexistence of any infringement or violation of + # any right of any third party. + # + # Each user of the program will agree and understand, and be deemed to + # have agreed and understood, that there is no warranty whatsoever for + # the program and, accordingly, the entire risk arising from or + # otherwise connected with the program is assumed by the user. 
+ # + # Therefore, neither ICOT, the copyright holder, or any other + # organization that participated in or was otherwise related to the + # development of the program and their respective officials, directors, + # officers and other employees shall be held liable for any and all + # damages, including, without limitation, general, special, incidental + # and consequential damages, arising out of or otherwise in connection + # with the use or inability to use the program or any product, material + # or result produced or otherwise obtained by using the program, + # regardless of whether they have been advised of, or otherwise had + # knowledge of, the possibility of such damages at any time during the + # project or thereafter. Each user will be deemed to have agreed to the + # foregoing by his or her commencement of use of the program. The term + # "use" as used herein includes, but is not limited to, the use, + # modification, copying and distribution of the program and the + # production of secondary products from the program. + # + # In the case where the program, whether in its original form or + # modified, was distributed or delivered to or received by a user from + # any person, organization or entity other than ICOT, unless it makes or + # grants independently of ICOT any specific warranty to the user in + # writing, such person, organization or entity, will also be exempted + # from and not be held liable to the user for any such damages as noted + # above as far as the program is concerned. + # + # ---------------COPYING.ipadic-----END------------------------------------ + + 3. Lao Word Break Dictionary Data (laodict.txt) + # Copyright (c) 2013 International Business Machines Corporation + # and others. All Rights Reserved. + # + # Project: http://code.google.com/p/lao-dictionary/ + # Dictionary: http://lao-dictionary.googlecode.com/git/Lao-Dictionary.txt + # License: http://lao-dictionary.googlecode.com/git/Lao-Dictionary-LICENSE.txt + # (copied below) + # + # This file is derived from the above dictionary, with slight modifications. + # -------------------------------------------------------------------------------- + # Copyright (C) 2013 Brian Eugene Wilson, Robert Martin Campbell. + # All rights reserved. + # + # Redistribution and use in source and binary forms, with or without modification, + # are permitted provided that the following conditions are met: + # + # Redistributions of source code must retain the above copyright notice, this + # list of conditions and the following disclaimer. Redistributions in binary + # form must reproduce the above copyright notice, this list of conditions and + # the following disclaimer in the documentation and/or other materials + # provided with the distribution. + # + # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND + # ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + # WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + # DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR + # ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + # (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + # LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON + # ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + # SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + # -------------------------------------------------------------------------------- + + 4. Burmese Word Break Dictionary Data (burmesedict.txt) + # Copyright (c) 2014 International Business Machines Corporation + # and others. All Rights Reserved. + # + # This list is part of a project hosted at: + # github.com/kanyawtech/myanmar-karen-word-lists + # + # -------------------------------------------------------------------------------- + # Copyright (c) 2013, LeRoy Benjamin Sharon + # All rights reserved. + # + # Redistribution and use in source and binary forms, with or without modification, + # are permitted provided that the following conditions are met: + # + # Redistributions of source code must retain the above copyright notice, this + # list of conditions and the following disclaimer. + # + # Redistributions in binary form must reproduce the above copyright notice, this + # list of conditions and the following disclaimer in the documentation and/or + # other materials provided with the distribution. + # + # Neither the name Myanmar Karen Word Lists, nor the names of its + # contributors may be used to endorse or promote products derived from + # this software without specific prior written permission. + # + # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND + # ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + # WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR + # ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + # (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + # LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON + # ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + # SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + # -------------------------------------------------------------------------------- + + 5. Time Zone Database + ICU uses the public domain data and code derived from Time Zone + Database for its time zone support. The ownership of the TZ + database is explained in BCP 175: Procedure for Maintaining the + Time Zone Database section 7. + + 7. Database Ownership + + The TZ database itself is not an IETF Contribution or an IETF + document. Rather it is a pre-existing and regularly updated work + that is in the public domain, and is intended to remain in the public + domain. Therefore, BCPs 78 [RFC5378] and 79 [RFC3979] do not apply + to the TZ Database or contributions that individuals make to it. + Should any claims be made and substantiated against the TZ Database, + the organization that is providing the IANA Considerations defined in + this RFC, under the memorandum of understanding with the IETF, + currently ICANN, may act in accordance with all competent court + orders. 
No ownership claims will be made by ICANN or the IETF Trust + on the database or the code. Any person making a contribution to the + database or code waives all rights to future claims in that + contribution or in the TZ Database. + """ diff --git a/Makefile b/Makefile index aa29e2bc06daca..7027ba973f95c1 100644 --- a/Makefile +++ b/Makefile @@ -17,6 +17,11 @@ NODE ?= ./iojs$(EXEEXT) NODE_EXE = iojs$(EXEEXT) NODE_G_EXE = iojs_g$(EXEEXT) +# Flags for packaging. +BUILD_DOWNLOAD_FLAGS ?= --download=all +BUILD_INTL_FLAGS ?= --with-intl=small-icu +BUILD_RELEASE_FLAGS ?= $(BUILD_DOWNLOAD_FLAGS) $(BUILD_INTL_FLAGS) + # Default to verbose builds. # To do quiet/pretty builds, run `make V=` to set V to an empty string, # or set the V environment variable to an empty string. @@ -302,7 +307,7 @@ $(PKG): release-only --dest-cpu=x64 \ --tag=$(TAG) \ --release-urlbase=$(RELEASE_URLBASE) \ - $(CONFIG_FLAGS) + $(CONFIG_FLAGS) $(BUILD_RELEASE_FLAGS) $(MAKE) install V=$(V) DESTDIR=$(PKGDIR) SIGN="$(CODESIGN_CERT)" PKGDIR="$(PKGDIR)" bash tools/osx-codesign.sh cat tools/osx-pkg.pmdoc/index.xml.tmpl \ @@ -363,7 +368,7 @@ $(TARBALL)-headers: config.gypi release-only --dest-cpu=$(DESTCPU) \ --tag=$(TAG) \ --release-urlbase=$(RELEASE_URLBASE) \ - $(CONFIG_FLAGS) + $(CONFIG_FLAGS) $(BUILD_RELEASE_FLAGS) HEADERS_ONLY=1 $(PYTHON) tools/install.py install '$(TARNAME)' '/' find $(TARNAME)/ -type l | xargs rm # annoying on windows tar -cf $(TARNAME)-headers.tar $(TARNAME) @@ -393,7 +398,7 @@ $(BINARYTAR): release-only --dest-cpu=$(DESTCPU) \ --tag=$(TAG) \ --release-urlbase=$(RELEASE_URLBASE) \ - $(CONFIG_FLAGS) + $(CONFIG_FLAGS) $(BUILD_RELEASE_FLAGS) $(MAKE) install DESTDIR=$(BINARYNAME) V=$(V) PORTABLE=1 cp README.md $(BINARYNAME) cp LICENSE $(BINARYNAME) @@ -478,8 +483,19 @@ CPPLINT_EXCLUDE += src/node_win32_perfctr_provider.cc CPPLINT_EXCLUDE += src/queue.h CPPLINT_EXCLUDE += src/tree.h CPPLINT_EXCLUDE += src/v8abbr.h - -CPPLINT_FILES = $(filter-out $(CPPLINT_EXCLUDE), $(wildcard src/*.cc src/*.h src/*.c tools/icu/*.h tools/icu/*.cc deps/debugger-agent/include/* deps/debugger-agent/src/*)) +CPPLINT_EXCLUDE += $(wildcard test/addons/doc-*/*.cc test/addons/doc-*/*.h) + +CPPLINT_FILES = $(filter-out $(CPPLINT_EXCLUDE), $(wildcard \ + deps/debugger-agent/include/* \ + deps/debugger-agent/src/* \ + src/*.c \ + src/*.cc \ + src/*.h \ + test/addons/*/*.cc \ + test/addons/*/*.h \ + tools/icu/*.cc \ + tools/icu/*.h \ + )) cpplint: @$(PYTHON) tools/cpplint.py $(CPPLINT_FILES) diff --git a/README.md b/README.md index 60e7620fbf33bc..dcf36e4dbcec82 100644 --- a/README.md +++ b/README.md @@ -269,20 +269,46 @@ NOTE: Windows is not yet supported It is possible to build io.js with [OpenSSL FIPS module](https://www.openssl.org/docs/fips/fipsnotes.html). +**Note** that building in this way does **not** allow you to +claim that the runtime is FIPS 140-2 validated. Instead you +can indicate that the runtime uses a validated module. See +the [security policy] +(http://csrc.nist.gov/groups/STM/cmvp/documents/140-1/140sp/140sp1747.pdf) +page 60 for more details. In addition, the validation for +the underlying module is only valid if it is deployed in +accordance with its [security policy] +(http://csrc.nist.gov/groups/STM/cmvp/documents/140-1/140sp/140sp1747.pdf). +If you need FIPS validated cryptography it is recommended that you +read both the [security policy] +(http://csrc.nist.gov/groups/STM/cmvp/documents/140-1/140sp/140sp1747.pdf) +and [user guide] (https://openssl.org/docs/fips/UserGuide-2.0.pdf). + Instructions: -1. 
Download and verify `openssl-fips-x.x.x.tar.gz` from - https://www.openssl.org/source/ -2. Extract source to `openssl-fips` folder -3. ``cd openssl-fips && ./config fipscanisterbuild --prefix=`pwd`/out`` - (NOTE: On OS X, you may want to run - ``./Configure darwin64-x86_64-cc --prefix=`pwd`/out`` if you are going to - build x64-mode io.js) -4. `make -j && make install` -5. Get into io.js checkout folder -6. `./configure --openssl-fips=/path/to/openssl-fips/out` -7. Build io.js with `make -j` -8. Verify with `node -p "process.versions.openssl"` (`1.0.2a-fips`) +1. Obtain a copy of openssl-fips-x.x.x.tar.gz. + To comply with the security policy you must ensure the path + through which you get the file complies with the requirements + for a "secure intallation" as described in section 6.6 in + the [user guide] (https://openssl.org/docs/fips/UserGuide-2.0.pdf). + For evaluation/experimentation you can simply download and verify + `openssl-fips-x.x.x.tar.gz` from https://www.openssl.org/source/ +2. Extract source to `openssl-fips` folder and `cd openssl-fips` +3. `./config` +4. `make` +5. `make install` + (NOTE: to comply with the security policy you must use the exact + commands in steps 3-5 without any additional options as per + Appendix A in the [security policy] + (http://csrc.nist.gov/groups/STM/cmvp/documents/140-1/140sp/140sp1747.pdf). + The only exception is that `./config no-asm` can be + used in place of `./config` ) +6. Get into io.js checkout folder +7. `./configure --openssl-fips=/path/to/openssl-fips/installdir` + For example on ubuntu 12 the installation directory was + /usr/local/ssl/fips-2.0 +8. Build io.js with `make -j` +9. Verify with `node -p "process.versions.openssl"` (`1.0.2a-fips`) + ## Resources for Newcomers diff --git a/benchmark/events/ee-listener-count.js b/benchmark/events/ee-listener-count-on-prototype.js similarity index 80% rename from benchmark/events/ee-listener-count.js rename to benchmark/events/ee-listener-count-on-prototype.js index a697c41b8b4fe4..8d536edd62739b 100644 --- a/benchmark/events/ee-listener-count.js +++ b/benchmark/events/ee-listener-count-on-prototype.js @@ -7,14 +7,13 @@ function main(conf) { var n = conf.n | 0; var ee = new EventEmitter(); - var listenerCount = EventEmitter.listenerCount; for (var k = 0; k < 10; k += 1) ee.on('dummy', function() {}); bench.start(); for (var i = 0; i < n; i += 1) { - var r = listenerCount(ee, 'dummy'); + var r = ee.listenerCount('dummy'); } bench.end(n); } diff --git a/configure b/configure index ab2383c8141b15..cd594d62788658 100755 --- a/configure +++ b/configure @@ -335,6 +335,9 @@ parser.add_option('--enable-static', (options, args) = parser.parse_args() +# Expand ~ in the install prefix now, it gets written to multiple files. +options.prefix = os.path.expanduser(options.prefix or '') + # set up auto-download list auto_downloads = nodedownload.parse(options.download_list) @@ -611,7 +614,7 @@ def configure_mips(o): def configure_node(o): if options.dest_os == 'android': o['variables']['OS'] = 'android' - o['variables']['node_prefix'] = os.path.expanduser(options.prefix or '') + o['variables']['node_prefix'] = options.prefix o['variables']['node_install_npm'] = b(not options.without_npm) o['default_configuration'] = 'Debug' if options.debug else 'Release' @@ -778,7 +781,7 @@ def write(filename, data): do_not_edit = '# Do not edit. 
Generated by the configure script.\n' -def glob_to_var(dir_base, dir_sub): +def glob_to_var(dir_base, dir_sub, patch_dir): list = [] dir_all = os.path.join(dir_base, dir_sub) files = os.walk(dir_all) @@ -786,16 +789,23 @@ def glob_to_var(dir_base, dir_sub): (path, dirs, files) = ent for file in files: if file.endswith('.cpp') or file.endswith('.c') or file.endswith('.h'): - list.append('%s/%s' % (dir_sub, file)) + # srcfile uses "slash" as dir separator as its output is consumed by gyp + srcfile = '%s/%s' % (dir_sub, file) + if patch_dir: + patchfile = '%s/%s/%s' % (dir_base, patch_dir, file) + if os.path.isfile(patchfile): + srcfile = '%s/%s' % (patch_dir, file) + print 'Using version-specific floating patch %s' % patchfile + list.append(srcfile) break return list def configure_intl(o): icus = [ { - 'url': 'http://download.icu-project.org/files/icu4c/54.1/icu4c-54_1-src.zip', - # from https://ssl.icu-project.org/files/icu4c/54.1/icu4c-src-54_1.md5: - 'md5': '6b89d60e2f0e140898ae4d7f72323bca', + 'url': 'http://download.icu-project.org/files/icu4c/55.1/icu4c-55_1-src.zip', + # from https://ssl.icu-project.org/files/icu4c/55.1/icu4c-src-55_1.md5: + 'md5': '4cddf1e1d47622fdd9de2cd7bb5001fd', }, ] def icu_download(path): @@ -1008,7 +1018,7 @@ def configure_intl(o): for i in icu_src: var = 'icu_src_%s' % i path = '../../deps/icu/source/%s' % icu_src[i] - icu_config['variables'][var] = glob_to_var('tools/icu', path) + icu_config['variables'][var] = glob_to_var('tools/icu', path, 'patches/%s/source/%s' % (icu_ver_major, icu_src[i]) ) # write updated icu_config.gypi with a bunch of paths write(icu_config_name, do_not_edit + pprint.pformat(icu_config, indent=2) + '\n') diff --git a/doc/api/buffer.markdown b/doc/api/buffer.markdown index 92b7f2ba93b288..94f3e3d8e164f5 100644 --- a/doc/api/buffer.markdown +++ b/doc/api/buffer.markdown @@ -43,7 +43,7 @@ Creating a typed array from a `Buffer` works with the following caveats: 2. The buffer's memory is interpreted as an array, not a byte array. That is, `new Uint32Array(new Buffer([1,2,3,4]))` creates a 4-element `Uint32Array` - with elements `[1,2,3,4]`, not an `Uint32Array` with a single element + with elements `[1,2,3,4]`, not a `Uint32Array` with a single element `[0x1020304]` or `[0x4030201]`. NOTE: Node.js v0.8 simply retained a reference to the buffer in `array.buffer` @@ -67,6 +67,10 @@ Allocates a new buffer of `size` bytes. `size` must be less than 2,147,483,648 bytes (2 GB) on 64 bits architectures, otherwise a `RangeError` is thrown. +Unlike `ArrayBuffers`, the underlying memory for buffers is not initialized. So +the contents of a newly created `Buffer` is unknown. Use `buf.fill(0)`to +initialize a buffer to zeroes. + ### new Buffer(array) * `array` Array diff --git a/doc/api/child_process.markdown b/doc/api/child_process.markdown index 3d6e5f5fe2f048..4acd61bfa6a97c 100644 --- a/doc/api/child_process.markdown +++ b/doc/api/child_process.markdown @@ -279,7 +279,7 @@ Here is an example of sending a server: child.send('server', server); }); -And the child would the receive the server object as: +And the child would then receive the server object as: process.on('message', function(m, server) { if (m === 'server') { diff --git a/doc/api/cluster.markdown b/doc/api/cluster.markdown index b7e76fcb8ffa0e..abf05bf9f40c82 100644 --- a/doc/api/cluster.markdown +++ b/doc/api/cluster.markdown @@ -121,7 +121,7 @@ values are `"rr"` and `"none"`. ## cluster.settings * {Object} - * `execArgv` {Array} list of string arguments passed to the io.js executable. 
+ * `execArgv` {Array} list of string arguments passed to the io.js executable. (Default=`process.execArgv`) * `exec` {String} file path to worker file. (Default=`process.argv[1]`) * `args` {Array} string arguments passed to worker. @@ -613,7 +613,7 @@ It is not emitted in the worker. ### Event: 'disconnect' -Similar to the `cluster.on('disconnect')` event, but specfic to this worker. +Similar to the `cluster.on('disconnect')` event, but specific to this worker. cluster.fork().on('disconnect', function() { // Worker has disconnected diff --git a/doc/api/dns.markdown b/doc/api/dns.markdown index d8ed53e3fa0f79..7c9f419ce08ae7 100644 --- a/doc/api/dns.markdown +++ b/doc/api/dns.markdown @@ -85,7 +85,7 @@ All properties are optional. An example usage of options is shown below. ``` The callback has arguments `(err, address, family)`. `address` is a string -representation of a IP v4 or v6 address. `family` is either the integer 4 or 6 +representation of an IP v4 or v6 address. `family` is either the integer 4 or 6 and denotes the family of `address` (not necessarily the value initially passed to `lookup`). @@ -163,7 +163,7 @@ attribute (e.g. `[{'priority': 10, 'exchange': 'mx.example.com'},...]`). ## dns.resolveTxt(hostname, callback) The same as `dns.resolve()`, but only for text queries (`TXT` records). -`addresses` is an 2-d array of the text records available for `hostname` (e.g., +`addresses` is a 2-d array of the text records available for `hostname` (e.g., `[ ['v=spf1 ip4:0.0.0.0 ', '~all' ] ]`). Each sub-array contains TXT chunks of one record. Depending on the use case, the could be either joined together or treated separately. diff --git a/doc/api/events.markdown b/doc/api/events.markdown index fbc04a96239b0f..de547a5b8b580d 100644 --- a/doc/api/events.markdown +++ b/doc/api/events.markdown @@ -122,7 +122,7 @@ Note that `emitter.setMaxListeners(n)` still has precedence over ### emitter.listeners(event) -Returns an array of listeners for the specified event. +Returns a copy of the array of listeners for the specified event. server.on('connection', function (stream) { console.log('someone connected!'); @@ -137,10 +137,17 @@ Execute each of the listeners in order with the supplied arguments. Returns `true` if event had listeners, `false` otherwise. +### emitter.listenerCount(type) + +* `type` {Value} The type of event + +Returns the number of listeners listening to the `type` of event. + ### Class Method: EventEmitter.listenerCount(emitter, event) Return the number of listeners for a given event. +_Note: This is deprecated. Use `emitter.listenerCount` instead._ ### Event: 'newListener' diff --git a/doc/api/fs.markdown b/doc/api/fs.markdown index 5f96b10763fe45..c9fd8466811c14 100644 --- a/doc/api/fs.markdown +++ b/doc/api/fs.markdown @@ -557,11 +557,12 @@ The synchronous version of `fs.appendFile`. Returns `undefined`. Watch for changes on `filename`. The callback `listener` will be called each time the file is accessed. -The second argument is optional. The `options` if provided should be an object -containing two members a boolean, `persistent`, and `interval`. `persistent` -indicates whether the process should continue to run as long as files are -being watched. `interval` indicates how often the target should be polled, -in milliseconds. The default is `{ persistent: true, interval: 5007 }`. +The `options` argument may be omitted. If provided, it should be an object. 
The +`options` object may contain a boolean named `persistent` that indicates +whether the process should continue to run as long as files are being watched. +The `options` object may specify an `interval` property indicating how often the +target should be polled in milliseconds. The default is +`{ persistent: true, interval: 5007 }`. The `listener` gets two arguments the current stat object and the previous stat object: @@ -801,6 +802,10 @@ on Unix systems, it never was. Returns a new ReadStream object (See `Readable Stream`). +Be aware that, unlike the default value set for `highWaterMark` on a +readable stream (16 kb), the stream returned by this method has a +default value of 64 kb for the same parameter. + `options` is an object or string with the following defaults: { flags: 'r', @@ -823,6 +828,9 @@ there's no file descriptor leak. If `autoClose` is set to true (default behavior), on `error` or `end` the file descriptor will be closed automatically. +`mode` sets the file mode (permission and sticky bits), but only if the +file was created. + An example to read the last 10 bytes of a file which is 100 bytes long: fs.createReadStream('sample.txt', {start: 90, end: 99}); @@ -847,14 +855,14 @@ Returns a new WriteStream object (See `Writable Stream`). `options` is an object or string with the following defaults: { flags: 'w', - encoding: null, + defaultEncoding: 'utf8', fd: null, mode: 0o666 } `options` may also include a `start` option to allow writing data at some position past the beginning of the file. Modifying a file rather than replacing it may require a `flags` mode of `r+` rather than the -default mode `w`. The `encoding` can be `'utf8'`, `'ascii'`, `binary`, +default mode `w`. The `defaultEncoding` can be `'utf8'`, `'ascii'`, `binary`, or `'base64'`. Like `ReadStream` above, if `fd` is specified, `WriteStream` will ignore the diff --git a/doc/api/http.markdown b/doc/api/http.markdown index 5cf3b07a9cd778..ad2473c98019d7 100644 --- a/doc/api/http.markdown +++ b/doc/api/http.markdown @@ -452,6 +452,11 @@ If `data` is specified, it is equivalent to calling If `callback` is specified, it will be called when the response stream is finished. +### response.finished + +Boolean value that indicates whether the response has completed. Starts +as `false`. After `response.end()` executes, the value will be `true`. + ## http.request(options[, callback]) io.js maintains several connections per server to make HTTP requests. @@ -462,6 +467,7 @@ automatically parsed with [url.parse()][]. Options: +- `protocol`: Protocol to use. Defaults to `'http'`. - `host`: A domain name or IP address of the server to issue the request to. Defaults to `'localhost'`. - `hostname`: Alias for `host`. To support `url.parse()` `hostname` is @@ -911,7 +917,8 @@ is finished. ### request.abort() -Aborts a request. (New since v0.3.8.) +Marks the request as aborting. Calling this will cause remaining data +in the response to be dropped and the socket to be destroyed. ### request.setTimeout(timeout[, callback]) diff --git a/doc/api/stream.markdown b/doc/api/stream.markdown index a7a78f229edb57..ffad1717f75096 100644 --- a/doc/api/stream.markdown +++ b/doc/api/stream.markdown @@ -164,6 +164,34 @@ readable.on('readable', function() { Once the internal buffer is drained, a `readable` event will fire again when more data is available. +The `readable` event is not emitted in the "flowing" mode with the +sole exception of the last one, on end-of-stream. 
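The `fs.markdown` hunk earlier in this patch notes that streams returned by `fs.createReadStream()` default `highWaterMark` to 64 kb rather than the 16 kb default of a generic readable stream. A minimal sketch of overriding that default — not part of the patch; `sample.txt` is a placeholder path, and it is assumed the option is forwarded to the underlying `Readable` as in current releases:

```js
var fs = require('fs');

// Request 16 kb chunks instead of the 64 kb default used by fs.ReadStream.
var rs = fs.createReadStream('sample.txt', { highWaterMark: 16 * 1024 });

rs.on('data', function(chunk) {
  // Each chunk will be at most highWaterMark bytes.
  console.log('read %d bytes', chunk.length);
});
```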
+ +The 'readable' event indicates that the stream has new information: +either new data is available or the end of the stream has been reached. +In the former case, `.read()` will return that data. In the latter case, +`.read()` will return null. For instance, in the following example, `foo.txt` +is an empty file: + +```javascript +var fs = require('fs'); +var rr = fs.createReadStream('foo.txt'); +rr.on('readable', function() { + console.log('readable:', rr.read()); +}); +rr.on('end', function() { + console.log('end'); +}); +``` + +The output of running this script is: + +``` +bash-3.2$ node test.js +readable: null +end +``` + #### Event: 'data' * `chunk` {Buffer | String} The chunk of data. @@ -221,7 +249,9 @@ returns it. If there is no data available, then it will return `null`. If you pass in a `size` argument, then it will return that many -bytes. If `size` bytes are not available, then it will return `null`. +bytes. If `size` bytes are not available, then it will return `null`, +unless we've ended, in which case it will return the data remaining +in the buffer. If you do not specify a `size` argument, then it will return all the data in the internal buffer. @@ -243,6 +273,9 @@ readable.on('readable', function() { If this method returns a data chunk, then it will also trigger the emission of a [`'data'` event][]. +Note that calling `readable.read([size])` after the `end` event has been +triggered will return `null`. No runtime error will be raised. + #### readable.setEncoding(encoding) * `encoding` {String} The encoding to use. @@ -414,6 +447,9 @@ parser, which needs to "un-consume" some data that it has optimistically pulled out of the source, so that the stream can be passed on to some other party. +Note that `stream.unshift(chunk)` cannot be called after the `end` event +has been triggered; a runtime error will be raised. + If you find that you must often call `stream.unshift(chunk)` in your programs, consider implementing a [Transform][] stream instead. (See API for Stream Implementors, below.) @@ -452,6 +488,13 @@ function parseHeader(stream, callback) { } } ``` +Note that, unlike `stream.push(chunk)`, `stream.unshift(chunk)` will not +end the reading process by resetting the internal reading state of the +stream. This can cause unexpected results if `unshift` is called during a +read (i.e. from within a `_read` implementation on a custom stream). Following +the call to `unshift` with an immediate `stream.push('')` will reset the +reading state appropriately, however it is best to simply avoid calling +`unshift` while in the process of performing a read. #### readable.wrap(stream) @@ -883,6 +926,10 @@ SimpleProtocol.prototype._read = function(n) { // back into the read queue so that our consumer will see it. var b = chunk.slice(split); this.unshift(b); + // calling unshift by itself does not reset the reading state + // of the stream; since we're inside _read, doing an additional + // push('') will reset the state appropriately. + this.push(''); // and let them know that we are done parsing the header. this.emit('header', this.header); @@ -922,24 +969,22 @@ initialized. * `size` {Number} Number of bytes to read asynchronously -Note: **Implement this function, but do NOT call it directly.** +Note: **Implement this method, but do NOT call it directly.** -This function should NOT be called directly. It should be implemented -by child classes, and only called by the internal Readable class -methods. 
+This method is prefixed with an underscore because it is internal to the +class that defines it and should only be called by the internal Readable +class methods. All Readable stream implementations must provide a _read +method to fetch data from the underlying resource. -All Readable stream implementations must provide a `_read` method to -fetch data from the underlying resource. - -This method is prefixed with an underscore because it is internal to -the class that defines it, and should not be called directly by user -programs. However, you **are** expected to override this method in -your own extension classes. +When _read is called, if data is available from the resource, `_read` should +start pushing that data into the read queue by calling `this.push(dataChunk)`. +`_read` should continue reading from the resource and pushing data until push +returns false, at which point it should stop reading from the resource. Only +when _read is called again after it has stopped should it start reading +more data from the resource and pushing that data onto the queue. -When data is available, put it into the read queue by calling -`readable.push(chunk)`. If `push` returns false, then you should stop -reading. When `_read` is called again, you should start pushing more -data. +Note: once the `_read()` method is called, it will not be called again until +the `push` method is called. The `size` argument is advisory. Implementations where a "read" is a single call that returns data can use this to know how much data to @@ -955,19 +1000,16 @@ becomes available. There is no need, for example to "wait" until Buffer encoding, such as `'utf8'` or `'ascii'` * return {Boolean} Whether or not more pushes should be performed -Note: **This function should be called by Readable implementors, NOT +Note: **This method should be called by Readable implementors, NOT by consumers of Readable streams.** -The `_read()` function will not be called again until at least one -`push(chunk)` call is made. - -The `Readable` class works by putting data into a read queue to be -pulled out later by calling the `read()` method when the `'readable'` -event fires. +If a value other than null is passed, The `push()` method adds a chunk of data +into the queue for subsequent stream processors to consume. If `null` is +passed, it signals the end of the stream (EOF), after which no more data +can be written. -The `push()` method will explicitly insert some data into the read -queue. If it is called with `null` then it will signal the end of the -data (EOF). +The data added with `push` can be pulled out by calling the `read()` method +when the `'readable'`event fires. This API is designed to be as flexible as possible. For example, you may be wrapping a lower-level source which has some sort of @@ -1315,7 +1357,7 @@ for examples and testing, but there are occasionally use cases where it can come in handy as a building block for novel sorts of streams. -## Simplified Constructor API +## Simplified Constructor API diff --git a/doc/api/tls.markdown b/doc/api/tls.markdown index 16af6fe74f7c63..422251f1041f70 100644 --- a/doc/api/tls.markdown +++ b/doc/api/tls.markdown @@ -77,6 +77,44 @@ handshake extensions allowing you: * SNI - to use one TLS server for multiple hostnames with different SSL certificates. +## Modifying the Default TLS Cipher suite + +Node.js is built with a default suite of enabled and disabled TLS ciphers. 
+Currently, the default cipher suite is: + + ECDHE-RSA-AES128-GCM-SHA256: + ECDHE-ECDSA-AES128-GCM-SHA256: + ECDHE-RSA-AES256-GCM-SHA384: + ECDHE-ECDSA-AES256-GCM-SHA384: + DHE-RSA-AES128-GCM-SHA256: + ECDHE-RSA-AES128-SHA256: + DHE-RSA-AES128-SHA256: + ECDHE-RSA-AES256-SHA384: + DHE-RSA-AES256-SHA384: + ECDHE-RSA-AES256-SHA256: + DHE-RSA-AES256-SHA256: + HIGH: + !aNULL: + !eNULL: + !EXPORT: + !DES: + !RC4: + !MD5: + !PSK: + !SRP: + !CAMELLIA + +This default can be overriden entirely using the `--tls-cipher-list` command +line switch. For instance, the following makes +`ECDHE-RSA-AES128-GCM-SHA256:!RC4` the default TLS cipher suite: + + node --tls-cipher-list="ECDHE-RSA-AES128-GCM-SHA256:!RC4" + +Note that the default cipher suite included within Node.js has been carefully +selected to reflect current security best practices and risk mitigation. +Changing the default cipher suite can have a significant impact on the security +of an application. The `--tls-cipher-list` switch should by used only if +absolutely necessary. ## Perfect Forward Secrecy @@ -138,7 +176,7 @@ automatically set as a listener for the [secureConnection][] event. The - `crl` : Either a string or list of strings of PEM encoded CRLs (Certificate Revocation List) - - `ciphers`: A string describing the ciphers to use or exclude, seperated by + - `ciphers`: A string describing the ciphers to use or exclude, separated by `:`. The default cipher suite is: ECDHE-RSA-AES128-GCM-SHA256: diff --git a/doc/api/v8.markdown b/doc/api/v8.markdown index cedd5c86d9b008..4caf6ebcd4437a 100644 --- a/doc/api/v8.markdown +++ b/doc/api/v8.markdown @@ -15,6 +15,7 @@ Returns an object with the following properties total_heap_size: 7326976, total_heap_size_executable: 4194304, total_physical_size: 7326976, + total_available_size: 1152656, used_heap_size: 3476208, heap_size_limit: 1535115264 } diff --git a/doc/api_assets/style.css b/doc/api_assets/style.css index 709a835f814963..d40253e5ae1d4a 100644 --- a/doc/api_assets/style.css +++ b/doc/api_assets/style.css @@ -79,6 +79,15 @@ code a:hover { color: white !important; } +.api_stability_0 a, +.api_stability_1 a, +.api_stability_2 a, +.api_stability_3 a, +.api_stability_4 a, +.api_stability_5 a { + text-decoration: underline; +} + .api_stability_0 { background-color: #D60027; } diff --git a/doc/iojs.1 b/doc/iojs.1 index 0512d5e75cdd37..c8dc08cb6f42d4 100644 --- a/doc/iojs.1 +++ b/doc/iojs.1 @@ -64,6 +64,9 @@ and servers. --v8-options print v8 command line options + --tls-cipher-list=list use an alternative default TLS cipher list + (available only when Node.js is built with + OpenSSL and crypto support enabled) .SH ENVIRONMENT VARIABLES diff --git a/doc/tsc-meetings/2015-05-27.md b/doc/tsc-meetings/2015-05-27.md index 7d93cd89dd4808..7c0bce36d5dd4f 100644 --- a/doc/tsc-meetings/2015-05-27.md +++ b/doc/tsc-meetings/2015-05-27.md @@ -2,20 +2,21 @@ ## Links +* **Audio Recording**: https://soundcloud.com/node-foundation/tsc-meeting-2015-05-27 * **Public YouTube feed**: http://www.youtube.com/watch?v=0DPfLxulsbQ -* **GitHub Issue**: https://github.com/nodejs/node/issues/41 +* **GitHub Issue**: https://github.com/nodejs/node-convergence-archive/issues/41 * **Original Minutes Google Doc**: https://docs.google.com/document/d/1-KlxiQGMsJFNJu3meok9e9XFsM39k_PMnQmY_9d_cy0 ## Agenda Extracted from **tsc-agenda** labelled issues and pull requests prior to meeting. -### nodejs/node +### nodejs/node-convergence-archive -* \[Converge\] timers: Avoid linear scan in `_unrefActive`. 
[#23](https://github.com/nodejs/node/issues/23) -* \[Converge\] child_process argument type checking [#22](https://github.com/nodejs/node/issues/22) -* \[Converge\] SSLv2/3 disable/enable related commits [#20](https://github.com/nodejs/node/issues/20) -* doc: Add new working groups [#15](https://github.com/nodejs/node/pull/15) +* \[Converge\] timers: Avoid linear scan in `_unrefActive`. [#23](https://github.com/nodejs/node-convergence-archive/issues/23) +* \[Converge\] child_process argument type checking [#22](https://github.com/nodejs/node-convergence-archive/issues/22) +* \[Converge\] SSLv2/3 disable/enable related commits [#20](https://github.com/nodejs/node-convergence-archive/issues/20) +* doc: Add new working groups [#15](https://github.com/nodejs/node-convergence-archive/pull/15) ### nodejs/io.js @@ -68,7 +69,7 @@ Extracted from **tsc-agenda** labelled issues and pull requests prior to meeting ## Minutes -### \[Converge\] timers: Avoid linear scan in `_unrefActive`. [#23](https://github.com/nodejs/node/issues/23) +### \[Converge\] timers: Avoid linear scan in `_unrefActive`. [#23](https://github.com/nodejs/node-convergence-archive/issues/23) * James: conflicting approaches in both repos * Ben: both are terrible under different workloads - do away with the code and start again @@ -76,13 +77,13 @@ Extracted from **tsc-agenda** labelled issues and pull requests prior to meeting * Bert: some problems with http - discussion happened about the implementation * Chris: would be good to have Julien’s input since he was active on the joyent/node impl -### \[Converge\] child_process argument type checking [#22](https://github.com/nodejs/node/issues/22) +### \[Converge\] child_process argument type checking [#22](https://github.com/nodejs/node-convergence-archive/issues/22) * James: arg checking merged in 0.10 after the fork * Discussion about why this wasn’t merged to io.js * Defer back to GitHub discussion after no reason for not merging could be found on the call -### \[Converge\] SSLv2/3 disable/enable related commits [#20](https://github.com/nodejs/node/issues/20) +### \[Converge\] SSLv2/3 disable/enable related commits [#20](https://github.com/nodejs/node-convergence-archive/issues/20) * James: SSLv2/3 removed in io.js, merging these commits would involve reverting * Jeremiah proposed 0.12 being the LTS for SSLv2/3 support @@ -90,7 +91,7 @@ Extracted from **tsc-agenda** labelled issues and pull requests prior to meeting * Michael: we don’t know how extensively it’s being used? 
* James: pending research into that question we’ll leave this alone, come back if there’s a compelling reason to revert -### doc: Add new working groups [#15](https://github.com/nodejs/node/pull/15) +### doc: Add new working groups [#15](https://github.com/nodejs/node-convergence-archive/pull/15) * Michael: Benchmarking and Post Mortem Debugging working groups are ready and have started, i18n group needs a bit more work to get off the ground * Group didn’t see any reason not to go forward with these groups, they have repos and can be in an “incubating” state for now diff --git a/doc/tsc-meetings/2015-06-03.md b/doc/tsc-meetings/2015-06-03.md new file mode 100644 index 00000000000000..e9b75db5d3aaf6 --- /dev/null +++ b/doc/tsc-meetings/2015-06-03.md @@ -0,0 +1,166 @@ +# Node.js Foundation TSC Meeting 2015-06-03 + +## Links + +* **Audio Recording**: https://soundcloud.com/node-foundation/tsc-meeting-2015-06-03 +* **GitHub Issue**: https://github.com/nodejs/node-convergence-archive/issues/47 +* **Original Minutes Google Doc**: https://docs.google.com/document/d/1sTD0uryasBR15UBzEbJj3oHYtnuN9ZIqVxA2A_-N56E + +## Agenda + +Extracted from **tsc-agenda** labelled issues and pull requests prior to meeting. + +### nodejs/io.js + +* Add working group state per Project Lifecycle. [#1880](https://github.com/nodejs/io.js/pull/1880) +* Proposal: Split TSC agenda in to two meetings [#1879](https://github.com/nodejs/io.js/issues/1879) +* Chrome 43 released; time for V8 4.3! [#1735](https://github.com/nodejs/io.js/issues/1735) +* TSC needs to elect a board representative. [#1697](https://github.com/nodejs/io.js/issues/1697) +* Expose `deepEqual` and `deepStrictEqual` in `util`. #1172 [#1177](https://github.com/nodejs/io.js/pull/1177) + +## Present + +* Rod Vagg (TSC) +* Mikeal Rogers +* Shigeki Ohtsu (TSC) +* Chris Dickinson (TSC) +* Colin Ihrig (TSC) +* Julien Gilli (TSC) +* James Snell (TSC) +* Michael Dawson (TSC) +* Bert Belder (TSC) +* Fedor Indutny (TSC) +* Jeremiah Senkpiel (TSC) +* Domenic Denicola +* Alexis Campailla (TSC) +* Ben Noordhuis (TSC) + +## Quick stand-up + +* Rod: working on the build, looking into npm smoke-testing +* Mikeal: getting foundation legalities and officialities in order +* Shigeki: working on security issues, reviewing root cert update +* Chris: working on getting npm static analysis working [estoc](https://github.com/chrisdickinson/estoc) +* Colin: reviewing prs and issues, doing some libuv work (uv_os_homedir, which landed in libuv 1.6.0) +* Julien: vacation, nodejs.org downtime postmortem +* James: working on convergence, triaging joyent/node issues +* Michael: triaging some joyent/node issues, working on powerpc build and npm testing +* Bert: not much, looked at some issues, discussing with Saul if libuv should move into the Node Foundation +* Fedor: reviewed some pull requests and issues, working on some openssl mode +* Jeremiah: issues, convergence, `_unrefActive` timers work, looking at improving Ben’s binary heap implementation +* Domenic: vm fixes in v8 integrating patches +* Alexis: patch for timers firing early, CI convergence work - jenkins hackery +* Ben: reviewing pull requests, making libuv threadpool more scalable +* Brian: JS dns resolver +* Trevor: merged UInt8Array Buffer changes into the next branch, additional ArrayBuffer-related changes and fiddling + +## Review of last meeting + +### nodejs/node + +* \[Converge\] timers: Avoid linear scan in `_unrefActive`. 
[#23](https://github.com/nodejs/node/issues/23) +* \[Converge\] child_process argument type checking [#22](https://github.com/nodejs/node/issues/22) +* \[Converge\] SSLv2/3 disable/enable related commits [#20](https://github.com/nodejs/node/issues/20) +* doc: Add new working groups [#15](https://github.com/nodejs/node/pull/15) + +### nodejs/io.js + +* Buffer implemented using Uint8Array [#1810](https://github.com/nodejs/io.js/issues/1810) +* \[Discussion\] FFI - Giving Buffer more low-level C functionality [#1750](https://github.com/nodejs/io.js/pull/1750) +* Chrome 43 released; time for V8 4.3! [#1735](https://github.com/nodejs/io.js/issues/1735) +* Deprecation Policy [#1704](https://github.com/nodejs/io.js/issues/1704) +* TSC needs to elect a board representative. [#1697](https://github.com/nodejs/io.js/issues/1697) +* V8 4.4 to remove indexed properties via external data [#1451](https://github.com/nodejs/io.js/issues/1451) + +## Minutes + +### Add working group state per Project Lifecycle. [#1880](https://github.com/nodejs/io.js/pull/1880) + +* Mikeal: making working groups as “core” means they get to elect a TSC seat. +* Brief discussion about what WG’s should be considered “core” and get a TSC seat. +* Deferred to GitHub. + +### Chrome 43 released; time for V8 4.3! [#1735](https://github.com/nodejs/io.js/issues/1735) + +Domenic: things are ready to go. There’s some concern about double-breakage in 4.3 and 4.4. + +Jeremiah: what about the maybe changes? + +Domenic: the non-Maybe versions are still there, and haven’t been removed yet, either in 4.3 or 4.4 or even 4.5 (which isn’t finalized yet though). + +Rod: blocker is whether the double-breakage is real, if not then we should move forward. + +Domenic: there’s also the issue of some people in the thread advocating blocking a release on readying the ecosystem. + +Rod: we shouldn’t be following the ecosystem and waiting for them to catch up before we release. + +Mikeal: it’s more a matter of how we approach these things. + +Michael: LTS should help this? + +Mikeal: the problem is about major releases and the messaging - currently when people download them lots of stuff is broken. + +Domenic called for a vote on a 3.0 release _assuming there is no double-breakage_. + +Rod: I need to get the fix for node-gyp in place. Also we should see if we can get nvm to allow testing nightlies so that people can test them more easily (including on Travis). + +Jeremiah: when I last talked to Jordan (@ljharb) he wanted to make sure that the mechanism we used for nightlies would also be the mechanism used in the converged project. + +Mikeal: well that’s definitely true. So we should be good. + +Trevor: do we have a way of measuring uptake? + +Domenic/Rod: npm traffic is probably the best metric. + +Rod calls for a vote. + +Domenic: can we clarify whether we allow minor, covered-by-nan breakages between 4.3/3.0.0 and 4.4/4.0.0, or do we require no breakages at all? + +Mikeal: is nan released? + +Jeremiah: not yet; it is experimental and they don’t release until we merge next into master + +Mikeal: that seems bad + +(General agreement that we want nan to release first.) + +Trevor: looking at nan they seem to be working to encapsulate changes all the way out to V8 4.6. + +Bert: what was the problem with putting nan into core? + +Rod/Ben: sometimes V8 makes big changes that cause breaking changes in nan. E.g. isolates, buffer changes, maybes. Until now it’s been just individual APIs, but the 4.3 and 4.4 change has been very big. 
nan’s promise is just that you can write against a single, possibly-changing API that will support multiple node versions. + +### Expose `deepEqual` and `deepStrictEqual` in `util`. #1172 [#1177](https://github.com/nodejs/io.js/pull/1177) + +Jeremiah: what do we want to expose from core, there’s pressure from some parts of the community for core to be more isomorphic and support a lot of browser stuff. This issue is specifically about exposing what’s already implemented. + +Ben: why not pull it out of core and put it in npm? util has always been about utilities that core uses. + +Rod: when you expose something you’re stuck with it forever, minimising surface area should be our goal because the more we have to officially support the slower our release cycle will be. + +### Proposal: Split TSC agenda in to two meetings [#1879](https://github.com/nodejs/io.js/issues/1879) + +Mikeal: the scope of the TSC responsibilities are too wide, making meeting times go to long. The suggestion is to split up in to “project” related issues and “core” related issues. + +Rod: can there be a clear distinction between the issues? + +Domenic: assuming nobody wanted to attend two meetings, would there be enough? + +Mikeal: yes because there are lots of people that aren’t here who would be in the other group + +Rod: I wonder about the timing, e.g. letting the foundation kickoff happen first; it feels a bit premature to split now. + +Mikeal: it’s a little premature but that’s because we haven’t onboarded the core working groups to this meeting. + +Bert: in favour of the proposal. + +_Discussed the pros and cons and agreed to tentatively move forward with experimentation, time slot for the new “project” meeting would either be after the current meeting or the day after that meeting._ + +### TSC needs to elect a board representative. 
[#1697](https://github.com/nodejs/io.js/issues/1697) + +***Call ended prematurely due to Uberconference difficulties*** + +## Next meeting + +* + diff --git a/doc/tsc-meetings/2015-06-10.md b/doc/tsc-meetings/2015-06-10.md index 989541d1c88c90..a554b582649568 100644 --- a/doc/tsc-meetings/2015-06-10.md +++ b/doc/tsc-meetings/2015-06-10.md @@ -2,7 +2,8 @@ ## Links -* **GitHub Issue**: https://github.com/nodejs/node/issues/53 +* **Audio Recording**: https://soundcloud.com/node-foundation/tsc-meeting-2015-06-10 +* **GitHub Issue**: https://github.com/nodejs/node-convergence-archive/issues/53 * **Original Minutes Google Doc**: https://docs.google.com/document/d/1cn7SKaKYUMYLBiQhE6HAAknnwPabKsYjpOuyQkVikW8 ## Agenda diff --git a/doc/tsc-meetings/2015-06-17.md b/doc/tsc-meetings/2015-06-17.md index b4dc10571ac7e1..c07fe82b7050e2 100644 --- a/doc/tsc-meetings/2015-06-17.md +++ b/doc/tsc-meetings/2015-06-17.md @@ -2,7 +2,8 @@ ## Links -* **GitHub Issue**: https://github.com/nodejs/node/issues/56 +* **Audio Recording**: https://soundcloud.com/node-foundation/tsc-meeting-2015-06-17 +* **GitHub Issue**: https://github.com/nodejs/node-convergence-archive/issues/56 * **Original Minutes Google Doc**: https://docs.google.com/document/d/1d4mAJgm06rpPWLDqhZcxsRnKMrS92Ip4CW2akOyeIL4 ## Agenda @@ -11,7 +12,7 @@ Extracted from **tsc-agenda** labelled issues and pull requests prior to meeting ### nodejs/node -* Create a security team [#48](https://github.com/nodejs/node/issues/48) +* Create a security team [#48](https://github.com/nodejs/node-convergence-archive/issues/48) ### nodejs/io.js @@ -71,7 +72,7 @@ Extracted from **tsc-agenda** labelled issues and pull requests prior to meeting * Steven: getting back on board * Bert: libuv work for multi-worker on Windows (https://github.com/libuv/libuv/pull/396), found a potential libuv/Windows contributor at NodeConf, NF board meeting * Alexis: Working on build & CI convergence with Rod, CI can now automatically decide what options to use for different node versions, and porting node-accept-pull-request CI job. 
-* Julien: time off, launching nodejs.org updates for NF launch, working on changes for 0.10/0.12 releases, onboarded two new collaborators for joyent/node - https://github.com/nodejs/node/wiki/Breaking-changes-between-v0.12-and-next-LTS-release +* Julien: time off, launching nodejs.org updates for NF launch, working on changes for 0.10/0.12 releases, onboarded two new collaborators for joyent/node - https://github.com/nodejs/LTS/wiki/Breaking-changes-between-v0.12-and-next-LTS-release * Shigeki: Working on upgrading OpenSSL, the upgrade process is becoming much simpler, landed the CINNIC whitelist * Jeremiah: NodeConf - brought back good feedback, helping spin up the Diversity WG, integrating timers heap impl, struggling with bugs * Brian: not much, triage & PR review diff --git a/doc/tsc-meetings/2015-07-01.md b/doc/tsc-meetings/2015-07-01.md index ca7f9b06e93f62..9fcbd9e2a36884 100644 --- a/doc/tsc-meetings/2015-07-01.md +++ b/doc/tsc-meetings/2015-07-01.md @@ -2,7 +2,8 @@ ## Links -* **GitHub Issue**: https://github.com/nodejs/node/issues/60 +* **Audio Recording**: https://soundcloud.com/node-foundation/tsc-meeting-2015-07-01 +* **GitHub Issue**: https://github.com/nodejs/node-convergence-archive/issues/60 * **Original Minutes Google Doc**: https://docs.google.com/document/d/1TN3Ks0fC4ciY3jeS0VxXxXRkT_dq8-tfs-bWQqZGRoE ## Agenda diff --git a/doc/tsc-meetings/2015-07-08.md b/doc/tsc-meetings/2015-07-08.md index d70f39b7df5c69..0b4b60be8c73f7 100644 --- a/doc/tsc-meetings/2015-07-08.md +++ b/doc/tsc-meetings/2015-07-08.md @@ -2,7 +2,8 @@ ## Links -* **GitHub Issue**: https://github.com/nodejs/node/issues/64 +* **Audio Recording**: https://soundcloud.com/node-foundation/tsc-meeting-2015-07-08 +* **GitHub Issue**: https://github.com/nodejs/node-convergence-archive/issues/64 * **Original Minutes Google Doc**: https://docs.google.com/document/d/1HuRtu5ZP7ZlrIp756EbZYo4I26v2RY-7CY1pr_3y1nY ## Agenda diff --git a/doc/tsc-meetings/2015-07-15.md b/doc/tsc-meetings/2015-07-15.md index 485c3513b41936..c78be93bc6d2cb 100644 --- a/doc/tsc-meetings/2015-07-15.md +++ b/doc/tsc-meetings/2015-07-15.md @@ -2,7 +2,8 @@ ## Links -* **GitHub Issue**: https://github.com/nodejs/node/issues/67 +* **Audio Recording**: https://soundcloud.com/node-foundation/tsc-meeting-2015-07-15 +* **GitHub Issue**: https://github.com/nodejs/node-convergence-archive/issues/67 * **Original Minutes Google Doc**: https://docs.google.com/document/d/1r8boI4E67Cq7PEsYeIpXkFZM0be4Ww5UDlNr_uXOop0 ## Agenda diff --git a/doc/tsc-meetings/2015-07-22.md b/doc/tsc-meetings/2015-07-22.md new file mode 100644 index 00000000000000..bca7d7991fec5a --- /dev/null +++ b/doc/tsc-meetings/2015-07-22.md @@ -0,0 +1,168 @@ +# Node.js Foundation TSC Meeting 2015-07-22 + +## Links + +* **Audio Recording**: https://soundcloud.com/node-foundation/tsc-meeting-2015-07-22 +* **GitHub Issue**: https://github.com/nodejs/node-convergence-archive/issues/69 +* **Original Minutes Google Doc**: https://docs.google.com/document/d/1r8boI4E67Cq7PEsYeIpXkFZM0be4Ww5UDlNr_uXOop0 + +## Agenda + +Extracted from **tsc-agenda** labelled issues and pull requests prior to meeting. 
+ +### nodejs/io.js + +* doc: add GPG fingerprint for cjihrig [#2217](https://github.com/nodejs/io.js/pull/2217) +* Process & Approval for Collab Summit Travel Fund [#2213](https://github.com/nodejs/io.js/issues/2213) +* TC39 representation for the Node.js Foundation [#2153](https://github.com/nodejs/io.js/issues/2153) +* Next branch release versioning [#2215](https://github.com/nodejs/io.js/issues/2215) + +## Minutes + + +### Present + +* Mikeal Rogers +* Rod Vagg (TSC) +* Colin Ihrig (TSC) +* James Snell (TSC) +* Fedor Indutny (TSC) +* Michael Dawson (TSC) +* Steven R Loomis (TSC) +* Jeremiah Senkpiel (TSC) +* Brian White (TSC) +* Ben Noordhuis (TSC) +* Trevor Norris (TSC) +* Chris Dickinson (TSC) +* Mike Dolan (Linux F) +* Emily Ratliff (Linux F) + +### Security Policy Discussion + +Emily Ratliff from the LF has joined us to help with our security and disclosure policy. TSC members were sent a briefing prior to the meeting. + +Discussed ISO 29147 “Vulnerability Disclosure Overview” and ISO 30111 “Vulnerability Handling Processes Overview”. + + +### Review of the previous meeting + +* Foundation Discussion (@mikeal leading) +* Intl [#238](https://github.com/nodejs/io.js/issues/238) +* TC39 representation [#2153](https://github.com/nodejs/io.js/issues/2153) +* lts: LTS Proposal (https://github.com/nodejs/LTS#proposed-lts)[ Proposal: Release Process] [#1997](https://github.com/nodejs/io.js/issues/1997) + +### Standup: + +* Mikeal Rogers: preparing for the foundation board meeting +* Rod Vagg: working on 3.0 and release candidates / NAN +* Colin Ihrig: reviewing issues & PRs, worked with julien to do releases from joyent/node 0.x branches +* James Snell: working on smoke-testing npm modules +* Fedor Indutny: doing so bug fixes, and reviewing PRs +* Michael Dawson: some joyent/node issue triage, PPC build work +* Steven R Loomis: some Intl WG work, working on getting the Intl commits from joyent/node into io.js +* Jeremiah Senkpiel: reviewing issues + prs, doing work on REPL in light of 3.0; fixing bugs in REPL +* Brian White: working more on the in-browser node.js/io.js benchmarking tool, which is now in a usable state. using it now to test current and future performance improvement techniques for the the js http parser +* Ben Noordhuis: (no mic) +* Trevor Norris: did a fix for the buffer implementation for the 3.0 release +* Chris Dickinson: npm work. + +### doc: add GPG fingerprint for cjihrig [#2217](https://github.com/nodejs/io.js/pull/2217) + +* Rod +1 +* James +1 +* Fedor +1 +* Michael +1 +* Steven +1 +* Jeremiah +1 +* Brian +1 +* Ben +1 +* Trevor +1 +* Chris +1 + +Action: make sure Colin’s GPG key setup is correct on the PR, after we can merge and Rod can add Colin’s credentials to the build server. + +### Process & Approval for Collab Summit Travel Fund [#2213](https://github.com/nodejs/io.js/issues/2213) + +* Mikeal: budget auditing requires that spending be approved by the board - need to approve the budget and the process for expenditure of those funds. Mikeal has a proposal for the process with basic limits and a process for having the TSC approve expenditure beyond that. + + +**Process as outlined** + +* TSC approves target budget (max amount to spend on travel) and caps on each type of spend (with the possibility that the TSC can approve a specific spend over if need be). +* Contributors in need of the fund apply (this will happen in the GitHub thread) but should explicitly state if they need flight, accommodation or both. 
+* If the number of contributors in need of the fund exceeds the target budget the TSC will prioritize the list of contributors applying for the fund. + + +Voting on approving the process stated above: + +* Rod: +1 +* James: +1 +* Fedor: +1 +* Michael: +1 +* Steven: +1 +* Jeremiah: +1 +* Brian: +1 +* Ben: +1 +* Trevor: +1 +* Chris: +1 + + +Specific proposal for August + +* 15K max budget (we had previously talked about 10K but I don't think that is enough) +* Approve a $900 max spend per person on accommodations. +* Approve a $500 max spend on domestic travel +* Approve a $1500 max spend on international travel (if someone has to go over it just requires additional TSC approval) + +* Rod: +1 +* James: +1 +* Fedor: +1 +* Michael: +1 +* Steven: +1 +* Jeremiah: +1 +* Brian: +1 +* Ben: +1 +* Trevor: +1 +* Chris: +1 + + +Approval for extra expenditure for @joaocgreis (from Portugal): $1,742.66 + +* Rod: +1 +* James: +1 +* Fedor: +1 +* Michael: +1 +* Steven: +1 +* Jeremiah: +1 +* Brian: +1 +* Ben: +1 +* Trevor: +1 +* Chris: +1 + +Approval for expenditure on @yosuke-furukawa (from Japan) max spend of $2400 + +* Rod: +1 +* James: +1 +* Fedor: +1 +* Michael: +1 +* Steven: +1 +* Jeremiah: +1 +* Brian: +1 +* Ben: +1 +* Trevor: +1 +* Chris: +1 + +### Next branch release versioning [#2215](https://github.com/nodejs/io.js/issues/2215) + +* Rod outlined the state of play: + - LTS WG moved from “proposal” to “plan” but are still depending on the stable release branch having a clear process + - LTS WG discussed a proposal by Trevor for how to handle next/canary/alpha & master & release branches & LTS branches: https://gist.github.com/trevnorris/7620a64b086e95271197 +* Mikeal: it’s more helpful if we think about V8 upgrades as a pull-request to master rather than a separate “next” that has to be separately managed. + +Much bikeshedding was had in an attempt to move forward. + +Group agreed in general with Trevor’s proposal, will organise further discussions amongst the group of interested parties at another time. + +### Next Meeting + +July 29th 2015 diff --git a/doc/tsc-meetings/2015-07-29.md b/doc/tsc-meetings/2015-07-29.md new file mode 100644 index 00000000000000..e3b27a30dd1bda --- /dev/null +++ b/doc/tsc-meetings/2015-07-29.md @@ -0,0 +1,89 @@ +# Node.js Foundation TSC Meeting 2015-07-29 + +## Links + +* **Audio Recording**: https://soundcloud.com/node-foundation/tsc-meeting-2015-07-29 +* **GitHub Issue**: https://github.com/nodejs/node-convergence-archive/issues/71 +* **Original Minutes Google Doc**: https://docs.google.com/document/d/1FBmDczHD4D8jfffc6A8CW-K9mmT0KI8shG6dm_d3jXI +* Previous minutes: https://docs.google.com/document/d/1eCETYn44gAOUp0udl22QxqyxrJ0oEeAFRdWHDCIq9V4 + +## Agenda + +Extracted from **tsc-agenda** labelled issues and pull requests prior to meeting. + +### nodejs/io.js + +* Next branch release versioning [#2215](https://github.com/nodejs/io.js/issues/2215) +* TSC Chair - Election? 
[#2136](https://github.com/nodejs/io.js/issues/2136) + +## Minutes + + +### Present + +* Mikeal Rogers +* Rod Vagg (TSC) +* Colin Ihrig (TSC) +* James Snell (TSC) +* Fedor Indutny (TSC) +* Michael Dawson (TSC) +* Steven R Loomis (TSC) +* Jeremiah Senkpiel (TSC) +* Shigeki Ohtsu(TSC) +* Ben Noordhuis (TSC) +* Trevor Norris (TSC) +* Chris Dickinson (TSC) +* Alexis Campailla (TSC) +* Bert Belder (TSC) + +### Review of the previous meeting + +* Security Policy Discussion +* doc: add GPG fingerprint for cjihrig [#2217](https://github.com/nodejs/io.js/pull/2217) +* Process & Approval for Collab Summit Travel Fund [#2213](https://github.com/nodejs/io.js/issues/2213) +* Next branch release versioning [#2215](https://github.com/nodejs/io.js/issues/2215) + + +### Standup: + +* Mikeal Rogers: Got ready for the board meeting, working to get the new website spun up based on the iojs.org build system +* Rod Vagg: Finished writing NAN 2.0 documentation +* Colin Ihrig: Issues & PRs, did the 2.5.0 release (armv6 build had an issue), playing around with the citgm package smoke-testing tool +* James Snell: Working on citgm (npm package smoke-testing), halfway done writing up the release plan +* Fedor Indutny: ? +* Michael Dawson: Triaging joyent/node issues / PRs, looking at AIX build support on the `next` branch, looking at updating some docs +* Steven R Loomis: Worked on the Intl converge PR [#2264](https://github.com/nodejs/io.js/pull/2264) +* Jeremiah Senkpiel: Issue and PR review, helping to get someone to start contributing to core +* Ben Noordhuis: not much, reviewed some PRs this week and did some minor patches +* Trevor Norris: Just had time to review some issues and respond to PRs +* Chris Dickinson: Not much, did docs work, trying to document streams (again), and the REPL history-paintext change PR [#2224](https://github.com/nodejs/io.js/pull/2224) +* Alexis Campailla: Working on CI/Jenkins convergence, and some windows issue triage +* Shigeki Ohtsu: Working on the updated crypto module for weak ciphers +* Bert Belder: Not much except started reviewing a VS2015 support PR + +#### Next branch release versioning [#2215](https://github.com/nodejs/io.js/issues/2215) + +Discussion about the release plan (not yet written up, James Snell is working on that.) + +#### TSC Chair - Election? [#2136](https://github.com/nodejs/io.js/issues/2136) + +Vote? + +* Colin: +1 +* Chris: +1 +* James: +1 +* Fedor: +1 +* Michael: +1 +* Steven: +1 +* Jeremiah: +1 +* Shigeki: +1 +* Ben: +1 +* Trevor: +1 +* Chris: +1 +* Alexis: +1 +* Bert: +1 + +### Next Meeting + +Not happening on August 5th (as it usually would). +Possibly a (short) TSC meeting will be held during the summit. diff --git a/doc/tsc-meetings/2015-08-19.md b/doc/tsc-meetings/2015-08-19.md new file mode 100644 index 00000000000000..55b862aa88bbfa --- /dev/null +++ b/doc/tsc-meetings/2015-08-19.md @@ -0,0 +1,120 @@ +# Node.js Foundation TSC Meeting 2015-08-19 + +## Links + +* **Audio Recording**: https://soundcloud.com/node-foundation/tsc-meeting-2015-08-19 +* **GitHub Issue**: https://github.com/nodejs/node/issues/2435 +* **Minutes Google Doc**: https://docs.google.com/document/d/1xsj_4UlrLNxahRvC7SpLtFM3D-Ks6CZEqEM5nyj6bjk +* _Previous Minutes Google Doc: _ + +## Agenda + +Extracted from **tsc-agenda** labelled issues and pull requests prior to meeting. 
+ +* Procedure for rolling out node-accept-pull-request [#2434](https://github.com/nodejs/node/issues/2434) +* Release procedure changes & nominating @sam-github and @jasnell as releasers [#2416](https://github.com/nodejs/node/issues/2416) + +## Minutes + + +### Present + +* Mikeal Rogers +* Rod Vagg (TSC) +* James Snell (TSC) +* Michael Dawson (TSC) +* Steven R Loomis (TSC) +* Chris Dickinson (TSC) +* Alexis Campailla (TSC) +* Brian White (TSC) +* Jeremiah Senkpiel (TSC) +* Shigeki Ohtsu (TSC) +* Trevor Norris (TSC) +* Domenic Denicola +* Ben Noordhuis (TSC) +* Colin Ihrig (TSC) +* Bert Belder (TSC) + + +### Standup + +* Mikeal Rogers: Linuxconf, preparing next board meeting agenda, nailing down the foundation conf +* Rod Vagg: build work, memory leak testing, progress towards v4 (mainly infra) +* James Snell: some joyent/node PR triaging, preparing for nodeconf.eu +* Michael Dawson: nodeconf.eu, some AIX build work, talking to the v8 team about security notifications +* Steven R Loomis: Intl WG, landed Intl with small-icu by default in nodejs/node +* Chris Dickinson: first docs WG meeting, working on streams docs (again), working on docs tooling +* Alexis Campailla: working on ci convergence and jenkins jobs +* Brian White: not much, looking over issues and PRs, submitted a couple PRs +* Jeremiah Senkpiel: not so much, working on 0.10, 0.12 -> v4 upgrade docs, 3.1.0 release which should be ready straight after this meeting. +* Shigeki Ohtsu: little time to work on node right now, joined the LTS meeting to discuss OpenSSL LTS +* Trevor Norris: reviewing issues and PRs, noticed a bug in asyncwrap +* Domenic Denicola: issues and PRs, v8 team has a new Project Manager who is more interested in node; communicated our API stability concerns to him +* Ben Noordhuis: fixed a big memory leak, fixed a regression in windows module loading, reviewing PRs, responding to bug reports + +### Review of the previous meeting + +* Travel assistance amendment (no issue for this) +* FYI: Collaboration WG: https://github.com/nodejs/collaboration +* Summit recap +* level-set on repo rename +* Future: “project lifecycle” (Mikeal) - process by which top level projects are added (libuv, node-gyp, etc), (conferences…) + +### Procedure for rolling out node-accept-pull-request [#2434](https://github.com/nodejs/node/issues/2434) + +* Discussed some potential concerns, which were alleviated: +* Jeremiah: Wondered how to land PRs where nits needed to be fixed. +* Bert: Concerns about Jenkins costantly building the wrong PR in the io.js CI infrastructure. +* Trevor: asked for a dropdown to pick the reviewers. +* Bert: asked whether the job would support landing multiple commits. Alexis confirmed this. +* Alexis: mentioned that there is also an node-accept-commit job that is more low-level and advanced. 
+ +### Release procedure changes & nominating @sam-github and @jasnell as releasers [#2416](https://github.com/nodejs/node/issues/2416) + +Votes for @jasnell: + +* Rod Vagg: +1 +* Michael Dawson: +1 +* Steven R Loomis: +1 +* Chris Dickinson: +1 +* Alexis Campailla: +1 +* Brian White: +1 +* Jeremiah Senkpiel: +1 +* Shigeki Ohtsu: +1 +* Trevor Norris: +1 +* Ben Noordhuis: +1 +* Colin Ihrig: +1 +* Bert Belder: +1 + +Votes for @sam-github: + +* Rod Vagg: +1 +* Michael Dawson: +1 +* Steven R Loomis: +1 +* Chris Dickinson: +1 +* Alexis Campailla: +1 +* Brian White: +1 +* Jeremiah Senkpiel: +1 +* Shigeki Ohtsu: +1 +* Trevor Norris: +1 +* Ben Noordhuis: +1 +* Colin Ihrig: +1 +* Bert Belder: +1 + +No objections to combining the a whole "release team" to handle all release branches including 0.10, 0.12, stable and LTS. + +### node-gyp is now in our org [#2379](https://github.com/nodejs/node/issues/2379) + +* Rod: node-gyp has a busy issue tracker and has no tests, needs more eyes +* Ben: Zero tests? +* Rod: Correct. +* Domenic: on Windows Chromium’s depot_tools will automatically download VS community edition and put it in the right place. Someone with copious free time could have node-gyp do similar things. + +* Discussed maybe looking at `gn` (the eventual replacement for `gyp`) + +* Domenic: even v8 still uses gyp, don’t worry about it for a good while until noise about it being deprecated gets louder +* Rod: issue [#151](https://github.com/nodejs/build/issues/151) in build has a discussion about precompiled native addons. Chime in! + +## Next Meeting + +August 26th diff --git a/lib/_http_client.js b/lib/_http_client.js index a7d714f7e0b0b2..e490dce5d821d6 100644 --- a/lib/_http_client.js +++ b/lib/_http_client.js @@ -216,7 +216,7 @@ function socketCloseListener() { // is a no-op if no final chunk remains. socket.read(); - // NOTE: Its important to get parser here, because it could be freed by + // NOTE: It's important to get parser here, because it could be freed by // the `socketOnData`. var parser = socket.parser; req.emit('close'); @@ -322,7 +322,7 @@ function socketOnData(d) { var bodyHead = d.slice(bytesParsed, d.length); var eventName = req.method === 'CONNECT' ? 'connect' : 'upgrade'; - if (EventEmitter.listenerCount(req, eventName) > 0) { + if (req.listenerCount(eventName) > 0) { req.upgradeOrConnect = true; // detach the socket @@ -359,7 +359,7 @@ function parserOnIncomingClient(res, shouldKeepAlive) { var req = socket._httpMessage; - // propogate "domain" setting... + // propagate "domain" setting... if (req.domain && !res.domain) { debug('setting "res.domain"'); res.domain = req.domain; @@ -465,7 +465,7 @@ function tickOnSocket(req, socket) { socket.parser = parser; socket._httpMessage = req; - // Setup "drain" propogation. + // Setup "drain" propagation. httpSocketSetup(socket); // Propagate headers limit from request object to parser diff --git a/lib/_http_server.js b/lib/_http_server.js index 1a4bd7555a5ccb..6769f4a1521bd9 100644 --- a/lib/_http_server.js +++ b/lib/_http_server.js @@ -343,7 +343,7 @@ function connectionListener(socket) { parser = null; var eventName = req.method === 'CONNECT' ? 
'connect' : 'upgrade'; - if (EventEmitter.listenerCount(self, eventName) > 0) { + if (self.listenerCount(eventName) > 0) { debug('SERVER have listener for %s', eventName); var bodyHead = d.slice(bytesParsed, d.length); @@ -467,7 +467,7 @@ function connectionListener(socket) { (req.httpVersionMajor == 1 && req.httpVersionMinor == 1) && continueExpression.test(req.headers['expect'])) { res._expect_continue = true; - if (EventEmitter.listenerCount(self, 'checkContinue') > 0) { + if (self.listenerCount('checkContinue') > 0) { self.emit('checkContinue', req, res); } else { res.writeContinue(); diff --git a/lib/_stream_readable.js b/lib/_stream_readable.js index 7be7723e52cb44..11069985a4d40a 100644 --- a/lib/_stream_readable.js +++ b/lib/_stream_readable.js @@ -533,7 +533,7 @@ Readable.prototype.pipe = function(dest, pipeOpts) { debug('onerror', er); unpipe(); dest.removeListener('error', onerror); - if (EE.listenerCount(dest, 'error') === 0) + if (dest.listenerCount('error') === 0) dest.emit('error', er); } // This is a brutally ugly hack to make sure that our error handler @@ -582,7 +582,7 @@ function pipeOnDrain(src) { debug('pipeOnDrain', state.awaitDrain); if (state.awaitDrain) state.awaitDrain--; - if (state.awaitDrain === 0 && EE.listenerCount(src, 'data')) { + if (state.awaitDrain === 0 && src.listenerCount('data')) { state.flowing = true; flow(src); } diff --git a/lib/_tls_wrap.js b/lib/_tls_wrap.js index 182346904c9bf4..1bff7579fc2914 100644 --- a/lib/_tls_wrap.js +++ b/lib/_tls_wrap.js @@ -5,7 +5,6 @@ const crypto = require('crypto'); const net = require('net'); const tls = require('tls'); const util = require('util'); -const listenerCount = require('events').listenerCount; const common = require('_tls_common'); const StreamWrap = require('_stream_wrap').StreamWrap; const Buffer = require('buffer').Buffer; @@ -116,7 +115,7 @@ function requestOCSP(self, hello, ctx, cb) { if (ctx.context) ctx = ctx.context; - if (listenerCount(self.server, 'OCSPRequest') === 0) { + if (self.server.listenerCount('OCSPRequest') === 0) { return cb(null); } else { self.server.emit('OCSPRequest', @@ -320,7 +319,9 @@ TLSSocket.prototype._wrapHandle = function(wrap) { var context = options.secureContext || options.credentials || tls.createSecureContext(); - res = tls_wrap.wrap(handle, context.context, options.isServer); + res = tls_wrap.wrap(handle._externalStream, + context.context, + options.isServer); res._parent = handle; res._parentWrap = wrap; res._secureContext = context; @@ -396,11 +397,11 @@ TLSSocket.prototype._init = function(socket, wrap) { ssl.handshakes = 0; if (this.server) { - if (listenerCount(this.server, 'resumeSession') > 0 || - listenerCount(this.server, 'newSession') > 0) { + if (this.server.listenerCount('resumeSession') > 0 || + this.server.listenerCount('newSession') > 0) { ssl.enableSessionCallbacks(); } - if (listenerCount(this.server, 'OCSPRequest') > 0) + if (this.server.listenerCount('OCSPRequest') > 0) ssl.enableCertCb(); } } else { @@ -584,17 +585,6 @@ TLSSocket.prototype._start = function() { this._handle.start(); }; -TLSSocket.prototype._isSessionResumed = function _isSessionResumed(session) { - if (!session) - return false; - - var next = this.getSession(); - if (!next) - return false; - - return next.equals(session); -}; - TLSSocket.prototype.setServername = function(name) { this._handle.setServername(name); }; @@ -1011,7 +1001,7 @@ exports.connect = function(/* [port, host], options, cb */) { // Verify that server's identity matches it's certificate's names // Unless 
server has resumed our existing session - if (!verifyError && !socket._isSessionResumed(options.session)) { + if (!verifyError && !socket.isSessionReused()) { var cert = socket.getPeerCertificate(); verifyError = options.checkServerIdentity(hostname, cert); } diff --git a/lib/buffer.js b/lib/buffer.js index 403c344bdbe9c8..4166724b73248e 100644 --- a/lib/buffer.js +++ b/lib/buffer.js @@ -21,6 +21,15 @@ function createPool() { } +function alignPool() { + // Ensure aligned slices + if (poolOffset & 0x7) { + poolOffset |= 0x7; + poolOffset++; + } +} + + function Buffer(arg) { // Common case. if (typeof arg === 'number') { @@ -66,6 +75,7 @@ function allocate(size) { createPool(); var b = binding.slice(allocPool, poolOffset, poolOffset + size); poolOffset += size; + alignPool(); return b; } else { return binding.create(size); @@ -86,6 +96,7 @@ function fromString(string, encoding) { var actual = allocPool.write(string, poolOffset, encoding); var b = binding.slice(allocPool, poolOffset, poolOffset + actual); poolOffset += actual; + alignPool(); return b; } diff --git a/lib/events.js b/lib/events.js index 3ea798b3bdb01a..722b64537e109d 100644 --- a/lib/events.js +++ b/lib/events.js @@ -395,19 +395,23 @@ EventEmitter.prototype.listeners = function listeners(type) { }; EventEmitter.listenerCount = function(emitter, type) { - var evlistener; - var ret = 0; - var events = emitter._events; + return emitter.listenerCount(type); +}; + +EventEmitter.prototype.listenerCount = function listenerCount(type) { + const events = this._events; if (events) { - evlistener = events[type]; - if (typeof evlistener === 'function') - ret = 1; - else if (evlistener) - ret = evlistener.length; + const evlistener = events[type]; + + if (typeof evlistener === 'function') { + return 1; + } else if (evlistener) { + return evlistener.length; + } } - return ret; + return 0; }; // About 1.5x faster than the two-arg version of Array#splice(). diff --git a/lib/fs.js b/lib/fs.js index 138a90e2f2ecd5..9bfd14cd29b512 100644 --- a/lib/fs.js +++ b/lib/fs.js @@ -1354,7 +1354,7 @@ fs.unwatchFile = function(filename, listener) { stat.removeAllListeners('change'); } - if (EventEmitter.listenerCount(stat, 'change') === 0) { + if (stat.listenerCount('change') === 0) { stat.stop(); statWatchers.delete(filename); } diff --git a/lib/internal/repl.js b/lib/internal/repl.js index 75909f72e2415e..b2c74e179c59d0 100644 --- a/lib/internal/repl.js +++ b/lib/internal/repl.js @@ -122,8 +122,10 @@ function setupHistory(repl, historyPath, oldHistoryPath, ready) { } repl.history = repl.history.slice(-repl.historySize); } catch (err) { - return ready( + if (err.code !== 'ENOENT') { + return ready( new Error(`Could not parse history data in ${oldHistoryPath}.`)); + } } } diff --git a/lib/net.js b/lib/net.js index 6146029d3c4743..10e789678440fd 100644 --- a/lib/net.js +++ b/lib/net.js @@ -93,6 +93,7 @@ function initSocketHandle(self) { self.destroyed = false; self.bytesRead = 0; self._bytesDispatched = 0; + self._sockname = null; // Handle creation may be deferred to bind() or connect() time. 
if (self._handle) { @@ -469,6 +470,7 @@ Socket.prototype._destroy = function(exception, cb) { }); this._handle.onread = noop; this._handle = null; + this._sockname = null; } // we set destroyed to true before firing error callbacks in order @@ -871,6 +873,7 @@ Socket.prototype.connect = function(options, cb) { this.destroyed = false; this._handle = null; this._peername = null; + this._sockname = null; } var self = this; @@ -1032,6 +1035,7 @@ function afterConnect(status, handle, req, readable, writable) { assert.ok(self._connecting); self._connecting = false; + self._sockname = null; if (status == 0) { self.readable = readable; diff --git a/lib/path.js b/lib/path.js index e0c5bcaa1c1050..4bcb2b3672091c 100644 --- a/lib/path.js +++ b/lib/path.js @@ -76,7 +76,7 @@ function win32SplitPath(filename) { // Separate device+slash from tail var result = splitDeviceRe.exec(filename), device = (result[1] || '') + (result[2] || ''), - tail = result[3] || ''; + tail = result[3]; // Split the tail into dir, basename and extension var result2 = splitTailRe.exec(tail), dir = result2[1], @@ -277,7 +277,7 @@ win32.relative = function(from, to) { } } - if (samePartsLength == 0) { + if (samePartsLength === 0) { return to; } @@ -386,9 +386,6 @@ win32.parse = function(pathString) { assertPath(pathString); var allParts = win32SplitPath(pathString); - if (!allParts || allParts.length !== 4) { - throw new TypeError("Invalid path '" + pathString + "'"); - } return { root: allParts[0], dir: allParts[0] + allParts[1].slice(0, -1), @@ -590,13 +587,6 @@ posix.parse = function(pathString) { assertPath(pathString); var allParts = posixSplitPath(pathString); - if (!allParts || allParts.length !== 4) { - throw new TypeError("Invalid path '" + pathString + "'"); - } - allParts[1] = allParts[1] || ''; - allParts[2] = allParts[2] || ''; - allParts[3] = allParts[3] || ''; - return { root: allParts[0], dir: allParts[0] + allParts[1].slice(0, -1), diff --git a/lib/readline.js b/lib/readline.js index 02fa9d08f666f9..6164bcc85fb8ff 100644 --- a/lib/readline.js +++ b/lib/readline.js @@ -683,7 +683,7 @@ Interface.prototype._ttyWrite = function(s, key) { switch (key.name) { case 'c': - if (EventEmitter.listenerCount(this, 'SIGINT') > 0) { + if (this.listenerCount('SIGINT') > 0) { this.emit('SIGINT'); } else { // This readline instance is finished @@ -746,7 +746,7 @@ Interface.prototype._ttyWrite = function(s, key) { case 'z': if (process.platform == 'win32') break; - if (EventEmitter.listenerCount(this, 'SIGTSTP') > 0) { + if (this.listenerCount('SIGTSTP') > 0) { this.emit('SIGTSTP'); } else { process.once('SIGCONT', (function(self) { @@ -907,7 +907,7 @@ function emitKeypressEvents(stream) { stream[ESCAPE_DECODER].next(); function onData(b) { - if (EventEmitter.listenerCount(stream, 'keypress') > 0) { + if (stream.listenerCount('keypress') > 0) { var r = stream[KEYPRESS_DECODER].write(b); if (r) { for (var i = 0; i < r.length; i++) { @@ -936,7 +936,7 @@ function emitKeypressEvents(stream) { } } - if (EventEmitter.listenerCount(stream, 'keypress') > 0) { + if (stream.listenerCount('keypress') > 0) { stream.on('data', onData); } else { stream.on('newListener', onNewListener); diff --git a/lib/repl.js b/lib/repl.js index 366b2b7b0a7c60..d099725bc1d0be 100644 --- a/lib/repl.js +++ b/lib/repl.js @@ -561,6 +561,15 @@ const requireRE = /\brequire\s*\(['"](([\w\.\/-]+\/)?([\w\.\/-]*))/; const simpleExpressionRE = /(([a-zA-Z_$](?:\w|\$)*)\.)*([a-zA-Z_$](?:\w|\$)*)\.?$/; +function intFilter(item) { + // filters out anything not starting with 
A-Z, a-z, $ or _ + return /^[A-Za-z_$]/.test(item); +} + +function filteredOwnPropertyNames(obj) { + if (!obj) return []; + return Object.getOwnPropertyNames(obj).filter(intFilter); +} // Provide a list of completions for the given leading text. This is // given to the readline interface for handling tab completion. @@ -705,9 +714,9 @@ REPLServer.prototype.complete = function(line, callback) { if (this.useGlobal || vm.isContext(this.context)) { var contextProto = this.context; while (contextProto = Object.getPrototypeOf(contextProto)) { - completionGroups.push(Object.getOwnPropertyNames(contextProto)); + completionGroups.push(filteredOwnPropertyNames(contextProto)); } - completionGroups.push(Object.getOwnPropertyNames(this.context)); + completionGroups.push(filteredOwnPropertyNames(this.context)); addStandardGlobals(completionGroups, filter); completionGroupsLoaded(); } else { @@ -733,7 +742,7 @@ REPLServer.prototype.complete = function(line, callback) { if (obj != null) { if (typeof obj === 'object' || typeof obj === 'function') { try { - memberGroups.push(Object.getOwnPropertyNames(obj)); + memberGroups.push(filteredOwnPropertyNames(obj)); } catch (ex) { // Probably a Proxy object without `getOwnPropertyNames` trap. // We simply ignore it here, as we don't want to break the @@ -751,7 +760,7 @@ REPLServer.prototype.complete = function(line, callback) { p = obj.constructor ? obj.constructor.prototype : null; } while (p !== null) { - memberGroups.push(Object.getOwnPropertyNames(p)); + memberGroups.push(filteredOwnPropertyNames(p)); p = Object.getPrototypeOf(p); // Circular refs possible? Let's guard against that. sentinel--; diff --git a/lib/stream.js b/lib/stream.js index 8d3535dc4d2f0e..2e0cdfc3134065 100644 --- a/lib/stream.js +++ b/lib/stream.js @@ -70,7 +70,7 @@ Stream.prototype.pipe = function(dest, options) { // don't leave dangling pipes when there are errors. function onerror(er) { cleanup(); - if (EE.listenerCount(this, 'error') === 0) { + if (this.listenerCount('error') === 0) { throw er; // Unhandled stream error in pipe. } } diff --git a/lib/tls.js b/lib/tls.js index 714fdebfc079bd..0d85a948dcc511 100644 --- a/lib/tls.js +++ b/lib/tls.js @@ -5,6 +5,7 @@ const url = require('url'); const util = require('util'); const binding = process.binding('crypto'); const Buffer = require('buffer').Buffer; +const constants = require('constants'); // Allow {CLIENT_RENEG_LIMIT} client-initiated session renegotiations // every {CLIENT_RENEG_WINDOW} seconds. 
An error event is emitted if more @@ -15,29 +16,7 @@ exports.CLIENT_RENEG_WINDOW = 600; exports.SLAB_BUFFER_SIZE = 10 * 1024 * 1024; -exports.DEFAULT_CIPHERS = [ - 'ECDHE-RSA-AES128-GCM-SHA256', - 'ECDHE-ECDSA-AES128-GCM-SHA256', - 'ECDHE-RSA-AES256-GCM-SHA384', - 'ECDHE-ECDSA-AES256-GCM-SHA384', - 'DHE-RSA-AES128-GCM-SHA256', - 'ECDHE-RSA-AES128-SHA256', - 'DHE-RSA-AES128-SHA256', - 'ECDHE-RSA-AES256-SHA384', - 'DHE-RSA-AES256-SHA384', - 'ECDHE-RSA-AES256-SHA256', - 'DHE-RSA-AES256-SHA256', - 'HIGH', - '!aNULL', - '!eNULL', - '!EXPORT', - '!DES', - '!RC4', - '!MD5', - '!PSK', - '!SRP', - '!CAMELLIA' -].join(':'); +exports.DEFAULT_CIPHERS = constants.defaultCipherList; exports.DEFAULT_ECDH_CURVE = 'prime256v1'; @@ -151,7 +130,7 @@ exports.checkServerIdentity = function checkServerIdentity(host, cert) { host, ips.join(', ')); } - } else { + } else if (cert.subject) { // Transform hostname to canonical form if (!/\.$/.test(host)) host += '.'; @@ -204,6 +183,8 @@ exports.checkServerIdentity = function checkServerIdentity(host, cert) { cert.subject.CN); } } + } else { + reason = 'Cert is empty'; } if (!valid) { diff --git a/lib/url.js b/lib/url.js index 55c5248e4751dd..45155fee936bbf 100644 --- a/lib/url.js +++ b/lib/url.js @@ -587,7 +587,7 @@ Url.prototype.resolveObject = function(relative) { if (psychotic) { result.hostname = result.host = srcPath.shift(); //occationaly the auth can get stuck only in host - //this especialy happens in cases like + //this especially happens in cases like //url.resolveObject('mailto:local1@domain1', 'local2@domain2') var authInHost = result.host && result.host.indexOf('@') > 0 ? result.host.split('@') : false; @@ -669,7 +669,7 @@ Url.prototype.resolveObject = function(relative) { result.hostname = result.host = isAbsolute ? '' : srcPath.length ? srcPath.shift() : ''; //occationaly the auth can get stuck only in host - //this especialy happens in cases like + //this especially happens in cases like //url.resolveObject('mailto:local1@domain1', 'local2@domain2') var authInHost = result.host && result.host.indexOf('@') > 0 ? result.host.split('@') : false; diff --git a/lib/util.js b/lib/util.js index 136a66a6c1c0e8..c5d7bea7db352d 100644 --- a/lib/util.js +++ b/lib/util.js @@ -167,6 +167,22 @@ function arrayToHash(array) { } +function getConstructorOf(obj) { + while (obj) { + var descriptor = Object.getOwnPropertyDescriptor(obj, 'constructor'); + if (descriptor !== undefined && + typeof descriptor.value === 'function' && + descriptor.value.name !== '') { + return descriptor.value; + } + + obj = Object.getPrototypeOf(obj); + } + + return null; +} + + function inspectPromise(p) { Debug = Debug || require('vm').runInDebugContext('Debug'); var mirror = Debug.MakeMirror(p, true); @@ -260,14 +276,17 @@ function formatValue(ctx, value, recurseTimes) { } } + var constructor = getConstructorOf(value); var base = '', empty = false, braces, formatter; if (Array.isArray(value)) { + if (constructor === Array) + constructor = null; braces = ['[', ']']; empty = value.length === 0; formatter = formatArray; } else if (value instanceof Set) { - braces = ['Set {', '}']; + braces = ['{', '}']; // With `showHidden`, `length` will display as a hidden property for // arrays. For consistency's sake, do the same for `size`, even though this // property isn't selected by Object.getOwnPropertyNames(). 
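// Illustrative sketch (not part of this patch): with the constructor-name
// handling introduced above, util.inspect() output is expected to include
// the constructor name for non-plain objects, roughly:
//
//   var util = require('util');
//   function Foo() { this.x = 1; }
//   util.inspect(new Foo());            // expected: 'Foo { x: 1 }'
//   util.inspect(new Map([['a', 1]]));  // expected: "Map { 'a' => 1 }"
//   util.inspect({ x: 1 });             // plain objects stay as: '{ x: 1 }'
//
// Exact formatting depends on the remaining hunks of this diff below.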
@@ -276,7 +295,7 @@ function formatValue(ctx, value, recurseTimes) { empty = value.size === 0; formatter = formatSet; } else if (value instanceof Map) { - braces = ['Map {', '}']; + braces = ['{', '}']; // Ditto. if (ctx.showHidden) keys.unshift('size'); @@ -286,9 +305,11 @@ function formatValue(ctx, value, recurseTimes) { // Only create a mirror if the object superficially looks like a Promise. var promiseInternals = value instanceof Promise && inspectPromise(value); if (promiseInternals) { - braces = ['Promise {', '}']; + braces = ['{', '}']; formatter = formatPromise; } else { + if (constructor === Object) + constructor = null; braces = ['{', '}']; empty = true; // No other data than keys. formatter = formatObject; @@ -336,6 +357,10 @@ function formatValue(ctx, value, recurseTimes) { base = ' ' + '[Boolean: ' + formatted + ']'; } + // Add constructor name if available + if (base === '' && constructor) + braces[0] = constructor.name + ' ' + braces[0]; + if (empty === true) { return braces[0] + base + braces[1]; } diff --git a/lib/v8.js b/lib/v8.js index f25814bab31a95..acadfa64e0650e 100644 --- a/lib/v8.js +++ b/lib/v8.js @@ -22,6 +22,7 @@ const heapStatisticsBuffer = const kTotalHeapSizeIndex = v8binding.kTotalHeapSizeIndex; const kTotalHeapSizeExecutableIndex = v8binding.kTotalHeapSizeExecutableIndex; const kTotalPhysicalSizeIndex = v8binding.kTotalPhysicalSizeIndex; +const kTotalAvailableSize = v8binding.kTotalAvailableSize; const kUsedHeapSizeIndex = v8binding.kUsedHeapSizeIndex; const kHeapSizeLimitIndex = v8binding.kHeapSizeLimitIndex; @@ -34,6 +35,7 @@ exports.getHeapStatistics = function() { 'total_heap_size': buffer[kTotalHeapSizeIndex], 'total_heap_size_executable': buffer[kTotalHeapSizeExecutableIndex], 'total_physical_size': buffer[kTotalPhysicalSizeIndex], + 'total_available_size': buffer[kTotalAvailableSize], 'used_heap_size': buffer[kUsedHeapSizeIndex], 'heap_size_limit': buffer[kHeapSizeLimitIndex] }; diff --git a/src/env.h b/src/env.h index 501c151122198e..1801ffecd3ccea 100644 --- a/src/env.h +++ b/src/env.h @@ -88,6 +88,7 @@ namespace node { V(exponent_string, "exponent") \ V(exports_string, "exports") \ V(ext_key_usage_string, "ext_key_usage") \ + V(external_stream_string, "_externalStream") \ V(family_string, "family") \ V(fatal_exception_string, "_fatalException") \ V(fd_string, "fd") \ @@ -198,6 +199,7 @@ namespace node { V(syscall_string, "syscall") \ V(tick_callback_string, "_tickCallback") \ V(tick_domain_cb_string, "_tickDomainCallback") \ + V(ticketkeycallback_string, "onticketkeycallback") \ V(timeout_string, "timeout") \ V(times_string, "times") \ V(timestamp_string, "timestamp") \ diff --git a/src/js_stream.cc b/src/js_stream.cc index aa8de3a9ad9b8f..16fabc9df3449f 100644 --- a/src/js_stream.cc +++ b/src/js_stream.cc @@ -90,7 +90,7 @@ int JSStream::DoWrite(WriteWrap* w, Local bufs_arr = Array::New(env()->isolate(), count); Local buf; for (size_t i = 0; i < count; i++) { - buf = Buffer::New(env(), bufs[i].base, bufs[i].len).ToLocalChecked(); + buf = Buffer::Copy(env(), bufs[i].base, bufs[i].len).ToLocalChecked(); bufs_arr->Set(i, buf); } diff --git a/src/node.cc b/src/node.cc index 7c0a80ec31462b..a8723dc095f5c9 100644 --- a/src/node.cc +++ b/src/node.cc @@ -2124,6 +2124,7 @@ void DLOpen(const FunctionCallbackInfo& args) { if (is_dlopen_error) { Local errmsg = OneByteString(env->isolate(), uv_dlerror(&lib)); + uv_dlclose(&lib); #ifdef _WIN32 // Windows needs to add the filename into the error message errmsg = String::Concat(errmsg, 
args[1]->ToString(env->isolate())); @@ -2133,10 +2134,12 @@ void DLOpen(const FunctionCallbackInfo& args) { } if (mp == nullptr) { + uv_dlclose(&lib); env->ThrowError("Module did not self-register."); return; } if (mp->nm_version != NODE_MODULE_VERSION) { + uv_dlclose(&lib); char errmsg[1024]; snprintf(errmsg, sizeof(errmsg), @@ -2146,6 +2149,7 @@ void DLOpen(const FunctionCallbackInfo& args) { return; } if (mp->nm_flags & NM_F_BUILTIN) { + uv_dlclose(&lib); env->ThrowError("Built-in module self-registered."); return; } @@ -2162,6 +2166,7 @@ void DLOpen(const FunctionCallbackInfo& args) { } else if (mp->nm_register_func != nullptr) { mp->nm_register_func(exports, module, mp->nm_priv); } else { + uv_dlclose(&lib); env->ThrowError("Module has no declared entry point."); return; } @@ -2184,7 +2189,7 @@ static void OnFatalError(const char* location, const char* message) { NO_RETURN void FatalError(const char* location, const char* message) { OnFatalError(location, message); - // to supress compiler warning + // to suppress compiler warning abort(); } @@ -3106,6 +3111,9 @@ static void PrintHelp() { " --track-heap-objects track heap object allocations for heap " "snapshots\n" " --v8-options print v8 command line options\n" +#if HAVE_OPENSSL + " --tls-cipher-list=val use an alternative default TLS cipher list\n" +#endif #if defined(NODE_HAVE_I18N_SUPPORT) " --icu-data-dir=dir set ICU data load path to dir\n" " (overrides NODE_ICU_DATA)\n" @@ -3237,6 +3245,10 @@ static void ParseArgs(int* argc, } else if (strcmp(arg, "--v8-options") == 0) { new_v8_argv[new_v8_argc] = "--help"; new_v8_argc += 1; +#if HAVE_OPENSSL + } else if (strncmp(arg, "--tls-cipher-list=", 18) == 0) { + default_cipher_list = arg + 18; +#endif #if defined(NODE_HAVE_I18N_SUPPORT) } else if (strncmp(arg, "--icu-data-dir=", 15) == 0) { icu_data_dir = arg + 15; @@ -3701,7 +3713,8 @@ void Init(int* argc, #endif // The const_cast doesn't violate conceptual const-ness. V8 doesn't modify // the argv array or the elements it points to. - V8::SetFlagsFromCommandLine(&v8_argc, const_cast(v8_argv), true); + if (v8_argc != 0) + V8::SetFlagsFromCommandLine(&v8_argc, const_cast(v8_argv), true); // Anything that's still in v8_argv is not a V8 or a node option. for (int i = 1; i < v8_argc; i++) { diff --git a/src/node.js b/src/node.js index af66b1784e7181..685a3692c619ef 100644 --- a/src/node.js +++ b/src/node.js @@ -277,7 +277,7 @@ process._tickDomainCallback = _tickDomainCallback; // This tickInfo thing is used so that the C++ code in src/node.cc - // can have easy accesss to our nextTick state, and avoid unnecessary + // can have easy access to our nextTick state, and avoid unnecessary // calls into JS land. 
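Stepping back to the `--tls-cipher-list` handling added in `src/node.cc` above: a hedged sketch of how the switch surfaces in JS, using the constant names defined further down in `src/node_constants.cc` (`defaultCoreCipherList`, `defaultCipherList`). The `iojs` invocation is only an example.

```js
// Start the process with an alternative default cipher list, for example:
//   iojs --tls-cipher-list="ECDHE-RSA-AES128-GCM-SHA256:HIGH:!MD5" server.js
var constants = require('constants');
var tls = require('tls');

console.log(constants.defaultCoreCipherList);  // compiled-in DEFAULT_CIPHER_LIST_CORE
console.log(constants.defaultCipherList);      // honours --tls-cipher-list, if given
console.log(tls.DEFAULT_CIPHERS === constants.defaultCipherList);  // expected: true
```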
const tickInfo = process._setupNextTick(_tickCallback, _runMicrotasks); @@ -802,8 +802,7 @@ }); process.on('removeListener', function(type, listener) { - if (signalWraps.hasOwnProperty(type) && - NativeModule.require('events').listenerCount(this, type) === 0) { + if (signalWraps.hasOwnProperty(type) && this.listenerCount(type) === 0) { signalWraps[type].close(); delete signalWraps[type]; } diff --git a/src/node_buffer.cc b/src/node_buffer.cc index 5ab599ebab93ec..c8be5b5a448c9e 100644 --- a/src/node_buffer.cc +++ b/src/node_buffer.cc @@ -281,13 +281,13 @@ MaybeLocal Copy(Isolate* isolate, const char* data, size_t length) { Environment* env = Environment::GetCurrent(isolate); EscapableHandleScope handle_scope(env->isolate()); Local obj; - if (Buffer::New(env, data, length).ToLocal(&obj)) + if (Buffer::Copy(env, data, length).ToLocal(&obj)) return handle_scope.Escape(obj); return Local(); } -MaybeLocal New(Environment* env, const char* data, size_t length) { +MaybeLocal Copy(Environment* env, const char* data, size_t length) { EscapableHandleScope scope(env->isolate()); // V8 currently only allows a maximum Typed Array index of max Smi. @@ -371,7 +371,7 @@ MaybeLocal New(Isolate* isolate, char* data, size_t length) { } -MaybeLocal Use(Environment* env, char* data, size_t length) { +MaybeLocal New(Environment* env, char* data, size_t length) { EscapableHandleScope scope(env->isolate()); if (length > 0) { @@ -408,8 +408,10 @@ void Create(const FunctionCallbackInfo& args) { void* data; if (length > 0) { data = malloc(length); - if (data == nullptr) - return env->ThrowRangeError("invalid Buffer length"); + if (data == nullptr) { + return env->ThrowRangeError( + "Buffer allocation failed - process out of memory"); + } } else { data = nullptr; } diff --git a/src/node_buffer.h b/src/node_buffer.h index 5b935b8c063abc..49fb5741640060 100644 --- a/src/node_buffer.h +++ b/src/node_buffer.h @@ -4,10 +4,6 @@ #include "node.h" #include "v8.h" -#if defined(NODE_WANT_INTERNALS) -#include "env.h" -#endif // defined(NODE_WANT_INTERNALS) - namespace node { namespace Buffer { @@ -63,19 +59,6 @@ static inline bool IsWithinBounds(size_t off, size_t len, size_t max) { return true; } -// Internal. Not for public consumption. We can't define these -// in src/node_internals.h because of a circular dependency. 
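The `removeListener` handler above now uses the instance method rather than the static helper; a small usage sketch of the two forms, with the static `EventEmitter.listenerCount(emitter, type)` being the one deprecated in this release:

```js
var EventEmitter = require('events');

var emitter = new EventEmitter();
emitter.on('foo', function() {});
emitter.on('foo', function() {});

// Preferred: the instance method.
console.log(emitter.listenerCount('foo'));  // 2
console.log(emitter.listenerCount('bar'));  // 0

// Deprecated but still available: the static helper.
console.log(EventEmitter.listenerCount(emitter, 'foo'));  // 2
```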
-#if defined(NODE_WANT_INTERNALS) -v8::MaybeLocal New(Environment* env, size_t size); -v8::MaybeLocal New(Environment* env, const char* data, size_t len); -v8::MaybeLocal New(Environment* env, - char* data, - size_t length, - FreeCallback callback, - void* hint); -v8::MaybeLocal Use(Environment* env, char* data, size_t length); -#endif // defined(NODE_WANT_INTERNALS) - } // namespace Buffer } // namespace node diff --git a/src/node_constants.cc b/src/node_constants.cc index ce715a32462655..59dd11113eb70c 100644 --- a/src/node_constants.cc +++ b/src/node_constants.cc @@ -24,6 +24,10 @@ namespace node { using v8::Handle; using v8::Object; +#if HAVE_OPENSSL +const char* default_cipher_list = DEFAULT_CIPHER_LIST_CORE; +#endif + void DefineErrnoConstants(Handle target) { #ifdef E2BIG NODE_DEFINE_CONSTANT(target, E2BIG); @@ -1108,6 +1112,17 @@ void DefineUVConstants(Handle target) { NODE_DEFINE_CONSTANT(target, UV_UDP_REUSEADDR); } +void DefineCryptoConstants(Handle target) { +#if HAVE_OPENSSL + NODE_DEFINE_STRING_CONSTANT(target, + "defaultCoreCipherList", + DEFAULT_CIPHER_LIST_CORE); + NODE_DEFINE_STRING_CONSTANT(target, + "defaultCipherList", + default_cipher_list); +#endif +} + void DefineConstants(Handle target) { DefineErrnoConstants(target); DefineWindowsErrorConstants(target); @@ -1115,6 +1130,7 @@ void DefineConstants(Handle target) { DefineOpenSSLConstants(target); DefineSystemConstants(target); DefineUVConstants(target); + DefineCryptoConstants(target); } } // namespace node diff --git a/src/node_constants.h b/src/node_constants.h index 8493d4d13b3d2a..45c991022e6bc6 100644 --- a/src/node_constants.h +++ b/src/node_constants.h @@ -4,7 +4,36 @@ #include "node.h" #include "v8.h" +#if HAVE_OPENSSL +#define DEFAULT_CIPHER_LIST_CORE "ECDHE-RSA-AES128-GCM-SHA256:" \ + "ECDHE-ECDSA-AES128-GCM-SHA256:" \ + "ECDHE-RSA-AES256-GCM-SHA384:" \ + "ECDHE-ECDSA-AES256-GCM-SHA384:" \ + "DHE-RSA-AES128-GCM-SHA256:" \ + "ECDHE-RSA-AES128-SHA256:" \ + "DHE-RSA-AES128-SHA256:" \ + "ECDHE-RSA-AES256-SHA384:" \ + "DHE-RSA-AES256-SHA384:" \ + "ECDHE-RSA-AES256-SHA256:" \ + "DHE-RSA-AES256-SHA256:" \ + "HIGH:" \ + "!aNULL:" \ + "!eNULL:" \ + "!EXPORT:" \ + "!DES:" \ + "!RC4:" \ + "!MD5:" \ + "!PSK:" \ + "!SRP:" \ + "!CAMELLIA" +#endif + namespace node { + +#if HAVE_OPENSSL +extern const char* default_cipher_list; +#endif + void DefineConstants(v8::Handle target); } // namespace node diff --git a/src/node_crypto.cc b/src/node_crypto.cc index c14f2b600c677e..c647d327edc74e 100644 --- a/src/node_crypto.cc +++ b/src/node_crypto.cc @@ -300,9 +300,23 @@ void SecureContext::Initialize(Environment* env, Handle target) { env->SetProtoMethod(t, "getTicketKeys", SecureContext::GetTicketKeys); env->SetProtoMethod(t, "setTicketKeys", SecureContext::SetTicketKeys); env->SetProtoMethod(t, "setFreeListLength", SecureContext::SetFreeListLength); + env->SetProtoMethod(t, + "enableTicketKeyCallback", + SecureContext::EnableTicketKeyCallback); env->SetProtoMethod(t, "getCertificate", SecureContext::GetCertificate); env->SetProtoMethod(t, "getIssuer", SecureContext::GetCertificate); + t->Set(FIXED_ONE_BYTE_STRING(env->isolate(), "kTicketKeyReturnIndex"), + Integer::NewFromUnsigned(env->isolate(), kTicketKeyReturnIndex)); + t->Set(FIXED_ONE_BYTE_STRING(env->isolate(), "kTicketKeyHMACIndex"), + Integer::NewFromUnsigned(env->isolate(), kTicketKeyHMACIndex)); + t->Set(FIXED_ONE_BYTE_STRING(env->isolate(), "kTicketKeyAESIndex"), + Integer::NewFromUnsigned(env->isolate(), kTicketKeyAESIndex)); + 
t->Set(FIXED_ONE_BYTE_STRING(env->isolate(), "kTicketKeyNameIndex"), + Integer::NewFromUnsigned(env->isolate(), kTicketKeyNameIndex)); + t->Set(FIXED_ONE_BYTE_STRING(env->isolate(), "kTicketKeyIVIndex"), + Integer::NewFromUnsigned(env->isolate(), kTicketKeyIVIndex)); + t->PrototypeTemplate()->SetAccessor( FIXED_ONE_BYTE_STRING(env->isolate(), "_external"), CtxGetter, @@ -378,6 +392,7 @@ void SecureContext::Init(const FunctionCallbackInfo& args) { } sc->ctx_ = SSL_CTX_new(method); + SSL_CTX_set_app_data(sc->ctx_, sc); // Disable SSLv2 in the case when method == SSLv23_method() and the // cipher list contains SSLv2 ciphers (not the default, should be rare.) @@ -790,10 +805,12 @@ void SecureContext::SetDHParam(const FunctionCallbackInfo& args) { return; const int keylen = BN_num_bits(dh->p); - if (keylen < 1024) + if (keylen < 1024) { + DH_free(dh); return env->ThrowError("DH parameter is less than 1024 bits"); - else if (keylen < 2048) + } else if (keylen < 2048) { fprintf(stderr, "WARNING: DH parameter is less than 2048 bits\n"); + } SSL_CTX_set_options(sc->ctx_, SSL_OP_SINGLE_DH_USE); int r = SSL_CTX_set_tmp_dh(sc->ctx_, dh); @@ -982,6 +999,95 @@ void SecureContext::SetFreeListLength(const FunctionCallbackInfo& args) { } +void SecureContext::EnableTicketKeyCallback( + const FunctionCallbackInfo& args) { + SecureContext* wrap = Unwrap(args.Holder()); + + SSL_CTX_set_tlsext_ticket_key_cb(wrap->ctx_, TicketKeyCallback); +} + + +int SecureContext::TicketKeyCallback(SSL* ssl, + unsigned char* name, + unsigned char* iv, + EVP_CIPHER_CTX* ectx, + HMAC_CTX* hctx, + int enc) { + static const int kTicketPartSize = 16; + + SecureContext* sc = static_cast( + SSL_CTX_get_app_data(ssl->ctx)); + + Environment* env = sc->env(); + HandleScope handle_scope(env->isolate()); + Context::Scope context_scope(env->context()); + + Local argv[] = { + Buffer::Copy(env, + reinterpret_cast(name), + kTicketPartSize).ToLocalChecked(), + Buffer::Copy(env, + reinterpret_cast(iv), + kTicketPartSize).ToLocalChecked(), + Boolean::New(env->isolate(), enc != 0) + }; + Local ret = node::MakeCallback(env, + sc->object(), + env->ticketkeycallback_string(), + ARRAY_SIZE(argv), + argv); + Local arr = ret.As(); + + int r = arr->Get(kTicketKeyReturnIndex)->Int32Value(); + if (r < 0) + return r; + + Local hmac = arr->Get(kTicketKeyHMACIndex); + Local aes = arr->Get(kTicketKeyAESIndex); + if (Buffer::Length(aes) != kTicketPartSize) + return -1; + + if (enc) { + Local name_val = arr->Get(kTicketKeyNameIndex); + Local iv_val = arr->Get(kTicketKeyIVIndex); + + if (Buffer::Length(name_val) != kTicketPartSize || + Buffer::Length(iv_val) != kTicketPartSize) { + return -1; + } + + memcpy(name, Buffer::Data(name_val), kTicketPartSize); + memcpy(iv, Buffer::Data(iv_val), kTicketPartSize); + } + + HMAC_Init_ex(hctx, + Buffer::Data(hmac), + Buffer::Length(hmac), + EVP_sha256(), + nullptr); + + const unsigned char* aes_key = + reinterpret_cast(Buffer::Data(aes)); + if (enc) { + EVP_EncryptInit_ex(ectx, + EVP_aes_128_cbc(), + nullptr, + aes_key, + iv); + } else { + EVP_DecryptInit_ex(ectx, + EVP_aes_128_cbc(), + nullptr, + aes_key, + iv); + } + + return r; +} + + + + void SecureContext::CtxGetter(Local property, const PropertyCallbackInfo& info) { HandleScope scope(info.GetIsolate()); @@ -1115,7 +1221,7 @@ int SSLWrap::NewSessionCallback(SSL* s, SSL_SESSION* sess) { memset(serialized, 0, size); i2d_SSL_SESSION(sess, &serialized); - Local session = Buffer::New( + Local session = Buffer::Copy( env, reinterpret_cast(sess->session_id), 
sess->session_id_length).ToLocalChecked(); @@ -1136,7 +1242,7 @@ void SSLWrap::OnClientHello(void* arg, Context::Scope context_scope(env->context()); Local hello_obj = Object::New(env->isolate()); - Local buff = Buffer::New( + Local buff = Buffer::Copy( env, reinterpret_cast(hello.session_id()), hello.session_size()).ToLocalChecked(); @@ -1191,6 +1297,7 @@ static bool SafeX509ExtPrint(BIO* out, X509_EXTENSION* ext) { if (nval == NULL) return false; X509V3_EXT_val_prn(out, nval, 0, 0); + sk_CONF_VALUE_pop_free(nval, X509V3_conf_free); } } sk_GENERAL_NAME_pop_free(names, GENERAL_NAME_free); @@ -1597,7 +1704,7 @@ void SSLWrap::GetTLSTicket(const FunctionCallbackInfo& args) { if (sess == nullptr || sess->tlsext_tick == nullptr) return; - Local buff = Buffer::New( + Local buff = Buffer::Copy( env, reinterpret_cast(sess->tlsext_tick), sess->tlsext_ticklen).ToLocalChecked(); @@ -1879,7 +1986,7 @@ int SSLWrap::TLSExtStatusCallback(SSL* s, void* arg) { if (resp == nullptr) { arg = Null(env->isolate()); } else { - arg = Buffer::New( + arg = Buffer::Copy( env, reinterpret_cast(const_cast(resp)), len).ToLocalChecked(); @@ -2885,7 +2992,8 @@ bool CipherBase::GetAuthTag(char** out, unsigned int* out_len) const { if (initialised_ || kind_ != kCipher || !auth_tag_) return false; *out_len = auth_tag_len_; - *out = new char[auth_tag_len_]; + *out = static_cast(malloc(auth_tag_len_)); + CHECK_NE(*out, nullptr); memcpy(*out, auth_tag_, auth_tag_len_); return true; } @@ -3016,8 +3124,9 @@ void CipherBase::Update(const FunctionCallbackInfo& args) { "Trying to add data in unsupported state"); } + CHECK(out != nullptr || out_len == 0); Local buf = - Buffer::New(env, reinterpret_cast(out), out_len).ToLocalChecked(); + Buffer::Copy(env, reinterpret_cast(out), out_len).ToLocalChecked(); if (out) delete[] out; @@ -3092,7 +3201,7 @@ void CipherBase::Final(const FunctionCallbackInfo& args) { } } - Local buf = Buffer::New( + Local buf = Buffer::Copy( env, reinterpret_cast(out_value), out_len).ToLocalChecked(); @@ -3474,7 +3583,11 @@ SignBase::Error Sign::SignFinal(const char* key_pem, nullptr, CryptoPemCallback, const_cast(passphrase)); - if (pkey == nullptr) + + // Errors might be injected into OpenSSL's error stack + // without `pkey` being set to nullptr; + // cf. the test of `test_bad_rsa_privkey.pem` for an example. + if (pkey == nullptr || 0 != ERR_peek_error()) goto exit; if (EVP_SignFinal(&mdctx_, *sig, sig_len, pkey)) @@ -3522,6 +3635,9 @@ void Sign::SignFinal(const FunctionCallbackInfo& args) { md_len = 8192; // Maximum key size is 8192 bits md_value = new unsigned char[md_len]; + ClearErrorOnReturn clear_error_on_return; + (void) &clear_error_on_return; // Silence compiler warning. + Error err = sign->SignFinal( buf, buf_len, @@ -3822,6 +3938,8 @@ bool PublicKeyCipher::Cipher(const char* key_pem, fatal = false; exit: + if (x509 != nullptr) + X509_free(x509); if (pkey != nullptr) EVP_PKEY_free(pkey); if (bp != nullptr) @@ -3854,6 +3972,9 @@ void PublicKeyCipher::Cipher(const FunctionCallbackInfo& args) { unsigned char* out_value = nullptr; size_t out_len = 0; + ClearErrorOnReturn clear_error_on_return; + (void) &clear_error_on_return; // Silence compiler warning. 
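For readers following the `SecureContext::TicketKeyCallback` machinery above, a hedged sketch of the JS half, modelled on the `test-https-resume-after-renew.js` test added later in this diff. `enableTicketKeyCallback()` and `_sharedCreds.context` are internal, undocumented hooks, and `key.pem`/`cert.pem` are placeholder paths.

```js
var https = require('https');
var fs = require('fs');
var crypto = require('crypto');

var server = https.createServer({
  key: fs.readFileSync('key.pem'),
  cert: fs.readFileSync('cert.pem')
}, function(req, res) {
  res.end('hello');
});

var hmacKey = new Buffer(16);
hmacKey.fill('H');
var aesKey = new Buffer(16);
aesKey.fill('S');

server._sharedCreds.context.enableTicketKeyCallback();
server._sharedCreds.context.onticketkeycallback = function(name, iv, enc) {
  // Return layout follows the kTicketKey*Index constants:
  //   [ status, hmacKey, aesKey, ticketName, iv ]
  // status 1 accepts the ticket, 2 accepts it but asks the client to renew,
  // negative values reject it.
  if (enc) {
    // Encrypting a fresh ticket: a 16-byte name and IV must be supplied.
    var ticketName = new Buffer(16);
    ticketName.fill('A');
    return [1, hmacKey, aesKey, ticketName, crypto.randomBytes(16)];
  }
  // Decrypting an incoming ticket.
  return [1, hmacKey, aesKey];
};
```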
+ bool r = Cipher( kbuf, klen, @@ -3874,7 +3995,7 @@ void PublicKeyCipher::Cipher(const FunctionCallbackInfo& args) { } } - Local vbuf = Buffer::New( + Local vbuf = Buffer::Copy( env, reinterpret_cast(out_value), out_len).ToLocalChecked(); @@ -4457,8 +4578,12 @@ void ECDH::SetPrivateKey(const FunctionCallbackInfo& args) { if (priv == nullptr) return env->ThrowError("Failed to convert Buffer to BN"); - if (!EC_KEY_set_private_key(ecdh->key_, priv)) + int result = EC_KEY_set_private_key(ecdh->key_, priv); + BN_free(priv); + + if (!result) { return env->ThrowError("Failed to convert BN to a private key"); + } } @@ -4507,6 +4632,7 @@ class PBKDF2Request : public AsyncWrap { } ~PBKDF2Request() override { + release(); persistent().Reset(); } @@ -4548,10 +4674,15 @@ class PBKDF2Request : public AsyncWrap { inline void release() { free(pass_); + pass_ = nullptr; passlen_ = 0; + free(salt_); + salt_ = nullptr; saltlen_ = 0; + free(key_); + key_ = nullptr; keylen_ = 0; } @@ -4622,7 +4753,6 @@ void EIO_PBKDF2After(uv_work_t* work_req, int status) { Local argv[2]; EIO_PBKDF2After(req, argv); req->MakeCallback(env->ondone_string(), ARRAY_SIZE(argv), argv); - req->release(); delete req; } @@ -4733,6 +4863,9 @@ void PBKDF2(const FunctionCallbackInfo& args) { Local argv[2]; EIO_PBKDF2(req); EIO_PBKDF2After(req, argv); + + delete req; + if (argv[0]->IsObject()) env->isolate()->ThrowException(argv[0]); else @@ -5146,10 +5279,12 @@ const char* Certificate::ExportChallenge(const char* data, int len) { if (sp == nullptr) return nullptr; - const char* buf = nullptr; - buf = reinterpret_cast(ASN1_STRING_data(sp->spkac->challenge)); + unsigned char* buf = nullptr; + ASN1_STRING_to_UTF8(&buf, sp->spkac->challenge); - return buf; + NETSCAPE_SPKI_free(sp); + + return reinterpret_cast(buf); } @@ -5176,7 +5311,7 @@ void Certificate::ExportChallenge(const FunctionCallbackInfo& args) { Local outString = Encode(env->isolate(), cert, strlen(cert), BUFFER); - delete[] cert; + OPENSSL_free(const_cast(cert)); args.GetReturnValue().Set(outString); } diff --git a/src/node_crypto.h b/src/node_crypto.h index 3a00b519323d52..edacaa1b0095b9 100644 --- a/src/node_crypto.h +++ b/src/node_crypto.h @@ -68,6 +68,13 @@ class SecureContext : public BaseObject { static const int kMaxSessionSize = 10 * 1024; + // See TicketKeyCallback + static const int kTicketKeyReturnIndex = 0; + static const int kTicketKeyHMACIndex = 1; + static const int kTicketKeyAESIndex = 2; + static const int kTicketKeyNameIndex = 3; + static const int kTicketKeyIVIndex = 4; + protected: static const int64_t kExternalSize = sizeof(SSL_CTX); @@ -92,12 +99,21 @@ class SecureContext : public BaseObject { static void SetTicketKeys(const v8::FunctionCallbackInfo& args); static void SetFreeListLength( const v8::FunctionCallbackInfo& args); + static void EnableTicketKeyCallback( + const v8::FunctionCallbackInfo& args); static void CtxGetter(v8::Local property, const v8::PropertyCallbackInfo& info); template static void GetCertificate(const v8::FunctionCallbackInfo& args); + static int TicketKeyCallback(SSL* ssl, + unsigned char* name, + unsigned char* iv, + EVP_CIPHER_CTX* ectx, + HMAC_CTX* hctx, + int enc); + SecureContext(Environment* env, v8::Local wrap) : BaseObject(env, wrap), ca_store_(nullptr), diff --git a/src/node_file.cc b/src/node_file.cc index 0297b08e68d478..f10b99e94b16ad 100644 --- a/src/node_file.cc +++ b/src/node_file.cc @@ -47,8 +47,6 @@ using v8::Value; #define TYPE_ERROR(msg) env->ThrowTypeError(msg) -#define THROW_BAD_ARGS TYPE_ERROR("Bad argument") - 
#define GET_OFFSET(a) ((a)->IsNumber() ? (a)->IntegerValue() : -1) class FSReqWrap: public ReqWrap { @@ -306,7 +304,7 @@ static void Access(const FunctionCallbackInfo& args) { HandleScope scope(env->isolate()); if (args.Length() < 2) - return THROW_BAD_ARGS; + return TYPE_ERROR("path and mode are required"); if (!args[0]->IsString()) return TYPE_ERROR("path must be a string"); if (!args[1]->IsInt32()) @@ -326,9 +324,10 @@ static void Access(const FunctionCallbackInfo& args) { static void Close(const FunctionCallbackInfo& args) { Environment* env = Environment::GetCurrent(args); - if (args.Length() < 1 || !args[0]->IsInt32()) { - return THROW_BAD_ARGS; - } + if (args.Length() < 1) + return TYPE_ERROR("fd is required"); + if (!args[0]->IsInt32()) + return TYPE_ERROR("fd must be a file descriptor"); int fd = args[0]->Int32Value(); @@ -442,31 +441,48 @@ Local BuildStatsObject(Environment* env, const uv_stat_t* s) { // comes from not creating Error objects on failure. static void InternalModuleReadFile(const FunctionCallbackInfo& args) { Environment* env = Environment::GetCurrent(args); + uv_loop_t* loop = env->event_loop(); CHECK(args[0]->IsString()); node::Utf8Value path(env->isolate(), args[0]); - FILE* const stream = fopen(*path, "rb"); - if (stream == nullptr) { + uv_fs_t open_req; + const int fd = uv_fs_open(loop, &open_req, *path, O_RDONLY, 0, nullptr); + uv_fs_req_cleanup(&open_req); + + if (fd < 0) { return; } std::vector chars; - while (!ferror(stream)) { + int64_t offset = 0; + for (;;) { const size_t kBlockSize = 32 << 10; const size_t start = chars.size(); chars.resize(start + kBlockSize); - const size_t numchars = fread(&chars[start], 1, kBlockSize, stream); - if (numchars < kBlockSize) { + + uv_buf_t buf; + buf.base = &chars[start]; + buf.len = kBlockSize; + + uv_fs_t read_req; + const ssize_t numchars = + uv_fs_read(loop, &read_req, fd, &buf, 1, offset, nullptr); + uv_fs_req_cleanup(&read_req); + + CHECK_GE(numchars, 0); + if (static_cast(numchars) < kBlockSize) { chars.resize(start + numchars); } if (numchars == 0) { break; } + offset += numchars; } - CHECK_EQ(false, ferror(stream)); - CHECK_EQ(0, fclose(stream)); + uv_fs_t close_req; + CHECK_EQ(0, uv_fs_close(loop, &close_req, fd, nullptr)); + uv_fs_req_cleanup(&close_req); size_t start = 0; if (chars.size() >= 3 && 0 == memcmp(&chars[0], "\xEF\xBB\xBF", 3)) { @@ -542,9 +558,10 @@ static void LStat(const FunctionCallbackInfo& args) { static void FStat(const FunctionCallbackInfo& args) { Environment* env = Environment::GetCurrent(args); - if (args.Length() < 1 || !args[0]->IsInt32()) { - return THROW_BAD_ARGS; - } + if (args.Length() < 1) + return TYPE_ERROR("fd is required"); + if (!args[0]->IsInt32()) + return TYPE_ERROR("fd must be a file descriptor"); int fd = args[0]->Int32Value(); @@ -661,9 +678,10 @@ static void Rename(const FunctionCallbackInfo& args) { static void FTruncate(const FunctionCallbackInfo& args) { Environment* env = Environment::GetCurrent(args); - if (args.Length() < 2 || !args[0]->IsInt32()) { - return THROW_BAD_ARGS; - } + if (args.Length() < 2) + return TYPE_ERROR("fd and length are required"); + if (!args[0]->IsInt32()) + return TYPE_ERROR("fd must be a file descriptor"); int fd = args[0]->Int32Value(); @@ -689,9 +707,10 @@ static void FTruncate(const FunctionCallbackInfo& args) { static void Fdatasync(const FunctionCallbackInfo& args) { Environment* env = Environment::GetCurrent(args); - if (args.Length() < 1 || !args[0]->IsInt32()) { - return THROW_BAD_ARGS; - } + if (args.Length() < 1) + return 
TYPE_ERROR("fd is required"); + if (!args[0]->IsInt32()) + return TYPE_ERROR("fd must be a file descriptor"); int fd = args[0]->Int32Value(); @@ -705,9 +724,10 @@ static void Fdatasync(const FunctionCallbackInfo& args) { static void Fsync(const FunctionCallbackInfo& args) { Environment* env = Environment::GetCurrent(args); - if (args.Length() < 1 || !args[0]->IsInt32()) { - return THROW_BAD_ARGS; - } + if (args.Length() < 1) + return TYPE_ERROR("fd is required"); + if (!args[0]->IsInt32()) + return TYPE_ERROR("fd must be a file descriptor"); int fd = args[0]->Int32Value(); @@ -755,9 +775,12 @@ static void RMDir(const FunctionCallbackInfo& args) { static void MKDir(const FunctionCallbackInfo& args) { Environment* env = Environment::GetCurrent(args); - if (args.Length() < 2 || !args[0]->IsString() || !args[1]->IsInt32()) { - return THROW_BAD_ARGS; - } + if (args.Length() < 2) + return TYPE_ERROR("path and mode are required"); + if (!args[0]->IsString()) + return TYPE_ERROR("path must be a string"); + if (!args[1]->IsInt32()) + return TYPE_ERROR("mode must be an integer"); node::Utf8Value path(env->isolate(), args[0]); int mode = static_cast(args[1]->Int32Value()); @@ -973,9 +996,12 @@ static void WriteString(const FunctionCallbackInfo& args) { static void Read(const FunctionCallbackInfo& args) { Environment* env = Environment::GetCurrent(args); - if (args.Length() < 2 || !args[0]->IsInt32()) { - return THROW_BAD_ARGS; - } + if (args.Length() < 2) + return TYPE_ERROR("fd and buffer are required"); + if (!args[0]->IsInt32()) + return TYPE_ERROR("fd must be a file descriptor"); + if (!Buffer::HasInstance(args[1])) + return TYPE_ERROR("Second argument needs to be a buffer"); int fd = args[0]->Int32Value(); @@ -986,10 +1012,6 @@ static void Read(const FunctionCallbackInfo& args) { char * buf = nullptr; - if (!Buffer::HasInstance(args[1])) { - return env->ThrowError("Second argument needs to be a buffer"); - } - Local buffer_obj = args[1]->ToObject(env->isolate()); char *buffer_data = Buffer::Data(buffer_obj); size_t buffer_length = Buffer::Length(buffer_obj); @@ -1026,9 +1048,13 @@ static void Read(const FunctionCallbackInfo& args) { static void Chmod(const FunctionCallbackInfo& args) { Environment* env = Environment::GetCurrent(args); - if (args.Length() < 2 || !args[0]->IsString() || !args[1]->IsInt32()) { - return THROW_BAD_ARGS; - } + if (args.Length() < 2) + return TYPE_ERROR("path and mode are required"); + if (!args[0]->IsString()) + return TYPE_ERROR("path must be a string"); + if (!args[1]->IsInt32()) + return TYPE_ERROR("mode must be an integer"); + node::Utf8Value path(env->isolate(), args[0]); int mode = static_cast(args[1]->Int32Value()); @@ -1046,9 +1072,13 @@ static void Chmod(const FunctionCallbackInfo& args) { static void FChmod(const FunctionCallbackInfo& args) { Environment* env = Environment::GetCurrent(args); - if (args.Length() < 2 || !args[0]->IsInt32() || !args[1]->IsInt32()) { - return THROW_BAD_ARGS; - } + if (args.Length() < 2) + return TYPE_ERROR("fd and mode are required"); + if (!args[0]->IsInt32()) + return TYPE_ERROR("fd must be a file descriptor"); + if (!args[1]->IsInt32()) + return TYPE_ERROR("mode must be an integer"); + int fd = args[0]->Int32Value(); int mode = static_cast(args[1]->Int32Value()); diff --git a/src/node_internals.h b/src/node_internals.h index c99b2feeb0bdcd..8f35433b2f85c3 100644 --- a/src/node_internals.h +++ b/src/node_internals.h @@ -12,6 +12,22 @@ struct sockaddr; +// Variation on NODE_DEFINE_CONSTANT that sets a String value. 
+#define NODE_DEFINE_STRING_CONSTANT(target, name, constant) \ + do { \ + v8::Isolate* isolate = target->GetIsolate(); \ + v8::Local constant_name = \ + v8::String::NewFromUtf8(isolate, name); \ + v8::Local constant_value = \ + v8::String::NewFromUtf8(isolate, constant); \ + v8::PropertyAttribute constant_attributes = \ + static_cast(v8::ReadOnly | v8::DontDelete); \ + target->ForceSet(isolate->GetCurrentContext(), \ + constant_name, \ + constant_value, \ + constant_attributes); \ + } while (0) + namespace node { // Forward declaration @@ -272,6 +288,21 @@ class NodeInstanceData { DISALLOW_COPY_AND_ASSIGN(NodeInstanceData); }; +namespace Buffer { +v8::MaybeLocal Copy(Environment* env, const char* data, size_t len); +v8::MaybeLocal New(Environment* env, size_t size); +// Takes ownership of |data|. +v8::MaybeLocal New(Environment* env, + char* data, + size_t length, + void (*callback)(char* data, void* hint), + void* hint); +// Takes ownership of |data|. Must allocate |data| with malloc() or realloc() +// because ArrayBufferAllocator::Free() deallocates it again with free(). +// Mixing operator new and free() is undefined behavior so don't do that. +v8::MaybeLocal New(Environment* env, char* data, size_t length); +} // namespace Buffer + } // namespace node #endif // SRC_NODE_INTERNALS_H_ diff --git a/src/node_object_wrap.h b/src/node_object_wrap.h index d00e1484b7c10c..f0226622272a5c 100644 --- a/src/node_object_wrap.h +++ b/src/node_object_wrap.h @@ -80,7 +80,7 @@ class ObjectWrap { * attached to detached state it will be freed. Be careful not to access * the object after making this call as it might be gone! * (A "weak reference" means an object that only has a - * persistant handle.) + * persistent handle.) * * DO NOT CALL THIS FROM DESTRUCTOR */ diff --git a/src/node_v8.cc b/src/node_v8.cc index 0a3e6e76338752..db492e3d1a537c 100644 --- a/src/node_v8.cc +++ b/src/node_v8.cc @@ -26,8 +26,9 @@ using v8::Value; V(0, total_heap_size, kTotalHeapSizeIndex) \ V(1, total_heap_size_executable, kTotalHeapSizeExecutableIndex) \ V(2, total_physical_size, kTotalPhysicalSizeIndex) \ - V(3, used_heap_size, kUsedHeapSizeIndex) \ - V(4, heap_size_limit, kHeapSizeLimitIndex) + V(3, total_available_size, kTotalAvailableSize) \ + V(4, used_heap_size, kUsedHeapSizeIndex) \ + V(5, heap_size_limit, kHeapSizeLimitIndex) #define V(a, b, c) +1 static const size_t kHeapStatisticsPropertiesCount = diff --git a/src/node_version.h b/src/node_version.h index cf259469de4293..08ca81fa904f7d 100644 --- a/src/node_version.h +++ b/src/node_version.h @@ -2,7 +2,7 @@ #define SRC_NODE_VERSION_H_ #define NODE_MAJOR_VERSION 3 -#define NODE_MINOR_VERSION 0 +#define NODE_MINOR_VERSION 1 #define NODE_PATCH_VERSION 1 #define NODE_VERSION_IS_RELEASE 0 diff --git a/src/node_wrap.h b/src/node_wrap.h index 58b042a63b475a..d508a4a470f63c 100644 --- a/src/node_wrap.h +++ b/src/node_wrap.h @@ -34,21 +34,6 @@ namespace node { } \ } while (0) -#define WITH_GENERIC_STREAM(env, obj, BODY) \ - do { \ - WITH_GENERIC_UV_STREAM(env, obj, BODY, { \ - if (env->tls_wrap_constructor_template().IsEmpty() == false && \ - env->tls_wrap_constructor_template()->HasInstance(obj)) { \ - TLSWrap* const wrap = Unwrap(obj); \ - BODY \ - } else if (env->jsstream_constructor_template().IsEmpty() == false && \ - env->jsstream_constructor_template()->HasInstance(obj)) { \ - JSStream* const wrap = Unwrap(obj); \ - BODY \ - } \ - }); \ - } while (0) - inline uv_stream_t* HandleToStream(Environment* env, v8::Local obj) { v8::HandleScope scope(env->isolate()); diff 
--git a/src/res/node.exe.extra.manifest b/src/res/node.exe.extra.manifest index c4cc80a141d9dd..e2e9f175473305 100644 --- a/src/res/node.exe.extra.manifest +++ b/src/res/node.exe.extra.manifest @@ -2,6 +2,8 @@ + + diff --git a/src/stream_base-inl.h b/src/stream_base-inl.h index d74b47de6009f6..dd0bbcfbd879a7 100644 --- a/src/stream_base-inl.h +++ b/src/stream_base-inl.h @@ -10,6 +10,7 @@ namespace node { +using v8::External; using v8::FunctionCallbackInfo; using v8::FunctionTemplate; using v8::Handle; @@ -36,6 +37,13 @@ void StreamBase::AddMethods(Environment* env, v8::DEFAULT, attributes); + t->InstanceTemplate()->SetAccessor(env->external_stream_string(), + GetExternal, + nullptr, + env->as_external(), + v8::DEFAULT, + attributes); + env->SetProtoMethod(t, "readStart", JSMethod); env->SetProtoMethod(t, "readStop", JSMethod); if ((flags & kFlagNoShutdown) == 0) @@ -72,6 +80,16 @@ void StreamBase::GetFD(Local key, } +template +void StreamBase::GetExternal(Local key, + const PropertyCallbackInfo& args) { + StreamBase* wrap = Unwrap(args.Holder()); + + Local ext = External::New(args.GetIsolate(), wrap); + args.GetReturnValue().Set(ext); +} + + template & args)> void StreamBase::JSMethod(const FunctionCallbackInfo& args) { diff --git a/src/stream_base.cc b/src/stream_base.cc index b2518404a8fe62..d957465abcdcbb 100644 --- a/src/stream_base.cc +++ b/src/stream_base.cc @@ -72,7 +72,6 @@ void StreamBase::AfterShutdown(ShutdownWrap* req_wrap, int status) { // The wrap and request objects should still be there. CHECK_EQ(req_wrap->persistent().IsEmpty(), false); - CHECK_EQ(wrap->GetAsyncWrap()->persistent().IsEmpty(), false); HandleScope handle_scope(env->isolate()); Context::Scope context_scope(env->context()); @@ -80,7 +79,7 @@ void StreamBase::AfterShutdown(ShutdownWrap* req_wrap, int status) { Local req_wrap_obj = req_wrap->object(); Local argv[3] = { Integer::New(env->isolate(), status), - wrap->GetAsyncWrap()->object(), + wrap->GetObject(), req_wrap_obj }; @@ -370,7 +369,6 @@ void StreamBase::AfterWrite(WriteWrap* req_wrap, int status) { // The wrap and request objects should still be there. 
CHECK_EQ(req_wrap->persistent().IsEmpty(), false); - CHECK_EQ(wrap->GetAsyncWrap()->persistent().IsEmpty(), false); // Unref handle property Local req_wrap_obj = req_wrap->object(); @@ -379,7 +377,7 @@ void StreamBase::AfterWrite(WriteWrap* req_wrap, int status) { Local argv[] = { Integer::New(env->isolate(), status), - wrap->GetAsyncWrap()->object(), + wrap->GetObject(), req_wrap_obj, Undefined(env->isolate()) }; @@ -414,7 +412,16 @@ void StreamBase::EmitData(ssize_t nread, if (argv[2].IsEmpty()) argv[2] = Undefined(env->isolate()); - GetAsyncWrap()->MakeCallback(env->onread_string(), ARRAY_SIZE(argv), argv); + AsyncWrap* async = GetAsyncWrap(); + if (async == nullptr) { + node::MakeCallback(env, + GetObject(), + env->onread_string(), + ARRAY_SIZE(argv), + argv); + } else { + async->MakeCallback(env->onread_string(), ARRAY_SIZE(argv), argv); + } } @@ -428,6 +435,16 @@ int StreamBase::GetFD() { } +AsyncWrap* StreamBase::GetAsyncWrap() { + return nullptr; +} + + +Local StreamBase::GetObject() { + return GetAsyncWrap()->object(); +} + + int StreamResource::DoTryWrite(uv_buf_t** bufs, size_t* count) { // No TryWrite by default return 0; diff --git a/src/stream_base.h b/src/stream_base.h index 31854b3435a337..c5a09777925465 100644 --- a/src/stream_base.h +++ b/src/stream_base.h @@ -106,6 +106,22 @@ class WriteWrap: public ReqWrap, class StreamResource { public: + template + struct Callback { + Callback() : fn(nullptr), ctx(nullptr) {} + Callback(T fn, void* ctx) : fn(fn), ctx(ctx) {} + Callback(const Callback&) = default; + + inline bool is_empty() { return fn == nullptr; } + inline void clear() { + fn = nullptr; + ctx = nullptr; + } + + T fn; + void* ctx; + }; + typedef void (*AfterWriteCb)(WriteWrap* w, void* ctx); typedef void (*AllocCb)(size_t size, uv_buf_t* buf, void* ctx); typedef void (*ReadCb)(ssize_t nread, @@ -113,11 +129,8 @@ class StreamResource { uv_handle_type pending, void* ctx); - StreamResource() : after_write_cb_(nullptr), - alloc_cb_(nullptr), - read_cb_(nullptr) { + StreamResource() { } - virtual ~StreamResource() = default; virtual int DoShutdown(ShutdownWrap* req_wrap) = 0; @@ -131,44 +144,37 @@ class StreamResource { // Events inline void OnAfterWrite(WriteWrap* w) { - if (after_write_cb_ != nullptr) - after_write_cb_(w, after_write_ctx_); + if (!after_write_cb_.is_empty()) + after_write_cb_.fn(w, after_write_cb_.ctx); } inline void OnAlloc(size_t size, uv_buf_t* buf) { - if (alloc_cb_ != nullptr) - alloc_cb_(size, buf, alloc_ctx_); + if (!alloc_cb_.is_empty()) + alloc_cb_.fn(size, buf, alloc_cb_.ctx); } inline void OnRead(size_t nread, const uv_buf_t* buf, uv_handle_type pending = UV_UNKNOWN_HANDLE) { - if (read_cb_ != nullptr) - read_cb_(nread, buf, pending, read_ctx_); + if (!read_cb_.is_empty()) + read_cb_.fn(nread, buf, pending, read_cb_.ctx); } - inline void set_after_write_cb(AfterWriteCb cb, void* ctx) { - after_write_ctx_ = ctx; - after_write_cb_ = cb; + inline void set_after_write_cb(Callback c) { + after_write_cb_ = c; } - inline void set_alloc_cb(AllocCb cb, void* ctx) { - alloc_cb_ = cb; - alloc_ctx_ = ctx; - } + inline void set_alloc_cb(Callback c) { alloc_cb_ = c; } + inline void set_read_cb(Callback c) { read_cb_ = c; } - inline void set_read_cb(ReadCb cb, void* ctx) { - read_cb_ = cb; - read_ctx_ = ctx; - } + inline Callback after_write_cb() { return after_write_cb_; } + inline Callback alloc_cb() { return alloc_cb_; } + inline Callback read_cb() { return read_cb_; } private: - AfterWriteCb after_write_cb_; - void* after_write_ctx_; - AllocCb alloc_cb_; 
- void* alloc_ctx_; - ReadCb read_cb_; - void* read_ctx_; + Callback after_write_cb_; + Callback alloc_cb_; + Callback read_cb_; }; class StreamBase : public StreamResource { @@ -211,7 +217,9 @@ class StreamBase : public StreamResource { virtual ~StreamBase() = default; - virtual AsyncWrap* GetAsyncWrap() = 0; + // One of these must be implemented + virtual AsyncWrap* GetAsyncWrap(); + virtual v8::Local GetObject(); // Libuv callbacks static void AfterShutdown(ShutdownWrap* req, int status); @@ -227,8 +235,12 @@ class StreamBase : public StreamResource { int WriteString(const v8::FunctionCallbackInfo& args); template - static void GetFD(v8::Local, - const v8::PropertyCallbackInfo&); + static void GetFD(v8::Local key, + const v8::PropertyCallbackInfo& args); + + template + static void GetExternal(v8::Local key, + const v8::PropertyCallbackInfo& args); template str, + int flags, + size_t* chars_written) { + uint16_t* const dst = reinterpret_cast(buf); + + size_t max_chars = (buflen / sizeof(*dst)); + size_t nchars; + size_t alignment = reinterpret_cast(dst) % sizeof(*dst); + if (alignment == 0) { + nchars = str->Write(dst, 0, max_chars, flags); + *chars_written = nchars; + return nchars * sizeof(*dst); + } + + uint16_t* aligned_dst = + reinterpret_cast(buf + sizeof(*dst) - alignment); + ASSERT_EQ(reinterpret_cast(aligned_dst) % sizeof(*dst), 0); + + // Write all but the last char + nchars = str->Write(aligned_dst, 0, max_chars - 1, flags); + + // Shift everything to unaligned-left + memmove(dst, aligned_dst, nchars * sizeof(*dst)); + + // One more char to be written + uint16_t last; + if (nchars == max_chars - 1 && str->Write(&last, nchars, 1, flags) != 0) { + memcpy(buf + nchars * sizeof(*dst), &last, sizeof(last)); + nchars++; + } + + *chars_written = nchars; + return nchars * sizeof(*dst); +} + + size_t StringBytes::Write(Isolate* isolate, char* buf, size_t buflen, @@ -334,26 +374,40 @@ size_t StringBytes::Write(Isolate* isolate, break; case UCS2: { - uint16_t* const dst = reinterpret_cast(buf); size_t nchars; + if (is_extern && !str->IsOneByte()) { memcpy(buf, data, nbytes); - nchars = nbytes / sizeof(*dst); + nchars = nbytes / sizeof(uint16_t); } else { - nchars = buflen / sizeof(*dst); - nchars = str->Write(dst, 0, nchars, flags); - nbytes = nchars * sizeof(*dst); + nbytes = WriteUCS2(buf, buflen, nbytes, data, str, flags, &nchars); } - if (IsBigEndian()) { - // Node's "ucs2" encoding wants LE character data stored in - // the Buffer, so we need to reorder on BE platforms. See - // http://nodejs.org/api/buffer.html regarding Node's "ucs2" - // encoding specification + if (chars_written != nullptr) + *chars_written = nchars; + + if (!IsBigEndian()) + break; + + // Node's "ucs2" encoding wants LE character data stored in + // the Buffer, so we need to reorder on BE platforms. 
See + // http://nodejs.org/api/buffer.html regarding Node's "ucs2" + // encoding specification + + const bool is_aligned = + reinterpret_cast(buf) % sizeof(uint16_t); + if (is_aligned) { + uint16_t* const dst = reinterpret_cast(buf); for (size_t i = 0; i < nchars; i++) dst[i] = dst[i] << 8 | dst[i] >> 8; + break; + } + + ASSERT_EQ(sizeof(uint16_t), 2); + for (size_t i = 0; i < nchars; i++) { + char tmp = buf[i * 2]; + buf[i * 2] = buf[i * 2 + 1]; + buf[i * 2 + 1] = tmp; } - if (chars_written != nullptr) - *chars_written = nchars; break; } diff --git a/src/string_bytes.h b/src/string_bytes.h index 2fcfedaa098b67..7c044ebaf56562 100644 --- a/src/string_bytes.h +++ b/src/string_bytes.h @@ -151,6 +151,15 @@ class StringBytes { enum encoding encoding) { return Encode(v8::Isolate::GetCurrent(), buf, buflen, encoding); }) + + private: + static size_t WriteUCS2(char* buf, + size_t buflen, + size_t nbytes, + const char* data, + v8::Local str, + int flags, + size_t* chars_written); }; } // namespace node diff --git a/src/tls_wrap.cc b/src/tls_wrap.cc index fc19a5ce0bbe38..381690c79d71ca 100644 --- a/src/tls_wrap.cc +++ b/src/tls_wrap.cc @@ -6,7 +6,6 @@ #include "node_crypto_bio.h" // NodeBIO #include "node_crypto_clienthello.h" // ClientHelloParser #include "node_crypto_clienthello-inl.h" -#include "node_wrap.h" // WithGenericStream #include "node_counters.h" #include "node_internals.h" #include "stream_base.h" @@ -63,12 +62,12 @@ TLSWrap::TLSWrap(Environment* env, SSL_CTX_sess_set_new_cb(sc_->ctx_, SSLWrap::NewSessionCallback); stream_->Consume(); - stream_->set_after_write_cb(OnAfterWriteImpl, this); - stream_->set_alloc_cb(OnAllocImpl, this); - stream_->set_read_cb(OnReadImpl, this); + stream_->set_after_write_cb({ OnAfterWriteImpl, this }); + stream_->set_alloc_cb({ OnAllocImpl, this }); + stream_->set_read_cb({ OnReadImpl, this }); - set_alloc_cb(OnAllocSelf, this); - set_read_cb(OnReadSelf, this); + set_alloc_cb({ OnAllocSelf, this }); + set_read_cb({ OnReadSelf, this }); InitSSL(); } @@ -177,15 +176,12 @@ void TLSWrap::Wrap(const FunctionCallbackInfo& args) { if (args.Length() < 3 || !args[2]->IsBoolean()) return env->ThrowTypeError("Third argument should be boolean"); - Local stream_obj = args[0].As(); + Local stream_obj = args[0].As(); Local sc = args[1].As(); Kind kind = args[2]->IsTrue() ? 
SSLWrap::kServer : SSLWrap::kClient; - StreamBase* stream = nullptr; - WITH_GENERIC_STREAM(env, stream_obj, { - stream = wrap; - }); + StreamBase* stream = static_cast(stream_obj->Value()); CHECK_NE(stream, nullptr); TLSWrap* res = new TLSWrap(env, kind, stream, Unwrap(sc)); @@ -565,8 +561,8 @@ int TLSWrap::DoWrite(WriteWrap* w, } if (empty) { ClearOut(); - // However if there any data that should be written to socket, - // callback should not be invoked immediately + // However, if there is any data that should be written to the socket, + // the callback should not be invoked immediately if (BIO_pending(enc_out_) == 0) return stream_->DoWrite(w, bufs, count, send_handle); } diff --git a/src/tls_wrap.h b/src/tls_wrap.h index b906d78de1ffb0..7a9dc888ccb190 100644 --- a/src/tls_wrap.h +++ b/src/tls_wrap.h @@ -53,7 +53,7 @@ class TLSWrap : public crypto::SSLWrap, size_t self_size() const override { return sizeof(*this); } protected: - static const int kClearOutChunkSize = 1024; + static const int kClearOutChunkSize = 16384; // Maximum number of bytes for hello parser static const int kMaxHelloLength = 16384; diff --git a/test/.eslintrc b/test/.eslintrc index 608c62dff5fa63..2a8f61cb35f6be 100644 --- a/test/.eslintrc +++ b/test/.eslintrc @@ -1,8 +1,6 @@ ## Test-specific linter rules rules: - ## allow unreachable code - no-unreachable: 0 ## allow undeclared variables no-undef: 0 ## allow global Buffer usage diff --git a/test/addons/async-hello-world/binding.cc b/test/addons/async-hello-world/binding.cc index f458dc6a5632fd..aee3a3763f4755 100644 --- a/test/addons/async-hello-world/binding.cc +++ b/test/addons/async-hello-world/binding.cc @@ -3,35 +3,34 @@ #include #include -using namespace v8; -using namespace node; - struct async_req { uv_work_t req; int input; int output; - Persistent callback; + v8::Isolate* isolate; + v8::Persistent callback; }; void DoAsync(uv_work_t* r) { async_req* req = reinterpret_cast(r->data); - sleep(1); // simulate CPU intensive process... + sleep(1); // Simulate CPU intensive process... 
req->output = req->input * 2; } void AfterAsync(uv_work_t* r) { - Isolate* isolate = Isolate::GetCurrent(); - HandleScope scope(isolate); async_req* req = reinterpret_cast(r->data); + v8::Isolate* isolate = req->isolate; + v8::HandleScope scope(isolate); - Handle argv[2] = { - Null(isolate), - Integer::New(isolate, req->output) + v8::Handle argv[2] = { + v8::Null(isolate), + v8::Integer::New(isolate, req->output) }; - TryCatch try_catch; + v8::TryCatch try_catch(isolate); - Local callback = Local::New(isolate, req->callback); + v8::Local callback = + v8::Local::New(isolate, req->callback); callback->Call(isolate->GetCurrentContext()->Global(), 2, argv); // cleanup @@ -39,21 +38,22 @@ void AfterAsync(uv_work_t* r) { delete req; if (try_catch.HasCaught()) { - FatalException(isolate, try_catch); + node::FatalException(isolate, try_catch); } } -void Method(const FunctionCallbackInfo& args) { - Isolate* isolate = Isolate::GetCurrent(); - HandleScope scope(isolate); +void Method(const v8::FunctionCallbackInfo& args) { + v8::Isolate* isolate = args.GetIsolate(); + v8::HandleScope scope(isolate); async_req* req = new async_req; req->req.data = req; req->input = args[0]->IntegerValue(); req->output = 0; + req->isolate = isolate; - Local callback = Local::Cast(args[1]); + v8::Local callback = v8::Local::Cast(args[1]); req->callback.Reset(isolate, callback); uv_queue_work(uv_default_loop(), @@ -62,7 +62,7 @@ void Method(const FunctionCallbackInfo& args) { (uv_after_work_cb)AfterAsync); } -void init(Handle exports, Handle module) { +void init(v8::Handle exports, v8::Handle module) { NODE_SET_METHOD(module, "exports", Method); } diff --git a/test/addons/at-exit/binding.cc b/test/addons/at-exit/binding.cc index 156dbe4ff54bb8..d300aad3e8256f 100644 --- a/test/addons/at-exit/binding.cc +++ b/test/addons/at-exit/binding.cc @@ -16,12 +16,10 @@ static int at_exit_cb1_called = 0; static int at_exit_cb2_called = 0; static void at_exit_cb1(void* arg) { - // FIXME(bnoordhuis) Isolate::GetCurrent() is on its way out. - Isolate* isolate = Isolate::GetCurrent(); + Isolate* isolate = static_cast(arg); HandleScope handle_scope(isolate); - assert(arg == 0); Local obj = Object::New(isolate); - assert(!obj.IsEmpty()); // assert VM is still alive + assert(!obj.IsEmpty()); // Assert VM is still alive. 
assert(obj->IsObject()); at_exit_cb1_called++; } @@ -37,7 +35,7 @@ static void sanity_check(void) { } void init(Handle target) { - AtExit(at_exit_cb1); + AtExit(at_exit_cb1, target->CreationContext()->GetIsolate()); AtExit(at_exit_cb2, cookie); AtExit(at_exit_cb2, cookie); atexit(sanity_check); diff --git a/test/addons/hello-world-function-export/binding.cc b/test/addons/hello-world-function-export/binding.cc index 91fc26cef652fb..68db22748fdbd3 100644 --- a/test/addons/hello-world-function-export/binding.cc +++ b/test/addons/hello-world-function-export/binding.cc @@ -1,15 +1,13 @@ #include #include -using namespace v8; - -void Method(const FunctionCallbackInfo& args) { - Isolate* isolate = Isolate::GetCurrent(); - HandleScope scope(isolate); - args.GetReturnValue().Set(String::NewFromUtf8(isolate, "world")); +void Method(const v8::FunctionCallbackInfo& args) { + v8::Isolate* isolate = args.GetIsolate(); + v8::HandleScope scope(isolate); + args.GetReturnValue().Set(v8::String::NewFromUtf8(isolate, "world")); } -void init(Handle exports, Handle module) { +void init(v8::Handle exports, v8::Handle module) { NODE_SET_METHOD(module, "exports", Method); } diff --git a/test/addons/hello-world/binding.cc b/test/addons/hello-world/binding.cc index 1a6d179abe264b..4982bc3e55d84a 100644 --- a/test/addons/hello-world/binding.cc +++ b/test/addons/hello-world/binding.cc @@ -1,15 +1,13 @@ #include #include -using namespace v8; - -void Method(const FunctionCallbackInfo& args) { - Isolate* isolate = Isolate::GetCurrent(); - HandleScope scope(isolate); - args.GetReturnValue().Set(String::NewFromUtf8(isolate, "world")); +void Method(const v8::FunctionCallbackInfo& args) { + v8::Isolate* isolate = args.GetIsolate(); + v8::HandleScope scope(isolate); + args.GetReturnValue().Set(v8::String::NewFromUtf8(isolate, "world")); } -void init(Handle target) { +void init(v8::Handle target) { NODE_SET_METHOD(target, "hello", Method); } diff --git a/test/fixtures/test_bad_rsa_privkey.pem b/test/fixtures/test_bad_rsa_privkey.pem new file mode 100644 index 00000000000000..cc84a6fc6d67de --- /dev/null +++ b/test/fixtures/test_bad_rsa_privkey.pem @@ -0,0 +1,10 @@ +-----BEGIN RSA PRIVATE KEY----- +MIIBUwIBADANBgkqhkiG9w0BAQEFAASCAT0wggE5AgEAAkEAz0ZHmXyxQSdWk6NF +GRotTax0O94iHv843su0mOynV9QLvlAwMrUk9k4+/SwyLu0eE3iYsYgXstXi3t2u +rDSIMwIDAQABAkAH4ag/Udp7m79TBdZOygwG9BPHYv7xJstGzYAkgHssf7Yd5ZuC +hpKtBvWdPXZaAFbwF8NSisMl98Q/9zgB/q5BAiEA5zXuwMnwt4hE2YqzBDRFB4g9 +I+v+l1soy6x7Wdqo9esCIQDlf15qDb26uRDurBioE3IpZstWIIvLDdKqviZXKMs8 +2QIgWeC5QvA9RtsOCJLGLCg1fUwUmFYwzZ1+Kk6OVMuPSqkCIDIWFSXyL8kzoKVm +O89axxyQCaqXWcsMDkEjVLzK82gpAiB7lzdDHr7MoMWwV2wC/heEFC2p0Rw4wg9j +1V8QbL0Q0A== +-----END RSA PRIVATE KEY----- diff --git a/test/gc/test-http-client-timeout.js b/test/gc/test-http-client-timeout.js index 5bb2d2b05b4fb0..c84581b5c8e54a 100644 --- a/test/gc/test-http-client-timeout.js +++ b/test/gc/test-http-client-timeout.js @@ -54,7 +54,7 @@ function getall() { setImmediate(getall); } -for(var i = 0; i < 10; i++) +for (var i = 0; i < 10; i++) getall(); function afterGC() { diff --git a/test/internet/test-dgram-broadcast-multi-process.js b/test/internet/test-dgram-broadcast-multi-process.js index 4cf5d7226f2c50..c1ca04aa574c11 100644 --- a/test/internet/test-dgram-broadcast-multi-process.js +++ b/test/internet/test-dgram-broadcast-multi-process.js @@ -18,7 +18,7 @@ var common = require('../common'), // take the first non-internal interface as the address for binding get_bindAddress: for (var name in networkInterfaces) { var interfaces = 
networkInterfaces[name]; - for(var i = 0; i < interfaces.length; i++) { + for (var i = 0; i < interfaces.length; i++) { var localInterface = interfaces[i]; if (!localInterface.internal && localInterface.family === 'IPv4') { var bindAddress = localInterface.address; diff --git a/test/internet/test-dgram-multicast-multi-process.js b/test/internet/test-dgram-multicast-multi-process.js index 05acb844e54490..34db38331dc7b6 100644 --- a/test/internet/test-dgram-multicast-multi-process.js +++ b/test/internet/test-dgram-multicast-multi-process.js @@ -3,7 +3,6 @@ var common = require('../common'), assert = require('assert'), dgram = require('dgram'), util = require('util'), - assert = require('assert'), Buffer = require('buffer').Buffer, fork = require('child_process').fork, LOCAL_BROADCAST_HOST = '224.0.0.114', @@ -183,10 +182,9 @@ if (process.argv[2] === 'child') { process.send({ message: buf.toString() }); if (receivedMessages.length == messages.length) { + // .dropMembership() not strictly needed but here as a sanity check listenSocket.dropMembership(LOCAL_BROADCAST_HOST); - - process.nextTick(function() { // TODO should be changed to below. - // listenSocket.dropMembership(LOCAL_BROADCAST_HOST, function() { + process.nextTick(function() { listenSocket.close(); }); } diff --git a/test/internet/test-dns.js b/test/internet/test-dns.js index eb7bab85475b2a..796fd26c0a2e72 100644 --- a/test/internet/test-dns.js +++ b/test/internet/test-dns.js @@ -663,7 +663,7 @@ TEST(function test_resolve_failure(done) { var req = dns.resolve4('nosuchhostimsure', function(err) { assert(err instanceof Error); - switch(err.code) { + switch (err.code) { case 'ENOTFOUND': case 'ESERVFAIL': break; diff --git a/test/internet/test-net-connect-timeout.js b/test/internet/test-net-connect-timeout.js index 436d7731827ef0..b92d234852967e 100644 --- a/test/internet/test-net-connect-timeout.js +++ b/test/internet/test-net-connect-timeout.js @@ -15,9 +15,11 @@ var gotConnect = false; var T = 100; - -// 240.*.*.*.* is "reserved for future use" -var socket = net.createConnection(9999, '240.0.0.0'); +// 192.0.2.1 is part of subnet assigned as "TEST-NET" in RFC 5737. +// For use solely in documentation and example source code. +// In short, it should be unreachable. +// In practice, it's a network black hole. 
+var socket = net.createConnection(9999, '192.0.2.1'); socket.setTimeout(T); diff --git a/test/message/throw_custom_error.js b/test/message/throw_custom_error.js index 270b37d14ddead..bb5dae05a9b03a 100644 --- a/test/message/throw_custom_error.js +++ b/test/message/throw_custom_error.js @@ -2,9 +2,5 @@ var common = require('../common'); var assert = require('assert'); -console.error('before'); - // custom error throwing throw ({ name: 'MyCustomError', message: 'This is a custom message' }); - -console.error('after'); diff --git a/test/message/throw_custom_error.out b/test/message/throw_custom_error.out index bfb928d48dd08e..ef73c52c889dff 100644 --- a/test/message/throw_custom_error.out +++ b/test/message/throw_custom_error.out @@ -1,5 +1,4 @@ -before -*test*message*throw_custom_error.js:8 +*test*message*throw_custom_error.js:6 throw ({ name: 'MyCustomError', message: 'This is a custom message' }); ^ MyCustomError: This is a custom message diff --git a/test/message/throw_non_error.js b/test/message/throw_non_error.js index 33e0a051d65a1d..a1046b725e93a4 100644 --- a/test/message/throw_non_error.js +++ b/test/message/throw_non_error.js @@ -2,9 +2,5 @@ var common = require('../common'); var assert = require('assert'); -console.error('before'); - // custom error throwing throw ({ foo: 'bar' }); - -console.error('after'); diff --git a/test/message/throw_non_error.out b/test/message/throw_non_error.out index c859d5f16a511e..15f95fcc11699a 100644 --- a/test/message/throw_non_error.out +++ b/test/message/throw_non_error.out @@ -1,5 +1,4 @@ -before -*test*message*throw_non_error.js:8 +*test*message*throw_non_error.js:6 throw ({ foo: 'bar' }); ^ [object Object] diff --git a/test/parallel/test-crypto.js b/test/parallel/test-crypto.js index 55b57e65154700..57191b24ae351a 100644 --- a/test/parallel/test-crypto.js +++ b/test/parallel/test-crypto.js @@ -124,5 +124,21 @@ assert.throws(function() { crypto.createSign('RSA-SHA256').update('test').sign(priv); }, /RSA_sign:digest too big for rsa key/); +assert.throws(function() { + // The correct header inside `test_bad_rsa_privkey.pem` should have been + // -----BEGIN PRIVATE KEY----- and -----END PRIVATE KEY----- + // instead of + // -----BEGIN RSA PRIVATE KEY----- and -----END RSA PRIVATE KEY----- + // It is generated in this way: + // $ openssl genrsa -out mykey.pem 512; + // $ openssl pkcs8 -topk8 -inform PEM -outform PEM -in mykey.pem \ + // -out private_key.pem -nocrypt; + // Then open private_key.pem and change its header and footer. + var sha1_privateKey = fs.readFileSync(common.fixturesDir + + '/test_bad_rsa_privkey.pem', 'ascii'); + // this would inject errors onto OpenSSL's error stack + crypto.createSign('sha1').sign(sha1_privateKey); +}, /asn1 encoding routines:ASN1_CHECK_TLEN:wrong tag/); + // Make sure memory isn't released before being returned console.log(crypto.randomBytes(16)); diff --git a/test/parallel/test-domain.js b/test/parallel/test-domain.js index cca01572123a45..918e3288e8dc91 100644 --- a/test/parallel/test-domain.js +++ b/test/parallel/test-domain.js @@ -163,7 +163,6 @@ expectCaught++; // as a callback instead. 
function fn(er) { throw new Error('This function should never be called!'); - process.exit(1); } var bound = d.intercept(fn); diff --git a/test/parallel/test-event-emitter-listener-count.js b/test/parallel/test-event-emitter-listener-count.js new file mode 100644 index 00000000000000..c5b75c819d17f9 --- /dev/null +++ b/test/parallel/test-event-emitter-listener-count.js @@ -0,0 +1,18 @@ +'use strict'; + +const common = require('../common'); +const assert = require('assert'); +const EventEmitter = require('events'); + +const emitter = new EventEmitter(); +emitter.on('foo', function() {}); +emitter.on('foo', function() {}); +emitter.on('baz', function() {}); +// Allow any type +emitter.on(123, function() {}); + +assert.strictEqual(EventEmitter.listenerCount(emitter, 'foo'), 2); +assert.strictEqual(emitter.listenerCount('foo'), 2); +assert.strictEqual(emitter.listenerCount('bar'), 0); +assert.strictEqual(emitter.listenerCount('baz'), 1); +assert.strictEqual(emitter.listenerCount(123), 1); diff --git a/test/parallel/test-event-emitter-subclass.js b/test/parallel/test-event-emitter-subclass.js index 327738271184b0..fe915be34efc2b 100644 --- a/test/parallel/test-event-emitter-subclass.js +++ b/test/parallel/test-event-emitter-subclass.js @@ -46,4 +46,4 @@ var ee2 = new MyEE2(); ee1.on('x', function() {}); -assert.equal(EventEmitter.listenerCount(ee2, 'x'), 0); +assert.equal(ee2.listenerCount('x'), 0); diff --git a/test/parallel/test-file-write-stream.js b/test/parallel/test-file-write-stream.js index 7309a6bf9db4d3..36ef283528b7c1 100644 --- a/test/parallel/test-file-write-stream.js +++ b/test/parallel/test-file-write-stream.js @@ -26,7 +26,6 @@ file }) .on('error', function(err) { throw err; - console.error('error!', err.stack); }) .on('drain', function() { console.error('drain!', callbacks.drain); diff --git a/test/parallel/test-fs-watchfile.js b/test/parallel/test-fs-watchfile.js index 35712741f773df..5e075fb056e8bc 100644 --- a/test/parallel/test-fs-watchfile.js +++ b/test/parallel/test-fs-watchfile.js @@ -4,7 +4,6 @@ const common = require('../common'); const fs = require('fs'); const path = require('path'); const assert = require('assert'); -const fixtures = path.join(__dirname, '..', 'fixtures'); // Basic usage tests. 
assert.throws(function() { @@ -19,7 +18,7 @@ assert.throws(function() { fs.watchFile(new Object(), function() {}); }, /Path must be a string/); -const enoentFile = path.join(fixtures, 'non-existent-file'); +const enoentFile = path.join(common.tmpDir, 'non-existent-file'); const expectedStatObject = new fs.Stats( 0, // dev 0, // mode @@ -37,24 +36,13 @@ const expectedStatObject = new fs.Stats( Date.UTC(1970, 0, 1, 0, 0, 0) // birthtime ); -function removeTestFile() { - try { - fs.unlinkSync(enoentFile); - } catch (ex) { - if (ex.code !== 'ENOENT') { - throw ex; - } - } -} - -// Make sure that the file does not exist, when the test starts -removeTestFile(); +common.refreshTmpDir(); // If the file initially didn't exist, and gets created at a later point of // time, the callback should be invoked again with proper values in stat object var fileExists = false; -fs.watchFile(enoentFile, common.mustCall(function(curr, prev) { +fs.watchFile(enoentFile, {interval: 0}, common.mustCall(function(curr, prev) { if (!fileExists) { // If the file does not exist, all the fields should be zero and the date // fields should be UNIX EPOCH time @@ -71,8 +59,7 @@ fs.watchFile(enoentFile, common.mustCall(function(curr, prev) { // As the file just got created, previous ino value should be lesser than // or equal to zero (non-existent file). assert(prev.ino <= 0); - // Stop watching the file and delete it + // Stop watching the file fs.unwatchFile(enoentFile); - removeTestFile(); } }, 2)); diff --git a/test/parallel/test-http-content-length.js b/test/parallel/test-http-content-length.js index 6c9576cdabf563..ec60396a24545d 100644 --- a/test/parallel/test-http-content-length.js +++ b/test/parallel/test-http-content-length.js @@ -40,7 +40,6 @@ var server = http.createServer(function(req, res) { break; default: throw new Error('Unreachable'); - break; } receivedRequests++; diff --git a/test/parallel/test-http-default-encoding.js b/test/parallel/test-http-default-encoding.js index a40c8841eceab9..612a75bce46647 100644 --- a/test/parallel/test-http-default-encoding.js +++ b/test/parallel/test-http-default-encoding.js @@ -11,16 +11,11 @@ var server = http.Server(function(req, res) { req.on('data', function(chunk) { result += chunk; }).on('end', function() { - clearTimeout(timeout); server.close(); + res.writeHead(200); + res.end('hello world\n'); }); - var timeout = setTimeout(function() { - process.exit(1); - }, 100); - - res.writeHead(200); - res.end('hello world\n'); }); server.listen(common.PORT, function() { diff --git a/test/parallel/test-http-request-end.js b/test/parallel/test-http-request-end.js index 43d023028a4fd0..6ecfc0672e35ff 100644 --- a/test/parallel/test-http-request-end.js +++ b/test/parallel/test-http-request-end.js @@ -14,10 +14,10 @@ var server = http.Server(function(req, res) { req.on('end', function() { server.close(); + res.writeHead(200); + res.end('hello world\n'); }); - res.writeHead(200); - res.end('hello world\n'); }); server.listen(common.PORT, function() { diff --git a/test/parallel/test-https-resume-after-renew.js b/test/parallel/test-https-resume-after-renew.js new file mode 100644 index 00000000000000..23626ccb40cb51 --- /dev/null +++ b/test/parallel/test-https-resume-after-renew.js @@ -0,0 +1,56 @@ +'use strict'; +var common = require('../common'); +var fs = require('fs'); +var https = require('https'); +var crypto = require('crypto'); + +var options = { + key: fs.readFileSync(common.fixturesDir + '/keys/agent1-key.pem'), + cert: fs.readFileSync(common.fixturesDir + 
'/keys/agent1-cert.pem'), + ca: fs.readFileSync(common.fixturesDir + '/keys/ca1-cert.pem') +}; + +var server = https.createServer(options, function(req, res) { + res.end('hello'); +}); + +var aes = new Buffer(16); +aes.fill('S'); +var hmac = new Buffer(16); +hmac.fill('H'); + +server._sharedCreds.context.enableTicketKeyCallback(); +server._sharedCreds.context.onticketkeycallback = function(name, iv, enc) { + if (enc) { + var newName = new Buffer(16); + var newIV = crypto.randomBytes(16); + newName.fill('A'); + } else { + // Renew + return [ 2, hmac, aes ]; + } + + return [ 1, hmac, aes, newName, newIV ]; +}; + +server.listen(common.PORT, function() { + var addr = this.address(); + + function doReq(callback) { + https.request({ + method: 'GET', + port: addr.port, + servername: 'agent1', + ca: options.ca + }, function(res) { + res.resume(); + res.once('end', callback); + }).end(); + } + + doReq(function() { + doReq(function() { + server.close(); + }); + }); +}); diff --git a/test/parallel/test-listen-fd-cluster.js b/test/parallel/test-listen-fd-cluster.js index e895a2944789bd..f6d00c72a0f78c 100644 --- a/test/parallel/test-listen-fd-cluster.js +++ b/test/parallel/test-listen-fd-cluster.js @@ -7,63 +7,62 @@ var PORT = common.PORT; var spawn = require('child_process').spawn; var cluster = require('cluster'); -console.error('Cluster listen fd test', process.argv.slice(2)); +console.error('Cluster listen fd test', process.argv[2] || 'runner'); if (common.isWindows) { console.log('1..0 # Skipped: This test is disabled on windows.'); return; } +// Process relationship is: +// +// parent: the test main script +// -> master: the cluster master +// -> worker: the cluster worker switch (process.argv[2]) { case 'master': return master(); case 'worker': return worker(); case 'parent': return parent(); - default: return test(); } +var ok; + +process.on('exit', function() { + assert.ok(ok); +}); + // spawn the parent, and listen for it to tell us the pid of the cluster. // WARNING: This is an example of listening on some arbitrary FD number // that has already been bound elsewhere in advance. However, binding // server handles to stdio fd's is NOT a good or reliable way to do // concurrency in HTTP servers! Use the cluster module, or if you want // a more low-level approach, use child process IPC manually. -function test() { - var parent = spawn(process.execPath, [__filename, 'parent'], { - stdio: [ 0, 'pipe', 2 ] - }); - var json = ''; - parent.stdout.on('data', function(c) { - json += c.toString(); - if (json.indexOf('\n') !== -1) next(); - }); - function next() { - console.error('output from parent = %s', json); - var cluster = JSON.parse(json); - // now make sure that we can request to the worker, then kill it. - http.get({ - server: 'localhost', - port: PORT, - path: '/', - }).on('response', function(res) { - var s = ''; - res.on('data', function(c) { - s += c.toString(); - }); - res.on('end', function() { - // kill the worker before we start doing asserts. - // it's really annoying when tests leave orphans! - parent.kill(); - process.kill(cluster.master, 'SIGKILL'); - +test(function(parent) { + // now make sure that we can request to the worker, then kill it. + http.get({ + server: 'localhost', + port: PORT, + path: '/', + }).on('response', function(res) { + var s = ''; + res.on('data', function(c) { + s += c.toString(); + }); + res.on('end', function() { + // kill the worker before we start doing asserts. + // it's really annoying when tests leave orphans! 
+ parent.kill(); + parent.on('exit', function() { assert.equal(s, 'hello from worker\n'); assert.equal(res.statusCode, 200); console.log('ok'); + ok = true; }); }); - } -} + }); +}); -function parent() { +function test(cb) { console.error('about to listen in parent'); var server = net.createServer(function(conn) { console.error('connection on parent'); @@ -73,7 +72,7 @@ function parent() { var spawn = require('child_process').spawn; var master = spawn(process.execPath, [__filename, 'master'], { - stdio: [ 0, 1, 2, server._handle ], + stdio: [ 0, 'pipe', 2, server._handle, 'ipc' ], detached: true }); @@ -90,6 +89,11 @@ function parent() { console.error('master closed'); }); console.error('master spawned'); + master.on('message', function(msg) { + if (msg === 'started worker') { + cb(master); + } + }); }); } @@ -99,7 +103,17 @@ function master() { args: [ 'worker' ] }); var worker = cluster.fork(); - console.log('%j\n', { master: process.pid, worker: worker.pid }); + worker.on('message', function(msg) { + if (msg === 'worker ready') { + process.send('started worker'); + } + }); + // Prevent outliving our parent process in case it is abnormally killed - + // under normal conditions our parent kills this process before exiting. + process.on('disconnect', function() { + console.error('master exit on disconnect'); + process.exit(0); + }); } @@ -112,5 +126,6 @@ function worker() { res.end('hello from worker\n'); }).listen({ fd: 3 }, function() { console.error('worker listening on fd=3'); + process.send('worker ready'); }); } diff --git a/test/parallel/test-listen-fd-detached-inherit.js b/test/parallel/test-listen-fd-detached-inherit.js index 7d4df0c1aec106..472b1e8ff939f0 100644 --- a/test/parallel/test-listen-fd-detached-inherit.js +++ b/test/parallel/test-listen-fd-detached-inherit.js @@ -64,8 +64,8 @@ function test() { // Then output the child's pid, and immediately exit. function parent() { var server = net.createServer(function(conn) { - throw new Error('Should not see connections on parent'); conn.end('HTTP/1.1 403 Forbidden\r\n\r\nI got problems.\r\n'); + throw new Error('Should not see connections on parent'); }).listen(PORT, function() { console.error('server listening on %d', PORT); diff --git a/test/parallel/test-listen-fd-server.js b/test/parallel/test-listen-fd-server.js index d4d11e5a8d9693..d51d51ee8eb4de 100644 --- a/test/parallel/test-listen-fd-server.js +++ b/test/parallel/test-listen-fd-server.js @@ -13,54 +13,50 @@ if (common.isWindows) { switch (process.argv[2]) { case 'child': return child(); - case 'parent': return parent(); - default: return test(); } -// spawn the parent, and listen for it to tell us the pid of the child. +var ok; + +process.on('exit', function() { + assert.ok(ok); +}); + // WARNING: This is an example of listening on some arbitrary FD number // that has already been bound elsewhere in advance. However, binding // server handles to stdio fd's is NOT a good or reliable way to do // concurrency in HTTP servers! Use the cluster module, or if you want // a more low-level approach, use child process IPC manually. -function test() { - var parent = spawn(process.execPath, [__filename, 'parent'], { - stdio: [ 0, 'pipe', 2 ] - }); - var json = ''; - parent.stdout.on('data', function(c) { - json += c.toString(); - if (json.indexOf('\n') !== -1) next(); - }); - function next() { - console.error('output from parent = %s', json); - var child = JSON.parse(json); - // now make sure that we can request to the child, then kill it. 
- http.get({ - server: 'localhost', - port: PORT, - path: '/', - }).on('response', function(res) { - var s = ''; - res.on('data', function(c) { - s += c.toString(); - }); - res.on('end', function() { - // kill the child before we start doing asserts. - // it's really annoying when tests leave orphans! - process.kill(child.pid, 'SIGKILL'); - try { - parent.kill(); - } catch (e) {} - +test(function(child) { + // now make sure that we can request to the child, then kill it. + http.get({ + server: 'localhost', + port: PORT, + path: '/', + }).on('response', function(res) { + var s = ''; + res.on('data', function(c) { + s += c.toString(); + }); + res.on('end', function() { + child.kill(); + child.on('exit', function() { assert.equal(s, 'hello from child\n'); assert.equal(res.statusCode, 200); + console.log('ok'); + ok = true; }); }); - } -} + }); +}); function child() { + // Prevent outliving the parent process in case it is terminated before + // killing this child process. + process.on('disconnect', function() { + console.error('exit on disconnect'); + process.exit(0); + }); + // start a server on fd=3 http.createServer(function(req, res) { console.error('request on child'); @@ -68,10 +64,11 @@ function child() { res.end('hello from child\n'); }).listen({ fd: 3 }, function() { console.error('child listening on fd=3'); + process.send('listening'); }); } -function parent() { +function test(cb) { var server = net.createServer(function(conn) { console.error('connection on parent'); conn.end('hello from parent\n'); @@ -80,7 +77,7 @@ function parent() { var spawn = require('child_process').spawn; var child = spawn(process.execPath, [__filename, 'child'], { - stdio: [ 0, 1, 2, server._handle ] + stdio: [ 0, 1, 2, server._handle, 'ipc' ] }); console.log('%j\n', { pid: child.pid }); @@ -90,13 +87,10 @@ function parent() { // be accepted, because the child has the fd open. 
server.close(); - child.on('exit', function(code) { - console.error('child exited', code); - }); - - child.on('close', function() { - console.error('child closed'); + child.on('message', function(msg) { + if (msg === 'listening') { + cb(child); + } }); - console.error('child spawned'); }); } diff --git a/test/parallel/test-net-keepalive.js b/test/parallel/test-net-keepalive.js index 3c339f7abaa350..efbbc5ea7986bb 100644 --- a/test/parallel/test-net-keepalive.js +++ b/test/parallel/test-net-keepalive.js @@ -8,8 +8,8 @@ var echoServer = net.createServer(function(connection) { serverConnection = connection; connection.setTimeout(0); assert.notEqual(connection.setKeepAlive, undefined); - // send a keepalive packet after 1000 ms - connection.setKeepAlive(true, 1000); + // send a keepalive packet after 50 ms + connection.setKeepAlive(true, common.platformTimeout(50)); connection.on('end', function() { connection.end(); }); @@ -27,5 +27,5 @@ echoServer.on('listening', function() { serverConnection.end(); clientConnection.end(); echoServer.close(); - }, 1200); + }, common.platformTimeout(100)); }); diff --git a/test/parallel/test-net-listen-fd0.js b/test/parallel/test-net-listen-fd0.js index bf437268c92945..e326ac2b60beb1 100644 --- a/test/parallel/test-net-listen-fd0.js +++ b/test/parallel/test-net-listen-fd0.js @@ -11,7 +11,7 @@ process.on('exit', function() { // this should fail with an async EINVAL error, not throw an exception net.createServer(assert.fail).listen({fd:0}).on('error', function(e) { - switch(e.code) { + switch (e.code) { case 'EINVAL': case 'ENOTSOCK': gotError = e; diff --git a/test/parallel/test-net-pingpong.js b/test/parallel/test-net-pingpong.js index 9a0c8af5b0697d..be7dfa435949b5 100644 --- a/test/parallel/test-net-pingpong.js +++ b/test/parallel/test-net-pingpong.js @@ -27,20 +27,17 @@ function pingPongTest(port, host) { // than one message. assert.ok(0 <= socket.bufferSize && socket.bufferSize <= 4); - console.log('server got: ' + data); assert.equal(true, socket.writable); assert.equal(true, socket.readable); assert.equal(true, count <= N); - if (/PING/.exec(data)) { - socket.write('PONG', function() { - sentPongs++; - console.error('sent PONG'); - }); - } + assert.equal(data, 'PING'); + + socket.write('PONG', function() { + sentPongs++; + }); }); socket.on('end', function() { - console.error(socket); assert.equal(true, socket.allowHalfOpen); assert.equal(true, socket.writable); // because allowHalfOpen assert.equal(false, socket.readable); @@ -73,8 +70,6 @@ function pingPongTest(port, host) { }); client.on('data', function(data) { - console.log('client got: ' + data); - assert.equal('PONG', data); count += 1; diff --git a/test/parallel/test-net-server-pause-on-connect.js b/test/parallel/test-net-server-pause-on-connect.js index db57114302f490..3f54ecea3c6fd2 100644 --- a/test/parallel/test-net-server-pause-on-connect.js +++ b/test/parallel/test-net-server-pause-on-connect.js @@ -16,14 +16,22 @@ var server1 = net.createServer({pauseOnConnect: true}, function(socket) { }); setTimeout(function() { + // After 50(ish) ms, the other socket should have already read the data. + assert.equal(read, true); assert.equal(socket.bytesRead, 0, 'no data should have been read yet'); + socket.resume(); stopped = false; - }, 3000); + }, common.platformTimeout(50)); }); +// read is a timing check, as server1's timer should fire after server2's +// connection receives the data. Note that this could be race-y. 
+var read = false; var server2 = net.createServer({pauseOnConnect: false}, function(socket) { socket.on('data', function(data) { + read = true; + assert.equal(data.toString(), msg, 'invalid data received'); socket.end(); server2.close(); @@ -37,3 +45,8 @@ server1.listen(common.PORT, function() { server2.listen(common.PORT + 1, function() { net.createConnection({port: common.PORT + 1}).write(msg); }); + +process.on('exit', function() { + assert.equal(stopped, false); + assert.equal(read, true); +}); diff --git a/test/parallel/test-net-socket-local-address.js b/test/parallel/test-net-socket-local-address.js new file mode 100644 index 00000000000000..4c0e31d08c4771 --- /dev/null +++ b/test/parallel/test-net-socket-local-address.js @@ -0,0 +1,34 @@ +'use strict'; +var common = require('../common'); +var assert = require('assert'); +var net = require('net'); + +var conns = 0; +var clientLocalPorts = []; +var serverRemotePorts = []; + +var server = net.createServer(function(socket) { + serverRemotePorts.push(socket.remotePort); + conns++; +}); + +var client = new net.Socket(); + +server.on('close', function() { + assert.deepEqual(clientLocalPorts, serverRemotePorts, + 'client and server should agree on the ports used'); + assert.equal(2, conns); +}); + +server.listen(common.PORT, common.localhostIPv4, testConnect); + +function testConnect() { + if (conns == 2) { + return server.close(); + } + client.connect(common.PORT, common.localhostIPv4, function() { + clientLocalPorts.push(this.localPort); + this.once('close', testConnect); + this.destroy(); + }); +} diff --git a/test/parallel/test-path-parse-format.js b/test/parallel/test-path-parse-format.js index 677bf3241f0bae..e90fe217de93ec 100644 --- a/test/parallel/test-path-parse-format.js +++ b/test/parallel/test-path-parse-format.js @@ -9,6 +9,7 @@ var winPaths = [ '\\foo\\C:', 'file', '.\\file', + '', // unc '\\\\server\\share\\file_path', @@ -32,7 +33,8 @@ var unixPaths = [ 'file', '.\\file', './file', - 'C:\\foo' + 'C:\\foo', + '' ]; var unixSpecialCaseFormatTests = [ @@ -52,8 +54,6 @@ var errors = [ message: /Path must be a string. Received 1/}, {method: 'parse', input: [], message: /Path must be a string. Received undefined/}, - // {method: 'parse', input: [''], - // message: /Invalid path/}, // omitted because it's hard to trigger! {method: 'format', input: [null], message: /Parameter 'pathObject' must be an object, not/}, {method: 'format', input: [''], @@ -93,8 +93,13 @@ function checkErrors(path) { } function checkParseFormat(path, paths) { - paths.forEach(function(element, index, array) { + paths.forEach(function(element) { var output = path.parse(element); + assert.strictEqual(typeof output.root, 'string'); + assert.strictEqual(typeof output.dir, 'string'); + assert.strictEqual(typeof output.base, 'string'); + assert.strictEqual(typeof output.ext, 'string'); + assert.strictEqual(typeof output.name, 'string'); assert.strictEqual(path.format(output), element); assert.strictEqual(output.dir, output.dir ? 
path.dirname(element) : ''); assert.strictEqual(output.base, path.basename(element)); diff --git a/test/parallel/test-repl-tab-complete.js b/test/parallel/test-repl-tab-complete.js index 856fd9b041f973..aca26648cf44e1 100644 --- a/test/parallel/test-repl-tab-complete.js +++ b/test/parallel/test-repl-tab-complete.js @@ -248,3 +248,73 @@ testMe.complete('proxy.', common.mustCall(function(error, data) { assert.strictEqual(error, null); assert.deepEqual(data, [[], 'proxy.']); })); + +// Make sure tab completion does not include integer members of an Array +var array_elements = [ [ + 'ary.__defineGetter__', + 'ary.__defineSetter__', + 'ary.__lookupGetter__', + 'ary.__lookupSetter__', + 'ary.__proto__', + 'ary.constructor', + 'ary.hasOwnProperty', + 'ary.isPrototypeOf', + 'ary.propertyIsEnumerable', + 'ary.toLocaleString', + 'ary.toString', + 'ary.valueOf', + '', + 'ary.concat', + 'ary.entries', + 'ary.every', + 'ary.filter', + 'ary.forEach', + 'ary.indexOf', + 'ary.join', + 'ary.keys', + 'ary.lastIndexOf', + 'ary.length', + 'ary.map', + 'ary.pop', + 'ary.push', + 'ary.reduce', + 'ary.reduceRight', + 'ary.reverse', + 'ary.shift', + 'ary.slice', + 'ary.some', + 'ary.sort', + 'ary.splice', + 'ary.unshift' ], + 'ary.']; + +putIn.run(['.clear']); + +putIn.run(['var ary = [1,2,3];']); +testMe.complete('ary.', common.mustCall(function(error, data) { + assert.deepEqual(data, array_elements); +})); + +// Make sure tab completion does not include integer keys in an object +var obj_elements = [ [ + 'obj.__defineGetter__', + 'obj.__defineSetter__', + 'obj.__lookupGetter__', + 'obj.__lookupSetter__', + 'obj.__proto__', + 'obj.constructor', + 'obj.hasOwnProperty', + 'obj.isPrototypeOf', + 'obj.propertyIsEnumerable', + 'obj.toLocaleString', + 'obj.toString', + 'obj.valueOf', + '', + 'obj.a' ], + 'obj.' 
]; +putIn.run(['.clear']); +putIn.run(['var obj = {1:"a","1a":"b",a:"b"};']); + +testMe.complete('obj.', common.mustCall(function(error, data) { + assert.deepEqual(data, obj_elements); +})); diff --git a/test/parallel/test-require-unicode.js b/test/parallel/test-require-unicode.js new file mode 100644 index 00000000000000..0bccf06916c326 --- /dev/null +++ b/test/parallel/test-require-unicode.js @@ -0,0 +1,16 @@ +'use strict'; + +const common = require('../common'); +const assert = require('assert'); +const fs = require('fs'); +const path = require('path'); + +common.refreshTmpDir(); + +const dirname = path.join(common.tmpDir, '\u4e2d\u6587\u76ee\u5f55'); +fs.mkdirSync(dirname); +fs.writeFileSync(path.join(dirname, 'file.js'), 'module.exports = 42;'); +fs.writeFileSync(path.join(dirname, 'package.json'), + JSON.stringify({ name: 'test', main: 'file.js' })); +assert.equal(require(dirname), 42); +assert.equal(require(path.join(dirname, 'file.js')), 42); diff --git a/test/parallel/test-stream2-base64-single-char-read-end.js b/test/parallel/test-stream2-base64-single-char-read-end.js index 37a97cd817e05b..e50ea5a0cc1370 100644 --- a/test/parallel/test-stream2-base64-single-char-read-end.js +++ b/test/parallel/test-stream2-base64-single-char-read-end.js @@ -11,7 +11,7 @@ var accum = []; var timeout; src._read = function(n) { - if(!hasRead) { + if (!hasRead) { hasRead = true; process.nextTick(function() { src.push(new Buffer('1')); diff --git a/test/parallel/test-sys.js b/test/parallel/test-sys.js index bbc8c092002af6..9367e55c687f2f 100644 --- a/test/parallel/test-sys.js +++ b/test/parallel/test-sys.js @@ -17,7 +17,7 @@ assert.equal(new Date('2010-02-14T12:48:40+01:00').toString(), assert.equal("'\\n\\u0001'", common.inspect('\n\u0001')); assert.equal('[]', common.inspect([])); -assert.equal('{}', common.inspect(Object.create([]))); +assert.equal('Array {}', common.inspect(Object.create([]))); assert.equal('[ 1, 2 ]', common.inspect([1, 2])); assert.equal('[ 1, [ 2, 3 ] ]', common.inspect([1, [2, 3]])); diff --git a/test/parallel/test-tick-processor.js b/test/parallel/test-tick-processor.js new file mode 100644 index 00000000000000..ebcda79d679d55 --- /dev/null +++ b/test/parallel/test-tick-processor.js @@ -0,0 +1,45 @@ +'use strict'; +var fs = require('fs'); +var assert = require('assert'); +var path = require('path'); +var cp = require('child_process'); +var common = require('../common'); + +common.refreshTmpDir(); + +process.chdir(common.tmpDir); +cp.execFileSync(process.execPath, ['-prof', '-pe', + 'function foo(n) {' + + 'require(\'vm\').runInDebugContext(\'Debug\');' + + 'return n < 2 ? 
n : setImmediate(function() { foo(n-1) + foo(n-2);}); };' + + 'setTimeout(function() { process.exit(0); }, 2000);' + + 'foo(40);']); +var matches = fs.readdirSync(common.tmpDir).filter(function(file) { + return /^isolate-/.test(file); +}); +if (matches.length != 1) { + assert.fail('There should be a single log file.'); +} +var log = matches[0]; +var processor = + path.join(common.testDir, '..', 'tools', 'v8-prof', getScriptName()); +var out = cp.execSync(processor + ' ' + log, {encoding: 'utf8'}); +assert(out.match(/LazyCompile.*foo/)); +if (process.platform === 'win32' || + process.platform === 'sunos' || + process.platform === 'freebsd') { + console.log('1..0 # Skipped: C++ symbols are not mapped for this os.'); + return; +} +assert(out.match(/RunInDebugContext/)); + +function getScriptName() { + switch (process.platform) { + case 'darwin': + return 'mac-tick-processor'; + case 'win32': + return 'windows-tick-processor.bat'; + default: + return 'linux-tick-processor'; + } +} diff --git a/test/parallel/test-tls-check-server-identity.js b/test/parallel/test-tls-check-server-identity.js index e659f40aa90232..8d2155b94ea9af 100644 --- a/test/parallel/test-tls-check-server-identity.js +++ b/test/parallel/test-tls-check-server-identity.js @@ -30,6 +30,13 @@ var tests = [ 'DNS:omg.com' }, + // Empty Cert + { + host: 'a.com', + cert: { }, + error: 'Cert is empty' + }, + // Multiple CN fields { host: 'foo.com', cert: { diff --git a/test/parallel/test-tls-cipher-list.js b/test/parallel/test-tls-cipher-list.js new file mode 100644 index 00000000000000..9ae8fefa0f4351 --- /dev/null +++ b/test/parallel/test-tls-cipher-list.js @@ -0,0 +1,32 @@ +'use strict'; +const common = require('../common'); + +if (!common.hasCrypto) { + console.log('1..0 # Skipped: missing crypto'); + return; +} + +const assert = require('assert'); +const spawn = require('child_process').spawn; +const defaultCoreList = require('constants').defaultCoreCipherList; + +function doCheck(arg, check) { + var out = ''; + var arg = arg.concat([ + '-pe', + 'require("constants").defaultCipherList' + ]); + spawn(process.execPath, arg, {}). + on('error', assert.fail). 
+ stdout.on('data', function(chunk) { + out += chunk; + }).on('end', function() { + assert.equal(out.trim(), check); + }).on('error', assert.fail); +} + +// test the default unmodified version +doCheck([], defaultCoreList); + +// test the command line switch by itself +doCheck(['--tls-cipher-list=ABC'], 'ABC'); diff --git a/test/parallel/test-util-inspect.js b/test/parallel/test-util-inspect.js index f583005ce96f75..58a4c8a2f2b142 100644 --- a/test/parallel/test-util-inspect.js +++ b/test/parallel/test-util-inspect.js @@ -61,7 +61,7 @@ assert.ok(ex.indexOf('[message]') != -1); // GH-1941 // should not throw: -assert.equal(util.inspect(Object.create(Date.prototype)), '{}'); +assert.equal(util.inspect(Object.create(Date.prototype)), 'Date {}'); // GH-1944 assert.doesNotThrow(function() { @@ -106,7 +106,7 @@ assert.ok(util.inspect(y), '[ \'a\', \'b\', \'c\', \'\\\\\\\': \'d\' ]'); function test_color_style(style, input, implicit) { var color_name = util.inspect.styles[style]; var color = ['', '']; - if(util.inspect.colors[color_name]) + if (util.inspect.colors[color_name]) color = util.inspect.colors[color_name]; var without_color = util.inspect(input, false, 0, false); @@ -306,3 +306,44 @@ checkAlignment(function() { }()); checkAlignment(new Set(big_array)); checkAlignment(new Map(big_array.map(function(y) { return [y, null]; }))); + + +// Test display of constructors + +class ObjectSubclass {} +class ArraySubclass extends Array {} +class SetSubclass extends Set {} +class MapSubclass extends Map {} +class PromiseSubclass extends Promise {} + +var x = new ObjectSubclass(); +x.foo = 42; +assert.equal(util.inspect(x), + 'ObjectSubclass { foo: 42 }'); +assert.equal(util.inspect(new ArraySubclass(1, 2, 3)), + 'ArraySubclass [ 1, 2, 3 ]'); +assert.equal(util.inspect(new SetSubclass([1, 2, 3])), + 'SetSubclass { 1, 2, 3 }'); +assert.equal(util.inspect(new MapSubclass([['foo', 42]])), + 'MapSubclass { \'foo\' => 42 }'); +assert.equal(util.inspect(new PromiseSubclass(function() {})), + 'PromiseSubclass { }'); + +// Corner cases. 
+var x = { constructor: 42 }; +assert.equal(util.inspect(x), '{ constructor: 42 }'); + +var x = {}; +Object.defineProperty(x, 'constructor', { + get: function() { + throw new Error('should not access constructor'); + }, + enumerable: true +}); +assert.equal(util.inspect(x), '{ constructor: [Getter] }'); + +var x = new (function() {}); +assert.equal(util.inspect(x), '{}'); + +var x = Object.create(null); +assert.equal(util.inspect(x), '{}'); diff --git a/test/parallel/test-v8-stats.js b/test/parallel/test-v8-stats.js index e48c1d70365073..fc4a6df30f8ed8 100644 --- a/test/parallel/test-v8-stats.js +++ b/test/parallel/test-v8-stats.js @@ -6,6 +6,7 @@ var v8 = require('v8'); var s = v8.getHeapStatistics(); var keys = [ 'heap_size_limit', + 'total_available_size', 'total_heap_size', 'total_heap_size_executable', 'total_physical_size', diff --git a/test/sequential/test-repl-persistent-history.js b/test/sequential/test-repl-persistent-history.js index 8d550f6c1d7124..ef433912da5a65 100644 --- a/test/sequential/test-repl-persistent-history.js +++ b/test/sequential/test-repl-persistent-history.js @@ -69,6 +69,7 @@ const fixtures = path.join(common.testDir, 'fixtures'); const historyFixturePath = path.join(fixtures, '.node_repl_history'); const historyPath = path.join(common.tmpDir, '.fixture_copy_repl_history'); const oldHistoryPath = path.join(fixtures, 'old-repl-history-file.json'); +const enoentHistoryPath = path.join(fixtures, 'enoent-repl-history-file.json'); const tests = [{ @@ -76,6 +77,12 @@ const tests = [{ test: [UP], expected: [prompt, replDisabled, prompt] }, +{ + env: { NODE_REPL_HISTORY: '', + NODE_REPL_HISTORY_FILE: enoentHistoryPath }, + test: [UP], + expected: [prompt, replDisabled, prompt] +}, { env: { NODE_REPL_HISTORY: '', NODE_REPL_HISTORY_FILE: oldHistoryPath }, diff --git a/test/sequential/test-stdin-from-file.js b/test/sequential/test-stdin-from-file.js index 35aa5b1a89ac38..07b044769b3791 100644 --- a/test/sequential/test-stdin-from-file.js +++ b/test/sequential/test-stdin-from-file.js @@ -2,7 +2,7 @@ var common = require('../common'); var assert = require('assert'); var join = require('path').join; -var childProccess = require('child_process'); +var childProcess = require('child_process'); var fs = require('fs'); var stdoutScript = join(common.fixturesDir, 'echo-close-check.js'); @@ -32,7 +32,7 @@ try { fs.writeFileSync(tmpFile, string); -childProccess.exec(cmd, function(err, stdout, stderr) { +childProcess.exec(cmd, function(err, stdout, stderr) { fs.unlinkSync(tmpFile); if (err) throw err; diff --git a/test/sequential/test-stdout-to-file.js b/test/sequential/test-stdout-to-file.js index 953747688364f7..41b61df4d72bd5 100644 --- a/test/sequential/test-stdout-to-file.js +++ b/test/sequential/test-stdout-to-file.js @@ -2,7 +2,7 @@ var common = require('../common'); var assert = require('assert'); var path = require('path'); -var childProccess = require('child_process'); +var childProcess = require('child_process'); var fs = require('fs'); var scriptString = path.join(common.fixturesDir, 'print-chars.js'); @@ -26,7 +26,7 @@ function test(size, useBuffer, cb) { common.print(size + ' chars to ' + tmpFile + '...'); - childProccess.exec(cmd, function(err) { + childProcess.exec(cmd, function(err) { if (err) throw err; console.log('done!'); diff --git a/tools/check-imports.sh b/tools/check-imports.sh index caeb382ff73461..2fb263f2115c2b 100755 --- a/tools/check-imports.sh +++ b/tools/check-imports.sh @@ -1,5 +1,19 @@ #!/bin/sh +# Copyright (c) 2013, 2014, Ben Noordhuis +# +# 
Permission to use, copy, modify, and/or distribute this software for any +# purpose with or without fee is hereby granted, provided that the above +# copyright notice and this permission notice appear in all copies. +# +# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES +# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF +# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR +# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES +# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN +# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF +# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + SED=sed UNAME=`uname` diff --git a/tools/icu/icu-generic.gyp b/tools/icu/icu-generic.gyp index bb2b5e5e4d5ec4..900cdb96f0a799 100644 --- a/tools/icu/icu-generic.gyp +++ b/tools/icu/icu-generic.gyp @@ -111,8 +111,8 @@ '<@(icu_src_i18n)' ], 'conditions': [ - [ 'icu_ver_major == 54', { 'sources!': [ - ## Strip out the following for ICU 54 only. + [ 'icu_ver_major == 55', { 'sources!': [ + ## Strip out the following for ICU 55 only. ## add more conditions in the future? ## if your compiler can dead-strip, this will ## make ZERO difference to binary size. @@ -369,8 +369,8 @@ '<@(icu_src_common)', ], 'conditions': [ - [ 'icu_ver_major == 54', { 'sources!': [ - ## Strip out the following for ICU 54 only. + [ 'icu_ver_major == 55', { 'sources!': [ + ## Strip out the following for ICU 55 only. ## add more conditions in the future? ## if your compiler can dead-strip, this will ## make ZERO difference to binary size. diff --git a/tools/icu/patches/54/source/io/ufile.c b/tools/icu/patches/54/source/io/ufile.c new file mode 100644 index 00000000000000..ab9f70a3d5cd84 --- /dev/null +++ b/tools/icu/patches/54/source/io/ufile.c @@ -0,0 +1,360 @@ +/* +****************************************************************************** +* +* Copyright (C) 1998-2015, International Business Machines +* Corporation and others. All Rights Reserved. +* +****************************************************************************** +* +* File ufile.c +* +* Modification History: +* +* Date Name Description +* 11/19/98 stephen Creation. +* 03/12/99 stephen Modified for new C API. +* 06/16/99 stephen Changed T_LocaleBundle to u_locbund +* 07/19/99 stephen Fixed to use ucnv's default codepage. +****************************************************************************** +*/ + +/* + * fileno is not declared when building with GCC in strict mode. + */ +#if defined(__GNUC__) && defined(__STRICT_ANSI__) +#undef __STRICT_ANSI__ +#endif + +#include "locmap.h" +#include "unicode/ustdio.h" + +#if !UCONFIG_NO_CONVERSION + +#include "ufile.h" +#include "unicode/uloc.h" +#include "unicode/ures.h" +#include "unicode/ucnv.h" +#include "unicode/ustring.h" +#include "cstring.h" +#include "cmemory.h" + +#if U_PLATFORM_USES_ONLY_WIN32_API && !defined(fileno) +/* Windows likes to rename Unix-like functions */ +#define fileno _fileno +#endif + +static UFILE* +finit_owner(FILE *f, + const char *locale, + const char *codepage, + UBool takeOwnership + ) +{ + UErrorCode status = U_ZERO_ERROR; + UFILE *result; + if(f == NULL) { + return 0; + } + result = (UFILE*) uprv_malloc(sizeof(UFILE)); + if(result == NULL) { + return 0; + } + + uprv_memset(result, 0, sizeof(UFILE)); + result->fFileno = fileno(f); + +#if U_PLATFORM_USES_ONLY_WIN32_API && _MSC_VER < 1900 + /* + * Below is a very old workaround (ICU ticket:231). 
+ * + * Previously, 'FILE*' from inside and outside ICU's DLL + * were different, because they pointed into local copies + * of the io block. At least by VS 2015 the implementation + * is something like: + * stdio = _acrt_iob_func(0) + * .. which is a function call, so should return the same pointer + * regardless of call site. + * As of _MSC_VER 1900 this patch is retired, at 16 years old. + */ + if (0 <= result->fFileno && result->fFileno <= 2) { + /* stdin, stdout and stderr need to be special cased for Windows 98 */ +#if _MSC_VER >= 1400 + result->fFile = &__iob_func()[_fileno(f)]; +#else + result->fFile = &_iob[_fileno(f)]; +#endif + } + else +#endif + { + result->fFile = f; + } + + result->str.fBuffer = result->fUCBuffer; + result->str.fPos = result->fUCBuffer; + result->str.fLimit = result->fUCBuffer; + +#if !UCONFIG_NO_FORMATTING + /* if locale is 0, use the default */ + if(u_locbund_init(&result->str.fBundle, locale) == 0) { + /* DO NOT FCLOSE HERE! */ + uprv_free(result); + return 0; + } +#endif + + /* If the codepage is not "" use the ucnv_open default behavior */ + if(codepage == NULL || *codepage != '\0') { + result->fConverter = ucnv_open(codepage, &status); + } + /* else result->fConverter is already memset'd to NULL. */ + + if(U_SUCCESS(status)) { + result->fOwnFile = takeOwnership; + } + else { +#if !UCONFIG_NO_FORMATTING + u_locbund_close(&result->str.fBundle); +#endif + /* DO NOT fclose here!!!!!! */ + uprv_free(result); + result = NULL; + } + + return result; +} + +U_CAPI UFILE* U_EXPORT2 /* U_CAPI ... U_EXPORT2 added by Peter Kirk 17 Nov 2001 */ +u_finit(FILE *f, + const char *locale, + const char *codepage) +{ + return finit_owner(f, locale, codepage, FALSE); +} + +U_CAPI UFILE* U_EXPORT2 +u_fadopt(FILE *f, + const char *locale, + const char *codepage) +{ + return finit_owner(f, locale, codepage, TRUE); +} + +U_CAPI UFILE* U_EXPORT2 /* U_CAPI ... U_EXPORT2 added by Peter Kirk 17 Nov 2001 */ +u_fopen(const char *filename, + const char *perm, + const char *locale, + const char *codepage) +{ + UFILE *result; + FILE *systemFile = fopen(filename, perm); + if(systemFile == 0) { + return 0; + } + + result = finit_owner(systemFile, locale, codepage, TRUE); + + if (!result) { + /* Something bad happened. + Maybe the converter couldn't be opened. */ + fclose(systemFile); + } + + return result; /* not a file leak */ +} + +U_CAPI UFILE* U_EXPORT2 +u_fopen_u(const UChar *filename, + const char *perm, + const char *locale, + const char *codepage) +{ + UFILE *result; + char buffer[256]; + + u_austrcpy(buffer, filename); + + result = u_fopen(buffer, perm, locale, codepage); +#if U_PLATFORM_USES_ONLY_WIN32_API + /* Try Windows API _wfopen if the above fails. */ + if (!result) { + FILE *systemFile = _wfopen(filename, (UChar*)perm); + if (systemFile) { + result = finit_owner(systemFile, locale, codepage, TRUE); + } + if (!result) { + /* Something bad happened. + Maybe the converter couldn't be opened. */ + fclose(systemFile); + } + } +#endif + return result; /* not a file leak */ +} + +U_CAPI UFILE* U_EXPORT2 +u_fstropen(UChar *stringBuf, + int32_t capacity, + const char *locale) +{ + UFILE *result; + + if (capacity < 0) { + return NULL; + } + + result = (UFILE*) uprv_malloc(sizeof(UFILE)); + /* Null pointer test */ + if (result == NULL) { + return NULL; /* Just get out. 
*/ + } + uprv_memset(result, 0, sizeof(UFILE)); + result->str.fBuffer = stringBuf; + result->str.fPos = stringBuf; + result->str.fLimit = stringBuf+capacity; + +#if !UCONFIG_NO_FORMATTING + /* if locale is 0, use the default */ + if(u_locbund_init(&result->str.fBundle, locale) == 0) { + /* DO NOT FCLOSE HERE! */ + uprv_free(result); + return 0; + } +#endif + + return result; +} + +U_CAPI UBool U_EXPORT2 +u_feof(UFILE *f) +{ + UBool endOfBuffer; + if (f == NULL) { + return TRUE; + } + endOfBuffer = (UBool)(f->str.fPos >= f->str.fLimit); + if (f->fFile != NULL) { + return endOfBuffer && feof(f->fFile); + } + return endOfBuffer; +} + +U_CAPI void U_EXPORT2 +u_fflush(UFILE *file) +{ + ufile_flush_translit(file); + ufile_flush_io(file); + if (file->fFile) { + fflush(file->fFile); + } + else if (file->str.fPos < file->str.fLimit) { + *(file->str.fPos++) = 0; + } + /* TODO: flush input */ +} + +U_CAPI void +u_frewind(UFILE *file) +{ + u_fflush(file); + ucnv_reset(file->fConverter); + if (file->fFile) { + rewind(file->fFile); + file->str.fLimit = file->fUCBuffer; + file->str.fPos = file->fUCBuffer; + } + else { + file->str.fPos = file->str.fBuffer; + } +} + +U_CAPI void U_EXPORT2 /* U_CAPI ... U_EXPORT2 added by Peter Kirk 17 Nov 2001 */ +u_fclose(UFILE *file) +{ + if (file) { + u_fflush(file); + ufile_close_translit(file); + + if(file->fOwnFile) + fclose(file->fFile); + +#if !UCONFIG_NO_FORMATTING + u_locbund_close(&file->str.fBundle); +#endif + + ucnv_close(file->fConverter); + uprv_free(file); + } +} + +U_CAPI FILE* U_EXPORT2 /* U_CAPI ... U_EXPORT2 added by Peter Kirk 17 Nov 2001 */ +u_fgetfile( UFILE *f) +{ + return f->fFile; +} + +#if !UCONFIG_NO_FORMATTING + +U_CAPI const char* U_EXPORT2 /* U_CAPI ... U_EXPORT2 added by Peter Kirk 17 Nov 2001 */ +u_fgetlocale( UFILE *file) +{ + return file->str.fBundle.fLocale; +} + +U_CAPI int32_t U_EXPORT2 /* U_CAPI ... U_EXPORT2 added by Peter Kirk 17 Nov 2001 */ +u_fsetlocale(UFILE *file, + const char *locale) +{ + u_locbund_close(&file->str.fBundle); + + return u_locbund_init(&file->str.fBundle, locale) == 0 ? -1 : 0; +} + +#endif + +U_CAPI const char* U_EXPORT2 /* U_CAPI ... U_EXPORT2 added by Peter Kirk 17 Nov 2001 */ +u_fgetcodepage(UFILE *file) +{ + UErrorCode status = U_ZERO_ERROR; + const char *codepage = NULL; + + if (file->fConverter) { + codepage = ucnv_getName(file->fConverter, &status); + if(U_FAILURE(status)) + return 0; + } + return codepage; +} + +U_CAPI int32_t U_EXPORT2 /* U_CAPI ... U_EXPORT2 added by Peter Kirk 17 Nov 2001 */ +u_fsetcodepage( const char *codepage, + UFILE *file) +{ + UErrorCode status = U_ZERO_ERROR; + int32_t retVal = -1; + + /* We use the normal default codepage for this system, and not the one for the locale. */ + if ((file->str.fPos == file->str.fBuffer) && (file->str.fLimit == file->str.fBuffer)) { + ucnv_close(file->fConverter); + file->fConverter = ucnv_open(codepage, &status); + if(U_SUCCESS(status)) { + retVal = 0; + } + } + return retVal; +} + + +U_CAPI UConverter * U_EXPORT2 /* U_CAPI ... 
U_EXPORT2 added by Peter Kirk 17 Nov 2001 */ +u_fgetConverter(UFILE *file) +{ + return file->fConverter; +} +#if !UCONFIG_NO_FORMATTING +U_CAPI const UNumberFormat* U_EXPORT2 u_fgetNumberFormat(UFILE *file) +{ + return u_locbund_getNumberFormat(&file->str.fBundle, UNUM_DECIMAL); +} +#endif + +#endif diff --git a/tools/icu/patches/55/source/io/ufile.c b/tools/icu/patches/55/source/io/ufile.c new file mode 100644 index 00000000000000..ab9f70a3d5cd84 --- /dev/null +++ b/tools/icu/patches/55/source/io/ufile.c @@ -0,0 +1,360 @@ +/* +****************************************************************************** +* +* Copyright (C) 1998-2015, International Business Machines +* Corporation and others. All Rights Reserved. +* +****************************************************************************** +* +* File ufile.c +* +* Modification History: +* +* Date Name Description +* 11/19/98 stephen Creation. +* 03/12/99 stephen Modified for new C API. +* 06/16/99 stephen Changed T_LocaleBundle to u_locbund +* 07/19/99 stephen Fixed to use ucnv's default codepage. +****************************************************************************** +*/ + +/* + * fileno is not declared when building with GCC in strict mode. + */ +#if defined(__GNUC__) && defined(__STRICT_ANSI__) +#undef __STRICT_ANSI__ +#endif + +#include "locmap.h" +#include "unicode/ustdio.h" + +#if !UCONFIG_NO_CONVERSION + +#include "ufile.h" +#include "unicode/uloc.h" +#include "unicode/ures.h" +#include "unicode/ucnv.h" +#include "unicode/ustring.h" +#include "cstring.h" +#include "cmemory.h" + +#if U_PLATFORM_USES_ONLY_WIN32_API && !defined(fileno) +/* Windows likes to rename Unix-like functions */ +#define fileno _fileno +#endif + +static UFILE* +finit_owner(FILE *f, + const char *locale, + const char *codepage, + UBool takeOwnership + ) +{ + UErrorCode status = U_ZERO_ERROR; + UFILE *result; + if(f == NULL) { + return 0; + } + result = (UFILE*) uprv_malloc(sizeof(UFILE)); + if(result == NULL) { + return 0; + } + + uprv_memset(result, 0, sizeof(UFILE)); + result->fFileno = fileno(f); + +#if U_PLATFORM_USES_ONLY_WIN32_API && _MSC_VER < 1900 + /* + * Below is a very old workaround (ICU ticket:231). + * + * Previously, 'FILE*' from inside and outside ICU's DLL + * were different, because they pointed into local copies + * of the io block. At least by VS 2015 the implementation + * is something like: + * stdio = _acrt_iob_func(0) + * .. which is a function call, so should return the same pointer + * regardless of call site. + * As of _MSC_VER 1900 this patch is retired, at 16 years old. + */ + if (0 <= result->fFileno && result->fFileno <= 2) { + /* stdin, stdout and stderr need to be special cased for Windows 98 */ +#if _MSC_VER >= 1400 + result->fFile = &__iob_func()[_fileno(f)]; +#else + result->fFile = &_iob[_fileno(f)]; +#endif + } + else +#endif + { + result->fFile = f; + } + + result->str.fBuffer = result->fUCBuffer; + result->str.fPos = result->fUCBuffer; + result->str.fLimit = result->fUCBuffer; + +#if !UCONFIG_NO_FORMATTING + /* if locale is 0, use the default */ + if(u_locbund_init(&result->str.fBundle, locale) == 0) { + /* DO NOT FCLOSE HERE! */ + uprv_free(result); + return 0; + } +#endif + + /* If the codepage is not "" use the ucnv_open default behavior */ + if(codepage == NULL || *codepage != '\0') { + result->fConverter = ucnv_open(codepage, &status); + } + /* else result->fConverter is already memset'd to NULL. 
*/ + + if(U_SUCCESS(status)) { + result->fOwnFile = takeOwnership; + } + else { +#if !UCONFIG_NO_FORMATTING + u_locbund_close(&result->str.fBundle); +#endif + /* DO NOT fclose here!!!!!! */ + uprv_free(result); + result = NULL; + } + + return result; +} + +U_CAPI UFILE* U_EXPORT2 /* U_CAPI ... U_EXPORT2 added by Peter Kirk 17 Nov 2001 */ +u_finit(FILE *f, + const char *locale, + const char *codepage) +{ + return finit_owner(f, locale, codepage, FALSE); +} + +U_CAPI UFILE* U_EXPORT2 +u_fadopt(FILE *f, + const char *locale, + const char *codepage) +{ + return finit_owner(f, locale, codepage, TRUE); +} + +U_CAPI UFILE* U_EXPORT2 /* U_CAPI ... U_EXPORT2 added by Peter Kirk 17 Nov 2001 */ +u_fopen(const char *filename, + const char *perm, + const char *locale, + const char *codepage) +{ + UFILE *result; + FILE *systemFile = fopen(filename, perm); + if(systemFile == 0) { + return 0; + } + + result = finit_owner(systemFile, locale, codepage, TRUE); + + if (!result) { + /* Something bad happened. + Maybe the converter couldn't be opened. */ + fclose(systemFile); + } + + return result; /* not a file leak */ +} + +U_CAPI UFILE* U_EXPORT2 +u_fopen_u(const UChar *filename, + const char *perm, + const char *locale, + const char *codepage) +{ + UFILE *result; + char buffer[256]; + + u_austrcpy(buffer, filename); + + result = u_fopen(buffer, perm, locale, codepage); +#if U_PLATFORM_USES_ONLY_WIN32_API + /* Try Windows API _wfopen if the above fails. */ + if (!result) { + FILE *systemFile = _wfopen(filename, (UChar*)perm); + if (systemFile) { + result = finit_owner(systemFile, locale, codepage, TRUE); + } + if (!result) { + /* Something bad happened. + Maybe the converter couldn't be opened. */ + fclose(systemFile); + } + } +#endif + return result; /* not a file leak */ +} + +U_CAPI UFILE* U_EXPORT2 +u_fstropen(UChar *stringBuf, + int32_t capacity, + const char *locale) +{ + UFILE *result; + + if (capacity < 0) { + return NULL; + } + + result = (UFILE*) uprv_malloc(sizeof(UFILE)); + /* Null pointer test */ + if (result == NULL) { + return NULL; /* Just get out. */ + } + uprv_memset(result, 0, sizeof(UFILE)); + result->str.fBuffer = stringBuf; + result->str.fPos = stringBuf; + result->str.fLimit = stringBuf+capacity; + +#if !UCONFIG_NO_FORMATTING + /* if locale is 0, use the default */ + if(u_locbund_init(&result->str.fBundle, locale) == 0) { + /* DO NOT FCLOSE HERE! */ + uprv_free(result); + return 0; + } +#endif + + return result; +} + +U_CAPI UBool U_EXPORT2 +u_feof(UFILE *f) +{ + UBool endOfBuffer; + if (f == NULL) { + return TRUE; + } + endOfBuffer = (UBool)(f->str.fPos >= f->str.fLimit); + if (f->fFile != NULL) { + return endOfBuffer && feof(f->fFile); + } + return endOfBuffer; +} + +U_CAPI void U_EXPORT2 +u_fflush(UFILE *file) +{ + ufile_flush_translit(file); + ufile_flush_io(file); + if (file->fFile) { + fflush(file->fFile); + } + else if (file->str.fPos < file->str.fLimit) { + *(file->str.fPos++) = 0; + } + /* TODO: flush input */ +} + +U_CAPI void +u_frewind(UFILE *file) +{ + u_fflush(file); + ucnv_reset(file->fConverter); + if (file->fFile) { + rewind(file->fFile); + file->str.fLimit = file->fUCBuffer; + file->str.fPos = file->fUCBuffer; + } + else { + file->str.fPos = file->str.fBuffer; + } +} + +U_CAPI void U_EXPORT2 /* U_CAPI ... 
U_EXPORT2 added by Peter Kirk 17 Nov 2001 */ +u_fclose(UFILE *file) +{ + if (file) { + u_fflush(file); + ufile_close_translit(file); + + if(file->fOwnFile) + fclose(file->fFile); + +#if !UCONFIG_NO_FORMATTING + u_locbund_close(&file->str.fBundle); +#endif + + ucnv_close(file->fConverter); + uprv_free(file); + } +} + +U_CAPI FILE* U_EXPORT2 /* U_CAPI ... U_EXPORT2 added by Peter Kirk 17 Nov 2001 */ +u_fgetfile( UFILE *f) +{ + return f->fFile; +} + +#if !UCONFIG_NO_FORMATTING + +U_CAPI const char* U_EXPORT2 /* U_CAPI ... U_EXPORT2 added by Peter Kirk 17 Nov 2001 */ +u_fgetlocale( UFILE *file) +{ + return file->str.fBundle.fLocale; +} + +U_CAPI int32_t U_EXPORT2 /* U_CAPI ... U_EXPORT2 added by Peter Kirk 17 Nov 2001 */ +u_fsetlocale(UFILE *file, + const char *locale) +{ + u_locbund_close(&file->str.fBundle); + + return u_locbund_init(&file->str.fBundle, locale) == 0 ? -1 : 0; +} + +#endif + +U_CAPI const char* U_EXPORT2 /* U_CAPI ... U_EXPORT2 added by Peter Kirk 17 Nov 2001 */ +u_fgetcodepage(UFILE *file) +{ + UErrorCode status = U_ZERO_ERROR; + const char *codepage = NULL; + + if (file->fConverter) { + codepage = ucnv_getName(file->fConverter, &status); + if(U_FAILURE(status)) + return 0; + } + return codepage; +} + +U_CAPI int32_t U_EXPORT2 /* U_CAPI ... U_EXPORT2 added by Peter Kirk 17 Nov 2001 */ +u_fsetcodepage( const char *codepage, + UFILE *file) +{ + UErrorCode status = U_ZERO_ERROR; + int32_t retVal = -1; + + /* We use the normal default codepage for this system, and not the one for the locale. */ + if ((file->str.fPos == file->str.fBuffer) && (file->str.fLimit == file->str.fBuffer)) { + ucnv_close(file->fConverter); + file->fConverter = ucnv_open(codepage, &status); + if(U_SUCCESS(status)) { + retVal = 0; + } + } + return retVal; +} + + +U_CAPI UConverter * U_EXPORT2 /* U_CAPI ... U_EXPORT2 added by Peter Kirk 17 Nov 2001 */ +u_fgetConverter(UFILE *file) +{ + return file->fConverter; +} +#if !UCONFIG_NO_FORMATTING +U_CAPI const UNumberFormat* U_EXPORT2 u_fgetNumberFormat(UFILE *file) +{ + return u_locbund_getNumberFormat(&file->str.fBundle, UNUM_DECIMAL); +} +#endif + +#endif diff --git a/tools/test.py b/tools/test.py index 7e9ef3d6e035a8..43d349d49a892f 100755 --- a/tools/test.py +++ b/tools/test.py @@ -1272,6 +1272,9 @@ def BuildOptions(): result.add_option("--no-store-unexpected-output", help="Deletes the temporary JS files from tests that fails", dest="store_unexpected_output", action="store_false") + result.add_option("-r", "--run", + help="Divide the tests in m groups (interleaved) and run tests from group n (--run=n,m with n < m)", + default="") return result @@ -1280,6 +1283,24 @@ def ProcessOptions(options): VERBOSE = options.verbose options.arch = options.arch.split(',') options.mode = options.mode.split(',') + options.run = options.run.split(',') + if options.run == [""]: + options.run = None + elif len(options.run) != 2: + print "The run argument must be two comma-separated integers." + return False + else: + try: + options.run = map(int, options.run) + except ValueError: + print "Could not parse the integers from the run argument." + return False + if options.run[0] < 0 or options.run[1] < 0: + print "The run argument cannot have negative integers." + return False + if options.run[0] >= options.run[1]: + print "The test group to run (n) must be smaller than number of groups (m)." 
+ return False if options.J: options.j = multiprocessing.cpu_count() return True @@ -1486,6 +1507,15 @@ def Main(): def DoSkip(case): return SKIP in case.outcomes or SLOW in case.outcomes cases_to_run = [ c for c in all_cases if not DoSkip(c) ] + if options.run is not None: + # Must ensure the list of tests is sorted before selecting, to avoid + # silent errors if this file is changed to list the tests in a way that + # can be different in different machines + cases_to_run.sort(key=lambda c: (c.case.arch, c.case.mode, c.case.file)) + cases_to_run = [ cases_to_run[i] for i + in xrange(options.run[0], + len(cases_to_run), + options.run[1]) ] if len(cases_to_run) == 0: print "No tests to run." return 1 diff --git a/tools/v8-prof/linux-tick-processor b/tools/v8-prof/linux-tick-processor new file mode 100755 index 00000000000000..858405c947fe5e --- /dev/null +++ b/tools/v8-prof/linux-tick-processor @@ -0,0 +1,23 @@ +#!/usr/bin/env bash + +umask 077 +TEMP_SCRIPT_FILE="/tmp/node-tick-processor-input-script-$$" +tools_path=`cd $(dirname "$0");pwd` +v8_tools="$tools_path/../../deps/v8/tools" + +cat "$tools_path/polyfill.js" "$v8_tools/splaytree.js" "$v8_tools/codemap.js" \ + "$v8_tools/csvparser.js" "$v8_tools/consarray.js" \ + "$v8_tools/profile.js" "$v8_tools/profile_view.js" \ + "$v8_tools/logreader.js" "$v8_tools/tickprocessor.js" \ + "$v8_tools/SourceMap.js" \ + "$v8_tools/tickprocessor-driver.js" >> "$TEMP_SCRIPT_FILE" + +NODE=${NODE:-node} + +if [ ! -x "$NODE" ] && [ -x "$(dirname "$0")/../../iojs" ]; then + NODE="$(dirname "$0")/../../iojs" +fi + +"$NODE" "$TEMP_SCRIPT_FILE" $@ + +rm -f "$TEMP_SCRIPT_FILE" diff --git a/tools/v8-prof/mac-tick-processor b/tools/v8-prof/mac-tick-processor new file mode 100755 index 00000000000000..968df80c286684 --- /dev/null +++ b/tools/v8-prof/mac-tick-processor @@ -0,0 +1,7 @@ +#!/bin/sh + +# A wrapper script to call 'linux-tick-processor' with Mac-specific settings. + +tools_path=`cd $(dirname "$0");pwd` +v8_tools="$tools_path/../../deps/v8/tools" +"$tools_path/linux-tick-processor" --mac --nm="$v8_tools/mac-nm" $@ diff --git a/tools/v8-prof/polyfill.js b/tools/v8-prof/polyfill.js new file mode 100644 index 00000000000000..0d78391b836d67 --- /dev/null +++ b/tools/v8-prof/polyfill.js @@ -0,0 +1,92 @@ +// Copyright 2012 the V8 project authors. All rights reserved. +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following +// disclaimer in the documentation and/or other materials provided +// with the distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived +// from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// Node polyfill +var fs = require('fs'); +var os = { + system: function(name, args) { + if (process.platform === 'linux' && name === 'nm') { + // Filter out vdso and vsyscall entries. + var arg = args[args.length - 1]; + if (arg === '[vdso]' || + arg == '[vsyscall]' || + /^[0-9a-f]+-[0-9a-f]+$/.test(arg)) { + return ''; + } + } + return require('child_process').execFileSync( + name, args, {encoding: 'utf8'}); + } +}; +var print = console.log; +function read(fileName) { + return fs.readFileSync(fileName, 'utf8'); +} +arguments = process.argv.slice(2); + +// Polyfill "readline()". +var fd = fs.openSync(arguments[arguments.length - 1], 'r'); +var buf = new Buffer(4096); +var dec = new (require('string_decoder').StringDecoder)('utf-8'); +var line = ''; +versionCheck(); +function readline() { + while (true) { + var lineBreak = line.indexOf('\n'); + if (lineBreak !== -1) { + var res = line.slice(0, lineBreak); + line = line.slice(lineBreak + 1); + return res; + } + var bytes = fs.readSync(fd, buf, 0, buf.length); + line += dec.write(buf.slice(0, bytes)); + if (line.length === 0) { + return false; + } + } +} + +function versionCheck() { + // v8-version looks like "v8-version,$major,$minor,$build,$patch,$candidate" + // whereas process.versions.v8 is either "$major.$minor.$build" or + // "$major.$minor.$build.$patch". + var firstLine = readline(); + line = firstLine + '\n' + line; + firstLine = firstLine.split(','); + var curVer = process.versions.v8.split('.'); + if (firstLine.length !== 6 && firstLine[0] !== 'v8-version') { + console.log('Unable to read v8-version from log file.'); + return; + } + // Compare major, minor and build; ignore the patch and candidate fields. 
+ for (var i = 0; i < 3; i++) { + if (curVer[i] !== firstLine[i + 1]) { + console.log('Testing v8 version different from logging version'); + return; + } + } +} diff --git a/tools/v8-prof/windows-tick-processor.bat b/tools/v8-prof/windows-tick-processor.bat new file mode 100755 index 00000000000000..83e6dde6b76b05 --- /dev/null +++ b/tools/v8-prof/windows-tick-processor.bat @@ -0,0 +1,19 @@ +@echo off +setlocal + +SET tools_dir=%~dp0 +SET v8_tools=%tools_dir%..\..\deps\v8\tools\ + +SET temp_script=%TEMP%\node-tick-processor-input-script + +IF NOT DEFINED NODE (SET NODE=node.exe) +%NODE% --version 2> NUL +if %ERRORLEVEL%==9009 (SET NODE=%~dp0\..\..\Release\iojs.exe) + + +type %tools_dir%polyfill.js %v8_tools%splaytree.js %v8_tools%codemap.js^ + %v8_tools%csvparser.js %v8_tools%consarray.js %v8_tools%profile.js^ + %v8_tools%profile_view.js %v8_tools%logreader.js %v8_tools%SourceMap.js^ + %v8_tools%tickprocessor.js %v8_tools%tickprocessor-driver.js >> %temp_script% +%NODE% %temp_script% --windows %* +del %temp_script% diff --git a/vcbuild.bat b/vcbuild.bat index 44dfb634d780ec..23dc4282a3aafa 100644 --- a/vcbuild.bat +++ b/vcbuild.bat @@ -38,6 +38,7 @@ set download_arg= set release_urls_arg= :next-arg +set build_release= if "%1"=="" goto args-done if /i "%1"=="debug" set config=Debug&goto arg-ok if /i "%1"=="release" set config=Release&goto arg-ok @@ -61,7 +62,9 @@ if /i "%1"=="test-internet" set test_args=%test_args% internet&goto arg-ok if /i "%1"=="test-pummel" set test_args=%test_args% pummel&goto arg-ok if /i "%1"=="test-all" set test_args=%test_args% sequential parallel message gc internet pummel&set buildnodeweak=1&set jslint=1&goto arg-ok if /i "%1"=="jslint" set jslint=1&goto arg-ok -if /i "%1"=="msi" set msi=1&set licensertf=1&goto arg-ok +@rem Include small-icu support with MSI installer +if /i "%1"=="msi" set msi=1&set licensertf=1&set download_arg="--download=all"&set i18n_arg=small-icu&goto arg-ok +if /i "%1"=="build-release" set build_release=1&goto arg-ok if /i "%1"=="upload" set upload=1&goto arg-ok if /i "%1"=="small-icu" set i18n_arg=%1&goto arg-ok if /i "%1"=="full-icu" set i18n_arg=%1&goto arg-ok @@ -74,6 +77,15 @@ echo Warning: ignoring invalid command line option `%1`. :arg-ok shift goto next-arg +if defined build_release ( + set nosnapshot=1 + set config=Release + set msi=1 + set licensertf=1 + set download_arg="--download=all" + set i18n_arg=small-icu +) + :args-done if "%config%"=="Debug" set debug_arg=--debug @@ -224,6 +236,7 @@ echo vcbuild.bat test : builds debug build and runs tests goto exit :exit +echo vcbuild.bat build-release : builds the release distribution as used by nodejs.org goto :EOF rem ***************
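Usage note for the new tools/test.py --run=n,m option added above: after the case list is sorted into a machine-independent order (by arch, mode, file), group n is simply every m-th case starting at offset n. A minimal sketch of that interleaved selection follows; the helper name select_group and the sample test list are illustrative only and are not part of the patch.

    # Minimal sketch of the interleaved test-group selection behind --run=n,m.
    # 'cases', 'n' and 'm' are illustrative; the real script sorts the full
    # case list by (arch, mode, file) before slicing.
    def select_group(cases, n, m):
        # Sort first so every machine sees the same ordering, then take every
        # m-th case starting at offset n (equivalent to cases[n::m]).
        ordered = sorted(cases)
        return [ordered[i] for i in range(n, len(ordered), m)]

    # Example: 10 tests split into 3 interleaved groups.
    tests = ['test-%02d' % i for i in range(10)]
    print(select_group(tests, 0, 3))  # ['test-00', 'test-03', 'test-06', 'test-09']
    print(select_group(tests, 1, 3))  # ['test-01', 'test-04', 'test-07']

Running the same command with n = 0 .. m-1 on m machines covers every non-skipped test exactly once.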