diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index dbf016e6a..918cb1dd9 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -2,8 +2,8 @@ * Before you open a ticket or send a pull request, [search](https://github.com/adaltas/node-csv/issues) for previous discussions about the same feature or issue. Add to the earlier ticket if you find one. -* Before sending a pull request for a feature, be sure to have [tests](https://github.com/adaltas/node-csv/tree/master/test). +* Before sending a pull request for a feature, be sure to have [tests](https://github.com/adaltas/node-csv/tree/master/packages/csv/test). -* Use the same coding style as the rest of the [codebase](https://github.com/adaltas/node-csv/tree/master/src). If you’re writing a test and if you're just getting started with CoffeeScript, there’s a nice [style guide](https://github.com/polarmobile/coffeescript-style-guide). +* Use the same coding style as the rest of the [codebase](https://github.com/adaltas/node-csv/tree/master/packages). If you’re writing a test and if you're just getting started with CoffeeScript, there’s a nice [style guide](https://github.com/polarmobile/coffeescript-style-guide). * Documentation is published on [GitHub](https://github.com/adaltas/node-csv-docs) and you are invited to submit a pull request with your changes. For convenience, you can also browse the website and click on the Edit link present at the top of every page. diff --git a/demo/eslint/.eslintrc.js b/demo/eslint/.eslintrc.js index 7ec14b54b..badc42d40 100644 --- a/demo/eslint/.eslintrc.js +++ b/demo/eslint/.eslintrc.js @@ -11,6 +11,7 @@ module.exports = { ecmaVersion: 'latest', }, rules: { - // 'import/no-unresolved': [2, { commonjs: false }], + 'import/no-unresolved': [2, { commonjs: false }], + 'no-console': 'off', }, }; diff --git a/package.json b/package.json index b8455d92f..1d8b63965 100644 --- a/package.json +++ b/package.json @@ -16,6 +16,8 @@ "build": "lerna run build", "postinstall": "husky install", "publish": "lerna publish from-git --yes", + "lint": "lerna run lint", + "pretest": "npm run lint", "test": "lerna run test", "test:legacy": "lerna run test:legacy", "version": "lerna version" diff --git a/packages/csv-generate/dist/cjs/stream.d.cts b/packages/csv-generate/dist/cjs/stream.d.cts index d53894a79..d577d3d34 100644 --- a/packages/csv-generate/dist/cjs/stream.d.cts +++ b/packages/csv-generate/dist/cjs/stream.d.cts @@ -1,5 +1,5 @@ -import { Options } from './index.js'; +import { Options } from './index.cjs'; declare function generate(options?: Options): ReadableStream; // export default generate; diff --git a/packages/csv-generate/dist/cjs/sync.d.cts b/packages/csv-generate/dist/cjs/sync.d.cts index 965ad6347..3a415acae 100644 --- a/packages/csv-generate/dist/cjs/sync.d.cts +++ b/packages/csv-generate/dist/cjs/sync.d.cts @@ -1,5 +1,5 @@ -import { Options } from './index.js'; +import { Options } from './index.cjs'; declare function generate(options: number | Options): string & Array; // export default generate; diff --git a/packages/csv-generate/dist/esm/index.js b/packages/csv-generate/dist/esm/index.js index cdd66c3c2..754c60e90 100644 --- a/packages/csv-generate/dist/esm/index.js +++ b/packages/csv-generate/dist/esm/index.js @@ -3295,6 +3295,26 @@ BufferList.prototype.concat = function (n) { }; // Copyright Joyent, Inc. and other Node contributors. 
+// +// Permission is hereby granted, free of charge, to any person obtaining a +// copy of this software and associated documentation files (the +// "Software"), to deal in the Software without restriction, including +// without limitation the rights to use, copy, modify, merge, publish, +// distribute, sublicense, and/or sell copies of the Software, and to permit +// persons to whom the Software is furnished to do so, subject to the +// following conditions: +// +// The above copyright notice and this permission notice shall be included +// in all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN +// NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, +// DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR +// OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE +// USE OR OTHER DEALINGS IN THE SOFTWARE. + var isBufferEncoding = Buffer.isEncoding || function(encoding) { switch (encoding && encoding.toLowerCase()) { @@ -4381,6 +4401,9 @@ function indexOf(xs, x) { } // A bit simpler than readable streams. +// Implement an async ._write(chunk, encoding, cb), and it'll handle all +// the drain event emission and buffering. + Writable.WritableState = WritableState; inherits$1(Writable, EventEmitter); @@ -4892,6 +4915,47 @@ function onEndNT(self) { } // a transform stream is a readable/writable stream where you do +// something with the data. Sometimes it's called a "filter", +// but that's not a great name for it, since that implies a thing where +// some bits pass through, and others are simply ignored. (That would +// be a valid example of a transform, of course.) +// +// While the output is causally related to the input, it's not a +// necessarily symmetric or synchronous transformation. For example, +// a zlib stream might take multiple plain-text writes(), and then +// emit a single compressed chunk some time in the future. +// +// Here's how this works: +// +// The Transform stream has all the aspects of the readable and writable +// stream classes. When you write(chunk), that calls _write(chunk,cb) +// internally, and returns false if there's a lot of pending writes +// buffered up. When you call read(), that calls _read(n) until +// there's enough pending readable data buffered up. +// +// In a transform stream, the written data is placed in a buffer. When +// _read(n) is called, it transforms the queued up data, calling the +// buffered _write cb's as it consumes chunks. If consuming a single +// written chunk would result in multiple output chunks, then the first +// outputted bit calls the readcb, and subsequent chunks just go into +// the read buffer, and will cause it to emit 'readable' if necessary. +// +// This way, back-pressure is actually determined by the reading side, +// since _read has to be called to start processing a new chunk. However, +// a pathological inflate type of transform can cause excessive buffering +// here. For example, imagine a stream where every byte of input is +// interpreted as an integer from 0-255, and then results in that many +// bytes of output. Writing the 4 bytes {ff,ff,ff,ff} would result in +// 1kb of data being output. In this case, you could write a very small +// amount of input, and end up with a very large amount of output. 
In +// such a pathological inflating mechanism, there'd be no way to tell +// the system to stop doing the transform. A single 4MB write could +// cause the system to run out of memory. +// +// However, even in such a pathological case, only a single written chunk +// would be consumed, and then the rest would wait (un-transformed) until +// the results of the previous transformed chunk were consumed. + inherits$1(Transform, Duplex); function TransformState(stream) { diff --git a/packages/csv-generate/dist/esm/sync.js b/packages/csv-generate/dist/esm/sync.js index f67f66b7c..53d033ef6 100644 --- a/packages/csv-generate/dist/esm/sync.js +++ b/packages/csv-generate/dist/esm/sync.js @@ -3295,6 +3295,26 @@ BufferList.prototype.concat = function (n) { }; // Copyright Joyent, Inc. and other Node contributors. +// +// Permission is hereby granted, free of charge, to any person obtaining a +// copy of this software and associated documentation files (the +// "Software"), to deal in the Software without restriction, including +// without limitation the rights to use, copy, modify, merge, publish, +// distribute, sublicense, and/or sell copies of the Software, and to permit +// persons to whom the Software is furnished to do so, subject to the +// following conditions: +// +// The above copyright notice and this permission notice shall be included +// in all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN +// NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, +// DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR +// OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE +// USE OR OTHER DEALINGS IN THE SOFTWARE. + var isBufferEncoding = Buffer.isEncoding || function(encoding) { switch (encoding && encoding.toLowerCase()) { @@ -4381,6 +4401,9 @@ function indexOf(xs, x) { } // A bit simpler than readable streams. +// Implement an async ._write(chunk, encoding, cb), and it'll handle all +// the drain event emission and buffering. + Writable.WritableState = WritableState; inherits$1(Writable, EventEmitter); @@ -4892,6 +4915,47 @@ function onEndNT(self) { } // a transform stream is a readable/writable stream where you do +// something with the data. Sometimes it's called a "filter", +// but that's not a great name for it, since that implies a thing where +// some bits pass through, and others are simply ignored. (That would +// be a valid example of a transform, of course.) +// +// While the output is causally related to the input, it's not a +// necessarily symmetric or synchronous transformation. For example, +// a zlib stream might take multiple plain-text writes(), and then +// emit a single compressed chunk some time in the future. +// +// Here's how this works: +// +// The Transform stream has all the aspects of the readable and writable +// stream classes. When you write(chunk), that calls _write(chunk,cb) +// internally, and returns false if there's a lot of pending writes +// buffered up. When you call read(), that calls _read(n) until +// there's enough pending readable data buffered up. +// +// In a transform stream, the written data is placed in a buffer. When +// _read(n) is called, it transforms the queued up data, calling the +// buffered _write cb's as it consumes chunks. 
If consuming a single +// written chunk would result in multiple output chunks, then the first +// outputted bit calls the readcb, and subsequent chunks just go into +// the read buffer, and will cause it to emit 'readable' if necessary. +// +// This way, back-pressure is actually determined by the reading side, +// since _read has to be called to start processing a new chunk. However, +// a pathological inflate type of transform can cause excessive buffering +// here. For example, imagine a stream where every byte of input is +// interpreted as an integer from 0-255, and then results in that many +// bytes of output. Writing the 4 bytes {ff,ff,ff,ff} would result in +// 1kb of data being output. In this case, you could write a very small +// amount of input, and end up with a very large amount of output. In +// such a pathological inflating mechanism, there'd be no way to tell +// the system to stop doing the transform. A single 4MB write could +// cause the system to run out of memory. +// +// However, even in such a pathological case, only a single written chunk +// would be consumed, and then the rest would wait (un-transformed) until +// the results of the previous transformed chunk were consumed. + inherits$1(Transform, Duplex); function TransformState(stream) { diff --git a/packages/csv-generate/dist/iife/index.js b/packages/csv-generate/dist/iife/index.js index 99a20924e..ace799875 100644 --- a/packages/csv-generate/dist/iife/index.js +++ b/packages/csv-generate/dist/iife/index.js @@ -3298,6 +3298,26 @@ var csv_generate = (function (exports) { }; // Copyright Joyent, Inc. and other Node contributors. + // + // Permission is hereby granted, free of charge, to any person obtaining a + // copy of this software and associated documentation files (the + // "Software"), to deal in the Software without restriction, including + // without limitation the rights to use, copy, modify, merge, publish, + // distribute, sublicense, and/or sell copies of the Software, and to permit + // persons to whom the Software is furnished to do so, subject to the + // following conditions: + // + // The above copyright notice and this permission notice shall be included + // in all copies or substantial portions of the Software. + // + // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + // OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + // MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN + // NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, + // DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR + // OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE + // USE OR OTHER DEALINGS IN THE SOFTWARE. + var isBufferEncoding = Buffer.isEncoding || function(encoding) { switch (encoding && encoding.toLowerCase()) { @@ -4384,6 +4404,9 @@ var csv_generate = (function (exports) { } // A bit simpler than readable streams. + // Implement an async ._write(chunk, encoding, cb), and it'll handle all + // the drain event emission and buffering. + Writable.WritableState = WritableState; inherits$1(Writable, EventEmitter); @@ -4895,6 +4918,47 @@ var csv_generate = (function (exports) { } // a transform stream is a readable/writable stream where you do + // something with the data. Sometimes it's called a "filter", + // but that's not a great name for it, since that implies a thing where + // some bits pass through, and others are simply ignored. 
(That would + // be a valid example of a transform, of course.) + // + // While the output is causally related to the input, it's not a + // necessarily symmetric or synchronous transformation. For example, + // a zlib stream might take multiple plain-text writes(), and then + // emit a single compressed chunk some time in the future. + // + // Here's how this works: + // + // The Transform stream has all the aspects of the readable and writable + // stream classes. When you write(chunk), that calls _write(chunk,cb) + // internally, and returns false if there's a lot of pending writes + // buffered up. When you call read(), that calls _read(n) until + // there's enough pending readable data buffered up. + // + // In a transform stream, the written data is placed in a buffer. When + // _read(n) is called, it transforms the queued up data, calling the + // buffered _write cb's as it consumes chunks. If consuming a single + // written chunk would result in multiple output chunks, then the first + // outputted bit calls the readcb, and subsequent chunks just go into + // the read buffer, and will cause it to emit 'readable' if necessary. + // + // This way, back-pressure is actually determined by the reading side, + // since _read has to be called to start processing a new chunk. However, + // a pathological inflate type of transform can cause excessive buffering + // here. For example, imagine a stream where every byte of input is + // interpreted as an integer from 0-255, and then results in that many + // bytes of output. Writing the 4 bytes {ff,ff,ff,ff} would result in + // 1kb of data being output. In this case, you could write a very small + // amount of input, and end up with a very large amount of output. In + // such a pathological inflating mechanism, there'd be no way to tell + // the system to stop doing the transform. A single 4MB write could + // cause the system to run out of memory. + // + // However, even in such a pathological case, only a single written chunk + // would be consumed, and then the rest would wait (un-transformed) until + // the results of the previous transformed chunk were consumed. + inherits$1(Transform, Duplex); function TransformState(stream) { diff --git a/packages/csv-generate/dist/iife/sync.js b/packages/csv-generate/dist/iife/sync.js index fb5fe5ace..b497c3375 100644 --- a/packages/csv-generate/dist/iife/sync.js +++ b/packages/csv-generate/dist/iife/sync.js @@ -3298,6 +3298,26 @@ var csv_generate_sync = (function (exports) { }; // Copyright Joyent, Inc. and other Node contributors. + // + // Permission is hereby granted, free of charge, to any person obtaining a + // copy of this software and associated documentation files (the + // "Software"), to deal in the Software without restriction, including + // without limitation the rights to use, copy, modify, merge, publish, + // distribute, sublicense, and/or sell copies of the Software, and to permit + // persons to whom the Software is furnished to do so, subject to the + // following conditions: + // + // The above copyright notice and this permission notice shall be included + // in all copies or substantial portions of the Software. + // + // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + // OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + // MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN + // NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, + // DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR + // OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE + // USE OR OTHER DEALINGS IN THE SOFTWARE. + var isBufferEncoding = Buffer.isEncoding || function(encoding) { switch (encoding && encoding.toLowerCase()) { @@ -4384,6 +4404,9 @@ var csv_generate_sync = (function (exports) { } // A bit simpler than readable streams. + // Implement an async ._write(chunk, encoding, cb), and it'll handle all + // the drain event emission and buffering. + Writable.WritableState = WritableState; inherits$1(Writable, EventEmitter); @@ -4895,6 +4918,47 @@ var csv_generate_sync = (function (exports) { } // a transform stream is a readable/writable stream where you do + // something with the data. Sometimes it's called a "filter", + // but that's not a great name for it, since that implies a thing where + // some bits pass through, and others are simply ignored. (That would + // be a valid example of a transform, of course.) + // + // While the output is causally related to the input, it's not a + // necessarily symmetric or synchronous transformation. For example, + // a zlib stream might take multiple plain-text writes(), and then + // emit a single compressed chunk some time in the future. + // + // Here's how this works: + // + // The Transform stream has all the aspects of the readable and writable + // stream classes. When you write(chunk), that calls _write(chunk,cb) + // internally, and returns false if there's a lot of pending writes + // buffered up. When you call read(), that calls _read(n) until + // there's enough pending readable data buffered up. + // + // In a transform stream, the written data is placed in a buffer. When + // _read(n) is called, it transforms the queued up data, calling the + // buffered _write cb's as it consumes chunks. If consuming a single + // written chunk would result in multiple output chunks, then the first + // outputted bit calls the readcb, and subsequent chunks just go into + // the read buffer, and will cause it to emit 'readable' if necessary. + // + // This way, back-pressure is actually determined by the reading side, + // since _read has to be called to start processing a new chunk. However, + // a pathological inflate type of transform can cause excessive buffering + // here. For example, imagine a stream where every byte of input is + // interpreted as an integer from 0-255, and then results in that many + // bytes of output. Writing the 4 bytes {ff,ff,ff,ff} would result in + // 1kb of data being output. In this case, you could write a very small + // amount of input, and end up with a very large amount of output. In + // such a pathological inflating mechanism, there'd be no way to tell + // the system to stop doing the transform. A single 4MB write could + // cause the system to run out of memory. + // + // However, even in such a pathological case, only a single written chunk + // would be consumed, and then the rest would wait (un-transformed) until + // the results of the previous transformed chunk were consumed. + inherits$1(Transform, Duplex); function TransformState(stream) { diff --git a/packages/csv-generate/dist/umd/index.js b/packages/csv-generate/dist/umd/index.js index be023e15c..345bb7e34 100644 --- a/packages/csv-generate/dist/umd/index.js +++ b/packages/csv-generate/dist/umd/index.js @@ -3301,6 +3301,26 @@ }; // Copyright Joyent, Inc. and other Node contributors. 
+ // + // Permission is hereby granted, free of charge, to any person obtaining a + // copy of this software and associated documentation files (the + // "Software"), to deal in the Software without restriction, including + // without limitation the rights to use, copy, modify, merge, publish, + // distribute, sublicense, and/or sell copies of the Software, and to permit + // persons to whom the Software is furnished to do so, subject to the + // following conditions: + // + // The above copyright notice and this permission notice shall be included + // in all copies or substantial portions of the Software. + // + // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + // OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + // MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN + // NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, + // DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR + // OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE + // USE OR OTHER DEALINGS IN THE SOFTWARE. + var isBufferEncoding = Buffer.isEncoding || function(encoding) { switch (encoding && encoding.toLowerCase()) { @@ -4387,6 +4407,9 @@ } // A bit simpler than readable streams. + // Implement an async ._write(chunk, encoding, cb), and it'll handle all + // the drain event emission and buffering. + Writable.WritableState = WritableState; inherits$1(Writable, EventEmitter); @@ -4898,6 +4921,47 @@ } // a transform stream is a readable/writable stream where you do + // something with the data. Sometimes it's called a "filter", + // but that's not a great name for it, since that implies a thing where + // some bits pass through, and others are simply ignored. (That would + // be a valid example of a transform, of course.) + // + // While the output is causally related to the input, it's not a + // necessarily symmetric or synchronous transformation. For example, + // a zlib stream might take multiple plain-text writes(), and then + // emit a single compressed chunk some time in the future. + // + // Here's how this works: + // + // The Transform stream has all the aspects of the readable and writable + // stream classes. When you write(chunk), that calls _write(chunk,cb) + // internally, and returns false if there's a lot of pending writes + // buffered up. When you call read(), that calls _read(n) until + // there's enough pending readable data buffered up. + // + // In a transform stream, the written data is placed in a buffer. When + // _read(n) is called, it transforms the queued up data, calling the + // buffered _write cb's as it consumes chunks. If consuming a single + // written chunk would result in multiple output chunks, then the first + // outputted bit calls the readcb, and subsequent chunks just go into + // the read buffer, and will cause it to emit 'readable' if necessary. + // + // This way, back-pressure is actually determined by the reading side, + // since _read has to be called to start processing a new chunk. However, + // a pathological inflate type of transform can cause excessive buffering + // here. For example, imagine a stream where every byte of input is + // interpreted as an integer from 0-255, and then results in that many + // bytes of output. Writing the 4 bytes {ff,ff,ff,ff} would result in + // 1kb of data being output. In this case, you could write a very small + // amount of input, and end up with a very large amount of output. 
In + // such a pathological inflating mechanism, there'd be no way to tell + // the system to stop doing the transform. A single 4MB write could + // cause the system to run out of memory. + // + // However, even in such a pathological case, only a single written chunk + // would be consumed, and then the rest would wait (un-transformed) until + // the results of the previous transformed chunk were consumed. + inherits$1(Transform, Duplex); function TransformState(stream) { diff --git a/packages/csv-generate/dist/umd/sync.js b/packages/csv-generate/dist/umd/sync.js index 275ff404a..53d7d282e 100644 --- a/packages/csv-generate/dist/umd/sync.js +++ b/packages/csv-generate/dist/umd/sync.js @@ -3301,6 +3301,26 @@ }; // Copyright Joyent, Inc. and other Node contributors. + // + // Permission is hereby granted, free of charge, to any person obtaining a + // copy of this software and associated documentation files (the + // "Software"), to deal in the Software without restriction, including + // without limitation the rights to use, copy, modify, merge, publish, + // distribute, sublicense, and/or sell copies of the Software, and to permit + // persons to whom the Software is furnished to do so, subject to the + // following conditions: + // + // The above copyright notice and this permission notice shall be included + // in all copies or substantial portions of the Software. + // + // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + // OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + // MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN + // NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, + // DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR + // OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE + // USE OR OTHER DEALINGS IN THE SOFTWARE. + var isBufferEncoding = Buffer.isEncoding || function(encoding) { switch (encoding && encoding.toLowerCase()) { @@ -4387,6 +4407,9 @@ } // A bit simpler than readable streams. + // Implement an async ._write(chunk, encoding, cb), and it'll handle all + // the drain event emission and buffering. + Writable.WritableState = WritableState; inherits$1(Writable, EventEmitter); @@ -4898,6 +4921,47 @@ } // a transform stream is a readable/writable stream where you do + // something with the data. Sometimes it's called a "filter", + // but that's not a great name for it, since that implies a thing where + // some bits pass through, and others are simply ignored. (That would + // be a valid example of a transform, of course.) + // + // While the output is causally related to the input, it's not a + // necessarily symmetric or synchronous transformation. For example, + // a zlib stream might take multiple plain-text writes(), and then + // emit a single compressed chunk some time in the future. + // + // Here's how this works: + // + // The Transform stream has all the aspects of the readable and writable + // stream classes. When you write(chunk), that calls _write(chunk,cb) + // internally, and returns false if there's a lot of pending writes + // buffered up. When you call read(), that calls _read(n) until + // there's enough pending readable data buffered up. + // + // In a transform stream, the written data is placed in a buffer. When + // _read(n) is called, it transforms the queued up data, calling the + // buffered _write cb's as it consumes chunks. 
If consuming a single + // written chunk would result in multiple output chunks, then the first + // outputted bit calls the readcb, and subsequent chunks just go into + // the read buffer, and will cause it to emit 'readable' if necessary. + // + // This way, back-pressure is actually determined by the reading side, + // since _read has to be called to start processing a new chunk. However, + // a pathological inflate type of transform can cause excessive buffering + // here. For example, imagine a stream where every byte of input is + // interpreted as an integer from 0-255, and then results in that many + // bytes of output. Writing the 4 bytes {ff,ff,ff,ff} would result in + // 1kb of data being output. In this case, you could write a very small + // amount of input, and end up with a very large amount of output. In + // such a pathological inflating mechanism, there'd be no way to tell + // the system to stop doing the transform. A single 4MB write could + // cause the system to run out of memory. + // + // However, even in such a pathological case, only a single written chunk + // would be consumed, and then the rest would wait (un-transformed) until + // the results of the previous transformed chunk were consumed. + inherits$1(Transform, Duplex); function TransformState(stream) { diff --git a/packages/csv-generate/lib/stream.js b/packages/csv-generate/lib/stream.js index a2679ccef..38ca13946 100644 --- a/packages/csv-generate/lib/stream.js +++ b/packages/csv-generate/lib/stream.js @@ -11,13 +11,13 @@ const generate = (opts) => { async pull(controller) { read(options, state, 1024, function(chunk) { chunk = Buffer.from(chunk); - controller.enqueue(chunk) + controller.enqueue(chunk); }, function(){ - controller.close() + controller.close(); }); } }, {highWaterMark: 1024}); // return new Generator(options || {}) -} +}; export {generate}; diff --git a/packages/csv-generate/package.json b/packages/csv-generate/package.json index 40b981228..f55d04022 100644 --- a/packages/csv-generate/package.json +++ b/packages/csv-generate/package.json @@ -94,7 +94,9 @@ "build": "npm run build:rollup && npm run build:ts", "build:rollup": "npx rollup -c", "build:ts": "cp lib/index.d.ts dist/cjs/index.d.cts && cp lib/stream.d.ts dist/cjs/stream.d.cts && cp lib/sync.d.ts dist/cjs/sync.d.cts && cp lib/*.ts dist/esm", + "postbuild:ts": "find dist/cjs -name '*.d.cts' -exec sh -c \"sed -i \"s/\\.js'/\\.cjs'/g\" {} || sed -i '' \"s/\\.js'/\\.cjs'/g\" {}\" \\;", "lint": "npm run lint:lib && npm run lint:samples && npm run lint:test", + "postlint": "tsc --noEmit true", "lint:lib": "eslint --fix lib/*.js", "lint:samples": "eslint --fix samples/*.js", "lint:test": "coffeelint --fix test/*.coffee", diff --git a/packages/csv-generate/test/api.end.coffee b/packages/csv-generate/test/api.end.coffee index af05742e5..93421cb60 100644 --- a/packages/csv-generate/test/api.end.coffee +++ b/packages/csv-generate/test/api.end.coffee @@ -1,6 +1,6 @@ import { generate } from '../lib/index.js' -import { Writable } from 'stream'; +import { Writable } from 'stream' describe 'api end', -> diff --git a/packages/csv-generate/test/api.web_stream.coffee b/packages/csv-generate/test/api.web_stream.coffee index 6bace88da..5a0d979f6 100644 --- a/packages/csv-generate/test/api.web_stream.coffee +++ b/packages/csv-generate/test/api.web_stream.coffee @@ -3,13 +3,13 @@ import {generate as generateStream} from '../lib/stream.js' import {generate as generateClassic} from '../lib/index.js' describe 'api stream', -> - + it.skip 'perf classic', -> 
console.time('classic') generator = generateClassic({ objectMode: true, length: 10000000 - }); + }) for await record from generator continue console.timeEnd('classic') @@ -19,7 +19,7 @@ describe 'api stream', -> generator = generateStream({ objectMode: true, length: 10000000 - }); + }) reader = generator.getReader() while true { done, value } = await reader.read() @@ -32,7 +32,7 @@ describe 'api stream', -> generator = generateStream({ objectMode: true, length: 10 - }); + }) records = [] for await record from generator records.push record @@ -42,7 +42,7 @@ describe 'api stream', -> generator = generateStream({ objectMode: true, length: 10 - }); + }) records = [] reader = generator.getReader() while true diff --git a/packages/csv-generate/test/options.sleep.coffee b/packages/csv-generate/test/options.sleep.coffee index 29029577e..01f0dded3 100644 --- a/packages/csv-generate/test/options.sleep.coffee +++ b/packages/csv-generate/test/options.sleep.coffee @@ -18,8 +18,8 @@ describe 'option sleep', -> length: 2 sleep: 10 .on 'readable', -> - while (record = this.read()) isnt null - records.push record.toString() + while (record = this.read()) isnt null + records.push record.toString() .on 'error', next .on 'end', -> records.join().split('\n').length.should.eql 2 diff --git a/packages/csv-parse/dist/cjs/sync.d.cts b/packages/csv-parse/dist/cjs/sync.d.cts index b507e8b9c..835666e55 100644 --- a/packages/csv-parse/dist/cjs/sync.d.cts +++ b/packages/csv-parse/dist/cjs/sync.d.cts @@ -1,5 +1,5 @@ -import { Options } from './index.js'; +import { Options } from './index.cjs'; declare function parse(input: Buffer | string, options?: Options): any; // export default parse; @@ -8,4 +8,4 @@ export { parse }; export { CastingContext, CastingFunction, CastingDateFunction, ColumnOption, Options, Info, CsvErrorCode, CsvError -} from './index.js'; +} from './index.cjs'; diff --git a/packages/csv-parse/dist/esm/index.js b/packages/csv-parse/dist/esm/index.js index 9033260b3..732835411 100644 --- a/packages/csv-parse/dist/esm/index.js +++ b/packages/csv-parse/dist/esm/index.js @@ -3225,6 +3225,26 @@ BufferList.prototype.concat = function (n) { }; // Copyright Joyent, Inc. and other Node contributors. +// +// Permission is hereby granted, free of charge, to any person obtaining a +// copy of this software and associated documentation files (the +// "Software"), to deal in the Software without restriction, including +// without limitation the rights to use, copy, modify, merge, publish, +// distribute, sublicense, and/or sell copies of the Software, and to permit +// persons to whom the Software is furnished to do so, subject to the +// following conditions: +// +// The above copyright notice and this permission notice shall be included +// in all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN +// NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, +// DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR +// OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE +// USE OR OTHER DEALINGS IN THE SOFTWARE. + var isBufferEncoding = Buffer.isEncoding || function(encoding) { switch (encoding && encoding.toLowerCase()) { @@ -4311,6 +4331,9 @@ function indexOf(xs, x) { } // A bit simpler than readable streams. 
+// Implement an async ._write(chunk, encoding, cb), and it'll handle all +// the drain event emission and buffering. + Writable.WritableState = WritableState; inherits$1(Writable, EventEmitter); @@ -4822,6 +4845,47 @@ function onEndNT(self) { } // a transform stream is a readable/writable stream where you do +// something with the data. Sometimes it's called a "filter", +// but that's not a great name for it, since that implies a thing where +// some bits pass through, and others are simply ignored. (That would +// be a valid example of a transform, of course.) +// +// While the output is causally related to the input, it's not a +// necessarily symmetric or synchronous transformation. For example, +// a zlib stream might take multiple plain-text writes(), and then +// emit a single compressed chunk some time in the future. +// +// Here's how this works: +// +// The Transform stream has all the aspects of the readable and writable +// stream classes. When you write(chunk), that calls _write(chunk,cb) +// internally, and returns false if there's a lot of pending writes +// buffered up. When you call read(), that calls _read(n) until +// there's enough pending readable data buffered up. +// +// In a transform stream, the written data is placed in a buffer. When +// _read(n) is called, it transforms the queued up data, calling the +// buffered _write cb's as it consumes chunks. If consuming a single +// written chunk would result in multiple output chunks, then the first +// outputted bit calls the readcb, and subsequent chunks just go into +// the read buffer, and will cause it to emit 'readable' if necessary. +// +// This way, back-pressure is actually determined by the reading side, +// since _read has to be called to start processing a new chunk. However, +// a pathological inflate type of transform can cause excessive buffering +// here. For example, imagine a stream where every byte of input is +// interpreted as an integer from 0-255, and then results in that many +// bytes of output. Writing the 4 bytes {ff,ff,ff,ff} would result in +// 1kb of data being output. In this case, you could write a very small +// amount of input, and end up with a very large amount of output. In +// such a pathological inflating mechanism, there'd be no way to tell +// the system to stop doing the transform. A single 4MB write could +// cause the system to run out of memory. +// +// However, even in such a pathological case, only a single written chunk +// would be consumed, and then the rest would wait (un-transformed) until +// the results of the previous transformed chunk were consumed. + inherits$1(Transform, Duplex); function TransformState(stream) { diff --git a/packages/csv-parse/dist/iife/index.js b/packages/csv-parse/dist/iife/index.js index 858abbba1..1638bc9fe 100644 --- a/packages/csv-parse/dist/iife/index.js +++ b/packages/csv-parse/dist/iife/index.js @@ -3228,6 +3228,26 @@ var csv_parse = (function (exports) { }; // Copyright Joyent, Inc. and other Node contributors. 
+ // + // Permission is hereby granted, free of charge, to any person obtaining a + // copy of this software and associated documentation files (the + // "Software"), to deal in the Software without restriction, including + // without limitation the rights to use, copy, modify, merge, publish, + // distribute, sublicense, and/or sell copies of the Software, and to permit + // persons to whom the Software is furnished to do so, subject to the + // following conditions: + // + // The above copyright notice and this permission notice shall be included + // in all copies or substantial portions of the Software. + // + // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + // OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + // MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN + // NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, + // DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR + // OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE + // USE OR OTHER DEALINGS IN THE SOFTWARE. + var isBufferEncoding = Buffer.isEncoding || function(encoding) { switch (encoding && encoding.toLowerCase()) { @@ -4314,6 +4334,9 @@ var csv_parse = (function (exports) { } // A bit simpler than readable streams. + // Implement an async ._write(chunk, encoding, cb), and it'll handle all + // the drain event emission and buffering. + Writable.WritableState = WritableState; inherits$1(Writable, EventEmitter); @@ -4825,6 +4848,47 @@ var csv_parse = (function (exports) { } // a transform stream is a readable/writable stream where you do + // something with the data. Sometimes it's called a "filter", + // but that's not a great name for it, since that implies a thing where + // some bits pass through, and others are simply ignored. (That would + // be a valid example of a transform, of course.) + // + // While the output is causally related to the input, it's not a + // necessarily symmetric or synchronous transformation. For example, + // a zlib stream might take multiple plain-text writes(), and then + // emit a single compressed chunk some time in the future. + // + // Here's how this works: + // + // The Transform stream has all the aspects of the readable and writable + // stream classes. When you write(chunk), that calls _write(chunk,cb) + // internally, and returns false if there's a lot of pending writes + // buffered up. When you call read(), that calls _read(n) until + // there's enough pending readable data buffered up. + // + // In a transform stream, the written data is placed in a buffer. When + // _read(n) is called, it transforms the queued up data, calling the + // buffered _write cb's as it consumes chunks. If consuming a single + // written chunk would result in multiple output chunks, then the first + // outputted bit calls the readcb, and subsequent chunks just go into + // the read buffer, and will cause it to emit 'readable' if necessary. + // + // This way, back-pressure is actually determined by the reading side, + // since _read has to be called to start processing a new chunk. However, + // a pathological inflate type of transform can cause excessive buffering + // here. For example, imagine a stream where every byte of input is + // interpreted as an integer from 0-255, and then results in that many + // bytes of output. Writing the 4 bytes {ff,ff,ff,ff} would result in + // 1kb of data being output. 
In this case, you could write a very small + // amount of input, and end up with a very large amount of output. In + // such a pathological inflating mechanism, there'd be no way to tell + // the system to stop doing the transform. A single 4MB write could + // cause the system to run out of memory. + // + // However, even in such a pathological case, only a single written chunk + // would be consumed, and then the rest would wait (un-transformed) until + // the results of the previous transformed chunk were consumed. + inherits$1(Transform, Duplex); function TransformState(stream) { diff --git a/packages/csv-parse/dist/umd/index.js b/packages/csv-parse/dist/umd/index.js index 5824a4475..d691d0fe1 100644 --- a/packages/csv-parse/dist/umd/index.js +++ b/packages/csv-parse/dist/umd/index.js @@ -3231,6 +3231,26 @@ }; // Copyright Joyent, Inc. and other Node contributors. + // + // Permission is hereby granted, free of charge, to any person obtaining a + // copy of this software and associated documentation files (the + // "Software"), to deal in the Software without restriction, including + // without limitation the rights to use, copy, modify, merge, publish, + // distribute, sublicense, and/or sell copies of the Software, and to permit + // persons to whom the Software is furnished to do so, subject to the + // following conditions: + // + // The above copyright notice and this permission notice shall be included + // in all copies or substantial portions of the Software. + // + // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + // OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + // MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN + // NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, + // DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR + // OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE + // USE OR OTHER DEALINGS IN THE SOFTWARE. + var isBufferEncoding = Buffer.isEncoding || function(encoding) { switch (encoding && encoding.toLowerCase()) { @@ -4317,6 +4337,9 @@ } // A bit simpler than readable streams. + // Implement an async ._write(chunk, encoding, cb), and it'll handle all + // the drain event emission and buffering. + Writable.WritableState = WritableState; inherits$1(Writable, EventEmitter); @@ -4828,6 +4851,47 @@ } // a transform stream is a readable/writable stream where you do + // something with the data. Sometimes it's called a "filter", + // but that's not a great name for it, since that implies a thing where + // some bits pass through, and others are simply ignored. (That would + // be a valid example of a transform, of course.) + // + // While the output is causally related to the input, it's not a + // necessarily symmetric or synchronous transformation. For example, + // a zlib stream might take multiple plain-text writes(), and then + // emit a single compressed chunk some time in the future. + // + // Here's how this works: + // + // The Transform stream has all the aspects of the readable and writable + // stream classes. When you write(chunk), that calls _write(chunk,cb) + // internally, and returns false if there's a lot of pending writes + // buffered up. When you call read(), that calls _read(n) until + // there's enough pending readable data buffered up. + // + // In a transform stream, the written data is placed in a buffer. 
When + // _read(n) is called, it transforms the queued up data, calling the + // buffered _write cb's as it consumes chunks. If consuming a single + // written chunk would result in multiple output chunks, then the first + // outputted bit calls the readcb, and subsequent chunks just go into + // the read buffer, and will cause it to emit 'readable' if necessary. + // + // This way, back-pressure is actually determined by the reading side, + // since _read has to be called to start processing a new chunk. However, + // a pathological inflate type of transform can cause excessive buffering + // here. For example, imagine a stream where every byte of input is + // interpreted as an integer from 0-255, and then results in that many + // bytes of output. Writing the 4 bytes {ff,ff,ff,ff} would result in + // 1kb of data being output. In this case, you could write a very small + // amount of input, and end up with a very large amount of output. In + // such a pathological inflating mechanism, there'd be no way to tell + // the system to stop doing the transform. A single 4MB write could + // cause the system to run out of memory. + // + // However, even in such a pathological case, only a single written chunk + // would be consumed, and then the rest would wait (un-transformed) until + // the results of the previous transformed chunk were consumed. + inherits$1(Transform, Duplex); function TransformState(stream) { diff --git a/packages/csv-parse/lib/stream.js b/packages/csv-parse/lib/stream.js index effa7a8d8..3aba296b0 100644 --- a/packages/csv-parse/lib/stream.js +++ b/packages/csv-parse/lib/stream.js @@ -8,20 +8,20 @@ const parse = (opts) => { const api = transform(opts); return new TransformStream({ async transform(chunk, controller) { - const err = api.parse(chunk, false, (record) => { + api.parse(chunk, false, (record) => { controller.enqueue(record); }, () => { - controller.close() + controller.close(); }); }, async flush(controller){ - const err = api.parse(undefined, true, (record) => { + api.parse(undefined, true, (record) => { controller.enqueue(record); }, () => { - controller.close() + controller.close(); }); } }); -} +}; export {parse}; diff --git a/packages/csv-parse/package.json b/packages/csv-parse/package.json index c5f1f6048..989a949bd 100644 --- a/packages/csv-parse/package.json +++ b/packages/csv-parse/package.json @@ -107,7 +107,9 @@ "build": "npm run build:rollup && npm run build:ts", "build:rollup": "npx rollup -c", "build:ts": "cp lib/index.d.ts dist/cjs/index.d.cts && cp lib/sync.d.ts dist/cjs/sync.d.cts && cp lib/*.ts dist/esm", + "postbuild:ts": "find dist/cjs -name '*.d.cts' -exec sh -c \"sed -i \"s/\\.js'/\\.cjs'/g\" {} || sed -i '' \"s/\\.js'/\\.cjs'/g\" {}\" \\;", "lint": "npm run lint:lib && npm run lint:samples && npm run lint:test", + "postlint": "tsc --noEmit true", "lint:lib": "eslint --fix lib/*.js", "lint:samples": "eslint --fix samples/*.js", "lint:test": "coffeelint --fix test/*.coffee", diff --git a/packages/csv-parse/samples/option.raw.js b/packages/csv-parse/samples/option.raw.js index 2ddfa27dc..c4161bc84 100644 --- a/packages/csv-parse/samples/option.raw.js +++ b/packages/csv-parse/samples/option.raw.js @@ -9,5 +9,5 @@ d,e,f assert.deepStrictEqual(records, [ { record: [ 'a', 'b', 'c' ], raw: 'a,b,c\n' }, { record: [ 'd', 'e', 'f' ], raw: 'd,e,f' } - ]) -}) + ]); +}); diff --git a/packages/csv-parse/samples/option.to.js b/packages/csv-parse/samples/option.to.js index 9a8c77e47..31915e6ca 100644 --- a/packages/csv-parse/samples/option.to.js +++ 
b/packages/csv-parse/samples/option.to.js @@ -11,7 +11,7 @@ a,b columns: true, to: 2 }, function(err, records){ - console.log(err, records) + console.log(err, records); assert.deepStrictEqual( records, [{ a: '1', diff --git a/packages/csv-parse/test/api.web_stream.coffee b/packages/csv-parse/test/api.web_stream.coffee index 7ba7b865e..5165f8776 100644 --- a/packages/csv-parse/test/api.web_stream.coffee +++ b/packages/csv-parse/test/api.web_stream.coffee @@ -4,13 +4,13 @@ import {parse as parseStream} from '../lib/stream.js' import {parse as parseClassic} from '../lib/index.js' describe 'api stream', -> - + it.skip 'perf classic', -> console.time('classic') generator = generateClassic({ objectMode: true, length: 10000000 - }); + }) for await record from generator continue console.timeEnd('classic') @@ -20,7 +20,7 @@ describe 'api stream', -> generator = generateStream({ objectMode: true, length: 10000000 - }); + }) reader = generator.getReader() while true { done, value } = await reader.read() diff --git a/packages/csv-parse/test/option.objname.coffee b/packages/csv-parse/test/option.objname.coffee index 42fff8084..a3b914d9f 100644 --- a/packages/csv-parse/test/option.objname.coffee +++ b/packages/csv-parse/test/option.objname.coffee @@ -2,9 +2,9 @@ import { parse } from '../lib/index.js' describe 'Option `objname`', -> - + describe 'validation', -> - + it 'basic', -> parse '', objname: 'sth', columns: true, (->) parse '', objname: Buffer.from('sth'), columns: true, (->) @@ -20,7 +20,7 @@ describe 'Option `objname`', -> (-> parse '', objname: true, (->) ).should.throw 'Invalid Option: objname must be a string or a buffer, got true' - + it 'field require columns', -> (-> parse '', objname: 'field', (->) @@ -29,7 +29,7 @@ describe 'Option `objname`', -> 'objname field must be combined with columns' 'or be defined as an index' ].join ' ' - + it 'index incompatible with columns', -> (-> parse '', objname: 1, columns: true, (->) @@ -38,9 +38,9 @@ describe 'Option `objname`', -> 'objname index cannot be combined with columns' 'or be defined as a field' ].join ' ' - + describe 'map to a field', -> - + it 'convert a buffer to a column name', (next) -> parse ''' a,b,c @@ -110,10 +110,10 @@ describe 'Option `objname`', -> 'FIELD_2': 'e' 'FIELD_3': 'f' info: - bytes: 35 + bytes: 35 lines: 3 next() - + describe 'map to an index', -> it 'get value associated with index', (next) -> diff --git a/packages/csv-stringify/dist/cjs/sync.d.cts b/packages/csv-stringify/dist/cjs/sync.d.cts index 3814c21db..98699bd2d 100644 --- a/packages/csv-stringify/dist/cjs/sync.d.cts +++ b/packages/csv-stringify/dist/cjs/sync.d.cts @@ -1,5 +1,5 @@ -import { Input, Options } from './index.js' +import { Input, Options } from './index.cjs' declare function stringify(input: Input, options?: Options): string @@ -9,4 +9,4 @@ export { stringify }; export { RecordDelimiter, Cast, PlainObject, Input, ColumnOption, CastingContext, Options -} from './index.js'; +} from './index.cjs'; diff --git a/packages/csv-stringify/dist/esm/index.js b/packages/csv-stringify/dist/esm/index.js index f895afc3f..5650e8d6b 100644 --- a/packages/csv-stringify/dist/esm/index.js +++ b/packages/csv-stringify/dist/esm/index.js @@ -3225,6 +3225,26 @@ BufferList.prototype.concat = function (n) { }; // Copyright Joyent, Inc. and other Node contributors. 
+// +// Permission is hereby granted, free of charge, to any person obtaining a +// copy of this software and associated documentation files (the +// "Software"), to deal in the Software without restriction, including +// without limitation the rights to use, copy, modify, merge, publish, +// distribute, sublicense, and/or sell copies of the Software, and to permit +// persons to whom the Software is furnished to do so, subject to the +// following conditions: +// +// The above copyright notice and this permission notice shall be included +// in all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN +// NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, +// DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR +// OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE +// USE OR OTHER DEALINGS IN THE SOFTWARE. + var isBufferEncoding = Buffer.isEncoding || function(encoding) { switch (encoding && encoding.toLowerCase()) { @@ -4311,6 +4331,9 @@ function indexOf(xs, x) { } // A bit simpler than readable streams. +// Implement an async ._write(chunk, encoding, cb), and it'll handle all +// the drain event emission and buffering. + Writable.WritableState = WritableState; inherits$1(Writable, EventEmitter); @@ -4822,6 +4845,47 @@ function onEndNT(self) { } // a transform stream is a readable/writable stream where you do +// something with the data. Sometimes it's called a "filter", +// but that's not a great name for it, since that implies a thing where +// some bits pass through, and others are simply ignored. (That would +// be a valid example of a transform, of course.) +// +// While the output is causally related to the input, it's not a +// necessarily symmetric or synchronous transformation. For example, +// a zlib stream might take multiple plain-text writes(), and then +// emit a single compressed chunk some time in the future. +// +// Here's how this works: +// +// The Transform stream has all the aspects of the readable and writable +// stream classes. When you write(chunk), that calls _write(chunk,cb) +// internally, and returns false if there's a lot of pending writes +// buffered up. When you call read(), that calls _read(n) until +// there's enough pending readable data buffered up. +// +// In a transform stream, the written data is placed in a buffer. When +// _read(n) is called, it transforms the queued up data, calling the +// buffered _write cb's as it consumes chunks. If consuming a single +// written chunk would result in multiple output chunks, then the first +// outputted bit calls the readcb, and subsequent chunks just go into +// the read buffer, and will cause it to emit 'readable' if necessary. +// +// This way, back-pressure is actually determined by the reading side, +// since _read has to be called to start processing a new chunk. However, +// a pathological inflate type of transform can cause excessive buffering +// here. For example, imagine a stream where every byte of input is +// interpreted as an integer from 0-255, and then results in that many +// bytes of output. Writing the 4 bytes {ff,ff,ff,ff} would result in +// 1kb of data being output. In this case, you could write a very small +// amount of input, and end up with a very large amount of output. 
In +// such a pathological inflating mechanism, there'd be no way to tell +// the system to stop doing the transform. A single 4MB write could +// cause the system to run out of memory. +// +// However, even in such a pathological case, only a single written chunk +// would be consumed, and then the rest would wait (un-transformed) until +// the results of the previous transformed chunk were consumed. + inherits$1(Transform, Duplex); function TransformState(stream) { diff --git a/packages/csv-stringify/dist/iife/index.js b/packages/csv-stringify/dist/iife/index.js index 1abc6b890..b51116e8d 100644 --- a/packages/csv-stringify/dist/iife/index.js +++ b/packages/csv-stringify/dist/iife/index.js @@ -3228,6 +3228,26 @@ var csv_stringify = (function (exports) { }; // Copyright Joyent, Inc. and other Node contributors. + // + // Permission is hereby granted, free of charge, to any person obtaining a + // copy of this software and associated documentation files (the + // "Software"), to deal in the Software without restriction, including + // without limitation the rights to use, copy, modify, merge, publish, + // distribute, sublicense, and/or sell copies of the Software, and to permit + // persons to whom the Software is furnished to do so, subject to the + // following conditions: + // + // The above copyright notice and this permission notice shall be included + // in all copies or substantial portions of the Software. + // + // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + // OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + // MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN + // NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, + // DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR + // OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE + // USE OR OTHER DEALINGS IN THE SOFTWARE. + var isBufferEncoding = Buffer.isEncoding || function(encoding) { switch (encoding && encoding.toLowerCase()) { @@ -4314,6 +4334,9 @@ var csv_stringify = (function (exports) { } // A bit simpler than readable streams. + // Implement an async ._write(chunk, encoding, cb), and it'll handle all + // the drain event emission and buffering. + Writable.WritableState = WritableState; inherits$1(Writable, EventEmitter); @@ -4825,6 +4848,47 @@ var csv_stringify = (function (exports) { } // a transform stream is a readable/writable stream where you do + // something with the data. Sometimes it's called a "filter", + // but that's not a great name for it, since that implies a thing where + // some bits pass through, and others are simply ignored. (That would + // be a valid example of a transform, of course.) + // + // While the output is causally related to the input, it's not a + // necessarily symmetric or synchronous transformation. For example, + // a zlib stream might take multiple plain-text writes(), and then + // emit a single compressed chunk some time in the future. + // + // Here's how this works: + // + // The Transform stream has all the aspects of the readable and writable + // stream classes. When you write(chunk), that calls _write(chunk,cb) + // internally, and returns false if there's a lot of pending writes + // buffered up. When you call read(), that calls _read(n) until + // there's enough pending readable data buffered up. + // + // In a transform stream, the written data is placed in a buffer. 
When + // _read(n) is called, it transforms the queued up data, calling the + // buffered _write cb's as it consumes chunks. If consuming a single + // written chunk would result in multiple output chunks, then the first + // outputted bit calls the readcb, and subsequent chunks just go into + // the read buffer, and will cause it to emit 'readable' if necessary. + // + // This way, back-pressure is actually determined by the reading side, + // since _read has to be called to start processing a new chunk. However, + // a pathological inflate type of transform can cause excessive buffering + // here. For example, imagine a stream where every byte of input is + // interpreted as an integer from 0-255, and then results in that many + // bytes of output. Writing the 4 bytes {ff,ff,ff,ff} would result in + // 1kb of data being output. In this case, you could write a very small + // amount of input, and end up with a very large amount of output. In + // such a pathological inflating mechanism, there'd be no way to tell + // the system to stop doing the transform. A single 4MB write could + // cause the system to run out of memory. + // + // However, even in such a pathological case, only a single written chunk + // would be consumed, and then the rest would wait (un-transformed) until + // the results of the previous transformed chunk were consumed. + inherits$1(Transform, Duplex); function TransformState(stream) { diff --git a/packages/csv-stringify/dist/umd/index.js b/packages/csv-stringify/dist/umd/index.js index 2eae74294..3941c1237 100644 --- a/packages/csv-stringify/dist/umd/index.js +++ b/packages/csv-stringify/dist/umd/index.js @@ -3231,6 +3231,26 @@ }; // Copyright Joyent, Inc. and other Node contributors. + // + // Permission is hereby granted, free of charge, to any person obtaining a + // copy of this software and associated documentation files (the + // "Software"), to deal in the Software without restriction, including + // without limitation the rights to use, copy, modify, merge, publish, + // distribute, sublicense, and/or sell copies of the Software, and to permit + // persons to whom the Software is furnished to do so, subject to the + // following conditions: + // + // The above copyright notice and this permission notice shall be included + // in all copies or substantial portions of the Software. + // + // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + // OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + // MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN + // NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, + // DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR + // OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE + // USE OR OTHER DEALINGS IN THE SOFTWARE. + var isBufferEncoding = Buffer.isEncoding || function(encoding) { switch (encoding && encoding.toLowerCase()) { @@ -4317,6 +4337,9 @@ } // A bit simpler than readable streams. + // Implement an async ._write(chunk, encoding, cb), and it'll handle all + // the drain event emission and buffering. + Writable.WritableState = WritableState; inherits$1(Writable, EventEmitter); @@ -4828,6 +4851,47 @@ } // a transform stream is a readable/writable stream where you do + // something with the data. Sometimes it's called a "filter", + // but that's not a great name for it, since that implies a thing where + // some bits pass through, and others are simply ignored. 
(That would + // be a valid example of a transform, of course.) + // + // While the output is causally related to the input, it's not a + // necessarily symmetric or synchronous transformation. For example, + // a zlib stream might take multiple plain-text writes(), and then + // emit a single compressed chunk some time in the future. + // + // Here's how this works: + // + // The Transform stream has all the aspects of the readable and writable + // stream classes. When you write(chunk), that calls _write(chunk,cb) + // internally, and returns false if there's a lot of pending writes + // buffered up. When you call read(), that calls _read(n) until + // there's enough pending readable data buffered up. + // + // In a transform stream, the written data is placed in a buffer. When + // _read(n) is called, it transforms the queued up data, calling the + // buffered _write cb's as it consumes chunks. If consuming a single + // written chunk would result in multiple output chunks, then the first + // outputted bit calls the readcb, and subsequent chunks just go into + // the read buffer, and will cause it to emit 'readable' if necessary. + // + // This way, back-pressure is actually determined by the reading side, + // since _read has to be called to start processing a new chunk. However, + // a pathological inflate type of transform can cause excessive buffering + // here. For example, imagine a stream where every byte of input is + // interpreted as an integer from 0-255, and then results in that many + // bytes of output. Writing the 4 bytes {ff,ff,ff,ff} would result in + // 1kb of data being output. In this case, you could write a very small + // amount of input, and end up with a very large amount of output. In + // such a pathological inflating mechanism, there'd be no way to tell + // the system to stop doing the transform. A single 4MB write could + // cause the system to run out of memory. + // + // However, even in such a pathological case, only a single written chunk + // would be consumed, and then the rest would wait (un-transformed) until + // the results of the previous transformed chunk were consumed. 
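The comment reproduced above (and again in each bundled dist file below) describes the Transform contract that the vendored readable-stream code implements. As a rough sketch against Node's public stream API, with the Doubler class and its doubling logic invented purely for illustration:

import { Transform } from 'node:stream';

class Doubler extends Transform {
  // _transform() plays the role of the buffered _write described above:
  // consume one written chunk, emit output, then call back to release
  // the next pending write.
  _transform(chunk, encoding, callback) {
    this.push(chunk); // first output chunk answers the pending read
    this.push(chunk); // the rest queues in the read buffer ('readable')
    callback();
  }
}

const doubler = new Doubler();
doubler.on('data', (chunk) => console.log(chunk.toString()));
doubler.write('abc');
doubler.end();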
+ inherits$1(Transform, Duplex); function TransformState(stream) { diff --git a/packages/csv-stringify/package.json b/packages/csv-stringify/package.json index 64827d9a7..b49c577e4 100644 --- a/packages/csv-stringify/package.json +++ b/packages/csv-stringify/package.json @@ -84,7 +84,9 @@ "build": "npm run build:rollup && npm run build:ts", "build:rollup": "npx rollup -c", "build:ts": "cp lib/index.d.ts dist/cjs/index.d.cts && cp lib/sync.d.ts dist/cjs/sync.d.cts && cp lib/*.ts dist/esm", + "postbuild:ts": "find dist/cjs -name '*.d.cts' -exec sh -c \"sed -i \"s/\\.js'/\\.cjs'/g\" {} || sed -i '' \"s/\\.js'/\\.cjs'/g\" {}\" \\;", "lint": "npm run lint:lib && npm run lint:samples && npm run lint:test", + "postlint": "tsc --noEmit true", "lint:lib": "eslint --fix lib/*.js", "lint:samples": "eslint --fix samples/*.js", "lint:test": "coffeelint --fix test/*.coffee", diff --git a/packages/csv-stringify/samples/option.escape.custom.js b/packages/csv-stringify/samples/option.escape.custom.js index 281ae8954..8f28fa2d8 100644 --- a/packages/csv-stringify/samples/option.escape.custom.js +++ b/packages/csv-stringify/samples/option.escape.custom.js @@ -6,4 +6,4 @@ const records = stringify([ ['a "value"'], ], {escape: '\\'}); -assert.equal(records, '"a \\"value\\""\n') +assert.equal(records, '"a \\"value\\""\n'); diff --git a/packages/csv-stringify/samples/option.escape.default.js b/packages/csv-stringify/samples/option.escape.default.js index db8a1439c..9fc426a7c 100644 --- a/packages/csv-stringify/samples/option.escape.default.js +++ b/packages/csv-stringify/samples/option.escape.default.js @@ -6,4 +6,4 @@ const records = stringify([ ['a "value"'], ]); -assert.equal(records, '"a ""value"""\n') +assert.equal(records, '"a ""value"""\n'); diff --git a/packages/csv-stringify/samples/option.escape_formulas.js b/packages/csv-stringify/samples/option.escape_formulas.js index b3303bf86..7e119ae33 100644 --- a/packages/csv-stringify/samples/option.escape_formulas.js +++ b/packages/csv-stringify/samples/option.escape_formulas.js @@ -7,4 +7,4 @@ const records = stringify([ ['=4', '@5', '6'] ], {escape_formulas: true}); -assert.equal(records, "'=1,'@2,3\n'=4,'@5,6\n") +assert.equal(records, "'=1,'@2,3\n'=4,'@5,6\n"); diff --git a/packages/csv/dist/esm/index.js b/packages/csv/dist/esm/index.js index 619f83b02..2567db8c3 100644 --- a/packages/csv/dist/esm/index.js +++ b/packages/csv/dist/esm/index.js @@ -3295,6 +3295,26 @@ BufferList.prototype.concat = function (n) { }; // Copyright Joyent, Inc. and other Node contributors. +// +// Permission is hereby granted, free of charge, to any person obtaining a +// copy of this software and associated documentation files (the +// "Software"), to deal in the Software without restriction, including +// without limitation the rights to use, copy, modify, merge, publish, +// distribute, sublicense, and/or sell copies of the Software, and to permit +// persons to whom the Software is furnished to do so, subject to the +// following conditions: +// +// The above copyright notice and this permission notice shall be included +// in all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN +// NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, +// DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR +// OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE +// USE OR OTHER DEALINGS IN THE SOFTWARE. + var isBufferEncoding = Buffer.isEncoding || function(encoding) { switch (encoding && encoding.toLowerCase()) { @@ -4381,6 +4401,9 @@ function indexOf(xs, x) { } // A bit simpler than readable streams. +// Implement an async ._write(chunk, encoding, cb), and it'll handle all +// the drain event emission and buffering. + Writable.WritableState = WritableState; inherits$1(Writable, EventEmitter); @@ -4892,6 +4915,47 @@ function onEndNT(self) { } // a transform stream is a readable/writable stream where you do +// something with the data. Sometimes it's called a "filter", +// but that's not a great name for it, since that implies a thing where +// some bits pass through, and others are simply ignored. (That would +// be a valid example of a transform, of course.) +// +// While the output is causally related to the input, it's not a +// necessarily symmetric or synchronous transformation. For example, +// a zlib stream might take multiple plain-text writes(), and then +// emit a single compressed chunk some time in the future. +// +// Here's how this works: +// +// The Transform stream has all the aspects of the readable and writable +// stream classes. When you write(chunk), that calls _write(chunk,cb) +// internally, and returns false if there's a lot of pending writes +// buffered up. When you call read(), that calls _read(n) until +// there's enough pending readable data buffered up. +// +// In a transform stream, the written data is placed in a buffer. When +// _read(n) is called, it transforms the queued up data, calling the +// buffered _write cb's as it consumes chunks. If consuming a single +// written chunk would result in multiple output chunks, then the first +// outputted bit calls the readcb, and subsequent chunks just go into +// the read buffer, and will cause it to emit 'readable' if necessary. +// +// This way, back-pressure is actually determined by the reading side, +// since _read has to be called to start processing a new chunk. However, +// a pathological inflate type of transform can cause excessive buffering +// here. For example, imagine a stream where every byte of input is +// interpreted as an integer from 0-255, and then results in that many +// bytes of output. Writing the 4 bytes {ff,ff,ff,ff} would result in +// 1kb of data being output. In this case, you could write a very small +// amount of input, and end up with a very large amount of output. In +// such a pathological inflating mechanism, there'd be no way to tell +// the system to stop doing the transform. A single 4MB write could +// cause the system to run out of memory. +// +// However, even in such a pathological case, only a single written chunk +// would be consumed, and then the rest would wait (un-transformed) until +// the results of the previous transformed chunk were consumed. + inherits$1(Transform, Duplex); function TransformState(stream) { diff --git a/packages/csv/dist/esm/sync.js b/packages/csv/dist/esm/sync.js index 23a4561f2..a9c144be6 100644 --- a/packages/csv/dist/esm/sync.js +++ b/packages/csv/dist/esm/sync.js @@ -3295,6 +3295,26 @@ BufferList.prototype.concat = function (n) { }; // Copyright Joyent, Inc. and other Node contributors. 
+// +// Permission is hereby granted, free of charge, to any person obtaining a +// copy of this software and associated documentation files (the +// "Software"), to deal in the Software without restriction, including +// without limitation the rights to use, copy, modify, merge, publish, +// distribute, sublicense, and/or sell copies of the Software, and to permit +// persons to whom the Software is furnished to do so, subject to the +// following conditions: +// +// The above copyright notice and this permission notice shall be included +// in all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN +// NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, +// DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR +// OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE +// USE OR OTHER DEALINGS IN THE SOFTWARE. + var isBufferEncoding = Buffer.isEncoding || function(encoding) { switch (encoding && encoding.toLowerCase()) { @@ -4381,6 +4401,9 @@ function indexOf(xs, x) { } // A bit simpler than readable streams. +// Implement an async ._write(chunk, encoding, cb), and it'll handle all +// the drain event emission and buffering. + Writable.WritableState = WritableState; inherits$1(Writable, EventEmitter); @@ -4892,6 +4915,47 @@ function onEndNT(self) { } // a transform stream is a readable/writable stream where you do +// something with the data. Sometimes it's called a "filter", +// but that's not a great name for it, since that implies a thing where +// some bits pass through, and others are simply ignored. (That would +// be a valid example of a transform, of course.) +// +// While the output is causally related to the input, it's not a +// necessarily symmetric or synchronous transformation. For example, +// a zlib stream might take multiple plain-text writes(), and then +// emit a single compressed chunk some time in the future. +// +// Here's how this works: +// +// The Transform stream has all the aspects of the readable and writable +// stream classes. When you write(chunk), that calls _write(chunk,cb) +// internally, and returns false if there's a lot of pending writes +// buffered up. When you call read(), that calls _read(n) until +// there's enough pending readable data buffered up. +// +// In a transform stream, the written data is placed in a buffer. When +// _read(n) is called, it transforms the queued up data, calling the +// buffered _write cb's as it consumes chunks. If consuming a single +// written chunk would result in multiple output chunks, then the first +// outputted bit calls the readcb, and subsequent chunks just go into +// the read buffer, and will cause it to emit 'readable' if necessary. +// +// This way, back-pressure is actually determined by the reading side, +// since _read has to be called to start processing a new chunk. However, +// a pathological inflate type of transform can cause excessive buffering +// here. For example, imagine a stream where every byte of input is +// interpreted as an integer from 0-255, and then results in that many +// bytes of output. Writing the 4 bytes {ff,ff,ff,ff} would result in +// 1kb of data being output. In this case, you could write a very small +// amount of input, and end up with a very large amount of output. 
In +// such a pathological inflating mechanism, there'd be no way to tell +// the system to stop doing the transform. A single 4MB write could +// cause the system to run out of memory. +// +// However, even in such a pathological case, only a single written chunk +// would be consumed, and then the rest would wait (un-transformed) until +// the results of the previous transformed chunk were consumed. + inherits$1(Transform, Duplex); function TransformState(stream) { diff --git a/packages/csv/dist/iife/index.js b/packages/csv/dist/iife/index.js index 184d43c85..895dd35ca 100644 --- a/packages/csv/dist/iife/index.js +++ b/packages/csv/dist/iife/index.js @@ -3298,6 +3298,26 @@ var csv = (function (exports) { }; // Copyright Joyent, Inc. and other Node contributors. + // + // Permission is hereby granted, free of charge, to any person obtaining a + // copy of this software and associated documentation files (the + // "Software"), to deal in the Software without restriction, including + // without limitation the rights to use, copy, modify, merge, publish, + // distribute, sublicense, and/or sell copies of the Software, and to permit + // persons to whom the Software is furnished to do so, subject to the + // following conditions: + // + // The above copyright notice and this permission notice shall be included + // in all copies or substantial portions of the Software. + // + // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + // OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + // MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN + // NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, + // DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR + // OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE + // USE OR OTHER DEALINGS IN THE SOFTWARE. + var isBufferEncoding = Buffer.isEncoding || function(encoding) { switch (encoding && encoding.toLowerCase()) { @@ -4384,6 +4404,9 @@ var csv = (function (exports) { } // A bit simpler than readable streams. + // Implement an async ._write(chunk, encoding, cb), and it'll handle all + // the drain event emission and buffering. + Writable.WritableState = WritableState; inherits$1(Writable, EventEmitter); @@ -4895,6 +4918,47 @@ var csv = (function (exports) { } // a transform stream is a readable/writable stream where you do + // something with the data. Sometimes it's called a "filter", + // but that's not a great name for it, since that implies a thing where + // some bits pass through, and others are simply ignored. (That would + // be a valid example of a transform, of course.) + // + // While the output is causally related to the input, it's not a + // necessarily symmetric or synchronous transformation. For example, + // a zlib stream might take multiple plain-text writes(), and then + // emit a single compressed chunk some time in the future. + // + // Here's how this works: + // + // The Transform stream has all the aspects of the readable and writable + // stream classes. When you write(chunk), that calls _write(chunk,cb) + // internally, and returns false if there's a lot of pending writes + // buffered up. When you call read(), that calls _read(n) until + // there's enough pending readable data buffered up. + // + // In a transform stream, the written data is placed in a buffer. When + // _read(n) is called, it transforms the queued up data, calling the + // buffered _write cb's as it consumes chunks. 
If consuming a single + // written chunk would result in multiple output chunks, then the first + // outputted bit calls the readcb, and subsequent chunks just go into + // the read buffer, and will cause it to emit 'readable' if necessary. + // + // This way, back-pressure is actually determined by the reading side, + // since _read has to be called to start processing a new chunk. However, + // a pathological inflate type of transform can cause excessive buffering + // here. For example, imagine a stream where every byte of input is + // interpreted as an integer from 0-255, and then results in that many + // bytes of output. Writing the 4 bytes {ff,ff,ff,ff} would result in + // 1kb of data being output. In this case, you could write a very small + // amount of input, and end up with a very large amount of output. In + // such a pathological inflating mechanism, there'd be no way to tell + // the system to stop doing the transform. A single 4MB write could + // cause the system to run out of memory. + // + // However, even in such a pathological case, only a single written chunk + // would be consumed, and then the rest would wait (un-transformed) until + // the results of the previous transformed chunk were consumed. + inherits$1(Transform, Duplex); function TransformState(stream) { diff --git a/packages/csv/dist/iife/sync.js b/packages/csv/dist/iife/sync.js index 38bb36fad..71daaf343 100644 --- a/packages/csv/dist/iife/sync.js +++ b/packages/csv/dist/iife/sync.js @@ -3298,6 +3298,26 @@ var csv_sync = (function (exports) { }; // Copyright Joyent, Inc. and other Node contributors. + // + // Permission is hereby granted, free of charge, to any person obtaining a + // copy of this software and associated documentation files (the + // "Software"), to deal in the Software without restriction, including + // without limitation the rights to use, copy, modify, merge, publish, + // distribute, sublicense, and/or sell copies of the Software, and to permit + // persons to whom the Software is furnished to do so, subject to the + // following conditions: + // + // The above copyright notice and this permission notice shall be included + // in all copies or substantial portions of the Software. + // + // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + // OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + // MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN + // NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, + // DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR + // OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE + // USE OR OTHER DEALINGS IN THE SOFTWARE. + var isBufferEncoding = Buffer.isEncoding || function(encoding) { switch (encoding && encoding.toLowerCase()) { @@ -4384,6 +4404,9 @@ var csv_sync = (function (exports) { } // A bit simpler than readable streams. + // Implement an async ._write(chunk, encoding, cb), and it'll handle all + // the drain event emission and buffering. + Writable.WritableState = WritableState; inherits$1(Writable, EventEmitter); @@ -4895,6 +4918,47 @@ var csv_sync = (function (exports) { } // a transform stream is a readable/writable stream where you do + // something with the data. Sometimes it's called a "filter", + // but that's not a great name for it, since that implies a thing where + // some bits pass through, and others are simply ignored. (That would + // be a valid example of a transform, of course.) 
+ // + // While the output is causally related to the input, it's not a + // necessarily symmetric or synchronous transformation. For example, + // a zlib stream might take multiple plain-text writes(), and then + // emit a single compressed chunk some time in the future. + // + // Here's how this works: + // + // The Transform stream has all the aspects of the readable and writable + // stream classes. When you write(chunk), that calls _write(chunk,cb) + // internally, and returns false if there's a lot of pending writes + // buffered up. When you call read(), that calls _read(n) until + // there's enough pending readable data buffered up. + // + // In a transform stream, the written data is placed in a buffer. When + // _read(n) is called, it transforms the queued up data, calling the + // buffered _write cb's as it consumes chunks. If consuming a single + // written chunk would result in multiple output chunks, then the first + // outputted bit calls the readcb, and subsequent chunks just go into + // the read buffer, and will cause it to emit 'readable' if necessary. + // + // This way, back-pressure is actually determined by the reading side, + // since _read has to be called to start processing a new chunk. However, + // a pathological inflate type of transform can cause excessive buffering + // here. For example, imagine a stream where every byte of input is + // interpreted as an integer from 0-255, and then results in that many + // bytes of output. Writing the 4 bytes {ff,ff,ff,ff} would result in + // 1kb of data being output. In this case, you could write a very small + // amount of input, and end up with a very large amount of output. In + // such a pathological inflating mechanism, there'd be no way to tell + // the system to stop doing the transform. A single 4MB write could + // cause the system to run out of memory. + // + // However, even in such a pathological case, only a single written chunk + // would be consumed, and then the rest would wait (un-transformed) until + // the results of the previous transformed chunk were consumed. + inherits$1(Transform, Duplex); function TransformState(stream) { diff --git a/packages/csv/dist/umd/index.js b/packages/csv/dist/umd/index.js index a9c97acc9..c4ed3fcdc 100644 --- a/packages/csv/dist/umd/index.js +++ b/packages/csv/dist/umd/index.js @@ -3301,6 +3301,26 @@ }; // Copyright Joyent, Inc. and other Node contributors. + // + // Permission is hereby granted, free of charge, to any person obtaining a + // copy of this software and associated documentation files (the + // "Software"), to deal in the Software without restriction, including + // without limitation the rights to use, copy, modify, merge, publish, + // distribute, sublicense, and/or sell copies of the Software, and to permit + // persons to whom the Software is furnished to do so, subject to the + // following conditions: + // + // The above copyright notice and this permission notice shall be included + // in all copies or substantial portions of the Software. + // + // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + // OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + // MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN + // NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, + // DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR + // OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE + // USE OR OTHER DEALINGS IN THE SOFTWARE. 
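The `var isBufferEncoding = Buffer.isEncoding || function(encoding) {...}` line that follows is a feature-detection fallback: use the native check when the runtime provides one, otherwise consult a hand-written allow-list. A condensed sketch of the same pattern (the encoding list is abbreviated here; modern Node always has the native check, so the fallback branch never runs there):

const isEncoding = Buffer.isEncoding || function (encoding) {
  switch (String(encoding).toLowerCase()) {
    case 'hex': case 'utf8': case 'utf-8': case 'ascii':
    case 'base64': case 'binary': case 'ucs2': case 'ucs-2':
    case 'utf16le': case 'utf-16le': case 'raw':
      return true;
    default:
      return false;
  }
};

console.log(isEncoding('utf8'));    // true
console.log(isEncoding('klingon')); // false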
+ var isBufferEncoding = Buffer.isEncoding || function(encoding) { switch (encoding && encoding.toLowerCase()) { @@ -4387,6 +4407,9 @@ } // A bit simpler than readable streams. + // Implement an async ._write(chunk, encoding, cb), and it'll handle all + // the drain event emission and buffering. + Writable.WritableState = WritableState; inherits$1(Writable, EventEmitter); @@ -4898,6 +4921,47 @@ } // a transform stream is a readable/writable stream where you do + // something with the data. Sometimes it's called a "filter", + // but that's not a great name for it, since that implies a thing where + // some bits pass through, and others are simply ignored. (That would + // be a valid example of a transform, of course.) + // + // While the output is causally related to the input, it's not a + // necessarily symmetric or synchronous transformation. For example, + // a zlib stream might take multiple plain-text writes(), and then + // emit a single compressed chunk some time in the future. + // + // Here's how this works: + // + // The Transform stream has all the aspects of the readable and writable + // stream classes. When you write(chunk), that calls _write(chunk,cb) + // internally, and returns false if there's a lot of pending writes + // buffered up. When you call read(), that calls _read(n) until + // there's enough pending readable data buffered up. + // + // In a transform stream, the written data is placed in a buffer. When + // _read(n) is called, it transforms the queued up data, calling the + // buffered _write cb's as it consumes chunks. If consuming a single + // written chunk would result in multiple output chunks, then the first + // outputted bit calls the readcb, and subsequent chunks just go into + // the read buffer, and will cause it to emit 'readable' if necessary. + // + // This way, back-pressure is actually determined by the reading side, + // since _read has to be called to start processing a new chunk. However, + // a pathological inflate type of transform can cause excessive buffering + // here. For example, imagine a stream where every byte of input is + // interpreted as an integer from 0-255, and then results in that many + // bytes of output. Writing the 4 bytes {ff,ff,ff,ff} would result in + // 1kb of data being output. In this case, you could write a very small + // amount of input, and end up with a very large amount of output. In + // such a pathological inflating mechanism, there'd be no way to tell + // the system to stop doing the transform. A single 4MB write could + // cause the system to run out of memory. + // + // However, even in such a pathological case, only a single written chunk + // would be consumed, and then the rest would wait (un-transformed) until + // the results of the previous transformed chunk were consumed. + inherits$1(Transform, Duplex); function TransformState(stream) { diff --git a/packages/csv/dist/umd/sync.js b/packages/csv/dist/umd/sync.js index 6a55360a6..e4c6ed874 100644 --- a/packages/csv/dist/umd/sync.js +++ b/packages/csv/dist/umd/sync.js @@ -3301,6 +3301,26 @@ }; // Copyright Joyent, Inc. and other Node contributors. 
+ // + // Permission is hereby granted, free of charge, to any person obtaining a + // copy of this software and associated documentation files (the + // "Software"), to deal in the Software without restriction, including + // without limitation the rights to use, copy, modify, merge, publish, + // distribute, sublicense, and/or sell copies of the Software, and to permit + // persons to whom the Software is furnished to do so, subject to the + // following conditions: + // + // The above copyright notice and this permission notice shall be included + // in all copies or substantial portions of the Software. + // + // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + // OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + // MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN + // NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, + // DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR + // OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE + // USE OR OTHER DEALINGS IN THE SOFTWARE. + var isBufferEncoding = Buffer.isEncoding || function(encoding) { switch (encoding && encoding.toLowerCase()) { @@ -4387,6 +4407,9 @@ } // A bit simpler than readable streams. + // Implement an async ._write(chunk, encoding, cb), and it'll handle all + // the drain event emission and buffering. + Writable.WritableState = WritableState; inherits$1(Writable, EventEmitter); @@ -4898,6 +4921,47 @@ } // a transform stream is a readable/writable stream where you do + // something with the data. Sometimes it's called a "filter", + // but that's not a great name for it, since that implies a thing where + // some bits pass through, and others are simply ignored. (That would + // be a valid example of a transform, of course.) + // + // While the output is causally related to the input, it's not a + // necessarily symmetric or synchronous transformation. For example, + // a zlib stream might take multiple plain-text writes(), and then + // emit a single compressed chunk some time in the future. + // + // Here's how this works: + // + // The Transform stream has all the aspects of the readable and writable + // stream classes. When you write(chunk), that calls _write(chunk,cb) + // internally, and returns false if there's a lot of pending writes + // buffered up. When you call read(), that calls _read(n) until + // there's enough pending readable data buffered up. + // + // In a transform stream, the written data is placed in a buffer. When + // _read(n) is called, it transforms the queued up data, calling the + // buffered _write cb's as it consumes chunks. If consuming a single + // written chunk would result in multiple output chunks, then the first + // outputted bit calls the readcb, and subsequent chunks just go into + // the read buffer, and will cause it to emit 'readable' if necessary. + // + // This way, back-pressure is actually determined by the reading side, + // since _read has to be called to start processing a new chunk. However, + // a pathological inflate type of transform can cause excessive buffering + // here. For example, imagine a stream where every byte of input is + // interpreted as an integer from 0-255, and then results in that many + // bytes of output. Writing the 4 bytes {ff,ff,ff,ff} would result in + // 1kb of data being output. In this case, you could write a very small + // amount of input, and end up with a very large amount of output. 
In + // such a pathological inflating mechanism, there'd be no way to tell + // the system to stop doing the transform. A single 4MB write could + // cause the system to run out of memory. + // + // However, even in such a pathological case, only a single written chunk + // would be consumed, and then the rest would wait (un-transformed) until + // the results of the previous transformed chunk were consumed. + inherits$1(Transform, Duplex); function TransformState(stream) { diff --git a/packages/csv/package.json b/packages/csv/package.json index d38fa1d88..5c4fc212d 100644 --- a/packages/csv/package.json +++ b/packages/csv/package.json @@ -103,7 +103,9 @@ "build": "npm run build:rollup && npm run build:ts", "build:rollup": "npx rollup -c", "build:ts": "cp lib/index.d.ts dist/cjs/index.d.cts && cp lib/sync.d.ts dist/cjs/sync.d.cts && cp lib/*.ts dist/esm", + "postbuild:ts": "find dist/cjs -name '*.d.cts' -exec sh -c \"sed -i \"s/\\.js'/\\.cjs'/g\" {} || sed -i '' \"s/\\.js'/\\.cjs'/g\" {}\" \\;", "lint": "npm run lint:lib && npm run lint:samples && npm run lint:test", + "postlint": "tsc --noEmit true", "lint:lib": "eslint --fix lib/*.js", "lint:samples": "eslint --fix samples/*.js", "lint:test": "coffeelint --fix test/*.coffee", diff --git a/packages/stream-transform/dist/cjs/sync.d.cts b/packages/stream-transform/dist/cjs/sync.d.cts index 538c3e5ff..03f421701 100644 --- a/packages/stream-transform/dist/cjs/sync.d.cts +++ b/packages/stream-transform/dist/cjs/sync.d.cts @@ -1,6 +1,6 @@ /// -import { Options } from './index.js'; +import { Options } from './index.cjs'; export type Handler = (record: T) => U // export default transform; diff --git a/packages/stream-transform/dist/esm/index.js b/packages/stream-transform/dist/esm/index.js index 8eff00c31..6e40886ea 100644 --- a/packages/stream-transform/dist/esm/index.js +++ b/packages/stream-transform/dist/esm/index.js @@ -3295,6 +3295,26 @@ BufferList.prototype.concat = function (n) { }; // Copyright Joyent, Inc. and other Node contributors. +// +// Permission is hereby granted, free of charge, to any person obtaining a +// copy of this software and associated documentation files (the +// "Software"), to deal in the Software without restriction, including +// without limitation the rights to use, copy, modify, merge, publish, +// distribute, sublicense, and/or sell copies of the Software, and to permit +// persons to whom the Software is furnished to do so, subject to the +// following conditions: +// +// The above copyright notice and this permission notice shall be included +// in all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN +// NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, +// DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR +// OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE +// USE OR OTHER DEALINGS IN THE SOFTWARE. + var isBufferEncoding = Buffer.isEncoding || function(encoding) { switch (encoding && encoding.toLowerCase()) { @@ -4381,6 +4401,9 @@ function indexOf(xs, x) { } // A bit simpler than readable streams. +// Implement an async ._write(chunk, encoding, cb), and it'll handle all +// the drain event emission and buffering. 
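The Writable comment above is essentially the whole contract: a subclass supplies an async _write(chunk, encoding, cb) and the base class owns write buffering and 'drain' emission. A minimal sketch of that division of labor, where the Sink class and its timeout-based destination are invented for the example:

import { Writable } from 'node:stream';

class Sink extends Writable {
  _write(chunk, encoding, callback) {
    // Simulate a slow destination; until callback() runs, further
    // writes are buffered by the base class, and write() returns false
    // once the high-water mark is exceeded.
    setTimeout(() => {
      console.log(`flushed ${chunk.length} bytes`);
      callback();
    }, 10);
  }
}

const sink = new Sink({ highWaterMark: 4 });
const ok = sink.write(Buffer.from('hello world')); // false: over the mark
if (!ok) sink.once('drain', () => sink.end());     // 'drain' comes for free
else sink.end();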
+ Writable.WritableState = WritableState; inherits$1(Writable, EventEmitter); @@ -4892,6 +4915,47 @@ function onEndNT(self) { } // a transform stream is a readable/writable stream where you do +// something with the data. Sometimes it's called a "filter", +// but that's not a great name for it, since that implies a thing where +// some bits pass through, and others are simply ignored. (That would +// be a valid example of a transform, of course.) +// +// While the output is causally related to the input, it's not a +// necessarily symmetric or synchronous transformation. For example, +// a zlib stream might take multiple plain-text writes(), and then +// emit a single compressed chunk some time in the future. +// +// Here's how this works: +// +// The Transform stream has all the aspects of the readable and writable +// stream classes. When you write(chunk), that calls _write(chunk,cb) +// internally, and returns false if there's a lot of pending writes +// buffered up. When you call read(), that calls _read(n) until +// there's enough pending readable data buffered up. +// +// In a transform stream, the written data is placed in a buffer. When +// _read(n) is called, it transforms the queued up data, calling the +// buffered _write cb's as it consumes chunks. If consuming a single +// written chunk would result in multiple output chunks, then the first +// outputted bit calls the readcb, and subsequent chunks just go into +// the read buffer, and will cause it to emit 'readable' if necessary. +// +// This way, back-pressure is actually determined by the reading side, +// since _read has to be called to start processing a new chunk. However, +// a pathological inflate type of transform can cause excessive buffering +// here. For example, imagine a stream where every byte of input is +// interpreted as an integer from 0-255, and then results in that many +// bytes of output. Writing the 4 bytes {ff,ff,ff,ff} would result in +// 1kb of data being output. In this case, you could write a very small +// amount of input, and end up with a very large amount of output. In +// such a pathological inflating mechanism, there'd be no way to tell +// the system to stop doing the transform. A single 4MB write could +// cause the system to run out of memory. +// +// However, even in such a pathological case, only a single written chunk +// would be consumed, and then the rest would wait (un-transformed) until +// the results of the previous transformed chunk were consumed. + inherits$1(Transform, Duplex); function TransformState(stream) { diff --git a/packages/stream-transform/dist/esm/sync.js b/packages/stream-transform/dist/esm/sync.js index ddc16298a..db7af14b0 100644 --- a/packages/stream-transform/dist/esm/sync.js +++ b/packages/stream-transform/dist/esm/sync.js @@ -3295,6 +3295,26 @@ BufferList.prototype.concat = function (n) { }; // Copyright Joyent, Inc. and other Node contributors. +// +// Permission is hereby granted, free of charge, to any person obtaining a +// copy of this software and associated documentation files (the +// "Software"), to deal in the Software without restriction, including +// without limitation the rights to use, copy, modify, merge, publish, +// distribute, sublicense, and/or sell copies of the Software, and to permit +// persons to whom the Software is furnished to do so, subject to the +// following conditions: +// +// The above copyright notice and this permission notice shall be included +// in all copies or substantial portions of the Software. 
+// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN +// NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, +// DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR +// OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE +// USE OR OTHER DEALINGS IN THE SOFTWARE. + var isBufferEncoding = Buffer.isEncoding || function(encoding) { switch (encoding && encoding.toLowerCase()) { @@ -4381,6 +4401,9 @@ function indexOf(xs, x) { } // A bit simpler than readable streams. +// Implement an async ._write(chunk, encoding, cb), and it'll handle all +// the drain event emission and buffering. + Writable.WritableState = WritableState; inherits$1(Writable, EventEmitter); @@ -4892,6 +4915,47 @@ function onEndNT(self) { } // a transform stream is a readable/writable stream where you do +// something with the data. Sometimes it's called a "filter", +// but that's not a great name for it, since that implies a thing where +// some bits pass through, and others are simply ignored. (That would +// be a valid example of a transform, of course.) +// +// While the output is causally related to the input, it's not a +// necessarily symmetric or synchronous transformation. For example, +// a zlib stream might take multiple plain-text writes(), and then +// emit a single compressed chunk some time in the future. +// +// Here's how this works: +// +// The Transform stream has all the aspects of the readable and writable +// stream classes. When you write(chunk), that calls _write(chunk,cb) +// internally, and returns false if there's a lot of pending writes +// buffered up. When you call read(), that calls _read(n) until +// there's enough pending readable data buffered up. +// +// In a transform stream, the written data is placed in a buffer. When +// _read(n) is called, it transforms the queued up data, calling the +// buffered _write cb's as it consumes chunks. If consuming a single +// written chunk would result in multiple output chunks, then the first +// outputted bit calls the readcb, and subsequent chunks just go into +// the read buffer, and will cause it to emit 'readable' if necessary. +// +// This way, back-pressure is actually determined by the reading side, +// since _read has to be called to start processing a new chunk. However, +// a pathological inflate type of transform can cause excessive buffering +// here. For example, imagine a stream where every byte of input is +// interpreted as an integer from 0-255, and then results in that many +// bytes of output. Writing the 4 bytes {ff,ff,ff,ff} would result in +// 1kb of data being output. In this case, you could write a very small +// amount of input, and end up with a very large amount of output. In +// such a pathological inflating mechanism, there'd be no way to tell +// the system to stop doing the transform. A single 4MB write could +// cause the system to run out of memory. +// +// However, even in such a pathological case, only a single written chunk +// would be consumed, and then the rest would wait (un-transformed) until +// the results of the previous transformed chunk were consumed. 
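The "pathological inflate" scenario above is easy to reproduce with the public API. In the sketch below, which is only an illustration of the comment's arithmetic, each input byte n becomes n output bytes, so the 4 bytes {ff,ff,ff,ff} expand to 1020 bytes (roughly 1kb), yet back-pressure still means only one written chunk is transformed at a time:

import { Transform } from 'node:stream';

const inflate = new Transform({
  transform(chunk, encoding, callback) {
    // Expand byte n into n bytes of output (the '*' fill is arbitrary).
    for (const n of chunk) this.push(Buffer.alloc(n, 0x2a));
    callback();
  },
});

let total = 0;
inflate.on('data', (chunk) => { total += chunk.length; });
inflate.on('end', () => console.log(`4 bytes in, ${total} bytes out`)); // 1020
inflate.end(Buffer.from([0xff, 0xff, 0xff, 0xff]));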
+ inherits$1(Transform, Duplex); function TransformState(stream) { diff --git a/packages/stream-transform/dist/iife/index.js b/packages/stream-transform/dist/iife/index.js index d5a48c442..7d15f2435 100644 --- a/packages/stream-transform/dist/iife/index.js +++ b/packages/stream-transform/dist/iife/index.js @@ -3298,6 +3298,26 @@ var stream_transform = (function (exports) { }; // Copyright Joyent, Inc. and other Node contributors. + // + // Permission is hereby granted, free of charge, to any person obtaining a + // copy of this software and associated documentation files (the + // "Software"), to deal in the Software without restriction, including + // without limitation the rights to use, copy, modify, merge, publish, + // distribute, sublicense, and/or sell copies of the Software, and to permit + // persons to whom the Software is furnished to do so, subject to the + // following conditions: + // + // The above copyright notice and this permission notice shall be included + // in all copies or substantial portions of the Software. + // + // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + // OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + // MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN + // NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, + // DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR + // OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE + // USE OR OTHER DEALINGS IN THE SOFTWARE. + var isBufferEncoding = Buffer.isEncoding || function(encoding) { switch (encoding && encoding.toLowerCase()) { @@ -4384,6 +4404,9 @@ var stream_transform = (function (exports) { } // A bit simpler than readable streams. + // Implement an async ._write(chunk, encoding, cb), and it'll handle all + // the drain event emission and buffering. + Writable.WritableState = WritableState; inherits$1(Writable, EventEmitter); @@ -4895,6 +4918,47 @@ var stream_transform = (function (exports) { } // a transform stream is a readable/writable stream where you do + // something with the data. Sometimes it's called a "filter", + // but that's not a great name for it, since that implies a thing where + // some bits pass through, and others are simply ignored. (That would + // be a valid example of a transform, of course.) + // + // While the output is causally related to the input, it's not a + // necessarily symmetric or synchronous transformation. For example, + // a zlib stream might take multiple plain-text writes(), and then + // emit a single compressed chunk some time in the future. + // + // Here's how this works: + // + // The Transform stream has all the aspects of the readable and writable + // stream classes. When you write(chunk), that calls _write(chunk,cb) + // internally, and returns false if there's a lot of pending writes + // buffered up. When you call read(), that calls _read(n) until + // there's enough pending readable data buffered up. + // + // In a transform stream, the written data is placed in a buffer. When + // _read(n) is called, it transforms the queued up data, calling the + // buffered _write cb's as it consumes chunks. If consuming a single + // written chunk would result in multiple output chunks, then the first + // outputted bit calls the readcb, and subsequent chunks just go into + // the read buffer, and will cause it to emit 'readable' if necessary. 
+ // + // This way, back-pressure is actually determined by the reading side, + // since _read has to be called to start processing a new chunk. However, + // a pathological inflate type of transform can cause excessive buffering + // here. For example, imagine a stream where every byte of input is + // interpreted as an integer from 0-255, and then results in that many + // bytes of output. Writing the 4 bytes {ff,ff,ff,ff} would result in + // 1kb of data being output. In this case, you could write a very small + // amount of input, and end up with a very large amount of output. In + // such a pathological inflating mechanism, there'd be no way to tell + // the system to stop doing the transform. A single 4MB write could + // cause the system to run out of memory. + // + // However, even in such a pathological case, only a single written chunk + // would be consumed, and then the rest would wait (un-transformed) until + // the results of the previous transformed chunk were consumed. + inherits$1(Transform, Duplex); function TransformState(stream) { diff --git a/packages/stream-transform/dist/iife/sync.js b/packages/stream-transform/dist/iife/sync.js index d43616b66..ec0d5c5df 100644 --- a/packages/stream-transform/dist/iife/sync.js +++ b/packages/stream-transform/dist/iife/sync.js @@ -3298,6 +3298,26 @@ var stream_transform_sync = (function (exports) { }; // Copyright Joyent, Inc. and other Node contributors. + // + // Permission is hereby granted, free of charge, to any person obtaining a + // copy of this software and associated documentation files (the + // "Software"), to deal in the Software without restriction, including + // without limitation the rights to use, copy, modify, merge, publish, + // distribute, sublicense, and/or sell copies of the Software, and to permit + // persons to whom the Software is furnished to do so, subject to the + // following conditions: + // + // The above copyright notice and this permission notice shall be included + // in all copies or substantial portions of the Software. + // + // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + // OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + // MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN + // NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, + // DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR + // OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE + // USE OR OTHER DEALINGS IN THE SOFTWARE. + var isBufferEncoding = Buffer.isEncoding || function(encoding) { switch (encoding && encoding.toLowerCase()) { @@ -4384,6 +4404,9 @@ var stream_transform_sync = (function (exports) { } // A bit simpler than readable streams. + // Implement an async ._write(chunk, encoding, cb), and it'll handle all + // the drain event emission and buffering. + Writable.WritableState = WritableState; inherits$1(Writable, EventEmitter); @@ -4895,6 +4918,47 @@ var stream_transform_sync = (function (exports) { } // a transform stream is a readable/writable stream where you do + // something with the data. Sometimes it's called a "filter", + // but that's not a great name for it, since that implies a thing where + // some bits pass through, and others are simply ignored. (That would + // be a valid example of a transform, of course.) + // + // While the output is causally related to the input, it's not a + // necessarily symmetric or synchronous transformation. 
For example, + // a zlib stream might take multiple plain-text writes(), and then + // emit a single compressed chunk some time in the future. + // + // Here's how this works: + // + // The Transform stream has all the aspects of the readable and writable + // stream classes. When you write(chunk), that calls _write(chunk,cb) + // internally, and returns false if there's a lot of pending writes + // buffered up. When you call read(), that calls _read(n) until + // there's enough pending readable data buffered up. + // + // In a transform stream, the written data is placed in a buffer. When + // _read(n) is called, it transforms the queued up data, calling the + // buffered _write cb's as it consumes chunks. If consuming a single + // written chunk would result in multiple output chunks, then the first + // outputted bit calls the readcb, and subsequent chunks just go into + // the read buffer, and will cause it to emit 'readable' if necessary. + // + // This way, back-pressure is actually determined by the reading side, + // since _read has to be called to start processing a new chunk. However, + // a pathological inflate type of transform can cause excessive buffering + // here. For example, imagine a stream where every byte of input is + // interpreted as an integer from 0-255, and then results in that many + // bytes of output. Writing the 4 bytes {ff,ff,ff,ff} would result in + // 1kb of data being output. In this case, you could write a very small + // amount of input, and end up with a very large amount of output. In + // such a pathological inflating mechanism, there'd be no way to tell + // the system to stop doing the transform. A single 4MB write could + // cause the system to run out of memory. + // + // However, even in such a pathological case, only a single written chunk + // would be consumed, and then the rest would wait (un-transformed) until + // the results of the previous transformed chunk were consumed. + inherits$1(Transform, Duplex); function TransformState(stream) { diff --git a/packages/stream-transform/dist/umd/index.js b/packages/stream-transform/dist/umd/index.js index 4b8474ffe..75e5361c9 100644 --- a/packages/stream-transform/dist/umd/index.js +++ b/packages/stream-transform/dist/umd/index.js @@ -3301,6 +3301,26 @@ }; // Copyright Joyent, Inc. and other Node contributors. + // + // Permission is hereby granted, free of charge, to any person obtaining a + // copy of this software and associated documentation files (the + // "Software"), to deal in the Software without restriction, including + // without limitation the rights to use, copy, modify, merge, publish, + // distribute, sublicense, and/or sell copies of the Software, and to permit + // persons to whom the Software is furnished to do so, subject to the + // following conditions: + // + // The above copyright notice and this permission notice shall be included + // in all copies or substantial portions of the Software. + // + // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + // OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + // MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN + // NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, + // DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR + // OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE + // USE OR OTHER DEALINGS IN THE SOFTWARE. 
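The Joyent license header above fronts Node's vendored string_decoder, which the stream code relies on so that multi-byte characters split across chunks decode cleanly. For context, the public module behaves as sketched below (the euro sign is three bytes in UTF-8; a naive toString() per chunk would emit replacement characters instead):

import { StringDecoder } from 'node:string_decoder';

const decoder = new StringDecoder('utf8');
// 0xE2 0x82 0xAC is '€'; the first write holds the partial sequence.
console.log(JSON.stringify(decoder.write(Buffer.from([0xe2, 0x82])))); // ""
console.log(JSON.stringify(decoder.write(Buffer.from([0xac]))));       // "€"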
+ var isBufferEncoding = Buffer.isEncoding || function(encoding) { switch (encoding && encoding.toLowerCase()) { @@ -4387,6 +4407,9 @@ } // A bit simpler than readable streams. + // Implement an async ._write(chunk, encoding, cb), and it'll handle all + // the drain event emission and buffering. + Writable.WritableState = WritableState; inherits$1(Writable, EventEmitter); @@ -4898,6 +4921,47 @@ } // a transform stream is a readable/writable stream where you do + // something with the data. Sometimes it's called a "filter", + // but that's not a great name for it, since that implies a thing where + // some bits pass through, and others are simply ignored. (That would + // be a valid example of a transform, of course.) + // + // While the output is causally related to the input, it's not a + // necessarily symmetric or synchronous transformation. For example, + // a zlib stream might take multiple plain-text writes(), and then + // emit a single compressed chunk some time in the future. + // + // Here's how this works: + // + // The Transform stream has all the aspects of the readable and writable + // stream classes. When you write(chunk), that calls _write(chunk,cb) + // internally, and returns false if there's a lot of pending writes + // buffered up. When you call read(), that calls _read(n) until + // there's enough pending readable data buffered up. + // + // In a transform stream, the written data is placed in a buffer. When + // _read(n) is called, it transforms the queued up data, calling the + // buffered _write cb's as it consumes chunks. If consuming a single + // written chunk would result in multiple output chunks, then the first + // outputted bit calls the readcb, and subsequent chunks just go into + // the read buffer, and will cause it to emit 'readable' if necessary. + // + // This way, back-pressure is actually determined by the reading side, + // since _read has to be called to start processing a new chunk. However, + // a pathological inflate type of transform can cause excessive buffering + // here. For example, imagine a stream where every byte of input is + // interpreted as an integer from 0-255, and then results in that many + // bytes of output. Writing the 4 bytes {ff,ff,ff,ff} would result in + // 1kb of data being output. In this case, you could write a very small + // amount of input, and end up with a very large amount of output. In + // such a pathological inflating mechanism, there'd be no way to tell + // the system to stop doing the transform. A single 4MB write could + // cause the system to run out of memory. + // + // However, even in such a pathological case, only a single written chunk + // would be consumed, and then the rest would wait (un-transformed) until + // the results of the previous transformed chunk were consumed. + inherits$1(Transform, Duplex); function TransformState(stream) { diff --git a/packages/stream-transform/dist/umd/sync.js b/packages/stream-transform/dist/umd/sync.js index 36174ca4f..cd95d5df5 100644 --- a/packages/stream-transform/dist/umd/sync.js +++ b/packages/stream-transform/dist/umd/sync.js @@ -3301,6 +3301,26 @@ }; // Copyright Joyent, Inc. and other Node contributors. 
diff --git a/packages/stream-transform/package.json b/packages/stream-transform/package.json
index 3f0a156e3..9a38d45d9 100644
--- a/packages/stream-transform/package.json
+++ b/packages/stream-transform/package.json
@@ -84,7 +84,9 @@
     "build": "npm run build:rollup && npm run build:ts",
     "build:rollup": "npx rollup -c",
     "build:ts": "cp lib/index.d.ts dist/cjs/index.d.cts && cp lib/sync.d.ts dist/cjs/sync.d.cts && cp lib/*.ts dist/esm",
+    "postbuild:ts": "find dist/cjs -name '*.d.cts' -exec sh -c \"sed -i \\\"s/\\.js'/\\.cjs'/g\\\" {} || sed -i '' \\\"s/\\.js'/\\.cjs'/g\\\" {}\" \\;",
     "lint": "npm run lint:lib && npm run lint:samples && npm run lint:test",
+    "postlint": "tsc --noEmit true",
     "lint:lib": "eslint --fix lib/*.js",
     "lint:samples": "eslint --fix samples/*.js",
     "lint:test": "coffeelint --fix test/*.coffee",
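The `postbuild:ts` hook above rewrites `.js` import specifiers to `.cjs` inside the copied `*.d.cts` declaration files, attempting GNU `sed -i` first and falling back to the BSD/macOS `sed -i ''` form (the inner double quotes are escaped so the `sh -c` string survives both JSON and shell parsing). For readers who would rather sidestep sed portability entirely, a hypothetical Node equivalent of the same substitution (flat directory assumed for brevity; not part of this diff):

```js
import { readdirSync, readFileSync, writeFileSync } from 'fs';
import { join } from 'path';

const dir = 'dist/cjs';
for (const name of readdirSync(dir)) {
  if (!name.endsWith('.d.cts')) continue;
  const file = join(dir, name);
  // Same substitution as the sed script: "./index.js'" -> "./index.cjs'"
  const source = readFileSync(file, 'utf8').replace(/\.js'/g, ".cjs'");
  writeFileSync(file, source);
}
```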
diff --git a/packages/stream-transform/samples/api.mixed.output.stream.js b/packages/stream-transform/samples/api.mixed.output.stream.js
index b6b48ea8d..fee1a7d4a 100644
--- a/packages/stream-transform/samples/api.mixed.output.stream.js
+++ b/packages/stream-transform/samples/api.mixed.output.stream.js
@@ -4,7 +4,7 @@
 import assert from 'assert';
 
 const records = [];
 // Create the parser
-const transformer = transform([
+transform([
   ['1','2','3','4'],
   ['a','b','c','d']
 ], function(record){