Initial Save

jackbeeby
2025-03-28 12:30:19 +11:00
parent e381994f19
commit d8773925e8
9910 changed files with 982718 additions and 0 deletions

1
node_modules/fs-capacitor/.eslintignore generated vendored Normal file

@@ -0,0 +1 @@
lib

11
node_modules/fs-capacitor/.eslintrc.json generated vendored Normal file

@@ -0,0 +1,11 @@
{
"extends": ["env"],
"rules": {
"require-jsdoc": "off"
},
"settings": {
"node": {
"tryExtensions": [".mjs", ".js", ".json", ".node"]
}
}
}

5
node_modules/fs-capacitor/.huskyrc.json generated vendored Normal file

@@ -0,0 +1,5 @@
{
"hooks": {
"pre-commit": "npm test"
}
}

4
node_modules/fs-capacitor/.lintstagedrc.json generated vendored Normal file

@@ -0,0 +1,4 @@
{
"*.{mjs,js}": "eslint",
"*.{json,yml,md}": "prettier -l"
}

7
node_modules/fs-capacitor/.npmignore generated vendored Normal file

@@ -0,0 +1,7 @@
.DS_Store
node_modules
package-lock.json
npm-debug.log
yarn.lock
yarn-error.log
lib

1
node_modules/fs-capacitor/.npmrc generated vendored Normal file

@@ -0,0 +1 @@
package-lock=false

2
node_modules/fs-capacitor/.prettierignore generated vendored Normal file

@@ -0,0 +1,2 @@
package.json
package-lock.json

3
node_modules/fs-capacitor/.prettierrc.json generated vendored Normal file

@@ -0,0 +1,3 @@
{
"proseWrap": "never"
}

8
node_modules/fs-capacitor/.travis.yml generated vendored Normal file

@@ -0,0 +1,8 @@
language: node_js
node_js:
- "8"
- "10"
- "12"
- "node"
notifications:
email: false

13
node_modules/fs-capacitor/babel.config.js generated vendored Normal file

@@ -0,0 +1,13 @@
module.exports = {
comments: false,
presets: [
[
"@babel/env",
{
modules: process.env.BABEL_ESM ? false : "commonjs",
shippedProposals: true,
loose: true
}
]
]
};

43
node_modules/fs-capacitor/changelog.md generated vendored Normal file

@@ -0,0 +1,43 @@
# fs-capacitor changelog
## 1.0.0
- Initial release.
## 1.0.1
- Use default fs flags and mode.
## 2.0.0
- Updated dependencies.
- Add tests for special stream scenarios.
- BREAKING: Remove special handling of terminating events, see [jaydenseric/graphql-upload#131](https://github.com/jaydenseric/graphql-upload/issues/131).
## 2.0.1
- Updated dependencies.
- Move configs out of package.json.
- Use `wx` file flag instead of default `w` (thanks to @mattbretl via #8).
## 2.0.2
- Updated dev dependencies.
- Fix mjs structure to work with node v12.
- Fix a bug that would pause consumption of read streams until completion (thanks to @Nikosmonaut's investigation in #9).
## 2.0.3
- Emit write event _after_ bytes have been written to the filesystem.
## 2.0.4
- Revert support for Node.js v12 `--experimental-modules` mode that was published in [v2.0.2](https://github.com/mike-marcacci/fs-capacitor/releases/tag/v2.0.2) that broke compatibility with earlier Node.js versions and test both ESM and CJS builds (skipping `--experimental-modules` tests for Node.js v12), via [#11](https://github.com/mike-marcacci/fs-capacitor/pull/11).
- Use package `browserslist` field instead of configuring `@babel/preset-env` directly.
- Configure `@babel/preset-env` to use shipped proposals and loose mode.
- Give dev tool config files `.json` extensions so they can be Prettier linted.
- Don't Prettier ignore the `lib` directory; it's meant to be pretty.
- Prettier ignore `package.json` and `package-lock.json` so npm can own the formatting.
- Configure [`eslint-plugin-node`](https://npm.im/eslint-plugin-node) to resolve `.mjs` before `.js` and other extensions, for compatibility with the pre Node.js v12 `--experimental-modules` behavior.
- Don't ESLint ignore `node_modules`, as it's already ignored by default.
- Use the `classic` TAP reporter for tests as it has more compact output.

230
node_modules/fs-capacitor/lib/index.js generated vendored Normal file

@@ -0,0 +1,230 @@
"use strict";
exports.__esModule = true;
exports.default = exports.WriteStream = exports.ReadStream = exports.ReadAfterDestroyedError = void 0;
var _crypto = _interopRequireDefault(require("crypto"));
var _fs = _interopRequireDefault(require("fs"));
var _os = _interopRequireDefault(require("os"));
var _path = _interopRequireDefault(require("path"));
function _interopRequireDefault(obj) {
return obj && obj.__esModule ? obj : { default: obj };
}
class ReadAfterDestroyedError extends Error {}
exports.ReadAfterDestroyedError = ReadAfterDestroyedError;
class ReadStream extends _fs.default.ReadStream {
constructor(writeStream, name) {
super("", {});
this.name = name;
this._writeStream = writeStream;
this.error = this._writeStream.error;
this.addListener("error", error => {
this.error = error;
});
this.open();
}
get ended() {
return this._readableState.ended;
}
_read(n) {
if (typeof this.fd !== "number")
return this.once("open", function() {
this._read(n);
});
if (this._writeStream.finished || this._writeStream.closed)
return super._read(n);
const unread = this._writeStream.bytesWritten - this.bytesRead;
if (unread === 0) {
const retry = () => {
this._writeStream.removeListener("finish", retry);
this._writeStream.removeListener("write", retry);
this._read(n);
};
this._writeStream.addListener("finish", retry);
this._writeStream.addListener("write", retry);
return;
}
return super._read(Math.min(n, unread));
}
_destroy(error, callback) {
if (typeof this.fd !== "number") {
this.once("open", this._destroy.bind(this, error, callback));
return;
}
_fs.default.close(this.fd, closeError => {
callback(closeError || error);
this.fd = null;
this.closed = true;
this.emit("close");
});
}
open() {
if (!this._writeStream) return;
if (typeof this._writeStream.fd !== "number") {
this._writeStream.once("open", () => this.open());
return;
}
this.path = this._writeStream.path;
super.open();
}
}
exports.ReadStream = ReadStream;
class WriteStream extends _fs.default.WriteStream {
constructor() {
super("", {
autoClose: false
});
this._readStreams = new Set();
this.error = null;
this._cleanupSync = () => {
process.removeListener("exit", this._cleanupSync);
process.removeListener("SIGINT", this._cleanupSync);
if (typeof this.fd === "number")
try {
_fs.default.closeSync(this.fd);
} catch (error) {}
try {
_fs.default.unlinkSync(this.path);
} catch (error) {}
};
}
get finished() {
return this._writableState.finished;
}
open() {
_crypto.default.randomBytes(16, (error, buffer) => {
if (error) {
this.destroy(error);
return;
}
this.path = _path.default.join(
_os.default.tmpdir(),
`capacitor-${buffer.toString("hex")}.tmp`
);
_fs.default.open(this.path, "wx", this.mode, (error, fd) => {
if (error) {
this.destroy(error);
return;
}
process.addListener("exit", this._cleanupSync);
process.addListener("SIGINT", this._cleanupSync);
this.fd = fd;
this.emit("open", fd);
this.emit("ready");
});
});
}
_write(chunk, encoding, callback) {
super._write(chunk, encoding, error => {
if (!error) this.emit("write");
callback(error);
});
}
_destroy(error, callback) {
if (typeof this.fd !== "number") {
this.once("open", this._destroy.bind(this, error, callback));
return;
}
process.removeListener("exit", this._cleanupSync);
process.removeListener("SIGINT", this._cleanupSync);
const unlink = error => {
_fs.default.unlink(this.path, unlinkError => {
callback(unlinkError || error);
this.fd = null;
this.closed = true;
this.emit("close");
});
};
if (typeof this.fd === "number") {
_fs.default.close(this.fd, closeError => {
unlink(closeError || error);
});
return;
}
unlink(error);
}
destroy(error, callback) {
if (error) this.error = error;
if (this.destroyed) return super.destroy(error, callback);
if (typeof callback === "function")
this.once("close", callback.bind(this, error));
if (this._readStreams.size === 0) {
super.destroy(error, callback);
return;
}
this._destroyPending = true;
if (error)
for (let readStream of this._readStreams) readStream.destroy(error);
}
createReadStream(name) {
if (this.destroyed)
throw new ReadAfterDestroyedError(
"A ReadStream cannot be created from a destroyed WriteStream."
);
const readStream = new ReadStream(this, name);
this._readStreams.add(readStream);
const remove = () => {
this._deleteReadStream(readStream);
readStream.removeListener("end", remove);
readStream.removeListener("close", remove);
};
readStream.addListener("end", remove);
readStream.addListener("close", remove);
return readStream;
}
_deleteReadStream(readStream) {
if (this._readStreams.delete(readStream) && this._destroyPending)
this.destroy();
}
}
exports.WriteStream = WriteStream;
var _default = WriteStream;
exports.default = _default;

206
node_modules/fs-capacitor/lib/index.mjs generated vendored Normal file

@@ -0,0 +1,206 @@
import crypto from "crypto";
import fs from "fs";
import os from "os";
import path from "path";
export class ReadAfterDestroyedError extends Error {}
export class ReadStream extends fs.ReadStream {
constructor(writeStream, name) {
super("", {});
this.name = name;
this._writeStream = writeStream;
this.error = this._writeStream.error;
this.addListener("error", error => {
this.error = error;
});
this.open();
}
get ended() {
return this._readableState.ended;
}
_read(n) {
if (typeof this.fd !== "number")
return this.once("open", function() {
this._read(n);
});
if (this._writeStream.finished || this._writeStream.closed)
return super._read(n);
const unread = this._writeStream.bytesWritten - this.bytesRead;
if (unread === 0) {
const retry = () => {
this._writeStream.removeListener("finish", retry);
this._writeStream.removeListener("write", retry);
this._read(n);
};
this._writeStream.addListener("finish", retry);
this._writeStream.addListener("write", retry);
return;
}
return super._read(Math.min(n, unread));
}
_destroy(error, callback) {
if (typeof this.fd !== "number") {
this.once("open", this._destroy.bind(this, error, callback));
return;
}
fs.close(this.fd, closeError => {
callback(closeError || error);
this.fd = null;
this.closed = true;
this.emit("close");
});
}
open() {
if (!this._writeStream) return;
if (typeof this._writeStream.fd !== "number") {
this._writeStream.once("open", () => this.open());
return;
}
this.path = this._writeStream.path;
super.open();
}
}
export class WriteStream extends fs.WriteStream {
constructor() {
super("", {
autoClose: false
});
this._readStreams = new Set();
this.error = null;
this._cleanupSync = () => {
process.removeListener("exit", this._cleanupSync);
process.removeListener("SIGINT", this._cleanupSync);
if (typeof this.fd === "number")
try {
fs.closeSync(this.fd);
} catch (error) {}
try {
fs.unlinkSync(this.path);
} catch (error) {}
};
}
get finished() {
return this._writableState.finished;
}
open() {
crypto.randomBytes(16, (error, buffer) => {
if (error) {
this.destroy(error);
return;
}
this.path = path.join(
os.tmpdir(),
`capacitor-${buffer.toString("hex")}.tmp`
);
fs.open(this.path, "wx", this.mode, (error, fd) => {
if (error) {
this.destroy(error);
return;
}
process.addListener("exit", this._cleanupSync);
process.addListener("SIGINT", this._cleanupSync);
this.fd = fd;
this.emit("open", fd);
this.emit("ready");
});
});
}
_write(chunk, encoding, callback) {
super._write(chunk, encoding, error => {
if (!error) this.emit("write");
callback(error);
});
}
_destroy(error, callback) {
if (typeof this.fd !== "number") {
this.once("open", this._destroy.bind(this, error, callback));
return;
}
process.removeListener("exit", this._cleanupSync);
process.removeListener("SIGINT", this._cleanupSync);
const unlink = error => {
fs.unlink(this.path, unlinkError => {
callback(unlinkError || error);
this.fd = null;
this.closed = true;
this.emit("close");
});
};
if (typeof this.fd === "number") {
fs.close(this.fd, closeError => {
unlink(closeError || error);
});
return;
}
unlink(error);
}
destroy(error, callback) {
if (error) this.error = error;
if (this.destroyed) return super.destroy(error, callback);
if (typeof callback === "function")
this.once("close", callback.bind(this, error));
if (this._readStreams.size === 0) {
super.destroy(error, callback);
return;
}
this._destroyPending = true;
if (error)
for (let readStream of this._readStreams) readStream.destroy(error);
}
createReadStream(name) {
if (this.destroyed)
throw new ReadAfterDestroyedError(
"A ReadStream cannot be created from a destroyed WriteStream."
);
const readStream = new ReadStream(this, name);
this._readStreams.add(readStream);
const remove = () => {
this._deleteReadStream(readStream);
readStream.removeListener("end", remove);
readStream.removeListener("close", remove);
};
readStream.addListener("end", remove);
readStream.addListener("close", remove);
return readStream;
}
_deleteReadStream(readStream) {
if (this._readStreams.delete(readStream) && this._destroyPending)
this.destroy();
}
}
export default WriteStream;

374
node_modules/fs-capacitor/lib/test.js generated vendored Normal file

@@ -0,0 +1,374 @@
"use strict";
require("leaked-handles");
var _fs = _interopRequireDefault(require("fs"));
var _stream = _interopRequireDefault(require("stream"));
var _tap = _interopRequireDefault(require("tap"));
var _ = _interopRequireDefault(require("."));
function _interopRequireDefault(obj) {
return obj && obj.__esModule ? obj : { default: obj };
}
const streamToString = stream =>
new Promise((resolve, reject) => {
let ended = false;
let data = "";
stream
.on("error", reject)
.on("data", chunk => {
if (ended) throw new Error("`data` emitted after `end`");
data += chunk;
})
.on("end", () => {
ended = true;
resolve(data);
});
});
const waitForBytesWritten = (stream, bytes, resolve) => {
if (stream.bytesWritten >= bytes) {
setImmediate(resolve);
return;
}
setImmediate(() => waitForBytesWritten(stream, bytes, resolve));
};
_tap.default.test("Data from a complete stream.", async t => {
let data = "";
const source = new _stream.default.Readable({
read() {}
});
const chunk1 = "1".repeat(10);
source.push(chunk1);
source.push(null);
data += chunk1;
let capacitor1 = new _.default();
t.strictSame(
capacitor1._readStreams.size,
0,
"should start with 0 read streams"
);
source.pipe(capacitor1);
const capacitor1Stream1 = capacitor1.createReadStream("capacitor1Stream1");
t.strictSame(
capacitor1._readStreams.size,
1,
"should attach a new read stream before receiving data"
);
const result = await streamToString(capacitor1Stream1);
t.strictSame(result, data, "should stream all data");
t.strictSame(
capacitor1._readStreams.size,
0,
"should no longer have any attached read streams"
);
});
_tap.default.test(
"Data from an open stream, 1 chunk, no read streams.",
async t => {
let data = "";
const source = new _stream.default.Readable({
read() {}
});
let capacitor1 = new _.default();
t.strictSame(
capacitor1._readStreams.size,
0,
"should start with 0 read streams"
);
source.pipe(capacitor1);
const chunk1 = "1".repeat(10);
source.push(chunk1);
source.push(null);
data += chunk1;
const capacitor1Stream1 = capacitor1.createReadStream("capacitor1Stream1");
t.strictSame(
capacitor1._readStreams.size,
1,
"should attach a new read stream before receiving data"
);
const result = await streamToString(capacitor1Stream1);
t.strictSame(result, data, "should stream all data");
t.strictSame(
capacitor1._readStreams.size,
0,
"should no longer have any attached read streams"
);
}
);
_tap.default.test(
"Data from an open stream, 1 chunk, 1 read stream.",
async t => {
let data = "";
const source = new _stream.default.Readable({
read() {}
});
let capacitor1 = new _.default();
t.strictSame(
capacitor1._readStreams.size,
0,
"should start with 0 read streams"
);
source.pipe(capacitor1);
const capacitor1Stream1 = capacitor1.createReadStream("capacitor1Stream1");
t.strictSame(
capacitor1._readStreams.size,
1,
"should attach a new read stream before receiving data"
);
const chunk1 = "1".repeat(10);
source.push(chunk1);
source.push(null);
data += chunk1;
const result = await streamToString(capacitor1Stream1);
t.strictSame(result, data, "should stream all data");
t.strictSame(
capacitor1._readStreams.size,
0,
"should no longer have any attached read streams"
);
}
);
const withChunkSize = size =>
_tap.default.test(`--- with chunk size: ${size}`, async t => {
let data = "";
const source = new _stream.default.Readable({
read() {}
});
let capacitor1;
let capacitor1Stream1;
await t.test(
"can add a read stream before any data has been written",
async t => {
capacitor1 = new _.default();
t.strictSame(
capacitor1._readStreams.size,
0,
"should start with 0 read streams"
);
capacitor1Stream1 = capacitor1.createReadStream("capacitor1Stream1");
t.strictSame(
capacitor1._readStreams.size,
1,
"should attach a new read stream before receiving data"
);
await t.test("creates a temporary file", async t => {
t.plan(3);
await new Promise(resolve => capacitor1.on("open", resolve));
t.type(
capacitor1.path,
"string",
"capacitor1.path should be a string"
);
t.type(capacitor1.fd, "number", "capacitor1.fd should be a number");
t.ok(_fs.default.existsSync(capacitor1.path), "creates a temp file");
});
}
);
source.pipe(capacitor1);
const chunk1 = "1".repeat(size);
source.push(chunk1);
data += chunk1;
await new Promise(resolve =>
waitForBytesWritten(capacitor1, size, resolve)
);
let capacitor1Stream2;
t.test("can add a read stream after data has been written", t => {
capacitor1Stream2 = capacitor1.createReadStream("capacitor1Stream2");
t.strictSame(
capacitor1._readStreams.size,
2,
"should attach a new read stream after first write"
);
t.end();
});
const writeEventBytesWritten = new Promise(resolve => {
capacitor1.once("write", () => {
resolve(capacitor1.bytesWritten);
});
});
const chunk2 = "2".repeat(size);
source.push(chunk2);
data += chunk2;
await new Promise(resolve =>
waitForBytesWritten(capacitor1, 2 * size, resolve)
);
await t.test("write event emitted after bytes are written", async t => {
t.strictSame(
await writeEventBytesWritten,
2 * size,
"bytesWritten should include new chunk"
);
});
const finished = new Promise(resolve => capacitor1.once("finish", resolve));
source.push(null);
await finished;
let capacitor1Stream3;
let capacitor1Stream4;
t.test("can create a read stream after the source has ended", t => {
capacitor1Stream3 = capacitor1.createReadStream("capacitor1Stream3");
capacitor1Stream4 = capacitor1.createReadStream("capacitor1Stream4");
t.strictSame(
capacitor1._readStreams.size,
4,
"should attach new read streams after end"
);
t.end();
});
await t.test("streams complete data to a read stream", async t => {
const result2 = await streamToString(capacitor1Stream2);
t.strictSame(
capacitor1Stream2.ended,
true,
"should mark read stream as ended"
);
t.strictSame(result2, data, "should stream complete data");
const result4 = await streamToString(capacitor1Stream4);
t.strictSame(
capacitor1Stream4.ended,
true,
"should mark read stream as ended"
);
t.strictSame(result4, data, "should stream complete data");
t.strictSame(
capacitor1._readStreams.size,
2,
"should detach an ended read stream"
);
});
await t.test("can destroy a read stream", async t => {
await new Promise(resolve => {
capacitor1Stream1.once("error", resolve);
capacitor1Stream1.destroy(new Error("test"));
});
t.strictSame(
capacitor1Stream1.destroyed,
true,
"should mark read stream as destroyed"
);
t.type(
capacitor1Stream1.error,
Error,
"should store an error on read stream"
);
t.strictSame(
capacitor1._readStreams.size,
1,
"should detach a destroyed read stream"
);
});
t.test("can delay destruction of a capacitor", t => {
capacitor1.destroy(null);
t.strictSame(
capacitor1.destroyed,
false,
"should not destroy while read streams exist"
);
t.strictSame(
capacitor1._destroyPending,
true,
"should mark for future destruction"
);
t.end();
});
await t.test("destroys capacitor once no read streams exist", async t => {
const readStreamDestroyed = new Promise(resolve =>
capacitor1Stream3.on("close", resolve)
);
const capacitorDestroyed = new Promise(resolve =>
capacitor1.on("close", resolve)
);
capacitor1Stream3.destroy(null);
await readStreamDestroyed;
t.strictSame(
capacitor1Stream3.destroyed,
true,
"should mark read stream as destroyed"
);
t.strictSame(
capacitor1Stream3.error,
null,
"should not store an error on read stream"
);
t.strictSame(
capacitor1._readStreams.size,
0,
"should detach a destroyed read stream"
);
await capacitorDestroyed;
t.strictSame(capacitor1.closed, true, "should mark capacitor as closed");
t.strictSame(capacitor1.fd, null, "should set fd to null");
t.strictSame(
capacitor1.destroyed,
true,
"should mark capacitor as destroyed"
);
t.notOk(_fs.default.existsSync(capacitor1.path), "removes its temp file");
});
t.test("cannot create a read stream after destruction", t => {
try {
capacitor1.createReadStream();
} catch (error) {
t.ok(
error instanceof _.ReadAfterDestroyedError,
"should not create a read stream once destroyed"
);
t.end();
}
});
const capacitor2 = new _.default();
const capacitor2Stream1 = capacitor2.createReadStream("capacitor2Stream1");
const capacitor2Stream2 = capacitor2.createReadStream("capacitor2Stream2");
const capacitor2ReadStream1Destroyed = new Promise(resolve =>
capacitor2Stream1.on("close", resolve)
);
const capacitor2Destroyed = new Promise(resolve =>
capacitor2.on("close", resolve)
);
capacitor2Stream1.destroy();
await capacitor2ReadStream1Destroyed;
await t.test("propagates errors to attached read streams", async t => {
capacitor2.destroy();
await new Promise(resolve => setImmediate(resolve));
t.strictSame(
capacitor2Stream2.destroyed,
false,
"should not immediately mark attached read streams as destroyed"
);
capacitor2.destroy(new Error("test"));
await capacitor2Destroyed;
t.type(capacitor2.error, Error, "should store an error on capacitor");
t.strictSame(
capacitor2.destroyed,
true,
"should mark capacitor as destroyed"
);
t.type(
capacitor2Stream2.error,
Error,
"should store an error on attached read streams"
);
t.strictSame(
capacitor2Stream2.destroyed,
true,
"should mark attached read streams as destroyed"
);
t.strictSame(
capacitor2Stream1.error,
null,
"should not store an error on detached read streams"
);
});
});
withChunkSize(10);
withChunkSize(100000);

356
node_modules/fs-capacitor/lib/test.mjs generated vendored Normal file

@@ -0,0 +1,356 @@
import "leaked-handles";
import fs from "fs";
import stream from "stream";
import t from "tap";
import WriteStream, { ReadAfterDestroyedError } from ".";
const streamToString = stream =>
new Promise((resolve, reject) => {
let ended = false;
let data = "";
stream
.on("error", reject)
.on("data", chunk => {
if (ended) throw new Error("`data` emitted after `end`");
data += chunk;
})
.on("end", () => {
ended = true;
resolve(data);
});
});
const waitForBytesWritten = (stream, bytes, resolve) => {
if (stream.bytesWritten >= bytes) {
setImmediate(resolve);
return;
}
setImmediate(() => waitForBytesWritten(stream, bytes, resolve));
};
t.test("Data from a complete stream.", async t => {
let data = "";
const source = new stream.Readable({
read() {}
});
const chunk1 = "1".repeat(10);
source.push(chunk1);
source.push(null);
data += chunk1;
let capacitor1 = new WriteStream();
t.strictSame(
capacitor1._readStreams.size,
0,
"should start with 0 read streams"
);
source.pipe(capacitor1);
const capacitor1Stream1 = capacitor1.createReadStream("capacitor1Stream1");
t.strictSame(
capacitor1._readStreams.size,
1,
"should attach a new read stream before receiving data"
);
const result = await streamToString(capacitor1Stream1);
t.strictSame(result, data, "should stream all data");
t.strictSame(
capacitor1._readStreams.size,
0,
"should no longer have any attached read streams"
);
});
t.test("Data from an open stream, 1 chunk, no read streams.", async t => {
let data = "";
const source = new stream.Readable({
read() {}
});
let capacitor1 = new WriteStream();
t.strictSame(
capacitor1._readStreams.size,
0,
"should start with 0 read streams"
);
source.pipe(capacitor1);
const chunk1 = "1".repeat(10);
source.push(chunk1);
source.push(null);
data += chunk1;
const capacitor1Stream1 = capacitor1.createReadStream("capacitor1Stream1");
t.strictSame(
capacitor1._readStreams.size,
1,
"should attach a new read stream before receiving data"
);
const result = await streamToString(capacitor1Stream1);
t.strictSame(result, data, "should stream all data");
t.strictSame(
capacitor1._readStreams.size,
0,
"should no longer have any attached read streams"
);
});
t.test("Data from an open stream, 1 chunk, 1 read stream.", async t => {
let data = "";
const source = new stream.Readable({
read() {}
});
let capacitor1 = new WriteStream();
t.strictSame(
capacitor1._readStreams.size,
0,
"should start with 0 read streams"
);
source.pipe(capacitor1);
const capacitor1Stream1 = capacitor1.createReadStream("capacitor1Stream1");
t.strictSame(
capacitor1._readStreams.size,
1,
"should attach a new read stream before receiving data"
);
const chunk1 = "1".repeat(10);
source.push(chunk1);
source.push(null);
data += chunk1;
const result = await streamToString(capacitor1Stream1);
t.strictSame(result, data, "should stream all data");
t.strictSame(
capacitor1._readStreams.size,
0,
"should no longer have any attached read streams"
);
});
const withChunkSize = size =>
t.test(`--- with chunk size: ${size}`, async t => {
let data = "";
const source = new stream.Readable({
read() {}
});
let capacitor1;
let capacitor1Stream1;
await t.test(
"can add a read stream before any data has been written",
async t => {
capacitor1 = new WriteStream();
t.strictSame(
capacitor1._readStreams.size,
0,
"should start with 0 read streams"
);
capacitor1Stream1 = capacitor1.createReadStream("capacitor1Stream1");
t.strictSame(
capacitor1._readStreams.size,
1,
"should attach a new read stream before receiving data"
);
await t.test("creates a temporary file", async t => {
t.plan(3);
await new Promise(resolve => capacitor1.on("open", resolve));
t.type(
capacitor1.path,
"string",
"capacitor1.path should be a string"
);
t.type(capacitor1.fd, "number", "capacitor1.fd should be a number");
t.ok(fs.existsSync(capacitor1.path), "creates a temp file");
});
}
);
source.pipe(capacitor1);
const chunk1 = "1".repeat(size);
source.push(chunk1);
data += chunk1;
await new Promise(resolve =>
waitForBytesWritten(capacitor1, size, resolve)
);
let capacitor1Stream2;
t.test("can add a read stream after data has been written", t => {
capacitor1Stream2 = capacitor1.createReadStream("capacitor1Stream2");
t.strictSame(
capacitor1._readStreams.size,
2,
"should attach a new read stream after first write"
);
t.end();
});
const writeEventBytesWritten = new Promise(resolve => {
capacitor1.once("write", () => {
resolve(capacitor1.bytesWritten);
});
});
const chunk2 = "2".repeat(size);
source.push(chunk2);
data += chunk2;
await new Promise(resolve =>
waitForBytesWritten(capacitor1, 2 * size, resolve)
);
await t.test("write event emitted after bytes are written", async t => {
t.strictSame(
await writeEventBytesWritten,
2 * size,
"bytesWritten should include new chunk"
);
});
const finished = new Promise(resolve => capacitor1.once("finish", resolve));
source.push(null);
await finished;
let capacitor1Stream3;
let capacitor1Stream4;
t.test("can create a read stream after the source has ended", t => {
capacitor1Stream3 = capacitor1.createReadStream("capacitor1Stream3");
capacitor1Stream4 = capacitor1.createReadStream("capacitor1Stream4");
t.strictSame(
capacitor1._readStreams.size,
4,
"should attach new read streams after end"
);
t.end();
});
await t.test("streams complete data to a read stream", async t => {
const result2 = await streamToString(capacitor1Stream2);
t.strictSame(
capacitor1Stream2.ended,
true,
"should mark read stream as ended"
);
t.strictSame(result2, data, "should stream complete data");
const result4 = await streamToString(capacitor1Stream4);
t.strictSame(
capacitor1Stream4.ended,
true,
"should mark read stream as ended"
);
t.strictSame(result4, data, "should stream complete data");
t.strictSame(
capacitor1._readStreams.size,
2,
"should detach an ended read stream"
);
});
await t.test("can destroy a read stream", async t => {
await new Promise(resolve => {
capacitor1Stream1.once("error", resolve);
capacitor1Stream1.destroy(new Error("test"));
});
t.strictSame(
capacitor1Stream1.destroyed,
true,
"should mark read stream as destroyed"
);
t.type(
capacitor1Stream1.error,
Error,
"should store an error on read stream"
);
t.strictSame(
capacitor1._readStreams.size,
1,
"should detach a destroyed read stream"
);
});
t.test("can delay destruction of a capacitor", t => {
capacitor1.destroy(null);
t.strictSame(
capacitor1.destroyed,
false,
"should not destroy while read streams exist"
);
t.strictSame(
capacitor1._destroyPending,
true,
"should mark for future destruction"
);
t.end();
});
await t.test("destroys capacitor once no read streams exist", async t => {
const readStreamDestroyed = new Promise(resolve =>
capacitor1Stream3.on("close", resolve)
);
const capacitorDestroyed = new Promise(resolve =>
capacitor1.on("close", resolve)
);
capacitor1Stream3.destroy(null);
await readStreamDestroyed;
t.strictSame(
capacitor1Stream3.destroyed,
true,
"should mark read stream as destroyed"
);
t.strictSame(
capacitor1Stream3.error,
null,
"should not store an error on read stream"
);
t.strictSame(
capacitor1._readStreams.size,
0,
"should detach a destroyed read stream"
);
await capacitorDestroyed;
t.strictSame(capacitor1.closed, true, "should mark capacitor as closed");
t.strictSame(capacitor1.fd, null, "should set fd to null");
t.strictSame(
capacitor1.destroyed,
true,
"should mark capacitor as destroyed"
);
t.notOk(fs.existsSync(capacitor1.path), "removes its temp file");
});
t.test("cannot create a read stream after destruction", t => {
try {
capacitor1.createReadStream();
} catch (error) {
t.ok(
error instanceof ReadAfterDestroyedError,
"should not create a read stream once destroyed"
);
t.end();
}
});
const capacitor2 = new WriteStream();
const capacitor2Stream1 = capacitor2.createReadStream("capacitor2Stream1");
const capacitor2Stream2 = capacitor2.createReadStream("capacitor2Stream2");
const capacitor2ReadStream1Destroyed = new Promise(resolve =>
capacitor2Stream1.on("close", resolve)
);
const capacitor2Destroyed = new Promise(resolve =>
capacitor2.on("close", resolve)
);
capacitor2Stream1.destroy();
await capacitor2ReadStream1Destroyed;
await t.test("propagates errors to attached read streams", async t => {
capacitor2.destroy();
await new Promise(resolve => setImmediate(resolve));
t.strictSame(
capacitor2Stream2.destroyed,
false,
"should not immediately mark attached read streams as destroyed"
);
capacitor2.destroy(new Error("test"));
await capacitor2Destroyed;
t.type(capacitor2.error, Error, "should store an error on capacitor");
t.strictSame(
capacitor2.destroyed,
true,
"should mark capacitor as destroyed"
);
t.type(
capacitor2Stream2.error,
Error,
"should store an error on attached read streams"
);
t.strictSame(
capacitor2Stream2.destroyed,
true,
"should mark attached read streams as destroyed"
);
t.strictSame(
capacitor2Stream1.error,
null,
"should not store an error on detached read streams"
);
});
});
withChunkSize(10);
withChunkSize(100000);

61
node_modules/fs-capacitor/package.json generated vendored Normal file

@@ -0,0 +1,61 @@
{
"name": "fs-capacitor",
"version": "2.0.4",
"description": "Filesystem-buffered, passthrough stream that buffers indefinitely rather than propagate backpressure from downstream consumers.",
"license": "MIT",
"author": {
"name": "Mike Marcacci",
"email": "mike.marcacci@gmail.com"
},
"repository": "github:mike-marcacci/fs-capacitor",
"homepage": "https://github.com/mike-marcacci/fs-capacitor#readme",
"bugs": "https://github.com/mike-marcacci/fs-capacitor/issues",
"keywords": [
"stream",
"buffer",
"file",
"split",
"clone"
],
"files": [
"lib",
"!lib/test.*"
],
"main": "lib",
"engines": {
"node": ">=8.5"
},
"browserslist": "node >= 8.5",
"devDependencies": {
"@babel/cli": "^7.1.2",
"@babel/core": "^7.3.3",
"@babel/preset-env": "^7.4.4",
"babel-eslint": "^10.0.1",
"eslint": "^5.14.1",
"eslint-config-env": "^5.0.0",
"eslint-config-prettier": "^4.0.0",
"eslint-plugin-import": "^2.16.0",
"eslint-plugin-import-order-alphabetical": "^0.0.2",
"eslint-plugin-node": "^9.0.1",
"eslint-plugin-prettier": "^3.0.0",
"husky": "^2.2.0",
"if-ver": "^1.1.0",
"leaked-handles": "^5.2.0",
"lint-staged": "^8.1.4",
"prettier": "^1.16.4",
"tap": "^13.1.2"
},
"scripts": {
"prepare": "npm run prepare:clean && npm run prepare:mjs && npm run prepare:js && npm run prepare:prettier",
"prepare:clean": "rm -rf lib",
"prepare:mjs": "BABEL_ESM=1 babel src -d lib --keep-file-extension",
"prepare:js": "babel src -d lib",
"prepare:prettier": "prettier 'lib/**/*.{mjs,js}' --write",
"test": "npm run test:eslint && npm run test:prettier && npm run test:mjs && npm run test:js",
"test:eslint": "eslint . --ext mjs,js",
"test:prettier": "prettier '**/*.{json,yml,md}' -l",
"test:mjs": "if-ver -lt 12 || exit 0; node --experimental-modules --no-warnings lib/test | tap-mocha-reporter classic",
"test:js": "node lib/test | tap-mocha-reporter classic",
"prepublishOnly": "npm test"
}
}

72
node_modules/fs-capacitor/readme.md generated vendored Normal file

@@ -0,0 +1,72 @@
[![Build status](https://travis-ci.org/mike-marcacci/fs-capacitor.svg?branch=master)](https://travis-ci.org/mike-marcacci/fs-capacitor) [![Current version](https://badgen.net/npm/v/fs-capacitor)](https://npm.im/fs-capacitor) ![Supported Node.js versions](https://badgen.net/npm/node/fs-capacitor)
# FS Capacitor
FS Capacitor is a filesystem buffer for finite node streams. It supports simultaneous read/write, and can be used to create multiple independent readable streams, each starting at the beginning of the buffer.
This is useful for file uploads and other situations where you want to avoid delays to the source stream, but have slow downstream transformations to apply:
```js
import fs from "fs";
import http from "http";
import WriteStream from "fs-capacitor";
http.createServer((req, res) => {
const capacitor = new WriteStream();
const destination = fs.createWriteStream("destination.txt");
// pipe data to the capacitor
req.pipe(capacitor);
// read data from the capacitor
capacitor
.createReadStream()
.pipe(/* some slow Transform streams here */)
.pipe(destination);
// read data from the very beginning
setTimeout(() => {
capacitor.createReadStream().pipe(/* elsewhere */);
// you can destroy a capacitor as soon as no more read streams are needed
// without worrying if existing streams are fully consumed
capacitor.destroy();
}, 100);
});
```
It is especially useful for use cases like [`graphql-upload`](https://github.com/jaydenseric/graphql-upload) where server code may need to stash earlier parts of a stream until later parts have been processed, and needs to attach multiple consumers at different times.
FS Capacitor creates its temporary files in the directory identified by `os.tmpdir()` and attempts to remove them:
- after `writeStream.destroy()` has been called and all read streams are fully consumed or destroyed
- before the process exits
Please note that FS Capacitor does NOT release disk space _as data is consumed_, and therefore is not suitable for use with infinite streams or those larger than the filesystem's available disk space.
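For example, a minimal sketch of this cleanup lifecycle (the `input.txt` and `output.txt` paths are illustrative):
```js
import fs from "fs";
import WriteStream from "fs-capacitor";

const capacitor = new WriteStream();
fs.createReadStream("input.txt").pipe(capacitor);

const reader = capacitor.createReadStream();
reader.pipe(fs.createWriteStream("output.txt"));

// Mark the capacitor for destruction; the temporary file in `os.tmpdir()`
// is only unlinked once `reader` has ended or been destroyed.
capacitor.destroy();
```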
## API
### WriteStream
`WriteStream` inherits all the methods of [`fs.WriteStream`](https://nodejs.org/api/fs.html#fs_class_fs_writestream).
#### `new WriteStream()`
Create a new `WriteStream` instance.
#### `.createReadStream(): ReadStream`
Create a new `ReadStream` instance attached to the `WriteStream` instance.
Once a `WriteStream` is fully destroyed, calling `.createReadStream()` will throw a `ReadAfterDestroyedError` error.
As soon as a `ReadStream` ends or is closed (such as by calling `readStream.destroy()`), it is detached from its `WriteStream`.
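For illustration, a small sketch of this lifecycle:
```js
import WriteStream, { ReadAfterDestroyedError } from "fs-capacitor";

const capacitor = new WriteStream();

// Read streams detach from the capacitor as soon as they end or are destroyed.
const readStream = capacitor.createReadStream();
readStream.destroy();

// Once the capacitor itself is fully destroyed, further reads are refused.
capacitor.on("close", () => {
  try {
    capacitor.createReadStream();
  } catch (error) {
    console.log(error instanceof ReadAfterDestroyedError); // true
  }
});
capacitor.destroy();
```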
#### `.destroy(error?: ?Error): void`
- If `error` is present, any `ReadStream`s still attached are destroyed with the same error.
- If `error` is null or undefined, destruction of the underlying resources is delayed until no `ReadStream`s remain attached to the `WriteStream` instance (see the sketch below).
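A minimal sketch of both behaviors (the error message is illustrative):
```js
import WriteStream from "fs-capacitor";

const capacitor = new WriteStream();
const readStream = capacitor.createReadStream();

// Without an error, destruction is deferred while a read stream is attached.
capacitor.destroy();
console.log(capacitor.destroyed); // false

// With an error, the same error is propagated to attached read streams.
readStream.on("error", error => console.log(error.message)); // "aborted"
capacitor.destroy(new Error("aborted"));
```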
### ReadStream
`ReadStream` inherits all the methods of [`fs.ReadStream`](https://nodejs.org/api/fs.html#fs_class_fs_readstream).
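Because read streams are ordinary `fs.ReadStream`s under the hood, the usual readable-stream APIs apply, as in this minimal sketch:
```js
import WriteStream from "fs-capacitor";

const capacitor = new WriteStream();
capacitor.end("hello");

const readStream = capacitor.createReadStream();
readStream.setEncoding("utf8");
readStream.on("data", chunk => process.stdout.write(chunk)); // "hello"
readStream.on("end", () => capacitor.destroy());
```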

239
node_modules/fs-capacitor/src/index.mjs generated vendored Normal file

@@ -0,0 +1,239 @@
import crypto from "crypto";
import fs from "fs";
import os from "os";
import path from "path";
export class ReadAfterDestroyedError extends Error {}
export class ReadStream extends fs.ReadStream {
constructor(writeStream, name) {
super("", {});
this.name = name;
this._writeStream = writeStream;
// Persist terminating events.
this.error = this._writeStream.error;
this.addListener("error", error => {
this.error = error;
});
this.open();
}
get ended() {
return this._readableState.ended;
}
_read(n) {
if (typeof this.fd !== "number")
return this.once("open", function() {
this._read(n);
});
// The writer has finished, so the reader can continue uninterrupted.
if (this._writeStream.finished || this._writeStream.closed)
return super._read(n);
// Make sure there's something to read.
const unread = this._writeStream.bytesWritten - this.bytesRead;
if (unread === 0) {
const retry = () => {
this._writeStream.removeListener("finish", retry);
this._writeStream.removeListener("write", retry);
this._read(n);
};
this._writeStream.addListener("finish", retry);
this._writeStream.addListener("write", retry);
return;
}
// Make sure we don't get ahead of our writer.
return super._read(Math.min(n, unread));
}
_destroy(error, callback) {
if (typeof this.fd !== "number") {
this.once("open", this._destroy.bind(this, error, callback));
return;
}
fs.close(this.fd, closeError => {
callback(closeError || error);
this.fd = null;
this.closed = true;
this.emit("close");
});
}
open() {
if (!this._writeStream) return;
if (typeof this._writeStream.fd !== "number") {
this._writeStream.once("open", () => this.open());
return;
}
this.path = this._writeStream.path;
super.open();
}
}
export class WriteStream extends fs.WriteStream {
constructor() {
super("", {
autoClose: false
});
this._readStreams = new Set();
this.error = null;
this._cleanupSync = () => {
process.removeListener("exit", this._cleanupSync);
process.removeListener("SIGINT", this._cleanupSync);
if (typeof this.fd === "number")
try {
fs.closeSync(this.fd);
} catch (error) {
// An error here probably means the fd was already closed, but we can
// still try to unlink the file.
}
try {
fs.unlinkSync(this.path);
} catch (error) {
// If we are unable to unlink the file, the operating system will clean up
// on next restart, since we store these files in `os.tmpdir()`.
}
};
}
get finished() {
return this._writableState.finished;
}
open() {
// generate a random tmp path
crypto.randomBytes(16, (error, buffer) => {
if (error) {
this.destroy(error);
return;
}
this.path = path.join(
os.tmpdir(),
`capacitor-${buffer.toString("hex")}.tmp`
);
// create the file
fs.open(this.path, "wx", this.mode, (error, fd) => {
if (error) {
this.destroy(error);
return;
}
// cleanup when our stream closes or when the process exits
process.addListener("exit", this._cleanupSync);
process.addListener("SIGINT", this._cleanupSync);
this.fd = fd;
this.emit("open", fd);
this.emit("ready");
});
});
}
_write(chunk, encoding, callback) {
super._write(chunk, encoding, error => {
if (!error) this.emit("write");
callback(error);
});
}
_destroy(error, callback) {
if (typeof this.fd !== "number") {
this.once("open", this._destroy.bind(this, error, callback));
return;
}
process.removeListener("exit", this._cleanupSync);
process.removeListener("SIGINT", this._cleanupSync);
const unlink = error => {
fs.unlink(this.path, unlinkError => {
// If we are unable to unlink the file, the operating system will
// clean up on next restart, since we store these files in `os.tmpdir()`.
callback(unlinkError || error);
this.fd = null;
this.closed = true;
this.emit("close");
});
};
if (typeof this.fd === "number") {
fs.close(this.fd, closeError => {
// An error here probably means the fd was already closed, but we can
// still try to unlink the file.
unlink(closeError || error);
});
return;
}
unlink(error);
}
destroy(error, callback) {
if (error) this.error = error;
// This is already destroyed.
if (this.destroyed) return super.destroy(error, callback);
// Call the callback once destroyed.
if (typeof callback === "function")
this.once("close", callback.bind(this, error));
// All read streams have terminated, so we can destroy this.
if (this._readStreams.size === 0) {
super.destroy(error, callback);
return;
}
// Wait until all read streams have terminated before destroying this.
this._destroyPending = true;
// If there is an error, destroy all read streams with the error.
if (error)
for (let readStream of this._readStreams) readStream.destroy(error);
}
createReadStream(name) {
if (this.destroyed)
throw new ReadAfterDestroyedError(
"A ReadStream cannot be created from a destroyed WriteStream."
);
const readStream = new ReadStream(this, name);
this._readStreams.add(readStream);
const remove = () => {
this._deleteReadStream(readStream);
readStream.removeListener("end", remove);
readStream.removeListener("close", remove);
};
readStream.addListener("end", remove);
readStream.addListener("close", remove);
return readStream;
}
_deleteReadStream(readStream) {
if (this._readStreams.delete(readStream) && this._destroyPending)
this.destroy();
}
}
export default WriteStream;

434
node_modules/fs-capacitor/src/test.mjs generated vendored Normal file

@@ -0,0 +1,434 @@
import "leaked-handles";
import fs from "fs";
import stream from "stream";
import t from "tap";
import WriteStream, { ReadAfterDestroyedError } from ".";
const streamToString = stream =>
new Promise((resolve, reject) => {
let ended = false;
let data = "";
stream
.on("error", reject)
.on("data", chunk => {
if (ended) throw new Error("`data` emitted after `end`");
data += chunk;
})
.on("end", () => {
ended = true;
resolve(data);
});
});
const waitForBytesWritten = (stream, bytes, resolve) => {
if (stream.bytesWritten >= bytes) {
setImmediate(resolve);
return;
}
setImmediate(() => waitForBytesWritten(stream, bytes, resolve));
};
t.test("Data from a complete stream.", async t => {
let data = "";
const source = new stream.Readable({
read() {}
});
// Add the first chunk of data (without any consumer)
const chunk1 = "1".repeat(10);
source.push(chunk1);
source.push(null);
data += chunk1;
// Create a new capacitor
let capacitor1 = new WriteStream();
t.strictSame(
capacitor1._readStreams.size,
0,
"should start with 0 read streams"
);
// Pipe data to the capacitor
source.pipe(capacitor1);
// Attach a read stream
const capacitor1Stream1 = capacitor1.createReadStream("capacitor1Stream1");
t.strictSame(
capacitor1._readStreams.size,
1,
"should attach a new read stream before receiving data"
);
// Wait until capacitor is finished writing all data
const result = await streamToString(capacitor1Stream1);
t.strictSame(result, data, "should stream all data");
t.strictSame(
capacitor1._readStreams.size,
0,
"should no longer have any attached read streams"
);
});
t.test("Data from an open stream, 1 chunk, no read streams.", async t => {
let data = "";
const source = new stream.Readable({
read() {}
});
// Create a new capacitor
let capacitor1 = new WriteStream();
t.strictSame(
capacitor1._readStreams.size,
0,
"should start with 0 read streams"
);
// Pipe data to the capacitor
source.pipe(capacitor1);
// Add the first chunk of data (without any read streams)
const chunk1 = "1".repeat(10);
source.push(chunk1);
source.push(null);
data += chunk1;
// Attach a read stream
const capacitor1Stream1 = capacitor1.createReadStream("capacitor1Stream1");
t.strictSame(
capacitor1._readStreams.size,
1,
"should attach a new read stream before receiving data"
);
// Wait until capacitor is finished writing all data
const result = await streamToString(capacitor1Stream1);
t.strictSame(result, data, "should stream all data");
t.strictSame(
capacitor1._readStreams.size,
0,
"should no longer have any attached read streams"
);
});
t.test("Data from an open stream, 1 chunk, 1 read stream.", async t => {
let data = "";
const source = new stream.Readable({
read() {}
});
// Create a new capacitor
let capacitor1 = new WriteStream();
t.strictSame(
capacitor1._readStreams.size,
0,
"should start with 0 read streams"
);
// Pipe data to the capacitor
source.pipe(capacitor1);
// Attach a read stream
const capacitor1Stream1 = capacitor1.createReadStream("capacitor1Stream1");
t.strictSame(
capacitor1._readStreams.size,
1,
"should attach a new read stream before receiving data"
);
// Add the first chunk of data (with 1 read stream)
const chunk1 = "1".repeat(10);
source.push(chunk1);
source.push(null);
data += chunk1;
// Wait until capacitor is finished writing all data
const result = await streamToString(capacitor1Stream1);
t.strictSame(result, data, "should stream all data");
t.strictSame(
capacitor1._readStreams.size,
0,
"should no longer have any attached read streams"
);
});
const withChunkSize = size =>
t.test(`--- with chunk size: ${size}`, async t => {
let data = "";
const source = new stream.Readable({
read() {}
});
// Create a new capacitor and read stream before any data has been written
let capacitor1;
let capacitor1Stream1;
await t.test(
"can add a read stream before any data has been written",
async t => {
capacitor1 = new WriteStream();
t.strictSame(
capacitor1._readStreams.size,
0,
"should start with 0 read streams"
);
capacitor1Stream1 = capacitor1.createReadStream("capacitor1Stream1");
t.strictSame(
capacitor1._readStreams.size,
1,
"should attach a new read stream before receiving data"
);
await t.test("creates a temporary file", async t => {
t.plan(3);
await new Promise(resolve => capacitor1.on("open", resolve));
t.type(
capacitor1.path,
"string",
"capacitor1.path should be a string"
);
t.type(capacitor1.fd, "number", "capacitor1.fd should be a number");
t.ok(fs.existsSync(capacitor1.path), "creates a temp file");
});
}
);
// Pipe data to the capacitor
source.pipe(capacitor1);
// Add the first chunk of data (without any read streams)
const chunk1 = "1".repeat(size);
source.push(chunk1);
data += chunk1;
// Wait until this chunk has been written to the buffer
await new Promise(resolve =>
waitForBytesWritten(capacitor1, size, resolve)
);
// Create a new stream after some data has been written
let capacitor1Stream2;
t.test("can add a read stream after data has been written", t => {
capacitor1Stream2 = capacitor1.createReadStream("capacitor1Stream2");
t.strictSame(
capacitor1._readStreams.size,
2,
"should attach a new read stream after first write"
);
t.end();
});
const writeEventBytesWritten = new Promise(resolve => {
capacitor1.once("write", () => {
resolve(capacitor1.bytesWritten);
});
});
// Add a second chunk of data
const chunk2 = "2".repeat(size);
source.push(chunk2);
data += chunk2;
// Wait until this chunk has been written to the buffer
await new Promise(resolve =>
waitForBytesWritten(capacitor1, 2 * size, resolve)
);
// Make sure write event is called after bytes are written to the filesystem
await t.test("write event emitted after bytes are written", async t => {
t.strictSame(
await writeEventBytesWritten,
2 * size,
"bytesWritten should include new chunk"
);
});
// End the source & wait until capacitor is finished
const finished = new Promise(resolve => capacitor1.once("finish", resolve));
source.push(null);
await finished;
// Create a new stream after the source has ended
let capacitor1Stream3;
let capacitor1Stream4;
t.test("can create a read stream after the source has ended", t => {
capacitor1Stream3 = capacitor1.createReadStream("capacitor1Stream3");
capacitor1Stream4 = capacitor1.createReadStream("capacitor1Stream4");
t.strictSame(
capacitor1._readStreams.size,
4,
"should attach new read streams after end"
);
t.end();
});
// Consume capacitor1Stream2, capacitor1Stream4
await t.test("streams complete data to a read stream", async t => {
const result2 = await streamToString(capacitor1Stream2);
t.strictSame(
capacitor1Stream2.ended,
true,
"should mark read stream as ended"
);
t.strictSame(result2, data, "should stream complete data");
const result4 = await streamToString(capacitor1Stream4);
t.strictSame(
capacitor1Stream4.ended,
true,
"should mark read stream as ended"
);
t.strictSame(result4, data, "should stream complete data");
t.strictSame(
capacitor1._readStreams.size,
2,
"should detach an ended read stream"
);
});
// Destroy capacitor1Stream1
await t.test("can destroy a read stream", async t => {
await new Promise(resolve => {
capacitor1Stream1.once("error", resolve);
capacitor1Stream1.destroy(new Error("test"));
});
t.strictSame(
capacitor1Stream1.destroyed,
true,
"should mark read stream as destroyed"
);
t.type(
capacitor1Stream1.error,
Error,
"should store an error on read stream"
);
t.strictSame(
capacitor1._readStreams.size,
1,
"should detach a destroyed read stream"
);
});
// Destroy the capacitor (without an error)
t.test("can delay destruction of a capacitor", t => {
capacitor1.destroy(null);
t.strictSame(
capacitor1.destroyed,
false,
"should not destroy while read streams exist"
);
t.strictSame(
capacitor1._destroyPending,
true,
"should mark for future destruction"
);
t.end();
});
// Destroy capacitor1Stream3 (the last remaining read stream)
await t.test("destroys capacitor once no read streams exist", async t => {
const readStreamDestroyed = new Promise(resolve =>
capacitor1Stream3.on("close", resolve)
);
const capacitorDestroyed = new Promise(resolve =>
capacitor1.on("close", resolve)
);
capacitor1Stream3.destroy(null);
await readStreamDestroyed;
t.strictSame(
capacitor1Stream3.destroyed,
true,
"should mark read stream as destroyed"
);
t.strictSame(
capacitor1Stream3.error,
null,
"should not store an error on read stream"
);
t.strictSame(
capacitor1._readStreams.size,
0,
"should detach a destroyed read stream"
);
await capacitorDestroyed;
t.strictSame(capacitor1.closed, true, "should mark capacitor as closed");
t.strictSame(capacitor1.fd, null, "should set fd to null");
t.strictSame(
capacitor1.destroyed,
true,
"should mark capacitor as destroyed"
);
t.notOk(fs.existsSync(capacitor1.path), "removes its temp file");
});
// Try to create a new read stream
t.test("cannot create a read stream after destruction", t => {
try {
capacitor1.createReadStream();
} catch (error) {
t.ok(
error instanceof ReadAfterDestroyedError,
"should not create a read stream once destroyed"
);
t.end();
}
});
const capacitor2 = new WriteStream();
const capacitor2Stream1 = capacitor2.createReadStream("capacitor2Stream1");
const capacitor2Stream2 = capacitor2.createReadStream("capacitor2Stream2");
const capacitor2ReadStream1Destroyed = new Promise(resolve =>
capacitor2Stream1.on("close", resolve)
);
const capacitor2Destroyed = new Promise(resolve =>
capacitor2.on("close", resolve)
);
capacitor2Stream1.destroy();
await capacitor2ReadStream1Destroyed;
await t.test("propagates errors to attached read streams", async t => {
capacitor2.destroy();
await new Promise(resolve => setImmediate(resolve));
t.strictSame(
capacitor2Stream2.destroyed,
false,
"should not immediately mark attached read streams as destroyed"
);
capacitor2.destroy(new Error("test"));
await capacitor2Destroyed;
t.type(capacitor2.error, Error, "should store an error on capacitor");
t.strictSame(
capacitor2.destroyed,
true,
"should mark capacitor as destroyed"
);
t.type(
capacitor2Stream2.error,
Error,
"should store an error on attached read streams"
);
t.strictSame(
capacitor2Stream2.destroyed,
true,
"should mark attached read streams as destroyed"
);
t.strictSame(
capacitor2Stream1.error,
null,
"should not store an error on detached read streams"
);
});
});
// Test with small chunks (below the default highWaterMark of 16384 bytes)
withChunkSize(10);
// Test with large chunks (above the default highWaterMark of 16384 bytes)
withChunkSize(100000);

4901
node_modules/fs-capacitor/yarn-error.log generated vendored Normal file

File diff suppressed because it is too large

5402
node_modules/fs-capacitor/yarn.lock generated vendored Normal file

File diff suppressed because it is too large