From a9d36bb268728b4b9e2654905558897da5db2da7 Mon Sep 17 00:00:00 2001
From: achingbrain
Date: Thu, 16 Mar 2023 14:03:51 +0100
Subject: [PATCH] fix: test for file under/over reads

When we read too much or too little data, it normally means that the
UnixFS metadata in the root node is incorrect, so throw an error.
---
 .../src/resolvers/unixfs-v1/content/file.ts   |  16 +-
 .../test/exporter-esoteric.spec.ts            | 150 ++++++++++++++++++
 2 files changed, 164 insertions(+), 2 deletions(-)

diff --git a/packages/ipfs-unixfs-exporter/src/resolvers/unixfs-v1/content/file.ts b/packages/ipfs-unixfs-exporter/src/resolvers/unixfs-v1/content/file.ts
index 21ff5005..c5f80b51 100644
--- a/packages/ipfs-unixfs-exporter/src/resolvers/unixfs-v1/content/file.ts
+++ b/packages/ipfs-unixfs-exporter/src/resolvers/unixfs-v1/content/file.ts
@@ -137,15 +137,27 @@ const fileContent: UnixfsV1Resolver = (cid, node, unixfs, path, resolve, depth,
       return
     }
 
+    let read = 0n
     const queue = pushable()
 
     void walkDAG(blockstore, node, queue, 0n, offset, offset + length, options)
+      .then(() => {
+        const wanted = length - offset
+
+        if (read < wanted) {
+          throw errCode(new Error('Traversed entire DAG but did not read enough bytes'), 'ERR_UNDER_READ')
+        }
+
+        if (read > wanted) {
+          throw errCode(new Error('Read too many bytes - the file size reported by the UnixFS data in the root node may be incorrect'), 'ERR_OVER_READ')
+        }
+
+        queue.end()
+      })
       .catch(err => {
         queue.end(err)
       })
 
-    let read = 0n
-
     for await (const buf of queue) {
       if (buf == null) {
         continue
diff --git a/packages/ipfs-unixfs-exporter/test/exporter-esoteric.spec.ts b/packages/ipfs-unixfs-exporter/test/exporter-esoteric.spec.ts
index ad10c2d4..a99de23d 100644
--- a/packages/ipfs-unixfs-exporter/test/exporter-esoteric.spec.ts
+++ b/packages/ipfs-unixfs-exporter/test/exporter-esoteric.spec.ts
@@ -211,4 +211,154 @@ describe('exporter esoteric DAGs', () => {
     const data = uint8ArrayConcat(await all(exported.content()))
     expect(data).to.deep.equal(buf)
   })
+
+  it('errors on DAG with blocksizes that are too large', async () => {
+    const leaves = await Promise.all([
+      randomBytes(5),
+      randomBytes(3),
+      randomBytes(6)
+    ].map(async buf => {
+      return {
+        cid: await storeBlock(buf, raw.code),
+        buf
+      }
+    }))
+
+    const unixfs = new UnixFS({
+      type: 'file',
+      blockSizes: [
+        BigInt(leaves[0].buf.byteLength),
+        BigInt(leaves[1].buf.byteLength + 5), // this is wrong
+        BigInt(leaves[2].buf.byteLength)
+      ]
+    })
+
+    const rootNode = {
+      Data: unixfs.marshal(),
+      Links: [{
+        Name: '',
+        Hash: leaves[0].cid,
+        Tsize: leaves[0].buf.byteLength
+      }, {
+        Name: '',
+        Hash: leaves[1].cid,
+        Tsize: leaves[1].buf.byteLength
+      }, {
+        Name: '',
+        Hash: leaves[2].cid,
+        Tsize: leaves[2].buf.byteLength
+      }]
+    }
+
+    const rootBuf = dagPb.encode(rootNode)
+    const rootCid = await storeBlock(rootBuf, dagPb.code)
+    const exported = await exporter(rootCid, block)
+
+    if (exported.type !== 'file') {
+      throw new Error('Unexpected type')
+    }
+
+    await expect(all(exported.content())).to.eventually.be.rejected
+      .with.property('code', 'ERR_UNDER_READ')
+  })
+
+  it('errors on DAG with blocksizes that are too small', async () => {
+    const leaves = await Promise.all([
+      randomBytes(5),
+      randomBytes(3),
+      randomBytes(6)
+    ].map(async buf => {
+      return {
+        cid: await storeBlock(buf, raw.code),
+        buf
+      }
+    }))
+
+    const unixfs = new UnixFS({
+      type: 'file',
+      blockSizes: [
+        BigInt(leaves[0].buf.byteLength),
+        BigInt(leaves[1].buf.byteLength - 2), // this is wrong
+        BigInt(leaves[2].buf.byteLength)
+      ]
+    })
+
+    const rootNode = {
+      Data: unixfs.marshal(),
+      Links: [{
+        Name: '',
+        Hash: leaves[0].cid,
+        Tsize: leaves[0].buf.byteLength
+      }, {
+        Name: '',
+        Hash: leaves[1].cid,
+        Tsize: leaves[1].buf.byteLength
+      }, {
+        Name: '',
+        Hash: leaves[2].cid,
+        Tsize: leaves[2].buf.byteLength
+      }]
+    }
+
+    const rootBuf = dagPb.encode(rootNode)
+    const rootCid = await storeBlock(rootBuf, dagPb.code)
+    const exported = await exporter(rootCid, block)
+
+    if (exported.type !== 'file') {
+      throw new Error('Unexpected type')
+    }
+
+    await expect(all(exported.content())).to.eventually.be.rejected
+      .with.property('code', 'ERR_OVER_READ')
+  })
+
+  it('errors on DAG with incorrect number of blocksizes', async () => {
+    const leaves = await Promise.all([
+      randomBytes(5),
+      randomBytes(3),
+      randomBytes(6)
+    ].map(async buf => {
+      return {
+        cid: await storeBlock(buf, raw.code),
+        buf
+      }
+    }))
+
+    const unixfs = new UnixFS({
+      type: 'file',
+      blockSizes: [
+        BigInt(leaves[0].buf.byteLength),
+        // BigInt(leaves[1].buf.byteLength), // this is wrong
+        BigInt(leaves[2].buf.byteLength)
+      ]
+    })
+
+    const rootNode = {
+      Data: unixfs.marshal(),
+      Links: [{
+        Name: '',
+        Hash: leaves[0].cid,
+        Tsize: leaves[0].buf.byteLength
+      }, {
+        Name: '',
+        Hash: leaves[1].cid,
+        Tsize: leaves[1].buf.byteLength
+      }, {
+        Name: '',
+        Hash: leaves[2].cid,
+        Tsize: leaves[2].buf.byteLength
+      }]
+    }
+
+    const rootBuf = dagPb.encode(rootNode)
+    const rootCid = await storeBlock(rootBuf, dagPb.code)
+    const exported = await exporter(rootCid, block)
+
+    if (exported.type !== 'file') {
+      throw new Error('Unexpected type')
+    }
+
+    await expect(all(exported.content())).to.eventually.be.rejected
+      .with.property('code', 'ERR_NOT_UNIXFS')
+  })
 })
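
Usage note: with this change, under- and over-reads surface as errors with a
code property that callers can match on. A minimal consumer-side sketch,
assuming a blockstore and a root CID are already available (cid and
blockstore below are placeholders; exporter, content() and the error codes
are as used in the patch above):

    import { exporter } from 'ipfs-unixfs-exporter'
    import { concat as uint8ArrayConcat } from 'uint8arrays/concat'
    import all from 'it-all'

    try {
      const entry = await exporter(cid, blockstore)

      if (entry.type !== 'file') {
        throw new Error('Unexpected type')
      }

      // consume the stream - an under/over read is only detected once
      // the DAG walk completes
      const data = uint8ArrayConcat(await all(entry.content()))
    } catch (err: any) {
      if (err.code === 'ERR_UNDER_READ' || err.code === 'ERR_OVER_READ') {
        // the blockSizes in the root node disagree with the bytes actually
        // present in the leaf nodes - treat the file as corrupt
      } else {
        throw err
      }
    }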