From 20ba5961fcb5dc237e3a2a7642ad23547c6a0082 Mon Sep 17 00:00:00 2001
From: Daniel N <2color@users.noreply.github.com>
Date: Wed, 13 Mar 2024 17:29:13 +0100
Subject: [PATCH 1/6] fix: set cache-control header conditionally

Fixes #17
---
 packages/verified-fetch/src/verified-fetch.ts |   7 +-
 .../test/cache-control-header.spec.ts         | 105 ++++++++++++++++++
 2 files changed, 110 insertions(+), 2 deletions(-)
 create mode 100644 packages/verified-fetch/test/cache-control-header.spec.ts

diff --git a/packages/verified-fetch/src/verified-fetch.ts b/packages/verified-fetch/src/verified-fetch.ts
index ca8c6dbc..6c0aa3aa 100644
--- a/packages/verified-fetch/src/verified-fetch.ts
+++ b/packages/verified-fetch/src/verified-fetch.ts
@@ -413,7 +413,7 @@ export class VerifiedFetch {
     options?.onProgress?.(new CustomProgressEvent<CIDDetail>('verified-fetch:request:start', { resource }))
 
     // resolve the CID/path from the requested resource
-    const { path, query, cid } = await parseResource(resource, { ipns: this.ipns, logger: this.helia.logger }, options)
+    const { path, query, cid, protocol } = await parseResource(resource, { ipns: this.ipns, logger: this.helia.logger }, options)
 
     options?.onProgress?.(new CustomProgressEvent<CIDDetail>('verified-fetch:request:resolve', { cid, path }))
 
@@ -477,7 +477,10 @@ export class VerifiedFetch {
     }
 
     response.headers.set('etag', getETag({ cid, reqFormat, weak: false }))
-    response.headers.set('cache-control', 'public, max-age=29030400, immutable')
+
+    if (protocol === 'ipfs') {
+      response.headers.set('cache-control', 'public, max-age=29030400, immutable')
+    }
     // https://specs.ipfs.tech/http-gateways/path-gateway/#x-ipfs-path-response-header
     response.headers.set('X-Ipfs-Path', resource.toString())
 
diff --git a/packages/verified-fetch/test/cache-control-header.spec.ts b/packages/verified-fetch/test/cache-control-header.spec.ts
new file mode 100644
index 00000000..bae88476
--- /dev/null
+++ b/packages/verified-fetch/test/cache-control-header.spec.ts
@@ -0,0 +1,105 @@
+import { dagCbor } from '@helia/dag-cbor'
+import { ipns } from '@helia/ipns'
+import { stop } from '@libp2p/interface'
+import { createEd25519PeerId } from '@libp2p/peer-id-factory'
+import { expect } from 'aegir/chai'
+import Sinon from 'sinon'
+import { VerifiedFetch } from '../src/verified-fetch.js'
+import { createHelia } from './fixtures/create-offline-helia.js'
+import type { Helia } from '@helia/interface'
+import type { IPNS } from '@helia/ipns'
+
+describe('cache-control header', () => {
+  let helia: Helia
+  let name: IPNS
+  let verifiedFetch: VerifiedFetch
+
+  beforeEach(async () => {
+    helia = await createHelia()
+    name = ipns(helia)
+    verifiedFetch = new VerifiedFetch({
+      helia
+    })
+  })
+
+  afterEach(async () => {
+    await stop(helia, verifiedFetch)
+  })
+
+  it('should allow return the correct max-age in the cache header for immutable responses', async () => {
+    const obj = {
+      hello: 'world'
+    }
+    const c = dagCbor(helia)
+    const cid = await c.add(obj)
+
+    const resp = await verifiedFetch.fetch(cid)
+
+    expect(resp).to.be.ok()
+    expect(resp.status).to.equal(200)
+    expect(resp.headers.get('Cache-Control')).to.equal('public, max-age=29030400, immutable')
+  })
+
+  it('should return not contain immutable in the cache-control header for an IPNS name', async () => {
+    const obj = {
+      hello: 'world'
+    }
+    const c = dagCbor(helia)
+    const cid = await c.add(obj)
+
+    const oneHourInMs = 1000 * 60 * 60
+    const peerId = await createEd25519PeerId()
+
+    // ipns currently only allows customising the lifetime which is also used as the TTL
+    await name.publish(peerId, cid, { lifetime: oneHourInMs })
+
+    const resp = await verifiedFetch.fetch(`ipns://${peerId}`)
+    expect(resp).to.be.ok()
+    expect(resp.status).to.equal(200)
+
+    expect(resp.headers.get('Cache-Control')).to.not.containIgnoreCase('immutable')
+  })
+
+  it.skip('should return the correct max-age in the cache-control header for an IPNS name', async () => {
+    const obj = {
+      hello: 'world'
+    }
+    const c = dagCbor(helia)
+    const cid = await c.add(obj)
+
+    const oneHourInMs = 1000 * 60 * 60
+    const peerId = await createEd25519PeerId()
+
+    // ipns currently only allows customising the lifetime which is also used as the TTL
+    await name.publish(peerId, cid, { lifetime: oneHourInMs })
+
+    const resp = await verifiedFetch.fetch(`ipns://${peerId}`)
+    expect(resp).to.be.ok()
+    expect(resp.status).to.equal(200)
+
+    expect(resp.headers.get('Cache-Control')).to.equal(`public, max-age=${oneHourInMs.toString()}`)
+  })
+
+  it('should not contain immutable in the cache-control header for a DNSLink name', async () => {
+    const customDnsResolver = Sinon.stub()
+
+    verifiedFetch = new VerifiedFetch({
+      helia
+    }, {
+      dnsResolvers: [customDnsResolver]
+    })
+
+    const obj = {
+      hello: 'world'
+    }
+    const c = dagCbor(helia)
+    const cid = await c.add(obj)
+    customDnsResolver.returns(Promise.resolve(`/ipfs/${cid.toString()}`))
+
+    const resp = await verifiedFetch.fetch('ipns://example-domain.com')
+    expect(resp).to.be.ok()
+    expect(resp.status).to.equal(200)
+
+    expect(resp.headers.get('Cache-Control')).to.not.containIgnoreCase('immutable')
+  })
+})

From f9ba592e97dfb7717254a421b4fa073ed26d129b Mon Sep 17 00:00:00 2001
From: Daniel N <2color@users.noreply.github.com>
Date: Fri, 15 Mar 2024 12:37:52 +0100
Subject: [PATCH 2/6] chore: bump deps

---
 packages/verified-fetch/package.json | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/packages/verified-fetch/package.json b/packages/verified-fetch/package.json
index 63f99e1c..924e492b 100644
--- a/packages/verified-fetch/package.json
+++ b/packages/verified-fetch/package.json
@@ -61,7 +61,7 @@
     "@helia/car": "^3.1.0",
     "@helia/http": "^1.0.2",
     "@helia/interface": "^4.0.1",
-    "@helia/ipns": "^7.0.0",
+    "@helia/ipns": "^7.1.0",
     "@helia/routers": "^1.0.1",
     "@helia/unixfs": "^3.0.1",
     "@ipld/dag-cbor": "^9.2.0",
@@ -70,7 +70,7 @@
     "@libp2p/interface": "^1.1.4",
     "@libp2p/kad-dht": "^12.0.8",
     "@libp2p/peer-id": "^4.0.7",
-    "@multiformats/dns": "^1.0.2",
+    "@multiformats/dns": "^1.0.5",
     "cborg": "^4.0.9",
     "hashlru": "^2.3.0",
     "interface-blockstore": "^5.2.10",

From 9065abae4d19d5df63ae39ffb8cd05d34932431f Mon Sep 17 00:00:00 2001
From: Russell Dempsey <1173416+SgtPooki@users.noreply.github.com>
Date: Mon, 18 Mar 2024 13:55:57 -0700
Subject: [PATCH 3/6] feat: implement new ipns record&answer properties (#23)

* feat: implement new ipns record&answer properties

* fix: parseUrlString response includes defined ttl, set ttl if ipnsCached

* test: fix firefox failure

* feat: support http range header (#10)

* chore: limit body parameters to the types used

* chore: add response-header helper and tests

* feat: add range header parsing support

* feat: verified-fetch supports range-requests

* test: fix dns test asserting test failure since we are catching it now

* fix: return 500 error when streaming unixfs content throws

* fix: cleanup code and unexecuting tests hiding errors

* chore: some cleanup and code coverage

* tmp: most things working

* fix: stream slicing and test correctness

* chore: fixed some ByteRangeContext tests

* test: add back header helpers

* fix: unixfs tests are passing

* fix: range-requests on raw content

* feat: tests are passing

moved transform stream over to https://github.com/SgtPooki/streams

* chore: log string casing

* chore: use 502 response instead of 500

* chore: use libp2p/interface for types in src

* chore: failing to create range resp logs error

* chore: Apply suggestions from code review

* chore: fix broken tests from github PR patches (my own)

* chore: re-enable stream tests for ByteRangeContext

* chore: clean up getBody a bit

* chore: ByteRangeContext getBody cleanup

* chore: apply suggestions from code review

Co-authored-by: Alex Potsides <alex@achingbrain.net>

* fix: getSlicedBody uses correct types

* chore: remove extra stat call

* chore: fix jsdoc with '*/'

* chore: fileSize is public property, but should not be used

* test: fix blob comparisons that broke or were never working properly

* chore: Update byte-range-context.ts

Co-authored-by: Alex Potsides <alex@achingbrain.net>

* chore: jsdoc cleanup

* Revert "chore: fileSize is public property, but should not be used"

This reverts commit 46dc13383a8ab471e1cf3cfd624eceaf9044352c.

* chore: jsdoc comments explaining .fileSize use

* chore: isRangeRequest is public

* chore: getters/setters update

* chore: remove unnecessary _contentRangeHeaderValue

* chore: ByteRangeContext uses setFileSize and getFileSize

* chore: remove .stat changes that are no longer needed

---------

Co-authored-by: Alex Potsides <alex@achingbrain.net>

* chore(release): 1.2.0 [skip ci]

## @helia/verified-fetch [1.2.0](https://github.com/ipfs/helia-verified-fetch/compare/@helia/verified-fetch-1.1.3...@helia/verified-fetch-1.2.0) (2024-03-15)

### Features

* support http range header ([#10](https://github.com/ipfs/helia-verified-fetch/issues/10)) ([9f5078a](https://github.com/ipfs/helia-verified-fetch/commit/9f5078a09846ba6569d637ea1dd90a6d8fb4e629))

### Trivial Changes

* fix build ([#22](https://github.com/ipfs/helia-verified-fetch/issues/22)) ([01261fe](https://github.com/ipfs/helia-verified-fetch/commit/01261feabd4397c10446609b072a7cb97fb81911))

* chore(release): 1.7.0 [skip ci]

## @helia/verified-fetch-interop [1.7.0](https://github.com/ipfs/helia-verified-fetch/compare/@helia/verified-fetch-interop-1.6.0...@helia/verified-fetch-interop-1.7.0) (2024-03-15)

### Dependencies

* **@helia/verified-fetch:** upgraded to 1.2.0

* chore: apply pr comments

* fix: some ipns ttl precision cleanup

---------

Co-authored-by: Alex Potsides <alex@achingbrain.net>
Co-authored-by: semantic-release-bot <semantic-release-bot@martynus.net>
---
 packages/interop/CHANGELOG.md                 |   8 +
 packages/interop/package.json                 |   4 +-
 packages/verified-fetch/CHANGELOG.md          |  12 +
 packages/verified-fetch/package.json          |   2 +-
 packages/verified-fetch/src/types.ts          |   2 +
 .../src/utils/byte-range-context.ts           | 303 ++++++++++++++++++
 .../utils/get-stream-from-async-iterable.ts   |   2 +-
 .../src/utils/parse-resource.ts               |   5 +-
 .../src/utils/parse-url-string.ts             |  88 +++--
 .../src/utils/request-headers.ts              |  51 +++
 .../src/utils/response-headers.ts             |  70 ++++
 .../verified-fetch/src/utils/responses.ts     |  86 ++++-
 packages/verified-fetch/src/verified-fetch.ts |  95 ++++--
 .../test/cache-control-header.spec.ts         |  48 ++-
 .../test/custom-dns-resolvers.spec.ts         |  19 +-
 .../test/fixtures/ipns-stubs.ts               |  19 ++
 .../test/range-requests.spec.ts               | 162 ++++++++++
 .../test/utils/byte-range-context.spec.ts     | 150 +++++++++
 .../test/utils/parse-url-string.spec.ts       |  27 +-
 .../test/utils/request-headers.spec.ts        |  61 ++++
 .../test/utils/response-headers.spec.ts       |  33 ++
 21 files changed, 1161 insertions(+), 86 deletions(-)
 create mode 100644 packages/verified-fetch/src/utils/byte-range-context.ts
 create mode 100644 packages/verified-fetch/src/utils/request-headers.ts
 create mode 100644 packages/verified-fetch/src/utils/response-headers.ts
 create mode 100644 packages/verified-fetch/test/fixtures/ipns-stubs.ts
 create mode 100644 packages/verified-fetch/test/range-requests.spec.ts
 create mode 100644 packages/verified-fetch/test/utils/byte-range-context.spec.ts
 create mode 100644 packages/verified-fetch/test/utils/request-headers.spec.ts
 create mode 100644 packages/verified-fetch/test/utils/response-headers.spec.ts

diff --git a/packages/interop/CHANGELOG.md b/packages/interop/CHANGELOG.md
index 745ac922..1c81d4f1 100644
--- a/packages/interop/CHANGELOG.md
+++ b/packages/interop/CHANGELOG.md
@@ -1,3 +1,11 @@
+## @helia/verified-fetch-interop [1.7.0](https://github.com/ipfs/helia-verified-fetch/compare/@helia/verified-fetch-interop-1.6.0...@helia/verified-fetch-interop-1.7.0) (2024-03-15)
+
+
+
+### Dependencies
+
+* **@helia/verified-fetch:** upgraded to 1.2.0
+
 ## @helia/verified-fetch-interop [1.6.0](https://github.com/ipfs/helia-verified-fetch/compare/@helia/verified-fetch-interop-1.5.1...@helia/verified-fetch-interop-1.6.0) (2024-03-14)
 
 
diff --git a/packages/interop/package.json b/packages/interop/package.json
index 86d81157..0c974d9f 100644
--- a/packages/interop/package.json
+++ b/packages/interop/package.json
@@ -1,6 +1,6 @@
 {
   "name": "@helia/verified-fetch-interop",
-  "version": "1.6.0",
+  "version": "1.7.0",
   "description": "Interop tests for @helia/verified-fetch",
   "license": "Apache-2.0 OR MIT",
   "homepage": "https://github.com/ipfs/helia-verified-fetch/tree/main/packages/interop#readme",
@@ -57,7 +57,7 @@
     "test:electron-main": "aegir test -t electron-main"
   },
   "dependencies": {
-    "@helia/verified-fetch": "1.1.3",
+    "@helia/verified-fetch": "1.2.0",
     "aegir": "^42.2.5",
     "ipfsd-ctl": "^13.0.0",
     "it-drain": "^3.0.5",
diff --git a/packages/verified-fetch/CHANGELOG.md b/packages/verified-fetch/CHANGELOG.md
index 548a42e4..155d5332 100644
--- a/packages/verified-fetch/CHANGELOG.md
+++ b/packages/verified-fetch/CHANGELOG.md
@@ -1,3 +1,15 @@
+## @helia/verified-fetch [1.2.0](https://github.com/ipfs/helia-verified-fetch/compare/@helia/verified-fetch-1.1.3...@helia/verified-fetch-1.2.0) (2024-03-15)
+
+
+### Features
+
+* support http range header ([#10](https://github.com/ipfs/helia-verified-fetch/issues/10)) ([9f5078a](https://github.com/ipfs/helia-verified-fetch/commit/9f5078a09846ba6569d637ea1dd90a6d8fb4e629))
+
+
+### Trivial Changes
+
+* fix build ([#22](https://github.com/ipfs/helia-verified-fetch/issues/22)) ([01261fe](https://github.com/ipfs/helia-verified-fetch/commit/01261feabd4397c10446609b072a7cb97fb81911))
+
 ## @helia/verified-fetch [1.1.3](https://github.com/ipfs/helia-verified-fetch/compare/@helia/verified-fetch-1.1.2...@helia/verified-fetch-1.1.3) (2024-03-14)
 
 
diff --git a/packages/verified-fetch/package.json b/packages/verified-fetch/package.json
index 924e492b..3fe3060a 100644
--- a/packages/verified-fetch/package.json
+++ b/packages/verified-fetch/package.json
@@ -1,6 +1,6 @@
 {
   "name": "@helia/verified-fetch",
-  "version": "1.1.3",
+  "version": "1.2.0",
   "description": "A fetch-like API for obtaining verified & trustless IPFS content on the web",
   "license": "Apache-2.0 OR MIT",
   "homepage": "https://github.com/ipfs/helia-verified-fetch/tree/main/packages/verified-fetch#readme",
diff --git a/packages/verified-fetch/src/types.ts b/packages/verified-fetch/src/types.ts
index 4a235e1a..f46515d7 100644
--- a/packages/verified-fetch/src/types.ts
+++ b/packages/verified-fetch/src/types.ts
@@ -1 +1,3 @@
 export type RequestFormatShorthand = 'raw' | 'car' | 'tar' | 'ipns-record' | 'dag-json' | 'dag-cbor' | 'json' | 'cbor'
+
+export type SupportedBodyTypes = string | ArrayBuffer | Blob | ReadableStream<Uint8Array> | null
diff --git a/packages/verified-fetch/src/utils/byte-range-context.ts b/packages/verified-fetch/src/utils/byte-range-context.ts
new file mode 100644
index 00000000..54df115e
--- /dev/null
+++ b/packages/verified-fetch/src/utils/byte-range-context.ts
@@ -0,0 +1,303 @@
+import { calculateByteRangeIndexes, getHeader } from './request-headers.js'
+import { getContentRangeHeader } from './response-headers.js'
+import type { SupportedBodyTypes } from '../types.js'
+import type { ComponentLogger, Logger } from '@libp2p/interface'
+
+type SliceableBody = Exclude<SupportedBodyTypes, ReadableStream<Uint8Array> | null>
+
+/**
+ * Gets the body size of a given body if it's possible to calculate it synchronously.
+ */
+function getBodySizeSync (body: SupportedBodyTypes): number | null {
+  if (typeof body === 'string') {
+    return body.length
+  }
+  if (body instanceof ArrayBuffer || body instanceof Uint8Array) {
+    return body.byteLength
+  }
+  if (body instanceof Blob) {
+    return body.size
+  }
+
+  if (body instanceof ReadableStream) {
+    return null
+  }
+
+  return null
+}
+
+function getByteRangeFromHeader (rangeHeader: string): { start: string, end: string } {
+  /**
+   * Range: bytes=<start>-<end> | bytes=<start2>- | bytes=-<end2>
+   */
+  const match = rangeHeader.match(/^bytes=(?<start>\d+)?-(?<end>\d+)?$/)
+  if (match?.groups == null) {
+    throw new Error('Invalid range request')
+  }
+
+  const { start, end } = match.groups
+
+  return { start, end }
+}
+
+export class ByteRangeContext {
+  public readonly isRangeRequest: boolean
+
+  /**
+   * This property is purposefully only set in `set fileSize` and should not be set directly.
+   */
+  private _fileSize: number | null | undefined
+  private _body: SupportedBodyTypes = null
+  private readonly rangeRequestHeader: string | undefined
+  private readonly log: Logger
+  private readonly requestRangeStart: number | null
+  private readonly requestRangeEnd: number | null
+  private byteStart: number | undefined
+  private byteEnd: number | undefined
+  private byteSize: number | undefined
+
+  constructor (logger: ComponentLogger, private readonly headers?: HeadersInit) {
+    this.log = logger.forComponent('helia:verified-fetch:byte-range-context')
+    this.rangeRequestHeader = getHeader(this.headers, 'Range')
+    if (this.rangeRequestHeader != null) {
+      this.isRangeRequest = true
+      this.log.trace('range request detected')
+      try {
+        const { start, end } = getByteRangeFromHeader(this.rangeRequestHeader)
+        this.requestRangeStart = start != null ? parseInt(start) : null
+        this.requestRangeEnd = end != null ? parseInt(end) : null
+      } catch (e) {
+        this.log.error('error parsing range request header: %o', e)
+        this.requestRangeStart = null
+        this.requestRangeEnd = null
+      }
+
+      this.setOffsetDetails()
+    } else {
+      this.log.trace('no range request detected')
+      this.isRangeRequest = false
+      this.requestRangeStart = null
+      this.requestRangeEnd = null
+    }
+  }
+
+  public setBody (body: SupportedBodyTypes): void {
+    this._body = body
+    // if fileSize was already set, don't recalculate it
+    this.setFileSize(this._fileSize ?? getBodySizeSync(body))
+
+    this.log.trace('set request body with fileSize %o', this._fileSize)
+  }
+
+  public getBody (): SupportedBodyTypes {
+    const body = this._body
+    if (body == null) {
+      this.log.trace('body is null')
+      return body
+    }
+    if (!this.isRangeRequest || !this.isValidRangeRequest) {
+      this.log.trace('returning body unmodified for non-range, or invalid range, request')
+      return body
+    }
+    const byteStart = this.byteStart
+    const byteEnd = this.byteEnd
+    const byteSize = this.byteSize
+    if (byteStart != null || byteEnd != null) {
+      this.log.trace('returning body with byteStart=%o, byteEnd=%o, byteSize=%o', byteStart, byteEnd, byteSize)
+      if (body instanceof ReadableStream) {
+        // stream should already be spliced by `unixfs.cat`
+        return body
+      }
+      return this.getSlicedBody(body)
+    }
+
+    // we should not reach this point, but return body untouched.
+    this.log.error('returning unmodified body for valid range request')
+    return body
+  }
+
+  private getSlicedBody <T extends SliceableBody>(body: T): SliceableBody {
+    if (this.isPrefixLengthRequest) {
+      this.log.trace('sliced body with byteStart %o', this.byteStart)
+      return body.slice(this.offset) satisfies SliceableBody
+    }
+    if (this.isSuffixLengthRequest && this.length != null) {
+      this.log.trace('sliced body with length %o', -this.length)
+      return body.slice(-this.length) satisfies SliceableBody
+    }
+    const offset = this.byteStart ?? 0
+    const length = this.byteEnd == null ? undefined : this.byteEnd + 1
+    this.log.trace('returning body with offset %o and length %o', offset, length)
+
+    return body.slice(offset, length) satisfies SliceableBody
+  }
+
+  private get isSuffixLengthRequest (): boolean {
+    return this.requestRangeStart == null && this.requestRangeEnd != null
+  }
+
+  private get isPrefixLengthRequest (): boolean {
+    return this.requestRangeStart != null && this.requestRangeEnd == null
+  }
+
+  /**
+   * Sometimes, we need to set the fileSize explicitly because we can't calculate
+   * the size of the body (e.g. for unixfs content where we call .stat).
+   *
+   * This fileSize should otherwise only be called from `setBody`.
+   */
+  public setFileSize (size: number | bigint | null): void {
+    this._fileSize = size != null ? Number(size) : null
+    this.log.trace('set _fileSize to %o', this._fileSize)
+    // when fileSize changes, we need to recalculate the offset details
+    this.setOffsetDetails()
+  }
+
+  public getFileSize (): number | null | undefined {
+    return this._fileSize
+  }
+
+  private isValidByteStart (): boolean {
+    if (this.byteStart != null) {
+      if (this.byteStart < 0) {
+        return false
+      }
+      if (this._fileSize != null && this.byteStart > this._fileSize) {
+        return false
+      }
+    }
+    return true
+  }
+
+  private isValidByteEnd (): boolean {
+    if (this.byteEnd != null) {
+      if (this.byteEnd < 0) {
+        return false
+      }
+      if (this._fileSize != null && this.byteEnd > this._fileSize) {
+        return false
+      }
+    }
+    return true
+  }
+
+  /**
+   * We may get the values required to determine if this is a valid range request at different times
+   * so we need to calculate it when asked.
+   */
+  public get isValidRangeRequest (): boolean {
+    if (!this.isRangeRequest) {
+      return false
+    }
+    if (this.requestRangeStart == null && this.requestRangeEnd == null) {
+      this.log.trace('invalid range request, range request values not provided')
+      return false
+    }
+    if (!this.isValidByteStart()) {
+      this.log.trace('invalid range request, byteStart is less than 0 or greater than fileSize')
+      return false
+    }
+    if (!this.isValidByteEnd()) {
+      this.log.trace('invalid range request, byteEnd is less than 0 or greater than fileSize')
+      return false
+    }
+    if (this.requestRangeEnd != null && this.requestRangeStart != null) {
+      // we may not have enough info.. base check on requested bytes
+      if (this.requestRangeStart > this.requestRangeEnd) {
+        this.log.trace('invalid range request, start is greater than end')
+        return false
+      } else if (this.requestRangeStart < 0) {
+        this.log.trace('invalid range request, start is less than 0')
+        return false
+      } else if (this.requestRangeEnd < 0) {
+        this.log.trace('invalid range request, end is less than 0')
+        return false
+      }
+    }
+
+    return true
+  }
+
+  /**
+   * Given all the information we have, this function returns the offset that will be used when:
+   * 1. calling unixfs.cat
+   * 2. slicing the body
+   */
+  public get offset (): number {
+    if (this.byteStart === 0) {
+      return 0
+    }
+    if (this.isPrefixLengthRequest || this.isSuffixLengthRequest) {
+      if (this.byteStart != null) {
+        // we have to subtract by 1 because the offset is inclusive
+        return this.byteStart - 1
+      }
+    }
+
+    return this.byteStart ?? 0
+  }
+
+  /**
+   * Given all the information we have, this function returns the length that will be used when:
+   * 1. calling unixfs.cat
+   * 2. slicing the body
+   */
+  public get length (): number | undefined {
+    return this.byteSize ?? undefined
+  }
+
+  /**
+   * Converts a range request header into helia/unixfs supported range options
+   * Note that the gateway specification says we "MAY" support multiple ranges (https://specs.ipfs.tech/http-gateways/path-gateway/#range-request-header) but we don't
+   *
+   * Also note that @helia/unixfs and ipfs-unixfs-exporter expect length and offset to be numbers, the range header is a string, and the size of the resource is likely a bigint.
+   *
+   * SUPPORTED:
+   * Range: bytes=<range-start>-<range-end>
+   * Range: bytes=<range-start>-
+   * Range: bytes=-<suffix-length> // must pass size so we can calculate the offset. suffix-length is the number of bytes from the end of the file.
+   *
+   * NOT SUPPORTED:
+   * Range: bytes=<range-start>-<range-end>, <range-start>-<range-end>
+   * Range: bytes=<range-start>-<range-end>, <range-start>-<range-end>, <range-start>-<range-end>
+   *
+   * @see https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Range#directives
+   */
+  private setOffsetDetails (): void {
+    if (this.requestRangeStart == null && this.requestRangeEnd == null) {
+      this.log.trace('requestRangeStart and requestRangeEnd are null')
+      return
+    }
+
+    const { start, end, byteSize } = calculateByteRangeIndexes(this.requestRangeStart ?? undefined, this.requestRangeEnd ?? undefined, this._fileSize ?? undefined)
+    this.log.trace('set byteStart to %o, byteEnd to %o, byteSize to %o', start, end, byteSize)
+    this.byteStart = start
+    this.byteEnd = end
+    this.byteSize = byteSize
+  }
+
+  /**
+   * This function returns the value of the "content-range" header.
+   *
+   * @see https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Content-Range
+   *
+   * Returns a string representing the following content ranges:
+   *
+   * @example
+   * - Content-Range: <unit> <byteStart>-<byteEnd>/<byteSize>
+   * - Content-Range: <unit> <byteStart>-<byteEnd>/*
+   */
+  // - Content-Range: <unit> */<byteSize> // this is purposefully not in jsdoc block
+  public get contentRangeHeaderValue (): string {
+    if (!this.isValidRangeRequest) {
+      this.log.error('cannot get contentRangeHeaderValue for invalid range request')
+      throw new Error('Invalid range request')
+    }
+
+    return getContentRangeHeader({
+      byteStart: this.byteStart,
+      byteEnd: this.byteEnd,
+      byteSize: this._fileSize ?? undefined
+    })
+  }
+}
diff --git a/packages/verified-fetch/src/utils/get-stream-from-async-iterable.ts b/packages/verified-fetch/src/utils/get-stream-from-async-iterable.ts
index c9266e2e..02342d59 100644
--- a/packages/verified-fetch/src/utils/get-stream-from-async-iterable.ts
+++ b/packages/verified-fetch/src/utils/get-stream-from-async-iterable.ts
@@ -11,7 +11,7 @@ export async function getStreamFromAsyncIterable (iterator: AsyncIterable<Uint8A
   const { value: firstChunk, done } = await reader.next()
 
   if (done === true) {
-    log.error('No content found for path', path)
+    log.error('no content found for path', path)
     throw new Error('No content found')
   }
 
diff --git a/packages/verified-fetch/src/utils/parse-resource.ts b/packages/verified-fetch/src/utils/parse-resource.ts
index 49e0b6d3..4b3b000c 100644
--- a/packages/verified-fetch/src/utils/parse-resource.ts
+++ b/packages/verified-fetch/src/utils/parse-resource.ts
@@ -32,8 +32,9 @@ export async function parseResource (resource: Resource, { ipns, logger }: Parse
       cid,
       protocol: 'ipfs',
       path: '',
-      query: {}
-    }
+      query: {},
+      ttl: 29030400 // 1 year for ipfs content
+    } satisfies ParsedUrlStringResults
   }
 
   throw new TypeError(`Invalid resource. Cannot determine CID from resource: ${resource}`)
diff --git a/packages/verified-fetch/src/utils/parse-url-string.ts b/packages/verified-fetch/src/utils/parse-url-string.ts
index 6866f7f1..d406710e 100644
--- a/packages/verified-fetch/src/utils/parse-url-string.ts
+++ b/packages/verified-fetch/src/utils/parse-url-string.ts
@@ -2,11 +2,11 @@ import { peerIdFromString } from '@libp2p/peer-id'
 import { CID } from 'multiformats/cid'
 import { TLRU } from './tlru.js'
 import type { RequestFormatShorthand } from '../types.js'
-import type { IPNS, ResolveDNSLinkProgressEvents, ResolveResult } from '@helia/ipns'
+import type { DNSLinkResolveResult, IPNS, IPNSResolveResult, ResolveDNSLinkProgressEvents, ResolveResult } from '@helia/ipns'
 import type { ComponentLogger } from '@libp2p/interface'
 import type { ProgressOptions } from 'progress-events'
 
-const ipnsCache = new TLRU<ResolveResult>(1000)
+const ipnsCache = new TLRU<DNSLinkResolveResult | IPNSResolveResult>(1000)
 
 export interface ParseUrlStringInput {
   urlString: string
@@ -23,30 +23,66 @@ export interface ParsedUrlQuery extends Record<string, string | unknown> {
   filename?: string
 }
 
-export interface ParsedUrlStringResults {
-  protocol: string
-  path: string
-  cid: CID
+interface ParsedUrlStringResultsBase extends ResolveResult {
+  protocol: 'ipfs' | 'ipns'
   query: ParsedUrlQuery
+
+  /**
+   * seconds as a number
+   */
+  ttl?: number
 }
 
+export type ParsedUrlStringResults = ParsedUrlStringResultsBase
+
 const URL_REGEX = /^(?<protocol>ip[fn]s):\/\/(?<cidOrPeerIdOrDnsLink>[^/?]+)\/?(?<path>[^?]*)\??(?<queryString>.*)$/
 const PATH_REGEX = /^\/(?<protocol>ip[fn]s)\/(?<cidOrPeerIdOrDnsLink>[^/?]+)\/?(?<path>[^?]*)\??(?<queryString>.*)$/
 const PATH_GATEWAY_REGEX = /^https?:\/\/(.*[^/])\/(?<protocol>ip[fn]s)\/(?<cidOrPeerIdOrDnsLink>[^/?]+)\/?(?<path>[^?]*)\??(?<queryString>.*)$/
 const SUBDOMAIN_GATEWAY_REGEX = /^https?:\/\/(?<cidOrPeerIdOrDnsLink>[^/?]+)\.(?<protocol>ip[fn]s)\.([^/?]+)\/?(?<path>[^?]*)\??(?<queryString>.*)$/
 
-function matchURLString (urlString: string): Record<string, string> {
+interface MatchUrlGroups {
+  protocol: 'ipfs' | 'ipns'
+  cidOrPeerIdOrDnsLink: string
+  path?: string
+  queryString?: string
+
+}
+function matchURLString (urlString: string): MatchUrlGroups {
   for (const pattern of [URL_REGEX, PATH_REGEX, PATH_GATEWAY_REGEX, SUBDOMAIN_GATEWAY_REGEX]) {
     const match = urlString.match(pattern)
 
     if (match?.groups != null) {
-      return match.groups
+      return match.groups as unknown as MatchUrlGroups // force cast to MatchUrlGroups, because if it matches, it has to contain this structure.
     }
   }
 
   throw new TypeError(`Invalid URL: ${urlString}, please use ipfs://, ipns://, or gateway URLs only`)
 }
 
+/**
+ * determines the TTL for the resolved resource that will be used for the `Cache-Control` header's `max-age` directive.
+ * max-age is in seconds
+ *
+ * @see https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Cache-Control#response_directives
+ *
+ * If we have ipnsTtlNs, it will be a BigInt representing "nanoseconds". We need to convert it back to seconds.
+ *
+ * For more TTL nuances:
+ *
+ * @see https://github.com/ipfs/js-ipns/blob/16e0e10682fa9a663e0bb493a44d3e99a5200944/src/index.ts#L200
+ * @see https://github.com/ipfs/js-ipns/pull/308
+ */
+function calculateTtl (resolveResult?: IPNSResolveResult | DNSLinkResolveResult): number | undefined {
+  if (resolveResult == null) {
+    return undefined
+  }
+  const dnsLinkTtl = (resolveResult as DNSLinkResolveResult).answer?.TTL
+  const ipnsTtlNs = (resolveResult as IPNSResolveResult).record?.ttl
+  // For some reason, ipns "nanoseconds" are 1e-8 of a second, instead of 1e-9.
+  const ipnsTtl = ipnsTtlNs != null ? Number(ipnsTtlNs / BigInt(1e8)) : undefined
+  return dnsLinkTtl ?? ipnsTtl
+}
+
 /**
  * For dnslinks see https://specs.ipfs.tech/http-gateways/subdomain-gateway/#host-request-header
  * DNSLink names include . which means they must be inlined into a single DNS label to provide unique origin and work with wildcard TLS certificates.
@@ -89,43 +125,48 @@ export async function parseUrlString ({ urlString, ipns, logger }: ParseUrlStrin
   let cid: CID | undefined
   let resolvedPath: string | undefined
   const errors: Error[] = []
+  let resolveResult: IPNSResolveResult | DNSLinkResolveResult | undefined
 
   if (protocol === 'ipfs') {
     try {
       cid = CID.parse(cidOrPeerIdOrDnsLink)
+      /**
+       * no TTL set for immutable ipfs:// content — see {@link setCacheControlHeader}
+       */
     } catch (err) {
       log.error(err)
       errors.push(new TypeError('Invalid CID for ipfs://<cid> URL'))
     }
   } else {
-    let resolveResult = ipnsCache.get(cidOrPeerIdOrDnsLink)
+    // protocol is ipns
+    resolveResult = ipnsCache.get(cidOrPeerIdOrDnsLink)
 
     if (resolveResult != null) {
       cid = resolveResult.cid
       resolvedPath = resolveResult.path
       log.trace('resolved %s to %c from cache', cidOrPeerIdOrDnsLink, cid)
     } else {
-      // protocol is ipns
       log.trace('Attempting to resolve PeerId for %s', cidOrPeerIdOrDnsLink)
       let peerId = null
       try {
+        // try resolving as an IPNS name
         peerId = peerIdFromString(cidOrPeerIdOrDnsLink)
         resolveResult = await ipns.resolve(peerId, { onProgress: options?.onProgress })
-        cid = resolveResult?.cid
-        resolvedPath = resolveResult?.path
+        cid = resolveResult.cid
+        resolvedPath = resolveResult.path
         log.trace('resolved %s to %c', cidOrPeerIdOrDnsLink, cid)
-        ipnsCache.set(cidOrPeerIdOrDnsLink, resolveResult, 60 * 1000 * 2)
       } catch (err) {
         if (peerId == null) {
-          log.error('Could not parse PeerId string "%s"', cidOrPeerIdOrDnsLink, err)
+          log.error('could not parse PeerId string "%s"', cidOrPeerIdOrDnsLink, err)
           errors.push(new TypeError(`Could not parse PeerId in ipns url "${cidOrPeerIdOrDnsLink}", ${(err as Error).message}`))
         } else {
-          log.error('Could not resolve PeerId %c', peerId, err)
+          log.error('could not resolve PeerId %c', peerId, err)
           errors.push(new TypeError(`Could not resolve PeerId "${cidOrPeerIdOrDnsLink}", ${(err as Error).message}`))
         }
       }
 
       if (cid == null) {
+        // cid is still null, try resolving as a DNSLink
         let decodedDnsLinkLabel = cidOrPeerIdOrDnsLink
         if (isInlinedDnsLink(cidOrPeerIdOrDnsLink)) {
           decodedDnsLinkLabel = dnsLinkLabelDecoder(cidOrPeerIdOrDnsLink)
@@ -138,9 +179,8 @@ export async function parseUrlString ({ urlString, ipns, logger }: ParseUrlStrin
           cid = resolveResult?.cid
           resolvedPath = resolveResult?.path
           log.trace('resolved %s to %c', decodedDnsLinkLabel, cid)
-          ipnsCache.set(cidOrPeerIdOrDnsLink, resolveResult, 60 * 1000 * 2)
         } catch (err: any) {
-          log.error('Could not resolve DnsLink for "%s"', cidOrPeerIdOrDnsLink, err)
+          log.error('could not resolve DnsLink for "%s"', cidOrPeerIdOrDnsLink, err)
           errors.push(err)
         }
       }
@@ -155,6 +195,13 @@ export async function parseUrlString ({ urlString, ipns, logger }: ParseUrlStrin
     throw new AggregateError(errors, `Invalid resource. Cannot determine CID from URL "${urlString}"`)
   }
 
+  const ttl = calculateTtl(resolveResult)
+
+  if (resolveResult != null) {
+    // use the ttl for the resolved resouce for the cache, but fallback to 2 minutes if not available
+    ipnsCache.set(cidOrPeerIdOrDnsLink, resolveResult, ttl ?? 60 * 1000 * 2)
+  }
+
   // parse query string
   const query: Record<string, any> = {}
 
@@ -177,9 +224,10 @@ export async function parseUrlString ({ urlString, ipns, logger }: ParseUrlStrin
   return {
     protocol,
     cid,
-    path: joinPaths(resolvedPath, urlPath),
-    query
-  }
+    path: joinPaths(resolvedPath, urlPath ?? ''),
+    query,
+    ttl
+  } satisfies ParsedUrlStringResults
 }
 
 /**
diff --git a/packages/verified-fetch/src/utils/request-headers.ts b/packages/verified-fetch/src/utils/request-headers.ts
new file mode 100644
index 00000000..3eed63a5
--- /dev/null
+++ b/packages/verified-fetch/src/utils/request-headers.ts
@@ -0,0 +1,51 @@
+export function getHeader (headers: HeadersInit | undefined, header: string): string | undefined {
+  if (headers == null) {
+    return undefined
+  }
+  if (headers instanceof Headers) {
+    return headers.get(header) ?? undefined
+  }
+  if (Array.isArray(headers)) {
+    const entry = headers.find(([key]) => key.toLowerCase() === header.toLowerCase())
+    return entry?.[1]
+  }
+  const key = Object.keys(headers).find(k => k.toLowerCase() === header.toLowerCase())
+  if (key == null) {
+    return undefined
+  }
+
+  return headers[key]
+}
+
+/**
+ * Given two ints from a Range header, and potential fileSize, returns:
+ * 1. number of bytes the response should contain.
+ * 2. the start index of the range. // inclusive
+ * 3. the end index of the range. // inclusive
+ */
+export function calculateByteRangeIndexes (start: number | undefined, end: number | undefined, fileSize?: number): { byteSize?: number, start?: number, end?: number } {
+  if (start != null && end != null) {
+    if (start > end) {
+      throw new Error('Invalid range')
+    }
+
+    return { byteSize: end - start + 1, start, end }
+  } else if (start == null && end != null) {
+    // suffix byte range requested
+    if (fileSize == null) {
+      return { end }
+    }
+    const result = { byteSize: end, start: fileSize - end + 1, end: fileSize }
+    return result
+  } else if (start != null && end == null) {
+    if (fileSize == null) {
+      return { start }
+    }
+    const byteSize = fileSize - start + 1
+    const end = fileSize
+    return { byteSize, start, end }
+  }
+
+  // both start and end are undefined
+  return { byteSize: fileSize }
+}
diff --git a/packages/verified-fetch/src/utils/response-headers.ts b/packages/verified-fetch/src/utils/response-headers.ts
new file mode 100644
index 00000000..47574dc8
--- /dev/null
+++ b/packages/verified-fetch/src/utils/response-headers.ts
@@ -0,0 +1,70 @@
+interface CacheControlHeaderOptions {
+  /**
+   * This should be seconds as a number.
+   *
+   * See https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Cache-Control#response_directives
+   */
+  ttl?: number
+  protocol: 'ipfs' | 'ipns'
+  response: Response
+}
+
+/**
+ * Implementations may place an upper bound on any TTL received, as noted in Section 8 of [rfc2181].
+ * If TTL value is unknown, implementations should not send a Cache-Control
+ * No matter if TTL value is known or not, implementations should always send a Last-Modified header with the timestamp of the record resolution.
+ *
+ * @see https://specs.ipfs.tech/http-gateways/path-gateway/#cache-control-response-header
+ */
+export function setCacheControlHeader ({ ttl, protocol, response }: CacheControlHeaderOptions): void {
+  let headerValue: string
+  if (protocol === 'ipfs') {
+    headerValue = 'public, max-age=29030400, immutable'
+  } else if (ttl == null) {
+    /**
+     * default limit for unknown TTL: "use 5 minute as default fallback when it is not available."
+     *
+     * @see https://github.com/ipfs/boxo/issues/329#issuecomment-1995236409
+     */
+    headerValue = 'public, max-age=300'
+  } else {
+    headerValue = `public, max-age=${ttl}`
+  }
+
+  if (headerValue != null) {
+    response.headers.set('cache-control', headerValue)
+  }
+}
+
/**
 * This function returns the value of the `Content-Range` header for a given range.
 * If you know the total size of the body, pass it as `byteSize`
 *
 * NOTE(review): the positions emitted below treat `byteSize` itself as a valid
 * last-byte-pos. RFC 9110 defines byte positions as 0-indexed with
 * last-byte-pos <= size - 1, so this only produces spec-valid headers if the
 * caller supplies 1-indexed positions — confirm against ByteRangeContext.
 *
 * @see https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Content-Range
 */
export function getContentRangeHeader ({ byteStart, byteEnd, byteSize }: { byteStart: number | undefined, byteEnd: number | undefined, byteSize: number | undefined }): string {
  const total = byteSize ?? '*' // if we don't know the total size, we should use *

  if (byteStart != null && byteEnd == null) {
    // only byteStart in range
    if (byteSize == null) {
      return `bytes */${total}`
    }
    // open-ended range: from byteStart through the end of the body
    return `bytes ${byteStart}-${byteSize}/${byteSize}`
  }

  if (byteStart == null && byteEnd != null) {
    // only byteEnd in range
    if (byteSize == null) {
      return `bytes */${total}`
    }
    // suffix range: the last byteEnd bytes of the body
    return `bytes ${byteSize - byteEnd + 1}-${byteSize}/${byteSize}`
  }

  if (byteStart == null && byteEnd == null) {
    // neither are provided, we can't return a valid range.
    return `bytes */${total}`
  }

  return `bytes ${byteStart}-${byteEnd}/${total}`
}
diff --git a/packages/verified-fetch/src/utils/responses.ts b/packages/verified-fetch/src/utils/responses.ts
index a5963706..667318c6 100644
--- a/packages/verified-fetch/src/utils/responses.ts
+++ b/packages/verified-fetch/src/utils/responses.ts
@@ -1,3 +1,7 @@
+import type { ByteRangeContext } from './byte-range-context'
+import type { SupportedBodyTypes } from '../types.js'
+import type { Logger } from '@libp2p/interface'
+
 function setField (response: Response, name: string, value: string | boolean): void {
   Object.defineProperty(response, name, {
     enumerable: true,
@@ -23,7 +27,7 @@ export interface ResponseOptions extends ResponseInit {
   redirected?: boolean
 }
 
-export function okResponse (url: string, body?: BodyInit | null, init?: ResponseOptions): Response {
+export function okResponse (url: string, body?: SupportedBodyTypes, init?: ResponseOptions): Response {
   const response = new Response(body, {
     ...(init ?? {}),
     status: 200,
@@ -34,13 +38,27 @@ export function okResponse (url: string, body?: BodyInit | null, init?: Response
     setRedirected(response)
   }
 
+  setType(response, 'basic')
+  setUrl(response, url)
+  response.headers.set('Accept-Ranges', 'bytes')
+
+  return response
+}
+
+export function badGatewayResponse (url: string, body?: SupportedBodyTypes, init?: ResponseInit): Response {
+  const response = new Response(body, {
+    ...(init ?? {}),
+    status: 502,
+    statusText: 'Bad Gateway'
+  })
+
   setType(response, 'basic')
   setUrl(response, url)
 
   return response
 }
 
-export function notSupportedResponse (url: string, body?: BodyInit | null, init?: ResponseInit): Response {
+export function notSupportedResponse (url: string, body?: SupportedBodyTypes, init?: ResponseInit): Response {
   const response = new Response(body, {
     ...(init ?? {}),
     status: 501,
@@ -54,7 +72,7 @@ export function notSupportedResponse (url: string, body?: BodyInit | null, init?
   return response
 }
 
-export function notAcceptableResponse (url: string, body?: BodyInit | null, init?: ResponseInit): Response {
+export function notAcceptableResponse (url: string, body?: SupportedBodyTypes, init?: ResponseInit): Response {
   const response = new Response(body, {
     ...(init ?? {}),
     status: 406,
@@ -67,7 +85,7 @@ export function notAcceptableResponse (url: string, body?: BodyInit | null, init
   return response
 }
 
-export function badRequestResponse (url: string, body?: BodyInit | null, init?: ResponseInit): Response {
+export function badRequestResponse (url: string, body?: SupportedBodyTypes, init?: ResponseInit): Response {
   const response = new Response(body, {
     ...(init ?? {}),
     status: 400,
@@ -96,3 +114,63 @@ export function movedPermanentlyResponse (url: string, location: string, init?:
 
   return response
 }
+
+interface RangeOptions {
+  byteRangeContext: ByteRangeContext
+  log?: Logger
+}
+
+export function okRangeResponse (url: string, body: SupportedBodyTypes, { byteRangeContext, log }: RangeOptions, init?: ResponseOptions): Response {
+  if (!byteRangeContext.isRangeRequest) {
+    return okResponse(url, body, init)
+  }
+
+  if (!byteRangeContext.isValidRangeRequest) {
+    return badRangeResponse(url, body, init)
+  }
+
+  let response: Response
+  try {
+    response = new Response(body, {
+      ...(init ?? {}),
+      status: 206,
+      statusText: 'Partial Content',
+      headers: {
+        ...(init?.headers ?? {}),
+        'content-range': byteRangeContext.contentRangeHeaderValue
+      }
+    })
+  } catch (e: any) {
+    log?.error('failed to create range response', e)
+    return badRangeResponse(url, body, init)
+  }
+
+  if (init?.redirected === true) {
+    setRedirected(response)
+  }
+
+  setType(response, 'basic')
+  setUrl(response, url)
+  response.headers.set('Accept-Ranges', 'bytes')
+
+  return response
+}
+
+/**
+ * We likely need to catch errors handled by upstream helia libraries if range-request throws an error. Some examples:
+ * * The range is out of bounds
+ * * The range is invalid
+ * * The range is not supported for the given type
+ */
+export function badRangeResponse (url: string, body?: SupportedBodyTypes, init?: ResponseInit): Response {
+  const response = new Response(body, {
+    ...(init ?? {}),
+    status: 416,
+    statusText: 'Requested Range Not Satisfiable'
+  })
+
+  setType(response, 'basic')
+  setUrl(response, url)
+
+  return response
+}
diff --git a/packages/verified-fetch/src/verified-fetch.ts b/packages/verified-fetch/src/verified-fetch.ts
index a8354ccd..15280d91 100644
--- a/packages/verified-fetch/src/verified-fetch.ts
+++ b/packages/verified-fetch/src/verified-fetch.ts
@@ -1,6 +1,6 @@
 import { car } from '@helia/car'
 import { ipns as heliaIpns, type IPNS } from '@helia/ipns'
-import { unixfs as heliaUnixFs, type UnixFS as HeliaUnixFs, type UnixFSStats } from '@helia/unixfs'
+import { unixfs as heliaUnixFs, type UnixFS as HeliaUnixFs } from '@helia/unixfs'
 import * as ipldDagCbor from '@ipld/dag-cbor'
 import * as ipldDagJson from '@ipld/dag-json'
 import { code as dagPbCode } from '@ipld/dag-pb'
@@ -15,17 +15,20 @@ import { CustomProgressEvent } from 'progress-events'
 import { concat as uint8ArrayConcat } from 'uint8arrays/concat'
 import { fromString as uint8ArrayFromString } from 'uint8arrays/from-string'
 import { toString as uint8ArrayToString } from 'uint8arrays/to-string'
+import { ByteRangeContext } from './utils/byte-range-context.js'
 import { dagCborToSafeJSON } from './utils/dag-cbor-to-safe-json.js'
 import { getContentDispositionFilename } from './utils/get-content-disposition-filename.js'
 import { getETag } from './utils/get-e-tag.js'
 import { getStreamFromAsyncIterable } from './utils/get-stream-from-async-iterable.js'
 import { tarStream } from './utils/get-tar-stream.js'
 import { parseResource } from './utils/parse-resource.js'
-import { badRequestResponse, movedPermanentlyResponse, notAcceptableResponse, notSupportedResponse, okResponse } from './utils/responses.js'
+import { setCacheControlHeader } from './utils/response-headers.js'
+import { badRequestResponse, movedPermanentlyResponse, notAcceptableResponse, notSupportedResponse, okResponse, badRangeResponse, okRangeResponse, badGatewayResponse } from './utils/responses.js'
 import { selectOutputType, queryFormatToAcceptHeader } from './utils/select-output-type.js'
 import { walkPath } from './utils/walk-path.js'
 import type { CIDDetail, ContentTypeParser, Resource, VerifiedFetchInit as VerifiedFetchOptions } from './index.js'
 import type { RequestFormatShorthand } from './types.js'
+import type { ParsedUrlStringResults } from './utils/parse-url-string'
 import type { Helia } from '@helia/interface'
 import type { AbortOptions, Logger, PeerId } from '@libp2p/interface'
 import type { DNSResolver } from '@multiformats/dns/resolvers'
@@ -275,17 +278,19 @@ export class VerifiedFetch {
     let terminalElement: UnixFSEntry | undefined
     let ipfsRoots: CID[] | undefined
     let redirected = false
+    const byteRangeContext = new ByteRangeContext(this.helia.logger, options?.headers)
 
     try {
       const pathDetails = await walkPath(this.helia.blockstore, `${cid.toString()}/${path}`, options)
       ipfsRoots = pathDetails.ipfsRoots
       terminalElement = pathDetails.terminalElement
     } catch (err) {
-      this.log.error('Error walking path %s', path, err)
+      this.log.error('error walking path %s', path, err)
+
+      return badGatewayResponse('Error walking path')
     }
 
     let resolvedCID = terminalElement?.cid ?? cid
-    let stat: UnixFSStats
     if (terminalElement?.type === 'directory') {
       const dirCid = terminalElement.cid
 
@@ -307,7 +312,7 @@ export class VerifiedFetch {
       const rootFilePath = 'index.html'
       try {
         this.log.trace('found directory at %c/%s, looking for index.html', cid, path)
-        stat = await this.unixfs.stat(dirCid, {
+        const stat = await this.unixfs.stat(dirCid, {
           path: rootFilePath,
           signal: options?.signal,
           onProgress: options?.onProgress
@@ -323,30 +328,56 @@ export class VerifiedFetch {
       }
     }
 
+    // we have a validRangeRequest & terminalElement is a file, we know the size and should set it
+    if (byteRangeContext.isRangeRequest && byteRangeContext.isValidRangeRequest && terminalElement.type === 'file') {
+      byteRangeContext.setFileSize(terminalElement.unixfs.fileSize())
+
+      this.log.trace('fileSize for rangeRequest %d', byteRangeContext.getFileSize())
+    }
+    const offset = byteRangeContext.offset
+    const length = byteRangeContext.length
+    this.log.trace('calling unixfs.cat for %c/%s with offset=%o & length=%o', resolvedCID, path, offset, length)
     const asyncIter = this.unixfs.cat(resolvedCID, {
       signal: options?.signal,
-      onProgress: options?.onProgress
+      onProgress: options?.onProgress,
+      offset,
+      length
     })
     this.log('got async iterator for %c/%s', cid, path)
 
-    const { stream, firstChunk } = await getStreamFromAsyncIterable(asyncIter, path ?? '', this.helia.logger, {
-      onProgress: options?.onProgress
-    })
-    const response = okResponse(resource, stream, {
-      redirected
-    })
-    await this.setContentType(firstChunk, path, response)
+    try {
+      const { stream, firstChunk } = await getStreamFromAsyncIterable(asyncIter, path ?? '', this.helia.logger, {
+        onProgress: options?.onProgress
+      })
+      byteRangeContext.setBody(stream)
+      // if not a valid range request, okRangeRequest will call okResponse
+      const response = okRangeResponse(resource, byteRangeContext.getBody(), { byteRangeContext, log: this.log }, {
+        redirected
+      })
+
+      await this.setContentType(firstChunk, path, response)
+
+      if (ipfsRoots != null) {
+        response.headers.set('X-Ipfs-Roots', ipfsRoots.map(cid => cid.toV1().toString()).join(',')) // https://specs.ipfs.tech/http-gateways/path-gateway/#x-ipfs-roots-response-header
+      }
 
-    if (ipfsRoots != null) {
-      response.headers.set('X-Ipfs-Roots', ipfsRoots.map(cid => cid.toV1().toString()).join(',')) // https://specs.ipfs.tech/http-gateways/path-gateway/#x-ipfs-roots-response-header
+      return response
+    } catch (err: any) {
+      this.log.error('error streaming %c/%s', cid, path, err)
+      if (byteRangeContext.isRangeRequest && err.code === 'ERR_INVALID_PARAMS') {
+        return badRangeResponse(resource)
+      }
+      return badGatewayResponse('Unable to stream content')
     }
-
-    return response
   }
 
   private async handleRaw ({ resource, cid, path, options }: FetchHandlerFunctionArg): Promise<Response> {
+    const byteRangeContext = new ByteRangeContext(this.helia.logger, options?.headers)
     const result = await this.helia.blockstore.get(cid, options)
-    const response = okResponse(resource, result)
+    byteRangeContext.setBody(result)
+    const response = okRangeResponse(resource, byteRangeContext.getBody(), { byteRangeContext, log: this.log }, {
+      redirected: false
+    })
 
     // if the user has specified an `Accept` header that corresponds to a raw
     // type, honour that header, so for example they don't request
@@ -380,10 +411,10 @@ export class VerifiedFetch {
           contentType = parsed
         }
       } catch (err) {
-        this.log.error('Error parsing content type', err)
+        this.log.error('error parsing content type', err)
       }
     }
-
+    this.log.trace('setting content type to "%s"', contentType)
     response.headers.set('content-type', contentType)
   }
 
@@ -408,7 +439,23 @@ export class VerifiedFetch {
     options?.onProgress?.(new CustomProgressEvent<CIDDetail>('verified-fetch:request:start', { resource }))
 
     // resolve the CID/path from the requested resource
-    const { path, query, cid, protocol } = await parseResource(resource, { ipns: this.ipns, logger: this.helia.logger }, options)
+    let cid: ParsedUrlStringResults['cid']
+    let path: ParsedUrlStringResults['path']
+    let query: ParsedUrlStringResults['query']
+    let ttl: ParsedUrlStringResults['ttl']
+    let protocol: ParsedUrlStringResults['protocol']
+    try {
+      const result = await parseResource(resource, { ipns: this.ipns, logger: this.helia.logger }, options)
+      cid = result.cid
+      path = result.path
+      query = result.query
+      ttl = result.ttl
+      protocol = result.protocol
+    } catch (err) {
+      this.log.error('error parsing resource %s', resource, err)
+
+      return badRequestResponse('Invalid resource')
+    }
 
     options?.onProgress?.(new CustomProgressEvent<CIDDetail>('verified-fetch:request:resolve', { cid, path }))
 
@@ -461,21 +508,21 @@ export class VerifiedFetch {
       query.filename = query.filename ?? `${cid.toString()}.tar`
       response = await this.handleTar(handlerArgs)
     } else {
+      this.log.trace('finding handler for cid code "%s" and output type "%s"', cid.code, accept)
       // derive the handler from the CID type
       const codecHandler = this.codecHandlers[cid.code]
 
       if (codecHandler == null) {
         return notSupportedResponse(`Support for codec with code ${cid.code} is not yet implemented. Please open an issue at https://github.com/ipfs/helia/issues/new`)
       }
+      this.log.trace('calling handler "%s"', codecHandler.name)
 
       response = await codecHandler.call(this, handlerArgs)
     }
 
     response.headers.set('etag', getETag({ cid, reqFormat, weak: false }))
 
-    if (protocol === 'ipfs') {
-      response.headers.set('cache-control', 'public, max-age=29030400, immutable')
-    }
+    setCacheControlHeader({ response, ttl, protocol })
     // https://specs.ipfs.tech/http-gateways/path-gateway/#x-ipfs-path-response-header
     response.headers.set('X-Ipfs-Path', resource.toString())
 
diff --git a/packages/verified-fetch/test/cache-control-header.spec.ts b/packages/verified-fetch/test/cache-control-header.spec.ts
index bae88476..0cfe1268 100644
--- a/packages/verified-fetch/test/cache-control-header.spec.ts
+++ b/packages/verified-fetch/test/cache-control-header.spec.ts
@@ -2,20 +2,41 @@ import { dagCbor } from '@helia/dag-cbor'
 import { ipns } from '@helia/ipns'
 import { stop } from '@libp2p/interface'
 import { createEd25519PeerId } from '@libp2p/peer-id-factory'
+import { dns } from '@multiformats/dns'
 import { expect } from 'aegir/chai'
 import Sinon from 'sinon'
+import { stubInterface } from 'sinon-ts'
 import { VerifiedFetch } from '../src/verified-fetch.js'
 import { createHelia } from './fixtures/create-offline-helia.js'
 import type { Helia } from '@helia/interface'
 import type { IPNS } from '@helia/ipns'
-
+import type { DNSResponse } from '@multiformats/dns'
+
/**
 * Builds a stubbed DNSResponse carrying a single DNS answer record, for use
 * with stubbed DNS resolvers in tests.
 */
function answerFake (data: string, TTL: number, name: string, type: number): DNSResponse {
  const fake = stubInterface<DNSResponse>()
  fake.Answer = [{
    data,
    TTL,
    name,
    type
  }]
  return fake
}
 describe('cache-control header', () => {
   let helia: Helia
   let name: IPNS
   let verifiedFetch: VerifiedFetch
+  let customDnsResolver: Sinon.SinonStub<any[], Promise<DNSResponse>>
 
   beforeEach(async () => {
-    helia = await createHelia()
+    customDnsResolver = Sinon.stub()
+    helia = await createHelia({
+      dns: dns({
+        resolvers: {
+          '.': customDnsResolver
+        }
+      })
+    })
     name = ipns(helia)
     verifiedFetch = new VerifiedFetch({
       helia
@@ -60,29 +81,34 @@ describe('cache-control header', () => {
     expect(resp.headers.get('Cache-Control')).to.not.containIgnoreCase('immutable')
   })
 
-  it.skip('should return the correct max-age in the cache-control header for an IPNS name', async () => {
+  it('should return the correct max-age in the cache-control header for an IPNS name', async () => {
     const obj = {
       hello: 'world'
     }
     const c = dagCbor(helia)
     const cid = await c.add(obj)
 
-    const oneHourInMs = 1000 * 60 * 60
+    const oneHourInSeconds = 60 * 60
     const peerId = await createEd25519PeerId()
 
-    // ipns currently only allows customising the lifetime which is also used as the TTL
-    await name.publish(peerId, cid, { lifetime: oneHourInMs })
+    /**
+     * ipns currently only allows customising the lifetime which is also used as the TTL
+     *
+     * lifetime is coming back as 100000 times larger than expected
+     *
+     * @see https://github.com/ipfs/js-ipns/blob/16e0e10682fa9a663e0bb493a44d3e99a5200944/src/index.ts#L200
+     * @see https://github.com/ipfs/js-ipns/pull/308
+     */
+    await name.publish(peerId, cid, { lifetime: oneHourInSeconds * 1000 }) // pass to ipns as milliseconds
 
     const resp = await verifiedFetch.fetch(`ipns://${peerId}`)
     expect(resp).to.be.ok()
     expect(resp.status).to.equal(200)
 
-    expect(resp.headers.get('Cache-Control')).to.equal(`public, max-age=${oneHourInMs.toString()}`)
+    expect(resp.headers.get('Cache-Control')).to.equal(`public, max-age=${oneHourInSeconds}`)
   })
 
   it('should not contain immutable in the cache-control header for a DNSLink name', async () => {
-    const customDnsResolver = Sinon.stub()
-
     verifiedFetch = new VerifiedFetch({
       helia
     }, {
@@ -94,12 +120,12 @@ describe('cache-control header', () => {
     }
     const c = dagCbor(helia)
     const cid = await c.add(obj)
-    customDnsResolver.returns(Promise.resolve(`/ipfs/${cid.toString()}`))
+    customDnsResolver.withArgs('_dnslink.example-domain.com').resolves(answerFake(`dnslink=/ipfs/${cid}`, 666, '_dnslink.example-domain.com', 16))
 
     const resp = await verifiedFetch.fetch('ipns://example-domain.com')
     expect(resp).to.be.ok()
     expect(resp.status).to.equal(200)
 
-    expect(resp.headers.get('Cache-Control')).to.not.containIgnoreCase('immutable')
+    expect(resp.headers.get('Cache-Control')).to.equal('public, max-age=666')
   })
 })
diff --git a/packages/verified-fetch/test/custom-dns-resolvers.spec.ts b/packages/verified-fetch/test/custom-dns-resolvers.spec.ts
index ae44cde8..d6bdce65 100644
--- a/packages/verified-fetch/test/custom-dns-resolvers.spec.ts
+++ b/packages/verified-fetch/test/custom-dns-resolvers.spec.ts
@@ -19,16 +19,19 @@ describe('custom dns-resolvers', () => {
   })
 
   it('is used when passed to createVerifiedFetch', async () => {
-    const customDnsResolver = Sinon.stub()
-
-    customDnsResolver.returns(Promise.resolve('/ipfs/QmVP2ip92jQuMDezVSzQBWDqWFbp9nyCHNQSiciRauPLDg'))
+    const customDnsResolver = Sinon.stub().withArgs('_dnslink.some-non-cached-domain.com').resolves({
+      Answer: [{
+        data: 'dnslink=/ipfs/QmVP2ip92jQuMDezVSzQBWDqWFbp9nyCHNQSiciRauPLDg'
+      }]
+    })
 
     const fetch = await createVerifiedFetch({
       gateways: ['http://127.0.0.1:8080'],
       dnsResolvers: [customDnsResolver]
     })
-    // error of walking the CID/dag because we haven't actually added the block to the blockstore
-    await expect(fetch('ipns://some-non-cached-domain.com')).to.eventually.be.rejected.with.property('errors')
+    const response = await fetch('ipns://some-non-cached-domain.com')
+    expect(response.status).to.equal(502)
+    expect(response.statusText).to.equal('Bad Gateway')
 
     expect(customDnsResolver.callCount).to.equal(1)
     expect(customDnsResolver.getCall(0).args).to.deep.equal(['_dnslink.some-non-cached-domain.com', {
@@ -58,8 +61,10 @@ describe('custom dns-resolvers', () => {
     const verifiedFetch = new VerifiedFetch({
       helia
     })
-    // error of walking the CID/dag because we haven't actually added the block to the blockstore
-    await expect(verifiedFetch.fetch('ipns://some-non-cached-domain2.com')).to.eventually.be.rejected.with.property('errors').that.has.lengthOf(0)
+
+    const response = await verifiedFetch.fetch('ipns://some-non-cached-domain2.com')
+    expect(response.status).to.equal(502)
+    expect(response.statusText).to.equal('Bad Gateway')
 
     expect(customDnsResolver.callCount).to.equal(1)
     expect(customDnsResolver.getCall(0).args).to.deep.equal(['_dnslink.some-non-cached-domain2.com', {
diff --git a/packages/verified-fetch/test/fixtures/ipns-stubs.ts b/packages/verified-fetch/test/fixtures/ipns-stubs.ts
new file mode 100644
index 00000000..759790c3
--- /dev/null
+++ b/packages/verified-fetch/test/fixtures/ipns-stubs.ts
@@ -0,0 +1,19 @@
+import { stubInterface, type StubbedInstance } from 'sinon-ts'
+import type { PeerId } from '@libp2p/interface'
+import type { IPNSRecord } from 'ipns'
+
export interface IpnsRecordStubOptions {
  // the PeerId whose string form becomes the stubbed record's value
  peerId: PeerId
  // TTL as a bigint — passed straight through to IPNSRecord.ttl
  ttl?: bigint
}

/**
 * When stubbing an IPNSRecord, we need to provide a PeerId and some ttl value or else we will get
 * "SyntaxError: Cannot convert stub to a BigInt" when parse-url-string.ts calls `calculateTtl`
 */
export function ipnsRecordStub ({ peerId, ttl }: IpnsRecordStubOptions): StubbedInstance<IPNSRecord> {
  return stubInterface<IPNSRecord>({
    value: peerId.toString(),
    ttl
  })
}
diff --git a/packages/verified-fetch/test/range-requests.spec.ts b/packages/verified-fetch/test/range-requests.spec.ts
new file mode 100644
index 00000000..489ce3c8
--- /dev/null
+++ b/packages/verified-fetch/test/range-requests.spec.ts
@@ -0,0 +1,162 @@
+import { unixfs } from '@helia/unixfs'
+import { stop } from '@libp2p/interface'
+import { expect } from 'aegir/chai'
+import { CID } from 'multiformats/cid'
+import * as raw from 'multiformats/codecs/raw'
+import { sha256 } from 'multiformats/hashes/sha2'
+import { VerifiedFetch } from '../src/verified-fetch.js'
+import { createHelia } from './fixtures/create-offline-helia.js'
+import type { Helia } from '@helia/interface'
+
+/**
+ * Range requests against IPFS gateways are only supported for raw and unixfs content
+ */
+describe('range requests', () => {
+  let helia: Helia
+  let verifiedFetch: VerifiedFetch
+  const content = new Uint8Array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10])
+
+  beforeEach(async () => {
+    helia = await createHelia()
+    verifiedFetch = new VerifiedFetch({
+      helia
+    })
+  })
+
+  afterEach(async () => {
+    await stop(helia, verifiedFetch)
+  })
+
+  interface SuccessfulTestExpectation {
+    contentRange: string
+    bytes: Uint8Array
+  }
+  async function testRange (cid: CID, headerRange: string, expected: SuccessfulTestExpectation): Promise<void> {
+    const response = await verifiedFetch.fetch(cid, {
+      headers: {
+        Range: headerRange
+      }
+    })
+
+    expect(response.status).to.equal(206)
+    expect(response.statusText).to.equal('Partial Content')
+
+    expect(response).to.have.property('headers')
+    const contentRange = response.headers.get('content-range')
+    expect(contentRange).to.be.ok()
+    expect(contentRange).to.equal(expected.contentRange) // the response should include the range that was requested
+
+    const responseContent = await response.arrayBuffer()
+    expect(new Uint8Array(responseContent)).to.deep.equal(expected.bytes)
+  }
+
+  async function assertFailingRange (response: Promise<Response>): Promise<void> {
+    await expect(response).to.eventually.have.property('status', 416)
+    await expect(response).to.eventually.have.property('statusText', 'Requested Range Not Satisfiable')
+  }
+
+  function runTests (description: string, getCid: () => Promise<CID>): void {
+    describe(description, () => {
+      let cid: CID
+      beforeEach(async () => {
+        cid = await getCid()
+      })
+      const validTestCases = [
+        {
+          byteSize: 6,
+          contentRange: 'bytes 0-5/11',
+          rangeHeader: 'bytes=0-5',
+          bytes: new Uint8Array([0, 1, 2, 3, 4, 5])
+        },
+        {
+          byteSize: 8,
+          contentRange: 'bytes 4-11/11',
+          rangeHeader: 'bytes=4-',
+          bytes: new Uint8Array([3, 4, 5, 6, 7, 8, 9, 10])
+        },
+        {
+          byteSize: 9,
+          contentRange: 'bytes 3-11/11',
+          rangeHeader: 'bytes=-9',
+          bytes: new Uint8Array([2, 3, 4, 5, 6, 7, 8, 9, 10])
+        }
+      ]
+      validTestCases.forEach(({ bytes, contentRange, rangeHeader }) => {
+        // if these fail, check response-headers.spec.ts first
+        it(`should return correct 206 Partial Content response for ${rangeHeader}`, async () => {
+          const expected: SuccessfulTestExpectation = {
+            bytes,
+            contentRange
+          }
+          await testRange(cid, rangeHeader, expected)
+        })
+      })
+
+      it('should return 416 Range Not Satisfiable when the range is invalid', async () => {
+        await assertFailingRange(verifiedFetch.fetch(cid, {
+          headers: {
+            Range: 'bytes=-0-'
+          }
+        }))
+        await assertFailingRange(verifiedFetch.fetch(cid, {
+          headers: {
+            Range: 'bytes=foobar'
+          }
+        }))
+      })
+
+      it('should return 416 Range Not Satisfiable when the range offset is larger than content', async () => {
+        await assertFailingRange(verifiedFetch.fetch(cid, {
+          headers: {
+            Range: 'bytes=50-'
+          }
+        }))
+      })
+
+      it('should return 416 Range Not Satisfiable when the suffix-length is larger than content', async () => {
+        await assertFailingRange(verifiedFetch.fetch(cid, {
+          headers: {
+            Range: 'bytes=-50'
+          }
+        }))
+      })
+
+      it('should return 416 Range Not Satisfiable when the range is out of bounds', async () => {
+        await assertFailingRange(verifiedFetch.fetch(cid, {
+          headers: {
+            Range: 'bytes=0-900'
+          }
+        }))
+      })
+
+      it('should return 416 Range Not Satisfiable when passed multiple ranges', async () => {
+        await assertFailingRange(verifiedFetch.fetch(cid, {
+          headers: {
+            Range: 'bytes=0-2,3-5'
+          }
+        }))
+      })
+    })
+  }
+
+  const testTuples = [
+    ['unixfs', async () => {
+      return unixfs(helia).addFile({
+        content
+      }, {
+        rawLeaves: false,
+        leafType: 'file'
+      })
+    }],
+    ['raw', async () => {
+      const buf = raw.encode(content)
+      const cid = CID.createV1(raw.code, await sha256.digest(buf))
+      await helia.blockstore.put(cid, buf)
+      return cid
+    }]
+  ] as const
+
+  testTuples.forEach(([name, fn]) => {
+    runTests(name, fn)
+  })
+})
diff --git a/packages/verified-fetch/test/utils/byte-range-context.spec.ts b/packages/verified-fetch/test/utils/byte-range-context.spec.ts
new file mode 100644
index 00000000..4f95a5b5
--- /dev/null
+++ b/packages/verified-fetch/test/utils/byte-range-context.spec.ts
@@ -0,0 +1,150 @@
+import { unixfs, type UnixFS } from '@helia/unixfs'
+import { stop } from '@libp2p/interface'
+import { defaultLogger, prefixLogger } from '@libp2p/logger'
+import { expect } from 'aegir/chai'
+import { ByteRangeContext } from '../../src/utils/byte-range-context.js'
+import { getStreamFromAsyncIterable } from '../../src/utils/get-stream-from-async-iterable.js'
+import { createHelia } from '../fixtures/create-offline-helia.js'
+import type { Helia } from 'helia'
+import type { CID } from 'multiformats/cid'
+
+describe('ByteRangeContext', () => {
+  const logger = prefixLogger('test')
+
+  it('should correctly detect range request', () => {
+    const context = new ByteRangeContext(logger, { Range: 'bytes=0-2' })
+    expect(context.isRangeRequest).to.be.true()
+  })
+
+  it('should correctly detect non-range request', () => {
+    const context = new ByteRangeContext(logger, {})
+    expect(context.isRangeRequest).to.be.false()
+  })
+
+  it('should correctly set body and calculate fileSize', () => {
+    const context = new ByteRangeContext(logger, {})
+    const body = new Uint8Array([1, 2, 3, 4, 5])
+    context.setBody(body)
+    expect(context.getBody()).to.equal(body)
+    expect(context.getFileSize()).to.equal(body.length)
+  })
+
+  it('should correctly handle invalid range request', () => {
+    const invalidRanges = [
+      'bytes=f',
+      'bytes=0-foobar',
+      'bytes=f-0',
+      'byte=0-2'
+    ]
+    invalidRanges.forEach(range => {
+      const context = new ByteRangeContext(logger, { Range: range })
+      expect(context.isValidRangeRequest).to.be.false()
+    })
+  })
+
+  const array = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]
+  const uint8arrayRangeTests = [
+    // full ranges:
+    { type: 'Uint8Array', range: 'bytes=0-11', contentRange: 'bytes 0-11/11', body: new Uint8Array(array), expected: new Uint8Array(array) },
+    { type: 'Uint8Array', range: 'bytes=-11', contentRange: 'bytes 1-11/11', body: new Uint8Array(array), expected: new Uint8Array(array) },
+    { type: 'Uint8Array', range: 'bytes=0-', contentRange: 'bytes 0-11/11', body: new Uint8Array(array), expected: new Uint8Array(array) },
+
+    // partial ranges:
+    { type: 'Uint8Array', range: 'bytes=0-1', contentRange: 'bytes 0-1/11', body: new Uint8Array(array), expected: new Uint8Array([1, 2]) },
+    { type: 'Uint8Array', range: 'bytes=0-2', contentRange: 'bytes 0-2/11', body: new Uint8Array(array), expected: new Uint8Array([1, 2, 3]) },
+    { type: 'Uint8Array', range: 'bytes=0-3', contentRange: 'bytes 0-3/11', body: new Uint8Array(array), expected: new Uint8Array([1, 2, 3, 4]) },
+    { type: 'Uint8Array', range: 'bytes=0-4', contentRange: 'bytes 0-4/11', body: new Uint8Array(array), expected: new Uint8Array([1, 2, 3, 4, 5]) },
+    { type: 'Uint8Array', range: 'bytes=0-5', contentRange: 'bytes 0-5/11', body: new Uint8Array(array), expected: new Uint8Array([1, 2, 3, 4, 5, 6]) },
+    { type: 'Uint8Array', range: 'bytes=0-6', contentRange: 'bytes 0-6/11', body: new Uint8Array(array), expected: new Uint8Array([1, 2, 3, 4, 5, 6, 7]) },
+    { type: 'Uint8Array', range: 'bytes=0-7', contentRange: 'bytes 0-7/11', body: new Uint8Array(array), expected: new Uint8Array([1, 2, 3, 4, 5, 6, 7, 8]) },
+    { type: 'Uint8Array', range: 'bytes=0-8', contentRange: 'bytes 0-8/11', body: new Uint8Array(array), expected: new Uint8Array([1, 2, 3, 4, 5, 6, 7, 8, 9]) },
+    { type: 'Uint8Array', range: 'bytes=0-9', contentRange: 'bytes 0-9/11', body: new Uint8Array(array), expected: new Uint8Array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10]) },
+    { type: 'Uint8Array', range: 'bytes=0-10', contentRange: 'bytes 0-10/11', body: new Uint8Array(array), expected: new Uint8Array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]) },
+    { type: 'Uint8Array', range: 'bytes=1-', contentRange: 'bytes 1-11/11', body: new Uint8Array(array), expected: new Uint8Array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]) },
+    { type: 'Uint8Array', range: 'bytes=2-', contentRange: 'bytes 2-11/11', body: new Uint8Array(array), expected: new Uint8Array([2, 3, 4, 5, 6, 7, 8, 9, 10, 11]) },
+    { type: 'Uint8Array', range: 'bytes=-2', contentRange: 'bytes 10-11/11', body: new Uint8Array(array), expected: new Uint8Array(array.slice(-2)) },
+
+    // single byte ranges:
+    { type: 'Uint8Array', range: 'bytes=1-1', contentRange: 'bytes 1-1/11', body: new Uint8Array(array), expected: new Uint8Array(array.slice(1, 2)) },
+    { type: 'Uint8Array', range: 'bytes=-1', contentRange: 'bytes 11-11/11', body: new Uint8Array(array), expected: new Uint8Array(array.slice(-1)) }
+
+  ]
+  const validRanges = [
+    ...uint8arrayRangeTests,
+    ...uint8arrayRangeTests.map(({ range, contentRange, body, expected }) => ({
+      type: 'ArrayBuffer',
+      range,
+      contentRange,
+      body: body.buffer,
+      expected: expected.buffer
+    })),
+    ...uint8arrayRangeTests.map(({ range, contentRange, body, expected }) => ({
+      type: 'Blob',
+      range,
+      contentRange,
+      body: new Blob([body]),
+      expected: new Blob([expected])
+    }))
+  ]
+  validRanges.forEach(({ type, range, expected, body, contentRange }) => {
+    it(`should correctly slice ${type} body with range ${range}`, async () => {
+      const context = new ByteRangeContext(logger, { Range: range })
+
+      context.setBody(body)
+      const actualBody = context.getBody()
+
+      if (actualBody instanceof Blob || type === 'Blob') {
+        const bodyAsUint8Array = new Uint8Array(await (actualBody as Blob).arrayBuffer())
+        const expectedAsUint8Array = new Uint8Array(await (expected as Blob).arrayBuffer())
+        // loop through the bytes and compare them
+        for (let i = 0; i < bodyAsUint8Array.length; i++) {
+          expect(bodyAsUint8Array[i]).to.equal(expectedAsUint8Array[i])
+        }
+      } else {
+        expect(actualBody).to.deep.equal(expected)
+      }
+
+      expect(context.contentRangeHeaderValue).to.equal(contentRange)
+    })
+  })
+
+  describe('handling ReadableStreams', () => {
+    let helia: Helia
+    let fs: UnixFS
+    let cid: CID
+    const getBodyStream = async (offset?: number, length?: number): Promise<ReadableStream<Uint8Array>> => {
+      const iter = fs.cat(cid, { offset, length })
+      const { stream } = await getStreamFromAsyncIterable(iter, 'test.txt', defaultLogger())
+      return stream
+    }
+
+    before(async () => {
+      helia = await createHelia()
+      fs = unixfs(helia)
+    })
+
+    after(async () => {
+      await stop(helia)
+    })
+
+    uint8arrayRangeTests.forEach(({ range, expected, body, contentRange }) => {
+      it(`should correctly slice Stream with range ${range}`, async () => {
+        const context = new ByteRangeContext(logger, { Range: range })
+        cid = await fs.addFile({
+          content: body
+        }, {
+          rawLeaves: false,
+          leafType: 'file'
+        })
+        const stat = await fs.stat(cid)
+        context.setFileSize(stat.fileSize)
+
+        context.setBody(await getBodyStream(context.offset, context.length))
+        const response = new Response(context.getBody())
+        const bodyResult = await response.arrayBuffer()
+        expect(new Uint8Array(bodyResult)).to.deep.equal(expected)
+        expect(context.contentRangeHeaderValue).to.equal(contentRange)
+      })
+    })
+  })
+})
diff --git a/packages/verified-fetch/test/utils/parse-url-string.spec.ts b/packages/verified-fetch/test/utils/parse-url-string.spec.ts
index cd91d8cd..fbf2341c 100644
--- a/packages/verified-fetch/test/utils/parse-url-string.spec.ts
+++ b/packages/verified-fetch/test/utils/parse-url-string.spec.ts
@@ -3,11 +3,11 @@ import { defaultLogger } from '@libp2p/logger'
 import { createEd25519PeerId } from '@libp2p/peer-id-factory'
 import { type Answer } from '@multiformats/dns'
 import { expect } from 'aegir/chai'
-import { type IPNSRecord } from 'ipns'
 import { CID } from 'multiformats/cid'
 import { match } from 'sinon'
 import { stubInterface } from 'sinon-ts'
 import { parseUrlString } from '../../src/utils/parse-url-string.js'
+import { ipnsRecordStub } from '../fixtures/ipns-stubs.js'
 import type { IPNS } from '@helia/ipns'
 import type { ComponentLogger, PeerId } from '@libp2p/interface'
 import type { StubbedInstance } from 'sinon-ts'
@@ -75,8 +75,7 @@ describe('parseUrlString', () => {
           ipns,
           logger
         })
-      ).to.eventually.be.rejected
-        .with.property('message', 'Could not parse PeerId in ipns url "mydomain.com", Non-base64 character')
+      ).to.eventually.be.rejected.with.property('message', 'Could not parse PeerId in ipns url "mydomain.com", Non-base64 character')
     })
   })
 
@@ -441,7 +440,7 @@ describe('parseUrlString', () => {
       ipns.resolve.withArgs(matchPeerId(testPeerId)).resolves({
         cid: CID.parse('QmQJ8fxavY54CUsxMSx9aE9Rdcmvhx8awJK2jzJp4iAqCr'),
         path: '',
-        record: stubInterface<IPNSRecord>()
+        record: ipnsRecordStub({ peerId: testPeerId })
       })
 
       await assertMatchUrl(
@@ -458,7 +457,7 @@ describe('parseUrlString', () => {
       ipns.resolve.withArgs(matchPeerId(testPeerId)).resolves({
         cid: CID.parse('QmQJ8fxavY54CUsxMSx9aE9Rdcmvhx8awJK2jzJp4iAqCr'),
         path: '',
-        record: stubInterface<IPNSRecord>()
+        record: ipnsRecordStub({ peerId: testPeerId })
       })
 
       await assertMatchUrl(
@@ -475,7 +474,7 @@ describe('parseUrlString', () => {
       ipns.resolve.withArgs(matchPeerId(testPeerId)).resolves({
         cid: CID.parse('QmQJ8fxavY54CUsxMSx9aE9Rdcmvhx8awJK2jzJp4iAqCr'),
         path: '',
-        record: stubInterface<IPNSRecord>()
+        record: ipnsRecordStub({ peerId: testPeerId })
       })
 
       await assertMatchUrl(
@@ -492,7 +491,7 @@ describe('parseUrlString', () => {
       ipns.resolve.withArgs(matchPeerId(testPeerId)).resolves({
         cid: CID.parse('QmQJ8fxavY54CUsxMSx9aE9Rdcmvhx8awJK2jzJp4iAqCr'),
         path: '',
-        record: stubInterface<IPNSRecord>()
+        record: ipnsRecordStub({ peerId: testPeerId })
       })
 
       await assertMatchUrl(
@@ -511,7 +510,7 @@ describe('parseUrlString', () => {
       ipns.resolve.withArgs(matchPeerId(testPeerId)).resolves({
         cid: CID.parse('QmQJ8fxavY54CUsxMSx9aE9Rdcmvhx8awJK2jzJp4iAqCr'),
         path: '',
-        record: stubInterface<IPNSRecord>()
+        record: ipnsRecordStub({ peerId: testPeerId })
       })
 
       await assertMatchUrl(
@@ -535,7 +534,7 @@ describe('parseUrlString', () => {
       ipns.resolve.withArgs(matchPeerId(peerId)).resolves({
         cid,
         path: recordPath,
-        record: stubInterface<IPNSRecord>()
+        record: ipnsRecordStub({ peerId: testPeerId })
       })
 
       await assertMatchUrl(
@@ -557,7 +556,7 @@ describe('parseUrlString', () => {
       ipns.resolve.withArgs(matchPeerId(peerId)).resolves({
         cid,
         path: recordPath,
-        record: stubInterface<IPNSRecord>()
+        record: ipnsRecordStub({ peerId: testPeerId })
       })
 
       await assertMatchUrl(
@@ -579,7 +578,7 @@ describe('parseUrlString', () => {
       ipns.resolve.withArgs(matchPeerId(peerId)).resolves({
         cid,
         path: recordPath,
-        record: stubInterface<IPNSRecord>()
+        record: ipnsRecordStub({ peerId: testPeerId })
       })
 
       await assertMatchUrl(
@@ -603,7 +602,7 @@ describe('parseUrlString', () => {
       ipns.resolve.withArgs(matchPeerId(peerId)).resolves({
         cid,
         path: '',
-        record: stubInterface<IPNSRecord>()
+        record: ipnsRecordStub({ peerId })
       })
     })
 
@@ -691,7 +690,7 @@ describe('parseUrlString', () => {
         ipns.resolve.withArgs(matchPeerId(peerId)).resolves({
           cid,
           path: '',
-          record: stubInterface<IPNSRecord>()
+          record: ipnsRecordStub({ peerId })
         })
       })
 
@@ -793,7 +792,7 @@ describe('parseUrlString', () => {
           ipns.resolve.withArgs(matchPeerId(value as PeerId)).resolves({
             cid,
             path: '',
-            record: stubInterface<IPNSRecord>()
+            record: ipnsRecordStub({ peerId: value as PeerId })
           })
         } else if (type === 'dnslink-encoded') {
           const matchValue = (value as string).replace(/-/g, '.')
diff --git a/packages/verified-fetch/test/utils/request-headers.spec.ts b/packages/verified-fetch/test/utils/request-headers.spec.ts
new file mode 100644
index 00000000..77c1697e
--- /dev/null
+++ b/packages/verified-fetch/test/utils/request-headers.spec.ts
@@ -0,0 +1,61 @@
+import { expect } from 'aegir/chai'
+import { getHeader, calculateByteRangeIndexes } from '../../src/utils/request-headers.js'
+
+describe('request-headers', () => {
+  describe('getHeader', () => {
+    it('should return undefined when headers are undefined', () => {
+      expect(getHeader(undefined, 'dummy')).to.be.undefined()
+      expect(getHeader(new Headers(), 'dummy')).to.be.undefined()
+      expect(getHeader({}, 'dummy')).to.be.undefined()
+      expect(getHeader([], 'dummy')).to.be.undefined()
+    })
+
+    it('should return correct header value for Headers instance', () => {
+      const headers = new Headers({ Dummy: 'value' })
+      expect(getHeader(headers, 'Dummy')).to.equal('value')
+      expect(getHeader(headers, 'dummy')).to.equal('value')
+    })
+
+    it('should return correct header value for array of tuples', () => {
+      const headers: Array<[string, string]> = [['Dummy', 'value']]
+      expect(getHeader(headers, 'Dummy')).to.equal('value')
+      expect(getHeader(headers, 'dummy')).to.equal('value')
+    })
+
+    it('should return correct header value for record', () => {
+      const headers: Record<string, string> = { Dummy: 'value' }
+      expect(getHeader(headers, 'Dummy')).to.equal('value')
+      expect(getHeader(headers, 'dummy')).to.equal('value')
+    })
+  })
+
+  describe('calculateByteRangeIndexes', () => {
+    const testCases = [
+      // Range: bytes=5-
+      { start: 5, end: undefined, fileSize: 10, expected: { byteSize: 6, start: 5, end: 10 } },
+      // Range: bytes=-5
+      { start: undefined, end: 5, fileSize: 10, expected: { byteSize: 5, start: 6, end: 10 } },
+      // Range: bytes=0-0
+      { start: 0, end: 0, fileSize: 10, expected: { byteSize: 1, start: 0, end: 0 } },
+      // Range: bytes=5- with unknown filesize
+      { start: 5, end: undefined, fileSize: undefined, expected: { start: 5 } },
+      // Range: bytes=-5 with unknown filesize
+      { start: undefined, end: 5, fileSize: undefined, expected: { end: 5 } },
+      // Range: bytes=0-0 with unknown filesize
+      { start: 0, end: 0, fileSize: undefined, expected: { byteSize: 1, start: 0, end: 0 } },
+      // Range: bytes=-9 & fileSize=11
+      { start: undefined, end: 9, fileSize: 11, expected: { byteSize: 9, start: 3, end: 11 } },
+      // Range: bytes=0-11 & fileSize=11
+      { start: 0, end: 11, fileSize: 11, expected: { byteSize: 12, start: 0, end: 11 } }
+    ]
+    testCases.forEach(({ start, end, fileSize, expected }) => {
+      it(`should return expected result for bytes=${start ?? ''}-${end ?? ''} and fileSize=${fileSize}`, () => {
+        const result = calculateByteRangeIndexes(start, end, fileSize)
+        expect(result).to.deep.equal(expected)
+      })
+    })
+    it('throws error for invalid range', () => {
+      expect(() => calculateByteRangeIndexes(5, 4, 10)).to.throw('Invalid range')
+    })
+  })
+})
diff --git a/packages/verified-fetch/test/utils/response-headers.spec.ts b/packages/verified-fetch/test/utils/response-headers.spec.ts
new file mode 100644
index 00000000..6197450a
--- /dev/null
+++ b/packages/verified-fetch/test/utils/response-headers.spec.ts
@@ -0,0 +1,33 @@
+import { expect } from 'aegir/chai'
+import { getContentRangeHeader } from '../../src/utils/response-headers.js'
+
+describe('response-headers', () => {
+  describe('getContentRangeHeader', () => {
+    it('should return correct content range header when all options are set', () => {
+      const byteStart = 0
+      const byteEnd = 500
+      const byteSize = 1000
+      expect(getContentRangeHeader({ byteStart, byteEnd, byteSize })).to.equal(`bytes ${byteStart}-${byteEnd}/${byteSize}`)
+    })
+
+    it('should return correct content range header when only byteEnd and byteSize are provided', () => {
+      expect(getContentRangeHeader({ byteStart: undefined, byteEnd: 9, byteSize: 11 })).to.equal('bytes 3-11/11')
+    })
+
+    it('should return correct content range header when only byteStart and byteSize are provided', () => {
+      expect(getContentRangeHeader({ byteStart: 5, byteEnd: undefined, byteSize: 11 })).to.equal('bytes 5-11/11')
+    })
+
+    it('should return correct content range header when only byteStart is provided', () => {
+      expect(getContentRangeHeader({ byteStart: 500, byteEnd: undefined, byteSize: undefined })).to.equal('bytes */*')
+    })
+
+    it('should return correct content range header when only byteEnd is provided', () => {
+      expect(getContentRangeHeader({ byteStart: undefined, byteEnd: 500, byteSize: undefined })).to.equal('bytes */*')
+    })
+
+    it('should return content range header when only byteSize is provided', () => {
+      expect(getContentRangeHeader({ byteStart: undefined, byteEnd: undefined, byteSize: 50 })).to.equal('bytes */50')
+    })
+  })
+})

From 9a9c00a51526ca10fd29f0c7166a4d01e0805a8d Mon Sep 17 00:00:00 2001
From: Russell Dempsey <1173416+SgtPooki@users.noreply.github.com>
Date: Wed, 20 Mar 2024 15:32:19 -0700
Subject: [PATCH 4/6] chore: add matchUrlGroups typeguard

---
 .../src/utils/parse-url-string.ts              | 18 ++++++++++++++++--
 1 file changed, 16 insertions(+), 2 deletions(-)

diff --git a/packages/verified-fetch/src/utils/parse-url-string.ts b/packages/verified-fetch/src/utils/parse-url-string.ts
index d406710e..bf3b6127 100644
--- a/packages/verified-fetch/src/utils/parse-url-string.ts
+++ b/packages/verified-fetch/src/utils/parse-url-string.ts
@@ -45,14 +45,28 @@ interface MatchUrlGroups {
   cidOrPeerIdOrDnsLink: string
   path?: string
   queryString?: string
+}
 
+function matchUrlGroupsGuard (groups?: null | { [key in string]: string; } | MatchUrlGroups): groups is MatchUrlGroups {
+  const protocol = groups?.protocol
+  if (protocol == null) return false
+  const cidOrPeerIdOrDnsLink = groups?.cidOrPeerIdOrDnsLink
+  if (cidOrPeerIdOrDnsLink == null) return false
+  const path = groups?.path
+  const queryString = groups?.queryString
+
+  return ['ipns', 'ipfs'].includes(protocol) &&
+    typeof cidOrPeerIdOrDnsLink === 'string' &&
+    (path == null || typeof path === 'string') &&
+    (queryString == null || typeof queryString === 'string')
 }
+
 function matchURLString (urlString: string): MatchUrlGroups {
   for (const pattern of [URL_REGEX, PATH_REGEX, PATH_GATEWAY_REGEX, SUBDOMAIN_GATEWAY_REGEX]) {
     const match = urlString.match(pattern)
 
-    if (match?.groups != null) {
-      return match.groups as unknown as MatchUrlGroups // force cast to MatchUrlGroups, because if it matches, it has to contain this structure.
+    if (matchUrlGroupsGuard(match?.groups)) {
+      return match.groups satisfies MatchUrlGroups
     }
   }
 

From bbf4b8c60a386784af2ad2adf633f495f35eda4e Mon Sep 17 00:00:00 2001
From: Russell Dempsey <1173416+SgtPooki@users.noreply.github.com>
Date: Wed, 20 Mar 2024 15:32:36 -0700
Subject: [PATCH 5/6] chore: remove unnecessary headerValue != null check

---
 packages/verified-fetch/src/utils/response-headers.ts | 4 +---
 1 file changed, 1 insertion(+), 3 deletions(-)

diff --git a/packages/verified-fetch/src/utils/response-headers.ts b/packages/verified-fetch/src/utils/response-headers.ts
index dfb34b0e..1d980832 100644
--- a/packages/verified-fetch/src/utils/response-headers.ts
+++ b/packages/verified-fetch/src/utils/response-headers.ts
@@ -31,9 +31,7 @@ export function setCacheControlHeader ({ ttl, protocol, response }: CacheControl
     headerValue = `public, max-age=${ttl}`
   }
 
-  if (headerValue != null) {
-    response.headers.set('cache-control', headerValue)
-  }
+  response.headers.set('cache-control', headerValue)
 }
 
 /**

From 0492beafd0282c754b1cd1ffb7cbeee399147409 Mon Sep 17 00:00:00 2001
From: Russell Dempsey <1173416+SgtPooki@users.noreply.github.com>
Date: Wed, 20 Mar 2024 15:32:50 -0700
Subject: [PATCH 6/6] test: remove unnecessary redefinition of verifiedFetch

---
 packages/verified-fetch/test/cache-control-header.spec.ts | 6 ------
 1 file changed, 6 deletions(-)

diff --git a/packages/verified-fetch/test/cache-control-header.spec.ts b/packages/verified-fetch/test/cache-control-header.spec.ts
index 0cfe1268..5c234fea 100644
--- a/packages/verified-fetch/test/cache-control-header.spec.ts
+++ b/packages/verified-fetch/test/cache-control-header.spec.ts
@@ -109,12 +109,6 @@ describe('cache-control header', () => {
   })
 
   it('should not contain immutable in the cache-control header for a DNSLink name', async () => {
-    verifiedFetch = new VerifiedFetch({
-      helia
-    }, {
-      dnsResolvers: [customDnsResolver]
-    })
-
     const obj = {
       hello: 'world'
     }