diff --git a/LICENSE-3rdparty.csv b/LICENSE-3rdparty.csv index 41626ecab9..397b05e40c 100644 --- a/LICENSE-3rdparty.csv +++ b/LICENSE-3rdparty.csv @@ -1,10 +1,9 @@ Component,Origin,License,Copyright require,tslib,Apache-2.0,Copyright Microsoft Corporation -file,pako,MIT,(C) 2014-2017 Vitaly Puzrin and Andrey Tupitsin file,rrweb,MIT,Copyright (c) 2018 Contributors (https://github.com/rrweb-io/rrweb/graphs/contributors) and SmartX Inc. file,tracekit,MIT,Copyright 2013 Onur Can Cakmak and all TraceKit contributors +prod,@types/css-font-loading-module,MIT,Copyright Microsoft Corporation prod,rrweb-snapshot,MIT,Copyright (c) 2018 Contributors (https://github.com/rrweb-io/rrweb-snapshot/graphs/contributors) and SmartX Inc. -dev,@types/connect-busboy,MIT,Copyright Microsoft Corporation dev,@types/cors,MIT,Copyright Microsoft Corporation dev,@types/express,MIT,Copyright Microsoft Corporation dev,@types/jasmine,MIT,Copyright Microsoft Corporation @@ -20,7 +19,6 @@ dev,@wdio/sync,MIT,Copyright JS Foundation and other contributors dev,ajv,MIT,Copyright 2015-2017 Evgeny Poberezkin dev,browserstack-local,MIT,Copyright 2016 BrowserStack dev,codecov,MIT,Copyright 2014 Gregg Caines -dev,connect-busboy,MIT,Copyright Brian White dev,cors,MIT,Copyright 2013 Troy Goode dev,emoji-name-map,MIT,Copyright 2016-19 Ionică Bizău (https://ionicabizau.net) dev,express,MIT,Copyright 2009-2014 TJ Holowaychuk 2013-2014 Roman Shtylman 2014-2015 Douglas Christopher Wilson diff --git a/package.json b/package.json index 161d81d27a..63f62526d7 100644 --- a/package.json +++ b/package.json @@ -27,7 +27,6 @@ "rum-events-format:sync": "scripts/cli update_submodule && scripts/cli build_json2type && node scripts/generate-schema-types.js" }, "devDependencies": { - "@types/connect-busboy": "0.0.2", "@types/cors": "2.8.7", "@types/express": "4.17.8", "@types/jasmine": "3.5.10", @@ -43,7 +42,6 @@ "ajv": "6.12.6", "browserstack-local": "1.4.5", "codecov": "3.7.1", - "connect-busboy": "0.0.2", "cors": "2.8.5", "emoji-name-map": "1.2.8", "express": "4.17.1", diff --git a/packages/core/src/domain/configuration.spec.ts b/packages/core/src/domain/configuration.spec.ts index be1fe9e864..94036cc764 100644 --- a/packages/core/src/domain/configuration.spec.ts +++ b/packages/core/src/domain/configuration.spec.ts @@ -30,7 +30,6 @@ describe('configuration', () => { expect(configuration.rumEndpoint).toEqual('<<< E2E RUM ENDPOINT >>>') expect(configuration.logsEndpoint).toEqual('<<< E2E LOGS ENDPOINT >>>') expect(configuration.internalMonitoringEndpoint).toEqual('<<< E2E INTERNAL MONITORING ENDPOINT >>>') - expect(configuration.sessionReplayEndpoint).toEqual('<<< E2E SESSION REPLAY ENDPOINT >>>') }) }) @@ -116,7 +115,6 @@ describe('configuration', () => { expect(configuration.isIntakeUrl('https://rum-http-intake.logs.datadoghq.eu/v1/input/xxx')).toBe(true) expect(configuration.isIntakeUrl('https://browser-http-intake.logs.datadoghq.eu/v1/input/xxx')).toBe(true) expect(configuration.isIntakeUrl('https://public-trace-http-intake.logs.datadoghq.eu/v1/input/xxx')).toBe(true) - expect(configuration.isIntakeUrl('https://session-replay.browser-intake-datadoghq.eu/v1/input/xxx')).toBe(true) }) it('should detect intake request for US site', () => { @@ -125,7 +123,6 @@ describe('configuration', () => { expect(configuration.isIntakeUrl('https://rum-http-intake.logs.datadoghq.com/v1/input/xxx')).toBe(true) expect(configuration.isIntakeUrl('https://browser-http-intake.logs.datadoghq.com/v1/input/xxx')).toBe(true) 
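// Editor's sketch (not part of the diff): the assertions around this point
// exercise "classic" intake hosts of the form `${endpoint}-http-intake.logs.${site}`,
// and isIntakeUrl() matches candidate URLs by prefix. A minimal standalone
// model, with hypothetical helper names:
function buildClassicIntakeUrl(endpoint: string, site: string): string {
  return `https://${endpoint}-http-intake.logs.${site}/v1/input/`
}

function makeIsIntakeUrl(intakeUrls: string[]): (url: string) => boolean {
  // Same prefix-matching strategy as the configuration under test.
  return (url) => intakeUrls.some((intakeUrl) => url.indexOf(intakeUrl) === 0)
}

const isIntakeUrl = makeIsIntakeUrl(
  ['rum', 'browser', 'public-trace'].map((endpoint) => buildClassicIntakeUrl(endpoint, 'datadoghq.eu'))
)
isIntakeUrl('https://rum-http-intake.logs.datadoghq.eu/v1/input/xxx') // -> true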
expect(configuration.isIntakeUrl('https://public-trace-http-intake.logs.datadoghq.com/v1/input/xxx')).toBe(true) - expect(configuration.isIntakeUrl('https://session-replay.browser-intake-datadoghq.com/v1/input/xxx')).toBe(true) }) it('should detect alternate intake domains for US site', () => { @@ -133,7 +130,6 @@ describe('configuration', () => { expect(configuration.isIntakeUrl('https://rum.browser-intake-datadoghq.com/v1/input/xxx')).toBe(true) expect(configuration.isIntakeUrl('https://logs.browser-intake-datadoghq.com/v1/input/xxx')).toBe(true) expect(configuration.isIntakeUrl('https://trace.browser-intake-datadoghq.com/v1/input/xxx')).toBe(true) - expect(configuration.isIntakeUrl('https://session-replay.browser-intake-datadoghq.com/v1/input/xxx')).toBe(true) }) it('should handle sites with subdomains and classic intake', () => { @@ -143,9 +139,6 @@ describe('configuration', () => { expect(configuration.isIntakeUrl('https://public-trace-http-intake.logs.foo.datadoghq.com/v1/input/xxx')).toBe( true ) - expect(configuration.isIntakeUrl('https://session-replay.browser-intake-foo-datadoghq.com/v1/input/xxx')).toBe( - true - ) }) it('should handle sites with subdomains and alternate intake', () => { @@ -156,9 +149,6 @@ describe('configuration', () => { expect(configuration.isIntakeUrl('https://rum.browser-intake-foo-datadoghq.com/v1/input/xxx')).toBe(true) expect(configuration.isIntakeUrl('https://logs.browser-intake-foo-datadoghq.com/v1/input/xxx')).toBe(true) expect(configuration.isIntakeUrl('https://trace.browser-intake-foo-datadoghq.com/v1/input/xxx')).toBe(true) - expect(configuration.isIntakeUrl('https://session-replay.browser-intake-foo-datadoghq.com/v1/input/xxx')).toBe( - true - ) }) it('should force alternate intake for us3', () => { @@ -169,9 +159,6 @@ describe('configuration', () => { expect(configuration.isIntakeUrl('https://rum.browser-intake-us3-datadoghq.com/v1/input/xxx')).toBe(true) expect(configuration.isIntakeUrl('https://logs.browser-intake-us3-datadoghq.com/v1/input/xxx')).toBe(true) expect(configuration.isIntakeUrl('https://trace.browser-intake-us3-datadoghq.com/v1/input/xxx')).toBe(true) - expect(configuration.isIntakeUrl('https://session-replay.browser-intake-us3-datadoghq.com/v1/input/xxx')).toBe( - true - ) }) it('should detect proxy intake request', () => { @@ -199,7 +186,6 @@ describe('configuration', () => { expect(configuration.isIntakeUrl('https://rum-http-intake.logs.foo.com/v1/input/xxx')).toBe(true) expect(configuration.isIntakeUrl('https://browser-http-intake.logs.foo.com/v1/input/xxx')).toBe(true) expect(configuration.isIntakeUrl('https://public-trace-http-intake.logs.foo.com/v1/input/xxx')).toBe(true) - expect(configuration.isIntakeUrl('https://session-replay.browser-intake-foo.com/v1/input/xxx')).toBe(true) expect(configuration.isIntakeUrl('https://rum-http-intake.logs.datadoghq.com/v1/input/xxx')).toBe(true) expect(configuration.isIntakeUrl('https://browser-http-intake.logs.datadoghq.com/v1/input/xxx')).toBe(true) @@ -213,7 +199,6 @@ describe('configuration', () => { expect(configuration.isIntakeUrl('https://rum.browser-intake-foo.com/v1/input/xxx')).toBe(true) expect(configuration.isIntakeUrl('https://logs.browser-intake-foo.com/v1/input/xxx')).toBe(true) expect(configuration.isIntakeUrl('https://trace.browser-intake-foo.com/v1/input/xxx')).toBe(true) - expect(configuration.isIntakeUrl('https://session-replay.browser-intake-foo.com/v1/input/xxx')).toBe(true) 
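// Editor's sketch (not part of the diff): the "alternate intake" tests above
// depend on rewriting the site into a single dash-joined label before the TLD,
// e.g. 'foo.datadoghq.com' -> 'foo-datadoghq.com'. The hypothetical helper
// below reproduces the host derivation from the getHost() change further down:
function buildAlternateIntakeHost(endpoint: string, site: string): string {
  const domainParts = site.split('.')
  const extension = domainParts.pop()
  const suffix = `${domainParts.join('-')}.${extension}`
  return `${endpoint}.browser-intake-${suffix}`
}

buildAlternateIntakeHost('rum', 'foo.datadoghq.com') // -> 'rum.browser-intake-foo-datadoghq.com'
buildAlternateIntakeHost('logs', 'us3.datadoghq.com') // -> 'logs.browser-intake-us3-datadoghq.com'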
expect(configuration.isIntakeUrl('https://rum.browser-intake-datadoghq.com/v1/input/xxx')).toBe(true) expect(configuration.isIntakeUrl('https://logs.browser-intake-datadoghq.com/v1/input/xxx')).toBe(true) diff --git a/packages/core/src/domain/configuration.ts b/packages/core/src/domain/configuration.ts index a95a4071f2..85ad258ff8 100644 --- a/packages/core/src/domain/configuration.ts +++ b/packages/core/src/domain/configuration.ts @@ -74,7 +74,6 @@ export type Configuration = typeof DEFAULT_CONFIGURATION & { logsEndpoint: string rumEndpoint: string traceEndpoint: string - sessionReplayEndpoint: string internalMonitoringEndpoint?: string proxyHost?: string @@ -112,14 +111,11 @@ const ENDPOINTS = { alternate: { logs: 'logs', rum: 'rum', - sessionReplay: 'session-replay', trace: 'trace', }, classic: { logs: 'browser', rum: 'rum', - // session-replay has no classic endpoint - sessionReplay: undefined, trace: 'public-trace', }, } @@ -155,7 +151,6 @@ export function buildConfiguration(userConfiguration: UserConfiguration, buildEn proxyHost: userConfiguration.proxyHost, rumEndpoint: getEndpoint(intakeType, 'rum', transportConfiguration), service: userConfiguration.service, - sessionReplayEndpoint: getEndpoint(intakeType, 'sessionReplay', transportConfiguration), traceEndpoint: getEndpoint(intakeType, 'trace', transportConfiguration), isIntakeUrl: (url) => intakeUrls.some((intakeUrl) => url.indexOf(intakeUrl) === 0), @@ -190,7 +185,6 @@ export function buildConfiguration(userConfiguration: UserConfiguration, buildEn configuration.internalMonitoringEndpoint = '<<< E2E INTERNAL MONITORING ENDPOINT >>>' configuration.logsEndpoint = '<<< E2E LOGS ENDPOINT >>>' configuration.rumEndpoint = '<<< E2E RUM ENDPOINT >>>' - configuration.sessionReplayEndpoint = '<<< E2E SESSION REPLAY ENDPOINT >>>' } if (transportConfiguration.buildMode === BuildMode.STAGING) { @@ -252,16 +246,10 @@ function getEndpoint( } function getHost(intakeType: IntakeType, endpointType: EndpointType, site: string) { - return (intakeType === 'classic' && getClassicHost(endpointType, site)) || getAlternateHost(endpointType, site) -} - -function getClassicHost(endpointType: EndpointType, site: string): string | undefined { - const endpoint = ENDPOINTS.classic[endpointType] - return endpoint && `${endpoint}-http-intake.logs.${site}` -} - -function getAlternateHost(endpointType: EndpointType, site: string): string { - const endpoint = ENDPOINTS.alternate[endpointType] + const endpoint = ENDPOINTS[intakeType][endpointType] + if (intakeType === 'classic') { + return `${endpoint}-http-intake.logs.${site}` + } const domainParts = site.split('.') const extension = domainParts.pop() const suffix = `${domainParts.join('-')}.${extension}` diff --git a/packages/core/src/index.ts b/packages/core/src/index.ts index 73431554ef..41d51fe629 100644 --- a/packages/core/src/index.ts +++ b/packages/core/src/index.ts @@ -17,7 +17,6 @@ export { monitored, monitor, addMonitoringMessage, - setDebugMode, } from './domain/internalMonitoring' export { Observable } from './tools/observable' export { diff --git a/packages/core/src/transport/transport.ts b/packages/core/src/transport/transport.ts index 5e76c6b979..19fcadd0e8 100644 --- a/packages/core/src/transport/transport.ts +++ b/packages/core/src/transport/transport.ts @@ -16,7 +16,7 @@ const HAS_MULTI_BYTES_CHARACTERS = /[^\u0000-\u007F]/ export class HttpRequest { constructor(private endpointUrl: string, private bytesLimit: number, private withBatchTime: boolean = false) {} - send(data: string | FormData, 
size: number) { + send(data: string, size: number) { const url = this.withBatchTime ? addBatchTime(this.endpointUrl) : this.endpointUrl if (navigator.sendBeacon && size < this.bytesLimit) { const isQueued = navigator.sendBeacon(url, data) diff --git a/packages/rum-core/src/boot/rum.ts b/packages/rum-core/src/boot/rum.ts index 76874225bd..f1abe89e8f 100644 --- a/packages/rum-core/src/boot/rum.ts +++ b/packages/rum-core/src/boot/rum.ts @@ -54,9 +54,6 @@ export function startRum(userConfiguration: RumUserConfiguration, getCommonConte addError, addTiming, configuration, - lifeCycle, - parentContexts, - session, getInternalContext: internalContext.get, } } diff --git a/packages/rum-core/src/boot/rumPublicApi.spec.ts b/packages/rum-core/src/boot/rumPublicApi.spec.ts index 9870ab733f..1daba0bb5f 100644 --- a/packages/rum-core/src/boot/rumPublicApi.spec.ts +++ b/packages/rum-core/src/boot/rumPublicApi.spec.ts @@ -6,15 +6,12 @@ import { makeRumPublicApi, RumPublicApi, RumUserConfiguration, StartRum } from ' const configuration: Partial = { isEnabled: () => false, } -const noopStartRum = (): ReturnType => ({ +const noopStartRum = () => ({ addAction: () => undefined, addError: () => undefined, addTiming: () => undefined, configuration: configuration as Configuration, getInternalContext: () => undefined, - lifeCycle: {} as any, - parentContexts: {} as any, - session: {} as any, }) const DEFAULT_INIT_CONFIGURATION = { applicationId: 'xxx', clientToken: 'xxx' } diff --git a/packages/rum-core/src/index.ts b/packages/rum-core/src/index.ts index 0e93fb94b0..d8a2934150 100644 --- a/packages/rum-core/src/index.ts +++ b/packages/rum-core/src/index.ts @@ -9,8 +9,4 @@ export { RumResourceEvent, RumLongTaskEvent, } from './rumEvent.types' -export { ViewContext, CommonContext } from './rawRumEvent.types' export { startRum } from './boot/rum' -export { LifeCycle, LifeCycleEventType } from './domain/lifeCycle' -export { ParentContexts } from './domain/parentContexts' -export { RumSession } from './domain/rumSession' diff --git a/packages/rum-core/src/rawRumEvent.types.ts b/packages/rum-core/src/rawRumEvent.types.ts index 57981d8df0..32d7d1e520 100644 --- a/packages/rum-core/src/rawRumEvent.types.ts +++ b/packages/rum-core/src/rawRumEvent.types.ts @@ -175,5 +175,4 @@ export interface User { export interface CommonContext { user: User context: Context - hasReplay?: boolean } diff --git a/packages/rum-recorder/package.json b/packages/rum-recorder/package.json index bc45aa272a..1d93bea50f 100644 --- a/packages/rum-recorder/package.json +++ b/packages/rum-recorder/package.json @@ -13,7 +13,8 @@ }, "dependencies": { "@datadog/browser-core": "2.2.1", - "@datadog/browser-rum-core": "2.2.1", + "@datadog/browser-rum": "2.2.1", + "@types/css-font-loading-module": "0.0.4", "rrweb-snapshot": "1.0.1", "tslib": "^1.10.0" }, diff --git a/packages/rum-recorder/src/boot/recorder.entry.ts b/packages/rum-recorder/src/boot/recorder.entry.ts index 6ab06e16f5..efe246f3e7 100644 --- a/packages/rum-recorder/src/boot/recorder.entry.ts +++ b/packages/rum-recorder/src/boot/recorder.entry.ts @@ -1,29 +1 @@ -import { defineGlobal, getGlobalObject } from '@datadog/browser-core' -import { - CommonContext, - makeRumPublicApi, - RumPublicApi, - RumUserConfiguration, - startRum, -} from '@datadog/browser-rum-core' - -import { startRecording } from './recorder' - -function startRumAndRecording(userConfiguration: RumUserConfiguration, getCommonContext: () => CommonContext) { - const startRumResult = startRum(userConfiguration, () => ({ - 
...getCommonContext(), - hasReplay: true, - })) - - const { lifeCycle, parentContexts, configuration, session } = startRumResult - startRecording(lifeCycle, userConfiguration.applicationId, configuration, session, parentContexts) - - return startRumResult -} - -export const datadogRum = makeRumPublicApi(startRumAndRecording) - -interface BrowserWindow extends Window { - DD_RUM?: RumPublicApi -} -defineGlobal(getGlobalObject(), 'DD_RUM', datadogRum) +export * from '@datadog/browser-rum' diff --git a/packages/rum-recorder/src/boot/recorder.spec.ts b/packages/rum-recorder/src/boot/recorder.spec.ts deleted file mode 100644 index 22a68dbc24..0000000000 --- a/packages/rum-recorder/src/boot/recorder.spec.ts +++ /dev/null @@ -1,158 +0,0 @@ -import { createNewEvent, HttpRequest, isIE } from '@datadog/browser-core' -import { LifeCycle, LifeCycleEventType } from '@datadog/browser-rum-core' - -import { setup, TestSetupBuilder } from '../../../rum-core/test/specHelper' - -import { startRecording } from './recorder' - -describe('startRecording', () => { - let setupBuilder: TestSetupBuilder - let sessionId: string | undefined - let waitRequests: ( - expectedRequestCount: number, - callback: (requests: ReadonlyArray<{ data: FormData; size: number }>) => void - ) => void - let expectNoExtraRequest: (callback: () => void) => void - - beforeEach(() => { - if (isIE()) { - pending('IE not supported') - } - sessionId = 'session-id' - setupBuilder = setup() - .withParentContexts({ - findView() { - return { - session: { - id: sessionId, - }, - view: { - id: 'view-id', - referrer: '', - url: 'http://example.org', - }, - } - }, - }) - .beforeBuild(({ lifeCycle, applicationId, configuration, parentContexts, session }) => { - return startRecording(lifeCycle, applicationId, configuration, session, parentContexts) - }) - - const requestSendSpy = spyOn(HttpRequest.prototype, 'send') - - waitRequests = (expectedRequestCount, callback) => { - const requests: Array<{ data: FormData; size: number }> = [] - requestSendSpy.and.callFake((data, size) => { - if (requests.push({ size, data: data as FormData }) === expectedRequestCount) { - callback(requests) - } - }) - } - - expectNoExtraRequest = (done) => { - requestSendSpy.and.callFake(() => { - fail('Unexpected request received') - }) - setTimeout(done, 300) - } - }) - - afterEach(() => { - setupBuilder.cleanup() - }) - - it('sends recorded segments with valid context', (done) => { - const { lifeCycle } = setupBuilder.build() - flushSegment(lifeCycle) - - waitRequests(1, (requests) => { - expect(requests).toEqual([{ data: jasmine.any(FormData), size: jasmine.any(Number) }]) - expect(formDataAsObject(requests[0].data)).toEqual({ - 'application.id': 'appId', - creation_reason: 'init', - end: jasmine.stringMatching(/^\d{13}$/), - has_full_snapshot: 'true', - records_count: '2', - segment: jasmine.any(File), - 'session.id': 'session-id', - start: jasmine.stringMatching(/^\d{13}$/), - 'view.id': 'view-id', - }) - expectNoExtraRequest(done) - }) - }) - - it('flushes the segment when its compressed data is getting too large', (done) => { - setupBuilder.build() - const inputCount = 150 - const textField = document.createElement('input') - const inputEvent = createNewEvent('input', { target: textField }) - for (let i = 0; i < inputCount; i += 1) { - // Create a random value harder to deflate, so we don't have to send too many events to reach - // the limit. 
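// Editor's note (not part of the original file): createRandomString(), defined
// at the bottom of this spec, yields high-entropy text on purpose. Deflate
// compresses by finding repeated byte sequences, so near-random input stays
// close to its original size and pushes the segment toward its size limit in
// far fewer iterations than repetitive input would.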
- textField.value = createRandomString(1000) - document.body.dispatchEvent(inputEvent) - } - - waitRequests(1, (requests) => { - expect(requests[0].data.get('records_count')).toBe(String(inputCount + 2)) - expectNoExtraRequest(done) - }) - }) - - it('stops sending new segment when the session is expired', (done) => { - const { lifeCycle } = setupBuilder.build() - - document.body.dispatchEvent(createNewEvent('click')) - - sessionId = undefined - flushSegment(lifeCycle) - document.body.dispatchEvent(createNewEvent('click')) - - flushSegment(lifeCycle) - - waitRequests(1, (requests) => { - expect(requests[0].data.get('records_count')).toBe('3') - expectNoExtraRequest(done) - }) - }) - - it('restarts sending segments when the session is renewed', (done) => { - sessionId = undefined - const { lifeCycle } = setupBuilder.build() - - document.body.dispatchEvent(createNewEvent('click')) - - sessionId = 'new-session-id' - flushSegment(lifeCycle) - document.body.dispatchEvent(createNewEvent('click')) - - flushSegment(lifeCycle) - - waitRequests(1, (requests) => { - expect(requests[0].data.get('records_count')).toBe('1') - expect(requests[0].data.get('session.id')).toBe('new-session-id') - expectNoExtraRequest(done) - }) - }) -}) - -function flushSegment(lifeCycle: LifeCycle) { - lifeCycle.notify(LifeCycleEventType.BEFORE_UNLOAD) -} - -function formDataAsObject(data: FormData) { - const result: { [key: string]: unknown } = {} - data.forEach((value, key) => { - result[key] = value - }) - return result -} - -function createRandomString(minLength: number) { - let result = '' - while (result.length < minLength) { - result += Math.random().toString(36) - } - return result -} diff --git a/packages/rum-recorder/src/boot/recorder.ts b/packages/rum-recorder/src/boot/recorder.ts deleted file mode 100644 index c700f27a55..0000000000 --- a/packages/rum-recorder/src/boot/recorder.ts +++ /dev/null @@ -1,33 +0,0 @@ -import { Configuration } from '@datadog/browser-core' -import { LifeCycle, ParentContexts, RumSession } from '@datadog/browser-rum-core' - -import { record } from '../domain/rrweb' -import { startSegmentCollection } from '../domain/segmentCollection' -import { send } from '../transport/send' - -export function startRecording( - lifeCycle: LifeCycle, - applicationId: string, - configuration: Configuration, - session: RumSession, - parentContexts: ParentContexts -) { - const { addRecord, stop: stopSegmentCollection } = startSegmentCollection( - lifeCycle, - applicationId, - session, - parentContexts, - (data, meta) => send(configuration.sessionReplayEndpoint, data, meta) - ) - - const stopRecording = record({ - emit: addRecord, - })! 
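// Editor's sketch (hypothetical helper, not from the diff): startRecording()
// wires two stoppable subsystems (the rrweb recorder feeding addRecord, and
// the segment collection flushing to the intake) and, just below, returns a
// single stop() tearing both down. The same pattern in isolation:
interface Stoppable {
  stop: () => void
}

function composeStoppables(...stoppables: Stoppable[]): Stoppable {
  // Stop in the order given: the record() emitter first, then the
  // segment collection that consumes its records.
  return { stop: () => stoppables.forEach((s) => s.stop()) }
}

const recording = composeStoppables(
  { stop: () => console.log('rrweb record() stopped') },
  { stop: () => console.log('segment collection stopped') }
)
recording.stop()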
-
-  return {
-    stop() {
-      stopRecording()
-      stopSegmentCollection()
-    },
-  }
-}
diff --git a/packages/rum-recorder/src/domain/deflateSegmentWriter.spec.ts b/packages/rum-recorder/src/domain/deflateSegmentWriter.spec.ts
deleted file mode 100644
index 42bfe342a1..0000000000
--- a/packages/rum-recorder/src/domain/deflateSegmentWriter.spec.ts
+++ /dev/null
@@ -1,48 +0,0 @@
-import { noop, setDebugMode } from '@datadog/browser-core'
-
-import { MockWorker } from '../../test/utils'
-import { SegmentMeta } from '../types'
-import { DeflateSegmentWriter } from './deflateSegmentWriter'
-
-describe('DeflateSegmentWriter', () => {
-  let worker: MockWorker
-
-  beforeEach(() => {
-    worker = new MockWorker()
-    setDebugMode(true)
-  })
-
-  afterEach(() => {
-    setDebugMode(false)
-  })
-
-  it('calls the onWrote callback when data is written', () => {
-    const onWroteSpy = jasmine.createSpy<(size: number) => void>()
-    const writer = new DeflateSegmentWriter(worker, onWroteSpy, noop)
-    writer.write('foo')
-    worker.process()
-    expect(onWroteSpy.calls.allArgs()).toEqual([[3]])
-  })
-
-  it('calls the onFlushed callback when data is flushed', () => {
-    const onFlushedSpy = jasmine.createSpy<(data: Uint8Array, meta: SegmentMeta) => void>()
-    const writer = new DeflateSegmentWriter(worker, noop, onFlushedSpy)
-    const meta: SegmentMeta = { start: 12 } as any
-    writer.flush(undefined, meta)
-    worker.process()
-    expect(onFlushedSpy.calls.allArgs()).toEqual([[jasmine.any(Uint8Array), meta]])
-  })
-
-  it('calls the onFlushed callback with the correct meta even if a previous worker response was lost', () => {
-    const consoleSpy = spyOn(console, 'log')
-    const onFlushedSpy = jasmine.createSpy<(data: Uint8Array, meta: SegmentMeta) => void>()
-    const writer = new DeflateSegmentWriter(worker, noop, onFlushedSpy)
-    const meta1: SegmentMeta = { start: 12 } as any
-    const meta2: SegmentMeta = { start: 13 } as any
-    writer.flush(undefined, meta1)
-    writer.flush(undefined, meta2)
-    worker.process(0)
-    expect(onFlushedSpy.calls.allArgs()).toEqual([[jasmine.any(Uint8Array), meta2]])
-    expect(consoleSpy).toHaveBeenCalledWith('[MONITORING MESSAGE]', '1 deflate worker responses have been lost')
-  })
-})
diff --git a/packages/rum-recorder/src/domain/deflateSegmentWriter.ts b/packages/rum-recorder/src/domain/deflateSegmentWriter.ts
deleted file mode 100644
index bfd5b9f36b..0000000000
--- a/packages/rum-recorder/src/domain/deflateSegmentWriter.ts
+++ /dev/null
@@ -1,49 +0,0 @@
-import { addMonitoringMessage } from '@datadog/browser-core'
-import { SegmentMeta } from '../types'
-import { DeflateWorker } from './deflateWorker'
-import { SegmentWriter } from './segment'
-
-export class DeflateSegmentWriter implements SegmentWriter {
-  private nextId = 0
-  private pendingMeta: Array<{ id: number; meta: SegmentMeta }> = []
-
-  constructor(
-    private worker: DeflateWorker,
-    private onWrote: (size: number) => void,
-    private onFlushed: (data: Uint8Array, meta: SegmentMeta) => void
-  ) {
-    worker.addEventListener('message', ({ data }) => {
-      if ('result' in data) {
-        let pendingMeta = this.pendingMeta.shift()!
-
-        // Messages should be received in the same order as they are sent, so the first
-        // 'pendingMeta' of the list should be the one corresponding to the handled message.
-        // But if something goes wrong in the worker and a response is lost, we need to avoid
-        // associating an incorrect meta with the flushed segment. Remove any pending meta with an id
-        // lower than the one being waited for.
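// Editor's worked example of the recovery described above (standalone,
// simplified shapes; the real code below operates on this.pendingMeta inline):
interface PendingEntry {
  id: number
  meta: { start: number }
}

function takeMatchingMeta(pending: PendingEntry[], responseId: number) {
  let head = pending.shift()!
  let lost = 0
  while (head.id !== responseId) {
    // A response went missing: discard its queued meta and keep counting.
    head = pending.shift()!
    lost += 1
  }
  return { meta: head.meta, lost }
}

// Metas were queued for ids 0 and 1, but the worker response for id 0 was lost:
const queue = [
  { id: 0, meta: { start: 12 } },
  { id: 1, meta: { start: 13 } },
]
takeMatchingMeta(queue, 1) // -> { meta: { start: 13 }, lost: 1 }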
- if (pendingMeta.id !== data.id) { - let lostCount = 0 - while (pendingMeta.id !== data.id) { - pendingMeta = this.pendingMeta.shift()! - lostCount += 1 - } - addMonitoringMessage(`${lostCount} deflate worker responses have been lost`) - } - this.onFlushed(data.result, pendingMeta.meta) - } else { - this.onWrote(data.size) - } - }) - } - - write(data: string): void { - this.worker.postMessage({ data, id: this.nextId, action: 'write' }) - this.nextId += 1 - } - - flush(data: string | undefined, meta: SegmentMeta): void { - this.worker.postMessage({ data, id: this.nextId, action: 'flush' }) - this.pendingMeta.push({ meta, id: this.nextId }) - this.nextId += 1 - } -} diff --git a/packages/rum-recorder/src/domain/deflateWorker.d.ts b/packages/rum-recorder/src/domain/deflateWorker.d.ts deleted file mode 100644 index f08d989910..0000000000 --- a/packages/rum-recorder/src/domain/deflateWorker.d.ts +++ /dev/null @@ -1,32 +0,0 @@ -export function createDeflateWorker(): DeflateWorker - -export interface DeflateWorker { - addEventListener(name: 'message', listener: DeflateWorkerListener): void - removeEventListener(name: 'message', listener: DeflateWorkerListener): void - postMessage(message: DeflateWorkerAction): void - terminate(): void -} - -export type DeflateWorkerListener = (event: { data: DeflateWorkerResponse }) => void - -export type DeflateWorkerAction = - | { - id: number - action: 'write' - data: string - } - | { - id: number - action: 'flush' - data?: string - } - -export type DeflateWorkerResponse = - | { - id: number - size: number - } - | { - id: number - result: Uint8Array - } diff --git a/packages/rum-recorder/src/domain/deflateWorker.js b/packages/rum-recorder/src/domain/deflateWorker.js deleted file mode 100644 index df6b34b64c..0000000000 --- a/packages/rum-recorder/src/domain/deflateWorker.js +++ /dev/null @@ -1,4558 +0,0 @@ -let workerURL - -export function createDeflateWorker() { - // Lazily compute the worker URL to allow importing the SDK in NodeJS - if (!workerURL) { - workerURL = URL.createObjectURL(new Blob([`(${workerCodeFn})(self)`])) - } - return new Worker(workerURL) -} - -function workerCodeFn() { - const { Deflate, constants } = makePakoDeflate() - - let deflate = new Deflate() - self.addEventListener('message', (event) => { - const data = event.data - switch (data.action) { - case 'write': - deflate.push(data.data, constants.Z_SYNC_FLUSH) - self.postMessage({ - id: data.id, - size: deflate.chunks.reduce((total, chunk) => total + chunk.length, 0), - }) - break - case 'flush': - if (data.data) { - deflate.push(data.data, constants.Z_SYNC_FLUSH) - } - deflate.push('', constants.Z_FINISH) - self.postMessage({ - id: data.id, - result: deflate.result, - }) - deflate = new Deflate() - break - } - }) - - // https://github.com/nodeca/pako/blob/034669ba0f1a4c0590e45f7c2820128200f972b3/dist/pako_deflate.es5.js - function makePakoDeflate() { - // (C) 2014-2017 Vitaly Puzrin and Andrey Tupitsin - // - // This software is provided 'as-is', without any express or implied - // warranty. In no event will the authors be held liable for any damages - // arising from the use of this software. - // - // Permission is granted to anyone to use this software for any purpose, - // including commercial applications, and to alter it and redistribute it - // freely, subject to the following restrictions: - // - // 1. The origin of this software must not be misrepresented; you must not - // claim that you wrote the original software. 
If you use this software - // in a product, an acknowledgment in the product documentation would be - // appreciated but is not required. - // 2. Altered source versions must be plainly marked as such, and must not be - // misrepresented as being the original software. - // 3. This notice may not be removed or altered from any source distribution. - - /* eslint-disable space-unary-ops */ - - /* Public constants ==========================================================*/ - - /* ===========================================================================*/ - //const Z_FILTERED = 1; - //const Z_HUFFMAN_ONLY = 2; - //const Z_RLE = 3; - - var Z_FIXED = 4 //const Z_DEFAULT_STRATEGY = 0; - - /* Possible values of the data_type field (though see inflate()) */ - - var Z_BINARY = 0 - var Z_TEXT = 1 //const Z_ASCII = 1; // = Z_TEXT - - var Z_UNKNOWN = 2 - /*============================================================================*/ - - function zero(buf) { - var len = buf.length - - while (--len >= 0) { - buf[len] = 0 - } - } // From zutil.h - - var STORED_BLOCK = 0 - var STATIC_TREES = 1 - var DYN_TREES = 2 - /* The three kinds of block type */ - - var MIN_MATCH = 3 - var MAX_MATCH = 258 - /* The minimum and maximum match lengths */ - // From deflate.h - - /* =========================================================================== - * Internal compression state. - */ - - var LENGTH_CODES = 29 - /* number of length codes, not counting the special END_BLOCK code */ - - var LITERALS = 256 - /* number of literal bytes 0..255 */ - - var L_CODES = LITERALS + 1 + LENGTH_CODES - /* number of Literal or Length codes, including the END_BLOCK code */ - - var D_CODES = 30 - /* number of distance codes */ - - var BL_CODES = 19 - /* number of codes used to transfer the bit lengths */ - - var HEAP_SIZE = 2 * L_CODES + 1 - /* maximum heap size */ - - var MAX_BITS = 15 - /* All codes must not exceed MAX_BITS bits */ - - var Buf_size = 16 - /* size of bit buffer in bi_buf */ - - /* =========================================================================== - * Constants - */ - - var MAX_BL_BITS = 7 - /* Bit length codes must not exceed MAX_BL_BITS bits */ - - var END_BLOCK = 256 - /* end of block literal code */ - - var REP_3_6 = 16 - /* repeat previous bit length 3-6 times (2 bits of repeat count) */ - - var REPZ_3_10 = 17 - /* repeat a zero length 3-10 times (3 bits of repeat count) */ - - var REPZ_11_138 = 18 - /* repeat a zero length 11-138 times (7 bits of repeat count) */ - - /* eslint-disable comma-spacing,array-bracket-spacing */ - - var extra_lbits = - /* extra bits for each length code */ - new Uint8Array([0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 4, 4, 4, 4, 5, 5, 5, 5, 0]) - var extra_dbits = - /* extra bits for each distance code */ - new Uint8Array([0, 0, 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8, 8, 9, 9, 10, 10, 11, 11, 12, 12, 13, 13]) - var extra_blbits = - /* extra bits for each bit length code */ - new Uint8Array([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 3, 7]) - var bl_order = new Uint8Array([16, 17, 18, 0, 8, 7, 9, 6, 10, 5, 11, 4, 12, 3, 13, 2, 14, 1, 15]) - /* eslint-enable comma-spacing,array-bracket-spacing */ - - /* The lengths of the bit length codes are sent in order of decreasing - * probability, to avoid transmitting the lengths for unused bit length codes. - */ - - /* =========================================================================== - * Local data. These are initialized only once. 
- */ - // We pre-fill arrays with 0 to avoid uninitialized gaps - - var DIST_CODE_LEN = 512 - /* see definition of array dist_code below */ - // !!!! Use flat array instead of structure, Freq = i*2, Len = i*2+1 - - var static_ltree = new Array((L_CODES + 2) * 2) - zero(static_ltree) - /* The static literal tree. Since the bit lengths are imposed, there is no - * need for the L_CODES extra codes used during heap construction. However - * The codes 286 and 287 are needed to build a canonical tree (see _tr_init - * below). - */ - - var static_dtree = new Array(D_CODES * 2) - zero(static_dtree) - /* The static distance tree. (Actually a trivial tree since all codes use - * 5 bits.) - */ - - var _dist_code = new Array(DIST_CODE_LEN) - - zero(_dist_code) - /* Distance codes. The first 256 values correspond to the distances - * 3 .. 258, the last 256 values correspond to the top 8 bits of - * the 15 bit distances. - */ - - var _length_code = new Array(MAX_MATCH - MIN_MATCH + 1) - - zero(_length_code) - /* length code for each normalized match length (0 == MIN_MATCH) */ - - var base_length = new Array(LENGTH_CODES) - zero(base_length) - /* First normalized length for each code (0 = MIN_MATCH) */ - - var base_dist = new Array(D_CODES) - zero(base_dist) - /* First normalized distance for each code (0 = distance of 1) */ - - function StaticTreeDesc(static_tree, extra_bits, extra_base, elems, max_length) { - this.static_tree = static_tree - /* static tree or NULL */ - - this.extra_bits = extra_bits - /* extra bits for each code or NULL */ - - this.extra_base = extra_base - /* base index for extra_bits */ - - this.elems = elems - /* max number of elements in the tree */ - - this.max_length = max_length - /* max bit length for the codes */ - // show if `static_tree` has data or dummy - needed for monomorphic objects - - this.has_stree = static_tree && static_tree.length - } - - var static_l_desc - var static_d_desc - var static_bl_desc - - function TreeDesc(dyn_tree, stat_desc) { - this.dyn_tree = dyn_tree - /* the dynamic tree */ - - this.max_code = 0 - /* largest code with non zero frequency */ - - this.stat_desc = stat_desc - /* the corresponding static tree */ - } - - var d_code = function d_code(dist) { - return dist < 256 ? _dist_code[dist] : _dist_code[256 + (dist >>> 7)] - } - /* =========================================================================== - * Output a short LSB first on the stream. - * IN assertion: there is enough room in pendingBuf. - */ - - var put_short = function put_short(s, w) { - // put_byte(s, (uch)((w) & 0xff)); - // put_byte(s, (uch)((ush)(w) >> 8)); - s.pending_buf[s.pending++] = w & 0xff - s.pending_buf[s.pending++] = (w >>> 8) & 0xff - } - /* =========================================================================== - * Send a value on a given number of bits. - * IN assertion: length <= 16 and value fits in length bits. 
- */ - - var send_bits = function send_bits(s, value, length) { - if (s.bi_valid > Buf_size - length) { - s.bi_buf |= (value << s.bi_valid) & 0xffff - put_short(s, s.bi_buf) - s.bi_buf = value >> (Buf_size - s.bi_valid) - s.bi_valid += length - Buf_size - } else { - s.bi_buf |= (value << s.bi_valid) & 0xffff - s.bi_valid += length - } - } - - var send_code = function send_code(s, c, tree) { - send_bits( - s, - tree[c * 2], - /*.Code*/ - tree[c * 2 + 1] - /*.Len*/ - ) - } - /* =========================================================================== - * Reverse the first len bits of a code, using straightforward code (a faster - * method would use a table) - * IN assertion: 1 <= len <= 15 - */ - - var bi_reverse = function bi_reverse(code, len) { - var res = 0 - - do { - res |= code & 1 - code >>>= 1 - res <<= 1 - } while (--len > 0) - - return res >>> 1 - } - /* =========================================================================== - * Flush the bit buffer, keeping at most 7 bits in it. - */ - - var bi_flush = function bi_flush(s) { - if (s.bi_valid === 16) { - put_short(s, s.bi_buf) - s.bi_buf = 0 - s.bi_valid = 0 - } else if (s.bi_valid >= 8) { - s.pending_buf[s.pending++] = s.bi_buf & 0xff - s.bi_buf >>= 8 - s.bi_valid -= 8 - } - } - /* =========================================================================== - * Compute the optimal bit lengths for a tree and update the total bit length - * for the current block. - * IN assertion: the fields freq and dad are set, heap[heap_max] and - * above are the tree nodes sorted by increasing frequency. - * OUT assertions: the field len is set to the optimal bit length, the - * array bl_count contains the frequencies for each bit length. - * The length opt_len is updated; static_len is also updated if stree is - * not null. - */ - - var gen_bitlen = function gen_bitlen( - s, - desc // deflate_state *s; // tree_desc *desc; /* the tree descriptor */ - ) { - var tree = desc.dyn_tree - var max_code = desc.max_code - var stree = desc.stat_desc.static_tree - var has_stree = desc.stat_desc.has_stree - var extra = desc.stat_desc.extra_bits - var base = desc.stat_desc.extra_base - var max_length = desc.stat_desc.max_length - var h - /* heap index */ - - var n, m - /* iterate over the tree elements */ - - var bits - /* bit length */ - - var xbits - /* extra bits */ - - var f - /* frequency */ - - var overflow = 0 - /* number of elements with bit length too large */ - - for (bits = 0; bits <= MAX_BITS; bits++) { - s.bl_count[bits] = 0 - } - /* In a first pass, compute the optimal bit lengths (which may - * overflow in the case of the bit length tree). 
- */ - - tree[s.heap[s.heap_max] * 2 + 1] = - /*.Len*/ - 0 - /* root of the heap */ - - for (h = s.heap_max + 1; h < HEAP_SIZE; h++) { - n = s.heap[h] - bits = - tree[ - tree[n * 2 + 1] * - /*.Dad*/ - 2 + - 1 - ] + - /*.Len*/ - 1 - - if (bits > max_length) { - bits = max_length - overflow++ - } - - tree[n * 2 + 1] = - /*.Len*/ - bits - /* We overwrite tree[n].Dad which is no longer needed */ - - if (n > max_code) { - continue - } - /* not a leaf node */ - - s.bl_count[bits]++ - xbits = 0 - - if (n >= base) { - xbits = extra[n - base] - } - - f = tree[n * 2] - /*.Freq*/ - s.opt_len += f * (bits + xbits) - - if (has_stree) { - s.static_len += - f * - (stree[n * 2 + 1] + - /*.Len*/ - xbits) - } - } - - if (overflow === 0) { - return - } // Trace((stderr,"\nbit length overflow\n")); - - /* This happens for example on obj2 and pic of the Calgary corpus */ - - /* Find the first bit length which could increase: */ - - do { - bits = max_length - 1 - - while (s.bl_count[bits] === 0) { - bits-- - } - - s.bl_count[bits]-- - /* move one leaf down the tree */ - - s.bl_count[bits + 1] += 2 - /* move one overflow item as its brother */ - - s.bl_count[max_length]-- - /* The brother of the overflow item also moves one step up, - * but this does not affect bl_count[max_length] - */ - - overflow -= 2 - } while (overflow > 0) - /* Now recompute all bit lengths, scanning in increasing frequency. - * h is still equal to HEAP_SIZE. (It is simpler to reconstruct all - * lengths instead of fixing only the wrong ones. This idea is taken - * from 'ar' written by Haruhiko Okumura.) - */ - - for (bits = max_length; bits !== 0; bits--) { - n = s.bl_count[bits] - - while (n !== 0) { - m = s.heap[--h] - - if (m > max_code) { - continue - } - - if ( - tree[m * 2 + 1] !== - /*.Len*/ - bits - ) { - // Trace((stderr,"code %d bits %d->%d\n", m, tree[m].Len, bits)); - s.opt_len += - (bits - tree[m * 2 + 1]) * - /*.Len*/ - tree[m * 2] - /*.Freq*/ - tree[m * 2 + 1] = - /*.Len*/ - bits - } - - n-- - } - } - } - /* =========================================================================== - * Generate the codes for a given tree and bit counts (which need not be - * optimal). - * IN assertion: the array bl_count contains the bit length statistics for - * the given tree and the field len is set for all tree elements. - * OUT assertion: the field code is set for all tree elements of non - * zero code length. - */ - - var gen_codes = function gen_codes( - tree, - max_code, - bl_count // ct_data *tree; /* the tree to decorate */ // int max_code; /* largest code with non zero frequency */ // ushf *bl_count; /* number of codes at each bit length */ - ) { - var next_code = new Array(MAX_BITS + 1) - /* next code value for each bit length */ - - var code = 0 - /* running code value */ - - var bits - /* bit index */ - - var n - /* code index */ - - /* The distribution counts are first used to generate the code values - * without bit reversal. - */ - - for (bits = 1; bits <= MAX_BITS; bits++) { - next_code[bits] = code = (code + bl_count[bits - 1]) << 1 - } - /* Check that the bit counts in bl_count are consistent. The last code - * must be all ones. 
- */ - //Assert (code + bl_count[MAX_BITS]-1 == (1< length code (0..28) */ - - length = 0 - - for (code = 0; code < LENGTH_CODES - 1; code++) { - base_length[code] = length - - for (n = 0; n < 1 << extra_lbits[code]; n++) { - _length_code[length++] = code - } - } //Assert (length == 256, "tr_static_init: length != 256"); - - /* Note that the length 255 (match length 258) can be represented - * in two different ways: code 284 + 5 bits or code 285, so we - * overwrite length_code[255] to use the best encoding: - */ - - _length_code[length - 1] = code - /* Initialize the mapping dist (0..32K) -> dist code (0..29) */ - - dist = 0 - - for (code = 0; code < 16; code++) { - base_dist[code] = dist - - for (n = 0; n < 1 << extra_dbits[code]; n++) { - _dist_code[dist++] = code - } - } //Assert (dist == 256, "tr_static_init: dist != 256"); - - dist >>= 7 - /* from now on, all distances are divided by 128 */ - - for (; code < D_CODES; code++) { - base_dist[code] = dist << 7 - - for (n = 0; n < 1 << (extra_dbits[code] - 7); n++) { - _dist_code[256 + dist++] = code - } - } //Assert (dist == 256, "tr_static_init: 256+dist != 512"); - - /* Construct the codes of the static literal tree */ - - for (bits = 0; bits <= MAX_BITS; bits++) { - bl_count[bits] = 0 - } - - n = 0 - - while (n <= 143) { - static_ltree[n * 2 + 1] = - /*.Len*/ - 8 - n++ - bl_count[8]++ - } - - while (n <= 255) { - static_ltree[n * 2 + 1] = - /*.Len*/ - 9 - n++ - bl_count[9]++ - } - - while (n <= 279) { - static_ltree[n * 2 + 1] = - /*.Len*/ - 7 - n++ - bl_count[7]++ - } - - while (n <= 287) { - static_ltree[n * 2 + 1] = - /*.Len*/ - 8 - n++ - bl_count[8]++ - } - /* Codes 286 and 287 do not exist, but we must include them in the - * tree construction to get a canonical Huffman tree (longest code - * all ones) - */ - - gen_codes(static_ltree, L_CODES + 1, bl_count) - /* The static distance tree is trivial: */ - - for (n = 0; n < D_CODES; n++) { - static_dtree[n * 2 + 1] = - /*.Len*/ - 5 - static_dtree[n * 2] = - /*.Code*/ - bi_reverse(n, 5) - } // Now data ready and we can init static trees - - static_l_desc = new StaticTreeDesc(static_ltree, extra_lbits, LITERALS + 1, L_CODES, MAX_BITS) - static_d_desc = new StaticTreeDesc(static_dtree, extra_dbits, 0, D_CODES, MAX_BITS) - static_bl_desc = new StaticTreeDesc(new Array(0), extra_blbits, 0, BL_CODES, MAX_BL_BITS) //static_init_done = true; - } - /* =========================================================================== - * Initialize a new block. - */ - - var init_block = function init_block(s) { - var n - /* iterates over tree elements */ - - /* Initialize the trees. */ - - for (n = 0; n < L_CODES; n++) { - s.dyn_ltree[n * 2] = - /*.Freq*/ - 0 - } - - for (n = 0; n < D_CODES; n++) { - s.dyn_dtree[n * 2] = - /*.Freq*/ - 0 - } - - for (n = 0; n < BL_CODES; n++) { - s.bl_tree[n * 2] = - /*.Freq*/ - 0 - } - - s.dyn_ltree[END_BLOCK * 2] = - /*.Freq*/ - 1 - s.opt_len = s.static_len = 0 - s.last_lit = s.matches = 0 - } - /* =========================================================================== - * Flush the bit buffer and align the output on a byte boundary - */ - - var bi_windup = function bi_windup(s) { - if (s.bi_valid > 8) { - put_short(s, s.bi_buf) - } else if (s.bi_valid > 0) { - //put_byte(s, (Byte)s->bi_buf); - s.pending_buf[s.pending++] = s.bi_buf - } - - s.bi_buf = 0 - s.bi_valid = 0 - } - /* =========================================================================== - * Copy a stored block, storing first the length and its - * one's complement if requested. 
- */ - - var copy_block = function copy_block( - s, - buf, - len, - header //DeflateState *s; //charf *buf; /* the input data */ //unsigned len; /* its length */ //int header; /* true if block header must be written */ - ) { - bi_windup(s) - /* align on byte boundary */ - - if (header) { - put_short(s, len) - put_short(s, ~len) - } // while (len--) { - // put_byte(s, *buf++); - // } - - s.pending_buf.set(s.window.subarray(buf, buf + len), s.pending) - s.pending += len - } - /* =========================================================================== - * Compares to subtrees, using the tree depth as tie breaker when - * the subtrees have equal frequency. This minimizes the worst case length. - */ - - var smaller = function smaller(tree, n, m, depth) { - var _n2 = n * 2 - - var _m2 = m * 2 - - return ( - tree[_n2] < - /*.Freq*/ - tree[_m2] || - /*.Freq*/ - (tree[_n2] === - /*.Freq*/ - tree[_m2] && - /*.Freq*/ - depth[n] <= depth[m]) - ) - } - /* =========================================================================== - * Restore the heap property by moving down the tree starting at node k, - * exchanging a node with the smallest of its two sons if necessary, stopping - * when the heap property is re-established (each father smaller than its - * two sons). - */ - - var pqdownheap = function pqdownheap( - s, - tree, - k // deflate_state *s; // ct_data *tree; /* the tree to restore */ // int k; /* node to move down */ - ) { - var v = s.heap[k] - var j = k << 1 - /* left son of k */ - - while (j <= s.heap_len) { - /* Set j to the smallest of the two sons: */ - if (j < s.heap_len && smaller(tree, s.heap[j + 1], s.heap[j], s.depth)) { - j++ - } - /* Exit if v is smaller than both sons */ - - if (smaller(tree, v, s.heap[j], s.depth)) { - break - } - /* Exchange v with the smallest son */ - - s.heap[k] = s.heap[j] - k = j - /* And continue down the tree, setting j to the left son of k */ - - j <<= 1 - } - - s.heap[k] = v - } // inlined manually - // const SMALLEST = 1; - - /* =========================================================================== - * Send the block data compressed using the given Huffman trees - */ - - var compress_block = function compress_block( - s, - ltree, - dtree // deflate_state *s; // const ct_data *ltree; /* literal tree */ // const ct_data *dtree; /* distance tree */ - ) { - var dist - /* distance of matched string */ - - var lc - /* match length or unmatched char (if dist == 0) */ - - var lx = 0 - /* running index in l_buf */ - - var code - /* the code to send */ - - var extra - /* number of extra bits to send */ - - if (s.last_lit !== 0) { - do { - dist = (s.pending_buf[s.d_buf + lx * 2] << 8) | s.pending_buf[s.d_buf + lx * 2 + 1] - lc = s.pending_buf[s.l_buf + lx] - lx++ - - if (dist === 0) { - send_code(s, lc, ltree) - /* send a literal byte */ - //Tracecv(isgraph(lc), (stderr," '%c' ", lc)); - } else { - /* Here, lc is the match length - MIN_MATCH */ - code = _length_code[lc] - send_code(s, code + LITERALS + 1, ltree) - /* send the length code */ - - extra = extra_lbits[code] - - if (extra !== 0) { - lc -= base_length[code] - send_bits(s, lc, extra) - /* send the extra length bits */ - } - - dist-- - /* dist is now the match distance - 1 */ - - code = d_code(dist) //Assert (code < D_CODES, "bad d_code"); - - send_code(s, code, dtree) - /* send the distance code */ - - extra = extra_dbits[code] - - if (extra !== 0) { - dist -= base_dist[code] - send_bits(s, dist, extra) - /* send the extra distance bits */ - } - } - /* literal or match pair ? 
*/ - - /* Check that the overlay between pending_buf and d_buf+l_buf is ok: */ - //Assert((uInt)(s->pending) < s->lit_bufsize + 2*lx, - // "pendingBuf overflow"); - } while (lx < s.last_lit) - } - - send_code(s, END_BLOCK, ltree) - } - /* =========================================================================== - * Construct one Huffman tree and assigns the code bit strings and lengths. - * Update the total bit length for the current block. - * IN assertion: the field freq is set for all tree elements. - * OUT assertions: the fields len and code are set to the optimal bit length - * and corresponding code. The length opt_len is updated; static_len is - * also updated if stree is not null. The field max_code is set. - */ - - var build_tree = function build_tree( - s, - desc // deflate_state *s; // tree_desc *desc; /* the tree descriptor */ - ) { - var tree = desc.dyn_tree - var stree = desc.stat_desc.static_tree - var has_stree = desc.stat_desc.has_stree - var elems = desc.stat_desc.elems - var n, m - /* iterate over heap elements */ - - var max_code = -1 - /* largest code with non zero frequency */ - - var node - /* new node being created */ - - /* Construct the initial heap, with least frequent element in - * heap[SMALLEST]. The sons of heap[n] are heap[2*n] and heap[2*n+1]. - * heap[0] is not used. - */ - - s.heap_len = 0 - s.heap_max = HEAP_SIZE - - for (n = 0; n < elems; n++) { - if ( - tree[n * 2] !== - /*.Freq*/ - 0 - ) { - s.heap[++s.heap_len] = max_code = n - s.depth[n] = 0 - } else { - tree[n * 2 + 1] = - /*.Len*/ - 0 - } - } - /* The pkzip format requires that at least one distance code exists, - * and that at least one bit should be sent even if there is only one - * possible code. So to avoid special checks later on we force at least - * two codes of non zero frequency. - */ - - while (s.heap_len < 2) { - node = s.heap[++s.heap_len] = max_code < 2 ? ++max_code : 0 - tree[node * 2] = - /*.Freq*/ - 1 - s.depth[node] = 0 - s.opt_len-- - - if (has_stree) { - s.static_len -= stree[node * 2 + 1] - /*.Len*/ - } - /* node is 0 or 1 so it does not have extra bits */ - } - - desc.max_code = max_code - /* The elements heap[heap_len/2+1 .. heap_len] are leaves of the tree, - * establish sub-heaps of increasing lengths: - */ - - for ( - n = s.heap_len >> 1; - /*int /2*/ - n >= 1; - n-- - ) { - pqdownheap(s, tree, n) - } - /* Construct the Huffman tree by repeatedly combining the least two - * frequent nodes. - */ - - node = elems - /* next internal node of the tree */ - - do { - //pqremove(s, tree, n); /* n = node of least frequency */ - - /*** pqremove ***/ - n = s.heap[1] - /*SMALLEST*/ - s.heap[1] = s.heap[s.heap_len--] - /*SMALLEST*/ - pqdownheap( - s, - tree, - 1 - /*SMALLEST*/ - ) - /***/ - - m = s.heap[1] - /*SMALLEST*/ - /* m = node of next least frequency */ - - s.heap[--s.heap_max] = n - /* keep the nodes sorted by frequency */ - - s.heap[--s.heap_max] = m - /* Create a new node father of n and m */ - - tree[node * 2] = - /*.Freq*/ - tree[n * 2] + - /*.Freq*/ - tree[m * 2] - /*.Freq*/ - s.depth[node] = (s.depth[n] >= s.depth[m] ? s.depth[n] : s.depth[m]) + 1 - tree[n * 2 + 1] = - /*.Dad*/ - tree[m * 2 + 1] = - /*.Dad*/ - node - /* and insert the new node in the heap */ - - s.heap[1] = node++ - /*SMALLEST*/ - pqdownheap( - s, - tree, - 1 - /*SMALLEST*/ - ) - } while (s.heap_len >= 2) - - s.heap[--s.heap_max] = s.heap[1] - /*SMALLEST*/ - /* At this point, the fields freq and dad are set. We can now - * generate the bit lengths. 
- */ - - gen_bitlen(s, desc) - /* The field len is now set, we can generate the bit codes */ - - gen_codes(tree, max_code, s.bl_count) - } - /* =========================================================================== - * Scan a literal or distance tree to determine the frequencies of the codes - * in the bit length tree. - */ - - var scan_tree = function scan_tree( - s, - tree, - max_code // deflate_state *s; // ct_data *tree; /* the tree to be scanned */ // int max_code; /* and its largest code of non zero frequency */ - ) { - var n - /* iterates over all tree elements */ - - var prevlen = -1 - /* last emitted length */ - - var curlen - /* length of current code */ - - var nextlen = tree[0 * 2 + 1] - /*.Len*/ - /* length of next code */ - - var count = 0 - /* repeat count of the current code */ - - var max_count = 7 - /* max repeat count */ - - var min_count = 4 - /* min repeat count */ - - if (nextlen === 0) { - max_count = 138 - min_count = 3 - } - - tree[(max_code + 1) * 2 + 1] = - /*.Len*/ - 0xffff - /* guard */ - - for (n = 0; n <= max_code; n++) { - curlen = nextlen - nextlen = tree[(n + 1) * 2 + 1] - /*.Len*/ - - if (++count < max_count && curlen === nextlen) { - continue - } else if (count < min_count) { - s.bl_tree[curlen * 2] += - /*.Freq*/ - count - } else if (curlen !== 0) { - if (curlen !== prevlen) { - s.bl_tree[curlen * 2] /*.Freq*/++ - } - - s.bl_tree[REP_3_6 * 2] /*.Freq*/++ - } else if (count <= 10) { - s.bl_tree[REPZ_3_10 * 2] /*.Freq*/++ - } else { - s.bl_tree[REPZ_11_138 * 2] /*.Freq*/++ - } - - count = 0 - prevlen = curlen - - if (nextlen === 0) { - max_count = 138 - min_count = 3 - } else if (curlen === nextlen) { - max_count = 6 - min_count = 3 - } else { - max_count = 7 - min_count = 4 - } - } - } - /* =========================================================================== - * Send a literal or distance tree in compressed form, using the codes in - * bl_tree. 
- */ - - var send_tree = function send_tree( - s, - tree, - max_code // deflate_state *s; // ct_data *tree; /* the tree to be scanned */ // int max_code; /* and its largest code of non zero frequency */ - ) { - var n - /* iterates over all tree elements */ - - var prevlen = -1 - /* last emitted length */ - - var curlen - /* length of current code */ - - var nextlen = tree[0 * 2 + 1] - /*.Len*/ - /* length of next code */ - - var count = 0 - /* repeat count of the current code */ - - var max_count = 7 - /* max repeat count */ - - var min_count = 4 - /* min repeat count */ - - /* tree[max_code+1].Len = -1; */ - - /* guard already set */ - - if (nextlen === 0) { - max_count = 138 - min_count = 3 - } - - for (n = 0; n <= max_code; n++) { - curlen = nextlen - nextlen = tree[(n + 1) * 2 + 1] - /*.Len*/ - - if (++count < max_count && curlen === nextlen) { - continue - } else if (count < min_count) { - do { - send_code(s, curlen, s.bl_tree) - } while (--count !== 0) - } else if (curlen !== 0) { - if (curlen !== prevlen) { - send_code(s, curlen, s.bl_tree) - count-- - } //Assert(count >= 3 && count <= 6, " 3_6?"); - - send_code(s, REP_3_6, s.bl_tree) - send_bits(s, count - 3, 2) - } else if (count <= 10) { - send_code(s, REPZ_3_10, s.bl_tree) - send_bits(s, count - 3, 3) - } else { - send_code(s, REPZ_11_138, s.bl_tree) - send_bits(s, count - 11, 7) - } - - count = 0 - prevlen = curlen - - if (nextlen === 0) { - max_count = 138 - min_count = 3 - } else if (curlen === nextlen) { - max_count = 6 - min_count = 3 - } else { - max_count = 7 - min_count = 4 - } - } - } - /* =========================================================================== - * Construct the Huffman tree for the bit lengths and return the index in - * bl_order of the last bit length code to send. - */ - - var build_bl_tree = function build_bl_tree(s) { - var max_blindex - /* index of last bit length code of non zero freq */ - - /* Determine the bit length frequencies for literal and distance trees */ - - scan_tree(s, s.dyn_ltree, s.l_desc.max_code) - scan_tree(s, s.dyn_dtree, s.d_desc.max_code) - /* Build the bit length tree: */ - - build_tree(s, s.bl_desc) - /* opt_len now includes the length of the tree representations, except - * the lengths of the bit lengths codes and the 5+5+4 bits for the counts. - */ - - /* Determine the number of bit length codes to send. The pkzip format - * requires that at least 4 bit length codes be sent. (appnote.txt says - * 3 but the actual value used is 4.) - */ - - for (max_blindex = BL_CODES - 1; max_blindex >= 3; max_blindex--) { - if ( - s.bl_tree[bl_order[max_blindex] * 2 + 1] !== - /*.Len*/ - 0 - ) { - break - } - } - /* Update opt_len to include the bit length tree and counts */ - - s.opt_len += 3 * (max_blindex + 1) + 5 + 5 + 4 //Tracev((stderr, "\ndyn trees: dyn %ld, stat %ld", - // s->opt_len, s->static_len)); - - return max_blindex - } - /* =========================================================================== - * Send the header for a block using dynamic Huffman trees: the counts, the - * lengths of the bit length codes, the literal tree and the distance tree. - * IN assertion: lcodes >= 257, dcodes >= 1, blcodes >= 4. 
- */ - - var send_all_trees = function send_all_trees( - s, - lcodes, - dcodes, - blcodes // deflate_state *s; // int lcodes, dcodes, blcodes; /* number of codes for each tree */ - ) { - var rank - /* index in bl_order */ - //Assert (lcodes >= 257 && dcodes >= 1 && blcodes >= 4, "not enough codes"); - //Assert (lcodes <= L_CODES && dcodes <= D_CODES && blcodes <= BL_CODES, - // "too many codes"); - //Tracev((stderr, "\nbl counts: ")); - - send_bits(s, lcodes - 257, 5) - /* not +255 as stated in appnote.txt */ - - send_bits(s, dcodes - 1, 5) - send_bits(s, blcodes - 4, 4) - /* not -3 as stated in appnote.txt */ - - for (rank = 0; rank < blcodes; rank++) { - //Tracev((stderr, "\nbl code %2d ", bl_order[rank])); - send_bits( - s, - s.bl_tree[bl_order[rank] * 2 + 1], - /*.Len*/ - 3 - ) - } //Tracev((stderr, "\nbl tree: sent %ld", s->bits_sent)); - - send_tree(s, s.dyn_ltree, lcodes - 1) - /* literal tree */ - //Tracev((stderr, "\nlit tree: sent %ld", s->bits_sent)); - - send_tree(s, s.dyn_dtree, dcodes - 1) - /* distance tree */ - //Tracev((stderr, "\ndist tree: sent %ld", s->bits_sent)); - } - /* =========================================================================== - * Check if the data type is TEXT or BINARY, using the following algorithm: - * - TEXT if the two conditions below are satisfied: - * a) There are no non-portable control characters belonging to the - * "black list" (0..6, 14..25, 28..31). - * b) There is at least one printable character belonging to the - * "white list" (9 {TAB}, 10 {LF}, 13 {CR}, 32..255). - * - BINARY otherwise. - * - The following partially-portable control characters form a - * "gray list" that is ignored in this detection algorithm: - * (7 {BEL}, 8 {BS}, 11 {VT}, 12 {FF}, 26 {SUB}, 27 {ESC}). - * IN assertion: the fields Freq of dyn_ltree are set. - */ - - var detect_data_type = function detect_data_type(s) { - /* black_mask is the bit mask of black-listed bytes - * set bits 0..6, 14..25, and 28..31 - * 0xf3ffc07f = binary 11110011111111111100000001111111 - */ - var black_mask = 0xf3ffc07f - var n - /* Check for non-textual ("black-listed") bytes. */ - - for (n = 0; n <= 31; n++, black_mask >>>= 1) { - if ( - black_mask & 1 && - s.dyn_ltree[n * 2] !== - /*.Freq*/ - 0 - ) { - return Z_BINARY - } - } - /* Check for textual ("white-listed") bytes. */ - - if ( - s.dyn_ltree[9 * 2] !== - /*.Freq*/ - 0 || - s.dyn_ltree[10 * 2] !== - /*.Freq*/ - 0 || - s.dyn_ltree[13 * 2] !== - /*.Freq*/ - 0 - ) { - return Z_TEXT - } - - for (n = 32; n < LITERALS; n++) { - if ( - s.dyn_ltree[n * 2] !== - /*.Freq*/ - 0 - ) { - return Z_TEXT - } - } - /* There are no "black-listed" or "white-listed" bytes: - * this stream either is empty or has tolerated ("gray-listed") bytes only. - */ - - return Z_BINARY - } - - var static_init_done = false - /* =========================================================================== - * Initialize the tree data structures for a new zlib stream. 
- */ - - var _tr_init = function _tr_init(s) { - if (!static_init_done) { - tr_static_init() - static_init_done = true - } - - s.l_desc = new TreeDesc(s.dyn_ltree, static_l_desc) - s.d_desc = new TreeDesc(s.dyn_dtree, static_d_desc) - s.bl_desc = new TreeDesc(s.bl_tree, static_bl_desc) - s.bi_buf = 0 - s.bi_valid = 0 - /* Initialize the first block of the first file: */ - - init_block(s) - } - /* =========================================================================== - * Send a stored block - */ - - var _tr_stored_block = function _tr_stored_block( - s, - buf, - stored_len, - last //DeflateState *s; //charf *buf; /* input block */ //ulg stored_len; /* length of input block */ //int last; /* one if this is the last block for a file */ - ) { - send_bits(s, (STORED_BLOCK << 1) + (last ? 1 : 0), 3) - /* send block type */ - - copy_block(s, buf, stored_len, true) - /* with header */ - } - /* =========================================================================== - * Send one empty static block to give enough lookahead for inflate. - * This takes 10 bits, of which 7 may remain in the bit buffer. - */ - - var _tr_align = function _tr_align(s) { - send_bits(s, STATIC_TREES << 1, 3) - send_code(s, END_BLOCK, static_ltree) - bi_flush(s) - } - /* =========================================================================== - * Determine the best encoding for the current block: dynamic trees, static - * trees or store, and output the encoded block to the zip file. - */ - - var _tr_flush_block = function _tr_flush_block( - s, - buf, - stored_len, - last //DeflateState *s; //charf *buf; /* input block, or NULL if too old */ //ulg stored_len; /* length of input block */ //int last; /* one if this is the last block for a file */ - ) { - var opt_lenb, static_lenb - /* opt_len and static_len in bytes */ - - var max_blindex = 0 - /* index of last bit length code of non zero freq */ - - /* Build the Huffman trees unless a stored block is forced */ - - if (s.level > 0) { - /* Check if the file is binary or text */ - if (s.strm.data_type === Z_UNKNOWN) { - s.strm.data_type = detect_data_type(s) - } - /* Construct the literal and distance trees */ - - build_tree(s, s.l_desc) // Tracev((stderr, "\nlit data: dyn %ld, stat %ld", s->opt_len, - // s->static_len)); - - build_tree(s, s.d_desc) // Tracev((stderr, "\ndist data: dyn %ld, stat %ld", s->opt_len, - // s->static_len)); - - /* At this point, opt_len and static_len are the total bit lengths of - * the compressed block data, excluding the tree representations. - */ - - /* Build the bit length tree for the above two trees, and get the index - * in bl_order of the last bit length code to send. - */ - - max_blindex = build_bl_tree(s) - /* Determine the best encoding. Compute the block lengths in bytes. */ - - opt_lenb = (s.opt_len + 3 + 7) >>> 3 - static_lenb = (s.static_len + 3 + 7) >>> 3 // Tracev((stderr, "\nopt %lu(%lu) stat %lu(%lu) stored %lu lit %u ", - // opt_lenb, s->opt_len, static_lenb, s->static_len, stored_len, - // s->last_lit)); - - if (static_lenb <= opt_lenb) { - opt_lenb = static_lenb - } - } else { - // Assert(buf != (char*)0, "lost buf"); - opt_lenb = static_lenb = stored_len + 5 - /* force a stored block */ - } - - if (stored_len + 4 <= opt_lenb && buf !== -1) { - /* 4: two words for the lengths */ - - /* The test buf != NULL is only necessary if LIT_BUFSIZE > WSIZE. - * Otherwise we can't have processed more than WSIZE input bytes since - * the last block flush, because compression would have been - * successful. 
If LIT_BUFSIZE <= WSIZE, it is never too late to - * transform a block into a stored block. - */ - _tr_stored_block(s, buf, stored_len, last) - } else if (s.strategy === Z_FIXED || static_lenb === opt_lenb) { - send_bits(s, (STATIC_TREES << 1) + (last ? 1 : 0), 3) - compress_block(s, static_ltree, static_dtree) - } else { - send_bits(s, (DYN_TREES << 1) + (last ? 1 : 0), 3) - send_all_trees(s, s.l_desc.max_code + 1, s.d_desc.max_code + 1, max_blindex + 1) - compress_block(s, s.dyn_ltree, s.dyn_dtree) - } // Assert (s->compressed_len == s->bits_sent, "bad compressed size"); - - /* The above check is made mod 2^32, for files larger than 512 MB - * and uLong implemented on 32 bits. - */ - - init_block(s) - - if (last) { - bi_windup(s) - } // Tracev((stderr,"\ncomprlen %lu(%lu) ", s->compressed_len>>3, - // s->compressed_len-7*last)); - } - /* =========================================================================== - * Save the match info and tally the frequency counts. Return true if - * the current block must be flushed. - */ - - var _tr_tally = function _tr_tally( - s, - dist, - lc // deflate_state *s; // unsigned dist; /* distance of matched string */ // unsigned lc; /* match length-MIN_MATCH or unmatched char (if dist==0) */ - ) { - //let out_length, in_length, dcode; - s.pending_buf[s.d_buf + s.last_lit * 2] = (dist >>> 8) & 0xff - s.pending_buf[s.d_buf + s.last_lit * 2 + 1] = dist & 0xff - s.pending_buf[s.l_buf + s.last_lit] = lc & 0xff - s.last_lit++ - - if (dist === 0) { - /* lc is the unmatched char */ - s.dyn_ltree[lc * 2] /*.Freq*/++ - } else { - s.matches++ - /* Here, lc is the match length - MIN_MATCH */ - - dist-- - /* dist = match distance - 1 */ - //Assert((ush)dist < (ush)MAX_DIST(s) && - // (ush)lc <= (ush)(MAX_MATCH-MIN_MATCH) && - // (ush)d_code(dist) < (ush)D_CODES, "_tr_tally: bad match"); - - s.dyn_ltree[(_length_code[lc] + LITERALS + 1) * 2] /*.Freq*/++ - s.dyn_dtree[d_code(dist) * 2] /*.Freq*/++ - } // (!) This block is disabled in zlib defaults, - // don't enable it for binary compatibility - //#ifdef TRUNCATE_BLOCK - // /* Try to guess if it is profitable to stop the current block here */ - // if ((s.last_lit & 0x1fff) === 0 && s.level > 2) { - // /* Compute an upper bound for the compressed length */ - // out_length = s.last_lit*8; - // in_length = s.strstart - s.block_start; - // - // for (dcode = 0; dcode < D_CODES; dcode++) { - // out_length += s.dyn_dtree[dcode*2]/*.Freq*/ * (5 + extra_dbits[dcode]); - // } - // out_length >>>= 3; - // //Tracev((stderr,"\nlast_lit %u, in %ld, out ~%ld(%ld%%) ", - // // s->last_lit, in_length, out_length, - // // 100L - out_length*100L/in_length)); - // if (s.matches < (s.last_lit>>1)/*int /2*/ && out_length < (in_length>>1)/*int /2*/) { - // return true; - // } - // } - //#endif - - return s.last_lit === s.lit_bufsize - 1 - /* We avoid equality with lit_bufsize because of wraparound at 64K - * on 16 bit machines and because stored blocks are restricted to - * 64K-1 bytes. - */ - } - - var _tr_init_1 = _tr_init - var _tr_stored_block_1 = _tr_stored_block - var _tr_flush_block_1 = _tr_flush_block - var _tr_tally_1 = _tr_tally - var _tr_align_1 = _tr_align - var trees = { - _tr_init: _tr_init_1, - _tr_stored_block: _tr_stored_block_1, - _tr_flush_block: _tr_flush_block_1, - _tr_tally: _tr_tally_1, - _tr_align: _tr_align_1, - } - - // It isn't worth it to make additional optimizations as in original. - // Small size is preferable. 
- // (C) 1995-2013 Jean-loup Gailly and Mark Adler - // (C) 2014-2017 Vitaly Puzrin and Andrey Tupitsin - // - // This software is provided 'as-is', without any express or implied - // warranty. In no event will the authors be held liable for any damages - // arising from the use of this software. - // - // Permission is granted to anyone to use this software for any purpose, - // including commercial applications, and to alter it and redistribute it - // freely, subject to the following restrictions: - // - // 1. The origin of this software must not be misrepresented; you must not - // claim that you wrote the original software. If you use this software - // in a product, an acknowledgment in the product documentation would be - // appreciated but is not required. - // 2. Altered source versions must be plainly marked as such, and must not be - // misrepresented as being the original software. - // 3. This notice may not be removed or altered from any source distribution. - - var adler32 = function adler32(adler, buf, len, pos) { - var s1 = (adler & 0xffff) | 0, - s2 = ((adler >>> 16) & 0xffff) | 0, - n = 0 - - while (len !== 0) { - // Set limit ~ twice less than 5552, to keep - // s2 in 31-bits, because we force signed ints. - // in other case %= will fail. - n = len > 2000 ? 2000 : len - len -= n - - do { - s1 = (s1 + buf[pos++]) | 0 - s2 = (s2 + s1) | 0 - } while (--n) - - s1 %= 65521 - s2 %= 65521 - } - - return s1 | (s2 << 16) | 0 - } - - var adler32_1 = adler32 - - // So write code to minimize size - no pregenerated tables - // and array tools dependencies. - // (C) 1995-2013 Jean-loup Gailly and Mark Adler - // (C) 2014-2017 Vitaly Puzrin and Andrey Tupitsin - // - // This software is provided 'as-is', without any express or implied - // warranty. In no event will the authors be held liable for any damages - // arising from the use of this software. - // - // Permission is granted to anyone to use this software for any purpose, - // including commercial applications, and to alter it and redistribute it - // freely, subject to the following restrictions: - // - // 1. The origin of this software must not be misrepresented; you must not - // claim that you wrote the original software. If you use this software - // in a product, an acknowledgment in the product documentation would be - // appreciated but is not required. - // 2. Altered source versions must be plainly marked as such, and must not be - // misrepresented as being the original software. - // 3. This notice may not be removed or altered from any source distribution. - // Use ordinary array, since untyped makes no boost here - - var makeTable = function makeTable() { - var c, - table = [] - - for (var n = 0; n < 256; n++) { - c = n - - for (var k = 0; k < 8; k++) { - c = c & 1 ? 0xedb88320 ^ (c >>> 1) : c >>> 1 - } - - table[n] = c - } - - return table - } // Create table on load. Just 255 signed longs. Not a problem. - - var crcTable = new Uint32Array(makeTable()) - - var crc32 = function crc32(crc, buf, len, pos) { - var t = crcTable - var end = pos + len - crc ^= -1 - - for (var i = pos; i < end; i++) { - crc = (crc >>> 8) ^ t[(crc ^ buf[i]) & 0xff] - } - - return crc ^ -1 // >>> 0; - } - - var crc32_1 = crc32 - - // (C) 2014-2017 Vitaly Puzrin and Andrey Tupitsin - // - // This software is provided 'as-is', without any express or implied - // warranty. In no event will the authors be held liable for any damages - // arising from the use of this software. 
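// --- Illustrative usage (editor's addition): exercising the two checksum
// routines above against standard test vectors. Initial values follow zlib
// convention (adler32 starts at 1, crc32 at 0); `>>> 0` forces the signed
// JS result back to unsigned for display.
var toBytes = function (s) {
  return Uint8Array.from(s, function (c) {
    return c.charCodeAt(0)
  })
}
var w = toBytes('Wikipedia')
console.log((adler32(1, w, w.length, 0) >>> 0).toString(16)) // '11e60398'
var d = toBytes('123456789')
console.log((crc32(0, d, d.length, 0) >>> 0).toString(16)) // 'cbf43926'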
- // - // Permission is granted to anyone to use this software for any purpose, - // including commercial applications, and to alter it and redistribute it - // freely, subject to the following restrictions: - // - // 1. The origin of this software must not be misrepresented; you must not - // claim that you wrote the original software. If you use this software - // in a product, an acknowledgment in the product documentation would be - // appreciated but is not required. - // 2. Altered source versions must be plainly marked as such, and must not be - // misrepresented as being the original software. - // 3. This notice may not be removed or altered from any source distribution. - - var messages = { - 2: 'need dictionary', - - /* Z_NEED_DICT 2 */ - 1: 'stream end', - - /* Z_STREAM_END 1 */ - 0: '', - - /* Z_OK 0 */ - '-1': 'file error', - - /* Z_ERRNO (-1) */ - '-2': 'stream error', - - /* Z_STREAM_ERROR (-2) */ - '-3': 'data error', - - /* Z_DATA_ERROR (-3) */ - '-4': 'insufficient memory', - - /* Z_MEM_ERROR (-4) */ - '-5': 'buffer error', - - /* Z_BUF_ERROR (-5) */ - '-6': 'incompatible version', - /* Z_VERSION_ERROR (-6) */ - } - - // (C) 2014-2017 Vitaly Puzrin and Andrey Tupitsin - // - // This software is provided 'as-is', without any express or implied - // warranty. In no event will the authors be held liable for any damages - // arising from the use of this software. - // - // Permission is granted to anyone to use this software for any purpose, - // including commercial applications, and to alter it and redistribute it - // freely, subject to the following restrictions: - // - // 1. The origin of this software must not be misrepresented; you must not - // claim that you wrote the original software. If you use this software - // in a product, an acknowledgment in the product documentation would be - // appreciated but is not required. - // 2. Altered source versions must be plainly marked as such, and must not be - // misrepresented as being the original software. - // 3. This notice may not be removed or altered from any source distribution. - - var constants = { - /* Allowed flush values; see deflate() and inflate() below for details */ - Z_NO_FLUSH: 0, - Z_PARTIAL_FLUSH: 1, - Z_SYNC_FLUSH: 2, - Z_FULL_FLUSH: 3, - Z_FINISH: 4, - Z_BLOCK: 5, - Z_TREES: 6, - - /* Return codes for the compression/decompression functions. Negative values - * are errors, positive values are used for special but normal events. - */ - Z_OK: 0, - Z_STREAM_END: 1, - Z_NEED_DICT: 2, - Z_ERRNO: -1, - Z_STREAM_ERROR: -2, - Z_DATA_ERROR: -3, - Z_MEM_ERROR: -4, - Z_BUF_ERROR: -5, - //Z_VERSION_ERROR: -6, - - /* compression levels */ - Z_NO_COMPRESSION: 0, - Z_BEST_SPEED: 1, - Z_BEST_COMPRESSION: 9, - Z_DEFAULT_COMPRESSION: -1, - Z_FILTERED: 1, - Z_HUFFMAN_ONLY: 2, - Z_RLE: 3, - Z_FIXED: 4, - Z_DEFAULT_STRATEGY: 0, - - /* Possible values of the data_type field (though see inflate()) */ - Z_BINARY: 0, - Z_TEXT: 1, - //Z_ASCII: 1, // = Z_TEXT (deprecated) - Z_UNKNOWN: 2, - - /* The deflate compression method */ - Z_DEFLATED: 8, //Z_NULL: null // Use -1 or null inline, depending on var type - } - - // (C) 2014-2017 Vitaly Puzrin and Andrey Tupitsin - // - // This software is provided 'as-is', without any express or implied - // warranty. In no event will the authors be held liable for any damages - // arising from the use of this software. 
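// --- Illustrative note (editor's addition): the messages table above keys
// its negative codes as strings ('-1', '-2', ...), but JS coerces numeric
// property access to strings, so a plain numeric lookup still works -- which
// is exactly how the err() helper further below uses it:
console.log(messages[constants.Z_STREAM_ERROR]) // 'stream error' (-2)
console.log(messages[constants.Z_STREAM_END]) // 'stream end' (1)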
- // - // Permission is granted to anyone to use this software for any purpose, - // including commercial applications, and to alter it and redistribute it - // freely, subject to the following restrictions: - // - // 1. The origin of this software must not be misrepresented; you must not - // claim that you wrote the original software. If you use this software - // in a product, an acknowledgment in the product documentation would be - // appreciated but is not required. - // 2. Altered source versions must be plainly marked as such, and must not be - // misrepresented as being the original software. - // 3. This notice may not be removed or altered from any source distribution. - - var _tr_init$1 = trees._tr_init, - _tr_stored_block$1 = trees._tr_stored_block, - _tr_flush_block$1 = trees._tr_flush_block, - _tr_tally$1 = trees._tr_tally, - _tr_align$1 = trees._tr_align - /* Public constants ==========================================================*/ - - /* ===========================================================================*/ - - var Z_NO_FLUSH = constants.Z_NO_FLUSH, - Z_PARTIAL_FLUSH = constants.Z_PARTIAL_FLUSH, - Z_FULL_FLUSH = constants.Z_FULL_FLUSH, - Z_FINISH = constants.Z_FINISH, - Z_BLOCK = constants.Z_BLOCK, - Z_OK = constants.Z_OK, - Z_STREAM_END = constants.Z_STREAM_END, - Z_STREAM_ERROR = constants.Z_STREAM_ERROR, - Z_DATA_ERROR = constants.Z_DATA_ERROR, - Z_BUF_ERROR = constants.Z_BUF_ERROR, - Z_DEFAULT_COMPRESSION = constants.Z_DEFAULT_COMPRESSION, - Z_FILTERED = constants.Z_FILTERED, - Z_HUFFMAN_ONLY = constants.Z_HUFFMAN_ONLY, - Z_RLE = constants.Z_RLE, - Z_FIXED$1 = constants.Z_FIXED, - Z_DEFAULT_STRATEGY = constants.Z_DEFAULT_STRATEGY, - Z_UNKNOWN$1 = constants.Z_UNKNOWN, - Z_DEFLATED = constants.Z_DEFLATED - /*============================================================================*/ - - var MAX_MEM_LEVEL = 9 - /* Maximum value for memLevel in deflateInit2 */ - - var MAX_WBITS = 15 - /* 32K LZ77 window */ - - var DEF_MEM_LEVEL = 8 - var LENGTH_CODES$1 = 29 - /* number of length codes, not counting the special END_BLOCK code */ - - var LITERALS$1 = 256 - /* number of literal bytes 0..255 */ - - var L_CODES$1 = LITERALS$1 + 1 + LENGTH_CODES$1 - /* number of Literal or Length codes, including the END_BLOCK code */ - - var D_CODES$1 = 30 - /* number of distance codes */ - - var BL_CODES$1 = 19 - /* number of codes used to transfer the bit lengths */ - - var HEAP_SIZE$1 = 2 * L_CODES$1 + 1 - /* maximum heap size */ - - var MAX_BITS$1 = 15 - /* All codes must not exceed MAX_BITS bits */ - - var MIN_MATCH$1 = 3 - var MAX_MATCH$1 = 258 - var MIN_LOOKAHEAD = MAX_MATCH$1 + MIN_MATCH$1 + 1 - var PRESET_DICT = 0x20 - var INIT_STATE = 42 - var EXTRA_STATE = 69 - var NAME_STATE = 73 - var COMMENT_STATE = 91 - var HCRC_STATE = 103 - var BUSY_STATE = 113 - var FINISH_STATE = 666 - var BS_NEED_MORE = 1 - /* block not completed, need more input or more output */ - - var BS_BLOCK_DONE = 2 - /* block flush performed */ - - var BS_FINISH_STARTED = 3 - /* finish started, need only more output at next deflate */ - - var BS_FINISH_DONE = 4 - /* finish done, accept no more input or output */ - - var OS_CODE = 0x03 // Unix :) . Don't detect, use this default. - - var err = function err(strm, errorCode) { - strm.msg = messages[errorCode] - return errorCode - } - - var rank = function rank(f) { - return (f << 1) - (f > 4 ? 
9 : 0) - } - - var zero$1 = function zero(buf) { - var len = buf.length - - while (--len >= 0) { - buf[len] = 0 - } - } - /* eslint-disable new-cap */ - - var HASH_ZLIB = function HASH_ZLIB(s, prev, data) { - return ((prev << s.hash_shift) ^ data) & s.hash_mask - } // This hash causes less collisions, https://github.com/nodeca/pako/issues/135 - // But breaks binary compatibility - //let HASH_FAST = (s, prev, data) => ((prev << 8) + (prev >> 8) + (data << 4)) & s.hash_mask; - - var HASH = HASH_ZLIB - /* ========================================================================= - * Flush as much pending output as possible. All deflate() output goes - * through this function so some applications may wish to modify it - * to avoid allocating a large strm->output buffer and copying into it. - * (See also read_buf()). - */ - - var flush_pending = function flush_pending(strm) { - var s = strm.state //_tr_flush_bits(s); - - var len = s.pending - - if (len > strm.avail_out) { - len = strm.avail_out - } - - if (len === 0) { - return - } - - strm.output.set(s.pending_buf.subarray(s.pending_out, s.pending_out + len), strm.next_out) - strm.next_out += len - s.pending_out += len - strm.total_out += len - strm.avail_out -= len - s.pending -= len - - if (s.pending === 0) { - s.pending_out = 0 - } - } - - var flush_block_only = function flush_block_only(s, last) { - _tr_flush_block$1(s, s.block_start >= 0 ? s.block_start : -1, s.strstart - s.block_start, last) - - s.block_start = s.strstart - flush_pending(s.strm) - } - - var put_byte = function put_byte(s, b) { - s.pending_buf[s.pending++] = b - } - /* ========================================================================= - * Put a short in the pending buffer. The 16-bit value is put in MSB order. - * IN assertion: the stream state is correct and there is enough room in - * pending_buf. - */ - - var putShortMSB = function putShortMSB(s, b) { - // put_byte(s, (Byte)(b >> 8)); - // put_byte(s, (Byte)(b & 0xff)); - s.pending_buf[s.pending++] = (b >>> 8) & 0xff - s.pending_buf[s.pending++] = b & 0xff - } - /* =========================================================================== - * Read a new buffer from the current input stream, update the adler32 - * and total number of bytes read. All deflate() input goes through - * this function so some applications may wish to modify it to avoid - * allocating a large strm->input buffer and copying from it. - * (See also flush_pending()). - */ - - var read_buf = function read_buf(strm, buf, start, size) { - var len = strm.avail_in - - if (len > size) { - len = size - } - - if (len === 0) { - return 0 - } - - strm.avail_in -= len // zmemcpy(buf, strm->next_in, len); - - buf.set(strm.input.subarray(strm.next_in, strm.next_in + len), start) - - if (strm.state.wrap === 1) { - strm.adler = adler32_1(strm.adler, buf, len, start) - } else if (strm.state.wrap === 2) { - strm.adler = crc32_1(strm.adler, buf, len, start) - } - - strm.next_in += len - strm.total_in += len - return len - } - /* =========================================================================== - * Set match_start to the longest match starting at the given string and - * return its length. Matches shorter or equal to prev_length are discarded, - * in which case the result is equal to prev_length and match_start is - * garbage. - * IN assertions: cur_match is the head of the hash chain for the current - * string (strstart) and its distance is <= MAX_DIST, and prev_length >= 1 - * OUT assertion: the match length is not greater than s->lookahead. 
- */ - - var longest_match = function longest_match(s, cur_match) { - var chain_length = s.max_chain_length - /* max hash chain length */ - - var scan = s.strstart - /* current string */ - - var match - /* matched string */ - - var len - /* length of current match */ - - var best_len = s.prev_length - /* best match length so far */ - - var nice_match = s.nice_match - /* stop if match long enough */ - - var limit = s.strstart > s.w_size - MIN_LOOKAHEAD ? s.strstart - (s.w_size - MIN_LOOKAHEAD) : 0 - /*NIL*/ - var _win = s.window // shortcut - - var wmask = s.w_mask - var prev = s.prev - /* Stop when cur_match becomes <= limit. To simplify the code, - * we prevent matches with the string of window index 0. - */ - - var strend = s.strstart + MAX_MATCH$1 - var scan_end1 = _win[scan + best_len - 1] - var scan_end = _win[scan + best_len] - /* The code is optimized for HASH_BITS >= 8 and MAX_MATCH-2 multiple of 16. - * It is easy to get rid of this optimization if necessary. - */ - // Assert(s->hash_bits >= 8 && MAX_MATCH == 258, "Code too clever"); - - /* Do not waste too much time if we already have a good match: */ - - if (s.prev_length >= s.good_match) { - chain_length >>= 2 - } - /* Do not look for matches beyond the end of the input. This is necessary - * to make deflate deterministic. - */ - - if (nice_match > s.lookahead) { - nice_match = s.lookahead - } // Assert((ulg)s->strstart <= s->window_size-MIN_LOOKAHEAD, "need lookahead"); - - do { - // Assert(cur_match < s->strstart, "no future"); - match = cur_match - /* Skip to next match if the match length cannot increase - * or if the match length is less than 2. Note that the checks below - * for insufficient lookahead only occur occasionally for performance - * reasons. Therefore uninitialized memory will be accessed, and - * conditional jumps will be made that depend on those values. - * However the length of the match is limited to the lookahead, so - * the output of deflate is not affected by the uninitialized values. - */ - - if ( - _win[match + best_len] !== scan_end || - _win[match + best_len - 1] !== scan_end1 || - _win[match] !== _win[scan] || - _win[++match] !== _win[scan + 1] - ) { - continue - } - /* The check at best_len-1 can be removed because it will be made - * again later. (This heuristic is not always a win.) - * It is not necessary to compare scan[2] and match[2] since they - * are always equal when the other bytes match, given that - * the hash keys are equal and that HASH_BITS >= 8. - */ - - scan += 2 - match++ // Assert(*scan == *match, "match[2]?"); - - /* We check for insufficient lookahead only every 8th comparison; - * the 256th check will be made at strstart+258. 
- */ - - do { - /*jshint noempty:false*/ - } while ( - _win[++scan] === _win[++match] && - _win[++scan] === _win[++match] && - _win[++scan] === _win[++match] && - _win[++scan] === _win[++match] && - _win[++scan] === _win[++match] && - _win[++scan] === _win[++match] && - _win[++scan] === _win[++match] && - _win[++scan] === _win[++match] && - scan < strend - ) // Assert(scan <= s->window+(unsigned)(s->window_size-1), "wild scan"); - - len = MAX_MATCH$1 - (strend - scan) - scan = strend - MAX_MATCH$1 - - if (len > best_len) { - s.match_start = cur_match - best_len = len - - if (len >= nice_match) { - break - } - - scan_end1 = _win[scan + best_len - 1] - scan_end = _win[scan + best_len] - } - } while ((cur_match = prev[cur_match & wmask]) > limit && --chain_length !== 0) - - if (best_len <= s.lookahead) { - return best_len - } - - return s.lookahead - } - /* =========================================================================== - * Fill the window when the lookahead becomes insufficient. - * Updates strstart and lookahead. - * - * IN assertion: lookahead < MIN_LOOKAHEAD - * OUT assertions: strstart <= window_size-MIN_LOOKAHEAD - * At least one byte has been read, or avail_in == 0; reads are - * performed for at least two bytes (required for the zip translate_eol - * option -- not supported here). - */ - - var fill_window = function fill_window(s) { - var _w_size = s.w_size - var p, n, m, more, str //Assert(s->lookahead < MIN_LOOKAHEAD, "already enough lookahead"); - - do { - more = s.window_size - s.lookahead - s.strstart // JS ints have 32 bit, block below not needed - - /* Deal with !@#$% 64K limit: */ - //if (sizeof(int) <= 2) { - // if (more == 0 && s->strstart == 0 && s->lookahead == 0) { - // more = wsize; - // - // } else if (more == (unsigned)(-1)) { - // /* Very unlikely, but possible on 16 bit machine if - // * strstart == 0 && lookahead == 1 (input done a byte at time) - // */ - // more--; - // } - //} - - /* If the window is almost full and there is insufficient lookahead, - * move the upper half to the lower one to make room in the upper half. - */ - - if (s.strstart >= _w_size + (_w_size - MIN_LOOKAHEAD)) { - s.window.set(s.window.subarray(_w_size, _w_size + _w_size), 0) - s.match_start -= _w_size - s.strstart -= _w_size - /* we now have strstart >= MAX_DIST */ - - s.block_start -= _w_size - /* Slide the hash table (could be avoided with 32 bit values - at the expense of memory usage). We slide even when level == 0 - to keep the hash table consistent if we switch back to level > 0 - later. (Using level 0 permanently is not an optimal usage of - zlib, so we don't care about this pathological case.) - */ - - n = s.hash_size - p = n - - do { - m = s.head[--p] - s.head[p] = m >= _w_size ? m - _w_size : 0 - } while (--n) - - n = _w_size - p = n - - do { - m = s.prev[--p] - s.prev[p] = m >= _w_size ? m - _w_size : 0 - /* If n is not on any hash chain, prev[n] is garbage but - * its value will never be used. - */ - } while (--n) - - more += _w_size - } - - if (s.strm.avail_in === 0) { - break - } - /* If there was no sliding: - * strstart <= WSIZE+MAX_DIST-1 && lookahead <= MIN_LOOKAHEAD - 1 && - * more == window_size - lookahead - strstart - * => more >= window_size - (MIN_LOOKAHEAD-1 + WSIZE + MAX_DIST-1) - * => more >= window_size - 2*WSIZE + 2 - * In the BIG_MEM or MMAP case (not yet supported), - * window_size == input_size + MIN_LOOKAHEAD && - * strstart + s->lookahead <= input_size => more >= MIN_LOOKAHEAD. - * Otherwise, window_size == 2*WSIZE so more >= 2. 
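// --- Illustrative sketch (editor's addition): the core comparison that
// longest_match above performs for a single hash-chain candidate, without
// the scan_end early-exit trick or the unrolled 8-way loop. matchLength is
// a hypothetical standalone helper.
function matchLength(win, pos, candidate, maxMatch) {
  var len = 0
  while (len < maxMatch && pos + len < win.length && win[candidate + len] === win[pos + len]) {
    len++
  }
  return len
}
var win = Uint8Array.from('abcabcabc', function (c) {
  return c.charCodeAt(0)
})
// The string starting at index 6 matches the earlier one at index 3 for 3 bytes:
console.log(matchLength(win, 6, 3, 258)) // 3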
- * If there was sliding, more >= WSIZE. So in all cases, more >= 2. - */ - //Assert(more >= 2, "more < 2"); - - n = read_buf(s.strm, s.window, s.strstart + s.lookahead, more) - s.lookahead += n - /* Initialize the hash value now that we have some input: */ - - if (s.lookahead + s.insert >= MIN_MATCH$1) { - str = s.strstart - s.insert - s.ins_h = s.window[str] - /* UPDATE_HASH(s, s->ins_h, s->window[str + 1]); */ - - s.ins_h = HASH(s, s.ins_h, s.window[str + 1]) //#if MIN_MATCH != 3 - // Call update_hash() MIN_MATCH-3 more times - //#endif - - while (s.insert) { - /* UPDATE_HASH(s, s->ins_h, s->window[str + MIN_MATCH-1]); */ - s.ins_h = HASH(s, s.ins_h, s.window[str + MIN_MATCH$1 - 1]) - s.prev[str & s.w_mask] = s.head[s.ins_h] - s.head[s.ins_h] = str - str++ - s.insert-- - - if (s.lookahead + s.insert < MIN_MATCH$1) { - break - } - } - } - /* If the whole input has less than MIN_MATCH bytes, ins_h is garbage, - * but this is not important since only literal bytes will be emitted. - */ - } while (s.lookahead < MIN_LOOKAHEAD && s.strm.avail_in !== 0) - /* If the WIN_INIT bytes after the end of the current data have never been - * written, then zero those bytes in order to avoid memory check reports of - * the use of uninitialized (or uninitialised as Julian writes) bytes by - * the longest match routines. Update the high water mark for the next - * time through here. WIN_INIT is set to MAX_MATCH since the longest match - * routines allow scanning to strstart + MAX_MATCH, ignoring lookahead. - */ - // if (s.high_water < s.window_size) { - // const curr = s.strstart + s.lookahead; - // let init = 0; - // - // if (s.high_water < curr) { - // /* Previous high water mark below current data -- zero WIN_INIT - // * bytes or up to end of window, whichever is less. - // */ - // init = s.window_size - curr; - // if (init > WIN_INIT) - // init = WIN_INIT; - // zmemzero(s->window + curr, (unsigned)init); - // s->high_water = curr + init; - // } - // else if (s->high_water < (ulg)curr + WIN_INIT) { - // /* High water mark at or above current data, but below current data - // * plus WIN_INIT -- zero out to current data plus WIN_INIT, or up - // * to end of window, whichever is less. - // */ - // init = (ulg)curr + WIN_INIT - s->high_water; - // if (init > s->window_size - s->high_water) - // init = s->window_size - s->high_water; - // zmemzero(s->window + s->high_water, (unsigned)init); - // s->high_water += init; - // } - // } - // - // Assert((ulg)s->strstart <= s->window_size - MIN_LOOKAHEAD, - // "not enough room for search"); - } - /* =========================================================================== - * Copy without compression as much as possible from the input stream, return - * the current block state. - * This function does not insert new strings in the dictionary since - * uncompressible data is probably not useful. This function is used - * only for the level=0 compression option. - * NOTE: this function should be optimized to avoid extra copying from - * window to pending_buf. 
- */ - - var deflate_stored = function deflate_stored(s, flush) { - /* Stored blocks are limited to 0xffff bytes, pending_buf is limited - * to pending_buf_size, and each stored block has a 5 byte header: - */ - var max_block_size = 0xffff - - if (max_block_size > s.pending_buf_size - 5) { - max_block_size = s.pending_buf_size - 5 - } - /* Copy as much as possible from input to output: */ - - for (;;) { - /* Fill the window as much as possible: */ - if (s.lookahead <= 1) { - //Assert(s->strstart < s->w_size+MAX_DIST(s) || - // s->block_start >= (long)s->w_size, "slide too late"); - // if (!(s.strstart < s.w_size + (s.w_size - MIN_LOOKAHEAD) || - // s.block_start >= s.w_size)) { - // throw new Error("slide too late"); - // } - fill_window(s) - - if (s.lookahead === 0 && flush === Z_NO_FLUSH) { - return BS_NEED_MORE - } - - if (s.lookahead === 0) { - break - } - /* flush the current block */ - } //Assert(s->block_start >= 0L, "block gone"); - // if (s.block_start < 0) throw new Error("block gone"); - - s.strstart += s.lookahead - s.lookahead = 0 - /* Emit a stored block if pending_buf will be full: */ - - var max_start = s.block_start + max_block_size - - if (s.strstart === 0 || s.strstart >= max_start) { - /* strstart == 0 is possible when wraparound on 16-bit machine */ - s.lookahead = s.strstart - max_start - s.strstart = max_start - /*** FLUSH_BLOCK(s, 0); ***/ - - flush_block_only(s, false) - - if (s.strm.avail_out === 0) { - return BS_NEED_MORE - } - /***/ - } - /* Flush if we may have to slide, otherwise block_start may become - * negative and the data will be gone: - */ - - if (s.strstart - s.block_start >= s.w_size - MIN_LOOKAHEAD) { - /*** FLUSH_BLOCK(s, 0); ***/ - flush_block_only(s, false) - - if (s.strm.avail_out === 0) { - return BS_NEED_MORE - } - /***/ - } - } - - s.insert = 0 - - if (flush === Z_FINISH) { - /*** FLUSH_BLOCK(s, 1); ***/ - flush_block_only(s, true) - - if (s.strm.avail_out === 0) { - return BS_FINISH_STARTED - } - /***/ - - return BS_FINISH_DONE - } - - if (s.strstart > s.block_start) { - /*** FLUSH_BLOCK(s, 0); ***/ - flush_block_only(s, false) - - if (s.strm.avail_out === 0) { - return BS_NEED_MORE - } - /***/ - } - - return BS_NEED_MORE - } - /* =========================================================================== - * Compress as much as possible from the input stream, return the current - * block state. - * This function does not perform lazy evaluation of matches and inserts - * new strings in the dictionary only for unmatched strings or for short - * matches. It is used only for the fast compression options. - */ - - var deflate_fast = function deflate_fast(s, flush) { - var hash_head - /* head of the hash chain */ - - var bflush - /* set if current block must be flushed */ - - for (;;) { - /* Make sure that we always have enough lookahead, except - * at the end of the input file. We need MAX_MATCH bytes - * for the next match, plus MIN_MATCH bytes to insert the - * string following the next match. - */ - if (s.lookahead < MIN_LOOKAHEAD) { - fill_window(s) - - if (s.lookahead < MIN_LOOKAHEAD && flush === Z_NO_FLUSH) { - return BS_NEED_MORE - } - - if (s.lookahead === 0) { - break - /* flush the current block */ - } - } - /* Insert the string window[strstart .. 
strstart+2] in the - * dictionary, and set hash_head to the head of the hash chain: - */ - - hash_head = 0 - /*NIL*/ - - if (s.lookahead >= MIN_MATCH$1) { - /*** INSERT_STRING(s, s.strstart, hash_head); ***/ - s.ins_h = HASH(s, s.ins_h, s.window[s.strstart + MIN_MATCH$1 - 1]) - hash_head = s.prev[s.strstart & s.w_mask] = s.head[s.ins_h] - s.head[s.ins_h] = s.strstart - /***/ - } - /* Find the longest match, discarding those <= prev_length. - * At this point we have always match_length < MIN_MATCH - */ - - if ( - hash_head !== 0 && - /*NIL*/ - s.strstart - hash_head <= s.w_size - MIN_LOOKAHEAD - ) { - /* To simplify the code, we prevent matches with the string - * of window index 0 (in particular we have to avoid a match - * of the string with itself at the start of the input file). - */ - s.match_length = longest_match(s, hash_head) - /* longest_match() sets match_start */ - } - - if (s.match_length >= MIN_MATCH$1) { - // check_match(s, s.strstart, s.match_start, s.match_length); // for debug only - - /*** _tr_tally_dist(s, s.strstart - s.match_start, - s.match_length - MIN_MATCH, bflush); ***/ - bflush = _tr_tally$1(s, s.strstart - s.match_start, s.match_length - MIN_MATCH$1) - s.lookahead -= s.match_length - /* Insert new strings in the hash table only if the match length - * is not too large. This saves time but degrades compression. - */ - - if ( - s.match_length <= s.max_lazy_match && - /*max_insert_length*/ - s.lookahead >= MIN_MATCH$1 - ) { - s.match_length-- - /* string at strstart already in table */ - - do { - s.strstart++ - /*** INSERT_STRING(s, s.strstart, hash_head); ***/ - - s.ins_h = HASH(s, s.ins_h, s.window[s.strstart + MIN_MATCH$1 - 1]) - hash_head = s.prev[s.strstart & s.w_mask] = s.head[s.ins_h] - s.head[s.ins_h] = s.strstart - /***/ - - /* strstart never exceeds WSIZE-MAX_MATCH, so there are - * always MIN_MATCH bytes ahead. - */ - } while (--s.match_length !== 0) - - s.strstart++ - } else { - s.strstart += s.match_length - s.match_length = 0 - s.ins_h = s.window[s.strstart] - /* UPDATE_HASH(s, s.ins_h, s.window[s.strstart+1]); */ - - s.ins_h = HASH(s, s.ins_h, s.window[s.strstart + 1]) //#if MIN_MATCH != 3 - // Call UPDATE_HASH() MIN_MATCH-3 more times - //#endif - - /* If lookahead < MIN_MATCH, ins_h is garbage, but it does not - * matter since it will be recomputed at next deflate call. - */ - } - } else { - /* No match, output a literal byte */ - //Tracevv((stderr,"%c", s.window[s.strstart])); - - /*** _tr_tally_lit(s, s.window[s.strstart], bflush); ***/ - bflush = _tr_tally$1(s, 0, s.window[s.strstart]) - s.lookahead-- - s.strstart++ - } - - if (bflush) { - /*** FLUSH_BLOCK(s, 0); ***/ - flush_block_only(s, false) - - if (s.strm.avail_out === 0) { - return BS_NEED_MORE - } - /***/ - } - } - - s.insert = s.strstart < MIN_MATCH$1 - 1 ? s.strstart : MIN_MATCH$1 - 1 - - if (flush === Z_FINISH) { - /*** FLUSH_BLOCK(s, 1); ***/ - flush_block_only(s, true) - - if (s.strm.avail_out === 0) { - return BS_FINISH_STARTED - } - /***/ - - return BS_FINISH_DONE - } - - if (s.last_lit) { - /*** FLUSH_BLOCK(s, 0); ***/ - flush_block_only(s, false) - - if (s.strm.avail_out === 0) { - return BS_NEED_MORE - } - /***/ - } - - return BS_BLOCK_DONE - } - /* =========================================================================== - * Same as above, but achieves better compression. We use a lazy - * evaluation for matches: a match is finally adopted only if there is - * no better match at the next window position. 
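// --- Illustrative sketch (editor's addition): deflate_fast above is greedy
// LZ77 -- take the first adequate match, never reconsider. This hypothetical
// miniature emits literal bytes and [distance, length] pairs; real deflate
// additionally Huffman-codes the output and bounds the search via hash chains.
function greedyLz(bytes, minMatch, maxDist) {
  var out = []
  var i = 0
  while (i < bytes.length) {
    var bestLen = 0
    var bestDist = 0
    for (var j = Math.max(0, i - maxDist); j < i; j++) {
      var len = 0
      while (i + len < bytes.length && bytes[j + len] === bytes[i + len]) {
        len++
      }
      if (len > bestLen) {
        bestLen = len
        bestDist = i - j
      }
    }
    if (bestLen >= minMatch) {
      out.push([bestDist, bestLen]) // like _tr_tally(s, dist, len - MIN_MATCH)
      i += bestLen
    } else {
      out.push(bytes[i]) // like _tr_tally(s, 0, literal)
      i++
    }
  }
  return out
}
var ab = Uint8Array.from('ababab', function (c) {
  return c.charCodeAt(0)
})
console.log(greedyLz(ab, 3, 32768)) // [97, 98, [2, 4]]
// Note the match overlaps its own source (distance 2, length 4), which
// DEFLATE permits.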
- */ - - var deflate_slow = function deflate_slow(s, flush) { - var hash_head - /* head of hash chain */ - - var bflush - /* set if current block must be flushed */ - - var max_insert - /* Process the input block. */ - - for (;;) { - /* Make sure that we always have enough lookahead, except - * at the end of the input file. We need MAX_MATCH bytes - * for the next match, plus MIN_MATCH bytes to insert the - * string following the next match. - */ - if (s.lookahead < MIN_LOOKAHEAD) { - fill_window(s) - - if (s.lookahead < MIN_LOOKAHEAD && flush === Z_NO_FLUSH) { - return BS_NEED_MORE - } - - if (s.lookahead === 0) { - break - } - /* flush the current block */ - } - /* Insert the string window[strstart .. strstart+2] in the - * dictionary, and set hash_head to the head of the hash chain: - */ - - hash_head = 0 - /*NIL*/ - - if (s.lookahead >= MIN_MATCH$1) { - /*** INSERT_STRING(s, s.strstart, hash_head); ***/ - s.ins_h = HASH(s, s.ins_h, s.window[s.strstart + MIN_MATCH$1 - 1]) - hash_head = s.prev[s.strstart & s.w_mask] = s.head[s.ins_h] - s.head[s.ins_h] = s.strstart - /***/ - } - /* Find the longest match, discarding those <= prev_length. - */ - - s.prev_length = s.match_length - s.prev_match = s.match_start - s.match_length = MIN_MATCH$1 - 1 - - if ( - hash_head !== 0 && - /*NIL*/ - s.prev_length < s.max_lazy_match && - s.strstart - hash_head <= s.w_size - MIN_LOOKAHEAD - /*MAX_DIST(s)*/ - ) { - /* To simplify the code, we prevent matches with the string - * of window index 0 (in particular we have to avoid a match - * of the string with itself at the start of the input file). - */ - s.match_length = longest_match(s, hash_head) - /* longest_match() sets match_start */ - - if ( - s.match_length <= 5 && - (s.strategy === Z_FILTERED || (s.match_length === MIN_MATCH$1 && s.strstart - s.match_start > 4096)) - /*TOO_FAR*/ - ) { - /* If prev_match is also MIN_MATCH, match_start is garbage - * but we will ignore the current match anyway. - */ - s.match_length = MIN_MATCH$1 - 1 - } - } - /* If there was a match at the previous step and the current - * match is not better, output the previous match: - */ - - if (s.prev_length >= MIN_MATCH$1 && s.match_length <= s.prev_length) { - max_insert = s.strstart + s.lookahead - MIN_MATCH$1 - /* Do not insert strings in hash table beyond this. */ - //check_match(s, s.strstart-1, s.prev_match, s.prev_length); - - /***_tr_tally_dist(s, s.strstart - 1 - s.prev_match, - s.prev_length - MIN_MATCH, bflush);***/ - - bflush = _tr_tally$1(s, s.strstart - 1 - s.prev_match, s.prev_length - MIN_MATCH$1) - /* Insert in hash table all strings up to the end of the match. - * strstart-1 and strstart are already inserted. If there is not - * enough lookahead, the last two strings are not inserted in - * the hash table. - */ - - s.lookahead -= s.prev_length - 1 - s.prev_length -= 2 - - do { - if (++s.strstart <= max_insert) { - /*** INSERT_STRING(s, s.strstart, hash_head); ***/ - s.ins_h = HASH(s, s.ins_h, s.window[s.strstart + MIN_MATCH$1 - 1]) - hash_head = s.prev[s.strstart & s.w_mask] = s.head[s.ins_h] - s.head[s.ins_h] = s.strstart - /***/ - } - } while (--s.prev_length !== 0) - - s.match_available = 0 - s.match_length = MIN_MATCH$1 - 1 - s.strstart++ - - if (bflush) { - /*** FLUSH_BLOCK(s, 0); ***/ - flush_block_only(s, false) - - if (s.strm.avail_out === 0) { - return BS_NEED_MORE - } - /***/ - } - } else if (s.match_available) { - /* If there was no match at the previous position, output a - * single literal. 
If there was a match but the current match - * is longer, truncate the previous match to a single literal. - */ - //Tracevv((stderr,"%c", s->window[s->strstart-1])); - - /*** _tr_tally_lit(s, s.window[s.strstart-1], bflush); ***/ - bflush = _tr_tally$1(s, 0, s.window[s.strstart - 1]) - - if (bflush) { - /*** FLUSH_BLOCK_ONLY(s, 0) ***/ - flush_block_only(s, false) - /***/ - } - - s.strstart++ - s.lookahead-- - - if (s.strm.avail_out === 0) { - return BS_NEED_MORE - } - } else { - /* There is no previous match to compare with, wait for - * the next step to decide. - */ - s.match_available = 1 - s.strstart++ - s.lookahead-- - } - } //Assert (flush != Z_NO_FLUSH, "no flush?"); - - if (s.match_available) { - //Tracevv((stderr,"%c", s->window[s->strstart-1])); - - /*** _tr_tally_lit(s, s.window[s.strstart-1], bflush); ***/ - bflush = _tr_tally$1(s, 0, s.window[s.strstart - 1]) - s.match_available = 0 - } - - s.insert = s.strstart < MIN_MATCH$1 - 1 ? s.strstart : MIN_MATCH$1 - 1 - - if (flush === Z_FINISH) { - /*** FLUSH_BLOCK(s, 1); ***/ - flush_block_only(s, true) - - if (s.strm.avail_out === 0) { - return BS_FINISH_STARTED - } - /***/ - - return BS_FINISH_DONE - } - - if (s.last_lit) { - /*** FLUSH_BLOCK(s, 0); ***/ - flush_block_only(s, false) - - if (s.strm.avail_out === 0) { - return BS_NEED_MORE - } - /***/ - } - - return BS_BLOCK_DONE - } - /* =========================================================================== - * For Z_RLE, simply look for runs of bytes, generate matches only of distance - * one. Do not maintain a hash table. (It will be regenerated if this run of - * deflate switches away from Z_RLE.) - */ - - var deflate_rle = function deflate_rle(s, flush) { - var bflush - /* set if current block must be flushed */ - - var prev - /* byte at distance one to match */ - - var scan, strend - /* scan goes up to strend for length of run */ - - var _win = s.window - - for (;;) { - /* Make sure that we always have enough lookahead, except - * at the end of the input file. We need MAX_MATCH bytes - * for the longest run, plus one for the unrolled loop. 
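// --- Illustrative sketch (editor's addition): the lazy heuristic deflate_slow
// above layers on top of the greedy search -- before committing to a match at
// position i, peek at i+1; if that match would be longer, emit one literal and
// defer. bestMatch and lazyLz are hypothetical standalone helpers.
function bestMatch(bytes, i, maxDist) {
  var bestLen = 0
  var bestDist = 0
  for (var j = Math.max(0, i - maxDist); j < i; j++) {
    var len = 0
    while (i + len < bytes.length && bytes[j + len] === bytes[i + len]) {
      len++
    }
    if (len > bestLen) {
      bestLen = len
      bestDist = i - j
    }
  }
  return { len: bestLen, dist: bestDist }
}
function lazyLz(bytes, minMatch, maxDist) {
  var out = []
  var i = 0
  while (i < bytes.length) {
    var cur = bestMatch(bytes, i, maxDist)
    var next = i + 1 < bytes.length ? bestMatch(bytes, i + 1, maxDist) : { len: 0 }
    if (cur.len >= minMatch && cur.len >= next.len) {
      out.push([cur.dist, cur.len])
      i += cur.len
    } else {
      out.push(bytes[i]) // defer: the next position may hold a longer match
      i++
    }
  }
  return out
}
var lz = Uint8Array.from('abcbcdeabcde', function (c) {
  return c.charCodeAt(0)
})
// At index 7 a greedy coder takes the 3-byte match [7, 3] ("abc") and is then
// stuck with two trailing literals; the lazy coder emits the literal 'a'
// instead and captures the longer match [5, 4] ("bcde") at index 8:
console.log(lazyLz(lz, 3, 32768)) // [97, 98, 99, 98, 99, 100, 101, 97, [5, 4]]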
- */ - if (s.lookahead <= MAX_MATCH$1) { - fill_window(s) - - if (s.lookahead <= MAX_MATCH$1 && flush === Z_NO_FLUSH) { - return BS_NEED_MORE - } - - if (s.lookahead === 0) { - break - } - /* flush the current block */ - } - /* See how many times the previous byte repeats */ - - s.match_length = 0 - - if (s.lookahead >= MIN_MATCH$1 && s.strstart > 0) { - scan = s.strstart - 1 - prev = _win[scan] - - if (prev === _win[++scan] && prev === _win[++scan] && prev === _win[++scan]) { - strend = s.strstart + MAX_MATCH$1 - - do { - /*jshint noempty:false*/ - } while ( - prev === _win[++scan] && - prev === _win[++scan] && - prev === _win[++scan] && - prev === _win[++scan] && - prev === _win[++scan] && - prev === _win[++scan] && - prev === _win[++scan] && - prev === _win[++scan] && - scan < strend - ) - - s.match_length = MAX_MATCH$1 - (strend - scan) - - if (s.match_length > s.lookahead) { - s.match_length = s.lookahead - } - } //Assert(scan <= s->window+(uInt)(s->window_size-1), "wild scan"); - } - /* Emit match if have run of MIN_MATCH or longer, else emit literal */ - - if (s.match_length >= MIN_MATCH$1) { - //check_match(s, s.strstart, s.strstart - 1, s.match_length); - - /*** _tr_tally_dist(s, 1, s.match_length - MIN_MATCH, bflush); ***/ - bflush = _tr_tally$1(s, 1, s.match_length - MIN_MATCH$1) - s.lookahead -= s.match_length - s.strstart += s.match_length - s.match_length = 0 - } else { - /* No match, output a literal byte */ - //Tracevv((stderr,"%c", s->window[s->strstart])); - - /*** _tr_tally_lit(s, s.window[s.strstart], bflush); ***/ - bflush = _tr_tally$1(s, 0, s.window[s.strstart]) - s.lookahead-- - s.strstart++ - } - - if (bflush) { - /*** FLUSH_BLOCK(s, 0); ***/ - flush_block_only(s, false) - - if (s.strm.avail_out === 0) { - return BS_NEED_MORE - } - /***/ - } - } - - s.insert = 0 - - if (flush === Z_FINISH) { - /*** FLUSH_BLOCK(s, 1); ***/ - flush_block_only(s, true) - - if (s.strm.avail_out === 0) { - return BS_FINISH_STARTED - } - /***/ - - return BS_FINISH_DONE - } - - if (s.last_lit) { - /*** FLUSH_BLOCK(s, 0); ***/ - flush_block_only(s, false) - - if (s.strm.avail_out === 0) { - return BS_NEED_MORE - } - /***/ - } - - return BS_BLOCK_DONE - } - /* =========================================================================== - * For Z_HUFFMAN_ONLY, do not look for matches. Do not maintain a hash table. - * (It will be regenerated if this run of deflate switches away from Huffman.) - */ - - var deflate_huff = function deflate_huff(s, flush) { - var bflush - /* set if current block must be flushed */ - - for (;;) { - /* Make sure that we have a literal to write. 
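// --- Illustrative sketch (editor's addition): with Z_RLE, deflate_rle above
// only ever emits distance-1 matches against the previous byte, i.e. plain
// run-length encoding dressed up as LZ77. rleEncode is a hypothetical helper.
function rleEncode(bytes, maxMatch) {
  var out = []
  var i = 0
  while (i < bytes.length) {
    var run = 0
    while (i > 0 && i + run < bytes.length && run < maxMatch && bytes[i + run] === bytes[i - 1]) {
      run++ // count how many times the previous byte repeats
    }
    if (run >= 3) {
      out.push([1, run]) // distance-1 match, like _tr_tally(s, 1, run - MIN_MATCH)
      i += run
    } else {
      out.push(bytes[i])
      i++
    }
  }
  return out
}
console.log(rleEncode([65, 65, 65, 65, 65], 258)) // [65, [1, 4]]
// one literal 'A', then "repeat the previous byte 4 times".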
*/ - if (s.lookahead === 0) { - fill_window(s) - - if (s.lookahead === 0) { - if (flush === Z_NO_FLUSH) { - return BS_NEED_MORE - } - - break - /* flush the current block */ - } - } - /* Output a literal byte */ - - s.match_length = 0 //Tracevv((stderr,"%c", s->window[s->strstart])); - - /*** _tr_tally_lit(s, s.window[s.strstart], bflush); ***/ - - bflush = _tr_tally$1(s, 0, s.window[s.strstart]) - s.lookahead-- - s.strstart++ - - if (bflush) { - /*** FLUSH_BLOCK(s, 0); ***/ - flush_block_only(s, false) - - if (s.strm.avail_out === 0) { - return BS_NEED_MORE - } - /***/ - } - } - - s.insert = 0 - - if (flush === Z_FINISH) { - /*** FLUSH_BLOCK(s, 1); ***/ - flush_block_only(s, true) - - if (s.strm.avail_out === 0) { - return BS_FINISH_STARTED - } - /***/ - - return BS_FINISH_DONE - } - - if (s.last_lit) { - /*** FLUSH_BLOCK(s, 0); ***/ - flush_block_only(s, false) - - if (s.strm.avail_out === 0) { - return BS_NEED_MORE - } - /***/ - } - - return BS_BLOCK_DONE - } - /* Values for max_lazy_match, good_match and max_chain_length, depending on - * the desired pack level (0..9). The values given below have been tuned to - * exclude worst case performance for pathological files. Better values may be - * found for specific files. - */ - - function Config(good_length, max_lazy, nice_length, max_chain, func) { - this.good_length = good_length - this.max_lazy = max_lazy - this.nice_length = nice_length - this.max_chain = max_chain - this.func = func - } - - var configuration_table = [ - /* good lazy nice chain */ - new Config(0, 0, 0, 0, deflate_stored), - /* 0 store only */ - new Config(4, 4, 8, 4, deflate_fast), - /* 1 max speed, no lazy matches */ - new Config(4, 5, 16, 8, deflate_fast), - /* 2 */ - new Config(4, 6, 32, 32, deflate_fast), - /* 3 */ - new Config(4, 4, 16, 16, deflate_slow), - /* 4 lazy matches */ - new Config(8, 16, 32, 32, deflate_slow), - /* 5 */ - new Config(8, 16, 128, 128, deflate_slow), - /* 6 */ - new Config(8, 32, 128, 256, deflate_slow), - /* 7 */ - new Config(32, 128, 258, 1024, deflate_slow), - /* 8 */ - new Config(32, 258, 258, 4096, deflate_slow), - /* 9 max compression */ - ] - /* =========================================================================== - * Initialize the "longest match" routines for a new zlib stream - */ - - var lm_init = function lm_init(s) { - s.window_size = 2 * s.w_size - /*** CLEAR_HASH(s); ***/ - - zero$1(s.head) // Fill with NIL (= 0); - - /* Set the default configuration parameters: - */ - - s.max_lazy_match = configuration_table[s.level].max_lazy - s.good_match = configuration_table[s.level].good_length - s.nice_match = configuration_table[s.level].nice_length - s.max_chain_length = configuration_table[s.level].max_chain - s.strstart = 0 - s.block_start = 0 - s.lookahead = 0 - s.insert = 0 - s.match_length = s.prev_length = MIN_MATCH$1 - 1 - s.match_available = 0 - s.ins_h = 0 - } - - function DeflateState() { - this.strm = null - /* pointer back to this zlib stream */ - - this.status = 0 - /* as the name implies */ - - this.pending_buf = null - /* output still pending */ - - this.pending_buf_size = 0 - /* size of pending_buf */ - - this.pending_out = 0 - /* next pending byte to output to the stream */ - - this.pending = 0 - /* nb of bytes in the pending buffer */ - - this.wrap = 0 - /* bit 0 true for zlib, bit 1 true for gzip */ - - this.gzhead = null - /* gzip header information to write */ - - this.gzindex = 0 - /* where in extra, name, or comment */ - - this.method = Z_DEFLATED - /* can only be DEFLATED */ - - this.last_flush = -1 - 
/* value of flush param for previous deflate call */ - - this.w_size = 0 - /* LZ77 window size (32K by default) */ - - this.w_bits = 0 - /* log2(w_size) (8..16) */ - - this.w_mask = 0 - /* w_size - 1 */ - - this.window = null - /* Sliding window. Input bytes are read into the second half of the window, - * and move to the first half later to keep a dictionary of at least wSize - * bytes. With this organization, matches are limited to a distance of - * wSize-MAX_MATCH bytes, but this ensures that IO is always - * performed with a length multiple of the block size. - */ - - this.window_size = 0 - /* Actual size of window: 2*wSize, except when the user input buffer - * is directly used as sliding window. - */ - - this.prev = null - /* Link to older string with same hash index. To limit the size of this - * array to 64K, this link is maintained only for the last 32K strings. - * An index in this array is thus a window index modulo 32K. - */ - - this.head = null - /* Heads of the hash chains or NIL. */ - - this.ins_h = 0 - /* hash index of string to be inserted */ - - this.hash_size = 0 - /* number of elements in hash table */ - - this.hash_bits = 0 - /* log2(hash_size) */ - - this.hash_mask = 0 - /* hash_size-1 */ - - this.hash_shift = 0 - /* Number of bits by which ins_h must be shifted at each input - * step. It must be such that after MIN_MATCH steps, the oldest - * byte no longer takes part in the hash key, that is: - * hash_shift * MIN_MATCH >= hash_bits - */ - - this.block_start = 0 - /* Window position at the beginning of the current output block. Gets - * negative when the window is moved backwards. - */ - - this.match_length = 0 - /* length of best match */ - - this.prev_match = 0 - /* previous match */ - - this.match_available = 0 - /* set if previous match exists */ - - this.strstart = 0 - /* start of string to insert */ - - this.match_start = 0 - /* start of matching string */ - - this.lookahead = 0 - /* number of valid bytes ahead in window */ - - this.prev_length = 0 - /* Length of the best match at previous step. Matches not greater than this - * are discarded. This is used in the lazy match evaluation. - */ - - this.max_chain_length = 0 - /* To speed up deflation, hash chains are never searched beyond this - * length. A higher limit improves compression ratio but degrades the - * speed. - */ - - this.max_lazy_match = 0 - /* Attempt to find a better match only when the current match is strictly - * smaller than this value. This mechanism is used only for compression - * levels >= 4. - */ - // That's alias to max_lazy_match, don't use directly - //this.max_insert_length = 0; - - /* Insert new strings in the hash table only if the match length is not - * greater than this length. This saves time but degrades compression. - * max_insert_length is used only for compression levels <= 3. 
- */ - - this.level = 0 - /* compression level (1..9) */ - - this.strategy = 0 - /* favor or force Huffman coding*/ - - this.good_match = 0 - /* Use a faster search when the previous match is longer than this */ - - this.nice_match = 0 - /* Stop searching when current match exceeds this */ - - /* used by trees.c: */ - - /* Didn't use ct_data typedef below to suppress compiler warning */ - // struct ct_data_s dyn_ltree[HEAP_SIZE]; /* literal and length tree */ - // struct ct_data_s dyn_dtree[2*D_CODES+1]; /* distance tree */ - // struct ct_data_s bl_tree[2*BL_CODES+1]; /* Huffman tree for bit lengths */ - // Use flat array of DOUBLE size, with interleaved fata, - // because JS does not support effective - - this.dyn_ltree = new Uint16Array(HEAP_SIZE$1 * 2) - this.dyn_dtree = new Uint16Array((2 * D_CODES$1 + 1) * 2) - this.bl_tree = new Uint16Array((2 * BL_CODES$1 + 1) * 2) - zero$1(this.dyn_ltree) - zero$1(this.dyn_dtree) - zero$1(this.bl_tree) - this.l_desc = null - /* desc. for literal tree */ - - this.d_desc = null - /* desc. for distance tree */ - - this.bl_desc = null - /* desc. for bit length tree */ - //ush bl_count[MAX_BITS+1]; - - this.bl_count = new Uint16Array(MAX_BITS$1 + 1) - /* number of codes at each bit length for an optimal tree */ - //int heap[2*L_CODES+1]; /* heap used to build the Huffman trees */ - - this.heap = new Uint16Array(2 * L_CODES$1 + 1) - /* heap used to build the Huffman trees */ - - zero$1(this.heap) - this.heap_len = 0 - /* number of elements in the heap */ - - this.heap_max = 0 - /* element of largest frequency */ - - /* The sons of heap[n] are heap[2*n] and heap[2*n+1]. heap[0] is not used. - * The same heap array is used to build all trees. - */ - - this.depth = new Uint16Array(2 * L_CODES$1 + 1) //uch depth[2*L_CODES+1]; - - zero$1(this.depth) - /* Depth of each subtree used as tie breaker for trees of equal frequency - */ - - this.l_buf = 0 - /* buffer index for literals or lengths */ - - this.lit_bufsize = 0 - /* Size of match buffer for literals/lengths. There are 4 reasons for - * limiting lit_bufsize to 64K: - * - frequencies can be kept in 16 bit counters - * - if compression is not successful for the first block, all input - * data is still in the window so we can still emit a stored block even - * when input comes from standard input. (This can also be done for - * all blocks if lit_bufsize is not greater than 32K.) - * - if compression is not successful for a file smaller than 64K, we can - * even emit a stored file instead of a stored block (saving 5 bytes). - * This is applicable only for zip (not gzip or zlib). - * - creating new Huffman trees less frequently may not provide fast - * adaptation to changes in the input data statistics. (Take for - * example a binary file with poorly compressible code followed by - * a highly compressible string table.) Smaller buffer sizes give - * fast adaptation but have of course the overhead of transmitting - * trees more frequently. - * - I can't count above 4 - */ - - this.last_lit = 0 - /* running index in l_buf */ - - this.d_buf = 0 - /* Buffer index for distances. To simplify the code, d_buf and l_buf have - * the same number of elements. To use different lengths, an extra flag - * array would be necessary. 
- */ - - this.opt_len = 0 - /* bit length of current block with optimal trees */ - - this.static_len = 0 - /* bit length of current block with static trees */ - - this.matches = 0 - /* number of string matches in current block */ - - this.insert = 0 - /* bytes at end of window left to insert */ - - this.bi_buf = 0 - /* Output buffer. bits are inserted starting at the bottom (least - * significant bits). - */ - - this.bi_valid = 0 - /* Number of valid bits in bi_buf. All bits above the last valid bit - * are always zero. - */ - // Used for window memory init. We safely ignore it for JS. That makes - // sense only for pointers and memory check tools. - //this.high_water = 0; - - /* High water mark offset in window for initialized bytes -- bytes above - * this are set to zero in order to avoid memory check warnings when - * longest match routines access bytes past the input. This is then - * updated to the new high water mark. - */ - } - - var deflateResetKeep = function deflateResetKeep(strm) { - if (!strm || !strm.state) { - return err(strm, Z_STREAM_ERROR) - } - - strm.total_in = strm.total_out = 0 - strm.data_type = Z_UNKNOWN$1 - var s = strm.state - s.pending = 0 - s.pending_out = 0 - - if (s.wrap < 0) { - s.wrap = -s.wrap - /* was made negative by deflate(..., Z_FINISH); */ - } - - s.status = s.wrap ? INIT_STATE : BUSY_STATE - strm.adler = - s.wrap === 2 - ? 0 // crc32(0, Z_NULL, 0) - : 1 // adler32(0, Z_NULL, 0) - - s.last_flush = Z_NO_FLUSH - - _tr_init$1(s) - - return Z_OK - } - - var deflateReset = function deflateReset(strm) { - var ret = deflateResetKeep(strm) - - if (ret === Z_OK) { - lm_init(strm.state) - } - - return ret - } - - var deflateSetHeader = function deflateSetHeader(strm, head) { - if (!strm || !strm.state) { - return Z_STREAM_ERROR - } - - if (strm.state.wrap !== 2) { - return Z_STREAM_ERROR - } - - strm.state.gzhead = head - return Z_OK - } - - var deflateInit2 = function deflateInit2(strm, level, method, windowBits, memLevel, strategy) { - if (!strm) { - // === Z_NULL - return Z_STREAM_ERROR - } - - var wrap = 1 - - if (level === Z_DEFAULT_COMPRESSION) { - level = 6 - } - - if (windowBits < 0) { - /* suppress zlib wrapper */ - wrap = 0 - windowBits = -windowBits - } else if (windowBits > 15) { - wrap = 2 - /* write gzip wrapper instead */ - - windowBits -= 16 - } - - if ( - memLevel < 1 || - memLevel > MAX_MEM_LEVEL || - method !== Z_DEFLATED || - windowBits < 8 || - windowBits > 15 || - level < 0 || - level > 9 || - strategy < 0 || - strategy > Z_FIXED$1 - ) { - return err(strm, Z_STREAM_ERROR) - } - - if (windowBits === 8) { - windowBits = 9 - } - /* until 256-byte window bug fixed */ - - var s = new DeflateState() - strm.state = s - s.strm = strm - s.wrap = wrap - s.gzhead = null - s.w_bits = windowBits - s.w_size = 1 << s.w_bits - s.w_mask = s.w_size - 1 - s.hash_bits = memLevel + 7 - s.hash_size = 1 << s.hash_bits - s.hash_mask = s.hash_size - 1 - s.hash_shift = ~~((s.hash_bits + MIN_MATCH$1 - 1) / MIN_MATCH$1) - s.window = new Uint8Array(s.w_size * 2) - s.head = new Uint16Array(s.hash_size) - s.prev = new Uint16Array(s.w_size) // Don't need mem init magic for JS. 
- //s.high_water = 0; /* nothing written to s->window yet */ - - s.lit_bufsize = 1 << (memLevel + 6) - /* 16K elements by default */ - - s.pending_buf_size = s.lit_bufsize * 4 //overlay = (ushf *) ZALLOC(strm, s->lit_bufsize, sizeof(ush)+2); - //s->pending_buf = (uchf *) overlay; - - s.pending_buf = new Uint8Array(s.pending_buf_size) // It is offset from `s.pending_buf` (size is `s.lit_bufsize * 2`) - //s->d_buf = overlay + s->lit_bufsize/sizeof(ush); - - s.d_buf = 1 * s.lit_bufsize //s->l_buf = s->pending_buf + (1+sizeof(ush))*s->lit_bufsize; - - s.l_buf = (1 + 2) * s.lit_bufsize - s.level = level - s.strategy = strategy - s.method = method - return deflateReset(strm) - } - - var deflateInit = function deflateInit(strm, level) { - return deflateInit2(strm, level, Z_DEFLATED, MAX_WBITS, DEF_MEM_LEVEL, Z_DEFAULT_STRATEGY) - } - - var deflate = function deflate(strm, flush) { - var beg, val // for gzip header write only - - if (!strm || !strm.state || flush > Z_BLOCK || flush < 0) { - return strm ? err(strm, Z_STREAM_ERROR) : Z_STREAM_ERROR - } - - var s = strm.state - - if (!strm.output || (!strm.input && strm.avail_in !== 0) || (s.status === FINISH_STATE && flush !== Z_FINISH)) { - return err(strm, strm.avail_out === 0 ? Z_BUF_ERROR : Z_STREAM_ERROR) - } - - s.strm = strm - /* just in case */ - - var old_flush = s.last_flush - s.last_flush = flush - /* Write the header */ - - if (s.status === INIT_STATE) { - if (s.wrap === 2) { - // GZIP header - strm.adler = 0 //crc32(0L, Z_NULL, 0); - - put_byte(s, 31) - put_byte(s, 139) - put_byte(s, 8) - - if (!s.gzhead) { - // s->gzhead == Z_NULL - put_byte(s, 0) - put_byte(s, 0) - put_byte(s, 0) - put_byte(s, 0) - put_byte(s, 0) - put_byte(s, s.level === 9 ? 2 : s.strategy >= Z_HUFFMAN_ONLY || s.level < 2 ? 4 : 0) - put_byte(s, OS_CODE) - s.status = BUSY_STATE - } else { - put_byte( - s, - (s.gzhead.text ? 1 : 0) + - (s.gzhead.hcrc ? 2 : 0) + - (!s.gzhead.extra ? 0 : 4) + - (!s.gzhead.name ? 0 : 8) + - (!s.gzhead.comment ? 0 : 16) - ) - put_byte(s, s.gzhead.time & 0xff) - put_byte(s, (s.gzhead.time >> 8) & 0xff) - put_byte(s, (s.gzhead.time >> 16) & 0xff) - put_byte(s, (s.gzhead.time >> 24) & 0xff) - put_byte(s, s.level === 9 ? 2 : s.strategy >= Z_HUFFMAN_ONLY || s.level < 2 ? 
4 : 0) - put_byte(s, s.gzhead.os & 0xff) - - if (s.gzhead.extra && s.gzhead.extra.length) { - put_byte(s, s.gzhead.extra.length & 0xff) - put_byte(s, (s.gzhead.extra.length >> 8) & 0xff) - } - - if (s.gzhead.hcrc) { - strm.adler = crc32_1(strm.adler, s.pending_buf, s.pending, 0) - } - - s.gzindex = 0 - s.status = EXTRA_STATE - } - } // DEFLATE header - else { - var header = (Z_DEFLATED + ((s.w_bits - 8) << 4)) << 8 - var level_flags = -1 - - if (s.strategy >= Z_HUFFMAN_ONLY || s.level < 2) { - level_flags = 0 - } else if (s.level < 6) { - level_flags = 1 - } else if (s.level === 6) { - level_flags = 2 - } else { - level_flags = 3 - } - - header |= level_flags << 6 - - if (s.strstart !== 0) { - header |= PRESET_DICT - } - - header += 31 - (header % 31) - s.status = BUSY_STATE - putShortMSB(s, header) - /* Save the adler32 of the preset dictionary: */ - - if (s.strstart !== 0) { - putShortMSB(s, strm.adler >>> 16) - putShortMSB(s, strm.adler & 0xffff) - } - - strm.adler = 1 // adler32(0L, Z_NULL, 0); - } - } //#ifdef GZIP - - if (s.status === EXTRA_STATE) { - if ( - s.gzhead.extra - /* != Z_NULL*/ - ) { - beg = s.pending - /* start of bytes to update crc */ - - while (s.gzindex < (s.gzhead.extra.length & 0xffff)) { - if (s.pending === s.pending_buf_size) { - if (s.gzhead.hcrc && s.pending > beg) { - strm.adler = crc32_1(strm.adler, s.pending_buf, s.pending - beg, beg) - } - - flush_pending(strm) - beg = s.pending - - if (s.pending === s.pending_buf_size) { - break - } - } - - put_byte(s, s.gzhead.extra[s.gzindex] & 0xff) - s.gzindex++ - } - - if (s.gzhead.hcrc && s.pending > beg) { - strm.adler = crc32_1(strm.adler, s.pending_buf, s.pending - beg, beg) - } - - if (s.gzindex === s.gzhead.extra.length) { - s.gzindex = 0 - s.status = NAME_STATE - } - } else { - s.status = NAME_STATE - } - } - - if (s.status === NAME_STATE) { - if ( - s.gzhead.name - /* != Z_NULL*/ - ) { - beg = s.pending - /* start of bytes to update crc */ - //int val; - - do { - if (s.pending === s.pending_buf_size) { - if (s.gzhead.hcrc && s.pending > beg) { - strm.adler = crc32_1(strm.adler, s.pending_buf, s.pending - beg, beg) - } - - flush_pending(strm) - beg = s.pending - - if (s.pending === s.pending_buf_size) { - val = 1 - break - } - } // JS specific: little magic to add zero terminator to end of string - - if (s.gzindex < s.gzhead.name.length) { - val = s.gzhead.name.charCodeAt(s.gzindex++) & 0xff - } else { - val = 0 - } - - put_byte(s, val) - } while (val !== 0) - - if (s.gzhead.hcrc && s.pending > beg) { - strm.adler = crc32_1(strm.adler, s.pending_buf, s.pending - beg, beg) - } - - if (val === 0) { - s.gzindex = 0 - s.status = COMMENT_STATE - } - } else { - s.status = COMMENT_STATE - } - } - - if (s.status === COMMENT_STATE) { - if ( - s.gzhead.comment - /* != Z_NULL*/ - ) { - beg = s.pending - /* start of bytes to update crc */ - //int val; - - do { - if (s.pending === s.pending_buf_size) { - if (s.gzhead.hcrc && s.pending > beg) { - strm.adler = crc32_1(strm.adler, s.pending_buf, s.pending - beg, beg) - } - - flush_pending(strm) - beg = s.pending - - if (s.pending === s.pending_buf_size) { - val = 1 - break - } - } // JS specific: little magic to add zero terminator to end of string - - if (s.gzindex < s.gzhead.comment.length) { - val = s.gzhead.comment.charCodeAt(s.gzindex++) & 0xff - } else { - val = 0 - } - - put_byte(s, val) - } while (val !== 0) - - if (s.gzhead.hcrc && s.pending > beg) { - strm.adler = crc32_1(strm.adler, s.pending_buf, s.pending - beg, beg) - } - - if (val === 0) { - s.status = 
HCRC_STATE - } - } else { - s.status = HCRC_STATE - } - } - - if (s.status === HCRC_STATE) { - if (s.gzhead.hcrc) { - if (s.pending + 2 > s.pending_buf_size) { - flush_pending(strm) - } - - if (s.pending + 2 <= s.pending_buf_size) { - put_byte(s, strm.adler & 0xff) - put_byte(s, (strm.adler >> 8) & 0xff) - strm.adler = 0 //crc32(0L, Z_NULL, 0); - - s.status = BUSY_STATE - } - } else { - s.status = BUSY_STATE - } - } //#endif - - /* Flush as much pending output as possible */ - - if (s.pending !== 0) { - flush_pending(strm) - - if (strm.avail_out === 0) { - /* Since avail_out is 0, deflate will be called again with - * more output space, but possibly with both pending and - * avail_in equal to zero. There won't be anything to do, - * but this is not an error situation so make sure we - * return OK instead of BUF_ERROR at next call of deflate: - */ - s.last_flush = -1 - return Z_OK - } - /* Make sure there is something to do and avoid duplicate consecutive - * flushes. For repeated and useless calls with Z_FINISH, we keep - * returning Z_STREAM_END instead of Z_BUF_ERROR. - */ - } else if (strm.avail_in === 0 && rank(flush) <= rank(old_flush) && flush !== Z_FINISH) { - return err(strm, Z_BUF_ERROR) - } - /* User must not provide more input after the first FINISH: */ - - if (s.status === FINISH_STATE && strm.avail_in !== 0) { - return err(strm, Z_BUF_ERROR) - } - /* Start a new block or continue the current one. - */ - - if (strm.avail_in !== 0 || s.lookahead !== 0 || (flush !== Z_NO_FLUSH && s.status !== FINISH_STATE)) { - var bstate = - s.strategy === Z_HUFFMAN_ONLY - ? deflate_huff(s, flush) - : s.strategy === Z_RLE - ? deflate_rle(s, flush) - : configuration_table[s.level].func(s, flush) - - if (bstate === BS_FINISH_STARTED || bstate === BS_FINISH_DONE) { - s.status = FINISH_STATE - } - - if (bstate === BS_NEED_MORE || bstate === BS_FINISH_STARTED) { - if (strm.avail_out === 0) { - s.last_flush = -1 - /* avoid BUF_ERROR next call, see above */ - } - - return Z_OK - /* If flush != Z_NO_FLUSH && avail_out == 0, the next call - * of deflate should use the same flush parameter to make sure - * that the flush is complete. So we don't have to output an - * empty block here, this will be done at next call. This also - * ensures that for a very small output buffer, we emit at most - * one empty block. - */ - } - - if (bstate === BS_BLOCK_DONE) { - if (flush === Z_PARTIAL_FLUSH) { - _tr_align$1(s) - } else if (flush !== Z_BLOCK) { - /* FULL_FLUSH or SYNC_FLUSH */ - _tr_stored_block$1(s, 0, 0, false) - /* For a full flush, this empty block will be recognized - * as a special marker by inflate_sync(). 
- */ - - if (flush === Z_FULL_FLUSH) { - /*** CLEAR_HASH(s); ***/ - - /* forget history */ - zero$1(s.head) // Fill with NIL (= 0); - - if (s.lookahead === 0) { - s.strstart = 0 - s.block_start = 0 - s.insert = 0 - } - } - } - - flush_pending(strm) - - if (strm.avail_out === 0) { - s.last_flush = -1 - /* avoid BUF_ERROR at next call, see above */ - - return Z_OK - } - } - } //Assert(strm->avail_out > 0, "bug2"); - //if (strm.avail_out <= 0) { throw new Error("bug2");} - - if (flush !== Z_FINISH) { - return Z_OK - } - - if (s.wrap <= 0) { - return Z_STREAM_END - } - /* Write the trailer */ - - if (s.wrap === 2) { - put_byte(s, strm.adler & 0xff) - put_byte(s, (strm.adler >> 8) & 0xff) - put_byte(s, (strm.adler >> 16) & 0xff) - put_byte(s, (strm.adler >> 24) & 0xff) - put_byte(s, strm.total_in & 0xff) - put_byte(s, (strm.total_in >> 8) & 0xff) - put_byte(s, (strm.total_in >> 16) & 0xff) - put_byte(s, (strm.total_in >> 24) & 0xff) - } else { - putShortMSB(s, strm.adler >>> 16) - putShortMSB(s, strm.adler & 0xffff) - } - - flush_pending(strm) - /* If avail_out is zero, the application will call deflate again - * to flush the rest. - */ - - if (s.wrap > 0) { - s.wrap = -s.wrap - } - /* write the trailer only once! */ - - return s.pending !== 0 ? Z_OK : Z_STREAM_END - } - - var deflateEnd = function deflateEnd(strm) { - if ( - !strm || - /*== Z_NULL*/ - !strm.state - /*== Z_NULL*/ - ) { - return Z_STREAM_ERROR - } - - var status = strm.state.status - - if ( - status !== INIT_STATE && - status !== EXTRA_STATE && - status !== NAME_STATE && - status !== COMMENT_STATE && - status !== HCRC_STATE && - status !== BUSY_STATE && - status !== FINISH_STATE - ) { - return err(strm, Z_STREAM_ERROR) - } - - strm.state = null - return status === BUSY_STATE ? err(strm, Z_DATA_ERROR) : Z_OK - } - /* ========================================================================= - * Initializes the compression dictionary from the given byte - * sequence without producing any compressed output. 
- */ - - var deflateSetDictionary = function deflateSetDictionary(strm, dictionary) { - var dictLength = dictionary.length - - if ( - !strm || - /*== Z_NULL*/ - !strm.state - /*== Z_NULL*/ - ) { - return Z_STREAM_ERROR - } - - var s = strm.state - var wrap = s.wrap - - if (wrap === 2 || (wrap === 1 && s.status !== INIT_STATE) || s.lookahead) { - return Z_STREAM_ERROR - } - /* when using zlib wrappers, compute Adler-32 for provided dictionary */ - - if (wrap === 1) { - /* adler32(strm->adler, dictionary, dictLength); */ - strm.adler = adler32_1(strm.adler, dictionary, dictLength, 0) - } - - s.wrap = 0 - /* avoid computing Adler-32 in read_buf */ - - /* if dictionary would fill window, just replace the history */ - - if (dictLength >= s.w_size) { - if (wrap === 0) { - /* already empty otherwise */ - - /*** CLEAR_HASH(s); ***/ - zero$1(s.head) // Fill with NIL (= 0); - - s.strstart = 0 - s.block_start = 0 - s.insert = 0 - } - /* use the tail */ - // dictionary = dictionary.slice(dictLength - s.w_size); - - var tmpDict = new Uint8Array(s.w_size) - tmpDict.set(dictionary.subarray(dictLength - s.w_size, dictLength), 0) - dictionary = tmpDict - dictLength = s.w_size - } - /* insert dictionary into window and hash */ - - var avail = strm.avail_in - var next = strm.next_in - var input = strm.input - strm.avail_in = dictLength - strm.next_in = 0 - strm.input = dictionary - fill_window(s) - - while (s.lookahead >= MIN_MATCH$1) { - var str = s.strstart - var n = s.lookahead - (MIN_MATCH$1 - 1) - - do { - /* UPDATE_HASH(s, s->ins_h, s->window[str + MIN_MATCH-1]); */ - s.ins_h = HASH(s, s.ins_h, s.window[str + MIN_MATCH$1 - 1]) - s.prev[str & s.w_mask] = s.head[s.ins_h] - s.head[s.ins_h] = str - str++ - } while (--n) - - s.strstart = str - s.lookahead = MIN_MATCH$1 - 1 - fill_window(s) - } - - s.strstart += s.lookahead - s.block_start = s.strstart - s.insert = s.lookahead - s.lookahead = 0 - s.match_length = s.prev_length = MIN_MATCH$1 - 1 - s.match_available = 0 - strm.next_in = next - strm.input = input - strm.avail_in = avail - s.wrap = wrap - return Z_OK - } - - var deflateInit_1 = deflateInit - var deflateInit2_1 = deflateInit2 - var deflateReset_1 = deflateReset - var deflateResetKeep_1 = deflateResetKeep - var deflateSetHeader_1 = deflateSetHeader - var deflate_2 = deflate - var deflateEnd_1 = deflateEnd - var deflateSetDictionary_1 = deflateSetDictionary - var deflateInfo = 'pako deflate (from Nodeca project)' - /* Not implemented - module.exports.deflateBound = deflateBound; - module.exports.deflateCopy = deflateCopy; - module.exports.deflateParams = deflateParams; - module.exports.deflatePending = deflatePending; - module.exports.deflatePrime = deflatePrime; - module.exports.deflateTune = deflateTune; - */ - - var deflate_1 = { - deflateInit: deflateInit_1, - deflateInit2: deflateInit2_1, - deflateReset: deflateReset_1, - deflateResetKeep: deflateResetKeep_1, - deflateSetHeader: deflateSetHeader_1, - deflate: deflate_2, - deflateEnd: deflateEnd_1, - deflateSetDictionary: deflateSetDictionary_1, - deflateInfo: deflateInfo, - } - - function _typeof(obj) { - '@babel/helpers - typeof' - - if (typeof Symbol === 'function' && typeof Symbol.iterator === 'symbol') { - _typeof = function (obj) { - return typeof obj - } - } else { - _typeof = function (obj) { - return obj && typeof Symbol === 'function' && obj.constructor === Symbol && obj !== Symbol.prototype - ? 
'symbol'
- : typeof obj
- }
- }
-
- return _typeof(obj)
- }
-
- var _has = function _has(obj, key) {
- return Object.prototype.hasOwnProperty.call(obj, key)
- }
-
- var assign = function assign(
- obj
- /*from1, from2, from3, ...*/
- ) {
- var sources = Array.prototype.slice.call(arguments, 1)
-
- while (sources.length) {
- var source = sources.shift()
-
- if (!source) {
- continue
- }
-
- if (_typeof(source) !== 'object') {
- throw new TypeError(source + ' must be non-object')
- }
-
- for (var p in source) {
- if (_has(source, p)) {
- obj[p] = source[p]
- }
- }
- }
-
- return obj
- } // Join an array of chunks into a single array.
-
- var flattenChunks = function flattenChunks(chunks) {
- // calculate data length
- var len = 0
-
- for (var i = 0, l = chunks.length; i < l; i++) {
- len += chunks[i].length
- } // join chunks
-
- var result = new Uint8Array(len)
-
- for (var _i = 0, pos = 0, _l = chunks.length; _i < _l; _i++) {
- var chunk = chunks[_i]
- result.set(chunk, pos)
- pos += chunk.length
- }
-
- return result
- }
-
- var common = {
- assign: assign,
- flattenChunks: flattenChunks,
- }
-
- // String encode/decode helpers
- //
- // - apply(Array) can fail on Android 2.2
- // - apply(Uint8Array) can fail on iOS 5.1 Safari
- //
-
- var STR_APPLY_UIA_OK = true
-
- try {
- String.fromCharCode.apply(null, new Uint8Array(1))
- } catch (__) {
- STR_APPLY_UIA_OK = false
- } // Table with utf8 lengths (calculated by first byte of sequence)
- // Note that 5- and 6-byte values and some 4-byte values cannot be represented in JS,
- // because the max possible codepoint is 0x10ffff
-
- var _utf8len = new Uint8Array(256)
-
- for (var q = 0; q < 256; q++) {
- _utf8len[q] = q >= 252 ? 6 : q >= 248 ? 5 : q >= 240 ? 4 : q >= 224 ? 3 : q >= 192 ? 2 : 1
- }
-
- _utf8len[254] = _utf8len[254] = 1 // Invalid sequence start
- // convert string to array (typed, when possible)
-
- var string2buf = function string2buf(str) {
- var buf,
- c,
- c2,
- m_pos,
- i,
- str_len = str.length,
- buf_len = 0 // count binary size
-
- for (m_pos = 0; m_pos < str_len; m_pos++) {
- c = str.charCodeAt(m_pos)
-
- if ((c & 0xfc00) === 0xd800 && m_pos + 1 < str_len) {
- c2 = str.charCodeAt(m_pos + 1)
-
- if ((c2 & 0xfc00) === 0xdc00) {
- c = 0x10000 + ((c - 0xd800) << 10) + (c2 - 0xdc00)
- m_pos++
- }
- }
-
- buf_len += c < 0x80 ? 1 : c < 0x800 ? 2 : c < 0x10000 ? 3 : 4
- } // allocate buffer
-
- buf = new Uint8Array(buf_len) // convert
-
- for (i = 0, m_pos = 0; i < buf_len; m_pos++) {
- c = str.charCodeAt(m_pos)
-
- if ((c & 0xfc00) === 0xd800 && m_pos + 1 < str_len) {
- c2 = str.charCodeAt(m_pos + 1)
-
- if ((c2 & 0xfc00) === 0xdc00) {
- c = 0x10000 + ((c - 0xd800) << 10) + (c2 - 0xdc00)
- m_pos++
- }
- }
-
- if (c < 0x80) {
- /* one byte */
- buf[i++] = c
- } else if (c < 0x800) {
- /* two bytes */
- buf[i++] = 0xc0 | (c >>> 6)
- buf[i++] = 0x80 | (c & 0x3f)
- } else if (c < 0x10000) {
- /* three bytes */
- buf[i++] = 0xe0 | (c >>> 12)
- buf[i++] = 0x80 | ((c >>> 6) & 0x3f)
- buf[i++] = 0x80 | (c & 0x3f)
- } else {
- /* four bytes */
- buf[i++] = 0xf0 | (c >>> 18)
- buf[i++] = 0x80 | ((c >>> 12) & 0x3f)
- buf[i++] = 0x80 | ((c >>> 6) & 0x3f)
- buf[i++] = 0x80 | (c & 0x3f)
- }
- }
-
- return buf
- } // Helper
-
- var buf2binstring = function buf2binstring(buf, len) {
- // On Chrome, the maximum number of arguments allowed in a function call is 65534.
- // If the length of the buffer is smaller than that, we can use this optimization,
- // otherwise we take a slower path.
- if (len < 65534) {
- if (buf.subarray && STR_APPLY_UIA_OK) {
- return String.fromCharCode.apply(null, buf.length === len ? buf : buf.subarray(0, len))
- }
- }
-
- var result = ''
-
- for (var i = 0; i < len; i++) {
- result += String.fromCharCode(buf[i])
- }
-
- return result
- } // convert array to string
-
- var buf2string = function buf2string(buf, max) {
- var i, out
- var len = max || buf.length // Reserve max possible length (2 words per char)
- // NB: for unknown reasons, Array is significantly faster for
- // String.fromCharCode.apply than Uint16Array.
-
- var utf16buf = new Array(len * 2)
-
- for (out = 0, i = 0; i < len; ) {
- var c = buf[i++] // quick process ascii
-
- if (c < 0x80) {
- utf16buf[out++] = c
- continue
- }
-
- var c_len = _utf8len[c] // skip 5 & 6 byte codes
-
- if (c_len > 4) {
- utf16buf[out++] = 0xfffd
- i += c_len - 1
- continue
- } // apply mask on first byte
-
- c &= c_len === 2 ? 0x1f : c_len === 3 ? 0x0f : 0x07 // join the rest
-
- while (c_len > 1 && i < len) {
- c = (c << 6) | (buf[i++] & 0x3f)
- c_len--
- } // terminated by end of string?
-
- if (c_len > 1) {
- utf16buf[out++] = 0xfffd
- continue
- }
-
- if (c < 0x10000) {
- utf16buf[out++] = c
- } else {
- c -= 0x10000
- utf16buf[out++] = 0xd800 | ((c >> 10) & 0x3ff)
- utf16buf[out++] = 0xdc00 | (c & 0x3ff)
- }
- }
-
- return buf2binstring(utf16buf, out)
- } // Calculate the max possible position in a utf8 buffer
- // that will not break a sequence. If that's not possible
- // (very small limits), return the max size as is.
- //
- // buf[] - utf8 bytes array
- // max - length limit (mandatory);
-
- var utf8border = function utf8border(buf, max) {
- max = max || buf.length
-
- if (max > buf.length) {
- max = buf.length
- } // go back from last position, until start of sequence found
-
- var pos = max - 1
-
- while (pos >= 0 && (buf[pos] & 0xc0) === 0x80) {
- pos--
- } // Very small and broken sequence,
- // return max, because we should return something anyway.
-
- if (pos < 0) {
- return max
- } // If we came to start of buffer - that means buffer is too small,
- // return max too.
-
- if (pos === 0) {
- return max
- }
-
- return pos + _utf8len[buf[pos]] > max ? pos : max
- }
-
- var strings = {
- string2buf: string2buf,
- buf2string: buf2string,
- utf8border: utf8border,
- }
-
- // (C) 2014-2017 Vitaly Puzrin and Andrey Tupitsin
- //
- // This software is provided 'as-is', without any express or implied
- // warranty. In no event will the authors be held liable for any damages
- // arising from the use of this software.
- //
- // Permission is granted to anyone to use this software for any purpose,
- // including commercial applications, and to alter it and redistribute it
- // freely, subject to the following restrictions:
- //
- // 1. The origin of this software must not be misrepresented; you must not
- // claim that you wrote the original software. If you use this software
- // in a product, an acknowledgment in the product documentation would be
- // appreciated but is not required.
- // 2. Altered source versions must be plainly marked as such, and must not be
- // misrepresented as being the original software.
- // 3. This notice may not be removed or altered from any source distribution.
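(Editor's note, not part of the patch: a quick illustration of the string helpers just defined. This is a sketch only; `strings` refers to the bundled object above, and the byte counts assume well-formed UTF-16 input.)

```typescript
// Sketch: round-tripping text through the bundled `strings` helpers above.
const bytes = strings.string2buf('déflate') // 8 bytes: 'é' encodes as 0xC3 0xA9
const text = strings.buf2string(bytes) // back to 'déflate'

// When slicing a buffer into chunks, utf8border() backs up so a
// multi-byte sequence is never cut in half:
const safeLength = strings.utf8border(bytes, 2) // 1, since byte 1 starts the 'é' sequence
console.log(text, bytes.length, safeLength)
```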
-
- function ZStream() {
- /* next input byte */
- this.input = null // JS specific, because we have no pointers
-
- this.next_in = 0
- /* number of bytes available at input */
-
- this.avail_in = 0
- /* total number of input bytes read so far */
-
- this.total_in = 0
- /* next output byte should be put there */
-
- this.output = null // JS specific, because we have no pointers
-
- this.next_out = 0
- /* remaining free space at output */
-
- this.avail_out = 0
- /* total number of bytes output so far */
-
- this.total_out = 0
- /* last error message, NULL if no error */
-
- this.msg = ''
- /*Z_NULL*/
- /* not visible by applications */
-
- this.state = null
- /* best guess about the data type: binary or text */
-
- this.data_type = 2
- /*Z_UNKNOWN*/
- /* adler32 value of the uncompressed data */
-
- this.adler = 0
- }
-
- var zstream = ZStream
-
- var toString = Object.prototype.toString
- /* Public constants ==========================================================*/
-
- /* ===========================================================================*/
-
- var Z_NO_FLUSH$1 = constants.Z_NO_FLUSH,
- Z_SYNC_FLUSH = constants.Z_SYNC_FLUSH,
- Z_FULL_FLUSH$1 = constants.Z_FULL_FLUSH,
- Z_FINISH$1 = constants.Z_FINISH,
- Z_OK$1 = constants.Z_OK,
- Z_STREAM_END$1 = constants.Z_STREAM_END,
- Z_DEFAULT_COMPRESSION$1 = constants.Z_DEFAULT_COMPRESSION,
- Z_DEFAULT_STRATEGY$1 = constants.Z_DEFAULT_STRATEGY,
- Z_DEFLATED$1 = constants.Z_DEFLATED
- /* ===========================================================================*/
-
- /**
- * class Deflate
- *
- * Generic JS-style wrapper for zlib calls. If you don't need
- * streaming behaviour, use the simpler functions: [[deflate]],
- * [[deflateRaw]] and [[gzip]].
- **/
-
- /* internal
- * Deflate.chunks -> Array
- *
- * Chunks of output data, if [[Deflate#onData]] is not overridden.
- **/
-
- /**
- * Deflate.result -> Uint8Array
- *
- * Compressed result, generated by default [[Deflate#onData]]
- * and [[Deflate#onEnd]] handlers. Filled after you push the last chunk
- * (call [[Deflate#push]] with `Z_FINISH` / `true` param).
- **/
-
- /**
- * Deflate.err -> Number
- *
- * Error code after deflate finished. 0 (Z_OK) on success.
- * You will not need it in real life, because deflate errors
- * are possible only on wrong options or bad `onData` / `onEnd`
- * custom handlers.
- **/
-
- /**
- * Deflate.msg -> String
- *
- * Error message, if [[Deflate.err]] != 0
- **/
-
- /**
- * new Deflate(options)
- * - options (Object): zlib deflate options.
- *
- * Creates a new deflator instance with the specified params. Throws an
- * exception on bad params. Supported options:
- *
- * - `level`
- * - `windowBits`
- * - `memLevel`
- * - `strategy`
- * - `dictionary`
- *
- * [http://zlib.net/manual.html#Advanced](http://zlib.net/manual.html#Advanced)
- * for more information on these.
- *
- * Additional options, for internal needs:
- *
- * - `chunkSize` - size of generated data chunks (16K by default)
- * - `raw` (Boolean) - do raw deflate
- * - `gzip` (Boolean) - create gzip wrapper
- * - `header` (Object) - custom header for gzip
- * - `text` (Boolean) - true if the compressed data is believed to be text
- * - `time` (Number) - modification time, unix timestamp
- * - `os` (Number) - operating system code
- * - `extra` (Array) - array of bytes with extra data (max 65536)
- * - `name` (String) - file name (binary string)
- * - `comment` (String) - comment (binary string)
- * - `hcrc` (Boolean) - true if a header crc should be added
- *
- * ##### Example:
- *
- * ```javascript
- * const pako = require('pako')
- * , chunk1 = new Uint8Array([1,2,3,4,5,6,7,8,9])
- * , chunk2 = new Uint8Array([10,11,12,13,14,15,16,17,18,19]);
- *
- * const deflate = new pako.Deflate({ level: 3});
- *
- * deflate.push(chunk1, false);
- * deflate.push(chunk2, true); // true -> last chunk
- *
- * if (deflate.err) { throw new Error(deflate.err); }
- *
- * console.log(deflate.result);
- * ```
- **/
-
- function Deflate(options) {
- this.options = common.assign(
- {
- level: Z_DEFAULT_COMPRESSION$1,
- method: Z_DEFLATED$1,
- chunkSize: 16384,
- windowBits: 15,
- memLevel: 8,
- strategy: Z_DEFAULT_STRATEGY$1,
- },
- options || {}
- )
- var opt = this.options
-
- if (opt.raw && opt.windowBits > 0) {
- opt.windowBits = -opt.windowBits
- } else if (opt.gzip && opt.windowBits > 0 && opt.windowBits < 16) {
- opt.windowBits += 16
- }
-
- this.err = 0 // error code, if any (0 = Z_OK)
-
- this.msg = '' // error message
-
- this.ended = false // used to avoid multiple onEnd() calls
-
- this.chunks = [] // chunks of compressed data
-
- this.strm = new zstream()
- this.strm.avail_out = 0
- var status = deflate_1.deflateInit2(this.strm, opt.level, opt.method, opt.windowBits, opt.memLevel, opt.strategy)
-
- if (status !== Z_OK$1) {
- throw new Error(messages[status])
- }
-
- if (opt.header) {
- deflate_1.deflateSetHeader(this.strm, opt.header)
- }
-
- if (opt.dictionary) {
- var dict // Convert data if needed
-
- if (typeof opt.dictionary === 'string') {
- // If we need to compress text, change encoding to utf8.
- dict = strings.string2buf(opt.dictionary)
- } else if (toString.call(opt.dictionary) === '[object ArrayBuffer]') {
- dict = new Uint8Array(opt.dictionary)
- } else {
- dict = opt.dictionary
- }
-
- status = deflate_1.deflateSetDictionary(this.strm, dict)
-
- if (status !== Z_OK$1) {
- throw new Error(messages[status])
- }
-
- this._dict_set = true
- }
- }
- /**
- * Deflate#push(data[, flush_mode]) -> Boolean
- * - data (Uint8Array|ArrayBuffer|String): input data. Strings will be
- * converted to utf8 byte sequence.
- * - flush_mode (Number|Boolean): 0..6 for corresponding Z_NO_FLUSH..Z_TREE modes.
- * See constants. Skipped or `false` means Z_NO_FLUSH, `true` means Z_FINISH.
- *
- * Sends input data to the deflate pipe, generating [[Deflate#onData]] calls with
- * new compressed chunks. Returns `true` on success. The last data block must
- * have `flush_mode` Z_FINISH (or `true`). That will flush internal pending
- * buffers and call [[Deflate#onEnd]].
- *
- * On failure, calls [[Deflate#onEnd]] with an error code and returns `false`.
- *
- * ##### Example
- *
- * ```javascript
- * push(chunk, false); // push one of data chunks
- * ...
- * push(chunk, true); // push last chunk
- * ```
- **/
-
- Deflate.prototype.push = function (data, flush_mode) {
- var strm = this.strm
- var chunkSize = this.options.chunkSize
-
- var status, _flush_mode
-
- if (this.ended) {
- return false
- }
-
- if (flush_mode === ~~flush_mode) _flush_mode = flush_mode
- else _flush_mode = flush_mode === true ? Z_FINISH$1 : Z_NO_FLUSH$1 // Convert data if needed
-
- if (typeof data === 'string') {
- // If we need to compress text, change encoding to utf8.
- strm.input = strings.string2buf(data)
- } else if (toString.call(data) === '[object ArrayBuffer]') {
- strm.input = new Uint8Array(data)
- } else {
- strm.input = data
- }
-
- strm.next_in = 0
- strm.avail_in = strm.input.length
-
- for (;;) {
- if (strm.avail_out === 0) {
- strm.output = new Uint8Array(chunkSize)
- strm.next_out = 0
- strm.avail_out = chunkSize
- } // Make sure avail_out > 6 to avoid repeating markers
-
- if ((_flush_mode === Z_SYNC_FLUSH || _flush_mode === Z_FULL_FLUSH$1) && strm.avail_out <= 6) {
- this.onData(strm.output.subarray(0, strm.next_out))
- strm.avail_out = 0
- continue
- }
-
- status = deflate_1.deflate(strm, _flush_mode) // Ended => flush and finish
-
- if (status === Z_STREAM_END$1) {
- if (strm.next_out > 0) {
- this.onData(strm.output.subarray(0, strm.next_out))
- }
-
- status = deflate_1.deflateEnd(this.strm)
- this.onEnd(status)
- this.ended = true
- return status === Z_OK$1
- } // Flush if out buffer full
-
- if (strm.avail_out === 0) {
- this.onData(strm.output)
- continue
- } // Flush if requested and has data
-
- if (_flush_mode > 0 && strm.next_out > 0) {
- this.onData(strm.output.subarray(0, strm.next_out))
- strm.avail_out = 0
- continue
- }
-
- if (strm.avail_in === 0) break
- }
-
- return true
- }
- /**
- * Deflate#onData(chunk) -> Void
- * - chunk (Uint8Array): output data.
- *
- * By default, stores data blocks in the `chunks[]` property and glues
- * them together in `onEnd`. Override this handler if you need another behaviour.
- **/
-
- Deflate.prototype.onData = function (chunk) {
- this.chunks.push(chunk)
- }
- /**
- * Deflate#onEnd(status) -> Void
- * - status (Number): deflate status. 0 (Z_OK) on success,
- * other if not.
- *
- * Called once after you tell deflate that the input stream is
- * complete (Z_FINISH). By default, joins the collected chunks,
- * frees memory and fills the `result` / `err` properties.
- **/
-
- Deflate.prototype.onEnd = function (status) {
- // On success - join
- if (status === Z_OK$1) {
- this.result = common.flattenChunks(this.chunks)
- }
-
- this.chunks = []
- this.err = status
- this.msg = this.strm.msg
- }
- /**
- * deflate(data[, options]) -> Uint8Array
- * - data (Uint8Array|String): input data to compress.
- * - options (Object): zlib deflate options.
- *
- * Compress `data` with the deflate algorithm and `options`.
- *
- * Supported options are:
- *
- * - level
- * - windowBits
- * - memLevel
- * - strategy
- * - dictionary
- *
- * [http://zlib.net/manual.html#Advanced](http://zlib.net/manual.html#Advanced)
- * for more information on these.
- *
- * Sugar (options):
- *
- * - `raw` (Boolean) - say that we work with a raw stream, if you don't wish to specify
- * negative windowBits implicitly.
- *
- * ##### Example:
- *
- * ```javascript
- * const pako = require('pako')
- * const data = new Uint8Array([1,2,3,4,5,6,7,8,9]);
- *
- * console.log(pako.deflate(data));
- * ```
- **/
-
- function deflate$1(input, options) {
- var deflator = new Deflate(options)
- deflator.push(input, true) // That will never happen if you don't cheat with options :)
-
- if (deflator.err) {
- throw deflator.msg || messages[deflator.err]
- }
-
- return deflator.result
- }
- /**
- * deflateRaw(data[, options]) -> Uint8Array
- * - data (Uint8Array|String): input data to compress.
- * - options (Object): zlib deflate options.
- *
- * The same as [[deflate]], but creates raw data, without wrapper
- * (header and adler32 crc).
- **/
-
- function deflateRaw(input, options) {
- options = options || {}
- options.raw = true
- return deflate$1(input, options)
- }
- /**
- * gzip(data[, options]) -> Uint8Array
- * - data (Uint8Array|String): input data to compress.
- * - options (Object): zlib deflate options.
- *
- * The same as [[deflate]], but creates a gzip wrapper instead of a
- * deflate one.
- **/
-
- function gzip(input, options) {
- options = options || {}
- options.gzip = true
- return deflate$1(input, options)
- }
-
- return { Deflate: Deflate, constants: constants }
- }
-}
diff --git a/packages/rum-recorder/src/domain/deflateWorker.spec.ts b/packages/rum-recorder/src/domain/deflateWorker.spec.ts
deleted file mode 100644
index bb7094c506..0000000000
--- a/packages/rum-recorder/src/domain/deflateWorker.spec.ts
+++ /dev/null
@@ -1,93 +0,0 @@
-import { createDeflateWorker, DeflateWorker, DeflateWorkerResponse } from './deflateWorker'
-
-describe('deflateWorker', () => {
- it('buffers data and responds with the deflated buffer size when writing', (done) => {
- const deflateWorker = createDeflateWorker()
- listen(deflateWorker, 3, (events) => {
- expect(events).toEqual([
- { id: 0, size: 11 },
- { id: 1, size: 20 },
- { id: 2, size: 29 },
- ])
- done()
- })
- deflateWorker.postMessage({ id: 0, action: 'write', data: 'foo' })
- deflateWorker.postMessage({ id: 1, action: 'write', data: 'bar' })
- deflateWorker.postMessage({ id: 2, action: 'write', data: 'baz' })
- })
-
- it('responds with the resulting bytes when completing', (done) => {
- const deflateWorker = createDeflateWorker()
- listen(deflateWorker, 2, (events) => {
- expect(events).toEqual([
- { id: 0, size: 11 },
- {
- id: 1,
- result: new Uint8Array([120, 156, 74, 203, 207, 7, 0, 0, 0, 255, 255, 3, 0, 2, 130, 1, 69]),
- },
- ])
- done()
- })
- deflateWorker.postMessage({ id: 0, action: 'write', data: 'foo' })
- deflateWorker.postMessage({ id: 1, action: 'flush' })
- })
-
- it('writes the remaining data specified by "flush"', (done) => {
- const deflateWorker = createDeflateWorker()
- listen(deflateWorker, 1, (events) => {
- expect(events).toEqual([
- {
- id: 0,
- result: new Uint8Array([120, 156, 74, 203, 207, 7, 0, 0, 0, 255, 255, 3, 0, 2, 130, 1, 69]),
- },
- ])
- done()
- })
- deflateWorker.postMessage({ id: 0, action: 'flush', data: 'foo' })
- })
-
- it('flushes several deflates one after the other', (done) => {
- const deflateWorker = createDeflateWorker()
- listen(deflateWorker, 4, (events) => {
- expect(events).toEqual([
- {
- id: 0,
- size: 11,
- },
- {
- id: 1,
- result: new Uint8Array([120, 156, 74, 203, 207, 7, 0, 0, 0, 255, 255, 3, 0, 2, 130, 1, 69]),
- },
- {
- id: 2,
- size: 11,
- },
- {
- id: 3,
- result: new Uint8Array([120, 156, 74, 74, 44, 2, 0, 0, 0, 255, 255, 3, 0, 2, 93, 1, 54]),
- },
- ])
- done()
- })
- deflateWorker.postMessage({ 
id: 0, action: 'write', data: 'foo' }) - deflateWorker.postMessage({ id: 1, action: 'flush' }) - deflateWorker.postMessage({ id: 2, action: 'write', data: 'bar' }) - deflateWorker.postMessage({ id: 3, action: 'flush' }) - }) - - function listen( - deflateWorker: DeflateWorker, - expectedResponseCount: number, - onDone: (responses: DeflateWorkerResponse[]) => void - ) { - const responses: DeflateWorkerResponse[] = [] - const listener = (event: { data: DeflateWorkerResponse }) => { - const responsesCount = responses.push(event.data) - if (responsesCount === expectedResponseCount) { - deflateWorker.removeEventListener('message', listener) - onDone(responses) - } - } - deflateWorker.addEventListener('message', listener) - } -}) diff --git a/packages/rum-recorder/src/domain/rrweb/observer.ts b/packages/rum-recorder/src/domain/rrweb/observer.ts index 59358da672..f9e301e299 100644 --- a/packages/rum-recorder/src/domain/rrweb/observer.ts +++ b/packages/rum-recorder/src/domain/rrweb/observer.ts @@ -1,5 +1,7 @@ /* tslint:disable:no-null-keyword */ import { noop } from '@datadog/browser-core' +// tslint:disable-next-line: no-implicit-dependencies +import { FontFaceDescriptors, FontFaceSet } from 'css-font-loading-module' import { INode, MaskInputOptions, SlimDOMOptions } from 'rrweb-snapshot' import { MutationBuffer } from './mutation' import { @@ -7,7 +9,6 @@ import { BlockClass, CanvasMutationCallback, FontCallback, - FontFaceDescriptors, FontParam, HookResetter, HooksParam, @@ -361,31 +362,18 @@ function initCanvasMutationObserver(cb: CanvasMutationCallback, blockClass: Bloc } } -declare class FontFace { - constructor(family: string, source: string | ArrayBufferView, descriptors?: FontFaceDescriptors) -} - -type WindowWithFontFace = typeof window & { - FontFace: typeof FontFace -} - -type DocumentWithFonts = Document & { - fonts: { add(fontFace: FontFace): void } -} - function initFontObserver(cb: FontCallback): ListenerHandler { const handlers: ListenerHandler[] = [] - const fontMap = new WeakMap() - - const originalFontFace = (window as WindowWithFontFace).FontFace + const fontMap = new WeakMap() - // tslint:disable-next-line: no-shadowed-variable - ;(window as WindowWithFontFace).FontFace = (function FontFace( + const originalFontFace = FontFace + // tslint:disable-next-line: no-any + ;(window as any).FontFace = function FontFace( family: string, source: string | ArrayBufferView, descriptors?: FontFaceDescriptors - ): FontFace { + ) { const fontFace = new originalFontFace(family, source, descriptors) fontMap.set(fontFace, { descriptors, @@ -398,24 +386,20 @@ function initFontObserver(cb: FontCallback): ListenerHandler { JSON.stringify(Array.from(new Uint8Array(source as any))), }) return fontFace - } as unknown) as typeof FontFace + } - const restoreHandler = patch( - (document as DocumentWithFonts).fonts, - 'add', - (original: (fontFace: FontFace) => unknown) => { - return function (this: unknown, fontFace: FontFace) { - setTimeout(() => { - const p = fontMap.get(fontFace) - if (p) { - cb(p) - fontMap.delete(fontFace) - } - }, 0) - return original.apply(this, [fontFace]) - } + const restoreHandler = patch(document.fonts, 'add', (original: (fontFace: FontFace) => unknown) => { + return function (this: FontFaceSet, fontFace: FontFace) { + setTimeout(() => { + const p = fontMap.get(fontFace) + if (p) { + cb(p) + fontMap.delete(fontFace) + } + }, 0) + return original.apply(this, [fontFace]) } - ) + }) handlers.push(() => { // tslint:disable-next-line: no-any diff --git 
a/packages/rum-recorder/src/domain/rrweb/types.ts b/packages/rum-recorder/src/domain/rrweb/types.ts index eeba910161..5332ce1028 100644 --- a/packages/rum-recorder/src/domain/rrweb/types.ts +++ b/packages/rum-recorder/src/domain/rrweb/types.ts @@ -1,3 +1,5 @@ +// tslint:disable-next-line: no-implicit-dependencies +import { FontFaceDescriptors } from 'css-font-loading-module' import { idNodeMap, INode, MaskInputOptions, serializedNodeWithId, SlimDOMOptions } from 'rrweb-snapshot' export enum EventType { @@ -335,15 +337,6 @@ export interface CanvasMutationParam { setter?: true } -export interface FontFaceDescriptors { - style?: string - weight?: string - stretch?: string - unicodeRange?: string - variant?: string - featureSettings?: string -} - export interface FontParam { family: string fontSource: string diff --git a/packages/rum-recorder/src/domain/segment.spec.ts b/packages/rum-recorder/src/domain/segment.spec.ts deleted file mode 100644 index d062de55ad..0000000000 --- a/packages/rum-recorder/src/domain/segment.spec.ts +++ /dev/null @@ -1,108 +0,0 @@ -import { Record, RecordType, SegmentContext, SegmentMeta } from '../types' -import { Segment, SegmentWriter } from './segment' - -const CONTEXT: SegmentContext = { application: { id: 'a' }, view: { id: 'b' }, session: { id: 'c' } } - -const LOAD_RECORD: Record = { type: RecordType.Load, timestamp: 10, data: {} } -const FULLSNAPSHOT_RECORD: Record = { type: RecordType.FullSnapshot, timestamp: 10, data: {} as any } -const DOM_CONTENT_LOADED_RECORD: Record = { type: RecordType.DomContentLoaded, timestamp: 10, data: {} as any } -const META_RECORD: Record = { type: RecordType.Meta, timestamp: 10, data: {} as any } - -describe('Segment', () => { - it('writes a segment', () => { - const writer = new StringWriter() - const segment = new Segment(writer, CONTEXT, 'init', LOAD_RECORD) - expect(writer.output).toEqual('{"records":[{"type":1,"timestamp":10,"data":{}}') - expect(writer.flushed).toEqual([]) - segment.flush() - - expect(writer.flushed).toEqual([ - { - meta: { - creation_reason: 'init' as const, - end: 10, - has_full_snapshot: false, - records_count: 1, - start: 10, - ...CONTEXT, - }, - segment: { - creation_reason: 'init' as const, - end: 10, - has_full_snapshot: false, - records: [ - { - data: {}, - timestamp: 10, - type: RecordType.Load, - }, - ], - records_count: 1, - start: 10, - ...CONTEXT, - }, - }, - ]) - }) - - it('adjusts meta when adding a record', () => { - const writer = new StringWriter() - const segment = new Segment(writer, CONTEXT, 'init', LOAD_RECORD) - segment.addRecord({ type: RecordType.DomContentLoaded, timestamp: 15, data: {} }) - segment.flush() - expect(writer.flushed[0].meta).toEqual({ - creation_reason: 'init', - end: 15, - has_full_snapshot: false, - records_count: 2, - start: 10, - ...CONTEXT, - }) - }) - - it("doesn't set has_full_snapshot to true if a FullSnapshot is the initial record", () => { - const writer = new StringWriter() - const segment = new Segment(writer, CONTEXT, 'init', FULLSNAPSHOT_RECORD) - segment.flush() - expect(writer.flushed[0].meta.has_full_snapshot).toEqual(false) - }) - - it("doesn't set has_full_snapshot to true if a FullSnapshot is not directly preceded by a Meta record", () => { - const writer = new StringWriter() - const segment = new Segment(writer, CONTEXT, 'init', LOAD_RECORD) - segment.addRecord(FULLSNAPSHOT_RECORD) - segment.flush() - expect(writer.flushed[0].meta.has_full_snapshot).toEqual(false) - }) - - it('sets has_full_snapshot to true if a FullSnapshot is preceded by a 
Meta record', () => {
- const writer = new StringWriter()
- const segment = new Segment(writer, CONTEXT, 'init', LOAD_RECORD)
- segment.addRecord(META_RECORD)
- segment.addRecord(FULLSNAPSHOT_RECORD)
- segment.flush()
- expect(writer.flushed[0].meta.has_full_snapshot).toEqual(true)
- })
-
- it("doesn't override has_full_snapshot to false once it has been set to true", () => {
- const writer = new StringWriter()
- const segment = new Segment(writer, CONTEXT, 'init', LOAD_RECORD)
- segment.addRecord(META_RECORD)
- segment.addRecord(FULLSNAPSHOT_RECORD)
- segment.addRecord(DOM_CONTENT_LOADED_RECORD)
- segment.flush()
- expect(writer.flushed[0].meta.has_full_snapshot).toEqual(true)
- })
-})
-
-class StringWriter implements SegmentWriter {
- output = ''
- flushed: Array<{ meta: SegmentMeta; segment: SegmentMeta & { records: Record[] } }> = []
- write(data: string) {
- this.output += data
- }
- flush(data: string, meta: SegmentMeta) {
- this.flushed.push({ meta, segment: JSON.parse(this.output + data) as any })
- this.output = ''
- }
-}
diff --git a/packages/rum-recorder/src/domain/segment.ts b/packages/rum-recorder/src/domain/segment.ts
deleted file mode 100644
index d9a4cc808f..0000000000
--- a/packages/rum-recorder/src/domain/segment.ts
+++ /dev/null
@@ -1,53 +0,0 @@
-import { CreationReason, Record, RecordType, SegmentContext, SegmentMeta } from '../types'
-
-export interface SegmentWriter {
- write(data: string): void
- flush(data: string, meta: SegmentMeta): void
-}
-
-export class Segment {
- private start: number
- private end: number
- private recordsCount: number
- private hasFullSnapshot: boolean
- private lastRecordType: RecordType
-
- constructor(
- private writer: SegmentWriter,
- readonly context: SegmentContext,
- private creationReason: CreationReason,
- initialRecord: Record
- ) {
- this.start = initialRecord.timestamp
- this.end = initialRecord.timestamp
- this.lastRecordType = initialRecord.type
- this.hasFullSnapshot = false
- this.recordsCount = 1
- this.writer.write(`{"records":[${JSON.stringify(initialRecord)}`)
- }
-
- addRecord(record: Record): void {
- this.end = record.timestamp
- if (!this.hasFullSnapshot) {
- // Note: to be exploitable by the replay, this field should be true only if the FullSnapshot
- // is preceded by a Meta record. Because rrweb emits both records synchronously and
- // contiguously, this should always be the case, but we check it nonetheless.
- this.hasFullSnapshot = record.type === RecordType.FullSnapshot && this.lastRecordType === RecordType.Meta
- }
- this.lastRecordType = record.type
- this.recordsCount += 1
- this.writer.write(`,${JSON.stringify(record)}`)
- }
-
- flush() {
- const meta: SegmentMeta = {
- creation_reason: this.creationReason,
- end: this.end,
- has_full_snapshot: this.hasFullSnapshot,
- records_count: this.recordsCount,
- start: this.start,
- ...this.context,
- }
- this.writer.flush(`],${JSON.stringify(meta).slice(1)}\n`, meta)
- }
-}
diff --git a/packages/rum-recorder/src/domain/segmentCollection.spec.ts b/packages/rum-recorder/src/domain/segmentCollection.spec.ts
deleted file mode 100644
index b2e99992d0..0000000000
--- a/packages/rum-recorder/src/domain/segmentCollection.spec.ts
+++ /dev/null
@@ -1,203 +0,0 @@
-import { createNewEvent, DOM_EVENT, restorePageVisibility, setPageVisibility } from '@datadog/browser-core'
-import { LifeCycle, LifeCycleEventType, ParentContexts, RumSession, ViewContext } from '@datadog/browser-rum-core'
-import { Record, RecordType, SegmentContext, SegmentMeta } from '../types'
-import { Segment } from './segment'
-import { computeSegmentContext, doStartSegmentCollection, MAX_SEGMENT_DURATION } from './segmentCollection'
-
-import { MockWorker } from '../../test/utils'
-import { SEND_BEACON_BYTE_LENGTH_LIMIT } from '../transport/send'
-
-const CONTEXT: SegmentContext = { application: { id: 'a' }, view: { id: 'b' }, session: { id: 'c' } }
-const RECORD: Record = { type: RecordType.Load, timestamp: 10, data: {} }
-
-const BEFORE_MAX_SEGMENT_DURATION = MAX_SEGMENT_DURATION * 0.9
-
-describe('startSegmentCollection', () => {
- let stopSegmentCollection: () => void
-
- function startSegmentCollection(context: SegmentContext | undefined) {
- const lifeCycle = new LifeCycle()
- const worker = new MockWorker()
- const eventEmitter = document.createElement('div')
- const sendSpy = jasmine.createSpy<(data: Uint8Array, meta: SegmentMeta) => void>()
-
- const { stop, addRecord } = doStartSegmentCollection(lifeCycle, () => context, sendSpy, worker, eventEmitter)
- stopSegmentCollection = stop
- const segmentFlushSpy = spyOn(Segment.prototype, 'flush').and.callThrough()
- return {
- addRecord,
- eventEmitter,
- lifeCycle,
- segmentFlushSpy,
- worker,
- sendCurrentSegment() {
- // Make sure the segment is not empty
- addRecord(RECORD)
- // Flush segment
- lifeCycle.notify(LifeCycleEventType.BEFORE_UNLOAD)
- worker.process()
- return sendSpy.calls.mostRecent().args[1]
- },
- }
- }
-
- afterEach(() => {
- jasmine.clock().uninstall()
- stopSegmentCollection()
- })
-
- it('immediately starts a new segment', () => {
- const { addRecord, worker, segmentFlushSpy, sendCurrentSegment } = startSegmentCollection(CONTEXT)
- expect(worker.pendingData).toBe('')
- addRecord(RECORD)
- expect(worker.pendingData).toBe('{"records":[{"type":1,"timestamp":10,"data":{}}')
- expect(segmentFlushSpy).not.toHaveBeenCalled()
- expect(sendCurrentSegment().creation_reason).toBe('init')
- })
-
- it('flushes a segment', () => {
- const { lifeCycle, segmentFlushSpy, addRecord } = startSegmentCollection(CONTEXT)
- addRecord(RECORD)
- lifeCycle.notify(LifeCycleEventType.BEFORE_UNLOAD)
-
- expect(segmentFlushSpy).toHaveBeenCalledTimes(1)
- })
-
- it("ignores calls to addRecord if the context can't be retrieved", () => {
- const { worker, lifeCycle, addRecord, segmentFlushSpy } = startSegmentCollection(undefined)
- addRecord(RECORD)
- lifeCycle.notify(LifeCycleEventType.BEFORE_UNLOAD)
- expect(worker.pendingData).toBe('')
- 
expect(segmentFlushSpy).not.toHaveBeenCalled()
- })
-
- describe('segment flush strategy', () => {
- afterEach(() => {
- restorePageVisibility()
- })
-
- it('flushes segment on unload', () => {
- const { lifeCycle, sendCurrentSegment } = startSegmentCollection(CONTEXT)
- lifeCycle.notify(LifeCycleEventType.BEFORE_UNLOAD)
- expect(sendCurrentSegment().creation_reason).toBe('before_unload')
- })
-
- it('flushes segment on view change', () => {
- const { lifeCycle, sendCurrentSegment } = startSegmentCollection(CONTEXT)
- lifeCycle.notify(LifeCycleEventType.VIEW_CREATED, {} as any)
- expect(sendCurrentSegment().creation_reason).toBe('view_change')
- })
-
- it('flushes segment on session renew', () => {
- const { lifeCycle, sendCurrentSegment } = startSegmentCollection(CONTEXT)
- lifeCycle.notify(LifeCycleEventType.SESSION_RENEWED)
- expect(sendCurrentSegment().creation_reason).toBe('session_renewed')
- })
-
- it('flushes segment when the page becomes hidden', () => {
- setPageVisibility('hidden')
- const { eventEmitter, sendCurrentSegment } = startSegmentCollection(CONTEXT)
- eventEmitter.dispatchEvent(createNewEvent(DOM_EVENT.VISIBILITY_CHANGE))
- expect(sendCurrentSegment().creation_reason).toBe('visibility_hidden')
- })
-
- it('does not flush segment when the page becomes visible', () => {
- setPageVisibility('visible')
- const { eventEmitter, segmentFlushSpy, sendCurrentSegment } = startSegmentCollection(CONTEXT)
- eventEmitter.dispatchEvent(createNewEvent(DOM_EVENT.VISIBILITY_CHANGE))
- expect(segmentFlushSpy).not.toHaveBeenCalled()
- expect(sendCurrentSegment().creation_reason).not.toBe('visibility_hidden')
- })
-
- it('flushes segment when the current segment deflate size reaches SEND_BEACON_BYTE_LENGTH_LIMIT', () => {
- const { worker, addRecord, sendCurrentSegment } = startSegmentCollection(CONTEXT)
- worker.deflatedSize = SEND_BEACON_BYTE_LENGTH_LIMIT
- addRecord(RECORD)
- worker.process()
-
- expect(sendCurrentSegment().creation_reason).toBe('max_size')
- })
-
- it('flushes a segment after MAX_SEGMENT_DURATION', () => {
- jasmine.clock().install()
- const { segmentFlushSpy, sendCurrentSegment, addRecord } = startSegmentCollection(CONTEXT)
- addRecord(RECORD)
- jasmine.clock().tick(MAX_SEGMENT_DURATION)
-
- expect(segmentFlushSpy).toHaveBeenCalledTimes(1)
- expect(sendCurrentSegment().creation_reason).toBe('max_duration')
- })
-
- it('does not flush a segment after MAX_SEGMENT_DURATION if a segment has been created in the meantime', () => {
- jasmine.clock().install()
- const { lifeCycle, segmentFlushSpy, sendCurrentSegment, addRecord } = startSegmentCollection(CONTEXT)
- addRecord(RECORD)
- jasmine.clock().tick(BEFORE_MAX_SEGMENT_DURATION)
- lifeCycle.notify(LifeCycleEventType.BEFORE_UNLOAD)
- addRecord(RECORD)
- jasmine.clock().tick(BEFORE_MAX_SEGMENT_DURATION)
-
- expect(segmentFlushSpy).toHaveBeenCalledTimes(1)
- expect(sendCurrentSegment().creation_reason).not.toBe('max_duration')
- })
- })
-})
-
-describe('computeSegmentContext', () => {
- const DEFAULT_VIEW_CONTEXT: ViewContext = {
- session: { id: '456' },
- view: { id: '123', url: 'http://foo.com', referrer: 'http://bar.com' },
- }
-
- const DEFAULT_SESSION: RumSession = {
- getId: () => 'session-id',
- isTracked: () => true,
- isTrackedWithResource: () => true,
- }
-
- it('returns a segment context', () => {
- expect(computeSegmentContext('appid', DEFAULT_SESSION, mockParentContexts(DEFAULT_VIEW_CONTEXT))).toEqual({
- application: { id: 'appid' },
- session: { id: '456' },
- view: { id: '123' },
- })
- })
-
- it('returns 
undefined if there is no current view', () => {
- expect(computeSegmentContext('appid', DEFAULT_SESSION, mockParentContexts(undefined))).toBeUndefined()
- })
-
- it('returns undefined if there is no session id', () => {
- expect(
- computeSegmentContext(
- 'appid',
- DEFAULT_SESSION,
- mockParentContexts({
- ...DEFAULT_VIEW_CONTEXT,
- session: { id: undefined },
- })
- )
- ).toBeUndefined()
- })
-
- it('returns undefined if the session is not tracked', () => {
- expect(
- computeSegmentContext(
- 'appid',
- {
- ...DEFAULT_SESSION,
- isTracked: () => false,
- },
- mockParentContexts(DEFAULT_VIEW_CONTEXT)
- )
- ).toBeUndefined()
- })
-
- function mockParentContexts(view: ViewContext | undefined): ParentContexts {
- return {
- findView() {
- return view
- },
- } as any
- }
-})
diff --git a/packages/rum-recorder/src/domain/segmentCollection.ts b/packages/rum-recorder/src/domain/segmentCollection.ts
deleted file mode 100644
index 5418f49e9b..0000000000
--- a/packages/rum-recorder/src/domain/segmentCollection.ts
+++ /dev/null
@@ -1,156 +0,0 @@
-import { addEventListener, DOM_EVENT, EventEmitter, monitor } from '@datadog/browser-core'
-import { LifeCycle, LifeCycleEventType, ParentContexts, RumSession } from '@datadog/browser-rum-core'
-import { SEND_BEACON_BYTE_LENGTH_LIMIT } from '../transport/send'
-import { CreationReason, Record, SegmentContext, SegmentMeta } from '../types'
-import { DeflateSegmentWriter } from './deflateSegmentWriter'
-import { createDeflateWorker, DeflateWorker } from './deflateWorker'
-import { Segment } from './segment'
-
-export const MAX_SEGMENT_DURATION = 30_000
-
-// Segments are the main data structure for session replays. They contain context information used
-// for indexing or UI needs, and a list of records (RRWeb 'events', renamed to avoid confusing
-// naming). They are stored without any processing by the intake, and fetched one after the
-// other while a session is being replayed. Their encoding (deflate) is carefully crafted to allow
-// concatenating multiple segments together. Segments have a size overhead (meta), so our goal is to
-// build segments containing as many records as possible while complying with the various flush
-// strategies to guarantee good replay quality.
-//
-// When the recording starts, a segment is initially created. The segment is flushed (finalized and
-// sent) based on various events (non-exhaustive list):
-//
-// * the page becomes hidden or is about to unload
-// * the segment duration reaches a limit
-// * the encoded segment size reaches a limit
-// * ...
-//
-// A segment cannot be created without its context. If the RUM session ends and no session id is
-// available when creating a new segment, records will be ignored, until the session is renewed and
-// a new session id is available.
-//
-// Empty segments (segments with no record) aren't useful and should be ignored.
-//
-// To help investigate session replay issues, each segment is created with a "creation reason",
-// indicating why the segment has been created.
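(Editor's note, not part of the patch: a minimal sketch of the per-segment encoding described in the comment above, modeled on the deleted deflateWorker protocol — 'write' actions push record JSON with Z_SYNC_FLUSH so the deflated size can be tracked after each write, and 'flush' closes the stream with Z_FINISH when one of the flush strategies fires. The pako import and the meta shape are illustrative assumptions, not code from this change.)

```typescript
import { Deflate, constants } from 'pako' // assumed dependency, mirroring the bundled code above

// Hypothetical encoding of one segment:
const deflate = new Deflate()
// Append the opening of the records array plus the first record, flushed to a byte boundary:
deflate.push('{"records":[{"type":1,"timestamp":10,"data":{}}', constants.Z_SYNC_FLUSH)
// ...further records would be appended here with leading commas...
// Close the records array, append a simplified meta trailer, and finish the stream:
deflate.push('],"records_count":1}\n', constants.Z_FINISH)

if (deflate.err) {
  throw new Error(deflate.msg)
}
const segmentBytes: Uint8Array = deflate.result // handed to the transport as a Blob form entry
```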
- -export function startSegmentCollection( - lifeCycle: LifeCycle, - applicationId: string, - session: RumSession, - parentContexts: ParentContexts, - send: (data: Uint8Array, meta: SegmentMeta) => void -) { - const worker = createDeflateWorker() - return doStartSegmentCollection( - lifeCycle, - () => computeSegmentContext(applicationId, session, parentContexts), - send, - worker - ) -} - -export function doStartSegmentCollection( - lifeCycle: LifeCycle, - getSegmentContext: () => SegmentContext | undefined, - send: (data: Uint8Array, meta: SegmentMeta) => void, - worker: DeflateWorker, - emitter: EventEmitter = window -) { - let currentSegment: Segment | undefined - let currentSegmentExpirationTimeoutId: number - let nextSegmentCreationReason: CreationReason = 'init' - - const writer = new DeflateSegmentWriter( - worker, - (size) => { - if (size > SEND_BEACON_BYTE_LENGTH_LIMIT) { - flushSegment('max_size') - } - }, - (data, meta) => { - send(data, meta) - } - ) - - const { unsubscribe: unsubscribeViewCreated } = lifeCycle.subscribe(LifeCycleEventType.VIEW_CREATED, () => { - flushSegment('view_change') - }) - - const { unsubscribe: unsubscribeSessionRenewed } = lifeCycle.subscribe(LifeCycleEventType.SESSION_RENEWED, () => { - flushSegment('session_renewed') - }) - - const { unsubscribe: unsubscribeBeforeUnload } = lifeCycle.subscribe(LifeCycleEventType.BEFORE_UNLOAD, () => { - flushSegment('before_unload') - }) - - const { stop: unsubscribeVisibilityChange } = addEventListener( - emitter, - DOM_EVENT.VISIBILITY_CHANGE, - () => { - if (document.visibilityState === 'hidden') { - flushSegment('visibility_hidden') - } - }, - { capture: true } - ) - - function flushSegment(creationReason: CreationReason) { - if (currentSegment) { - currentSegment.flush() - currentSegment = undefined - clearTimeout(currentSegmentExpirationTimeoutId) - } - - nextSegmentCreationReason = creationReason - } - - return { - addRecord(record: Record) { - if (!currentSegment) { - const context = getSegmentContext() - if (!context) { - return - } - - currentSegment = new Segment(writer, context, nextSegmentCreationReason, record) - currentSegmentExpirationTimeoutId = window.setTimeout( - monitor(() => { - flushSegment('max_duration') - }), - MAX_SEGMENT_DURATION - ) - } else { - currentSegment.addRecord(record) - } - }, - stop() { - unsubscribeViewCreated() - unsubscribeBeforeUnload() - unsubscribeVisibilityChange() - unsubscribeSessionRenewed() - worker.terminate() - }, - } -} - -export function computeSegmentContext(applicationId: string, session: RumSession, parentContexts: ParentContexts) { - if (!session.isTracked()) { - return undefined - } - const viewContext = parentContexts.findView() - if (!viewContext?.session.id) { - return undefined - } - return { - application: { - id: applicationId, - }, - session: { - id: viewContext.session.id, - }, - view: { - id: viewContext.view.id, - }, - } -} diff --git a/packages/rum-recorder/src/index.ts b/packages/rum-recorder/src/index.ts index 76b3b7832a..0c7941c4bf 100644 --- a/packages/rum-recorder/src/index.ts +++ b/packages/rum-recorder/src/index.ts @@ -1,15 +1 @@ -// Keep the following in sync with packages/rum/src/index.ts -export { datadogRum } from './boot/recorder.entry' -export { - CommonProperties, - ProvidedSource, - RumPublicApi as RumGlobal, - RumUserConfiguration, - // Events - RumEvent, - RumActionEvent, - RumErrorEvent, - RumLongTaskEvent, - RumResourceEvent, - RumViewEvent, -} from '@datadog/browser-rum-core' +export * from './boot/recorder.entry' diff --git 
a/packages/rum-recorder/src/transport/send.spec.ts b/packages/rum-recorder/src/transport/send.spec.ts deleted file mode 100644 index d9d42f8041..0000000000 --- a/packages/rum-recorder/src/transport/send.spec.ts +++ /dev/null @@ -1,34 +0,0 @@ -import { toFormEntries } from './send' - -describe('toFormEntries', () => { - let callbackSpy: jasmine.Spy<(key: string, value: string) => void> - - beforeEach(() => { - callbackSpy = jasmine.createSpy() - }) - - it('handles top level properties', () => { - toFormEntries({ foo: 'bar', zig: 'zag' }, callbackSpy) - expect(callbackSpy.calls.allArgs()).toEqual([ - ['foo', 'bar'], - ['zig', 'zag'], - ]) - }) - - it('handles nested properties', () => { - toFormEntries({ foo: { bar: 'baz', zig: { zag: 'zug' } } }, callbackSpy) - expect(callbackSpy.calls.allArgs()).toEqual([ - ['foo.bar', 'baz'], - ['foo.zig.zag', 'zug'], - ]) - }) - - it('converts values to string', () => { - // tslint:disable-next-line: no-null-keyword - toFormEntries({ foo: 42, bar: null }, callbackSpy) - expect(callbackSpy.calls.allArgs()).toEqual([ - ['foo', '42'], - ['bar', 'null'], - ]) - }) -}) diff --git a/packages/rum-recorder/src/transport/send.ts b/packages/rum-recorder/src/transport/send.ts deleted file mode 100644 index 2702b64a62..0000000000 --- a/packages/rum-recorder/src/transport/send.ts +++ /dev/null @@ -1,31 +0,0 @@ -import { HttpRequest, objectEntries } from '@datadog/browser-core' -import { SegmentMeta } from '../types' - -export const SEND_BEACON_BYTE_LENGTH_LIMIT = 60_000 - -export function send(endpointUrl: string, data: Uint8Array, meta: SegmentMeta): void { - const formData = new FormData() - - formData.set( - 'segment', - new Blob([data], { - type: 'application/octet-stream', - }), - `${meta.session.id}-${meta.start}` - ) - - toFormEntries(meta, (key, value) => formData.set(key, value)) - - const request = new HttpRequest(endpointUrl, SEND_BEACON_BYTE_LENGTH_LIMIT) - request.send(formData, data.byteLength) -} - -export function toFormEntries(input: object, onEntry: (key: string, value: string) => void, prefix = '') { - objectEntries(input as { [key: string]: unknown }).forEach(([key, value]) => { - if (typeof value === 'object' && value !== null) { - toFormEntries(value, onEntry, `${prefix}${key}.`) - } else { - onEntry(`${prefix}${key}`, String(value)) - } - }) -} diff --git a/packages/rum-recorder/src/types.ts b/packages/rum-recorder/src/types.ts deleted file mode 100644 index d5cd3d368d..0000000000 --- a/packages/rum-recorder/src/types.ts +++ /dev/null @@ -1,45 +0,0 @@ -// Alias EventWithTime to Record, to avoid naming clash between RRWeb events and RUM events -import { - EventType as RecordType, - EventWithTime as Record, - IncrementalSource, - MousePosition, -} from './domain/rrweb/types' - -export { Record, RecordType, IncrementalSource, MousePosition } - -export interface MouseMoveRecord { - type: RecordType.IncrementalSnapshot - timestamp: number - data: { - source: IncrementalSource.TouchMove | IncrementalSource.MouseMove - positions: MousePosition[] - } -} - -export interface Segment extends SegmentMeta { - records: Record[] -} - -export interface SegmentMeta extends SegmentContext { - start: number - end: number - has_full_snapshot: boolean - records_count: number - creation_reason: CreationReason -} - -export interface SegmentContext { - application: { id: string } - session: { id: string } - view: { id: string } -} - -export type CreationReason = - | 'init' - | 'max_duration' - | 'max_size' - | 'view_change' - | 'session_renewed' - | 'before_unload' - 
diff --git a/packages/rum-recorder/test/utils.ts b/packages/rum-recorder/test/utils.ts
deleted file mode 100644
index 4100c726a0..0000000000
--- a/packages/rum-recorder/test/utils.ts
+++ /dev/null
@@ -1,53 +0,0 @@
-import { DeflateWorker, DeflateWorkerAction, DeflateWorkerListener } from '../src/domain/deflateWorker'
-
-export class MockWorker implements DeflateWorker {
-  readonly pendingMessages: DeflateWorkerAction[] = []
-  deflatedSize = 0
-  private listener: DeflateWorkerListener | undefined
-
-  get pendingData() {
-    return this.pendingMessages.map((message) => message.data || '').join('')
-  }
-
-  addEventListener(_: 'message', listener: DeflateWorkerListener): void {
-    if (this.listener) {
-      throw new Error('MockWorker supports only one listener')
-    }
-    this.listener = listener
-  }
-
-  removeEventListener(): void {
-    this.listener = undefined
-  }
-
-  postMessage(message: DeflateWorkerAction): void {
-    this.pendingMessages.push(message)
-  }
-
-  terminate(): void {
-    // do nothing
-  }
-
-  process(ignoreMessageWithId?: number): void {
-    if (this.listener) {
-      for (const message of this.pendingMessages) {
-        if (ignoreMessageWithId === message.id) {
-          continue
-        }
-        switch (message.action) {
-          case 'write':
-            this.deflatedSize += message.data.length
-            this.listener({ data: { id: message.id, size: this.deflatedSize } })
-            break
-          case 'flush':
-            if (message.data) {
-              this.deflatedSize += message.data.length
-            }
-            this.listener({ data: { id: message.id, result: new Uint8Array(this.deflatedSize) } })
-            this.deflatedSize = 0
-        }
-      }
-    }
-    this.pendingMessages.length = 0
-  }
-}
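Note: the deleted MockWorker above queues postMessage calls and only invokes its listener when a test calls process(), so specs could control exactly when deflate responses arrive. A hedged usage sketch (the action payload shape follows the deleted deflateWorker types referenced in the import):

// Messages accumulate untouched until the test drives the worker.
const worker = new MockWorker()
worker.postMessage({ action: 'write', id: 1, data: '{"records":[' })
expect(worker.pendingData).toBe('{"records":[')

// process() replays the queue, emitting one response per message
// (a size for 'write', a Uint8Array result for 'flush'), mimicking
// the asynchronous replies of the real deflate worker.
worker.process()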
"https://registry.yarnpkg.com/@types/css-font-loading-module/-/css-font-loading-module-0.0.4.tgz#94a835e27d1af444c65cba88523533c174463d64" + integrity sha512-ENdXf7MW4m9HeDojB2Ukbi7lYMIuQNBHVf98dbzaiG4EEJREBd6oleVAjrLRCrp7dm6CK1mmdmU9tcgF61acbw== + "@webassemblyjs/ast@1.8.5": version "1.8.5" resolved "https://registry.yarnpkg.com/@webassemblyjs/ast/-/ast-1.8.5.tgz#51b1c5fe6576a34953bf4b253df9f0d490d9e359" diff --git a/test/e2e/lib/framework/createTest.ts b/test/e2e/lib/framework/createTest.ts index ac5aa468fa..59083eca16 100644 --- a/test/e2e/lib/framework/createTest.ts +++ b/test/e2e/lib/framework/createTest.ts @@ -34,7 +34,6 @@ type TestRunner = (testContext: TestContext) => Promise class TestBuilder { private rumOptions: RumSetupOptions | undefined = undefined - private rumRecorderOptions: RumSetupOptions | undefined = undefined private logsOptions: LogsSetupOptions | undefined = undefined private head: string = '' private body: string = '' @@ -47,11 +46,6 @@ class TestBuilder { return this } - withRumRecorder(rumRecorderOptions?: RumSetupOptions) { - this.rumRecorderOptions = { ...DEFAULT_RUM_OPTIONS, ...rumRecorderOptions } - return this - } - withLogs(logsOptions?: LogsSetupOptions) { this.logsOptions = { ...DEFAULT_LOGS_OPTIONS, ...logsOptions } return this @@ -83,7 +77,6 @@ class TestBuilder { head: this.head, logs: this.logsOptions, rum: this.rumOptions, - rumRecorder: this.rumRecorderOptions, } if (setups.length > 1) { @@ -138,7 +131,6 @@ function createTestContext(servers: Servers): TestContext { internalMonitoring: `${servers.intake.url}/v1/input/internalMonitoring`, logs: `${servers.intake.url}/v1/input/logs`, rum: `${servers.intake.url}/v1/input/rum`, - sessionReplay: `${servers.intake.url}/v1/input/sessionReplay`, }, events: new EventRegistry(), } diff --git a/test/e2e/lib/framework/eventsRegistry.ts b/test/e2e/lib/framework/eventsRegistry.ts index 64f68b69a8..a83f26fe1a 100644 --- a/test/e2e/lib/framework/eventsRegistry.ts +++ b/test/e2e/lib/framework/eventsRegistry.ts @@ -5,16 +5,14 @@ import { isRumResourceEvent, isRumUserActionEvent, isRumViewEvent, - SerssionReplayCall, ServerInternalMonitoringMessage, } from '../types/serverEvents' -type IntakeType = 'logs' | 'rum' | 'internalMonitoring' | 'sessionReplay' +type IntakeType = 'logs' | 'rum' | 'internalMonitoring' export class EventRegistry { readonly rum: RumEvent[] = [] readonly logs: LogsEvent[] = [] - readonly sessionReplay: SerssionReplayCall[] = [] readonly internalMonitoring: ServerInternalMonitoringMessage[] = [] push(type: IntakeType, event: any) { @@ -23,7 +21,7 @@ export class EventRegistry { } get count() { - return this.logs.length + this.rum.length + this.internalMonitoring.length + this.sessionReplay.length + return this.logs.length + this.rum.length + this.internalMonitoring.length } get rumActions() { diff --git a/test/e2e/lib/framework/pageSetups.ts b/test/e2e/lib/framework/pageSetups.ts index 1fbf8b2304..8b30bb6531 100644 --- a/test/e2e/lib/framework/pageSetups.ts +++ b/test/e2e/lib/framework/pageSetups.ts @@ -15,7 +15,6 @@ export interface LogsSetupOptions { export interface SetupOptions { rum?: RumSetupOptions - rumRecorder?: RumSetupOptions logs?: LogsSetupOptions head?: string body?: string @@ -57,13 +56,12 @@ n=o.getElementsByTagName(u)[0];n.parentNode.insertBefore(d,n) ` } - const rumOptions = options.rumRecorder || options.rum - if (rumOptions) { + if (options.rum) { body += html` ` @@ -86,16 +84,11 @@ export function bundleSetup(options: SetupOptions) { ` } - - const rumOptions = 
diff --git a/test/e2e/lib/framework/pageSetups.ts b/test/e2e/lib/framework/pageSetups.ts
index 1fbf8b2304..8b30bb6531 100644
--- a/test/e2e/lib/framework/pageSetups.ts
+++ b/test/e2e/lib/framework/pageSetups.ts
@@ -15,7 +15,6 @@ export interface LogsSetupOptions {
 
 export interface SetupOptions {
   rum?: RumSetupOptions
-  rumRecorder?: RumSetupOptions
   logs?: LogsSetupOptions
   head?: string
   body?: string
@@ -57,13 +56,12 @@ n=o.getElementsByTagName(u)[0];n.parentNode.insertBefore(d,n)
 `
   }
 
-  const rumOptions = options.rumRecorder || options.rum
-  if (rumOptions) {
+  if (options.rum) {
     body += html`
     `
   }
@@ -86,16 +84,11 @@ export function bundleSetup(options: SetupOptions) {
 `
   }
 
-
-  const rumOptions = options.rumRecorder || options.rum
-  if (rumOptions) {
+  if (options.rum) {
     header += html`
-
+
     `
   }
@@ -116,12 +109,10 @@ export function npmSetup(options: SetupOptions) {
 `
   }
 
-
-  const rumOptions = options.rumRecorder || options.rum
-  if (rumOptions) {
+  if (options.rum) {
     header += html`
     `
   }
@@ -156,7 +147,6 @@ export function html(parts: ReadonlyArray<string>, ...vars: string[]) {
 function formatLogsOptions(options: LogsSetupOptions) {
   return JSON.stringify(options)
 }
-
 function formatRumOptions(options: RumSetupOptions) {
   return JSON.stringify(options).replace('"LOCATION_ORIGIN"', 'location.origin')
 }
diff --git a/test/e2e/lib/framework/sdkBuilds.ts b/test/e2e/lib/framework/sdkBuilds.ts
index 632c787764..617a79bf62 100644
--- a/test/e2e/lib/framework/sdkBuilds.ts
+++ b/test/e2e/lib/framework/sdkBuilds.ts
@@ -7,24 +7,18 @@ const readFile = promisify(fs.readFile)
 const ROOT = path.join(__dirname, '../../../..')
 const RUM_BUNDLE = path.join(ROOT, 'packages/rum/bundle/datadog-rum.js')
 const LOGS_BUNDLE = path.join(ROOT, 'packages/logs/bundle/datadog-logs.js')
-const RUM_RECORDER_BUNDLE = path.join(ROOT, 'packages/rum-recorder/bundle/datadog-rum-recorder.js')
 const NPM_BUNDLE = path.join(ROOT, 'test/app/dist/app.js')
 
 export interface Endpoints {
   rum: string
   logs: string
   internalMonitoring: string
-  sessionReplay: string
 }
 
 export async function buildRum(endpoints: Endpoints) {
   return replaceEndpoints(await readFile(RUM_BUNDLE), endpoints)
 }
 
-export async function buildRumRecorder(endpoints: Endpoints) {
-  return replaceEndpoints(await readFile(RUM_RECORDER_BUNDLE), endpoints)
-}
-
 export async function buildLogs(endpoints: Endpoints) {
   return replaceEndpoints(await readFile(LOGS_BUNDLE), endpoints)
 }
@@ -38,7 +32,6 @@ function replaceEndpoints(content: Buffer, endpoints: Endpoints) {
     '<<< E2E INTERNAL MONITORING ENDPOINT >>>': endpoints.internalMonitoring,
     '<<< E2E LOGS ENDPOINT >>>': endpoints.logs,
     '<<< E2E RUM ENDPOINT >>>': endpoints.rum,
-    '<<< E2E SESSION REPLAY ENDPOINT >>>': endpoints.sessionReplay,
   })
 }
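Note: replaceEndpoints() swaps the '<<< E2E ... >>>' placeholders compiled into each bundle for the local intake URLs, which is how the e2e server points the SDK at itself. The hunk shows only the placeholder map, so the loop below is an assumed sketch (the replaceAll name and the localhost URL are illustrative, not the repo's):

// Substitute every placeholder occurrence in a built bundle.
function replaceAll(content: Buffer, replacements: { [placeholder: string]: string }): string {
  let result = content.toString()
  for (const [placeholder, value] of Object.entries(replacements)) {
    result = result.split(placeholder).join(value)
  }
  return result
}

// e.g. replaceAll(bundle, { '<<< E2E RUM ENDPOINT >>>': 'http://localhost:3000/v1/input/rum' })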
diff --git a/test/e2e/lib/framework/serverApps/intake.ts b/test/e2e/lib/framework/serverApps/intake.ts
index 4995532e45..35a3505b70 100644
--- a/test/e2e/lib/framework/serverApps/intake.ts
+++ b/test/e2e/lib/framework/serverApps/intake.ts
@@ -1,76 +1,18 @@
-import connectBusboy from 'connect-busboy'
 import express from 'express'
-import { createInflate } from 'zlib'
-
-import { SegmentFile, SerssionReplayCall } from '../../types/serverEvents'
 import { EventRegistry } from '../eventsRegistry'
 
 export function createIntakeServerApp(events: EventRegistry) {
   const app = express()
 
   app.use(express.text())
-  app.use(connectBusboy({ immediate: true }))
 
-  app.post('/v1/input/:endpoint', async (req, res) => {
+  app.post('/v1/input/:endpoint', (req, res) => {
     const endpoint = req.params.endpoint
     if (endpoint === 'rum' || endpoint === 'logs' || endpoint === 'internalMonitoring') {
       ;(req.body as string).split('\n').map((rawEvent) => events.push(endpoint, JSON.parse(rawEvent) as any))
     }
-
-    if (endpoint === 'sessionReplay' && req.busboy) {
-      events.push('sessionReplay', await readSessionReplay(req))
-    }
-
     res.end()
   })
 
   return app
 }
-
-async function readSessionReplay(req: express.Request): Promise<SerssionReplayCall> {
-  return new Promise((resolve, reject) => {
-    const meta: {
-      [field: string]: string
-    } = {}
-    let segmentPromise: Promise<SegmentFile>
-
-    req.busboy.on('file', (fieldname, file, filename, encoding, mimetype) => {
-      if (fieldname === 'segment') {
-        segmentPromise = readStream(file.pipe(createInflate())).then((data) => ({
-          encoding,
-          filename,
-          mimetype,
-          data: JSON.parse(data.toString()),
-        }))
-      }
-    })
-
-    req.busboy.on('field', (key: string, value: string) => {
-      meta[key] = value
-    })
-
-    req.busboy.on('finish', async () => {
-      try {
-        const segment = await segmentPromise
-        resolve({ meta, segment })
-      } catch (e) {
-        reject(e)
-      }
-    })
-  })
-}
-
-async function readStream(stream: NodeJS.ReadableStream): Promise<Buffer> {
-  return new Promise((resolve, reject) => {
-    const buffers: Buffer[] = []
-    stream.on('data', (data: Buffer) => {
-      buffers.push(data)
-    })
-    stream.on('error', (error) => {
-      reject(error)
-    })
-    stream.on('end', () => {
-      resolve(Buffer.concat(buffers))
-    })
-  })
-}
diff --git a/test/e2e/lib/framework/serverApps/mock.ts b/test/e2e/lib/framework/serverApps/mock.ts
index 8889f0b1ae..88da0e6cc9 100644
--- a/test/e2e/lib/framework/serverApps/mock.ts
+++ b/test/e2e/lib/framework/serverApps/mock.ts
@@ -1,7 +1,7 @@
 import cors from 'cors'
 import express from 'express'
 import * as url from 'url'
-import { buildLogs, buildNpm, buildRum, buildRumRecorder, Endpoints } from '../sdkBuilds'
+import { buildLogs, buildNpm, buildRum, Endpoints } from '../sdkBuilds'
 
 export function createMockServerApp(endpoints: Endpoints, setup: string) {
   const app = express()
@@ -59,10 +59,6 @@ export function createMockServerApp(endpoints: Endpoints, setup: string) {
     res.header('content-type', 'application/javascript').send(await buildRum(endpoints))
   })
 
-  app.get('/datadog-rum-recorder.js', async (req, res) => {
-    res.header('content-type', 'application/javascript').send(await buildRumRecorder(endpoints))
-  })
-
   app.get('/app.js', async (req, res) => {
     res.header('content-type', 'application/javascript').send(await buildNpm(endpoints))
   })
diff --git a/test/e2e/lib/types/serverEvents.ts b/test/e2e/lib/types/serverEvents.ts
index e400871f41..23f574db94 100644
--- a/test/e2e/lib/types/serverEvents.ts
+++ b/test/e2e/lib/types/serverEvents.ts
@@ -1,5 +1,4 @@
 import { RumActionEvent, RumErrorEvent, RumEvent, RumResourceEvent, RumViewEvent } from '@datadog/browser-rum'
-import { Segment } from '../../../../packages/rum-recorder/src/types'
 
 export interface ServerInternalMonitoringMessage {
   message: string
@@ -24,15 +23,3 @@ export function isRumViewEvent(event: RumEvent): event is RumViewEvent {
 export function isRumErrorEvent(event: RumEvent): event is RumErrorEvent {
   return event.type === 'error'
 }
-
-export interface SegmentFile {
-  filename: string
-  encoding: string
-  mimetype: string
-  data: Segment
-}
-
-export interface SerssionReplayCall {
-  segment: SegmentFile
-  meta: { [key: string]: string }
-}
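Note: the intake code deleted above piped the multipart 'segment' part through createInflate() before JSON-parsing it, using readStream to collect the stream into a single Buffer. A self-contained sketch of that collect-then-concat pattern:

import { createInflate } from 'zlib'

// Accumulate a readable stream's chunks and concatenate them on 'end',
// as the deleted readStream helper did.
function readStreamSketch(stream: NodeJS.ReadableStream): Promise<Buffer> {
  return new Promise((resolve, reject) => {
    const buffers: Buffer[] = []
    stream.on('data', (data: Buffer) => buffers.push(data))
    stream.on('error', reject)
    stream.on('end', () => resolve(Buffer.concat(buffers)))
  })
}

// Deflate-compressed segment bytes become a parsed JSON object:
// readStreamSketch(file.pipe(createInflate())).then((data) => JSON.parse(data.toString()))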
-      const { segment, meta } = events.sessionReplay[0]
-      expect(meta).toEqual({
-        'application.id': jasmine.stringMatching(UUID_RE),
-        creation_reason: 'init',
-        end: jasmine.stringMatching(TIMESTAMP_RE),
-        has_full_snapshot: 'true',
-        records_count: jasmine.stringMatching(INTEGER_RE),
-        'session.id': jasmine.stringMatching(UUID_RE),
-        start: jasmine.stringMatching(TIMESTAMP_RE),
-        'view.id': jasmine.stringMatching(UUID_RE),
-      })
-      expect(segment).toEqual({
-        data: {
-          application: { id: meta['application.id'] },
-          creation_reason: meta.creation_reason as CreationReason,
-          end: Number(meta.end),
-          has_full_snapshot: true,
-          records: jasmine.any(Array),
-          records_count: Number(meta.records_count),
-          session: { id: meta['session.id'] },
-          start: Number(meta.start),
-          view: { id: meta['view.id'] },
-        },
-        encoding: jasmine.any(String),
-        filename: `${meta['session.id']}-${meta.start}`,
-        mimetype: 'application/octet-stream',
-      })
-      expect(segment.data.records.find((record) => record.type === RecordType.Meta)).toBeTruthy('have a Meta record')
-      expect(segment.data.records.find((record) => record.type === RecordType.FullSnapshot)).toBeTruthy(
-        'have a FullSnapshot record'
-      )
-      expect(
-        segment.data.records.find(
-          (record) =>
-            record.type === RecordType.IncrementalSnapshot && record.data.source === IncrementalSource.MouseInteraction
-        )
-      ).toBeTruthy('have a IncrementalSnapshot/MouseInteraction record')
-    })
-})
diff --git a/tsconfig.base.json b/tsconfig.base.json
index 937bdff875..66f9e0713a 100644
--- a/tsconfig.base.json
+++ b/tsconfig.base.json
@@ -18,7 +18,8 @@
 
     "paths": {
       "@datadog/browser-core": ["./packages/core/src"],
-      "@datadog/browser-rum-core": ["./packages/rum-core/src"]
+      "@datadog/browser-rum-core": ["./packages/rum-core/src"],
+      "@datadog/browser-rum": ["./packages/rum/src"]
     }
   }
 }
diff --git a/webpack.base.js b/webpack.base.js
index 8d333ce6d3..f97bf2a592 100644
--- a/webpack.base.js
+++ b/webpack.base.js
@@ -29,7 +29,7 @@ module.exports = ({ entry, mode, filename, datacenter }) => ({
       },
       {
-        test: /\.(ts|js)$/,
+        test: /\.ts$/,
         loader: 'ts-loader',
         exclude: /node_modules/,
         options: {
@@ -37,7 +36,6 @@
           onlyCompileBundledFiles: true,
           compilerOptions: {
             module: 'es6',
-            allowJs: true,
           },
         },
       },
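Note: with the recorder package removed there are presumably no JavaScript sources left to compile, so the tsconfig files drop allowJs and the webpack rule narrows from /\.(ts|js)$/ back to /\.ts$/. The tsconfig "paths" addition makes @datadog/browser-rum resolve to the package sources for type checking; if a bundler-level alias were ever needed as well, it would look roughly like this (illustrative sketch, not part of the patch):

import * as path from 'path'

// Mirror the tsconfig "paths" mapping at the bundler level, e.g. for
// webpack's resolve.alias. The repo itself relies on workspace symlinks
// and ts-loader, so this block is an assumption for illustration.
export const resolveAlias = {
  '@datadog/browser-core': path.join(__dirname, 'packages/core/src'),
  '@datadog/browser-rum-core': path.join(__dirname, 'packages/rum-core/src'),
  '@datadog/browser-rum': path.join(__dirname, 'packages/rum/src'),
}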
"https://registry.yarnpkg.com/@types/connect/-/connect-3.4.33.tgz#31610c901eca573b8713c3330abc6e6b9f588546" @@ -1076,6 +1061,11 @@ dependencies: "@types/express" "*" +"@types/css-font-loading-module@0.0.4": + version "0.0.4" + resolved "https://registry.yarnpkg.com/@types/css-font-loading-module/-/css-font-loading-module-0.0.4.tgz#94a835e27d1af444c65cba88523533c174463d64" + integrity sha512-ENdXf7MW4m9HeDojB2Ukbi7lYMIuQNBHVf98dbzaiG4EEJREBd6oleVAjrLRCrp7dm6CK1mmdmU9tcgF61acbw== + "@types/events@*": version "3.0.0" resolved "https://registry.yarnpkg.com/@types/events/-/events-3.0.0.tgz#2862f3f58a9a7f7c3e78d79f130dd4d71c25c2a7" @@ -2404,13 +2394,6 @@ builtins@^1.0.3: resolved "https://registry.yarnpkg.com/builtins/-/builtins-1.0.3.tgz#cb94faeb61c8696451db36534e1422f94f0aee88" integrity sha1-y5T662HIaWRR2zZTThQi+U8K7og= -busboy@*: - version "0.3.1" - resolved "https://registry.yarnpkg.com/busboy/-/busboy-0.3.1.tgz#170899274c5bf38aae27d5c62b71268cd585fd1b" - integrity sha512-y7tTxhGKXcyBxRKAni+awqx8uqaJKrSFSNFSeRG5CsWNdmy2BIK+6VGWEW7TZnIO/533mtMEA4rOevQV815YJw== - dependencies: - dicer "0.3.0" - byline@^5.0.0: version "5.0.0" resolved "https://registry.yarnpkg.com/byline/-/byline-5.0.0.tgz#741c5216468eadc457b03410118ad77de8c1ddb1" @@ -2949,13 +2932,6 @@ config-chain@^1.1.11: ini "^1.3.4" proto-list "~1.2.1" -connect-busboy@0.0.2: - version "0.0.2" - resolved "https://registry.yarnpkg.com/connect-busboy/-/connect-busboy-0.0.2.tgz#ac5c9c96672171885e576c66b2bfd95d3bb11097" - integrity sha1-rFyclmchcYheV2xmsr/ZXTuxEJc= - dependencies: - busboy "*" - connect@^3.6.0: version "3.7.0" resolved "https://registry.yarnpkg.com/connect/-/connect-3.7.0.tgz#5d49348910caa5e07a01800b030d0c35f20484f8" @@ -3502,13 +3478,6 @@ di@^0.0.1: resolved "https://registry.yarnpkg.com/di/-/di-0.0.1.tgz#806649326ceaa7caa3306d75d985ea2748ba913c" integrity sha1-gGZJMmzqp8qjMG112YXqJ0i6kTw= -dicer@0.3.0: - version "0.3.0" - resolved "https://registry.yarnpkg.com/dicer/-/dicer-0.3.0.tgz#eacd98b3bfbf92e8ab5c2fdb71aaac44bb06b872" - integrity sha512-MdceRRWqltEG2dZqO769g27N/3PXfcKl04VhYnBlo2YhH7zPi88VebsjTKclaOyiuMaGU72hTfw3VkUitGcVCA== - dependencies: - streamsearch "0.1.2" - diff-sequences@^25.2.6: version "25.2.6" resolved "https://registry.yarnpkg.com/diff-sequences/-/diff-sequences-25.2.6.tgz#5f467c00edd35352b7bca46d7927d60e687a76dd" @@ -8734,11 +8703,6 @@ streamroller@^1.0.6: fs-extra "^7.0.1" lodash "^4.17.14" -streamsearch@0.1.2: - version "0.1.2" - resolved "https://registry.yarnpkg.com/streamsearch/-/streamsearch-0.1.2.tgz#808b9d0e56fc273d809ba57338e929919a1a9f1a" - integrity sha1-gIudDlb8Jz2Am6VzOOkpkZoanxo= - string-replace-loader@2.2.0: version "2.2.0" resolved "https://registry.yarnpkg.com/string-replace-loader/-/string-replace-loader-2.2.0.tgz#0a0e6543fcec783d85c353a3e96a23872d45a94f"