diff --git a/lighthouse-core/audits/estimated-input-latency.js b/lighthouse-core/audits/estimated-input-latency.js index 29dbef90d342..d70857300d20 100644 --- a/lighthouse-core/audits/estimated-input-latency.js +++ b/lighthouse-core/audits/estimated-input-latency.js @@ -10,6 +10,13 @@ const Util = require('../report/v2/renderer/util'); const TracingProcessor = require('../lib/traces/tracing-processor'); const LHError = require('../lib/errors'); +const ROLLING_WINDOW_SIZE = 5000; + +/** + * @fileOverview This audit determines the largest 90 percentile EQT value of all 5s windows between + * FMP and the end of the trace. + * @see https://docs.google.com/document/d/1b9slyaB9yho91YTOkAQfpCdULFkZM9LqsipcX3t7He8/preview + */ class EstimatedInputLatency extends Audit { /** * @return {!AuditMeta} @@ -19,8 +26,7 @@ class EstimatedInputLatency extends Audit { name: 'estimated-input-latency', description: 'Estimated Input Latency', helpText: 'The score above is an estimate of how long your app takes to respond to user ' + - 'input, in milliseconds. There is a 90% probability that a user encounters this amount ' + - 'of latency, or less. 10% of the time a user can expect additional latency. If your ' + + 'input, in milliseconds, during the busiest 5s window of page load. If your ' + 'latency is higher than 50 ms, users may perceive your app as laggy. ' + '[Learn more](https://developers.google.com/web/tools/lighthouse/audits/estimated-input-latency).', scoreDisplayMode: Audit.SCORING_MODES.NUMERIC, @@ -45,23 +51,36 @@ class EstimatedInputLatency extends Audit { throw new LHError(LHError.errors.NO_FMP); } - const latencyPercentiles = TracingProcessor.getRiskToResponsiveness(tabTrace, startTime); - const ninetieth = latencyPercentiles.find(result => result.percentile === 0.9); - const rawValue = parseFloat(ninetieth.time.toFixed(1)); + const events = TracingProcessor.getMainThreadTopLevelEvents(tabTrace, startTime) + .filter(evt => evt.duration >= 1); + + const candidateStartEvts = events.filter(evt => evt.duration >= 10); + + let worst90thPercentileLatency = 16; + for (const startEvt of candidateStartEvts) { + const latencyPercentiles = TracingProcessor.getRiskToResponsiveness( + events, + startEvt.start, + startEvt.start + ROLLING_WINDOW_SIZE, + [0.9] + ); + + worst90thPercentileLatency = Math.max( + latencyPercentiles[0].time, + worst90thPercentileLatency + ); + } const score = Audit.computeLogNormalScore( - ninetieth.time, + worst90thPercentileLatency, context.options.scorePODR, context.options.scoreMedian ); return { score, - rawValue, - displayValue: Util.formatMilliseconds(rawValue, 1), - extendedInfo: { - value: latencyPercentiles, - }, + rawValue: worst90thPercentileLatency, + displayValue: Util.formatMilliseconds(worst90thPercentileLatency, 1), }; } diff --git a/lighthouse-core/lib/traces/tracing-processor.js b/lighthouse-core/lib/traces/tracing-processor.js index 85fbbed976b4..82b5bd242d10 100644 --- a/lighthouse-core/lib/traces/tracing-processor.js +++ b/lighthouse-core/lib/traces/tracing-processor.js @@ -84,42 +84,44 @@ class TraceProcessor { * Calculates the maximum queueing time (in ms) of high priority tasks for * selected percentiles within a window of the main thread. * @see https://docs.google.com/document/d/1b9slyaB9yho91YTOkAQfpCdULFkZM9LqsipcX3t7He8/preview - * @param {LH.Artifacts.TraceOfTab} tabTrace - * @param {number=} startTime Optional start time (in ms relative to navstart) of range of interest. Defaults to navstart. 
- * @param {number=} endTime Optional end time (in ms relative to navstart) of range of interest. Defaults to trace end. + * @param {Array} events + * @param {number} startTime Start time (in ms relative to navstart) of range of interest. + * @param {number} endTime End time (in ms relative to navstart) of range of interest. * @param {!Array=} percentiles Optional array of percentiles to compute. Defaults to [0.5, 0.75, 0.9, 0.99, 1]. * @return {!Array<{percentile: number, time: number}>} */ static getRiskToResponsiveness( - tabTrace, - startTime = 0, - endTime = tabTrace.timings.traceEnd, + events, + startTime, + endTime, percentiles = [0.5, 0.75, 0.9, 0.99, 1] ) { const totalTime = endTime - startTime; percentiles.sort((a, b) => a - b); - const ret = TraceProcessor.getMainThreadTopLevelEventDurations(tabTrace, startTime, endTime); + const ret = TraceProcessor.getMainThreadTopLevelEventDurations(events, startTime, endTime); return TraceProcessor._riskPercentiles(ret.durations, totalTime, percentiles, ret.clippedLength); } /** * Provides durations in ms of all main thread top-level events - * @param {LH.Artifacts.TraceOfTab} tabTrace + * @param {Array} topLevelEvents * @param {number} startTime Optional start time (in ms relative to navstart) of range of interest. Defaults to navstart. * @param {number} endTime Optional end time (in ms relative to navstart) of range of interest. Defaults to trace end. * @return {{durations: Array, clippedLength: number}} */ - static getMainThreadTopLevelEventDurations(tabTrace, startTime = 0, endTime = Infinity) { - const topLevelEvents = TraceProcessor.getMainThreadTopLevelEvents(tabTrace, startTime, endTime); - + static getMainThreadTopLevelEventDurations(topLevelEvents, startTime = 0, endTime = Infinity) { // Find durations of all slices in range of interest. /** @type {Array} */ const durations = []; let clippedLength = 0; - topLevelEvents.forEach(event => { + for (const event of topLevelEvents) { + if (event.end < startTime || event.start > endTime) { + continue; + } + let duration = event.duration; let eventStart = event.start; if (eventStart < startTime) { @@ -134,7 +136,7 @@ class TraceProcessor { } durations.push(duration); - }); + } durations.sort((a, b) => a - b); return { @@ -149,7 +151,7 @@ class TraceProcessor { * @param {LH.Artifacts.TraceOfTab} tabTrace * @param {number=} startTime Optional start time (in ms relative to navstart) of range of interest. Defaults to navstart. * @param {number=} endTime Optional end time (in ms relative to navstart) of range of interest. Defaults to trace end. 
- * @return {!Array<{start: number, end: number, duration: number}>} + * @return {Array} */ static getMainThreadTopLevelEvents(tabTrace, startTime = 0, endTime = Infinity) { const topLevelEvents = []; @@ -186,4 +188,11 @@ class TraceProcessor { } } +/** + * @typedef ToplevelEvent + * @prop {number} start + * @prop {number} end + * @prop {number} duration + */ + module.exports = TraceProcessor; diff --git a/lighthouse-core/test/audits/estimated-input-latency-test.js b/lighthouse-core/test/audits/estimated-input-latency-test.js index 95a3f39c5f86..0affdc27d3b8 100644 --- a/lighthouse-core/test/audits/estimated-input-latency-test.js +++ b/lighthouse-core/test/audits/estimated-input-latency-test.js @@ -5,12 +5,13 @@ */ 'use strict'; -const Audit = require('../../audits/estimated-input-latency.js'); -const Runner = require('../../runner.js'); +const Audit = require('../../audits/estimated-input-latency'); +const Runner = require('../../runner'); const assert = require('assert'); const options = Audit.defaultOptions; -const pwaTrace = require('../fixtures/traces/progressive-app.json'); +const TracingProcessor = require('../../lib/traces/tracing-processor'); +const pwaTrace = require('../fixtures/traces/progressive-app-m60.json'); const computedArtifacts = Runner.instantiateComputedArtifacts(); @@ -25,12 +26,75 @@ function generateArtifactsWithTrace(trace) { describe('Performance: estimated-input-latency audit', () => { it('evaluates valid input correctly', () => { - const artifacts = generateArtifactsWithTrace({traceEvents: pwaTrace}); + const artifacts = generateArtifactsWithTrace(pwaTrace); return Audit.audit(artifacts, {options}).then(output => { assert.equal(output.debugString, undefined); - assert.equal(output.rawValue, 16.2); - assert.equal(output.displayValue, '16\xa0ms'); + assert.equal(Math.round(output.rawValue * 10) / 10, 17.1); + assert.equal(output.displayValue, '17\xa0ms'); assert.equal(output.score, 1); }); }); + + describe('#audit', () => { + let firstMeaningfulPaint; + let traceEnd; + let artifacts; + let origGetMainThreadEventsFn; + let mainThreadEvtsMock; + + beforeEach(() => { + firstMeaningfulPaint = 0.00001; + traceEnd = 1e20; + artifacts = { + traces: {}, + requestTraceOfTab() { + const timings = {firstMeaningfulPaint, traceEnd}; + return Promise.resolve({timings}); + }, + }; + + origGetMainThreadEventsFn = TracingProcessor.getMainThreadTopLevelEvents; + TracingProcessor.getMainThreadTopLevelEvents = () => mainThreadEvtsMock(arguments); + }); + + afterEach(() => { + TracingProcessor.getMainThreadTopLevelEvents = origGetMainThreadEventsFn; + }); + + it('uses a 5s rolling window, not traceEnd', async () => { + mainThreadEvtsMock = () => [ + {start: 7500, end: 10000, duration: 2500}, + {start: 10000, end: 15000, duration: 5000}, + ]; + + const result = await Audit.audit(artifacts, {options}); + assert.equal(result.rawValue, 4516); + assert.equal(result.score, 0); + }); + + it('handles continuous tasks', async () => { + const events = []; + const longTaskDuration = 100; + const longTaskNumber = 1000; + const shortTaskDuration = 1.1; + const shortTaskNumber = 10000; + + for (let i = 0; i < longTaskNumber; i++) { + const start = i * longTaskDuration; + events.push({start: start, end: start + longTaskDuration, duration: longTaskDuration}); + } + + const baseline = events[events.length - 1].end; + for (let i = 0; i < shortTaskNumber; i++) { + const start = i * shortTaskDuration + baseline; + events.push({start: start, end: start + shortTaskDuration, duration: shortTaskDuration}); 
+ } + + mainThreadEvtsMock = () => events; + + const result = await Audit.audit(artifacts, {options}); + assert.equal(result.rawValue, 106); + assert.equal(result.score, 0.44); + }); + }); }); diff --git a/lighthouse-core/test/lib/traces/tracing-processor-test.js b/lighthouse-core/test/lib/traces/tracing-processor-test.js index f9bef3e1379e..7f5c35bf2cdc 100644 --- a/lighthouse-core/test/lib/traces/tracing-processor-test.js +++ b/lighthouse-core/test/lib/traces/tracing-processor-test.js @@ -191,7 +191,8 @@ describe('TracingProcessor lib', () => { it('gets durations of top-level tasks', () => { const trace = {traceEvents: pwaTrace}; const tabTrace = new TraceOfTab().compute_(trace); - const ret = TracingProcessor.getMainThreadTopLevelEventDurations(tabTrace); + const events = TracingProcessor.getMainThreadTopLevelEvents(tabTrace); + const ret = TracingProcessor.getMainThreadTopLevelEventDurations(events); const durations = ret.durations; function getDurationFromIndex(index) { @@ -225,7 +226,8 @@ describe('TracingProcessor lib', () => { it('compute correct defaults', () => { const trace = {traceEvents: pwaTrace}; const tabTrace = new TraceOfTab().compute_(trace); - const ret = TracingProcessor.getRiskToResponsiveness(tabTrace); + const events = TracingProcessor.getMainThreadTopLevelEvents(tabTrace); + const ret = TracingProcessor.getRiskToResponsiveness(events, 0, tabTrace.timings.traceEnd); assert.equal(ret.durations.length, 645); assert.equal(Math.round(ret.totalTime), 2143); assert.equal(ret.clippedLength, 0); diff --git a/lighthouse-core/test/results/sample_v2.json b/lighthouse-core/test/results/sample_v2.json index 84d852223651..52e4f741a4d5 100644 --- a/lighthouse-core/test/results/sample_v2.json +++ b/lighthouse-core/test/results/sample_v2.json @@ -190,34 +190,10 @@ "score": 1, "displayValue": "16 ms", "rawValue": 16, - "extendedInfo": { - "value": [ - { - "percentile": 0.5, - "time": 16 - }, - { - "percentile": 0.75, - "time": 16 - }, - { - "percentile": 0.9, - "time": 16 - }, - { - "percentile": 0.99, - "time": 92.83829500000138 - }, - { - "percentile": 1, - "time": 138.53700000000208 - } - ] - }, "scoreDisplayMode": "numeric", "name": "estimated-input-latency", "description": "Estimated Input Latency", - "helpText": "The score above is an estimate of how long your app takes to respond to user input, in milliseconds. There is a 90% probability that a user encounters this amount of latency, or less. 10% of the time a user can expect additional latency. If your latency is higher than 50 ms, users may perceive your app as laggy. [Learn more](https://developers.google.com/web/tools/lighthouse/audits/estimated-input-latency)." + "helpText": "The score above is an estimate of how long your app takes to respond to user input, in milliseconds, during the busiest 5s window of page load. If your latency is higher than 50 ms, users may perceive your app as laggy. [Learn more](https://developers.google.com/web/tools/lighthouse/audits/estimated-input-latency)." }, "errors-in-console": { "score": 0, @@ -5371,6 +5347,6 @@ } }, "timing": { - "total": 842 + "total": 830 } } \ No newline at end of file
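
For readers skimming the patch, here is a condensed standalone sketch (JavaScript, not part of the patch) of the rolling-window idea the audit now implements: treat each long (>=10 ms) top-level task after FMP as a window start, look at the 5 s of main-thread tasks that follow, and keep the worst 90th-percentile expected queueing time across all windows, floored at 16 ms as in the audit's initial value. The percentile math below is a simplified approximation (binary search over the piecewise-linear "exceedance" function), not TracingProcessor._riskPercentiles; the names worstWindowEqt, percentileQueueingTime, and exceedance are illustrative only.

// Sketch only -- a simplified model of the rolling-window Estimated Input
// Latency computation, not the Lighthouse implementation.

// Total time within the window during which an arriving input would have to
// queue for more than `q` ms, given task durations clipped to the window.
function exceedance(durations, q) {
  return durations.reduce((sum, d) => sum + Math.max(0, d - q), 0);
}

// Smallest queueing time q such that an input arriving at a uniformly random
// moment in the window waits longer than q with probability at most
// (1 - percentile). Found by binary search, since exceedance(q) is
// monotonically decreasing in q.
function percentileQueueingTime(durations, windowLength, percentile) {
  const allowed = (1 - percentile) * windowLength;
  if (exceedance(durations, 0) <= allowed) return 0;
  let lo = 0;
  let hi = Math.max(...durations);
  for (let i = 0; i < 50; i++) {
    const mid = (lo + hi) / 2;
    if (exceedance(durations, mid) <= allowed) {
      hi = mid;
    } else {
      lo = mid;
    }
  }
  return hi;
}

// Worst 90th-percentile expected queueing time over all 5 s windows starting
// at a long (>=10 ms) top-level task after FMP, floored at 16 ms.
// `events` are top-level main-thread tasks: {start, end, duration} in ms.
function worstWindowEqt(events, fmp, windowSize = 5000, percentile = 0.9) {
  const tasks = events.filter(evt => evt.start >= fmp && evt.duration >= 1);
  let worst = 16;
  for (const candidate of tasks.filter(evt => evt.duration >= 10)) {
    const windowStart = candidate.start;
    const windowEnd = windowStart + windowSize;
    const durations = tasks
      .filter(evt => evt.end > windowStart && evt.start < windowEnd)
      .map(evt => Math.min(evt.end, windowEnd) - Math.max(evt.start, windowStart));
    worst = Math.max(worst, percentileQueueingTime(durations, windowSize, percentile));
  }
  return worst;
}

// Example with the events from the "uses a 5s rolling window" test above: the
// 5000 ms task dominates its own window, so the estimate lands in the
// multi-second range instead of being diluted by idle time out to traceEnd
// (the real audit reports 4516 because _riskPercentiles also folds in the
// 16 ms frame baseline, which this sketch only applies as a floor).
// worstWindowEqt([
//   {start: 7500, end: 10000, duration: 2500},
//   {start: 10000, end: 15000, duration: 5000},
// ], 0);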