diff --git a/lighthouse-core/audits/estimated-input-latency.js b/lighthouse-core/audits/estimated-input-latency.js index 52d02f14d05f..02df63e742ea 100644 --- a/lighthouse-core/audits/estimated-input-latency.js +++ b/lighthouse-core/audits/estimated-input-latency.js @@ -7,16 +7,7 @@ const Audit = require('./audit'); const Util = require('../report/html/renderer/util'); -const TracingProcessor = require('../lib/traces/tracing-processor'); -const LHError = require('../lib/errors'); -const ROLLING_WINDOW_SIZE = 5000; - -/** - * @fileOverview This audit determines the largest 90 percentile EQT value of all 5s windows between - * FMP and the end of the trace. - * @see https://docs.google.com/document/d/1b9slyaB9yho91YTOkAQfpCdULFkZM9LqsipcX3t7He8/preview - */ class EstimatedInputLatency extends Audit { /** * @return {!AuditMeta} @@ -45,57 +36,29 @@ class EstimatedInputLatency extends Audit { }; } - static calculate(tabTrace, context) { - const startTime = tabTrace.timings.firstMeaningfulPaint; - if (!startTime) { - throw new LHError(LHError.errors.NO_FMP); - } - - const events = TracingProcessor.getMainThreadTopLevelEvents(tabTrace, startTime) - .filter(evt => evt.duration >= 1); - - const candidateStartEvts = events.filter(evt => evt.duration >= 10); - - let worst90thPercentileLatency = 16; - for (const startEvt of candidateStartEvts) { - const latencyPercentiles = TracingProcessor.getRiskToResponsiveness( - events, - startEvt.start, - startEvt.start + ROLLING_WINDOW_SIZE, - [0.9] - ); - - worst90thPercentileLatency = Math.max( - latencyPercentiles[0].time, - worst90thPercentileLatency - ); - } - - const score = Audit.computeLogNormalScore( - worst90thPercentileLatency, - context.options.scorePODR, - context.options.scoreMedian - ); - - return { - score, - rawValue: worst90thPercentileLatency, - displayValue: Util.formatMilliseconds(worst90thPercentileLatency, 1), - }; - } - /** * Audits the page to estimate input latency. * @see https://github.com/GoogleChrome/lighthouse/issues/28 - * @param {!Artifacts} artifacts The artifacts from the gather phase. + * + * @param {LH.Artifacts} artifacts * @param {LH.Audit.Context} context - * @return {!Promise} The score from the audit, ranging from 0-100. 
+ * @return {Promise} */ - static audit(artifacts, context) { - const trace = artifacts.traces[this.DEFAULT_PASS]; + static async audit(artifacts, context) { + const trace = artifacts.traces[Audit.DEFAULT_PASS]; + const devtoolsLog = artifacts.devtoolsLogs[Audit.DEFAULT_PASS]; + const metricComputationData = {trace, devtoolsLog, settings: context.settings}; + const metricResult = await artifacts.requestEstimatedInputLatency(metricComputationData); - return artifacts.requestTraceOfTab(trace) - .then(traceOfTab => EstimatedInputLatency.calculate(traceOfTab, context)); + return { + score: Audit.computeLogNormalScore( + metricResult.timing, + context.options.scorePODR, + context.options.scoreMedian + ), + rawValue: metricResult.timing, + displayValue: Util.formatMilliseconds(metricResult.timing, 1), + }; } } diff --git a/lighthouse-core/audits/metrics.js b/lighthouse-core/audits/metrics.js index a101a4b56644..ab6eaf89db2a 100644 --- a/lighthouse-core/audits/metrics.js +++ b/lighthouse-core/audits/metrics.js @@ -37,6 +37,7 @@ class Metrics extends Audit { const firstCPUIdle = await artifacts.requestFirstCPUIdle(metricComputationData); const timeToInteractive = await artifacts.requestConsistentlyInteractive(metricComputationData); const speedIndex = await artifacts.requestSpeedIndex(metricComputationData); + const estimatedInputLatency = await artifacts.requestEstimatedInputLatency(metricComputationData); // eslint-disable-line max-len const metrics = []; // Include the simulated/observed performance metrics @@ -46,6 +47,7 @@ class Metrics extends Audit { firstCPUIdle, timeToInteractive, speedIndex, + estimatedInputLatency, }; for (const [metricName, values] of Object.entries(metricsMap)) { diff --git a/lighthouse-core/audits/predictive-perf.js b/lighthouse-core/audits/predictive-perf.js index 9bf72f66d143..f3bdc2999fac 100644 --- a/lighthouse-core/audits/predictive-perf.js +++ b/lighthouse-core/audits/predictive-perf.js @@ -41,6 +41,7 @@ class PredictivePerf extends Audit { const ttci = await artifacts.requestLanternConsistentlyInteractive({trace, devtoolsLog}); const ttfcpui = await artifacts.requestLanternFirstCPUIdle({trace, devtoolsLog}); const si = await artifacts.requestLanternSpeedIndex({trace, devtoolsLog}); + const eil = await artifacts.requestLanternEstimatedInputLatency({trace, devtoolsLog}); const values = { roughEstimateOfFCP: fcp.timing, @@ -62,6 +63,10 @@ class PredictivePerf extends Audit { roughEstimateOfSI: si.timing, optimisticSI: si.optimisticEstimate.timeInMs, pessimisticSI: si.pessimisticEstimate.timeInMs, + + roughEstimateOfEIL: eil.timing, + optimisticEIL: eil.optimisticEstimate.timeInMs, + pessimisticEIL: eil.pessimisticEstimate.timeInMs, }; const score = Audit.computeLogNormalScore( diff --git a/lighthouse-core/gather/computed/metrics/estimated-input-latency.js b/lighthouse-core/gather/computed/metrics/estimated-input-latency.js new file mode 100644 index 000000000000..268edba40758 --- /dev/null +++ b/lighthouse-core/gather/computed/metrics/estimated-input-latency.js @@ -0,0 +1,67 @@ +/** + * @license Copyright 2018 Google Inc. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 + * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the License for the specific language governing permissions and limitations under the License. + */ +'use strict'; + +const MetricArtifact = require('./metric'); +const LHError = require('../../../lib/errors'); +const TracingProcessor = require('../../../lib/traces/tracing-processor'); + +const ROLLING_WINDOW_SIZE = 5000; + +/** + * @fileoverview This audit determines the largest 90 percentile EQT value of all 5s windows between + * FMP and the end of the trace. + * @see https://docs.google.com/document/d/1b9slyaB9yho91YTOkAQfpCdULFkZM9LqsipcX3t7He8/preview + */ +class EstimatedInputLatency extends MetricArtifact { + get name() { + return 'EstimatedInputLatency'; + } + + /** + * @param {Array<{start: number, end: number, duration: number}>} events + * @return {number} + */ + static calculateRollingWindowEIL(events) { + const candidateStartEvts = events.filter(evt => evt.duration >= 10); + + let worst90thPercentileLatency = 16; + for (const startEvt of candidateStartEvts) { + const latencyPercentiles = TracingProcessor.getRiskToResponsiveness( + events, + startEvt.start, + startEvt.start + ROLLING_WINDOW_SIZE, + [0.9] + ); + + worst90thPercentileLatency = Math.max(latencyPercentiles[0].time, worst90thPercentileLatency); + } + + return worst90thPercentileLatency; + } + + /** + * @param {LH.Artifacts.MetricComputationData} data + * @return {Promise} + */ + computeObservedMetric(data) { + const {firstMeaningfulPaint} = data.traceOfTab.timings; + if (!firstMeaningfulPaint) { + throw new LHError(LHError.errors.NO_FMP); + } + + const events = TracingProcessor.getMainThreadTopLevelEvents( + data.traceOfTab, + firstMeaningfulPaint + ).filter(evt => evt.duration >= 1); + + return Promise.resolve({ + timing: EstimatedInputLatency.calculateRollingWindowEIL(events), + }); + } +} + +module.exports = EstimatedInputLatency; diff --git a/lighthouse-core/gather/computed/metrics/lantern-estimated-input-latency.js b/lighthouse-core/gather/computed/metrics/lantern-estimated-input-latency.js new file mode 100644 index 000000000000..a69a69351ec7 --- /dev/null +++ b/lighthouse-core/gather/computed/metrics/lantern-estimated-input-latency.js @@ -0,0 +1,100 @@ +/** + * @license Copyright 2018 Google Inc. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 + * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
+ */ +'use strict'; + +const LanternMetricArtifact = require('./lantern-metric'); +const Node = require('../../../lib/dependency-graph/node'); +const EstimatedInputLatency = require('./estimated-input-latency'); + +class LanternEstimatedInputLatency extends LanternMetricArtifact { + get name() { + return 'LanternEstimatedInputLatency'; + } + + /** + * @return {LH.Gatherer.Simulation.MetricCoefficients} + */ + get COEFFICIENTS() { + return { + intercept: 0, + optimistic: 0.4, + pessimistic: 0.4, + }; + } + + /** + * @param {Node} dependencyGraph + * @return {Node} + */ + getOptimisticGraph(dependencyGraph) { + return dependencyGraph; + } + + /** + * @param {Node} dependencyGraph + * @return {Node} + */ + getPessimisticGraph(dependencyGraph) { + return dependencyGraph; + } + + /** + * @param {LH.Gatherer.Simulation.Result} simulation + * @param {Object} extras + * @return {LH.Gatherer.Simulation.Result} + */ + getEstimateFromSimulation(simulation, extras) { + // Intentionally use the opposite FMP estimate, a more pessimistic FMP means that more tasks + // are excluded from the EIL computation, so a higher FMP means lower EIL for same work. + const fmpTimeInMs = extras.optimistic + ? extras.fmpResult.pessimisticEstimate.timeInMs + : extras.fmpResult.optimisticEstimate.timeInMs; + + const events = LanternEstimatedInputLatency.getEventsAfterFMP( + simulation.nodeTimings, + fmpTimeInMs + ); + + return { + timeInMs: EstimatedInputLatency.calculateRollingWindowEIL(events), + nodeTimings: simulation.nodeTimings, + }; + } + + /** + * @param {LH.Artifacts.MetricComputationData} data + * @param {Object} artifacts + * @return {Promise} + */ + async compute_(data, artifacts) { + const fmpResult = await artifacts.requestLanternFirstMeaningfulPaint(data, artifacts); + return this.computeMetricWithGraphs(data, artifacts, {fmpResult}); + } + + /** + * @param {LH.Gatherer.Simulation.Result['nodeTimings']} nodeTimings + * @param {number} fmpTimeInMs + */ + static getEventsAfterFMP(nodeTimings, fmpTimeInMs) { + /** @type {Array<{start: number, end: number, duration: number}>} */ + const events = []; + for (const [node, timing] of nodeTimings.entries()) { + if (node.type !== Node.TYPES.CPU) continue; + if (!timing.endTime || !timing.startTime) continue; + if (timing.endTime < fmpTimeInMs) continue; + + events.push({ + start: timing.startTime, + end: timing.endTime, + duration: timing.endTime - timing.startTime, + }); + } + + return events; + } +} + +module.exports = LanternEstimatedInputLatency; diff --git a/lighthouse-core/test/audits/estimated-input-latency-test.js b/lighthouse-core/test/audits/estimated-input-latency-test.js index 0affdc27d3b8..060e4fd988a8 100644 --- a/lighthouse-core/test/audits/estimated-input-latency-test.js +++ b/lighthouse-core/test/audits/estimated-input-latency-test.js @@ -10,16 +10,14 @@ const Runner = require('../../runner'); const assert = require('assert'); const options = Audit.defaultOptions; -const TracingProcessor = require('../../lib/traces/tracing-processor'); const pwaTrace = require('../fixtures/traces/progressive-app-m60.json'); const computedArtifacts = Runner.instantiateComputedArtifacts(); function generateArtifactsWithTrace(trace) { return Object.assign({ - traces: { - [Audit.DEFAULT_PASS]: trace, - }, + traces: {[Audit.DEFAULT_PASS]: trace}, + devtoolsLogs: {[Audit.DEFAULT_PASS]: []}, }, computedArtifacts); } /* eslint-env mocha */ @@ -27,74 +25,12 @@ function generateArtifactsWithTrace(trace) { describe('Performance: estimated-input-latency audit', () => { 
it('evaluates valid input correctly', () => { const artifacts = generateArtifactsWithTrace(pwaTrace); - return Audit.audit(artifacts, {options}).then(output => { + const settings = {throttlingMethod: 'provided'}; + return Audit.audit(artifacts, {options, settings}).then(output => { assert.equal(output.debugString, undefined); assert.equal(Math.round(output.rawValue * 10) / 10, 17.1); assert.equal(output.displayValue, '17\xa0ms'); assert.equal(output.score, 1); }); }); - - describe('#audit', () => { - let firstMeaningfulPaint; - let traceEnd; - let artifacts; - let origGetMainThreadEventsFn; - let mainThreadEvtsMock; - - beforeEach(() => { - firstMeaningfulPaint = 0.00001; - traceEnd = 1e20; - artifacts = { - traces: {}, - requestTraceOfTab() { - const timings = {firstMeaningfulPaint, traceEnd}; - return Promise.resolve({timings}); - }, - }; - - origGetMainThreadEventsFn = TracingProcessor.getMainThreadTopLevelEvents; - TracingProcessor.getMainThreadTopLevelEvents = () => mainThreadEvtsMock(arguments); - }); - - afterEach(() => { - TracingProcessor.getMainThreadTopLevelEvents = origGetMainThreadEventsFn; - }); - - it('uses a 5s rolling window, not traceEnd', async () => { - mainThreadEvtsMock = () => [ - {start: 7500, end: 10000, duration: 2500}, - {start: 10000, end: 15000, duration: 5000}, - ]; - - const result = await Audit.audit(artifacts, {options}); - assert.equal(result.rawValue, 4516); - assert.equal(result.score, 0); - }); - - it('handles continuous tasks', async () => { - const events = []; - const longTaskDuration = 100; - const longTaskNumber = 1000; - const shortTaskDuration = 1.1; - const shortTaskNumber = 10000; - - for (let i = 0; i < longTaskNumber; i++) { - const start = i * longTaskDuration; - events.push({start: start, end: start + longTaskDuration, duration: longTaskDuration}); - } - - const baseline = events[events.length - 1].end; - for (let i = 0; i < shortTaskNumber; i++) { - const start = i * shortTaskDuration + baseline; - events.push({start: start, end: start + shortTaskDuration, duration: shortTaskDuration}); - } - - mainThreadEvtsMock = () => events; - - const result = await Audit.audit(artifacts, {options}); - assert.equal(result.rawValue, 106); - assert.equal(result.score, 0.44); - }); - }); }); diff --git a/lighthouse-core/test/audits/metrics-test.js b/lighthouse-core/test/audits/metrics-test.js index df3cccb87125..a2276f48b4c7 100644 --- a/lighthouse-core/test/audits/metrics-test.js +++ b/lighthouse-core/test/audits/metrics-test.js @@ -43,6 +43,7 @@ describe('Performance: metrics', () => { firstCPUIdle: 5308, timeToInteractive: 5308, speedIndex: 2063, + estimatedInputLatency: 104, observedNavigationStart: 0, observedFirstPaint: 499, diff --git a/lighthouse-core/test/gather/computed/metrics/estimated-input-latency-test.js b/lighthouse-core/test/gather/computed/metrics/estimated-input-latency-test.js new file mode 100644 index 000000000000..2afbcabdcf1e --- /dev/null +++ b/lighthouse-core/test/gather/computed/metrics/estimated-input-latency-test.js @@ -0,0 +1,68 @@ +/** + * @license Copyright 2018 Google Inc. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 + * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the License for the specific language governing permissions and limitations under the License. + */ +'use strict'; + +const Runner = require('../../../../runner'); +const EstimatedInputLatency = require('../../../../gather/computed/metrics/estimated-input-latency'); // eslint-disable-line +const assert = require('assert'); + +const trace = require('../../../fixtures/traces/progressive-app-m60.json'); +const devtoolsLog = require('../../../fixtures/traces/progressive-app-m60.devtools.log.json'); + +/* eslint-env mocha */ + +describe('Metrics: EIL', () => { + it('should compute a simulated value', async () => { + const artifacts = Runner.instantiateComputedArtifacts(); + const settings = {throttlingMethod: 'simulate'}; + const result = await artifacts.requestEstimatedInputLatency({trace, devtoolsLog, settings}); + + assert.equal(Math.round(result.timing), 104); + assert.equal(Math.round(result.optimisticEstimate.timeInMs), 101); + assert.equal(Math.round(result.pessimisticEstimate.timeInMs), 158); + }); + + it('should compute an observed value', async () => { + const artifacts = Runner.instantiateComputedArtifacts(); + const settings = {throttlingMethod: 'provided'}; + const result = await artifacts.requestEstimatedInputLatency({trace, devtoolsLog, settings}); + + assert.equal(Math.round(result.timing * 10) / 10, 17.1); + }); + + + describe('#calculateRollingWindowEIL', () => { + it('uses a 5s rolling window', async () => { + const events = [ + {start: 7500, end: 10000, duration: 2500}, + {start: 10000, end: 15000, duration: 5000}, + ]; + + assert.equal(EstimatedInputLatency.calculateRollingWindowEIL(events), 4516); + }); + + it('handles continuous tasks', async () => { + const events = []; + const longTaskDuration = 100; + const longTaskNumber = 1000; + const shortTaskDuration = 1.1; + const shortTaskNumber = 10000; + + for (let i = 0; i < longTaskNumber; i++) { + const start = i * longTaskDuration; + events.push({start: start, end: start + longTaskDuration, duration: longTaskDuration}); + } + + const baseline = events[events.length - 1].end; + for (let i = 0; i < shortTaskNumber; i++) { + const start = i * shortTaskDuration + baseline; + events.push({start: start, end: start + shortTaskDuration, duration: shortTaskDuration}); + } + + assert.equal(EstimatedInputLatency.calculateRollingWindowEIL(events), 106); + }); + }); +}); diff --git a/lighthouse-core/test/gather/computed/metrics/lantern-estimated-input-latency-test.js b/lighthouse-core/test/gather/computed/metrics/lantern-estimated-input-latency-test.js new file mode 100644 index 000000000000..2ecceed67326 --- /dev/null +++ b/lighthouse-core/test/gather/computed/metrics/lantern-estimated-input-latency-test.js @@ -0,0 +1,27 @@ +/** + * @license Copyright 2018 Google Inc. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 + * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
+ */ +'use strict'; + +const Runner = require('../../../../runner'); +const assert = require('assert'); + +const trace = require('../../../fixtures/traces/progressive-app-m60.json'); +const devtoolsLog = require('../../../fixtures/traces/progressive-app-m60.devtools.log.json'); + +/* eslint-env mocha */ + +describe('Metrics: Lantern EIL', () => { + it('should compute a simulated value', async () => { + const artifacts = Runner.instantiateComputedArtifacts(); + const settings = {throttlingMethod: 'simulate'}; + const data = {trace, devtoolsLog, settings}; + const result = await artifacts.requestLanternEstimatedInputLatency(data); + + assert.equal(Math.round(result.timing), 104); + assert.equal(Math.round(result.optimisticEstimate.timeInMs), 101); + assert.equal(Math.round(result.pessimisticEstimate.timeInMs), 158); + }); +}); diff --git a/lighthouse-core/test/results/sample_v2.json b/lighthouse-core/test/results/sample_v2.json index 582f77eb2d5c..755f061504a6 100644 --- a/lighthouse-core/test/results/sample_v2.json +++ b/lighthouse-core/test/results/sample_v2.json @@ -1535,6 +1535,10 @@ "timing": 4417, "timestamp": 185607736912 }, + { + "metricName": "estimatedInputLatency", + "timing": 16 + }, { "metricName": "observedNavigationStart", "timing": 0, diff --git a/typings/artifacts.d.ts b/typings/artifacts.d.ts index 63b59fa3ff16..14a322d7e35e 100644 --- a/typings/artifacts.d.ts +++ b/typings/artifacts.d.ts @@ -243,7 +243,7 @@ declare global { export interface Metric { timing: number; - timestamp: number; + timestamp?: number; } export interface LanternMetric {
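
Note on the new computed-artifact entry point: the rewritten audit and the metric tests above imply that `requestEstimatedInputLatency` dispatches on `settings.throttlingMethod` — `'provided'` returns the observed rolling-window value from the trace, while `'simulate'` returns a Lantern estimate carrying optimistic/pessimistic simulations. A minimal usage sketch, with that dispatch behavior inferred from the tests rather than shown directly in this diff:

```js
// Sketch (not part of the patch): consuming the new EstimatedInputLatency computed
// metric from inside an audit. `Audit`, `artifacts`, and `context` are the usual
// Lighthouse audit inputs, as in the rewritten estimated-input-latency.js above.
static async audit(artifacts, context) {
  const metricComputationData = {
    trace: artifacts.traces[Audit.DEFAULT_PASS],
    devtoolsLog: artifacts.devtoolsLogs[Audit.DEFAULT_PASS],
    settings: context.settings, // 'provided' -> observed EIL, 'simulate' -> Lantern estimate
  };

  // metricResult.timing is the EIL in ms; under 'simulate' it also exposes
  // optimisticEstimate/pessimisticEstimate (see the Lantern EIL tests above).
  const metricResult = await artifacts.requestEstimatedInputLatency(metricComputationData);
  return {rawValue: metricResult.timing, score: 1}; // scoring elided; see the audit above
}
```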
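
Note on the Lantern coefficients: the `timing` reported by `requestLanternEstimatedInputLatency` appears to be a linear blend of the two simulations. Assuming the `LanternMetricArtifact` base class (outside this diff) combines estimates as `intercept + optimistic * optimisticEstimate + pessimistic * pessimisticEstimate`, the `COEFFICIENTS` above reproduce the expectations in the new tests:

```js
// Hypothetical check of the EIL COEFFICIENTS against the test expectations
// (~101 ms optimistic, ~158 ms pessimistic, ~104 ms blended). The linear-blend
// formula is an assumption about the Lantern base class, not part of this diff.
const coefficients = {intercept: 0, optimistic: 0.4, pessimistic: 0.4};
const optimisticEstimateMs = 101;
const pessimisticEstimateMs = 158;

const blended = coefficients.intercept +
  coefficients.optimistic * optimisticEstimateMs +
  coefficients.pessimistic * pessimisticEstimateMs;

console.log(Math.round(blended)); // ~104, matching result.timing in the Lantern EIL test
```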