Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

core(lantern): move metrics to computed artifacts #4766

Merged
merged 16 commits into from
Mar 30, 2018
Merged
Show file tree
Hide file tree
Changes from 3 commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
13 changes: 9 additions & 4 deletions lighthouse-core/audits/byte-efficiency/byte-efficiency-audit.js
Original file line number Diff line number Diff line change
Expand Up @@ -6,7 +6,8 @@
'use strict';

const Audit = require('../audit');
const PredictivePerf = require('../predictive-perf');
const ConsistentlyInteractive = require('../../gather/computed/metrics/consistently-interactive');
const NetworkAnalysis = require('../../gather/computed/network-analysis');
const LoadSimulator = require('../../lib/dependency-graph/simulator/simulator.js');

const KB_IN_BYTES = 1024;
Expand Down Expand Up @@ -120,8 +121,8 @@ class UnusedBytes extends Audit {
});

const savingsOnTTI = Math.max(
PredictivePerf.getLastLongTaskEndTime(simulationBeforeChanges.nodeTiming) -
PredictivePerf.getLastLongTaskEndTime(simulationAfterChanges.nodeTiming),
ConsistentlyInteractive.getLastLongTaskEndTime(simulationBeforeChanges.nodeTiming) -
ConsistentlyInteractive.getLastLongTaskEndTime(simulationAfterChanges.nodeTiming),
0
);

Expand All @@ -135,7 +136,11 @@ class UnusedBytes extends Audit {
* @return {!AuditResult}
*/
static createAuditResult(result, graph) {
const simulatorOptions = PredictivePerf.computeRTTAndServerResponseTime(graph);
const records = [];
graph.traverse(node => node.record && records.push(node.record));
const simulatorOptions = NetworkAnalysis.computeRTTAndServerResponseTime(records);
// TODO: use rtt/throughput from config.settings instead of defaults
delete simulatorOptions.rtt;
Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

set to undefined?

Copy link
Collaborator Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

unfortunately that won't have the same effect, gotta love JS key existence vs. undefined biting us left and right 😆

Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

we should probably keep an eye on performance then, but hopefully properties on simulatorOptions aren't the bottleneck anyways... :)

Copy link
Collaborator Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

🤣 🤣 🤣

// TODO: calibrate multipliers, see https://github.com/GoogleChrome/lighthouse/issues/820
Object.assign(simulatorOptions, {cpuTaskMultiplier: 1, layoutTaskMultiplier: 1});
const simulator = new LoadSimulator(graph, simulatorOptions);
Expand Down
306 changes: 29 additions & 277 deletions lighthouse-core/audits/predictive-perf.js
Original file line number Diff line number Diff line change
Expand Up @@ -7,37 +7,12 @@

const Audit = require('./audit');
const Util = require('../report/v2/renderer/util');
const LoadSimulator = require('../lib/dependency-graph/simulator/simulator');
const NetworkAnalyzer = require('../lib/dependency-graph/simulator/network-analyzer');
const Node = require('../lib/dependency-graph/node');
const WebInspector = require('../lib/web-inspector');

// Parameters (in ms) for log-normal CDF scoring. To see the curve:
// https://www.desmos.com/calculator/rjp0lbit8y
const SCORING_POINT_OF_DIMINISHING_RETURNS = 1700;
const SCORING_MEDIAN = 10000;

// Any CPU task of 20 ms or more will end up being a critical long task on mobile
const CRITICAL_LONG_TASK_THRESHOLD = 20;

const COEFFICIENTS = {
FCP: {
intercept: 1440,
optimistic: -1.75,
pessimistic: 2.73,
},
FMP: {
intercept: 1532,
optimistic: -0.3,
pessimistic: 1.33,
},
TTCI: {
intercept: 1582,
optimistic: 0.97,
pessimistic: 0.49,
},
};

class PredictivePerf extends Audit {
/**
* @return {!AuditMeta}
Expand All @@ -54,266 +29,43 @@ class PredictivePerf extends Audit {
};
}

/**
* @param {!Node} dependencyGraph
* @param {function()=} condition
* @return {!Set<string>}
*/
static getScriptUrls(dependencyGraph, condition) {
const scriptUrls = new Set();

dependencyGraph.traverse(node => {
if (node.type === Node.TYPES.CPU) return;
if (node.record._resourceType !== WebInspector.resourceTypes.Script) return;
if (condition && !condition(node)) return;
scriptUrls.add(node.record.url);
});

return scriptUrls;
}

/**
* @param {!Node} dependencyGraph
* @return {!Object}
*/
static computeRTTAndServerResponseTime(dependencyGraph) {
const records = [];
dependencyGraph.traverse(node => {
if (node.type === Node.TYPES.NETWORK) records.push(node.record);
});

// First pass compute the estimated observed RTT to each origin's servers.
const rttByOrigin = new Map();
for (const [origin, summary] of NetworkAnalyzer.estimateRTTByOrigin(records).entries()) {
rttByOrigin.set(origin, summary.min);
}

// We'll use the minimum RTT as the assumed connection latency since we care about how much addt'l
// latency each origin introduces as Lantern will be simulating with its own connection latency.
const minimumRtt = Math.min(...Array.from(rttByOrigin.values()));
// We'll use the observed RTT information to help estimate the server response time
const responseTimeSummaries = NetworkAnalyzer.estimateServerResponseTimeByOrigin(records, {
rttByOrigin,
});

const additionalRttByOrigin = new Map();
const serverResponseTimeByOrigin = new Map();
for (const [origin, summary] of responseTimeSummaries.entries()) {
additionalRttByOrigin.set(origin, rttByOrigin.get(origin) - minimumRtt);
serverResponseTimeByOrigin.set(origin, summary.median);
}

return {additionalRttByOrigin, serverResponseTimeByOrigin};
}

/**
* @param {!Node} dependencyGraph
* @param {!TraceOfTabArtifact} traceOfTab
* @return {!Node}
*/
static getOptimisticFCPGraph(dependencyGraph, traceOfTab) {
const fcp = traceOfTab.timestamps.firstContentfulPaint;
const blockingScriptUrls = PredictivePerf.getScriptUrls(dependencyGraph, node => {
return (
node.endTime <= fcp && node.hasRenderBlockingPriority() && node.initiatorType !== 'script'
);
});

return dependencyGraph.cloneWithRelationships(node => {
if (node.endTime > fcp) return false;
// Include EvaluateScript tasks for blocking scripts
if (node.type === Node.TYPES.CPU) return node.isEvaluateScriptFor(blockingScriptUrls);
// Include non-script-initiated network requests with a render-blocking priority
return node.hasRenderBlockingPriority() && node.initiatorType !== 'script';
});
}

/**
* @param {!Node} dependencyGraph
* @param {!TraceOfTabArtifact} traceOfTab
* @return {!Node}
*/
static getPessimisticFCPGraph(dependencyGraph, traceOfTab) {
const fcp = traceOfTab.timestamps.firstContentfulPaint;
const blockingScriptUrls = PredictivePerf.getScriptUrls(dependencyGraph, node => {
return node.endTime <= fcp && node.hasRenderBlockingPriority();
});

return dependencyGraph.cloneWithRelationships(node => {
if (node.endTime > fcp) return false;
// Include EvaluateScript tasks for blocking scripts
if (node.type === Node.TYPES.CPU) return node.isEvaluateScriptFor(blockingScriptUrls);
// Include all network requests that had render-blocking priority (even script-initiated)
return node.hasRenderBlockingPriority();
});
}

/**
* @param {!Node} dependencyGraph
* @param {!TraceOfTabArtifact} traceOfTab
* @return {!Node}
*/
static getOptimisticFMPGraph(dependencyGraph, traceOfTab) {
const fmp = traceOfTab.timestamps.firstMeaningfulPaint;
const requiredScriptUrls = PredictivePerf.getScriptUrls(dependencyGraph, node => {
return (
node.endTime <= fmp && node.hasRenderBlockingPriority() && node.initiatorType !== 'script'
);
});

return dependencyGraph.cloneWithRelationships(node => {
if (node.endTime > fmp) return false;
// Include EvaluateScript tasks for blocking scripts
if (node.type === Node.TYPES.CPU) return node.isEvaluateScriptFor(requiredScriptUrls);
// Include non-script-initiated network requests with a render-blocking priority
return node.hasRenderBlockingPriority() && node.initiatorType !== 'script';
});
}

/**
* @param {!Node} dependencyGraph
* @param {!TraceOfTabArtifact} traceOfTab
* @return {!Node}
*/
static getPessimisticFMPGraph(dependencyGraph, traceOfTab) {
const fmp = traceOfTab.timestamps.firstMeaningfulPaint;
const requiredScriptUrls = PredictivePerf.getScriptUrls(dependencyGraph, node => {
return node.endTime <= fmp && node.hasRenderBlockingPriority();
});

return dependencyGraph.cloneWithRelationships(node => {
if (node.endTime > fmp) return false;

// Include CPU tasks that performed a layout or were evaluations of required scripts
if (node.type === Node.TYPES.CPU) {
return node.didPerformLayout() || node.isEvaluateScriptFor(requiredScriptUrls);
}

// Include all network requests that had render-blocking priority (even script-initiated)
return node.hasRenderBlockingPriority();
});
}

/**
* @param {!Node} dependencyGraph
* @return {!Node}
*/
static getOptimisticTTCIGraph(dependencyGraph) {
// Adjust the critical long task threshold for microseconds
const minimumCpuTaskDuration = CRITICAL_LONG_TASK_THRESHOLD * 1000;

return dependencyGraph.cloneWithRelationships(node => {
// Include everything that might be a long task
if (node.type === Node.TYPES.CPU) return node.event.dur > minimumCpuTaskDuration;
// Include all scripts and high priority requests, exclude all images
const isImage = node.record._resourceType === WebInspector.resourceTypes.Image;
const isScript = node.record._resourceType === WebInspector.resourceTypes.Script;
return (
!isImage &&
(isScript || node.record.priority() === 'High' || node.record.priority() === 'VeryHigh')
);
});
}

/**
 * @param {!Node} dependencyGraph
 * @return {!Node}
 */
static getPessimisticTTCIGraph(dependencyGraph) {
// Pessimistic case: the entire page-load graph is assumed to affect TTCI,
// so the graph is returned unfiltered.
return dependencyGraph;
}

/**
* @param {!Map<!Node, {startTime, endTime}>} nodeTiming
* @return {number}
*/
static getLastLongTaskEndTime(nodeTiming, duration = 50) {
return Array.from(nodeTiming.entries())
.filter(
([node, timing]) =>
node.type === Node.TYPES.CPU && timing.endTime - timing.startTime > duration
)
.map(([_, timing]) => timing.endTime)
.reduce((max, x) => Math.max(max, x), 0);
}

/**
* @param {!Artifacts} artifacts
* @return {!AuditResult}
*/
static audit(artifacts) {
static async audit(artifacts) {
const trace = artifacts.traces[Audit.DEFAULT_PASS];
const devtoolsLog = artifacts.devtoolsLogs[Audit.DEFAULT_PASS];
return Promise.all([
artifacts.requestPageDependencyGraph({trace, devtoolsLog}),
artifacts.requestTraceOfTab(trace),
]).then(([graph, traceOfTab]) => {
const graphs = {
optimisticFCP: PredictivePerf.getOptimisticFCPGraph(graph, traceOfTab),
pessimisticFCP: PredictivePerf.getPessimisticFCPGraph(graph, traceOfTab),
optimisticFMP: PredictivePerf.getOptimisticFMPGraph(graph, traceOfTab),
pessimisticFMP: PredictivePerf.getPessimisticFMPGraph(graph, traceOfTab),
optimisticTTCI: PredictivePerf.getOptimisticTTCIGraph(graph, traceOfTab),
pessimisticTTCI: PredictivePerf.getPessimisticTTCIGraph(graph, traceOfTab),
};

const values = {};
const options = PredictivePerf.computeRTTAndServerResponseTime(graph);
Object.keys(graphs).forEach(key => {
const estimate = new LoadSimulator(graphs[key], options).simulate();
const longTaskThreshold = key.startsWith('optimistic') ? 100 : 50;
const lastLongTaskEnd = PredictivePerf.getLastLongTaskEndTime(
estimate.nodeTiming,
longTaskThreshold
);

switch (key) {
case 'optimisticFCP':
case 'pessimisticFCP':
case 'optimisticFMP':
case 'pessimisticFMP':
values[key] = estimate.timeInMs;
break;
case 'optimisticTTCI':
values[key] = Math.max(values.optimisticFMP, lastLongTaskEnd);
break;
case 'pessimisticTTCI':
values[key] = Math.max(values.pessimisticFMP, lastLongTaskEnd);
break;
}
});

values.roughEstimateOfFCP =
COEFFICIENTS.FCP.intercept +
COEFFICIENTS.FCP.optimistic * values.optimisticFCP +
COEFFICIENTS.FCP.pessimistic * values.pessimisticFCP;
values.roughEstimateOfFMP =
COEFFICIENTS.FMP.intercept +
COEFFICIENTS.FMP.optimistic * values.optimisticFMP +
COEFFICIENTS.FMP.pessimistic * values.pessimisticFMP;
values.roughEstimateOfTTCI =
COEFFICIENTS.TTCI.intercept +
COEFFICIENTS.TTCI.optimistic * values.optimisticTTCI +
COEFFICIENTS.TTCI.pessimistic * values.pessimisticTTCI;

// While the raw values will never be lower than following metric, the weights make this
// theoretically possible, so take the maximum if this happens.
values.roughEstimateOfFMP = Math.max(values.roughEstimateOfFCP, values.roughEstimateOfFMP);
values.roughEstimateOfTTCI = Math.max(values.roughEstimateOfFMP, values.roughEstimateOfTTCI);
const fcp = await artifacts.requestFirstContentfulPaint({trace, devtoolsLog});
const fmp = await artifacts.requestFirstMeaningfulPaint({trace, devtoolsLog});
const ttci = await artifacts.requestConsistentlyInteractive({trace, devtoolsLog});

const values = {
roughEstimateOfFCP: fcp.timing,
optimisticFCP: fcp.optimisticEstimate.timeInMs,
pessimisticFCP: fcp.pessimisticEstimate.timeInMs,

roughEstimateOfFMP: fmp.timing,
optimisticFMP: fmp.optimisticEstimate.timeInMs,
pessimisticFMP: fmp.pessimisticEstimate.timeInMs,

roughEstimateOfTTCI: ttci.timing,
optimisticTTCI: ttci.optimisticEstimate.timeInMs,
pessimisticTTCI: ttci.pessimisticEstimate.timeInMs,
};

const score = Audit.computeLogNormalScore(
values.roughEstimateOfTTCI,
SCORING_POINT_OF_DIMINISHING_RETURNS,
SCORING_MEDIAN
);
const score = Audit.computeLogNormalScore(
values.roughEstimateOfTTCI,
SCORING_POINT_OF_DIMINISHING_RETURNS,
SCORING_MEDIAN
);

return {
score,
rawValue: values.roughEstimateOfTTCI,
displayValue: Util.formatMilliseconds(values.roughEstimateOfTTCI),
extendedInfo: {value: values},
};
});
return {
score,
rawValue: values.roughEstimateOfTTCI,
displayValue: Util.formatMilliseconds(values.roughEstimateOfTTCI),
extendedInfo: {value: values},
};
}
}

Expand Down
Loading