From 8ce26dd1b41a4adf3c1e4fcbb3d97dd85bff3ae5 Mon Sep 17 00:00:00 2001
From: Joshua Segaran
Date: Wed, 28 Apr 2021 13:52:16 -0700
Subject: [PATCH] apollo-server-core: unified Studio reporting (#4142)

The usage reporting plugin in `apollo-server-core` is not the first tool Apollo built to report usage to Studio. Previous iterations such as `optics-agent` and `engineproxy` reported a combination of detailed per-field single-operation performance *traces* and summarized *stats* of operations to Apollo's servers. When we built this TypeScript usage reporting plugin in 2018, for the sake of expediency we did something different: it only sent traces to Apollo's servers. This meant that the performance of every single user operation was described in detail to Apollo's servers. Studio is not an exhaustive trace warehouse: we have always *sampled* the traces received, making only some of them available via Studio's Traces UI. The other traces were converted to stats inside Studio's servers. While this meant that the reporting agent was simpler than the previous implementations (no need to be able to describe performance statistics), it also meant that the protocol used to talk to Studio consumed a lot more bandwidth (as well as CPU time for encoding traces).

This PR returns us to the world where Studio usage is reported as a combination of stats and traces. It takes a slightly different approach than the previous implementations: rather than reporting stats and traces in parallel, each usage report contains both stats and traces, and each GraphQL operation is described either as a trace or as stats, not both. We expect this to significantly reduce the network and CPU requirements of sending usage reports to Studio. It should not significantly affect the experience of using Studio: we have always heavily sampled traces in Studio before saving them to the trace warehouse, and the default heuristic for which operations to send as traces works similarly to the heuristic used in Studio's servers.

This PR introduces an option `experimental_sendOperationAsTrace` that lets you control whether a given operation is sent as a trace or as stats (a minimal configuration sketch appears below). This is truly an experimental option that may change at any time. For example, you should not rely on the fact that it will be called on all operations after the operation is done with a full trace, or on its signature, or even that it continues to exist. It is likely that future improvements to the usage reporting plugin will change how operations are observed so that we don't have to collect a full trace before deciding how to represent the operation.

Some other notes:
- Upgrade our fork `@apollo/protobufjs` with a few improvements:
  - New `js_use_toArray` option which lets you encode repeated fields from objects that aren't stored in memory as arrays but expose `toArray` methods. We use this so that we can build up `DurationHistogram`s and map-like objects in a non-array fashion and only convert to an array at encoding time.
  - New `js_preEncoded` option which allows you to encode messages in repeated fields as buffers (Uint8Arrays). This helps amortize the encoding cost of a large message over time instead of freezing the event loop to encode the whole message at once. This replaces an old hack we used for one field with something built into the protobuf compiler (including correct TypeScript typings).
  - New `--no-from-object` flag which we use to reduce the size of generated code (as we don't use the fromObject protobuf.js API).
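For orientation, here is a minimal sketch of how the new option might be wired up. Only the option name `experimental_sendOperationAsTrace` comes from this PR; the callback parameters, the toy schema, and the predicate logic are illustrative assumptions rather than the final API.

```typescript
import { ApolloServer, gql } from 'apollo-server';
import { ApolloServerPluginUsageReporting } from 'apollo-server-core';

// Toy schema used only to keep the sketch self-contained.
const typeDefs = gql`
  type Query {
    aString: String
  }
`;
const resolvers = { Query: { aString: () => 'hello' } };

const server = new ApolloServer({
  typeDefs,
  resolvers,
  // The graph API key is picked up from the APOLLO_KEY environment variable.
  plugins: [
    ApolloServerPluginUsageReporting({
      // Assumed callback shape (experimental, may change or disappear):
      // return true to report this operation as a full trace, false to fold
      // it into the aggregated stats instead.
      experimental_sendOperationAsTrace: (_trace, statsReportKey: string) =>
        statsReportKey.startsWith('# ImportantQuery'),
    }),
  ],
});

server.listen().then(({ url }) => console.log(`ready at ${url}`));
```

If the option is omitted, the plugin falls back to its default heuristic (the `defaultSendOperationsAsTrace` module added in this PR), which behaves similarly to the trace-sampling heuristic used in Studio's servers.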
- In order to help us validate that the trace->stats code in this PR matches similar code in Studio's servers, the flag `internal_includeTracesContributingToStats` sends the traces that contribute to stats in a special field. This is something we only use as part of our own validation in our servers; for your graphs it will have no effect other than increasing message size. - Viewing traces in Studio is only available on paid plans. The usage-reporting endpoint now tells the plugin whether traces are supported on your graph's plan; if not supported, the plugin will switch to sending all operations as stats (regardless of the value of `experimental_sendOperationAsTrace`) after the first report. - We try to estimate the message size compared to maxUncompressedReportSize via a rough estimate about how big the leaf nodes of the stats messages will be rather than carefully counting how much space is used by each number and histogram. We do take the lengths of all strings into account. - By mistake, this plugin never sent the cache policy on traces, meaning that visualizing cache-specific stats in Studio did not work. This is now fixed. This project was begun by @jsegaran and completed by @glasser. --- package-lock.json | 26 +- .../apollo-reporting-protobuf/package.json | 4 +- .../apollo-reporting-protobuf/src/index.js | 22 +- .../src/reports.proto | 6 +- .../src/plugin/traceTreeBuilder.ts | 2 +- .../__snapshots__/stats.test.ts.snap | 6126 +++++++++++++++++ .../usageReporting/__tests__/plugin.test.ts | 95 +- .../usageReporting/__tests__/stats.test.ts | 471 ++ .../defaultSendOperationsAsTrace.ts | 71 + .../usageReporting/durationHistogram.ts | 13 +- .../plugin/usageReporting/iterateOverTrace.ts | 140 + .../src/plugin/usageReporting/options.ts | 26 + .../src/plugin/usageReporting/plugin.ts | 136 +- .../src/plugin/usageReporting/stats.ts | 355 + 14 files changed, 7355 insertions(+), 138 deletions(-) create mode 100644 packages/apollo-server-core/src/plugin/usageReporting/__tests__/__snapshots__/stats.test.ts.snap create mode 100644 packages/apollo-server-core/src/plugin/usageReporting/__tests__/stats.test.ts create mode 100644 packages/apollo-server-core/src/plugin/usageReporting/defaultSendOperationsAsTrace.ts create mode 100644 packages/apollo-server-core/src/plugin/usageReporting/iterateOverTrace.ts create mode 100644 packages/apollo-server-core/src/plugin/usageReporting/stats.ts diff --git a/package-lock.json b/package-lock.json index 1b97bb96c62..1b8aab550c6 100644 --- a/package-lock.json +++ b/package-lock.json @@ -4,9 +4,9 @@ "lockfileVersion": 1, "dependencies": { "@apollo/protobufjs": { - "version": "1.0.3", - "resolved": "https://registry.npmjs.org/@apollo/protobufjs/-/protobufjs-1.0.3.tgz", - "integrity": "sha512-gqeT810Ect9WIqsrgfUvr+ljSB5m1PyBae9HGdrRyQ3HjHjTcjVvxpsMYXlUk4rUHnrfUqyoGvLSy2yLlRGEOw==", + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/@apollo/protobufjs/-/protobufjs-1.2.0.tgz", + "integrity": "sha512-TBgsADig/K4Hx71uQO6KPLxgoE/ORhPGh/HgouHru+cum8RLDfAfEY5Dde+cNala+luGI2X4Rs42pLWRud7/WA==", "requires": { "@protobufjs/aspromise": "^1.1.2", "@protobufjs/base64": "^1.1.2", @@ -24,9 +24,9 @@ }, "dependencies": { "@types/node": { - "version": "10.17.5", - "resolved": "https://registry.npmjs.org/@types/node/-/node-10.17.5.tgz", - "integrity": "sha512-RElZIr/7JreF1eY6oD5RF3kpmdcreuQPjg5ri4oQ5g9sq7YWU8HkfB3eH8GwAwxf5OaCh0VPi7r4N/yoTGelrA==" + "version": "10.17.58", + "resolved": "https://registry.npmjs.org/@types/node/-/node-10.17.58.tgz", + "integrity": 
"sha512-Dn5RBxLohjdHFj17dVVw3rtrZAeXeWg+LQfvxDIW/fdPkSiuQk7h3frKMYtsQhtIW42wkErDcy9UMVxhGW4O7w==" } } }, @@ -4838,9 +4838,9 @@ "dev": true }, "@types/long": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/@types/long/-/long-4.0.0.tgz", - "integrity": "sha512-1w52Nyx4Gq47uuu0EVcsHBxZFJgurQ+rTKS3qMHxR1GY2T8c2AJYd6vZoZ9q1rupaDjU0yT+Jc2XTyXkjeMA+Q==" + "version": "4.0.1", + "resolved": "https://registry.npmjs.org/@types/long/-/long-4.0.1.tgz", + "integrity": "sha512-5tXH6Bx/kNGd3MgffdmP4dy2Z+G4eaXw0SE81Tq3BNadtnMR5/ySMzX4SLEzHJzSmPNn4HIdpQsBvXMUykr58w==" }, "@types/lru-cache": { "version": "5.1.0", @@ -5437,7 +5437,7 @@ "apollo-reporting-protobuf": { "version": "file:packages/apollo-reporting-protobuf", "requires": { - "@apollo/protobufjs": "^1.0.3" + "@apollo/protobufjs": "1.2.0" } }, "apollo-server": { @@ -7943,9 +7943,9 @@ "integrity": "sha512-bd79DPpx+1Ilh9+30aT5O1sgpQd4Ttg8oqkqi51ZzhedMM1omD2e6IOF48Z/DzDCZ2svp49tN/3vneTK6ZBkXw==" }, "core-js-pure": { - "version": "3.10.2", - "resolved": "https://registry.npmjs.org/core-js-pure/-/core-js-pure-3.10.2.tgz", - "integrity": "sha512-uu18pVHQ21n4mzfuSlCXpucu5VKsck3j2m5fjrBOBqqdgWAxwdCgUuGWj6cDDPN1zLj/qtiqKvBMxWgDeeu49Q==" + "version": "3.11.0", + "resolved": "https://registry.npmjs.org/core-js-pure/-/core-js-pure-3.11.0.tgz", + "integrity": "sha512-PxEiQGjzC+5qbvE7ZIs5Zn6BynNeZO9zHhrrWmkRff2SZLq0CE/H5LuZOJHhmOQ8L38+eMzEHAmPYWrUtDfuDQ==" }, "core-util-is": { "version": "1.0.2", diff --git a/packages/apollo-reporting-protobuf/package.json b/packages/apollo-reporting-protobuf/package.json index 670b6219d42..6e13bc1135a 100644 --- a/packages/apollo-reporting-protobuf/package.json +++ b/packages/apollo-reporting-protobuf/package.json @@ -7,7 +7,7 @@ "scripts": { "clean": "git clean -fdX -- dist", "prepare": "npm run clean && mkdir dist && npm run pbjs && npm run pbts && cp src/* dist", - "pbjs": "apollo-pbjs --target static-module --out dist/protobuf.js --wrap commonjs --force-number src/reports.proto", + "pbjs": "apollo-pbjs --target static-module --out dist/protobuf.js --wrap commonjs --force-number --no-from-object src/reports.proto", "pbts": "apollo-pbts -o dist/protobuf.d.ts dist/protobuf.js", "update-proto": "curl -sSfo src/reports.proto https://usage-reporting.api.apollographql.com/proto/reports.proto" }, @@ -29,6 +29,6 @@ }, "homepage": "https://github.com/apollographql/apollo-server#readme", "dependencies": { - "@apollo/protobufjs": "^1.0.3" + "@apollo/protobufjs": "1.2.0" } } diff --git a/packages/apollo-reporting-protobuf/src/index.js b/packages/apollo-reporting-protobuf/src/index.js index f50831c507c..3a56610a603 100644 --- a/packages/apollo-reporting-protobuf/src/index.js +++ b/packages/apollo-reporting-protobuf/src/index.js @@ -3,29 +3,9 @@ const protobufJS = require('@apollo/protobufjs/minimal'); // Remove Long support. Our uint64s tend to be small (less // than 104 days). +// XXX Just remove this in our fork? // https://github.com/protobufjs/protobuf.js/issues/1253 protobufJS.util.Long = undefined; protobufJS.configure(); -// Override the generated protobuf Traces.encode function so that it will look -// for Traces that are already encoded to Buffer as well as unencoded -// Traces. This amortizes the protobuf encoding time over each generated Trace -// instead of bunching it all up at once at sendReport time. In load tests, this -// change improved p99 end-to-end HTTP response times by a factor of 11 without -// a casually noticeable effect on p50 times. 
This also makes it easier for us -// to implement maxUncompressedReportSize as we know the encoded size of traces -// as we go. -const originalTracesAndStatsEncode = protobuf.TracesAndStats.encode; -protobuf.TracesAndStats.encode = function(message, originalWriter) { - const writer = originalTracesAndStatsEncode(message, originalWriter); - const encodedTraces = message.encodedTraces; - if (encodedTraces != null && encodedTraces.length) { - for (let i = 0; i < encodedTraces.length; ++i) { - writer.uint32(/* id 1, wireType 2 =*/ 10); - writer.bytes(encodedTraces[i]); - } - } - return writer; -}; - module.exports = protobuf; diff --git a/packages/apollo-reporting-protobuf/src/reports.proto b/packages/apollo-reporting-protobuf/src/reports.proto index 1f23c9fa913..e6aadea7261 100644 --- a/packages/apollo-reporting-protobuf/src/reports.proto +++ b/packages/apollo-reporting-protobuf/src/reports.proto @@ -375,6 +375,10 @@ message ContextualizedStats { // A sequence of traces and stats. An individual trace should either be counted as a stat or trace message TracesAndStats { - repeated Trace trace = 1; + repeated Trace trace = 1 [(js_preEncoded)=true]; repeated ContextualizedStats stats_with_context = 2 [(js_use_toArray)=true]; + // This field is used to validate that the algorithm used to construct `stats_with_context` + // matches similar algorithms in Apollo's servers. It is otherwise ignored and should not + // be included in reports. + repeated Trace internal_traces_contributing_to_stats = 3 [(js_preEncoded)=true]; } diff --git a/packages/apollo-server-core/src/plugin/traceTreeBuilder.ts b/packages/apollo-server-core/src/plugin/traceTreeBuilder.ts index de3e5a61c01..9426013329b 100644 --- a/packages/apollo-server-core/src/plugin/traceTreeBuilder.ts +++ b/packages/apollo-server-core/src/plugin/traceTreeBuilder.ts @@ -261,7 +261,7 @@ function errorToProtobufError(error: GraphQLError): Trace.Error { } // Converts a JS Date into a Timestamp. 
-function dateToProtoTimestamp(date: Date): google.protobuf.Timestamp { +export function dateToProtoTimestamp(date: Date): google.protobuf.Timestamp { const totalMillis = +date; const millis = totalMillis % 1000; return new google.protobuf.Timestamp({ diff --git a/packages/apollo-server-core/src/plugin/usageReporting/__tests__/__snapshots__/stats.test.ts.snap b/packages/apollo-server-core/src/plugin/usageReporting/__tests__/__snapshots__/stats.test.ts.snap new file mode 100644 index 00000000000..809bc96f2e4 --- /dev/null +++ b/packages/apollo-server-core/src/plugin/usageReporting/__tests__/__snapshots__/stats.test.ts.snap @@ -0,0 +1,6126 @@ +// Jest Snapshot v1, https://goo.gl/fbAQLP + +exports[`Check query latency stats when adding a forbidden trace 1`] = ` +OurContextualizedStats { + "context": Object { + "clientReferenceId": "reference", + "clientVersion": "version", + }, + "perTypeStat": Object {}, + "queryLatencyStats": OurQueryLatencyStats { + "cacheHits": 0, + "cacheLatencyCount": DurationHistogram { + "buckets": Array [ + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + ], + }, + "forbiddenOperationCount": 1, + "latencyCount": DurationHistogram { + "buckets": Array [ + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 1, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + ], + }, + "persistedQueryHits": 0, + "persistedQueryMisses": 0, + "privateCacheTtlCount": DurationHistogram { + "buckets": Array [ + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + ], + }, + "publicCacheTtlCount": DurationHistogram { + "buckets": Array [ + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + ], + }, + "registeredOperationCount": 0, + "requestCount": 1, + "requestsWithErrorsCount": 0, + "rootErrorStats": OurPathErrorStats { + "children": Object {}, + "errorsCount": 0, + "requestsWithErrorsCount": 0, + }, + }, +} +`; + +exports[`Check query latency stats when adding a fully cached trace 1`] = ` +OurContextualizedStats { + "context": Object { + "clientReferenceId": "reference", + "clientVersion": "version", + }, + "perTypeStat": Object {}, + "queryLatencyStats": OurQueryLatencyStats { + "cacheHits": 1, + "cacheLatencyCount": DurationHistogram { + "buckets": Array [ + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 
0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 1, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + ], + }, + "forbiddenOperationCount": 0, + "latencyCount": DurationHistogram { + "buckets": Array [ + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + ], + }, + "persistedQueryHits": 0, + "persistedQueryMisses": 0, + "privateCacheTtlCount": DurationHistogram { + "buckets": Array [ + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + ], + }, + "publicCacheTtlCount": DurationHistogram { + "buckets": Array [ + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + ], + }, + "registeredOperationCount": 0, + "requestCount": 1, + "requestsWithErrorsCount": 0, + "rootErrorStats": OurPathErrorStats { + "children": Object {}, + "errorsCount": 0, + "requestsWithErrorsCount": 0, + }, + }, +} +`; + +exports[`Check query latency stats when adding a persisted hit trace 1`] = ` +OurContextualizedStats { + "context": Object { + "clientReferenceId": "reference", + "clientVersion": "version", + }, + "perTypeStat": Object {}, + "queryLatencyStats": OurQueryLatencyStats { + "cacheHits": 0, + "cacheLatencyCount": DurationHistogram { + "buckets": Array [ + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + ], + }, + "forbiddenOperationCount": 0, + "latencyCount": DurationHistogram { + "buckets": Array [ + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 1, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + ], + }, + "persistedQueryHits": 1, + "persistedQueryMisses": 0, + "privateCacheTtlCount": DurationHistogram { + "buckets": Array [ + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 
0, + 0, + 0, + 0, + 0, + 0, + ], + }, + "publicCacheTtlCount": DurationHistogram { + "buckets": Array [ + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + ], + }, + "registeredOperationCount": 0, + "requestCount": 1, + "requestsWithErrorsCount": 0, + "rootErrorStats": OurPathErrorStats { + "children": Object {}, + "errorsCount": 0, + "requestsWithErrorsCount": 0, + }, + }, +} +`; + +exports[`Check query latency stats when adding a persisted miss trace 1`] = ` +OurContextualizedStats { + "context": Object { + "clientReferenceId": "reference", + "clientVersion": "version", + }, + "perTypeStat": Object {}, + "queryLatencyStats": OurQueryLatencyStats { + "cacheHits": 0, + "cacheLatencyCount": DurationHistogram { + "buckets": Array [ + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + ], + }, + "forbiddenOperationCount": 0, + "latencyCount": DurationHistogram { + "buckets": Array [ + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 1, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + ], + }, + "persistedQueryHits": 0, + "persistedQueryMisses": 1, + "privateCacheTtlCount": DurationHistogram { + "buckets": Array [ + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + ], + }, + "publicCacheTtlCount": DurationHistogram { + "buckets": Array [ + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + ], + }, + "registeredOperationCount": 0, + "requestCount": 1, + "requestsWithErrorsCount": 0, + "rootErrorStats": OurPathErrorStats { + "children": Object {}, + "errorsCount": 0, + "requestsWithErrorsCount": 0, + }, + }, +} +`; + +exports[`Check query latency stats when adding a private cached trace 1`] = ` +OurContextualizedStats { + "context": Object { + "clientReferenceId": "reference", + "clientVersion": "version", + }, + "perTypeStat": Object {}, + "queryLatencyStats": OurQueryLatencyStats { + "cacheHits": 0, + "cacheLatencyCount": DurationHistogram { + "buckets": Array [ + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 
0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + ], + }, + "forbiddenOperationCount": 0, + "latencyCount": DurationHistogram { + "buckets": Array [ + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 1, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + ], + }, + "persistedQueryHits": 0, + "persistedQueryMisses": 0, + "privateCacheTtlCount": DurationHistogram { + "buckets": Array [ + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + ], + }, + "publicCacheTtlCount": DurationHistogram { + "buckets": Array [ + 1, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + ], + }, + "registeredOperationCount": 0, + "requestCount": 1, + "requestsWithErrorsCount": 0, + "rootErrorStats": OurPathErrorStats { + "children": Object {}, + "errorsCount": 0, + "requestsWithErrorsCount": 0, + }, + }, +} +`; + +exports[`Check query latency stats when adding a public cached trace 1`] = ` +OurContextualizedStats { + "context": Object { + "clientReferenceId": "reference", + "clientVersion": "version", + }, + "perTypeStat": Object {}, + "queryLatencyStats": OurQueryLatencyStats { + "cacheHits": 0, + "cacheLatencyCount": DurationHistogram { + "buckets": Array [ + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + ], + }, + "forbiddenOperationCount": 0, + "latencyCount": DurationHistogram { + "buckets": Array [ + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 1, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + ], + }, + "persistedQueryHits": 0, + "persistedQueryMisses": 0, + "privateCacheTtlCount": DurationHistogram { + "buckets": Array [ + 1, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 
0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + ], + }, + "publicCacheTtlCount": DurationHistogram { + "buckets": Array [ + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + ], + }, + "registeredOperationCount": 0, + "requestCount": 1, + "requestsWithErrorsCount": 0, + "rootErrorStats": OurPathErrorStats { + "children": Object {}, + "errorsCount": 0, + "requestsWithErrorsCount": 0, + }, + }, +} +`; + +exports[`Check query latency stats when adding a registered trace 1`] = ` +OurContextualizedStats { + "context": Object { + "clientReferenceId": "reference", + "clientVersion": "version", + }, + "perTypeStat": Object {}, + "queryLatencyStats": OurQueryLatencyStats { + "cacheHits": 0, + "cacheLatencyCount": DurationHistogram { + "buckets": Array [ + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + ], + }, + "forbiddenOperationCount": 0, + "latencyCount": DurationHistogram { + "buckets": Array [ + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 1, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + ], + }, + "persistedQueryHits": 0, + "persistedQueryMisses": 0, + "privateCacheTtlCount": DurationHistogram { + "buckets": Array [ + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + ], + }, + "publicCacheTtlCount": DurationHistogram { + "buckets": Array [ + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + ], + }, + "registeredOperationCount": 1, + "requestCount": 1, + "requestsWithErrorsCount": 0, + "rootErrorStats": OurPathErrorStats { + "children": Object {}, + "errorsCount": 0, + "requestsWithErrorsCount": 0, + }, + }, +} +`; + +exports[`Check query latency stats when adding a single trace 1`] = ` +OurContextualizedStats { + "context": Object { + "clientReferenceId": "reference", + "clientVersion": "version", + }, + "perTypeStat": Object {}, + "queryLatencyStats": OurQueryLatencyStats { + "cacheHits": 0, + "cacheLatencyCount": DurationHistogram { + "buckets": Array [ + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, 
+ 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + ], + }, + "forbiddenOperationCount": 0, + "latencyCount": DurationHistogram { + "buckets": Array [ + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 1, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + ], + }, + "persistedQueryHits": 0, + "persistedQueryMisses": 0, + "privateCacheTtlCount": DurationHistogram { + "buckets": Array [ + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + ], + }, + "publicCacheTtlCount": DurationHistogram { + "buckets": Array [ + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + ], + }, + "registeredOperationCount": 0, + "requestCount": 1, + "requestsWithErrorsCount": 0, + "rootErrorStats": OurPathErrorStats { + "children": Object {}, + "errorsCount": 0, + "requestsWithErrorsCount": 0, + }, + }, +} +`; + +exports[`Check query latency stats when adding an errored trace 1`] = ` +OurContextualizedStats { + "context": Object { + "clientReferenceId": "reference", + "clientVersion": "version", + }, + "perTypeStat": Object {}, + "queryLatencyStats": OurQueryLatencyStats { + "cacheHits": 0, + "cacheLatencyCount": DurationHistogram { + "buckets": Array [ + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + ], + }, + "forbiddenOperationCount": 0, + "latencyCount": DurationHistogram { + "buckets": Array [ + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 1, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + ], + }, + "persistedQueryHits": 0, + "persistedQueryMisses": 0, + "privateCacheTtlCount": DurationHistogram { + "buckets": Array [ + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, 
+ 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + ], + }, + "publicCacheTtlCount": DurationHistogram { + "buckets": Array [ + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + ], + }, + "registeredOperationCount": 1, + "requestCount": 1, + "requestsWithErrorsCount": 1, + "rootErrorStats": OurPathErrorStats { + "children": Object { + "user": OurPathErrorStats { + "children": Object {}, + "errorsCount": 1, + "requestsWithErrorsCount": 1, + }, + }, + "errorsCount": 0, + "requestsWithErrorsCount": 0, + }, + }, +} +`; + +exports[`Check query latency stats when merging errored traces 1`] = ` +OurContextualizedStats { + "context": Object { + "clientReferenceId": "reference", + "clientVersion": "version", + }, + "perTypeStat": Object {}, + "queryLatencyStats": OurQueryLatencyStats { + "cacheHits": 0, + "cacheLatencyCount": DurationHistogram { + "buckets": Array [ + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + ], + }, + "forbiddenOperationCount": 0, + "latencyCount": DurationHistogram { + "buckets": Array [ + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 4, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + ], + }, + "persistedQueryHits": 0, + "persistedQueryMisses": 0, + "privateCacheTtlCount": DurationHistogram { + "buckets": Array [ + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + ], + }, + "publicCacheTtlCount": DurationHistogram { + "buckets": Array [ + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + ], + }, + "registeredOperationCount": 4, + "requestCount": 4, + "requestsWithErrorsCount": 4, + "rootErrorStats": OurPathErrorStats { + "children": Object { + "account": OurPathErrorStats { + "children": Object { + "name": OurPathErrorStats { + "children": Object {}, + "errorsCount": 1, + "requestsWithErrorsCount": 1, + }, + }, + "errorsCount": 0, + "requestsWithErrorsCount": 0, + }, + "user": OurPathErrorStats { + "children": Object { + "email": OurPathErrorStats { + "children": Object {}, + "errorsCount": 2, + "requestsWithErrorsCount": 2, + }, + }, + 
"errorsCount": 1, + "requestsWithErrorsCount": 1, + }, + }, + "errorsCount": 0, + "requestsWithErrorsCount": 0, + }, + }, +} +`; + +exports[`Check query latency stats when merging non-errored traces 1`] = ` +OurContextualizedStats { + "context": Object { + "clientReferenceId": "reference", + "clientVersion": "version", + }, + "perTypeStat": Object {}, + "queryLatencyStats": OurQueryLatencyStats { + "cacheHits": 2, + "cacheLatencyCount": DurationHistogram { + "buckets": Array [ + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 2, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + ], + }, + "forbiddenOperationCount": 0, + "latencyCount": DurationHistogram { + "buckets": Array [ + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 4, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + ], + }, + "persistedQueryHits": 0, + "persistedQueryMisses": 0, + "privateCacheTtlCount": DurationHistogram { + "buckets": Array [ + 2, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + ], + }, + "publicCacheTtlCount": DurationHistogram { + "buckets": Array [ + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + ], + }, + "registeredOperationCount": 0, + "requestCount": 6, + "requestsWithErrorsCount": 0, + "rootErrorStats": OurPathErrorStats { + "children": Object {}, + "errorsCount": 0, + "requestsWithErrorsCount": 0, + }, + }, +} +`; + +exports[`Check type stats add multiple errored traces trace 1`] = ` +OurContextualizedStats { + "context": Object { + "clientReferenceId": "reference", + "clientVersion": "version", + }, + "perTypeStat": Object { + "Query": OurTypeStat { + "perFieldStat": Object { + "user": OurFieldStat { + "count": 2, + "errorsCount": 0, + "latencyCount": DurationHistogram { + "buckets": Array [ + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 2, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + ], + }, + "requestsWithErrorsCount": 0, + "returnType": "User!", + }, + }, + }, + "User": OurTypeStat { + "perFieldStat": Object { + "email": OurFieldStat { + "count": 2, + "errorsCount": 4, + "latencyCount": DurationHistogram { + "buckets": Array [ + 2, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, 
+ 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + ], + }, + "requestsWithErrorsCount": 2, + "returnType": "String!", + }, + "friends": OurFieldStat { + "count": 2, + "errorsCount": 0, + "latencyCount": DurationHistogram { + "buckets": Array [ + 2, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + ], + }, + "requestsWithErrorsCount": 0, + "returnType": "[String!]!", + }, + }, + }, + }, + "queryLatencyStats": OurQueryLatencyStats { + "cacheHits": 0, + "cacheLatencyCount": DurationHistogram { + "buckets": Array [ + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + ], + }, + "forbiddenOperationCount": 0, + "latencyCount": DurationHistogram { + "buckets": Array [ + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 2, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + ], + }, + "persistedQueryHits": 0, + "persistedQueryMisses": 0, + "privateCacheTtlCount": DurationHistogram { + "buckets": Array [ + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + ], + }, + "publicCacheTtlCount": DurationHistogram { + "buckets": Array [ + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + ], + }, + "registeredOperationCount": 2, + "requestCount": 2, + "requestsWithErrorsCount": 2, + "rootErrorStats": OurPathErrorStats { + "children": Object { + "user": OurPathErrorStats { + "children": Object { + "email": OurPathErrorStats { + "children": Object {}, + "errorsCount": 4, + "requestsWithErrorsCount": 2, + }, + }, + "errorsCount": 0, + "requestsWithErrorsCount": 0, + }, + }, + "errorsCount": 0, + "requestsWithErrorsCount": 0, + }, + }, +} +`; + +exports[`Check type stats add multiple federated trace 1`] = ` +OurContextualizedStats { + "context": Object { + "clientReferenceId": "reference", + "clientVersion": "version", + }, + 
"perTypeStat": Object { + "Query": OurTypeStat { + "perFieldStat": Object { + "user": OurFieldStat { + "count": 2, + "errorsCount": 0, + "latencyCount": DurationHistogram { + "buckets": Array [ + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 2, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + ], + }, + "requestsWithErrorsCount": 0, + "returnType": "User!", + }, + }, + }, + "User": OurTypeStat { + "perFieldStat": Object { + "email": OurFieldStat { + "count": 2, + "errorsCount": 0, + "latencyCount": DurationHistogram { + "buckets": Array [ + 2, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + ], + }, + "requestsWithErrorsCount": 0, + "returnType": "String!", + }, + "friends": OurFieldStat { + "count": 2, + "errorsCount": 0, + "latencyCount": DurationHistogram { + "buckets": Array [ + 2, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + ], + }, + "requestsWithErrorsCount": 0, + "returnType": "[String!]!", + }, + }, + }, + }, + "queryLatencyStats": OurQueryLatencyStats { + "cacheHits": 0, + "cacheLatencyCount": DurationHistogram { + "buckets": Array [ + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + ], + }, + "forbiddenOperationCount": 0, + "latencyCount": DurationHistogram { + "buckets": Array [ + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 2, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + ], + }, + "persistedQueryHits": 0, + "persistedQueryMisses": 0, + "privateCacheTtlCount": DurationHistogram { + "buckets": Array [ + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + ], + }, + "publicCacheTtlCount": DurationHistogram { + "buckets": Array [ + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 
0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + ], + }, + "registeredOperationCount": 2, + "requestCount": 2, + "requestsWithErrorsCount": 0, + "rootErrorStats": OurPathErrorStats { + "children": Object {}, + "errorsCount": 0, + "requestsWithErrorsCount": 0, + }, + }, +} +`; + +exports[`Check type stats add multiple non-federated trace 1`] = ` +OurContextualizedStats { + "context": Object { + "clientReferenceId": "reference", + "clientVersion": "version", + }, + "perTypeStat": Object { + "Query": OurTypeStat { + "perFieldStat": Object { + "user": OurFieldStat { + "count": 2, + "errorsCount": 0, + "latencyCount": DurationHistogram { + "buckets": Array [ + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 2, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + ], + }, + "requestsWithErrorsCount": 0, + "returnType": "User!", + }, + }, + }, + "User": OurTypeStat { + "perFieldStat": Object { + "email": OurFieldStat { + "count": 2, + "errorsCount": 0, + "latencyCount": DurationHistogram { + "buckets": Array [ + 2, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + ], + }, + "requestsWithErrorsCount": 0, + "returnType": "String!", + }, + "friends": OurFieldStat { + "count": 2, + "errorsCount": 0, + "latencyCount": DurationHistogram { + "buckets": Array [ + 2, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + ], + }, + "requestsWithErrorsCount": 0, + "returnType": "[String!]!", + }, + }, + }, + }, + "queryLatencyStats": OurQueryLatencyStats { + "cacheHits": 0, + "cacheLatencyCount": DurationHistogram { + "buckets": Array [ + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + ], + }, + "forbiddenOperationCount": 0, + "latencyCount": DurationHistogram { + "buckets": Array [ + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 2, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + ], + }, + "persistedQueryHits": 0, + "persistedQueryMisses": 0, + "privateCacheTtlCount": DurationHistogram { + 
"buckets": Array [ + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + ], + }, + "publicCacheTtlCount": DurationHistogram { + "buckets": Array [ + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + ], + }, + "registeredOperationCount": 2, + "requestCount": 2, + "requestsWithErrorsCount": 0, + "rootErrorStats": OurPathErrorStats { + "children": Object {}, + "errorsCount": 0, + "requestsWithErrorsCount": 0, + }, + }, +} +`; + +exports[`Check type stats add single non-federated trace 1`] = ` +OurContextualizedStats { + "context": Object { + "clientReferenceId": "reference", + "clientVersion": "version", + }, + "perTypeStat": Object { + "Query": OurTypeStat { + "perFieldStat": Object { + "user": OurFieldStat { + "count": 1, + "errorsCount": 0, + "latencyCount": DurationHistogram { + "buckets": Array [ + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 1, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + ], + }, + "requestsWithErrorsCount": 0, + "returnType": "User!", + }, + }, + }, + "User": OurTypeStat { + "perFieldStat": Object { + "email": OurFieldStat { + "count": 1, + "errorsCount": 0, + "latencyCount": DurationHistogram { + "buckets": Array [ + 1, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + ], + }, + "requestsWithErrorsCount": 0, + "returnType": "String!", + }, + "friends": OurFieldStat { + "count": 1, + "errorsCount": 0, + "latencyCount": DurationHistogram { + "buckets": Array [ + 1, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + ], + }, + "requestsWithErrorsCount": 0, + "returnType": "[String!]!", + }, + }, + }, + }, + "queryLatencyStats": OurQueryLatencyStats { + "cacheHits": 0, + "cacheLatencyCount": DurationHistogram { + "buckets": Array [ + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, 
+ 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + ], + }, + "forbiddenOperationCount": 0, + "latencyCount": DurationHistogram { + "buckets": Array [ + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 1, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + ], + }, + "persistedQueryHits": 0, + "persistedQueryMisses": 0, + "privateCacheTtlCount": DurationHistogram { + "buckets": Array [ + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + ], + }, + "publicCacheTtlCount": DurationHistogram { + "buckets": Array [ + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + ], + }, + "registeredOperationCount": 1, + "requestCount": 1, + "requestsWithErrorsCount": 0, + "rootErrorStats": OurPathErrorStats { + "children": Object {}, + "errorsCount": 0, + "requestsWithErrorsCount": 0, + }, + }, +} +`; diff --git a/packages/apollo-server-core/src/plugin/usageReporting/__tests__/plugin.test.ts b/packages/apollo-server-core/src/plugin/usageReporting/__tests__/plugin.test.ts index e77bce421b8..76e497d4032 100644 --- a/packages/apollo-server-core/src/plugin/usageReporting/__tests__/plugin.test.ts +++ b/packages/apollo-server-core/src/plugin/usageReporting/__tests__/plugin.test.ts @@ -6,7 +6,7 @@ import { ApolloServerPluginUsageReporting, } from '../plugin'; import { Headers } from 'apollo-server-env'; -import { Trace, Report } from 'apollo-reporting-protobuf'; +import { Trace, Report, ITrace } from 'apollo-reporting-protobuf'; import pluginTestHarness from 'apollo-server-core/dist/utils/pluginTestHarness'; import nock from 'nock'; import { gunzipSync } from 'zlib'; @@ -88,7 +88,8 @@ describe('end-to-end', () => { query: query ?? defaultQuery, // If operation name is specified use it. If it is specified as null convert it to // undefined because graphqlRequest expects string | undefined - operationName: operationName === undefined ? 'q' : (operationName || undefined), + operationName: + operationName === undefined ? 
'q' : operationName || undefined, extensions: { clientName: 'testing suite', }, @@ -121,62 +122,64 @@ describe('end-to-end', () => { expect(Object.keys(report!.tracesPerQuery)).toHaveLength(1); expect(Object.keys(report!.tracesPerQuery)[0]).toMatch(/^# q\n/); - const traces = Object.values(report!.tracesPerQuery)[0].trace; + const traces = Object.values(report!.tracesPerQuery)[0]!.trace; expect(traces).toHaveLength(1); expect( - traces![0].root!.child!.some( + (traces![0] as ITrace).root!.child!.some( ({ responseName }) => responseName === 'aBoolean', ), ).toBeTruthy(); }); - it('fails parse for non-parseable gql', async () => { - const { report } = await runTest({ query: 'random text' }); - expect(Object.keys(report!.tracesPerQuery)).toHaveLength(1); - expect(Object.keys(report!.tracesPerQuery)[0]).toBe( - '## GraphQLParseFailure\n', - ); - const traces = Object.values(report!.tracesPerQuery)[0].trace; - expect(traces).toHaveLength(1); - }); - - it('validation fails for invalid operation', async () => { - const { report } = await runTest({ query: 'query q { nonExistentField }' }); - expect(Object.keys(report!.tracesPerQuery)).toHaveLength(1); - expect(Object.keys(report!.tracesPerQuery)[0]).toBe( - '## GraphQLValidationFailure\n', - ); - const traces = Object.values(report!.tracesPerQuery)[0].trace; - expect(traces).toHaveLength(1); - }); - - it('unknown operation error if not specified', async () => { - const { report } = await runTest({ query: 'query notQ { aString }' }); - expect(Object.keys(report!.tracesPerQuery)).toHaveLength(1); - expect(Object.keys(report!.tracesPerQuery)[0]).toBe( - '## GraphQLUnknownOperationName\n', - ); - const traces = Object.values(report!.tracesPerQuery)[0].trace; - expect(traces).toHaveLength(1); - }); - - it('handles anonymous operation', async () => { - const { report } = await runTest({ - query: 'query { aString }', - operationName: null, - }); - expect(Object.keys(report!.tracesPerQuery)).toHaveLength(1); - expect(Object.keys(report!.tracesPerQuery)[0]).toMatch(/^# -\n/); - const traces = Object.values(report!.tracesPerQuery)[0].trace; - expect(traces).toHaveLength(1); - }); + [ + { + testName: 'fails parse for non-parseable gql', + op: { query: 'random text' }, + statsReportKey: '## GraphQLParseFailure\n', + }, + { + testName: 'validation fails for invalid operation', + op: { query: 'query q { nonExistentField }' }, + statsReportKey: '## GraphQLValidationFailure\n', + }, + { + testName: 'unknown operation error if not specified', + op: { query: 'query notQ { aString }' }, + statsReportKey: '## GraphQLUnknownOperationName\n', + }, + { + testName: 'handles anonymous operation', + op: { + query: 'query { aString }', + operationName: null, + }, + statsReportKey: '# -\n{aString}', + }, + { + testName: 'handles named operation', + op: { + query: 'query bar { aString } query foo { aBoolean }', + operationName: 'foo', + }, + statsReportKey: '# foo\nquery foo{aBoolean}', + }, + ].forEach(({ testName, op, statsReportKey }) => + it(testName, async () => { + const { report } = await runTest(op); + const queryEntries = Object.entries(report!.tracesPerQuery); + expect(queryEntries).toHaveLength(1); + expect(queryEntries[0][0]).toBe(statsReportKey); + const traces = queryEntries[0][1]!.trace; + expect(traces).toHaveLength(1); + }), + ); describe('includeRequest', () => { it('include based on operation name', async () => { const { report, context } = await runTest({ pluginOptions: { includeRequest: async (request: any) => { - await new Promise((res) => setTimeout(() 
=> res(), 1)); + await new Promise((res) => setTimeout(() => res(), 1)); return request.request.operationName === 'q'; }, }, @@ -188,7 +191,7 @@ describe('end-to-end', () => { const { context } = await runTest({ pluginOptions: { includeRequest: async (request: any) => { - await new Promise((res) => setTimeout(() => res(), 1)); + await new Promise((res) => setTimeout(() => res(), 1)); return request.request.operationName === 'not_q'; }, }, diff --git a/packages/apollo-server-core/src/plugin/usageReporting/__tests__/stats.test.ts b/packages/apollo-server-core/src/plugin/usageReporting/__tests__/stats.test.ts new file mode 100644 index 00000000000..59a061551f3 --- /dev/null +++ b/packages/apollo-server-core/src/plugin/usageReporting/__tests__/stats.test.ts @@ -0,0 +1,471 @@ +import { Trace } from 'apollo-reporting-protobuf'; +import { dateToProtoTimestamp } from '../../traceTreeBuilder'; +import { OurContextualizedStats, SizeEstimator } from '../stats'; +import { DurationHistogram } from '../durationHistogram'; + +const statsContext = { + clientReferenceId: 'reference', + clientVersion: 'version', +}; + +const baseDate = new Date(); +const duration = 30 * 1000; +const baseTrace = new Trace({ + startTime: dateToProtoTimestamp(baseDate), + endTime: dateToProtoTimestamp(new Date(baseDate.getTime() + duration)), + durationNs: duration, + root: null, + signature: 'signature', + details: null, +}); +// TODO: add a federated trace +describe('Check query latency stats when', () => { + it('adding a single trace', () => { + const contextualizedStats = new OurContextualizedStats(statsContext); + contextualizedStats.addTrace(baseTrace, new SizeEstimator()); + expect(contextualizedStats.queryLatencyStats.requestCount).toBe(1); + expect(contextualizedStats.queryLatencyStats.latencyCount).toStrictEqual( + new DurationHistogram().incrementDuration(duration), + ); + expect(contextualizedStats.queryLatencyStats.requestsWithErrorsCount).toBe( + 0, + ); + expect(contextualizedStats).toMatchSnapshot(); + }); + it('adding a fully cached trace', () => { + const contextualizedStats = new OurContextualizedStats(statsContext); + contextualizedStats.addTrace( + new Trace({ + ...baseTrace, + fullQueryCacheHit: true, + }), + new SizeEstimator(), + ); + expect(contextualizedStats.queryLatencyStats.requestCount).toBe(1); + expect(contextualizedStats.queryLatencyStats.cacheHits).toBe(1); + expect( + contextualizedStats.queryLatencyStats.cacheLatencyCount, + ).toStrictEqual(new DurationHistogram().incrementDuration(duration)); + expect(contextualizedStats).toMatchSnapshot(); + }); + it('adding a public cached trace', () => { + const contextualizedStats = new OurContextualizedStats(statsContext); + contextualizedStats.addTrace( + new Trace({ + ...baseTrace, + fullQueryCacheHit: false, + cachePolicy: { + scope: Trace.CachePolicy.Scope.PRIVATE, + maxAgeNs: 1000, + }, + }), + new SizeEstimator(), + ); + expect(contextualizedStats.queryLatencyStats.requestCount).toBe(1); + expect( + contextualizedStats.queryLatencyStats.privateCacheTtlCount, + ).toStrictEqual(new DurationHistogram().incrementDuration(1000)); + expect(contextualizedStats).toMatchSnapshot(); + }); + it('adding a private cached trace', () => { + const contextualizedStats = new OurContextualizedStats(statsContext); + contextualizedStats.addTrace( + new Trace({ + ...baseTrace, + fullQueryCacheHit: false, + cachePolicy: { + scope: Trace.CachePolicy.Scope.PUBLIC, + maxAgeNs: 1000, + }, + }), + new SizeEstimator(), + ); + 
expect(contextualizedStats.queryLatencyStats.requestCount).toBe(1); + expect( + contextualizedStats.queryLatencyStats.publicCacheTtlCount, + ).toStrictEqual(new DurationHistogram().incrementDuration(1000)); + expect(contextualizedStats).toMatchSnapshot(); + }); + it('adding a persisted hit trace', () => { + const contextualizedStats = new OurContextualizedStats(statsContext); + contextualizedStats.addTrace( + new Trace({ + ...baseTrace, + persistedQueryHit: true, + }), + new SizeEstimator(), + ); + expect(contextualizedStats.queryLatencyStats.requestCount).toBe(1); + expect(contextualizedStats.queryLatencyStats.persistedQueryHits).toBe(1); + expect(contextualizedStats).toMatchSnapshot(); + }); + it('adding a persisted miss trace', () => { + const contextualizedStats = new OurContextualizedStats(statsContext); + contextualizedStats.addTrace( + new Trace({ + ...baseTrace, + persistedQueryRegister: true, + }), + new SizeEstimator(), + ); + expect(contextualizedStats.queryLatencyStats.requestCount).toBe(1); + expect(contextualizedStats.queryLatencyStats.persistedQueryMisses).toBe(1); + expect(contextualizedStats).toMatchSnapshot(); + }); + it('adding a forbidden trace', () => { + const contextualizedStats = new OurContextualizedStats(statsContext); + contextualizedStats.addTrace( + new Trace({ + ...baseTrace, + forbiddenOperation: true, + }), + new SizeEstimator(), + ); + expect(contextualizedStats.queryLatencyStats.requestCount).toBe(1); + expect(contextualizedStats.queryLatencyStats.forbiddenOperationCount).toBe( + 1, + ); + expect(contextualizedStats).toMatchSnapshot(); + }); + it('adding a registered trace', () => { + const contextualizedStats = new OurContextualizedStats(statsContext); + contextualizedStats.addTrace( + new Trace({ + ...baseTrace, + registeredOperation: true, + }), + new SizeEstimator(), + ); + expect(contextualizedStats.queryLatencyStats.requestCount).toBe(1); + expect(contextualizedStats.queryLatencyStats.registeredOperationCount).toBe( + 1, + ); + expect(contextualizedStats).toMatchSnapshot(); + }); + it('adding an errored trace ', () => { + const contextualizedStats = new OurContextualizedStats(statsContext); + contextualizedStats.addTrace( + new Trace({ + ...baseTrace, + registeredOperation: true, + root: { + child: [ + { + responseName: 'user', + parentType: 'Query', + type: 'User!', + error: [ + { + message: 'error 1', + }, + ], + }, + ], + }, + }), + new SizeEstimator(), + ); + expect(contextualizedStats.queryLatencyStats.requestCount).toBe(1); + expect( + contextualizedStats.queryLatencyStats.rootErrorStats.children['user'] + .requestsWithErrorsCount, + ).toBe(1); + expect( + contextualizedStats.queryLatencyStats.rootErrorStats.children['user'] + .errorsCount, + ).toBe(1); + expect(contextualizedStats).toMatchSnapshot(); + }); + it('merging errored traces', () => { + const contextualizedStats = new OurContextualizedStats(statsContext); + contextualizedStats.addTrace( + new Trace({ + ...baseTrace, + registeredOperation: true, + root: { + child: [ + { + responseName: 'user', + parentType: 'Query', + type: 'User!', + error: [ + { + message: 'error 1', + }, + ], + }, + ], + }, + }), + new SizeEstimator(), + ); + contextualizedStats.addTrace( + new Trace({ + ...baseTrace, + registeredOperation: true, + root: { + child: [ + { + responseName: 'account', + parentType: 'Query', + type: 'Account!', + child: [ + { + responseName: 'name', + parentType: 'Account', + type: 'String!', + error: [ + { + message: 'has error', + }, + ], + }, + ], + }, + ], + }, + }), + new 
SizeEstimator(), + ); + for (let _ in [1, 2]) { + contextualizedStats.addTrace( + new Trace({ + ...baseTrace, + registeredOperation: true, + root: { + child: [ + { + responseName: 'user', + parentType: 'Query', + type: 'User!', + child: [ + { + responseName: 'email', + parentType: 'User', + type: 'String!', + error: [ + { + message: 'has error', + }, + ], + }, + ], + }, + ], + }, + }), + new SizeEstimator(), + ); + } + + expect(contextualizedStats.queryLatencyStats.requestCount).toBe(4); + expect( + contextualizedStats.queryLatencyStats.rootErrorStats.children['user'] + .requestsWithErrorsCount, + ).toBe(1); + expect( + contextualizedStats.queryLatencyStats.rootErrorStats.children['user'] + .errorsCount, + ).toBe(1); + expect( + contextualizedStats.queryLatencyStats.rootErrorStats.children['user'] + .children['email'].requestsWithErrorsCount, + ).toBe(2); + expect( + contextualizedStats.queryLatencyStats.rootErrorStats.children['user'] + .children['email'].errorsCount, + ).toBe(2); + expect( + contextualizedStats.queryLatencyStats.rootErrorStats.children['account'] + .requestsWithErrorsCount, + ).toBeFalsy(); + expect( + contextualizedStats.queryLatencyStats.rootErrorStats.children['account'] + .errorsCount, + ).toBeFalsy(); + expect( + contextualizedStats.queryLatencyStats.rootErrorStats.children['account'] + .children['name'].requestsWithErrorsCount, + ).toBe(1); + expect( + contextualizedStats.queryLatencyStats.rootErrorStats.children['account'] + .children['name'].errorsCount, + ).toBe(1); + expect(contextualizedStats).toMatchSnapshot(); + }); + it('merging non-errored traces', () => { + const contextualizedStats = new OurContextualizedStats(statsContext); + const sizeEstimator = new SizeEstimator(); + contextualizedStats.addTrace(baseTrace, sizeEstimator); + contextualizedStats.addTrace(baseTrace, sizeEstimator); + contextualizedStats.addTrace( + new Trace({ + ...baseTrace, + fullQueryCacheHit: false, + cachePolicy: { + scope: Trace.CachePolicy.Scope.PRIVATE, + maxAgeNs: 1000, + }, + }), + sizeEstimator, + ); + contextualizedStats.addTrace( + new Trace({ + ...baseTrace, + fullQueryCacheHit: false, + cachePolicy: { + scope: Trace.CachePolicy.Scope.PRIVATE, + maxAgeNs: 1000, + }, + }), + sizeEstimator, + ); + for (let _ in [1, 2]) { + contextualizedStats.addTrace( + new Trace({ + ...baseTrace, + fullQueryCacheHit: true, + }), + sizeEstimator, + ); + } + expect(contextualizedStats.queryLatencyStats.requestCount).toBe(6); + expect(contextualizedStats.queryLatencyStats.latencyCount).toStrictEqual( + new DurationHistogram() + .incrementDuration(duration) + .incrementDuration(duration) + .incrementDuration(duration) + .incrementDuration(duration), + ); + expect(contextualizedStats.queryLatencyStats.requestsWithErrorsCount).toBe( + 0, + ); + expect( + contextualizedStats.queryLatencyStats.privateCacheTtlCount, + ).toStrictEqual( + new DurationHistogram().incrementDuration(1000).incrementDuration(1000), + ); + expect(contextualizedStats.queryLatencyStats.cacheHits).toBe(2); + expect( + contextualizedStats.queryLatencyStats.cacheLatencyCount, + ).toStrictEqual( + new DurationHistogram() + .incrementDuration(duration) + .incrementDuration(duration), + ); + expect(contextualizedStats).toMatchSnapshot(); + }); +}); + +describe('Check type stats', () => { + const trace = new Trace({ + ...baseTrace, + registeredOperation: true, + root: { + child: [ + { + originalFieldName: 'user', + responseName: 'user', + parentType: 'Query', + type: 'User!', + startTime: 0, + endTime: 100 * 1000, + child: [ + { + 
originalFieldName: 'email', + responseName: 'email', + parentType: 'User', + type: 'String!', + startTime: 1000, + endTime: 1005, + }, + { + originalFieldName: 'friends', + responseName: 'friends', + parentType: 'User', + type: '[String!]!', + startTime: 1000, + endTime: 1005, + }, + ], + }, + ], + }, + }); + + const federatedTrace = new Trace({ + ...baseTrace, + registeredOperation: true, + queryPlan: new Trace.QueryPlanNode({ + fetch: new Trace.QueryPlanNode.FetchNode({ + serviceName: 'A', + trace: trace, + sentTime: dateToProtoTimestamp(baseDate), + receivedTime: dateToProtoTimestamp( + new Date(baseDate.getTime() + duration), + ), + }), + }), + }); + + const errorTrace = new Trace({ + ...baseTrace, + registeredOperation: true, + root: { + child: [ + { + originalFieldName: 'user', + responseName: 'user', + parentType: 'Query', + type: 'User!', + startTime: 0, + endTime: 100 * 1000, + child: [ + { + originalFieldName: 'email', + responseName: 'email', + parentType: 'User', + type: 'String!', + startTime: 1000, + endTime: 1005, + error: [{ message: 'error message' }, { message: 'error2' }], + }, + { + originalFieldName: 'friends', + responseName: 'friends', + parentType: 'User', + type: '[String!]!', + startTime: 1000, + endTime: 1005, + }, + ], + }, + ], + }, + }); + + it('add single non-federated trace', () => { + const contextualizedStats = new OurContextualizedStats(statsContext); + contextualizedStats.addTrace(trace, new SizeEstimator()); + expect(contextualizedStats).toMatchSnapshot(); + }); + it('add multiple non-federated trace', () => { + const contextualizedStats = new OurContextualizedStats(statsContext); + contextualizedStats.addTrace(trace, new SizeEstimator()); + contextualizedStats.addTrace(trace, new SizeEstimator()); + expect(contextualizedStats).toMatchSnapshot(); + }); + + it('add multiple federated trace', () => { + const contextualizedStats = new OurContextualizedStats(statsContext); + contextualizedStats.addTrace(federatedTrace, new SizeEstimator()); + contextualizedStats.addTrace(federatedTrace, new SizeEstimator()); + expect(contextualizedStats).toMatchSnapshot(); + }); + it('add multiple errored traces trace', () => { + const contextualizedStats = new OurContextualizedStats(statsContext); + contextualizedStats.addTrace(errorTrace, new SizeEstimator()); + contextualizedStats.addTrace(errorTrace, new SizeEstimator()); + expect(contextualizedStats).toMatchSnapshot(); + }); +}); diff --git a/packages/apollo-server-core/src/plugin/usageReporting/defaultSendOperationsAsTrace.ts b/packages/apollo-server-core/src/plugin/usageReporting/defaultSendOperationsAsTrace.ts new file mode 100644 index 00000000000..300a429d622 --- /dev/null +++ b/packages/apollo-server-core/src/plugin/usageReporting/defaultSendOperationsAsTrace.ts @@ -0,0 +1,71 @@ +import LRUCache from 'lru-cache'; +import type { Trace } from 'apollo-reporting-protobuf'; +import { iterateOverTrace } from './iterateOverTrace'; +import { DurationHistogram } from './durationHistogram'; + +export function defaultSendOperationsAsTrace() { + // We keep an LRU cache mapping from a trace key (which consists of the + // operation as defined by statsReportKey, the rough duration of the + // operation, what minute the operation ended at, etc) to `true` if we've seen + // it recently. 
Because the key includes the minute in + // which the operation ended, entries naturally stop matching after a minute, + // so the cache only needs to remember recent operations. + const cache = new LRUCache<string, boolean>({ + // 1MiB limit, very much approximately since we can't be sure how V8 might + // be storing these strings internally. Though this should be enough to + // store a fair amount of trace keys. + + // A future version of this might expose some + // configuration option to grow the cache, but ideally, we could do that + // dynamically based on the resources available to the server, and not add + // more configuration surface area. + max: Math.pow(2, 20), + length: (_val, key) => { + return (key && Buffer.byteLength(key, 'utf8')) || 0; + }, + }); + + return (trace: Trace, statsReportKey: string): boolean => { + const endTimeSeconds = trace.endTime?.seconds; + if (endTimeSeconds == null) { + throw Error('programming error: endTime not set on trace'); + } + + const hasErrors = traceHasErrors(trace); + const cacheKey = JSON.stringify([ + statsReportKey, + DurationHistogram.durationToBucket(trace.durationNs), + // What minute it ended at. + Math.floor(endTimeSeconds / 60), + // If the trace has an error, allow one errored trace per 5-second interval + // instead of the normal per-minute bucket that a non-errored trace gets. + hasErrors ? Math.floor(endTimeSeconds / 5) : '', + ]); + + // If we've already seen something roughly like this, don't send as a trace. + if (cache.get(cacheKey)) { + return false; + } + + cache.set(cacheKey, true); + return true; + }; +} + +// Returns true if any node on the trace has errors. (If this ends up being a +// hot spot, we can precalculate it in TraceTreeBuilder.) +function traceHasErrors(trace: Trace): boolean { + let hasErrors = false; + + function traceNodeStats(node: Trace.INode): boolean { + if ((node.error?.length ??
0) > 0) { + hasErrors = true; + } + return hasErrors; + } + + iterateOverTrace(trace, traceNodeStats, false); + return hasErrors; +} diff --git a/packages/apollo-server-core/src/plugin/usageReporting/durationHistogram.ts b/packages/apollo-server-core/src/plugin/usageReporting/durationHistogram.ts index 9b924ce25d6..c19f9b54f2e 100644 --- a/packages/apollo-server-core/src/plugin/usageReporting/durationHistogram.ts +++ b/packages/apollo-server-core/src/plugin/usageReporting/durationHistogram.ts @@ -4,10 +4,10 @@ export interface DurationHistogramOptions { } export class DurationHistogram { private readonly buckets: number[]; - public static readonly BUCKET_COUNT = 384; - public static readonly EXPONENT_LOG = Math.log(1.1); + static readonly BUCKET_COUNT = 384; + static readonly EXPONENT_LOG = Math.log(1.1); - public toArray(): number[] { + toArray(): number[] { let bufferedZeroes = 0; const outputArray: number[] = []; @@ -39,11 +39,12 @@ export class DurationHistogram { : unboundedBucket; } - public incrementDuration(durationNs: number) { + incrementDuration(durationNs: number): DurationHistogram { this.incrementBucket(DurationHistogram.durationToBucket(durationNs)); + return this; } - public incrementBucket(bucket: number, value = 1) { + incrementBucket(bucket: number, value = 1) { if (bucket >= DurationHistogram.BUCKET_COUNT) { // Since we don't have fixed size arrays I'd rather throw the error manually throw Error('Bucket is out of bounds of the buckets array'); @@ -59,7 +60,7 @@ export class DurationHistogram { this.buckets[bucket] += value; } - public combine(otherHistogram: DurationHistogram) { + combine(otherHistogram: DurationHistogram) { for (let i = 0; i < otherHistogram.buckets.length; i++) { this.incrementBucket(i, otherHistogram.buckets[i]); } diff --git a/packages/apollo-server-core/src/plugin/usageReporting/iterateOverTrace.ts b/packages/apollo-server-core/src/plugin/usageReporting/iterateOverTrace.ts new file mode 100644 index 00000000000..cc867eeb058 --- /dev/null +++ b/packages/apollo-server-core/src/plugin/usageReporting/iterateOverTrace.ts @@ -0,0 +1,140 @@ +import { Trace } from 'apollo-reporting-protobuf'; + +/** + * Iterates over the entire trace, calling `f` on each Trace.Node found. It + * looks under the "root" node as well as any inside the query plan. If any `f` + * returns true, it stops walking the tree. + * + * Each call to `f` will receive an object that implements ResponseNamePath. If + * `includePath` is true, `f` can call `toArray()` on it to convert the + * linked-list representation to an array of the response name (field name) + * nodes that you navigate to get to the node (including a "service:subgraph" + * top-level node if this is a federated trace). Note that we don't add anything + * to the path for index (list element) nodes. This is because the only use case + * we have (error path statistics) does not care about list indexes (it's not + * that interesting to know that sometimes an error was at foo.3.bar and + * sometimes foo.5.bar, vs just generally foo.bar). + * + * If `includePath` is false, we don't bother to build up the linked lists, and + * calling `toArray()` will throw. + */ +export function iterateOverTrace( + trace: Trace, + f: (node: Trace.INode, path: ResponseNamePath) => boolean, + includePath: boolean, +) { + const rootPath = includePath + ? 
new RootCollectingPathsResponseNamePath() + : notCollectingPathsResponseNamePath; + if (trace.root) { + if (iterateOverTraceNode(trace.root, rootPath, f)) return; + } + + if (trace.queryPlan) { + if (iterateOverQueryPlan(trace.queryPlan, rootPath, f)) return; + } +} + +// Helper for iterateOverTrace; returns true to stop the overall walk. +function iterateOverQueryPlan( + node: Trace.IQueryPlanNode, + rootPath: ResponseNamePath, + f: (node: Trace.INode, path: ResponseNamePath) => boolean, +): boolean { + if (!node) return false; + + if (node.fetch?.trace?.root && node.fetch.serviceName) { + return iterateOverTraceNode( + node.fetch.trace.root, + rootPath.child(`service:${node.fetch.serviceName}`), + f, + ); + } + if (node.flatten?.node) { + return iterateOverQueryPlan(node.flatten.node, rootPath, f); + } + if (node.parallel?.nodes) { + // We want to stop as soon as some call returns true, which happens to be + // exactly what 'some' does. + return node.parallel.nodes.some((node) => + iterateOverQueryPlan(node, rootPath, f), + ); + } + if (node.sequence?.nodes) { + // We want to stop as soon as some call returns true, which happens to be + // exactly what 'some' does. + return node.sequence.nodes.some((node) => + iterateOverQueryPlan(node, rootPath, f), + ); + } + + return false; +} + +// Helper for iterateOverTrace; returns true to stop the overall walk. +function iterateOverTraceNode( + node: Trace.INode, + path: ResponseNamePath, + f: (node: Trace.INode, path: ResponseNamePath) => boolean, +): boolean { + // Invoke the function; if it returns true, don't descend and tell callers to + // stop walking. + if (f(node, path)) { + return true; + } + + return ( + // We want to stop as soon as some call returns true, which happens to be + // exactly what 'some' does. + node.child?.some((child) => { + const childPath = child.responseName + ? path.child(child.responseName) + : path; + return iterateOverTraceNode(child, childPath, f); + }) ?? 
false + ); +} + +export interface ResponseNamePath { + toArray(): string[]; + child(responseName: string): ResponseNamePath; +} + +const notCollectingPathsResponseNamePath: ResponseNamePath = { + toArray() { + throw Error('not collecting paths!'); + }, + child() { + return this; + }, +}; + +type CollectingPathsResponseNamePath = + | RootCollectingPathsResponseNamePath + | ChildCollectingPathsResponseNamePath; +class RootCollectingPathsResponseNamePath implements ResponseNamePath { + toArray() { + return []; + } + child(responseName: string) { + return new ChildCollectingPathsResponseNamePath(responseName, this); + } +} +class ChildCollectingPathsResponseNamePath implements ResponseNamePath { + constructor( + readonly responseName: string, + readonly prev: CollectingPathsResponseNamePath, + ) {} + toArray() { + const out = []; + let curr: CollectingPathsResponseNamePath = this; + while (curr instanceof ChildCollectingPathsResponseNamePath) { + out.push(curr.responseName); + curr = curr.prev; + } + return out.reverse(); + } + child(responseName: string) { + return new ChildCollectingPathsResponseNamePath(responseName, this); + } +} diff --git a/packages/apollo-server-core/src/plugin/usageReporting/options.ts b/packages/apollo-server-core/src/plugin/usageReporting/options.ts index 8769b62860e..f248fb8875e 100644 --- a/packages/apollo-server-core/src/plugin/usageReporting/options.ts +++ b/packages/apollo-server-core/src/plugin/usageReporting/options.ts @@ -6,6 +6,7 @@ import { GraphQLRequestContext, } from 'apollo-server-types'; import { RequestAgent } from 'apollo-server-env'; +import type { Trace } from 'apollo-reporting-protobuf'; export interface ApolloServerPluginUsageReportingOptions { //#region Configure exactly which data should be sent to Apollo. @@ -118,6 +119,25 @@ export interface ApolloServerPluginUsageReportingOptions { * failure will be embedded within the stats report key itself. */ sendUnexecutableOperationDocuments?: boolean; + + /** + * This plugin sends information about operations to Apollo's servers in two + * forms: as detailed operation traces of single operations and as summarized + * statistics of many operations. Each individual operation is described in + * exactly one of those ways. This hook lets you select which operations are + * sent as traces and which are sent as statistics. The default is a heuristic + * that tries to send one trace for each rough duration bucket for each + * operation each minute, plus more if the operations have errors. (Note that + * Apollo's servers perform their own sampling on received traces; not all + * traces sent to Apollo's servers can be later retrieved via the trace UI.) + * + * This option is highly experimental and may change or be removed in future + * versions. + */ + experimental_sendOperationAsTrace?: ( + trace: Trace, + statsReportKey: string, + ) => boolean; //#endregion //#region Configure the mechanics of communicating with Apollo's servers. @@ -193,6 +213,12 @@ export interface ApolloServerPluginUsageReportingOptions { * about how the signature relates to the operation you executed. */ calculateSignature?: (ast: DocumentNode, operationName: string) => string; + /** + * This option includes extra data in reports that helps Apollo validate the + * stats generation code in this plugin. Do not set it; the only impact on + * your app will be a decrease in performance. 
+ */ + internal_includeTracesContributingToStats?: boolean; //#endregion } diff --git a/packages/apollo-server-core/src/plugin/usageReporting/plugin.ts b/packages/apollo-server-core/src/plugin/usageReporting/plugin.ts index 98c485a3dfb..5c0457dec9f 100644 --- a/packages/apollo-server-core/src/plugin/usageReporting/plugin.ts +++ b/packages/apollo-server-core/src/plugin/usageReporting/plugin.ts @@ -2,12 +2,7 @@ import os from 'os'; import { gzip } from 'zlib'; import retry from 'async-retry'; import { defaultUsageReportingSignature } from 'apollo-graphql'; -import { - Report, - ReportHeader, - Trace, - TracesAndStats, -} from 'apollo-reporting-protobuf'; +import { Report, ReportHeader, Trace } from 'apollo-reporting-protobuf'; import { Response, fetch, Headers } from 'apollo-server-env'; import { GraphQLRequestListener, @@ -26,11 +21,14 @@ import { ApolloServerPluginUsageReportingOptions, SendValuesBaseOptions, } from './options'; -import { TraceTreeBuilder } from '../traceTreeBuilder'; +import { dateToProtoTimestamp, TraceTreeBuilder } from '../traceTreeBuilder'; import { makeTraceDetails } from './traceDetails'; import { GraphQLSchema, printSchema } from 'graphql'; import { computeExecutableSchemaId } from '../schemaReporting'; import type { InternalApolloServerPlugin } from '../internalPlugin'; +import { OurReport } from './stats'; +import { CacheScope } from 'apollo-cache-control'; +import { defaultSendOperationsAsTrace } from './defaultSendOperationsAsTrace'; const reportHeaderDefaults = { hostname: os.hostname(), @@ -43,8 +41,7 @@ const reportHeaderDefaults = { }; class ReportData { - report!: Report; - size!: number; + report!: OurReport; readonly header: ReportHeader; constructor(executableSchemaId: string, graphVariant: string) { this.header = new ReportHeader({ @@ -55,8 +52,7 @@ class ReportData { this.reset(); } reset() { - this.report = new Report({ header: this.header }); - this.size = 0; + this.report = new OurReport(this.header); } } @@ -144,6 +140,14 @@ export function ApolloServerPluginUsageReporting( options.reportIntervalMs || 10 * 1000, ); } + + let graphMightSupportTraces = true; + const sendOperationAsTrace = + options.experimental_sendOperationAsTrace ?? + defaultSendOperationsAsTrace(); + const includeTracesContributingToStats = + options.internal_includeTracesContributingToStats ?? false; + let stopped = false; function executableSchemaIdForSchema(schema: GraphQLSchema) { @@ -210,6 +214,20 @@ export function ApolloServerPluginUsageReporting( return; } + // Set the report's overall end time. This is the timestamp that will be + // associated with the summarized statistics. + report.endTime = dateToProtoTimestamp(new Date()); + + const protobufError = Report.verify(report); + if (protobufError) { + throw new Error(`Error encoding report: ${protobufError}`); + } + const message = Report.encode(report).finish(); + + // Potential follow-up: we can compare message.length to + // report.sizeEstimator.bytes and use it to "learn" if our estimation is + // off and adjust it based on what we learn. + if (options.debugPrintReports) { // In terms of verbosity, and as the name of this option suggests, // this message is either an "info" or a "debug" level message. @@ -219,20 +237,16 @@ export function ApolloServerPluginUsageReporting( // `debugPrintReports`) just to reach the level of verbosity to // produce the output would be a breaking change. The "warn" level is // on by default. There is a similar theory and comment applied - // below. 
(Note that the actual traces are "pre-encoded" and not - // accessible to `toJSON` but we do print them separately when we - // encode them.) + // below. + // + // We decode the report rather than printing the original `report` + // so that it includes all of the pre-encoded traces. + const decodedReport = Report.decode(message); logger.warn( - `Apollo usage report: ${JSON.stringify(report.toJSON())}`, + `Apollo usage report: ${JSON.stringify(decodedReport.toJSON())}`, ); } - const protobufError = Report.verify(report); - if (protobufError) { - throw new Error(`Error encoding report: ${protobufError}`); - } - const message = Report.encode(report).finish(); - const compressed = await new Promise((resolve, reject) => { // The protobuf library gives us a Uint8Array. Node 8's zlib lets us // pass it directly; convert for the sake of Node 6. (No support right @@ -266,6 +280,7 @@ export function ApolloServerPluginUsageReporting( 'user-agent': 'ApolloServerPluginUsageReporting', 'x-api-key': key, 'content-encoding': 'gzip', + accept: 'application/json', }, body: compressed, agent: options.requestAgent, @@ -302,6 +317,31 @@ export function ApolloServerPluginUsageReporting( }, ${(await response.text()) || '(no body)'}`, ); } + + if ( + graphMightSupportTraces && + response.status === 200 && + response.headers + .get('content-type') + ?.match(/^\s*application\/json\s*(?:;|$)/i) + ) { + const body = await response.text(); + let parsedBody; + try { + parsedBody = JSON.parse(body); + } catch (e) { + throw new Error(`Error parsing response from Apollo servers: ${e}`); + } + if (parsedBody.tracesIgnored === true) { + logger.debug( + "This graph's organization does not have access to traces; sending all " + + 'subsequent operations as stats.', + ); + graphMightSupportTraces = false; + // XXX We could also parse traces that are already in the current + // report and convert them to stats if we wanted? + } + } if (options.debugPrintReports) { // In terms of verbosity, and as the name of this option suggests, this // message is either an "info" or a "debug" level message. However, @@ -421,6 +461,20 @@ export function ApolloServerPluginUsageReporting( treeBuilder.trace.forbiddenOperation = !!metrics.forbiddenOperation; treeBuilder.trace.registeredOperation = !!metrics.registeredOperation; + if (requestContext.overallCachePolicy) { + treeBuilder.trace.cachePolicy = new Trace.CachePolicy({ + scope: + requestContext.overallCachePolicy.scope === CacheScope.Private + ? Trace.CachePolicy.Scope.PRIVATE + : requestContext.overallCachePolicy.scope === + CacheScope.Public + ? Trace.CachePolicy.Scope.PUBLIC + : Trace.CachePolicy.Scope.UNKNOWN, + // Convert from seconds to ns. + maxAgeNs: requestContext.overallCachePolicy.maxAge * 1e9, + }); + } + // If operation resolution (parsing and validating the document followed // by selecting the correct operation) resulted in the population of the // `operationName`, we'll use that.
(For anonymous operations, @@ -472,6 +526,7 @@ export function ApolloServerPluginUsageReporting( const reportData = getReportData(executableSchemaId); const { report } = reportData; + const { trace } = treeBuilder; let statsReportKey: string | undefined = undefined; if (!requestContext.document) { @@ -484,47 +539,32 @@ export function ApolloServerPluginUsageReporting( if (statsReportKey) { if (options.sendUnexecutableOperationDocuments) { - treeBuilder.trace.unexecutedOperationBody = - requestContext.source; - treeBuilder.trace.unexecutedOperationName = operationName; + trace.unexecutedOperationBody = requestContext.source; + trace.unexecutedOperationName = operationName; } } else { const signature = getTraceSignature(); statsReportKey = `# ${operationName || '-'}\n${signature}`; } - const protobufError = Trace.verify(treeBuilder.trace); + const protobufError = Trace.verify(trace); if (protobufError) { throw new Error(`Error encoding trace: ${protobufError}`); } - const encodedTrace = Trace.encode(treeBuilder.trace).finish(); - - if (!report.tracesPerQuery.hasOwnProperty(statsReportKey)) { - report.tracesPerQuery[statsReportKey] = new TracesAndStats(); - (report.tracesPerQuery[statsReportKey] as any).encodedTraces = []; - } - // See comment on our override of Traces.encode inside of - // apollo-reporting-protobuf to learn more about this strategy. - (report.tracesPerQuery[statsReportKey] as any).encodedTraces.push( - encodedTrace, - ); - - reportData.size += - encodedTrace.length + Buffer.byteLength(statsReportKey); - - if (options.debugPrintReports) { - logger.warn( - `Apollo usage report trace: ${JSON.stringify( - treeBuilder.trace.toJSON(), - )}`, - ); - } + report.addTrace({ + statsReportKey, + trace, + asTrace: + graphMightSupportTraces && + sendOperationAsTrace(trace, statsReportKey), + includeTracesContributingToStats, + }); // If the buffer gets big (according to our estimate), send. if ( sendReportsImmediately || - reportData.size >= + report.sizeEstimator.bytes >= (options.maxUncompressedReportSize || 4 * 1024 * 1024) ) { await sendReportAndReportErrors(executableSchemaId); diff --git a/packages/apollo-server-core/src/plugin/usageReporting/stats.ts b/packages/apollo-server-core/src/plugin/usageReporting/stats.ts new file mode 100644 index 00000000000..3cf29c7fc1c --- /dev/null +++ b/packages/apollo-server-core/src/plugin/usageReporting/stats.ts @@ -0,0 +1,355 @@ +import { DurationHistogram } from './durationHistogram'; +import { + IFieldStat, + IPathErrorStats, + IQueryLatencyStats, + IStatsContext, + Trace, + ITypeStat, + IContextualizedStats, + ReportHeader, + google, + ITracesAndStats, + IReport, +} from 'apollo-reporting-protobuf'; +import { iterateOverTrace, ResponseNamePath } from './iterateOverTrace'; + +// protobuf.js exports both a class and an interface (starting with I) for each +// message type. The class is what it produces when it decodes the message; the +// interface is what is accepted as input. We build up our messages using custom +// types implementing the interfaces, so that we can take advantage of the +// js_use_toArray option we added to our protobuf.js fork which allows us to use +// classes like DurationHistogram to generate repeated fields. We end up +// re-creating most of the report structure as custom classes (starting with +// "Our"). TypeScript validates that we've properly listed all of the message +// fields with the appropriate types (we use `Required` to ensure we implement +// all message fields). 
Using our own classes has other advantages, like being + // able to specify that nested messages are instances of the same class rather + // than the interface type and thus that they have non-null fields (because the + // interface type allows all fields to be optional, even though the protobuf + // format doesn't differentiate between missing and falsey). + +export class SizeEstimator { + bytes = 0; +} +export class OurReport implements Required<IReport> { + constructor(readonly header: ReportHeader) {} + readonly tracesPerQuery: Record<string, OurTracesAndStats> = Object.create( + null, + ); + endTime: google.protobuf.ITimestamp | null = null; + + // A rough estimate of the number of bytes currently in the report. We start + // at zero and don't count `header` and `endTime`, which have the same size + // for every report. This really is a rough estimate, so we don't stress too + // much about counting bytes for the tags and string/message lengths, etc: + // we mostly just count the lengths of strings plus some estimates for the + // messages with a bunch of numbers in them. + // + // We store this in a class so we can pass it down as a reference to other + // methods which increment it. + readonly sizeEstimator = new SizeEstimator(); + + addTrace({ + statsReportKey, + trace, + asTrace, + includeTracesContributingToStats, + }: { + statsReportKey: string; + trace: Trace; + asTrace: boolean; + includeTracesContributingToStats: boolean; + }) { + const tracesAndStats = this.getTracesAndStats(statsReportKey); + if (asTrace) { + const encodedTrace = Trace.encode(trace).finish(); + tracesAndStats.trace.push(encodedTrace); + this.sizeEstimator.bytes += 2 + encodedTrace.length; + } else { + tracesAndStats.statsWithContext.addTrace(trace, this.sizeEstimator); + if (includeTracesContributingToStats) { + // For specific use inside Apollo's infrastructure to help validate that + // the code in this file matches similar code in Apollo's servers, + // include the traces that contribute to the stats. Doing this outside + // of Apollo's infrastructure only serves to make reports larger with no + // other advantage.
+ const encodedTrace = Trace.encode(trace).finish(); + tracesAndStats.internalTracesContributingToStats.push(encodedTrace); + this.sizeEstimator.bytes += 2 + encodedTrace.length; + } + } + } + + private getTracesAndStats(statsReportKey: string) { + const existing = this.tracesPerQuery[statsReportKey]; + if (existing) { + return existing; + } + this.sizeEstimator.bytes += estimatedBytesForString(statsReportKey); + return (this.tracesPerQuery[statsReportKey] = new OurTracesAndStats()); + } +} + +class OurTracesAndStats implements Required<ITracesAndStats> { + readonly trace: Uint8Array[] = []; + readonly statsWithContext = new StatsByContext(); + readonly internalTracesContributingToStats: Uint8Array[] = []; +} + +class StatsByContext { + readonly map: { [k: string]: OurContextualizedStats } = Object.create(null); + + /** + * This function is used by the protobuf generator to convert this map into + * an array of contextualized stats to serialize + */ + toArray(): IContextualizedStats[] { + return Object.values(this.map); + } + + addTrace(trace: Trace, sizeEstimator: SizeEstimator) { + this.getContextualizedStats(trace, sizeEstimator).addTrace( + trace, + sizeEstimator, + ); + } + + private getContextualizedStats( + trace: Trace, + sizeEstimator: SizeEstimator, + ): OurContextualizedStats { + const statsContext: IStatsContext = { + clientName: trace.clientName, + clientVersion: trace.clientVersion, + clientReferenceId: trace.clientReferenceId, + }; + const statsContextKey = JSON.stringify(statsContext); + + const existing = this.map[statsContextKey]; + if (existing) { + return existing; + } + // Adding a ContextualizedStats means adding a StatsContext plus a + // QueryLatencyStats. Let's guess about 20 bytes for a QueryLatencyStats; + // it'll be more if more features are used (like cache, APQ, etc). + sizeEstimator.bytes += + 20 + + estimatedBytesForString(trace.clientName) + + estimatedBytesForString(trace.clientVersion) + + estimatedBytesForString(trace.clientReferenceId); + const contextualizedStats = new OurContextualizedStats(statsContext); + this.map[statsContextKey] = contextualizedStats; + return contextualizedStats; + } +} + +export class OurContextualizedStats implements Required<IContextualizedStats> { + queryLatencyStats = new OurQueryLatencyStats(); + perTypeStat: { [k: string]: OurTypeStat } = Object.create(null); + + constructor(readonly context: IStatsContext) {} + + // Extract statistics from the trace, and increment the estimated report size. + // We only add to the estimate when adding whole sub-messages. If it really + // mattered, we could do a lot more careful things like incrementing it + // whenever a numeric field on queryLatencyStats gets incremented over 0. + addTrace(trace: Trace, sizeEstimator: SizeEstimator) { + this.queryLatencyStats.requestCount++; + if (trace.fullQueryCacheHit) { + this.queryLatencyStats.cacheLatencyCount.incrementDuration( + trace.durationNs, + ); + this.queryLatencyStats.cacheHits++; + } else { + this.queryLatencyStats.latencyCount.incrementDuration(trace.durationNs); + } + + // We only provide stats about cache TTLs on cache misses (ie, TTLs directly + // calculated by the backend), not for cache hits. This matches the + // behavior we've had for a while when converting traces into statistics + // in Studio's servers.
+ if (!trace.fullQueryCacheHit && trace.cachePolicy?.maxAgeNs != null) { + switch (trace.cachePolicy.scope) { + case Trace.CachePolicy.Scope.PRIVATE: + this.queryLatencyStats.privateCacheTtlCount.incrementDuration( + trace.cachePolicy.maxAgeNs, + ); + break; + case Trace.CachePolicy.Scope.PUBLIC: + this.queryLatencyStats.publicCacheTtlCount.incrementDuration( + trace.cachePolicy.maxAgeNs, + ); + break; + } + } + + if (trace.persistedQueryHit) { + this.queryLatencyStats.persistedQueryHits++; + } + if (trace.persistedQueryRegister) { + this.queryLatencyStats.persistedQueryMisses++; + } + + if (trace.forbiddenOperation) { + this.queryLatencyStats.forbiddenOperationCount++; + } + if (trace.registeredOperation) { + this.queryLatencyStats.registeredOperationCount++; + } + + let hasError = false; + + const traceNodeStats = (node: Trace.INode, path: ResponseNamePath) => { + // Generate error stats and error path information + if (node.error?.length) { + hasError = true; + + let currPathErrorStats = this.queryLatencyStats.rootErrorStats; + path.toArray().forEach((subPath) => { + currPathErrorStats = currPathErrorStats.getChild( + subPath, + sizeEstimator, + ); + }); + + currPathErrorStats.requestsWithErrorsCount += 1; + currPathErrorStats.errorsCount += node.error.length; + } + + // The actual field name behind the node; originalFieldName is set + // if an alias was used, otherwise responseName. (This is falsey for + // nodes that are not fields (root, array index, etc).) + const fieldName = node.originalFieldName || node.responseName; + + // Protobuf doesn't really differentiate between "unset" and "falsey" so + // we're mostly actually checking that these things are non-empty string / + // non-zero numbers. The time fields represent the number of nanoseconds + // since the beginning of the entire trace, so let's pretend for the + // moment that it's plausible for a node to start or even end exactly when + // the trace started (ie, for the time values to be 0). This is unlikely + // in practice (everything should take at least 1ns). In practice we only + // write `type` and `parentType` on a Node when we write `startTime`, so + // the main thing we're looking out for by checking the time values is + // whether we somehow failed to write `endTime` at the end of the field; + // in this case, the `endTime >= startTime` check won't match. + if ( + node.parentType && + fieldName && + node.type && + node.endTime != null && + node.startTime != null && + node.endTime >= node.startTime + ) { + const typeStat = this.getTypeStat(node.parentType, sizeEstimator); + + const fieldStat = typeStat.getFieldStat( + fieldName, + node.type, + sizeEstimator, + ); + + fieldStat.errorsCount += node.error?.length ?? 0; + fieldStat.count++; + // Note: this is actually counting the number of resolver calls for this + // field that had at least one error, not the number of overall GraphQL + // queries that had at least one error for this field. That doesn't seem + // to match the name, but it does match the other implementations of this + // logic. + fieldStat.requestsWithErrorsCount += + (node.error?.length ?? 0) > 0 ? 
1 : 0; + fieldStat.latencyCount.incrementDuration(node.endTime - node.startTime); + } + + return false; + }; + + iterateOverTrace(trace, traceNodeStats, true); + if (hasError) { + this.queryLatencyStats.requestsWithErrorsCount++; + } + } + + getTypeStat(parentType: string, sizeEstimator: SizeEstimator): OurTypeStat { + const existing = this.perTypeStat[parentType]; + if (existing) { + return existing; + } + sizeEstimator.bytes += estimatedBytesForString(parentType); + const typeStat = new OurTypeStat(); + this.perTypeStat[parentType] = typeStat; + return typeStat; + } +} + +class OurQueryLatencyStats implements Required<IQueryLatencyStats> { + latencyCount: DurationHistogram = new DurationHistogram(); + requestCount: number = 0; + cacheHits: number = 0; + persistedQueryHits: number = 0; + persistedQueryMisses: number = 0; + cacheLatencyCount: DurationHistogram = new DurationHistogram(); + rootErrorStats: OurPathErrorStats = new OurPathErrorStats(); + requestsWithErrorsCount: number = 0; + publicCacheTtlCount: DurationHistogram = new DurationHistogram(); + privateCacheTtlCount: DurationHistogram = new DurationHistogram(); + registeredOperationCount: number = 0; + forbiddenOperationCount: number = 0; +} + +class OurPathErrorStats implements Required<IPathErrorStats> { + children: { [k: string]: OurPathErrorStats } = Object.create(null); + errorsCount: number = 0; + requestsWithErrorsCount: number = 0; + + getChild(subPath: string, sizeEstimator: SizeEstimator): OurPathErrorStats { + const existing = this.children[subPath]; + if (existing) { + return existing; + } + const child = new OurPathErrorStats(); + this.children[subPath] = child; + // Include a few bytes in the estimate for the numbers etc. + sizeEstimator.bytes += estimatedBytesForString(subPath) + 4; + return child; + } +} + +class OurTypeStat implements Required<ITypeStat> { + perFieldStat: { [k: string]: OurFieldStat } = Object.create(null); + + getFieldStat( + fieldName: string, + returnType: string, + sizeEstimator: SizeEstimator, + ): OurFieldStat { + const existing = this.perFieldStat[fieldName]; + if (existing) { + return existing; + } + // Rough estimate of 10 bytes for the numbers in the FieldStat. + sizeEstimator.bytes += + estimatedBytesForString(fieldName) + + estimatedBytesForString(returnType) + + 10; + const fieldStat = new OurFieldStat(returnType); + this.perFieldStat[fieldName] = fieldStat; + return fieldStat; + } +} + +class OurFieldStat implements Required<IFieldStat> { + errorsCount: number = 0; + count: number = 0; + requestsWithErrorsCount: number = 0; + latencyCount: DurationHistogram = new DurationHistogram(); + + constructor(readonly returnType: string) {} +} + +function estimatedBytesForString(s: string) { + // 2 is for the tag (field ID + wire type) plus the encoded length. (The + // encoded length takes up more than 1 byte for strings that are longer than + // 127 bytes, but this is an estimate.) + return 2 + Buffer.byteLength(s); +}
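For readers who want to try the new hook without relying on the default heuristic, here is a rough usage sketch. It is not part of the patch: the import paths and the 1% sampling rule are illustrative assumptions; only the `(trace, statsReportKey) => boolean` shape of `experimental_sendOperationAsTrace` and the `##`-prefixed stats report keys for unparseable, invalid, or unnamed operations come from this diff.

  import { ApolloServerPluginUsageReporting } from 'apollo-server-core';
  import type { Trace } from 'apollo-reporting-protobuf';

  // Never send traces for operations that failed to parse, validate, or resolve
  // an operation name (their stats report keys start with '##'); for everything
  // else, send roughly 1% of operations as full traces and the rest as stats.
  export const usageReportingPlugin = ApolloServerPluginUsageReporting({
    experimental_sendOperationAsTrace: (_trace: Trace, statsReportKey: string) =>
      !statsReportKey.startsWith('##') && Math.random() < 0.01,
  });

  // The plugin would then be passed to the server, e.g.
  // new ApolloServer({ ..., plugins: [usageReportingPlugin] }).

As the option docs above note, this hook is experimental and may change or be removed in a later release.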