diff --git a/.circleci/config.yml b/.circleci/config.yml index 827f7e9221..d51d34a765 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -649,8 +649,19 @@ jobs: command: > cd artifacts && sha1sum *.tar.gz > sha1sums.txt - run: + name: Create GitHub Release command: > - gh release create $VERSION --notes-file /dev/null --title $VERSION artifacts/* + case "$VERSION" in + + # If the VERSION contains a dash, consider it a pre-release version. + # This is in-line with SemVer's expectations/designations! + *-*) gh release create $VERSION --prerelease --notes-file /dev/null --title $VERSION artifacts/* ;; + + # In all other cases, publish it as the latest version. + *) gh release create $VERSION --notes-file /dev/null --title $VERSION artifacts/* ;; + + esac + - setup_remote_docker: version: 20.10.11 docker_layer_caching: true diff --git a/CHANGELOG.md b/CHANGELOG.md index 82acd446cf..bdbb4f3fa5 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -4,6 +4,197 @@ All notable changes to Router will be documented in this file. This project adheres to [Semantic Versioning v2.0.0](https://semver.org/spec/v2.0.0.html). +# [1.16.0] - 2023-05-03 + +## 🚀 Features + +### Add ability to transmit un-redacted errors from federated traces to Apollo Studio + +When using subgraphs which are enabled with [Apollo Federated Tracing](https://www.apollographql.com/docs/router/configuration/apollo-telemetry/#enabling-field-level-instrumentation), the error messages within those traces will be **redacted by default**. + +New configuration (`tracing.apollo.errors.subgraph.all.redact`, which defaults to `true`) enables or disables the redaction mechanism. Similar configuration (`tracing.apollo.errors.subgraph.all.send`, which also defaults to `true`) enables or disables the entire transmission of the error to Studio. + +The error messages returned to the clients are **not** changed or redacted from their previous behavior. 
+ +To enable sending subgraphs' federated trace error messages to Studio **without redaction**, you can set the following configuration: + +```yaml title="router.yaml" +telemetry: + apollo: + errors: + subgraph: + all: + send: true # (true = Send to Studio, false = Do not send; default: true) + redact: false # (true = Redact full error message, false = Do not redact; default: true) +``` + +It is also possible to configure this **per-subgraph** using a `subgraphs` map at the same level as `all` in the configuration, much like other sections of the configuration which have subgraph-specific capabilities: + +```yaml title="router.yaml" +telemetry: + apollo: + errors: + subgraph: + all: + send: true + redact: false # Disables redaction as a default. The `accounts` service enables it below. + subgraphs: + accounts: # Applies to the `accounts` subgraph, overriding the `all` global setting. + redact: true # Redacts messages from the `accounts` service. +``` + +By [@bnjjj](https://github.com/bnjjj) in https://github.com/apollographql/router/pull/3011 + +### Introduce `response.is_primary` Rhai helper for working with deferred responses ([Issue #2935](https://github.com/apollographql/router/issues/2935)) ([Issue #2936](https://github.com/apollographql/router/issues/2936)) + +A new Rhai `response.is_primary()` helper has been introduced that returns `false` when the current chunk being processed is a _deferred response_ chunk. Put another way, it will be `false` if the chunk is a _follow-up_ response to the initial _primary_ response, during the fulfillment of a `@defer`'d fragment in a larger operation. The initial response will be `is_primary() == true`. This aims to provide the right primitives so users can write more defensible error checking. It is especially useful for response header manipulations, which is only possible on the primary response. The introduction of this relates to a bug fix noted in the _Fixes_ section below. 
+ +By [@garypen](https://github.com/garypen) in https://github.com/apollographql/router/pull/2945 + +### Time-based forced hot-reload for "chaos" testing + +For testing purposes, the Router can now artificially be forced to hot-reload (as if the configuration or schema had changed) at a configured time interval. This can help reproduce issues like reload-related memory leaks. We don't recommend using this in any production environment. (If you are compelled to use it in production, please let us know about your use case!) + +The new configuration section for this "chaos" testing is (and will likely remain) marked as "experimental": + +```yaml +experimental_chaos: + force_hot_reload: 1m +``` + +By [@SimonSapin](https://github.com/SimonSapin) in https://github.com/apollographql/router/pull/2988 + +### Provide helpful console output when using "preview" features, just like "experimental" features + +This expands on the existing mechanism that was originally introduced in https://github.com/apollographql/router/pull/2242, which supports the notion of an "experimental" feature, and makes it compatible with the notion of "preview" features. + +When preview or experimental features are used, an `INFO`-level log is emitted during startup to notify which features are used and shows URLs to their GitHub discussions, for feedback. Additionally, `router config experimental` and `router config preview` CLI sub-commands list all such features in the current Router version, regardless of which are used in a given configuration file. 
+ +For more information about launch stages, please see the documentation here: https://www.apollographql.com/docs/resources/product-launch-stages/ + +By [@o0ignition0o](https://github.com/o0ignition0o), [@abernix](https://github.com/abernix), and [@SimonSapin](https://github.com/SimonSapin) in https://github.com/apollographql/router/pull/2960 + +### Report `operationCountByType` counts to Apollo Studio ([PR #2979](https://github.com/apollographql/router/pull/2979)) + +This adds the ability for Studio to track operation **counts** broken down by type of operations (e.g., `query` vs `mutation`). Previously, we only reported total operation count. + +By [@bnjjj](https://github.com/bnjjj) in https://github.com/apollographql/router/pull/2979 + +## 🐛 Fixes + +### Update to Federation v2.4.2 + +This update to Federation v2.4.2 fixes a [potential bug](https://github.com/apollographql/federation/pull/2524) when an `@interfaceObject` type has a `@requires`. This might be encountered when an `@interfaceObject` type has a field with a `@requires` and the query requests that field only for some specific implementations of the corresponding interface. In this case, the generated query plan was sometimes invalid and could result in an invalid query to a subgraph. In the case that the subgraph was an Apollo Server implementation, this led to the subgraph producing a `"The _entities resolver tried to load an entity for type X, but no object or interface type of that name was found in the schema"` error. 
+ +By [@abernix](https://github.com/abernix) in https://github.com/apollographql/router/pull/2910 + +### Fix handling of deferred response errors from Rhai scripts ([Issue #2935](https://github.com/apollographql/router/issues/2935)) ([Issue #2936](https://github.com/apollographql/router/issues/2936)) + +If a Rhai script was to error while processing a deferred response (i.e., an operation which uses `@defer`) the Router was ignoring the error and returning `None` in the stream of results. This had two unfortunate aspects: + + - the error was not propagated to the client + - the stream was terminated (silently) + +With this fix we now capture the error and still propagate the response to the client. This fix _also_ adds support for the `is_primary()` method which may be invoked on both `supergraph_service()` and `execution_service()` responses. It may be used to avoid implementing exception handling for header interactions and to determine if a response `is_primary()` (i.e., first) or not. + +e.g.: + + +```perl + if response.is_primary() { + print(`all response headers: `); + } else { + print(`don't try to access headers`); + } +``` + +vs + + +```perl + try { + print(`all response headers: `); + } + catch(err) { + if err == "cannot access headers on a deferred response" { + print(`don't try to access headers`); + } + } +``` + +> **Note** +> This is a _minimal_ example for purposes of illustration which doesn't exhaustively check all error conditions. An exception handler should always handle all error conditions. + +By [@garypen](https://github.com/garypen) in https://github.com/apollographql/router/pull/2945 + +### Fix incorrectly placed "message" in Rhai JSON-formatted logging ([Issue #2777](https://github.com/apollographql/router/issues/2777)) + +This fixes a bug where Rhai logging was incorrectly putting the message of the log into the `out` attribute, when serialized as JSON. 
Previously, the `message` field was showing `rhai_{{level}}` (i.e., `rhai_info`), despite there being a separate `level` field in the JSON structure. + +The impact of this fix can be seen in this example where we call `log_info()` in a Rhai script: + + +```perl + log_info("this is info"); +``` + +**Previously**, this would result in a log as follows, with the text of the message set within `out`, rather than `message`. + +```json +{"timestamp":"2023-04-19T07:46:15.483358Z","level":"INFO","message":"rhai_info","out":"this is info"} +``` + +**After the change**, the message is correctly within `message`. The level continues to be available at `level`. We've also additionally added a `target` property which shows the file which produced the error: + +```json +{"timestamp":"2023-04-19T07:46:15.483358Z","level":"INFO","message":"this is info","target":"src/rhai_logging.rhai"} +``` + +By [@garypen](https://github.com/garypen) in https://github.com/apollographql/router/pull/2975 + + +### Deferred responses now utilize compression, when requested ([Issue #1572](https://github.com/apollographql/router/issues/1572)) + +We previously had to disable compression on deferred responses due to an upstream library bug. To fix this, we've replaced `tower-http`'s `CompressionLayer` with a custom stream transformation. This is necessary because `tower-http` uses `async-compression` under the hood, which buffers data until the end of the stream, analyzes it, then writes it, ensuring a better compression. However, this is wholly-incompatible with a core concept of the multipart protocol for `@defer`, which requires chunks to be sent _as soon as possible_. To support that, we need to compress chunks independently. + +This extracts parts of the `codec` module of `async-compression`, which so far is not public, and makes a streaming wrapper _above it_ that flushes the compressed data on every response within the stream. 
+ +By [@Geal](https://github.com/Geal) in https://github.com/apollographql/router/pull/2986 + +### Update the `h2` dependency to fix a _potential_ Denial-of-Service (DoS) vulnerability + +Proactively addresses the advisory in https://rustsec.org/advisories/RUSTSEC-2023-0034, though we have no evidence that suggests it has been exploited on any Router deployment. + +By [@Geal](https://github.com/Geal) in https://github.com/apollographql/router/pull/2982 + +### Rate limit errors emitted from OpenTelemetry ([Issue #2953](https://github.com/apollographql/router/issues/2953)) + +When a batch span exporter is unable to accept a span because the buffer is full it will emit an error. These errors can be very frequent and could potentially impact performance. To mitigate this, OpenTelemetry errors are now rate limited to one every ten seconds, per error type. + +By [@bryncooke](https://github.com/bryncooke) in https://github.com/apollographql/router/pull/2954 + +### Improved messaging when a request is received without an operation ([Issue #2941](https://github.com/apollographql/router/issues/2941)) + +The message that is displayed when a request has been sent to the Router without an operation has been improved. This materializes as a developer experience improvement since users (especially those using GraphQL for the first time) might send a request to the Router using a tool that isn't GraphQL-aware, or might just have their API tool of choice misconfigured. + +Previously, the message stated "missing query string", but now more helpfully suggests sending either a POST or GET request and specifying the desired operation as the `query` parameter (i.e., either in the POST data or in the query string parameters for GET queries). 
+ +By [@kushal-93](https://github.com/kushal-93) in https://github.com/apollographql/router/pull/2955 + +### Traffic shaping configuration fix for global `experimental_enable_http2` + +We've resolved a case where the `experimental_enable_http2` feature wouldn't properly apply when configured with a global configuration. + +Huge thanks to [@westhechiang](https://github.com/westhechiang), [@leggomuhgreggo](https://github.com/leggomuhgreggo), [@vecchp](https://github.com/vecchp) and [@davidvasandani](https://github.com/davidvasandani) for discovering the issue and finding a reproducible testcase! + +By [@o0Ignition0o](https://github.com/o0Ignition0o) in https://github.com/apollographql/router/pull/2976 + +### Limit the memory usage of the `apollo` OpenTelemetry exporter ([PR #3006](https://github.com/apollographql/router/pull/3006)) + +We've added a new LRU cache in place of a `Vec` for sub-span data to avoid keeping all events for a span in memory, since we don't need it for our computations. 
+ +By [@bnjjj](https://github.com/bnjjj) in https://github.com/apollographql/router/pull/3006 + # [1.15.1] - 2023-04-18 ## 🐛 Fixes diff --git a/Cargo.lock b/Cargo.lock index a779610e52..945958389f 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -272,10 +272,9 @@ dependencies = [ [[package]] name = "apollo-router" -version = "1.15.1" +version = "1.16.0" dependencies = [ "access-json", - "ansi_term", "anyhow", "apollo-compiler", "apollo-parser 0.5.1", @@ -287,6 +286,7 @@ dependencies = [ "axum", "backtrace", "base64 0.20.0", + "brotli", "buildstructor 0.5.2", "bytes", "ci_info", @@ -334,6 +334,7 @@ dependencies = [ "multer", "multimap", "notify", + "nu-ansi-term 0.47.0", "once_cell", "opentelemetry", "opentelemetry-datadog", @@ -401,11 +402,13 @@ dependencies = [ "wiremock", "wsl", "yaml-rust", + "zstd", + "zstd-safe", ] [[package]] name = "apollo-router-benchmarks" -version = "1.15.1" +version = "1.16.0" dependencies = [ "apollo-parser 0.4.1", "apollo-router", @@ -421,7 +424,7 @@ dependencies = [ [[package]] name = "apollo-router-scaffold" -version = "1.15.1" +version = "1.16.0" dependencies = [ "anyhow", "cargo-scaffold", @@ -2528,9 +2531,9 @@ dependencies = [ [[package]] name = "h2" -version = "0.3.16" +version = "0.3.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5be7b54589b581f624f566bf5d8eb2bab1db736c51528720b6bd36b96b55924d" +checksum = "17f8a914c2987b688368b5138aa05321db91f4090cf26118185672ad588bce21" dependencies = [ "bytes", "fnv", @@ -3536,6 +3539,15 @@ dependencies = [ "winapi 0.3.9", ] +[[package]] +name = "nu-ansi-term" +version = "0.47.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1df031e117bca634c262e9bd3173776844b6c17a90b3741c9163663b4385af76" +dependencies = [ + "windows-sys 0.45.0", +] + [[package]] name = "num" version = "0.4.0" @@ -4828,9 +4840,9 @@ dependencies = [ [[package]] name = "router-bridge" -version = "0.2.1+v2.4.1" +version = "0.2.2+v2.4.2" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "4acbfd187f72fd4ac00ae1e050b011b87641135f7ef222fe462ee78964c037d4" +checksum = "a3b7f46d4ce5a83664e398eebe73d5501f38523f2b96fa0ef2e0cecb8474856e" dependencies = [ "anyhow", "async-channel", @@ -6276,7 +6288,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a6176eae26dd70d0c919749377897b54a9276bd7061339665dd68777926b5a70" dependencies = [ "matchers", - "nu-ansi-term", + "nu-ansi-term 0.46.0", "once_cell", "regex", "serde", @@ -7064,3 +7076,33 @@ dependencies = [ "quote", "syn 2.0.13", ] + +[[package]] +name = "zstd" +version = "0.12.3+zstd.1.5.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "76eea132fb024e0e13fd9c2f5d5d595d8a967aa72382ac2f9d39fcc95afd0806" +dependencies = [ + "zstd-safe", +] + +[[package]] +name = "zstd-safe" +version = "6.0.5+zstd.1.5.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d56d9e60b4b1758206c238a10165fbcae3ca37b01744e394c463463f6529d23b" +dependencies = [ + "libc", + "zstd-sys", +] + +[[package]] +name = "zstd-sys" +version = "2.0.8+zstd.1.5.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5556e6ee25d32df2586c098bbfa278803692a20d0ab9565e049480d52707ec8c" +dependencies = [ + "cc", + "libc", + "pkg-config", +] diff --git a/apollo-router-benchmarks/Cargo.toml b/apollo-router-benchmarks/Cargo.toml index 475f8b202b..04352ce2e7 100644 --- a/apollo-router-benchmarks/Cargo.toml +++ b/apollo-router-benchmarks/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "apollo-router-benchmarks" -version = "1.15.1" +version = "1.16.0" authors = ["Apollo Graph, Inc. 
"] edition = "2021" license = "Elastic-2.0" diff --git a/apollo-router-scaffold/Cargo.toml b/apollo-router-scaffold/Cargo.toml index a755cce558..d9f23886c7 100644 --- a/apollo-router-scaffold/Cargo.toml +++ b/apollo-router-scaffold/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "apollo-router-scaffold" -version = "1.15.1" +version = "1.16.0" authors = ["Apollo Graph, Inc. "] edition = "2021" license = "Elastic-2.0" diff --git a/apollo-router-scaffold/templates/base/Cargo.toml b/apollo-router-scaffold/templates/base/Cargo.toml index deea381e85..444b68326d 100644 --- a/apollo-router-scaffold/templates/base/Cargo.toml +++ b/apollo-router-scaffold/templates/base/Cargo.toml @@ -22,7 +22,7 @@ apollo-router = { path ="{{integration_test}}apollo-router" } apollo-router = { git="https://github.com/apollographql/router.git", branch="{{branch}}" } {{else}} # Note if you update these dependencies then also update xtask/Cargo.toml -apollo-router = "1.15.1" +apollo-router = "1.16.0" {{/if}} {{/if}} async-trait = "0.1.52" diff --git a/apollo-router-scaffold/templates/base/xtask/Cargo.toml b/apollo-router-scaffold/templates/base/xtask/Cargo.toml index ec7e5e57e0..282839db3c 100644 --- a/apollo-router-scaffold/templates/base/xtask/Cargo.toml +++ b/apollo-router-scaffold/templates/base/xtask/Cargo.toml @@ -13,7 +13,7 @@ apollo-router-scaffold = { path ="{{integration_test}}apollo-router-scaffold" } {{#if branch}} apollo-router-scaffold = { git="https://github.com/apollographql/router.git", branch="{{branch}}" } {{else}} -apollo-router-scaffold = { git = "https://github.com/apollographql/router.git", tag = "v1.15.1" } +apollo-router-scaffold = { git = "https://github.com/apollographql/router.git", tag = "v1.16.0" } {{/if}} {{/if}} anyhow = "1.0.58" diff --git a/apollo-router/Cargo.toml b/apollo-router/Cargo.toml index cae8d59417..e23c0699a0 100644 --- a/apollo-router/Cargo.toml +++ b/apollo-router/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "apollo-router" -version = "1.15.1" +version = 
"1.16.0" authors = ["Apollo Graph, Inc. "] repository = "https://github.com/apollographql/router/" documentation = "https://docs.rs/apollo-router" @@ -40,7 +40,6 @@ features = ["docs_rs"] askama = "0.11.1" access-json = "0.1.0" anyhow = "1.0.68" -ansi_term = "0.12" apollo-compiler = "0.8.0" apollo-parser = "0.5.1" arc-swap = "1.6.0" @@ -104,6 +103,7 @@ multer = "2.0.4" multimap = "0.8.3" # To avoid tokio issues notify = { version = "5.1.0", default-features = false, features=["macos_kqueue"] } +nu-ansi-term = "0.47" once_cell = "1.16.0" # Any package that starts with `opentelemetry` needs to be updated with care @@ -157,7 +157,7 @@ reqwest = { version = "0.11.15", default-features = false, features = [ "json", "stream", ] } -router-bridge = "=0.2.1+v2.4.1" +router-bridge = "=0.2.2+v2.4.2" rust-embed="6.4.2" rustls = "0.20.8" rustls-pemfile = "1.0.2" @@ -203,6 +203,10 @@ yaml-rust = "0.4.5" wsl = "0.1.0" tokio-rustls = "0.23.4" http-serde = "1.1.2" +memchr = "2.5.0" +brotli = "3.3.4" +zstd = "0.12.3" +zstd-safe = "6.0.5" [target.'cfg(macos)'.dependencies] uname = "0.1.1" diff --git a/apollo-router/experimental_features.json b/apollo-router/feature_discussions.json similarity index 67% rename from apollo-router/experimental_features.json rename to apollo-router/feature_discussions.json index c3ee74a475..84ffdd352d 100644 --- a/apollo-router/experimental_features.json +++ b/apollo-router/feature_discussions.json @@ -1,6 +1,8 @@ { + "experimental": { "experimental_retry": "https://github.com/apollographql/router/discussions/2241", "experimental_response_trace_id": "https://github.com/apollographql/router/discussions/2147", - "experimental_logging": "https://github.com/apollographql/router/discussions/1961", - "experimental_jwt_authentication": "https://github.com/apollographql/router/discussions/2391" + "experimental_logging": "https://github.com/apollographql/router/discussions/1961" + }, + "preview": {} } diff --git 
a/apollo-router/src/axum_factory/axum_http_server_factory.rs b/apollo-router/src/axum_factory/axum_http_server_factory.rs index 74c0ac1601..398f686a4a 100644 --- a/apollo-router/src/axum_factory/axum_http_server_factory.rs +++ b/apollo-router/src/axum_factory/axum_http_server_factory.rs @@ -19,6 +19,9 @@ use futures::channel::oneshot; use futures::future::join; use futures::future::join_all; use futures::prelude::*; +use http::header::ACCEPT_ENCODING; +use http::header::CONTENT_ENCODING; +use http::HeaderValue; use http::Request; use http_body::combinators::UnsyncBoxBody; use hyper::Body; @@ -32,10 +35,6 @@ use tokio_rustls::TlsAcceptor; use tower::service_fn; use tower::BoxError; use tower::ServiceExt; -use tower_http::compression::predicate::NotForContentType; -use tower_http::compression::CompressionLayer; -use tower_http::compression::DefaultPredicate; -use tower_http::compression::Predicate; use tower_http::trace::TraceLayer; use super::listeners::ensure_endpoints_consistency; @@ -45,6 +44,7 @@ use super::listeners::ListenersAndRouters; use super::utils::decompress_request_body; use super::utils::PropagatingMakeSpan; use super::ListenAddrAndRouter; +use crate::axum_factory::compression::Compressor; use crate::axum_factory::listeners::get_extra_listeners; use crate::axum_factory::listeners::serve_router_on_listen_addr; use crate::configuration::Configuration; @@ -329,12 +329,7 @@ where )) .layer(TraceLayer::new_for_http().make_span_with(PropagatingMakeSpan { entitlement })) .layer(Extension(service_factory)) - .layer(cors) - // Compress the response body, except for multipart responses such as with `@defer`. 
- // This is a work-around for https://github.com/apollographql/router/issues/1572 - .layer(CompressionLayer::new().compress_when( - DefaultPredicate::new().and(NotForContentType::const_new("multipart/")), - )); + .layer(cors); let route = endpoints_on_main_listener .into_iter() @@ -434,6 +429,11 @@ async fn handle_graphql( let request: router::Request = http_request.into(); let context = request.context.clone(); + let accept_encoding = request + .router_request + .headers() + .get(ACCEPT_ENCODING) + .cloned(); let res = service.oneshot(request).await; let dur = context.busy_time().await; @@ -467,7 +467,24 @@ async fn handle_graphql( } Ok(response) => { tracing::info!(counter.apollo_router_session_count_active = -1,); - response.response.into_response() + let (mut parts, body) = response.response.into_parts(); + + let opt_compressor = accept_encoding + .as_ref() + .and_then(|value| value.to_str().ok()) + .and_then(|v| Compressor::new(v.split(',').map(|s| s.trim()))); + let body = match opt_compressor { + None => body, + Some(compressor) => { + parts.headers.insert( + CONTENT_ENCODING, + HeaderValue::from_static(compressor.content_encoding()), + ); + Body::wrap_stream(compressor.process(body)) + } + }; + + http::Response::from_parts(parts, body).into_response() } } } diff --git a/apollo-router/src/axum_factory/compression/codec/brotli/encoder.rs b/apollo-router/src/axum_factory/compression/codec/brotli/encoder.rs new file mode 100644 index 0000000000..ef877335ac --- /dev/null +++ b/apollo-router/src/axum_factory/compression/codec/brotli/encoder.rs @@ -0,0 +1,112 @@ +// All code from this module is extracted from https://github.com/Nemo157/async-compression and is under MIT or Apache-2 licence +// it will be removed when we find a long lasting solution to https://github.com/Nemo157/async-compression/issues/154 +use std::fmt; +use std::io::Error; +use std::io::ErrorKind; +use std::io::Result; + +use brotli::enc::backward_references::BrotliEncoderParams; +use 
brotli::enc::encode::BrotliEncoderCompressStream; +use brotli::enc::encode::BrotliEncoderCreateInstance; +use brotli::enc::encode::BrotliEncoderHasMoreOutput; +use brotli::enc::encode::BrotliEncoderIsFinished; +use brotli::enc::encode::BrotliEncoderOperation; +use brotli::enc::encode::BrotliEncoderStateStruct; +use brotli::enc::StandardAlloc; + +use crate::axum_factory::compression::codec::Encode; +use crate::axum_factory::compression::util::PartialBuffer; + +pub(crate) struct BrotliEncoder { + state: BrotliEncoderStateStruct, +} + +impl BrotliEncoder { + pub(crate) fn new(params: BrotliEncoderParams) -> Self { + let mut state = BrotliEncoderCreateInstance(StandardAlloc::default()); + state.params = params; + Self { state } + } + + fn encode( + &mut self, + input: &mut PartialBuffer>, + output: &mut PartialBuffer + AsMut<[u8]>>, + op: BrotliEncoderOperation, + ) -> Result<()> { + let in_buf = input.unwritten(); + let out_buf = output.unwritten_mut(); + + let mut input_len = 0; + let mut output_len = 0; + + if BrotliEncoderCompressStream( + &mut self.state, + op, + &mut in_buf.len(), + in_buf, + &mut input_len, + &mut out_buf.len(), + out_buf, + &mut output_len, + &mut None, + &mut |_, _, _, _| (), + ) <= 0 + { + return Err(Error::new(ErrorKind::Other, "brotli error")); + } + + input.advance(input_len); + output.advance(output_len); + + Ok(()) + } +} + +impl Encode for BrotliEncoder { + fn encode( + &mut self, + input: &mut PartialBuffer>, + output: &mut PartialBuffer + AsMut<[u8]>>, + ) -> Result<()> { + self.encode( + input, + output, + BrotliEncoderOperation::BROTLI_OPERATION_PROCESS, + ) + } + + fn flush( + &mut self, + output: &mut PartialBuffer + AsMut<[u8]>>, + ) -> Result { + self.encode( + &mut PartialBuffer::new(&[][..]), + output, + BrotliEncoderOperation::BROTLI_OPERATION_FLUSH, + )?; + + Ok(BrotliEncoderHasMoreOutput(&self.state) == 0) + } + + fn finish( + &mut self, + output: &mut PartialBuffer + AsMut<[u8]>>, + ) -> Result { + self.encode( + &mut 
PartialBuffer::new(&[][..]), + output, + BrotliEncoderOperation::BROTLI_OPERATION_FINISH, + )?; + + Ok(BrotliEncoderIsFinished(&self.state) == 1) + } +} + +impl fmt::Debug for BrotliEncoder { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("BrotliEncoder") + .field("compress", &"") + .finish() + } +} diff --git a/apollo-router/src/axum_factory/compression/codec/brotli/mod.rs b/apollo-router/src/axum_factory/compression/codec/brotli/mod.rs new file mode 100644 index 0000000000..1e71412652 --- /dev/null +++ b/apollo-router/src/axum_factory/compression/codec/brotli/mod.rs @@ -0,0 +1,5 @@ +// All code from this module is extracted from https://github.com/Nemo157/async-compression and is under MIT or Apache-2 licence +// it will be removed when we find a long lasting solution to https://github.com/Nemo157/async-compression/issues/154 +mod encoder; + +pub(crate) use self::encoder::BrotliEncoder; diff --git a/apollo-router/src/axum_factory/compression/codec/deflate/encoder.rs b/apollo-router/src/axum_factory/compression/codec/deflate/encoder.rs new file mode 100644 index 0000000000..88cac88903 --- /dev/null +++ b/apollo-router/src/axum_factory/compression/codec/deflate/encoder.rs @@ -0,0 +1,46 @@ +// All code from this module is extracted from https://github.com/Nemo157/async-compression and is under MIT or Apache-2 licence +// it will be removed when we find a long lasting solution to https://github.com/Nemo157/async-compression/issues/154 +use std::io::Result; + +use flate2::Compression; + +use crate::axum_factory::compression::codec::Encode; +use crate::axum_factory::compression::codec::FlateEncoder; +use crate::axum_factory::compression::util::PartialBuffer; + +#[derive(Debug)] +pub(crate) struct DeflateEncoder { + inner: FlateEncoder, +} + +impl DeflateEncoder { + pub(crate) fn new(level: Compression) -> Self { + Self { + inner: FlateEncoder::new(level, false), + } + } +} + +impl Encode for DeflateEncoder { + fn encode( + &mut self, + 
input: &mut PartialBuffer>, + output: &mut PartialBuffer + AsMut<[u8]>>, + ) -> Result<()> { + self.inner.encode(input, output) + } + + fn flush( + &mut self, + output: &mut PartialBuffer + AsMut<[u8]>>, + ) -> Result { + self.inner.flush(output) + } + + fn finish( + &mut self, + output: &mut PartialBuffer + AsMut<[u8]>>, + ) -> Result { + self.inner.finish(output) + } +} diff --git a/apollo-router/src/axum_factory/compression/codec/deflate/mod.rs b/apollo-router/src/axum_factory/compression/codec/deflate/mod.rs new file mode 100644 index 0000000000..5a2d24c4be --- /dev/null +++ b/apollo-router/src/axum_factory/compression/codec/deflate/mod.rs @@ -0,0 +1,5 @@ +// All code from this module is extracted from https://github.com/Nemo157/async-compression and is under MIT or Apache-2 licence +// it will be removed when we find a long lasting solution to https://github.com/Nemo157/async-compression/issues/154 +mod encoder; + +pub(crate) use self::encoder::DeflateEncoder; diff --git a/apollo-router/src/axum_factory/compression/codec/flate/encoder.rs b/apollo-router/src/axum_factory/compression/codec/flate/encoder.rs new file mode 100644 index 0000000000..e264b874ff --- /dev/null +++ b/apollo-router/src/axum_factory/compression/codec/flate/encoder.rs @@ -0,0 +1,110 @@ +// All code from this module is extracted from https://github.com/Nemo157/async-compression and is under MIT or Apache-2 licence +// it will be removed when we find a long lasting solution to https://github.com/Nemo157/async-compression/issues/154 +use std::io::Error; +use std::io::ErrorKind; +use std::io::Result; + +use flate2::Compress; +use flate2::Compression; +use flate2::FlushCompress; +use flate2::Status; + +use crate::axum_factory::compression::codec::Encode; +use crate::axum_factory::compression::util::PartialBuffer; + +#[derive(Debug)] +pub(crate) struct FlateEncoder { + compress: Compress, + flushed: bool, +} + +impl FlateEncoder { + pub(crate) fn new(level: Compression, zlib_header: bool) -> Self 
{ + Self { + compress: Compress::new(level, zlib_header), + flushed: true, + } + } + + fn encode( + &mut self, + input: &mut PartialBuffer>, + output: &mut PartialBuffer + AsMut<[u8]>>, + flush: FlushCompress, + ) -> Result { + let prior_in = self.compress.total_in(); + let prior_out = self.compress.total_out(); + + let status = self + .compress + .compress(input.unwritten(), output.unwritten_mut(), flush)?; + + input.advance((self.compress.total_in() - prior_in) as usize); + output.advance((self.compress.total_out() - prior_out) as usize); + + Ok(status) + } +} + +impl Encode for FlateEncoder { + fn encode( + &mut self, + input: &mut PartialBuffer>, + output: &mut PartialBuffer + AsMut<[u8]>>, + ) -> Result<()> { + self.flushed = false; + match self.encode(input, output, FlushCompress::None)? { + Status::Ok => Ok(()), + Status::StreamEnd => unreachable!(), + Status::BufError => Err(Error::new(ErrorKind::Other, "unexpected BufError")), + } + } + + fn flush( + &mut self, + output: &mut PartialBuffer + AsMut<[u8]>>, + ) -> Result { + // We need to keep track of whether we've already flushed otherwise we'll just keep writing + // out sync blocks continuously and probably never complete flushing. + if self.flushed { + return Ok(true); + } + + self.encode( + &mut PartialBuffer::new(&[][..]), + output, + FlushCompress::Sync, + )?; + + loop { + let old_len = output.written().len(); + self.encode( + &mut PartialBuffer::new(&[][..]), + output, + FlushCompress::None, + )?; + if output.written().len() == old_len { + break; + } + } + + self.flushed = true; + Ok(!output.unwritten().is_empty()) + } + + fn finish( + &mut self, + output: &mut PartialBuffer + AsMut<[u8]>>, + ) -> Result { + self.flushed = false; + match self.encode( + &mut PartialBuffer::new(&[][..]), + output, + FlushCompress::Finish, + )? 
{ + Status::Ok => Ok(false), + Status::StreamEnd => Ok(true), + Status::BufError => Err(Error::new(ErrorKind::Other, "unexpected BufError")), + } + } +} diff --git a/apollo-router/src/axum_factory/compression/codec/flate/mod.rs b/apollo-router/src/axum_factory/compression/codec/flate/mod.rs new file mode 100644 index 0000000000..215623803c --- /dev/null +++ b/apollo-router/src/axum_factory/compression/codec/flate/mod.rs @@ -0,0 +1,5 @@ +// All code from this module is extracted from https://github.com/Nemo157/async-compression and is under MIT or Apache-2 licence +// it will be removed when we find a long lasting solution to https://github.com/Nemo157/async-compression/issues/154 +mod encoder; + +pub(crate) use self::encoder::FlateEncoder; diff --git a/apollo-router/src/axum_factory/compression/codec/gzip/encoder.rs b/apollo-router/src/axum_factory/compression/codec/gzip/encoder.rs new file mode 100644 index 0000000000..9203c9103b --- /dev/null +++ b/apollo-router/src/axum_factory/compression/codec/gzip/encoder.rs @@ -0,0 +1,170 @@ +// All code from this module is extracted from https://github.com/Nemo157/async-compression and is under MIT or Apache-2 licence +// it will be removed when we find a long lasting solution to https://github.com/Nemo157/async-compression/issues/154 +use std::io::Result; + +use flate2::Compression; +use flate2::Crc; + +use crate::axum_factory::compression::codec::Encode; +use crate::axum_factory::compression::codec::FlateEncoder; +use crate::axum_factory::compression::util::PartialBuffer; + +#[derive(Debug)] +enum State { + Header(PartialBuffer>), + Encoding, + Footer(PartialBuffer>), + Done, +} + +#[derive(Debug)] +pub(crate) struct GzipEncoder { + inner: FlateEncoder, + crc: Crc, + state: State, +} + +fn header(level: Compression) -> Vec { + let level_byte = if level.level() >= Compression::best().level() { + 0x02 + } else if level.level() <= Compression::fast().level() { + 0x04 + } else { + 0x00 + }; + + vec![0x1f, 0x8b, 0x08, 0, 0, 0, 
0, 0, level_byte, 0xff] +} + +impl GzipEncoder { + pub(crate) fn new(level: Compression) -> Self { + Self { + inner: FlateEncoder::new(level, false), + crc: Crc::new(), + state: State::Header(header(level).into()), + } + } + + fn footer(&mut self) -> Vec { + let mut output = Vec::with_capacity(8); + + output.extend(&self.crc.sum().to_le_bytes()); + output.extend(&self.crc.amount().to_le_bytes()); + + output + } +} + +impl Encode for GzipEncoder { + fn encode( + &mut self, + input: &mut PartialBuffer>, + output: &mut PartialBuffer + AsMut<[u8]>>, + ) -> Result<()> { + loop { + match &mut self.state { + State::Header(header) => { + output.copy_unwritten_from(&mut *header); + + if header.unwritten().is_empty() { + self.state = State::Encoding; + } + } + + State::Encoding => { + let prior_written = input.written().len(); + self.inner.encode(input, output)?; + self.crc.update(&input.written()[prior_written..]); + } + + State::Footer(_) | State::Done => panic!("encode after complete"), + }; + + if input.unwritten().is_empty() || output.unwritten().is_empty() { + return Ok(()); + } + } + } + + fn flush( + &mut self, + output: &mut PartialBuffer + AsMut<[u8]>>, + ) -> Result { + loop { + let done = match &mut self.state { + State::Header(header) => { + output.copy_unwritten_from(&mut *header); + + if header.unwritten().is_empty() { + self.state = State::Encoding; + } + false + } + + State::Encoding => self.inner.flush(output)?, + + State::Footer(footer) => { + output.copy_unwritten_from(&mut *footer); + + if footer.unwritten().is_empty() { + self.state = State::Done; + true + } else { + false + } + } + + State::Done => true, + }; + + if done { + return Ok(true); + } + + if output.unwritten().is_empty() { + return Ok(false); + } + } + } + + fn finish( + &mut self, + output: &mut PartialBuffer + AsMut<[u8]>>, + ) -> Result { + loop { + match &mut self.state { + State::Header(header) => { + output.copy_unwritten_from(&mut *header); + + if header.unwritten().is_empty() { + 
self.state = State::Encoding; + } + } + + State::Encoding => { + if self.inner.finish(output)? { + self.state = State::Footer(self.footer().into()); + } + } + + State::Footer(footer) => { + output.copy_unwritten_from(&mut *footer); + + if footer.unwritten().is_empty() { + self.state = State::Done; + } + } + + State::Done => {} + }; + + if let State::Done = self.state { + return Ok(true); + } + + if output.unwritten().is_empty() { + return Ok(false); + } + } + } +} diff --git a/apollo-router/src/axum_factory/compression/codec/gzip/header.rs b/apollo-router/src/axum_factory/compression/codec/gzip/header.rs new file mode 100644 index 0000000000..754dcaa012 --- /dev/null +++ b/apollo-router/src/axum_factory/compression/codec/gzip/header.rs @@ -0,0 +1,167 @@ +#![allow(dead_code)] +// All code from this module is extracted from https://github.com/Nemo157/async-compression and is under MIT or Apache-2 licence +// it will be removed when we find a long lasting solution to https://github.com/Nemo157/async-compression/issues/154 +use std::io::Error; +use std::io::ErrorKind; +use std::io::Result; + +use crate::axum_factory::compression::util::PartialBuffer; + +#[derive(Debug, Default)] +struct Flags { + ascii: bool, + crc: bool, + extra: bool, + filename: bool, + comment: bool, +} + +#[derive(Debug, Default)] +pub(super) struct Header { + flags: Flags, +} + +#[derive(Debug)] +enum State { + Fixed(PartialBuffer<[u8; 10]>), + ExtraLen(PartialBuffer<[u8; 2]>), + Extra(PartialBuffer>), + Filename(Vec), + Comment(Vec), + Crc(PartialBuffer<[u8; 2]>), + Done, +} + +impl Default for State { + fn default() -> Self { + State::Fixed(<_>::default()) + } +} + +#[derive(Debug, Default)] +pub(super) struct Parser { + state: State, + header: Header, +} + +impl Header { + fn parse(input: &[u8; 10]) -> Result { + if input[0..3] != [0x1f, 0x8b, 0x08] { + return Err(Error::new(ErrorKind::InvalidData, "Invalid gzip header")); + } + + let flag = input[3]; + + let flags = Flags { + ascii: (flag & 
0b0000_0001) != 0, + crc: (flag & 0b0000_0010) != 0, + extra: (flag & 0b0000_0100) != 0, + filename: (flag & 0b0000_1000) != 0, + comment: (flag & 0b0001_0000) != 0, + }; + + Ok(Header { flags }) + } +} + +impl Parser { + pub(super) fn input( + &mut self, + input: &mut PartialBuffer>, + ) -> Result> { + loop { + match &mut self.state { + State::Fixed(data) => { + data.copy_unwritten_from(input); + + if data.unwritten().is_empty() { + self.header = Header::parse(&data.take().into_inner())?; + self.state = State::ExtraLen(<_>::default()); + } else { + return Ok(None); + } + } + + State::ExtraLen(data) => { + if !self.header.flags.extra { + self.state = State::Filename(<_>::default()); + continue; + } + + data.copy_unwritten_from(input); + + if data.unwritten().is_empty() { + let len = u16::from_le_bytes(data.take().into_inner()); + self.state = State::Extra(vec![0; usize::from(len)].into()); + } else { + return Ok(None); + } + } + + State::Extra(data) => { + data.copy_unwritten_from(input); + + if data.unwritten().is_empty() { + self.state = State::Filename(<_>::default()); + } else { + return Ok(None); + } + } + + State::Filename(data) => { + if !self.header.flags.filename { + self.state = State::Comment(<_>::default()); + continue; + } + + if let Some(len) = memchr::memchr(0, input.unwritten()) { + data.extend_from_slice(&input.unwritten()[..len]); + input.advance(len + 1); + self.state = State::Comment(<_>::default()); + } else { + data.extend_from_slice(input.unwritten()); + input.advance(input.unwritten().len()); + return Ok(None); + } + } + + State::Comment(data) => { + if !self.header.flags.comment { + self.state = State::Crc(<_>::default()); + continue; + } + + if let Some(len) = memchr::memchr(0, input.unwritten()) { + data.extend_from_slice(&input.unwritten()[..len]); + input.advance(len + 1); + self.state = State::Crc(<_>::default()); + } else { + data.extend_from_slice(input.unwritten()); + input.advance(input.unwritten().len()); + return Ok(None); + } + 
} + + State::Crc(data) => { + if !self.header.flags.crc { + self.state = State::Done; + return Ok(Some(std::mem::take(&mut self.header))); + } + + data.copy_unwritten_from(input); + + if data.unwritten().is_empty() { + self.state = State::Done; + return Ok(Some(std::mem::take(&mut self.header))); + } else { + return Ok(None); + } + } + + State::Done => { + panic!("parser used after done"); + } + }; + } + } +} diff --git a/apollo-router/src/axum_factory/compression/codec/gzip/mod.rs b/apollo-router/src/axum_factory/compression/codec/gzip/mod.rs new file mode 100644 index 0000000000..77d5604f1c --- /dev/null +++ b/apollo-router/src/axum_factory/compression/codec/gzip/mod.rs @@ -0,0 +1,6 @@ +// All code from this module is extracted from https://github.com/Nemo157/async-compression and is under MIT or Apache-2 licence +// it will be removed when we find a long lasting solution to https://github.com/Nemo157/async-compression/issues/154 +mod encoder; +mod header; + +pub(crate) use self::encoder::GzipEncoder; diff --git a/apollo-router/src/axum_factory/compression/codec/mod.rs b/apollo-router/src/axum_factory/compression/codec/mod.rs new file mode 100644 index 0000000000..71e801b313 --- /dev/null +++ b/apollo-router/src/axum_factory/compression/codec/mod.rs @@ -0,0 +1,36 @@ +// All code from this module is extracted from https://github.com/Nemo157/async-compression and is under MIT or Apache-2 licence +// it will be removed when we find a long lasting solution to https://github.com/Nemo157/async-compression/issues/154 +use std::io::Result; + +use super::util::PartialBuffer; + +mod brotli; +mod deflate; +mod flate; +mod gzip; +//mod zlib; +mod zstd; + +pub(crate) use self::brotli::BrotliEncoder; +pub(crate) use self::deflate::DeflateEncoder; +pub(crate) use self::flate::FlateEncoder; +pub(crate) use self::gzip::GzipEncoder; +pub(crate) use self::zstd::ZstdEncoder; + +pub(crate) trait Encode { + fn encode( + &mut self, + input: &mut PartialBuffer>, + output: &mut 
PartialBuffer + AsMut<[u8]>>, + ) -> Result<()>; + + /// Returns whether the internal buffers are flushed + fn flush(&mut self, output: &mut PartialBuffer + AsMut<[u8]>>) + -> Result; + + /// Returns whether the internal buffers are flushed and the end of the stream is written + fn finish( + &mut self, + output: &mut PartialBuffer + AsMut<[u8]>>, + ) -> Result; +} diff --git a/apollo-router/src/axum_factory/compression/codec/zstd/encoder.rs b/apollo-router/src/axum_factory/compression/codec/zstd/encoder.rs new file mode 100644 index 0000000000..fe6230bf72 --- /dev/null +++ b/apollo-router/src/axum_factory/compression/codec/zstd/encoder.rs @@ -0,0 +1,61 @@ +// All code from this module is extracted from https://github.com/Nemo157/async-compression and is under MIT or Apache-2 licence +// it will be removed when we find a long lasting solution to https://github.com/Nemo157/async-compression/issues/154 +use std::io::Result; + +use zstd::stream::raw::Encoder; +use zstd::stream::raw::Operation; + +use crate::axum_factory::compression::codec::Encode; +use crate::axum_factory::compression::unshared::Unshared; +use crate::axum_factory::compression::util::PartialBuffer; + +#[derive(Debug)] +pub(crate) struct ZstdEncoder { + encoder: Unshared>, +} + +impl ZstdEncoder { + pub(crate) fn new(level: i32) -> Self { + Self { + encoder: Unshared::new(Encoder::new(level).unwrap()), + } + } +} + +impl Encode for ZstdEncoder { + fn encode( + &mut self, + input: &mut PartialBuffer>, + output: &mut PartialBuffer + AsMut<[u8]>>, + ) -> Result<()> { + let status = self + .encoder + .get_mut() + .run_on_buffers(input.unwritten(), output.unwritten_mut())?; + input.advance(status.bytes_read); + output.advance(status.bytes_written); + Ok(()) + } + + fn flush( + &mut self, + output: &mut PartialBuffer + AsMut<[u8]>>, + ) -> Result { + let mut out_buf = zstd_safe::OutBuffer::around(output.unwritten_mut()); + let bytes_left = self.encoder.get_mut().flush(&mut out_buf)?; + let len = 
out_buf.as_slice().len(); + output.advance(len); + Ok(bytes_left == 0) + } + + fn finish( + &mut self, + output: &mut PartialBuffer + AsMut<[u8]>>, + ) -> Result { + let mut out_buf = zstd_safe::OutBuffer::around(output.unwritten_mut()); + let bytes_left = self.encoder.get_mut().finish(&mut out_buf, true)?; + let len = out_buf.as_slice().len(); + output.advance(len); + Ok(bytes_left == 0) + } +} diff --git a/apollo-router/src/axum_factory/compression/codec/zstd/mod.rs b/apollo-router/src/axum_factory/compression/codec/zstd/mod.rs new file mode 100644 index 0000000000..a99dd85331 --- /dev/null +++ b/apollo-router/src/axum_factory/compression/codec/zstd/mod.rs @@ -0,0 +1,5 @@ +// All code from this module is extracted from https://github.com/Nemo157/async-compression and is under MIT or Apache-2 licence +// it will be removed when we find a long lasting solution to https://github.com/Nemo157/async-compression/issues/154 +mod encoder; + +pub(crate) use self::encoder::ZstdEncoder; diff --git a/apollo-router/src/axum_factory/compression/mod.rs b/apollo-router/src/axum_factory/compression/mod.rs new file mode 100644 index 0000000000..38eb0c2e6d --- /dev/null +++ b/apollo-router/src/axum_factory/compression/mod.rs @@ -0,0 +1,183 @@ +use brotli::enc::BrotliEncoderParams; +use bytes::Bytes; +use bytes::BytesMut; +use flate2::Compression; +use futures::Stream; +use futures::StreamExt; +use tokio::sync::mpsc; +use tokio_stream::wrappers::ReceiverStream; +use tower::BoxError; + +use self::codec::BrotliEncoder; +use self::codec::DeflateEncoder; +use self::codec::Encode; +use self::codec::GzipEncoder; +use self::codec::ZstdEncoder; +use self::util::PartialBuffer; + +pub(crate) mod codec; +pub(crate) mod unshared; +pub(crate) mod util; + +pub(crate) enum Compressor { + Deflate(DeflateEncoder), + Gzip(GzipEncoder), + Brotli(Box), + Zstd(ZstdEncoder), +} + +impl Compressor { + pub(crate) fn new<'a, It: 'a>(it: It) -> Option + where + It: Iterator, + { + for s in it { + match s { + 
"gzip" => return Some(Compressor::Gzip(GzipEncoder::new(Compression::fast()))), + "deflate" => { + return Some(Compressor::Deflate( + DeflateEncoder::new(Compression::fast()), + )) + } + // FIXME: find the "fast" brotli encoder params + "br" => { + return Some(Compressor::Brotli(Box::new(BrotliEncoder::new( + BrotliEncoderParams::default(), + )))) + } + "zstd" => { + return Some(Compressor::Zstd(ZstdEncoder::new(zstd_safe::min_c_level()))) + } + _ => {} + } + } + None + } + + pub(crate) fn content_encoding(&self) -> &'static str { + match self { + Compressor::Deflate(_) => "deflate", + Compressor::Gzip(_) => "gzip", + Compressor::Brotli(_) => "br", + Compressor::Zstd(_) => "zstd", + } + } + + pub(crate) fn process( + mut self, + mut stream: hyper::Body, + ) -> impl Stream> +where { + let (tx, rx) = mpsc::channel(10); + + tokio::task::spawn(async move { + while let Some(data) = stream.next().await { + match data { + Err(e) => { + if (tx.send(Err(e.into())).await).is_err() { + return; + } + } + Ok(data) => { + let mut buf = BytesMut::zeroed(1024); + let mut written = 0usize; + + let mut partial_input = PartialBuffer::new(&*data); + loop { + let mut partial_output = PartialBuffer::new(&mut buf); + partial_output.advance(written); + + if let Err(e) = self.encode(&mut partial_input, &mut partial_output) { + let _ = tx.send(Err(e.into())).await; + return; + } + + written += partial_output.written().len(); + + if !partial_input.unwritten().is_empty() { + // there was not enough space in the output buffer to compress everything, + // so we resize and add more data + if partial_output.unwritten().is_empty() { + let _ = partial_output.into_inner(); + buf.reserve(written); + } + } else { + match self.flush(&mut partial_output) { + Err(e) => { + let _ = tx.send(Err(e.into())).await; + return; + } + Ok(_) => { + let len = partial_output.written().len(); + let _ = partial_output.into_inner(); + buf.resize(len, 0); + if (tx.send(Ok(buf.freeze())).await).is_err() { + return; + } + 
break; + } + } + } + } + } + } + } + + let buf = BytesMut::zeroed(64); + let mut partial_output = PartialBuffer::new(buf); + + match self.finish(&mut partial_output) { + Err(e) => { + let _ = tx.send(Err(e.into())).await; + } + Ok(_) => { + let len = partial_output.written().len(); + + let mut buf = partial_output.into_inner(); + buf.resize(len, 0); + let _ = tx.send(Ok(buf.freeze())).await; + } + } + }); + ReceiverStream::new(rx) + } +} + +impl Encode for Compressor { + fn encode( + &mut self, + input: &mut PartialBuffer>, + output: &mut PartialBuffer + AsMut<[u8]>>, + ) -> std::io::Result<()> { + match self { + Compressor::Deflate(e) => e.encode(input, output), + Compressor::Gzip(e) => e.encode(input, output), + Compressor::Brotli(e) => e.encode(input, output), + Compressor::Zstd(e) => e.encode(input, output), + } + } + + fn flush( + &mut self, + output: &mut PartialBuffer + AsMut<[u8]>>, + ) -> std::io::Result { + match self { + Compressor::Deflate(e) => e.flush(output), + Compressor::Gzip(e) => e.flush(output), + Compressor::Brotli(e) => e.flush(output), + Compressor::Zstd(e) => e.flush(output), + } + } + + fn finish( + &mut self, + output: &mut PartialBuffer + AsMut<[u8]>>, + ) -> std::io::Result { + match self { + Compressor::Deflate(e) => e.finish(output), + Compressor::Gzip(e) => e.finish(output), + Compressor::Brotli(e) => e.finish(output), + Compressor::Zstd(e) => e.finish(output), + } + } +} diff --git a/apollo-router/src/axum_factory/compression/unshared.rs b/apollo-router/src/axum_factory/compression/unshared.rs new file mode 100644 index 0000000000..b4b244f168 --- /dev/null +++ b/apollo-router/src/axum_factory/compression/unshared.rs @@ -0,0 +1,42 @@ +// All code from this module is extracted from https://github.com/Nemo157/async-compression and is under MIT or Apache-2 licence +// it will be removed when we find a long lasting solution to https://github.com/Nemo157/async-compression/issues/154 +#![allow(dead_code)] // unused without any features + 
+use core::fmt::Debug; +use core::fmt::{self}; + +/// Wraps a type and only allows unique borrowing, the main usecase is to wrap a `!Sync` type and +/// implement `Sync` for it as this type blocks having multiple shared references to the inner +/// value. +/// +/// # Safety +/// +/// We must be careful when accessing `inner`, there must be no way to create a shared reference to +/// it from a shared reference to an `Unshared`, as that would allow creating shared references on +/// multiple threads. +/// +/// As an example deriving or implementing `Clone` is impossible, two threads could attempt to +/// clone a shared `Unshared` reference which would result in accessing the same inner value +/// concurrently. +pub(crate) struct Unshared { + inner: T, +} + +impl Unshared { + pub(crate) fn new(inner: T) -> Self { + Unshared { inner } + } + + pub(crate) fn get_mut(&mut self) -> &mut T { + &mut self.inner + } +} + +/// Safety: See comments on main docs for `Unshared` +unsafe impl Sync for Unshared {} + +impl Debug for Unshared { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct(core::any::type_name::()).finish() + } +} diff --git a/apollo-router/src/axum_factory/compression/util.rs b/apollo-router/src/axum_factory/compression/util.rs new file mode 100644 index 0000000000..609667b217 --- /dev/null +++ b/apollo-router/src/axum_factory/compression/util.rs @@ -0,0 +1,64 @@ +#![allow(dead_code)] +// All code from this module is extracted from https://github.com/Nemo157/async-compression and is under MIT or Apache-2 licence +// it will be removed when we find a long lasting solution to https://github.com/Nemo157/async-compression/issues/154 +pub(crate) fn _assert_send() {} +pub(crate) fn _assert_sync() {} + +#[derive(Debug, Default)] +pub(crate) struct PartialBuffer> { + buffer: B, + index: usize, +} + +impl> PartialBuffer { + pub(crate) fn new(buffer: B) -> Self { + Self { buffer, index: 0 } + } + + pub(crate) fn written(&self) -> &[u8] { + 
&self.buffer.as_ref()[..self.index] + } + + pub(crate) fn unwritten(&self) -> &[u8] { + &self.buffer.as_ref()[self.index..] + } + + pub(crate) fn advance(&mut self, amount: usize) { + self.index += amount; + } + + pub(crate) fn get_mut(&mut self) -> &mut B { + &mut self.buffer + } + + pub(crate) fn into_inner(self) -> B { + self.buffer + } +} + +impl + AsMut<[u8]>> PartialBuffer { + pub(crate) fn unwritten_mut(&mut self) -> &mut [u8] { + &mut self.buffer.as_mut()[self.index..] + } + + pub(crate) fn copy_unwritten_from>(&mut self, other: &mut PartialBuffer) { + let len = std::cmp::min(self.unwritten().len(), other.unwritten().len()); + + self.unwritten_mut()[..len].copy_from_slice(&other.unwritten()[..len]); + + self.advance(len); + other.advance(len); + } +} + +impl + Default> PartialBuffer { + pub(crate) fn take(&mut self) -> Self { + std::mem::replace(self, Self::new(B::default())) + } +} + +impl + AsMut<[u8]>> From for PartialBuffer { + fn from(buffer: B) -> Self { + Self::new(buffer) + } +} diff --git a/apollo-router/src/axum_factory/mod.rs b/apollo-router/src/axum_factory/mod.rs index 5f6668794d..78234133dd 100644 --- a/apollo-router/src/axum_factory/mod.rs +++ b/apollo-router/src/axum_factory/mod.rs @@ -1,5 +1,6 @@ //! 
axum factory is useful to create an [`AxumHttpServerFactory`] which implements [`crate::http_server_factory::HttpServerFactory`] mod axum_http_server_factory; +mod compression; mod listeners; #[cfg(test)] pub(crate) mod tests; diff --git a/apollo-router/src/configuration/experimental.rs b/apollo-router/src/configuration/experimental.rs index d5f1c6e1da..06784b4483 100644 --- a/apollo-router/src/configuration/experimental.rs +++ b/apollo-router/src/configuration/experimental.rs @@ -1,71 +1,111 @@ use std::collections::HashMap; +use serde::Deserialize; use serde_json::Value; -pub(crate) fn print_all_experimental_conf() { - let available_exp_confs = serde_json::from_str::>(include_str!( - "../../experimental_features.json" - )) - .expect("cannot load the list of available experimental configurations"); - - let available_exp_confs_str: Vec = available_exp_confs - .into_iter() - .map(|(used_exp_conf, discussion_link)| format!("\t- {used_exp_conf}: {discussion_link}")) - .collect(); - println!( - "List of all experimental configurations with related GitHub discussions:\n\n{}", - available_exp_confs_str.join("\n") - ); +#[derive(Deserialize)] +pub(crate) struct Discussed { + experimental: HashMap, + preview: HashMap, } -pub(crate) fn log_used_experimental_conf(conf: &Value) { - let available_discussions = serde_json::from_str::>(include_str!( - "../../experimental_features.json" - )); - if let Ok(available_discussions) = available_discussions { - let used_experimental_conf = get_experimental_configurations(conf); - let needed_discussions: Vec = used_experimental_conf +impl Discussed { + pub(crate) fn new() -> Self { + serde_json::from_str(include_str!("../../feature_discussions.json")) + .expect("cannot load the list of available discussed configurations") + } + + pub(crate) fn print_experimental(&self) { + self.print("experimental", &self.experimental) + } + + pub(crate) fn print_preview(&self) { + self.print("preview", &self.preview) + } + + pub(crate) fn print(&self, 
stage: &str, urls: &HashMap) { + let mut list: Vec<_> = urls + .iter() + .map(|(config_key, discussion_link)| format!("\t- {config_key}: {discussion_link}")) + .collect(); + if list.is_empty() { + println!("This Router version has no {stage} configuration") + } else { + list.sort(); + let list = list.join("\n"); + println!( + "List of all {stage} configurations with related GitHub discussions:\n\n{list}" + ) + } + } + + pub(crate) fn log_experimental_used(&self, conf: &Value) { + self.log_used( + conf, + "experimental", + &self.experimental, + "We may make breaking changes in future releases. \ + To help us design the stable version we need your feedback.", + ) + } + + pub(crate) fn log_preview_used(&self, conf: &Value) { + self.log_used( + conf, + "preview", + &self.preview, + "These features are not officially supported with any SLA \ + and may still contain bugs or undergo iteration. \ + You're encouraged to try preview features in test environments \ + to familiarize yourself with upcoming functionality \ + before it reaches general availability.", + ) + } + + fn log_used( + &self, + conf: &Value, + prefix: &str, + urls: &HashMap, + stage_description: &str, + ) { + let used = get_configurations(conf, &format!("{prefix}_")); + let mut list: Vec<_> = used .into_iter() - .filter_map(|used_exp_conf| { - available_discussions - .get(&used_exp_conf) - .map(|discussion_link| format!("\t- {used_exp_conf}: {discussion_link}")) + .filter_map(|config_key| { + urls.get(&config_key) + .map(|discussion_link| format!("\t- {config_key}: {discussion_link}")) }) .collect(); - if !needed_discussions.is_empty() { + if !list.is_empty() { + list.sort(); + let list = list.join("\n"); tracing::info!( - r#"You're using some "experimental" features (configuration prefixed by "experimental_" or contained within an "experimental" section), we may make breaking changes in future releases. 
-To help us design the stable version we need your feedback, here is a list of links where you can give your opinion: - -{} -"#, - needed_discussions.join("\n") + "You're using some \"{prefix}\" features of the Apollo Router \ + (those which have their configuration prefixed by \"{prefix}_\").\n\ + {stage_description}\n\ + Here is a list of links where you can give your opinion:\n\n\ + {list}\n\n\ + For more information about launch stages, please see the documentation here: \ + https://www.apollographql.com/docs/resources/product-launch-stages/", ); } } } -fn get_experimental_configurations(conf: &Value) -> Vec { - let mut experimental_fields = Vec::new(); - visit_experimental_configurations(conf, &mut experimental_fields); - - experimental_fields +fn get_configurations(conf: &Value, prefix: &str) -> Vec { + let mut fields = Vec::new(); + visit_configurations(conf, prefix, &mut fields); + fields } -pub(crate) fn visit_experimental_configurations( - conf: &Value, - experimental_fields: &mut Vec, -) { +fn visit_configurations(conf: &Value, prefix: &str, fields: &mut Vec) { if let Value::Object(object) = conf { object.iter().for_each(|(field_name, val)| { - if field_name.starts_with("experimental_") { - experimental_fields.push(field_name.clone()); - } - // TODO: Remove when JWT authentication is generally available - if field_name == "experimental" { - experimental_fields.push("experimental_jwt_authentication".to_string()); + if field_name.starts_with(prefix) { + fields.push(field_name.clone()); } - visit_experimental_configurations(val, experimental_fields); + visit_configurations(val, prefix, fields); }); } } @@ -87,7 +127,7 @@ mod tests { }); assert_eq!( - get_experimental_configurations(&val), + get_configurations(&val, "experimental"), vec![ "experimental_logging".to_string(), "experimental_trace_id".to_string() diff --git a/apollo-router/src/configuration/mod.rs b/apollo-router/src/configuration/mod.rs index b9964967c4..a91df09a3c 100644 --- 
a/apollo-router/src/configuration/mod.rs +++ b/apollo-router/src/configuration/mod.rs @@ -46,7 +46,7 @@ use thiserror::Error; use self::cors::Cors; use self::expansion::Expansion; -pub(crate) use self::experimental::print_all_experimental_conf; +pub(crate) use self::experimental::Discussed; pub(crate) use self::schema::generate_config_schema; pub(crate) use self::schema::generate_upgrade; use self::subgraph::SubgraphConfiguration; @@ -136,6 +136,11 @@ pub struct Configuration { #[serde(default)] pub(crate) apq: Apq, + /// Configuration for chaos testing, trying to reproduce bugs that require uncommon conditions. + /// You probably don’t want this in production! + #[serde(default)] + pub(crate) experimental_chaos: Chaos, + /// Plugin configuration #[serde(default)] plugins: UserPlugins, @@ -167,6 +172,7 @@ impl<'de> serde::Deserialize<'de> for Configuration { apollo_plugins: ApolloPlugins, tls: Tls, apq: Apq, + experimental_chaos: Chaos, } let ad_hoc: AdHocConfiguration = serde::Deserialize::deserialize(deserializer)?; @@ -181,6 +187,7 @@ impl<'de> serde::Deserialize<'de> for Configuration { .apollo_plugins(ad_hoc.apollo_plugins.plugins) .tls(ad_hoc.tls) .apq(ad_hoc.apq) + .chaos(ad_hoc.experimental_chaos) .build() .map_err(|e| serde::de::Error::custom(e.to_string())) } @@ -212,6 +219,7 @@ impl Configuration { apollo_plugins: Map, tls: Option, apq: Option, + chaos: Option, ) -> Result { let conf = Self { validated_yaml: Default::default(), @@ -222,6 +230,7 @@ impl Configuration { homepage: homepage.unwrap_or_default(), cors: cors.unwrap_or_default(), apq: apq.unwrap_or_default(), + experimental_chaos: chaos.unwrap_or_default(), plugins: UserPlugins { plugins: Some(plugins), }, @@ -280,6 +289,7 @@ impl Configuration { apollo_plugins: Map, tls: Option, apq: Option, + chaos: Option, ) -> Result { let configuration = Self { validated_yaml: Default::default(), @@ -289,6 +299,7 @@ impl Configuration { sandbox: sandbox.unwrap_or_else(|| Sandbox::fake_builder().build()), 
homepage: homepage.unwrap_or_else(|| Homepage::fake_builder().build()), cors: cors.unwrap_or_default(), + experimental_chaos: chaos.unwrap_or_default(), plugins: UserPlugins { plugins: Some(plugins), }, @@ -946,6 +957,19 @@ impl Default for Server { } } +/// Configuration for chaos testing, trying to reproduce bugs that require uncommon conditions. +/// You probably don’t want this in production! +#[derive(Debug, Clone, Default, Deserialize, Serialize, JsonSchema)] +#[serde(deny_unknown_fields)] +#[serde(default)] +pub(crate) struct Chaos { + /// Force a hot reload of the Router (as if the schema or configuration had changed) + /// at a regular time interval. + #[serde(with = "humantime_serde")] + #[schemars(with = "Option")] + pub(crate) force_hot_reload: Option, +} + /// Listening address. #[derive(Debug, Clone, PartialEq, Eq, Hash, Deserialize, Serialize, JsonSchema)] #[serde(untagged)] diff --git a/apollo-router/src/configuration/schema.rs b/apollo-router/src/configuration/schema.rs index c4ee39cb77..66f0f3c097 100644 --- a/apollo-router/src/configuration/schema.rs +++ b/apollo-router/src/configuration/schema.rs @@ -15,7 +15,7 @@ use yaml_rust::scanner::Marker; use super::expansion::coerce; use super::expansion::Expansion; -use super::experimental::log_used_experimental_conf; +use super::experimental::Discussed; use super::plugins; use super::yaml; use super::Configuration; @@ -107,7 +107,10 @@ pub(crate) fn validate_yaml_configuration( tracing::warn!("configuration could not be upgraded automatically as it had errors") } } - log_used_experimental_conf(&yaml); + + let discussed = Discussed::new(); + discussed.log_experimental_used(&yaml); + discussed.log_preview_used(&yaml); let expanded_yaml = expansion.expand(&yaml)?; let parsed_yaml = super::yaml::parse(raw_yaml)?; if let Err(errors_it) = schema.validate(&expanded_yaml) { diff --git a/apollo-router/src/configuration/snapshots/apollo_router__configuration__tests__schema_generation.snap 
b/apollo-router/src/configuration/snapshots/apollo_router__configuration__tests__schema_generation.snap index d420b728c7..23fbfef1e0 100644 --- a/apollo-router/src/configuration/snapshots/apollo_router__configuration__tests__schema_generation.snap +++ b/apollo-router/src/configuration/snapshots/apollo_router__configuration__tests__schema_generation.snap @@ -578,6 +578,22 @@ expression: "&schema" }, "additionalProperties": false }, + "experimental_chaos": { + "description": "Configuration for chaos testing, trying to reproduce bugs that require uncommon conditions. You probably don’t want this in production!", + "default": { + "force_hot_reload": null + }, + "type": "object", + "properties": { + "force_hot_reload": { + "description": "Force a hot reload of the Router (as if the schema or configuration had changed) at a regular time interval.", + "default": null, + "type": "string", + "nullable": true + } + }, + "additionalProperties": false + }, "forbid_mutations": { "description": "Forbid mutations configuration", "type": "boolean" @@ -1354,8 +1370,60 @@ expression: "&schema" "default": "https://usage-reporting.api.apollographql.com/api/ingress/traces", "type": "string" }, + "errors": { + "description": "Configure the way errors are transmitted to Apollo Studio", + "type": "object", + "properties": { + "subgraph": { + "description": "Handling of errors coming from subgraph", + "type": "object", + "properties": { + "all": { + "description": "Handling of errors coming from all subgraphs", + "type": "object", + "properties": { + "redact": { + "description": "Redact subgraph errors to Apollo Studio", + "default": true, + "type": "boolean" + }, + "send": { + "description": "Send subgraph errors to Apollo Studio", + "default": true, + "type": "boolean" + } + }, + "additionalProperties": false + }, + "subgraphs": { + "description": "Handling of errors coming from specified subgraphs", + "type": "object", + "additionalProperties": { + "type": "object", + "properties": { + 
"redact": { + "description": "Redact subgraph errors to Apollo Studio", + "default": true, + "type": "boolean" + }, + "send": { + "description": "Send subgraph errors to Apollo Studio", + "default": true, + "type": "boolean" + } + }, + "additionalProperties": false + }, + "nullable": true + } + }, + "additionalProperties": false + } + }, + "additionalProperties": false + }, "field_level_instrumentation_sampler": { - "description": "Enable field level instrumentation for subgraphs via ftv1. ftv1 tracing can cause performance issues as it is transmitted in band with subgraph responses. 0.0 will result in no field level instrumentation. 1.0 will result in always instrumentation. Value MUST be less than global sampling rate", + "description": "Field level instrumentation for subgraphs via ftv1. ftv1 tracing can cause performance issues as it is transmitted in band with subgraph responses.", "anyOf": [ { "description": "Sample a given fraction. Fractions >= 1 will always sample.", diff --git a/apollo-router/src/configuration/snapshots/apollo_router__configuration__tests__upgrade_old_configuration@field_level_instrumentation.yaml.snap b/apollo-router/src/configuration/snapshots/apollo_router__configuration__tests__upgrade_old_configuration@field_level_instrumentation.yaml.snap new file mode 100644 index 0000000000..2b4da064ff --- /dev/null +++ b/apollo-router/src/configuration/snapshots/apollo_router__configuration__tests__upgrade_old_configuration@field_level_instrumentation.yaml.snap @@ -0,0 +1,10 @@ +--- +source: apollo-router/src/configuration/tests.rs +expression: new_config +--- +--- +telemetry: + apollo: + field_level_instrumentation: + sampler: always_off + diff --git a/apollo-router/src/executable.rs b/apollo-router/src/executable.rs index 5b38513cb7..87f38b8ac7 100644 --- a/apollo-router/src/executable.rs +++ b/apollo-router/src/executable.rs @@ -22,10 +22,10 @@ use once_cell::sync::OnceCell; use url::ParseError; use url::Url; -use crate::configuration; use 
crate::configuration::generate_config_schema; use crate::configuration::generate_upgrade; use crate::configuration::ConfigurationError; +use crate::configuration::Discussed; use crate::plugins::telemetry::reload::init_telemetry; use crate::router::ConfigurationSource; use crate::router::RouterHttpServer; @@ -127,6 +127,8 @@ enum ConfigSubcommand { }, /// List all the available experimental configurations with related GitHub discussion Experimental, + /// List all the available preview configurations with related GitHub discussion + Preview, } /// Options for the router @@ -384,7 +386,13 @@ impl Executable { Some(Commands::Config(ConfigSubcommandArgs { command: ConfigSubcommand::Experimental, })) => { - configuration::print_all_experimental_conf(); + Discussed::new().print_experimental(); + Ok(()) + } + Some(Commands::Config(ConfigSubcommandArgs { + command: ConfigSubcommand::Preview, + })) => { + Discussed::new().print_preview(); Ok(()) } None => Self::inner_start(shutdown, schema, config, entitlement, opt).await, diff --git a/apollo-router/src/plugins/rhai/mod.rs b/apollo-router/src/plugins/rhai/mod.rs index 9fba213f83..436fb631bc 100644 --- a/apollo-router/src/plugins/rhai/mod.rs +++ b/apollo-router/src/plugins/rhai/mod.rs @@ -85,6 +85,9 @@ type SharedMut = rhai::Shared>>; pub(crate) const RHAI_SPAN_NAME: &str = "rhai_plugin"; +const CANNOT_ACCESS_HEADERS_ON_A_DEFERRED_RESPONSE: &str = + "cannot access headers on a deferred response"; + impl OptionDance for SharedMut { fn with_mut(&self, f: impl FnOnce(&mut T) -> R) -> R { let mut guard = self.lock().expect("poisoned mutex"); @@ -214,11 +217,25 @@ mod router_plugin { Ok(obj.with_mut(|response| response.response.headers().clone())) } + #[rhai_fn(name = "is_primary", pure)] + pub(crate) fn supergraph_response_is_primary( + _obj: &mut SharedMut, + ) -> bool { + true + } + #[rhai_fn(get = "headers", pure, return_raw)] pub(crate) fn get_originating_headers_router_deferred_response( _obj: &mut SharedMut, ) -> Result> { 
- Err("cannot access headers on a deferred response".into()) + Err(CANNOT_ACCESS_HEADERS_ON_A_DEFERRED_RESPONSE.into()) + } + + #[rhai_fn(name = "is_primary", pure)] + pub(crate) fn supergraph_deferred_response_is_primary( + _obj: &mut SharedMut, + ) -> bool { + false } #[rhai_fn(get = "headers", pure, return_raw)] @@ -228,11 +245,23 @@ mod router_plugin { Ok(obj.with_mut(|response| response.response.headers().clone())) } + #[rhai_fn(name = "is_primary", pure)] + pub(crate) fn execution_response_is_primary(_obj: &mut SharedMut) -> bool { + true + } + #[rhai_fn(get = "headers", pure, return_raw)] pub(crate) fn get_originating_headers_execution_deferred_response( _obj: &mut SharedMut, ) -> Result> { - Err("cannot access headers on a deferred response".into()) + Err(CANNOT_ACCESS_HEADERS_ON_A_DEFERRED_RESPONSE.into()) + } + + #[rhai_fn(name = "is_primary", pure)] + pub(crate) fn execution_deferred_response_is_primary( + _obj: &mut SharedMut, + ) -> bool { + false } #[rhai_fn(get = "headers", pure, return_raw)] @@ -291,7 +320,7 @@ mod router_plugin { _obj: &mut SharedMut, _headers: HeaderMap, ) -> Result<(), Box> { - Err("cannot access headers on a deferred response".into()) + Err(CANNOT_ACCESS_HEADERS_ON_A_DEFERRED_RESPONSE.into()) } #[rhai_fn(set = "headers", return_raw)] @@ -308,7 +337,7 @@ mod router_plugin { _obj: &mut SharedMut, _headers: HeaderMap, ) -> Result<(), Box> { - Err("cannot access headers on a deferred response".into()) + Err(CANNOT_ACCESS_HEADERS_ON_A_DEFERRED_RESPONSE.into()) } #[rhai_fn(set = "headers", return_raw)] @@ -390,7 +419,11 @@ impl EngineBlock { main: PathBuf, sdl: Arc, ) -> Result { - let engine = Arc::new(Rhai::new_rhai_engine(scripts, sdl.to_string())); + let engine = Arc::new(Rhai::new_rhai_engine( + scripts, + sdl.to_string(), + main.clone(), + )); let ast = engine.compile_file(main)?; let mut scope = Scope::new(); // Keep these two lower cases ones as mistakes until 2.0 @@ -907,7 +940,16 @@ macro_rules! 
gen_map_deferred_response { ); if let Err(error) = result { tracing::error!("map_response callback failed: {error}"); - return None; + let error_details = process_error(error); + let mut guard = shared_response.lock().unwrap(); + let response_opt = guard.take(); + let $rhai_deferred_response { mut response, .. } = response_opt.unwrap(); + let error = Error { + message: error_details.message.unwrap_or_default(), + ..Default::default() + }; + response.errors = vec![error]; + return Some(response); } let mut guard = shared_response.lock().unwrap(); @@ -1263,7 +1305,7 @@ impl Rhai { Ok(()) } - fn new_rhai_engine(path: Option, sdl: String) -> Engine { + fn new_rhai_engine(path: Option, sdl: String, main: PathBuf) -> Engine { let mut engine = Engine::new(); // If we pass in a path, use it to configure our engine // with a FileModuleResolver which allows import to work @@ -1278,11 +1320,22 @@ impl Rhai { let base64_module = exported_module!(router_base64); + // Share main so we can move copies into each closure as required for logging + let shared_main = Shared::new(main.display().to_string()); + + let trace_main = shared_main.clone(); + let debug_main = shared_main.clone(); + let info_main = shared_main.clone(); + let warn_main = shared_main.clone(); + let error_main = shared_main.clone(); + + let print_main = shared_main; + // Configure our engine for execution engine .set_max_expr_depths(0, 0) - .on_print(move |rhai_log| { - tracing::info!("{}", rhai_log); + .on_print(move |message| { + tracing::info!(%message, target = %print_main); }) // Register our plugin module .register_global_module(module.into()) @@ -1546,20 +1599,20 @@ impl Rhai { }) .register_fn("to_string", |id: &mut TraceId| -> String { id.to_string() }) // Register a series of logging functions - .register_fn("log_trace", |out: Dynamic| { - tracing::trace!(%out, "rhai_trace"); + .register_fn("log_trace", move |message: Dynamic| { + tracing::trace!(%message, target = %trace_main); }) - 
.register_fn("log_debug", |out: Dynamic| { - tracing::debug!(%out, "rhai_debug"); + .register_fn("log_debug", move |message: Dynamic| { + tracing::debug!(%message, target = %debug_main); }) - .register_fn("log_info", |out: Dynamic| { - tracing::info!(%out, "rhai_info"); + .register_fn("log_info", move |message: Dynamic| { + tracing::info!(%message, target = %info_main); }) - .register_fn("log_warn", |out: Dynamic| { - tracing::warn!(%out, "rhai_warn"); + .register_fn("log_warn", move |message: Dynamic| { + tracing::warn!(%message, target = %warn_main); }) - .register_fn("log_error", |out: Dynamic| { - tracing::error!(%out, "rhai_error"); + .register_fn("log_error", move |message: Dynamic| { + tracing::error!(%message, target = %error_main); }) // Register a function for printing to stderr .register_fn("eprint", |x: &str| { diff --git a/apollo-router/src/plugins/rhai/tests.rs b/apollo-router/src/plugins/rhai/tests.rs index b6222911f5..84d678bb71 100644 --- a/apollo-router/src/plugins/rhai/tests.rs +++ b/apollo-router/src/plugins/rhai/tests.rs @@ -18,6 +18,7 @@ use uuid::Uuid; use super::process_error; use super::subgraph; +use super::PathBuf; use super::Rhai; use super::RhaiExecutionDeferredResponse; use super::RhaiExecutionResponse; @@ -154,7 +155,7 @@ async fn rhai_plugin_execution_service_error() -> Result<(), BoxError> { // A Rhai engine suitable for minimal testing. There are no scripts and the SDL is an empty // string. fn new_rhai_test_engine() -> Engine { - Rhai::new_rhai_engine(None, "".to_string()) + Rhai::new_rhai_engine(None, "".to_string(), PathBuf::new()) } // Some of these tests rely extensively on internal implementation details of the tracing_test crate. 
@@ -407,6 +408,14 @@ async fn it_can_process_supergraph_response() { gen_response_test!(RhaiSupergraphResponse, "process_supergraph_response"); } +#[tokio::test] +async fn it_can_process_supergraph_response_is_primary() { + gen_response_test!( + RhaiSupergraphResponse, + "process_supergraph_response_is_primary" + ); +} + #[tokio::test] async fn it_can_process_supergraph_deferred_response() { gen_response_test!( @@ -415,16 +424,40 @@ async fn it_can_process_supergraph_deferred_response() { ); } +#[tokio::test] +async fn it_can_process_supergraph_deferred_response_is_not_primary() { + gen_response_test!( + RhaiSupergraphDeferredResponse, + "process_supergraph_deferred_response_is_not_primary" + ); +} + #[tokio::test] async fn it_can_process_execution_response() { gen_response_test!(RhaiExecutionResponse, "process_execution_response"); } +#[tokio::test] +async fn it_can_process_execution_response_is_primary() { + gen_response_test!( + RhaiExecutionResponse, + "process_execution_response_is_primary" + ); +} + #[tokio::test] async fn it_can_process_execution_deferred_response() { gen_response_test!(RhaiExecutionDeferredResponse, "process_execution_response"); } +#[tokio::test] +async fn it_can_process_execution_deferred_response_is_not_primary() { + gen_response_test!( + RhaiExecutionDeferredResponse, + "process_execution_deferred_response_is_not_primary" + ); +} + #[tokio::test] async fn it_can_process_subgraph_response() { let dyn_plugin: Box = crate::plugin::plugins() diff --git a/apollo-router/src/plugins/telemetry/apollo.rs b/apollo-router/src/plugins/telemetry/apollo.rs index 45a196df39..043f766277 100644 --- a/apollo-router/src/plugins/telemetry/apollo.rs +++ b/apollo-router/src/plugins/telemetry/apollo.rs @@ -1,6 +1,7 @@ //! Configuration for apollo telemetry. 
// This entire file is license key functionality use std::collections::HashMap; +use std::fmt::Display; use std::num::NonZeroUsize; use std::ops::AddAssign; use std::time::SystemTime; @@ -9,6 +10,7 @@ use derivative::Derivative; use http::header::HeaderName; use itertools::Itertools; use schemars::JsonSchema; +use serde::ser::SerializeMap; use serde::Deserialize; use serde::Serialize; use url::Url; @@ -25,6 +27,7 @@ use crate::plugins::telemetry::apollo_exporter::proto::reports::StatsContext; use crate::plugins::telemetry::apollo_exporter::proto::reports::Trace; use crate::plugins::telemetry::config::SamplerOption; use crate::plugins::telemetry::tracing::BatchProcessorConfig; +use crate::query_planner::OperationKind; use crate::services::apollo_graph_reference; use crate::services::apollo_key; @@ -63,9 +66,7 @@ pub(crate) struct Config { /// The buffer size for sending traces to Apollo. Increase this if you are experiencing lost traces. pub(crate) buffer_size: NonZeroUsize, - /// Enable field level instrumentation for subgraphs via ftv1. ftv1 tracing can cause performance issues as it is transmitted in band with subgraph responses. - /// 0.0 will result in no field level instrumentation. 1.0 will result in always instrumentation. - /// Value MUST be less than global sampling rate + /// Field level instrumentation for subgraphs via ftv1. ftv1 tracing can cause performance issues as it is transmitted in band with subgraph responses. pub(crate) field_level_instrumentation_sampler: SamplerOption, /// To configure which request header names and values are included in trace data that's sent to Apollo Studio. @@ -80,9 +81,64 @@ pub(crate) struct Config { /// Configuration for batch processing. 
pub(crate) batch_processor: BatchProcessorConfig, + + /// Configure the way errors are transmitted to Apollo Studio + pub(crate) errors: ErrorsConfiguration, +} + +#[derive(Debug, Clone, Deserialize, JsonSchema, Default)] +#[serde(deny_unknown_fields, default)] +pub(crate) struct ErrorsConfiguration { + /// Handling of errors coming from subgraph + pub(crate) subgraph: SubgraphErrorConfig, +} + +#[derive(Debug, Clone, Deserialize, JsonSchema, Default)] +#[serde(deny_unknown_fields, default)] +pub(crate) struct SubgraphErrorConfig { + /// Handling of errors coming from all subgraphs + pub(crate) all: ErrorConfiguration, + /// Handling of errors coming from specified subgraphs + pub(crate) subgraphs: Option>, +} + +#[derive(Debug, Clone, Deserialize, JsonSchema)] +#[serde(deny_unknown_fields, default)] +pub(crate) struct ErrorConfiguration { + /// Send subgraph errors to Apollo Studio + pub(crate) send: bool, + /// Redact subgraph errors to Apollo Studio + pub(crate) redact: bool, +} + +impl Default for ErrorConfiguration { + fn default() -> Self { + Self { + send: default_send_errors(), + redact: default_redact_errors(), + } + } +} + +impl SubgraphErrorConfig { + pub(crate) fn get_error_config(&self, subgraph: &str) -> &ErrorConfiguration { + if let Some(subgraph_conf) = self.subgraphs.as_ref().and_then(|s| s.get(subgraph)) { + subgraph_conf + } else { + &self.all + } + } +} + +pub(crate) const fn default_send_errors() -> bool { + true } -fn default_field_level_instrumentation_sampler() -> SamplerOption { +pub(crate) const fn default_redact_errors() -> bool { + true +} + +const fn default_field_level_instrumentation_sampler() -> SamplerOption { SamplerOption::TraceIdRatioBased(0.01) } @@ -124,6 +180,7 @@ impl Default for Config { send_headers: ForwardHeaders::None, send_variable_values: ForwardValues::None, batch_processor: BatchProcessorConfig::default(), + errors: ErrorsConfiguration::default(), } } } @@ -209,9 +266,71 @@ pub(crate) enum SingleReport { 
#[derive(Default, Debug, Serialize)] pub(crate) struct Report { pub(crate) traces_per_query: HashMap, + #[serde(serialize_with = "serialize_operation_count_by_type")] + pub(crate) operation_count_by_type: + HashMap<(OperationKind, Option), OperationCountByType>, +} + +#[derive(Default, Debug, Serialize, PartialEq, Eq, Hash)] +pub(crate) struct OperationCountByType { + pub(crate) r#type: OperationKind, + pub(crate) subtype: Option, pub(crate) operation_count: u64, } +#[derive(Debug, Serialize, PartialEq, Eq, Hash, Clone, Copy)] +#[serde(rename_all = "kebab-case")] +pub(crate) enum OperationSubType { + // TODO +} + +impl OperationSubType { + pub(crate) const fn as_str(&self) -> &'static str { + "" + } +} + +impl Display for OperationSubType { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(f, "") + } +} + +impl From + for crate::plugins::telemetry::apollo_exporter::proto::reports::report::OperationCountByType +{ + fn from(value: OperationCountByType) -> Self { + Self { + r#type: value.r#type.as_apollo_operation_type().to_string(), + subtype: value.subtype.map(|s| s.to_string()).unwrap_or_default(), + operation_count: value.operation_count, + } + } +} + +fn serialize_operation_count_by_type( + elt: &HashMap<(OperationKind, Option), OperationCountByType>, + serializer: S, +) -> Result +where + S: serde::Serializer, +{ + let mut map_ser = serializer.serialize_map(Some(elt.len()))?; + for ((op_type, op_subtype), v) in elt { + map_ser.serialize_entry( + &format!( + "{}{}", + op_type.as_apollo_operation_type(), + op_subtype + .map(|o| "/".to_owned() + o.as_str()) + .unwrap_or_default() + ), + v, + )?; + } + map_ser.end() +} + impl Report { #[cfg(test)] pub(crate) fn new(reports: Vec) -> Report { @@ -229,7 +348,11 @@ impl Report { let mut report = crate::plugins::telemetry::apollo_exporter::proto::reports::Report { header: Some(header), end_time: Some(SystemTime::now().into()), - operation_count: self.operation_count, + 
operation_count_by_type: self + .operation_count_by_type + .into_values() + .map(|op| op.into()) + .collect(), traces_pre_aggregated: true, ..Default::default() }; @@ -269,7 +392,18 @@ impl AddAssign for Report { *self.traces_per_query.entry(k).or_default() += v; } - self.operation_count += report.operation_count; + if let Some(operation_count_by_type) = report.operation_count_by_type { + let key = ( + operation_count_by_type.r#type, + operation_count_by_type.subtype, + ); + self.operation_count_by_type + .entry(key) + .and_modify(|e| { + e.operation_count += 1; + }) + .or_insert(operation_count_by_type); + } } } diff --git a/apollo-router/src/plugins/telemetry/apollo_exporter.rs b/apollo-router/src/plugins/telemetry/apollo_exporter.rs index a80b6550ba..ffe5acebc6 100644 --- a/apollo-router/src/plugins/telemetry/apollo_exporter.rs +++ b/apollo-router/src/plugins/telemetry/apollo_exporter.rs @@ -164,7 +164,7 @@ impl ApolloExporter { pub(crate) async fn submit_report(&self, report: Report) -> Result<(), ApolloExportError> { // We may be sending traces but with no operation count - if report.operation_count == 0 && report.traces_per_query.is_empty() { + if report.operation_count_by_type.is_empty() && report.traces_per_query.is_empty() { return Ok(()); } tracing::debug!("submitting report: {:?}", report); diff --git a/apollo-router/src/plugins/telemetry/formatters/text.rs b/apollo-router/src/plugins/telemetry/formatters/text.rs index 4096c0871f..1ed1a79b2c 100644 --- a/apollo-router/src/plugins/telemetry/formatters/text.rs +++ b/apollo-router/src/plugins/telemetry/formatters/text.rs @@ -1,7 +1,7 @@ use std::fmt; -use ansi_term::Color; -use ansi_term::Style; +use nu_ansi_term::Color; +use nu_ansi_term::Style; use opentelemetry::trace::TraceContextExt; use tracing_core::Event; use tracing_core::Level; diff --git a/apollo-router/src/plugins/telemetry/metrics/apollo/snapshots/apollo_router__plugins__telemetry__metrics__apollo__studio__test__aggregation.snap 
b/apollo-router/src/plugins/telemetry/metrics/apollo/snapshots/apollo_router__plugins__telemetry__metrics__apollo__studio__test__aggregation.snap index 7dd98d0630..84204150d7 100644 --- a/apollo-router/src/plugins/telemetry/metrics/apollo/snapshots/apollo_router__plugins__telemetry__metrics__apollo__studio__test__aggregation.snap +++ b/apollo-router/src/plugins/telemetry/metrics/apollo/snapshots/apollo_router__plugins__telemetry__metrics__apollo__studio__test__aggregation.snap @@ -1302,5 +1302,11 @@ expression: aggregated_metrics } } }, - "operation_count": 2 + "operation_count_by_type": { + "query": { + "type": "query", + "subtype": null, + "operation_count": 2 + } + } } diff --git a/apollo-router/src/plugins/telemetry/metrics/apollo/studio.rs b/apollo-router/src/plugins/telemetry/metrics/apollo/studio.rs index 3993e8b9b9..b41f67c36f 100644 --- a/apollo-router/src/plugins/telemetry/metrics/apollo/studio.rs +++ b/apollo-router/src/plugins/telemetry/metrics/apollo/studio.rs @@ -6,6 +6,7 @@ use serde::Serialize; use uuid::Uuid; use super::duration_histogram::DurationHistogram; +use crate::plugins::telemetry::apollo::OperationCountByType; use crate::plugins::telemetry::apollo_exporter::proto::reports::ReferencedFieldsForType; use crate::plugins::telemetry::apollo_exporter::proto::reports::StatsContext; @@ -13,7 +14,7 @@ use crate::plugins::telemetry::apollo_exporter::proto::reports::StatsContext; pub(crate) struct SingleStatsReport { pub(crate) request_id: Uuid, pub(crate) stats: HashMap, - pub(crate) operation_count: u64, + pub(crate) operation_count_by_type: Option, } #[derive(Default, Debug, Serialize)] @@ -269,6 +270,7 @@ mod test { use super::*; use crate::plugins::telemetry::apollo::Report; + use crate::query_planner::OperationKind; #[test] fn test_aggregation() { @@ -317,7 +319,12 @@ mod test { SingleStatsReport { request_id: Uuid::default(), - operation_count: count.inc_u64(), + operation_count_by_type: OperationCountByType { + r#type: OperationKind::Query, + 
subtype: None, + operation_count: count.inc_u64(), + } + .into(), stats: HashMap::from([( stats_report_key.to_string(), SingleStats { diff --git a/apollo-router/src/plugins/telemetry/metrics/snapshots/apollo_router__plugins__telemetry__metrics__apollo__test__apollo_metrics_exclude.snap b/apollo-router/src/plugins/telemetry/metrics/snapshots/apollo_router__plugins__telemetry__metrics__apollo__test__apollo_metrics_exclude.snap index a6b951c383..9cc671a8bd 100644 --- a/apollo-router/src/plugins/telemetry/metrics/snapshots/apollo_router__plugins__telemetry__metrics__apollo__test__apollo_metrics_exclude.snap +++ b/apollo-router/src/plugins/telemetry/metrics/snapshots/apollo_router__plugins__telemetry__metrics__apollo__test__apollo_metrics_exclude.snap @@ -6,6 +6,10 @@ expression: results { "request_id": "[REDACTED]", "stats": {}, - "operation_count": 1 + "operation_count_by_type": { + "type": "query", + "subtype": null, + "operation_count": 1 + } } ] diff --git a/apollo-router/src/plugins/telemetry/metrics/snapshots/apollo_router__plugins__telemetry__metrics__apollo__test__apollo_metrics_multiple_operations.snap b/apollo-router/src/plugins/telemetry/metrics/snapshots/apollo_router__plugins__telemetry__metrics__apollo__test__apollo_metrics_multiple_operations.snap index bb0c2186bc..00b35e7340 100644 --- a/apollo-router/src/plugins/telemetry/metrics/snapshots/apollo_router__plugins__telemetry__metrics__apollo__test__apollo_metrics_multiple_operations.snap +++ b/apollo-router/src/plugins/telemetry/metrics/snapshots/apollo_router__plugins__telemetry__metrics__apollo__test__apollo_metrics_multiple_operations.snap @@ -11,7 +11,7 @@ expression: results "context": { "client_name": "test_client", "client_version": "1.0-test", - "operation_type": "", + "operation_type": "query", "operation_subtype": "" }, "query_latency_stats": { @@ -39,6 +39,6 @@ expression: results "referenced_fields_by_type": {} } }, - "operation_count": 0 + "operation_count_by_type": null } ] diff --git 
a/apollo-router/src/plugins/telemetry/metrics/snapshots/apollo_router__plugins__telemetry__metrics__apollo__test__apollo_metrics_parse_failure.snap b/apollo-router/src/plugins/telemetry/metrics/snapshots/apollo_router__plugins__telemetry__metrics__apollo__test__apollo_metrics_parse_failure.snap index ad668f4272..73dbea2d94 100644 --- a/apollo-router/src/plugins/telemetry/metrics/snapshots/apollo_router__plugins__telemetry__metrics__apollo__test__apollo_metrics_parse_failure.snap +++ b/apollo-router/src/plugins/telemetry/metrics/snapshots/apollo_router__plugins__telemetry__metrics__apollo__test__apollo_metrics_parse_failure.snap @@ -11,7 +11,7 @@ expression: results "context": { "client_name": "test_client", "client_version": "1.0-test", - "operation_type": "", + "operation_type": "query", "operation_subtype": "" }, "query_latency_stats": { @@ -39,6 +39,6 @@ expression: results "referenced_fields_by_type": {} } }, - "operation_count": 0 + "operation_count_by_type": null } ] diff --git a/apollo-router/src/plugins/telemetry/metrics/snapshots/apollo_router__plugins__telemetry__metrics__apollo__test__apollo_metrics_single_operation.snap b/apollo-router/src/plugins/telemetry/metrics/snapshots/apollo_router__plugins__telemetry__metrics__apollo__test__apollo_metrics_single_operation.snap index da1de050ee..0a859ac1d3 100644 --- a/apollo-router/src/plugins/telemetry/metrics/snapshots/apollo_router__plugins__telemetry__metrics__apollo__test__apollo_metrics_single_operation.snap +++ b/apollo-router/src/plugins/telemetry/metrics/snapshots/apollo_router__plugins__telemetry__metrics__apollo__test__apollo_metrics_single_operation.snap @@ -11,7 +11,7 @@ expression: results "context": { "client_name": "test_client", "client_version": "1.0-test", - "operation_type": "", + "operation_type": "query", "operation_subtype": "" }, "query_latency_stats": { @@ -52,6 +52,10 @@ expression: results } } }, - "operation_count": 1 + "operation_count_by_type": { + "type": "query", + "subtype": 
null, + "operation_count": 1 + } } ] diff --git a/apollo-router/src/plugins/telemetry/metrics/snapshots/apollo_router__plugins__telemetry__metrics__apollo__test__apollo_metrics_unknown_operation.snap b/apollo-router/src/plugins/telemetry/metrics/snapshots/apollo_router__plugins__telemetry__metrics__apollo__test__apollo_metrics_unknown_operation.snap index 5a03140638..89cb0df5c8 100644 --- a/apollo-router/src/plugins/telemetry/metrics/snapshots/apollo_router__plugins__telemetry__metrics__apollo__test__apollo_metrics_unknown_operation.snap +++ b/apollo-router/src/plugins/telemetry/metrics/snapshots/apollo_router__plugins__telemetry__metrics__apollo__test__apollo_metrics_unknown_operation.snap @@ -11,7 +11,7 @@ expression: results "context": { "client_name": "test_client", "client_version": "1.0-test", - "operation_type": "", + "operation_type": "query", "operation_subtype": "" }, "query_latency_stats": { @@ -39,6 +39,6 @@ expression: results "referenced_fields_by_type": {} } }, - "operation_count": 0 + "operation_count_by_type": null } ] diff --git a/apollo-router/src/plugins/telemetry/metrics/snapshots/apollo_router__plugins__telemetry__metrics__apollo__test__apollo_metrics_validation_failure.snap b/apollo-router/src/plugins/telemetry/metrics/snapshots/apollo_router__plugins__telemetry__metrics__apollo__test__apollo_metrics_validation_failure.snap index bb0c2186bc..00b35e7340 100644 --- a/apollo-router/src/plugins/telemetry/metrics/snapshots/apollo_router__plugins__telemetry__metrics__apollo__test__apollo_metrics_validation_failure.snap +++ b/apollo-router/src/plugins/telemetry/metrics/snapshots/apollo_router__plugins__telemetry__metrics__apollo__test__apollo_metrics_validation_failure.snap @@ -11,7 +11,7 @@ expression: results "context": { "client_name": "test_client", "client_version": "1.0-test", - "operation_type": "", + "operation_type": "query", "operation_subtype": "" }, "query_latency_stats": { @@ -39,6 +39,6 @@ expression: results 
"referenced_fields_by_type": {} } }, - "operation_count": 0 + "operation_count_by_type": null } ] diff --git a/apollo-router/src/plugins/telemetry/mod.rs b/apollo-router/src/plugins/telemetry/mod.rs index 8eca46b6c3..b05d3adb97 100644 --- a/apollo-router/src/plugins/telemetry/mod.rs +++ b/apollo-router/src/plugins/telemetry/mod.rs @@ -12,6 +12,7 @@ use ::tracing::field; use ::tracing::info_span; use ::tracing::Span; use axum::headers::HeaderName; +use dashmap::DashMap; use futures::future::ready; use futures::future::BoxFuture; use futures::stream::once; @@ -22,6 +23,7 @@ use http::HeaderMap; use http::HeaderValue; use http::StatusCode; use multimap::MultiMap; +use once_cell::sync::OnceCell; use opentelemetry::propagation::text_map_propagator::FieldIter; use opentelemetry::propagation::Extractor; use opentelemetry::propagation::Injector; @@ -51,6 +53,8 @@ use tracing_subscriber::fmt::format::JsonFields; use tracing_subscriber::Layer; use self::apollo::ForwardValues; +use self::apollo::OperationCountByType; +use self::apollo::OperationSubType; use self::apollo::SingleReport; use self::apollo_exporter::proto; use self::apollo_exporter::Sender; @@ -88,6 +92,7 @@ use crate::plugins::telemetry::metrics::MetricsExporterHandle; use crate::plugins::telemetry::tracing::apollo_telemetry::decode_ftv1_trace; use crate::plugins::telemetry::tracing::apollo_telemetry::APOLLO_PRIVATE_OPERATION_SIGNATURE; use crate::plugins::telemetry::tracing::TracingConfigurator; +use crate::query_planner::OperationKind; use crate::query_planner::USAGE_REPORTING; use crate::register_plugin; use crate::router_factory::Endpoint; @@ -125,6 +130,7 @@ const ATTRIBUTES: &str = "apollo_telemetry::metrics_attributes"; const SUBGRAPH_ATTRIBUTES: &str = "apollo_telemetry::subgraph_metrics_attributes"; const ENABLE_SUBGRAPH_FTV1: &str = "apollo_telemetry::enable_subgraph_ftv1"; const SUBGRAPH_FTV1: &str = "apollo_telemetry::subgraph_ftv1"; +const OPERATION_KIND: &str = "apollo_telemetry::operation_kind"; 
pub(crate) const STUDIO_EXCLUDE: &str = "apollo_telemetry::studio::exclude"; pub(crate) const LOGGING_DISPLAY_HEADERS: &str = "apollo_telemetry::logging::display_headers"; pub(crate) const LOGGING_DISPLAY_BODY: &str = "apollo_telemetry::logging::display_body"; @@ -371,7 +377,7 @@ impl Plugin for Telemetry { start.elapsed(), ) .await; - Self::update_metrics_on_last_response( + Self::update_metrics_on_response_events( &ctx, config, field_level_instrumentation_ratio, metrics, sender, start, result, ) } @@ -383,8 +389,28 @@ impl Plugin for Telemetry { fn execution_service(&self, service: execution::BoxService) -> execution::BoxService { ServiceBuilder::new() - .instrument(move |_req: &ExecutionRequest| { - info_span!("execution", "otel.kind" = "INTERNAL",) + .instrument(move |req: &ExecutionRequest| { + let operation_kind = req + .query_plan + .query + .operation(req.supergraph_request.body().operation_name.as_deref()) + .map(|op| *op.kind()); + let _ = req + .context + .insert(OPERATION_KIND, operation_kind.unwrap_or_default()); + + match operation_kind { + Some(operation_kind) => { + info_span!( + EXECUTION_SPAN_NAME, + "otel.kind" = "INTERNAL", + "graphql.operation.type" = operation_kind.as_apollo_operation_type() + ) + } + None => { + info_span!(EXECUTION_SPAN_NAME, "otel.kind" = "INTERNAL",) + } + } }) .service(service) .boxed() @@ -1028,7 +1054,7 @@ impl Telemetry { } #[allow(clippy::too_many_arguments)] - fn update_metrics_on_last_response( + fn update_metrics_on_response_events( ctx: &Context, config: Arc, field_level_instrumentation_ratio: f64, @@ -1037,6 +1063,9 @@ impl Telemetry { start: Instant, result: Result, ) -> Result { + let operation_kind: OperationKind = + ctx.get(OPERATION_KIND).ok().flatten().unwrap_or_default(); + match result { Err(e) => { if !matches!(sender, Sender::Noop) { @@ -1046,6 +1075,8 @@ impl Telemetry { sender, true, start.elapsed(), + operation_kind, + None, ); } let mut metric_attrs = Vec::new(); @@ -1075,6 +1106,7 @@ impl Telemetry 
{ } Ok(router_response) => { let mut has_errors = !router_response.response.status().is_success(); + Ok(router_response.map(move |response_stream| { let sender = sender.clone(); let ctx = ctx.clone(); @@ -1094,8 +1126,11 @@ impl Telemetry { sender.clone(), has_errors, start.elapsed(), + operation_kind, + None, ); } + response }) .boxed() @@ -1110,6 +1145,8 @@ impl Telemetry { sender: Sender, has_errors: bool, duration: Duration, + operation_kind: OperationKind, + operation_subtype: Option, ) { let metrics = if let Some(usage_reporting) = context .get::<_, UsageReporting>(USAGE_REPORTING) @@ -1126,7 +1163,13 @@ impl Telemetry { { // The request was excluded don't report the details, but do report the operation count SingleStatsReport { - operation_count, + operation_count_by_type: (operation_count > 0).then_some( + OperationCountByType { + r#type: operation_kind, + subtype: operation_subtype, + operation_count, + }, + ), ..Default::default() } } else { @@ -1142,7 +1185,13 @@ impl Telemetry { .trace_id() .to_bytes(), ), - operation_count, + operation_count_by_type: (operation_count > 0).then_some( + OperationCountByType { + r#type: operation_kind, + subtype: operation_subtype, + operation_count, + }, + ), stats: HashMap::from([( usage_reporting.stats_report_key.to_string(), SingleStats { @@ -1156,9 +1205,12 @@ impl Telemetry { .get(CLIENT_VERSION) .unwrap_or_default() .unwrap_or_default(), - // FIXME - operation_type: String::new(), - operation_subtype: String::new(), + operation_type: operation_kind + .as_apollo_operation_type() + .to_string(), + operation_subtype: operation_subtype + .map(|op| op.to_string()) + .unwrap_or_default(), }, query_latency_stats: SingleQueryLatencyStats { latency: duration, @@ -1181,7 +1233,12 @@ impl Telemetry { } else { // Usage reporting was missing, so it counts as one operation. 
SingleStatsReport { - operation_count: 1, + operation_count_by_type: OperationCountByType { + r#type: operation_kind, + subtype: operation_subtype, + operation_count: 1, + } + .into(), ..Default::default() } }; @@ -1366,19 +1423,56 @@ fn convert( } } +#[derive(Eq, PartialEq, Hash)] +enum ErrorType { + Trace, + Metric, + Other, +} +static OTEL_ERROR_LAST_LOGGED: OnceCell> = OnceCell::new(); + fn handle_error>(err: T) { - match err.into() { - opentelemetry::global::Error::Trace(err) => { - ::tracing::error!("OpenTelemetry trace error occurred: {}", err) - } - opentelemetry::global::Error::Metric(err_msg) => { - ::tracing::error!("OpenTelemetry metric error occurred: {}", err_msg) - } - opentelemetry::global::Error::Other(err_msg) => { - ::tracing::error!("OpenTelemetry error occurred: {}", err_msg) - } - other => { - ::tracing::error!("OpenTelemetry error occurred: {:?}", other) + // We have to rate limit these errors because when they happen they are very frequent. + // Use a dashmap to store the message type with the last time it was logged. + let last_logged_map = OTEL_ERROR_LAST_LOGGED.get_or_init(DashMap::new); + let err = err.into(); + + // We don't want the dashmap to get big, so we key the error messages by type. 
+ let error_type = match err { + opentelemetry::global::Error::Trace(_) => ErrorType::Trace, + opentelemetry::global::Error::Metric(_) => ErrorType::Metric, + _ => ErrorType::Other, + }; + #[cfg(not(test))] + let threshold = Duration::from_secs(10); + #[cfg(test)] + let threshold = Duration::from_millis(100); + + // Copy here so that we don't retain a mutable reference into the dashmap and lock the shard + let now = Instant::now(); + let last_logged = *last_logged_map + .entry(error_type) + .and_modify(|last_logged| { + if last_logged.elapsed() > threshold { + *last_logged = now; + } + }) + .or_insert_with(|| now); + + if last_logged == now { + match err { + opentelemetry::global::Error::Trace(err) => { + ::tracing::error!("OpenTelemetry trace error occurred: {}", err) + } + opentelemetry::global::Error::Metric(err) => { + ::tracing::error!("OpenTelemetry metric error occurred: {}", err) + } + opentelemetry::global::Error::Other(err) => { + ::tracing::error!("OpenTelemetry error occurred: {}", err) + } + other => { + ::tracing::error!("OpenTelemetry error occurred: {:?}", other) + } } } } @@ -1491,7 +1585,12 @@ impl TextMapPropagator for CustomTraceIdPropagator { // #[cfg(test)] mod tests { + use std::fmt::Debug; + use std::ops::DerefMut; use std::str::FromStr; + use std::sync::Arc; + use std::sync::Mutex; + use std::time::Duration; use axum::headers::HeaderName; use http::HeaderMap; @@ -1505,6 +1604,14 @@ mod tests { use tower::util::BoxService; use tower::Service; use tower::ServiceExt; + use tracing_core::field::Visit; + use tracing_core::Event; + use tracing_core::Field; + use tracing_core::Subscriber; + use tracing_futures::WithSubscriber; + use tracing_subscriber::layer::Context; + use tracing_subscriber::layer::SubscriberExt; + use tracing_subscriber::Layer; use super::apollo::ForwardHeaders; use crate::error::FetchError; @@ -1515,6 +1622,7 @@ mod tests { use crate::plugin::test::MockSubgraphService; use crate::plugin::test::MockSupergraphService; use 
crate::plugin::DynPlugin; + use crate::plugins::telemetry::handle_error; use crate::services::SubgraphRequest; use crate::services::SubgraphResponse; use crate::services::SupergraphRequest; @@ -1996,4 +2104,72 @@ mod tests { let filtered_headers = super::filter_headers(&headers, &ForwardHeaders::None); assert_eq!(filtered_headers.as_str(), "{}"); } + + #[tokio::test] + async fn test_handle_error_throttling() { + // Set up a fake subscriber so we can check log events. If this is useful then maybe it can be factored out into something reusable + #[derive(Default)] + struct TestVisitor { + log_entries: Vec, + } + + #[derive(Default, Clone)] + struct TestLayer { + visitor: Arc>, + } + impl TestLayer { + fn assert_log_entry_count(&self, message: &str, expected: usize) { + let log_entries = self.visitor.lock().unwrap().log_entries.clone(); + let actual = log_entries.iter().filter(|e| e.contains(message)).count(); + assert_eq!(actual, expected); + } + } + impl Visit for TestVisitor { + fn record_debug(&mut self, field: &Field, value: &dyn Debug) { + self.log_entries + .push(format!("{}={:?}", field.name(), value)); + } + } + + impl Layer for TestLayer + where + S: Subscriber, + Self: 'static, + { + fn on_event(&self, event: &Event<'_>, _ctx: Context<'_, S>) { + event.record(self.visitor.lock().unwrap().deref_mut()) + } + } + + let test_layer = TestLayer::default(); + + async { + // Log twice rapidly, they should get deduped + handle_error(opentelemetry::global::Error::Other( + "other error".to_string(), + )); + handle_error(opentelemetry::global::Error::Other( + "other error".to_string(), + )); + handle_error(opentelemetry::global::Error::Trace( + "trace error".to_string().into(), + )); + } + .with_subscriber(tracing_subscriber::registry().with(test_layer.clone())) + .await; + + test_layer.assert_log_entry_count("other error", 1); + test_layer.assert_log_entry_count("trace error", 1); + + // Sleep a bit and then log again, it should get logged + 
tokio::time::sleep(Duration::from_millis(200)).await; + async { + handle_error(opentelemetry::global::Error::Other( + "other error".to_string(), + )); + } + .with_subscriber(tracing_subscriber::registry().with(test_layer.clone())) + .await; + test_layer.assert_log_entry_count("other error", 2); + } } diff --git a/apollo-router/src/plugins/telemetry/tracing/apollo.rs b/apollo-router/src/plugins/telemetry/tracing/apollo.rs index 8f7ad3a2b8..fc2b8e9f78 100644 --- a/apollo-router/src/plugins/telemetry/tracing/apollo.rs +++ b/apollo-router/src/plugins/telemetry/tracing/apollo.rs @@ -23,6 +23,7 @@ impl TracingConfigurator for Config { buffer_size, field_level_instrumentation_sampler, batch_processor, + errors, .. } => { tracing::debug!("configuring exporter to Studio"); @@ -35,6 +36,7 @@ impl TracingConfigurator for Config { .buffer_size(*buffer_size) .field_execution_sampler(field_level_instrumentation_sampler.clone()) .batch_config(batch_processor.clone()) + .errors_configuration(errors.clone()) .build()?; builder.with_span_processor( BatchSpanProcessor::builder(exporter, opentelemetry::runtime::Tokio) diff --git a/apollo-router/src/plugins/telemetry/tracing/apollo_telemetry.rs b/apollo-router/src/plugins/telemetry/tracing/apollo_telemetry.rs index 1c88f1e846..eedecc79c9 100644 --- a/apollo-router/src/plugins/telemetry/tracing/apollo_telemetry.rs +++ b/apollo-router/src/plugins/telemetry/tracing/apollo_telemetry.rs @@ -1,7 +1,9 @@ +use std::borrow::Cow; use std::collections::HashMap; use std::io::Cursor; use std::num::NonZeroUsize; use std::sync::Arc; +use std::time::SystemTime; use std::time::SystemTimeError; use async_trait::async_trait; @@ -14,6 +16,7 @@ use lru::LruCache; use opentelemetry::sdk::export::trace::ExportResult; use opentelemetry::sdk::export::trace::SpanData; use opentelemetry::sdk::export::trace::SpanExporter; +use opentelemetry::sdk::trace::EvictedHashMap; use opentelemetry::trace::SpanId; use opentelemetry::trace::TraceError; use opentelemetry::Key; 
@@ -26,6 +29,8 @@ use url::Url; use crate::axum_factory::utils::REQUEST_SPAN_NAME; use crate::plugins::telemetry; +use crate::plugins::telemetry::apollo::ErrorConfiguration; +use crate::plugins::telemetry::apollo::ErrorsConfiguration; use crate::plugins::telemetry::apollo::SingleReport; use crate::plugins::telemetry::apollo_exporter::proto; use crate::plugins::telemetry::apollo_exporter::proto::reports::trace::http::Values; @@ -49,6 +54,7 @@ use crate::plugins::telemetry::config::SamplerOption; use crate::plugins::telemetry::tracing::apollo::TracesReport; use crate::plugins::telemetry::tracing::BatchProcessorConfig; use crate::plugins::telemetry::BoxError; +use crate::plugins::telemetry::EXECUTION_SPAN_NAME; use crate::plugins::telemetry::ROUTER_SPAN_NAME; use crate::plugins::telemetry::SUBGRAPH_SPAN_NAME; use crate::plugins::telemetry::SUPERGRAPH_SPAN_NAME; @@ -83,6 +89,7 @@ const DEPENDS: Key = Key::from_static_str("graphql.depends"); const LABEL: Key = Key::from_static_str("graphql.label"); const CONDITION: Key = Key::from_static_str("graphql.condition"); const OPERATION_NAME: Key = Key::from_static_str("graphql.operation.name"); +const OPERATION_TYPE: Key = Key::from_static_str("graphql.operation.type"); #[derive(Error, Debug)] pub(crate) enum Error { @@ -102,6 +109,27 @@ pub(crate) enum Error { SystemTime(#[from] SystemTimeError), } +#[derive(Debug, Clone, PartialEq)] +struct LightSpanData { + span_id: SpanId, + name: Cow<'static, str>, + start_time: SystemTime, + end_time: SystemTime, + attributes: EvictedHashMap, +} + +impl From for LightSpanData { + fn from(value: SpanData) -> Self { + Self { + span_id: value.span_context.span_id(), + name: value.name, + start_time: value.start_time, + end_time: value.end_time, + attributes: value.attributes, + } + } +} + /// A [`SpanExporter`] that writes to [`Reporter`]. 
/// /// [`SpanExporter`]: super::SpanExporter @@ -109,10 +137,11 @@ pub(crate) enum Error { #[derive(Derivative)] #[derivative(Debug)] pub(crate) struct Exporter { - spans_by_parent_id: LruCache>, + spans_by_parent_id: LruCache>, #[derivative(Debug = "ignore")] report_exporter: Arc, field_execution_weight: f64, + errors_configuration: ErrorsConfiguration, } enum TreeData { @@ -133,6 +162,7 @@ enum TreeData { DeferDeferred(DeferredNode), ConditionIf(Option), ConditionElse(Option), + Execution(String), Trace(Option, Error>>), } @@ -146,6 +176,7 @@ impl Exporter { schema_id: String, buffer_size: NonZeroUsize, field_execution_sampler: SamplerOption, + errors_configuration: Option, batch_config: BatchProcessorConfig, ) -> Result { tracing::debug!("creating studio exporter"); @@ -158,17 +189,19 @@ impl Exporter { &apollo_graph_ref, &schema_id, )?), + field_execution_weight: match field_execution_sampler { SamplerOption::Always(Sampler::AlwaysOn) => 1.0, SamplerOption::Always(Sampler::AlwaysOff) => 0.0, SamplerOption::TraceIdRatioBased(ratio) => 1.0 / ratio, }, + errors_configuration: errors_configuration.unwrap_or_default(), }) } fn extract_root_trace( &mut self, - span: &SpanData, + span: &LightSpanData, child_nodes: Vec, ) -> Result, Error> { let http = extract_http_data(span); @@ -215,6 +248,9 @@ impl Exporter { operation_name, }); } + TreeData::Execution(operation_type) => { + root_trace.operation_type = operation_type; + } _ => panic!("should never have had other node types"), } } @@ -222,7 +258,7 @@ impl Exporter { Ok(Box::new(root_trace)) } - fn extract_trace(&mut self, span: SpanData) -> Result, Error> { + fn extract_trace(&mut self, span: LightSpanData) -> Result, Error> { self.extract_data_from_spans(&span)? 
.pop() .and_then(|node| { @@ -235,21 +271,20 @@ impl Exporter { .expect("root trace must exist because it is constructed on the request span, qed") } - fn extract_data_from_spans(&mut self, span: &SpanData) -> Result, Error> { - let (mut child_nodes, errors) = self - .spans_by_parent_id - .pop_entry(&span.span_context.span_id()) - .map(|(_, spans)| spans) - .unwrap_or_default() - .into_iter() - .map(|span| self.extract_data_from_spans(&span)) - .fold((Vec::new(), Vec::new()), |(mut oks, mut errors), next| { - match next { - Ok(mut children) => oks.append(&mut children), - Err(err) => errors.push(err), - } - (oks, errors) - }); + fn extract_data_from_spans(&mut self, span: &LightSpanData) -> Result, Error> { + let (mut child_nodes, errors) = match self.spans_by_parent_id.pop_entry(&span.span_id) { + Some((_, spans)) => spans + .into_iter() + .map(|(_, span)| self.extract_data_from_spans(&span)) + .fold((Vec::new(), Vec::new()), |(mut oks, mut errors), next| { + match next { + Ok(mut children) => oks.append(&mut children), + Err(err) => errors.push(err), + } + (oks, errors) + }), + None => (Vec::new(), Vec::new()), + }; if !errors.is_empty() { return Err(Error::MultipleErrors(errors)); } @@ -315,10 +350,19 @@ impl Exporter { })] } SUBGRAPH_SPAN_NAME => { + let subgraph_name = span + .attributes + .get(&Key::from_static_str("apollo.subgraph.name")) + .and_then(extract_string) + .unwrap_or_default(); + let error_configuration = self + .errors_configuration + .subgraph + .get_error_config(&subgraph_name); vec![TreeData::Trace( span.attributes .get(&APOLLO_PRIVATE_FTV1) - .and_then(extract_ftv1_trace), + .and_then(|t| extract_ftv1_trace(t, error_configuration)), )] } SUPERGRAPH_SPAN_NAME => { @@ -428,6 +472,15 @@ impl Exporter { child_nodes.remove_first_query_plan_node(), )] } + EXECUTION_SPAN_NAME => { + child_nodes.push(TreeData::Execution( + span.attributes + .get(&OPERATION_TYPE) + .and_then(extract_string) + .unwrap_or_default(), + )); + child_nodes + } _ => 
child_nodes, }) } @@ -485,9 +538,17 @@ fn extract_i64(v: &Value) -> Option { } } -fn extract_ftv1_trace(v: &Value) -> Option, Error>> { +fn extract_ftv1_trace( + v: &Value, + error_config: &ErrorConfiguration, +) -> Option, Error>> { if let Value::String(s) = v { - if let Some(t) = decode_ftv1_trace(s.as_str()) { + if let Some(mut t) = decode_ftv1_trace(s.as_str()) { + if error_config.redact || !error_config.send { + if let Some(root) = &mut t.root { + redact_node_errors(root, !error_config.send); + } + } return Some(Ok(Box::new(t))); } return Some(Err(Error::TraceParsingFailed)); @@ -495,12 +556,27 @@ fn extract_ftv1_trace(v: &Value) -> Option, Er None } +fn redact_node_errors(t: &mut proto::reports::trace::Node, to_delete: bool) { + if to_delete { + t.error = Vec::new(); + } else { + t.error.iter_mut().for_each(|err| { + err.message = String::from(""); + err.location = Vec::new(); + err.json = String::new(); + }); + } + t.child + .iter_mut() + .for_each(|n| redact_node_errors(n, to_delete)); +} + pub(crate) fn decode_ftv1_trace(string: &str) -> Option { let bytes = base64::decode(string).ok()?; proto::reports::Trace::decode(Cursor::new(bytes)).ok() } -fn extract_http_data(span: &SpanData) -> Http { +fn extract_http_data(span: &LightSpanData) -> Http { let method = match span .attributes .get(&HTTP_METHOD) @@ -554,7 +630,7 @@ impl SpanExporter for Exporter { let mut traces: Vec<(String, proto::reports::Trace)> = Vec::new(); for span in batch { if span.name == REQUEST_SPAN_NAME { - match self.extract_trace(span) { + match self.extract_trace(span.into()) { Ok(mut trace) => { let mut operation_signature = Default::default(); std::mem::swap(&mut trace.signature, &mut operation_signature); @@ -572,17 +648,21 @@ impl SpanExporter for Exporter { tracing::error!("failed to construct trace: {}, skipping", error); } } - } else { + } else if span.parent_span_id != SpanId::INVALID { // Not a root span, we may need it later so stash it. 
// This is sad, but with LRU there is no `get_insert_mut` so a double lookup is required // It is safe to expect the entry to exist as we just inserted it, however capacity of the LRU must not be 0. - self.spans_by_parent_id - .get_or_insert(span.parent_span_id, Vec::new); + let len = self + .spans_by_parent_id + .get_or_insert(span.parent_span_id, || { + LruCache::new(NonZeroUsize::new(50).unwrap()) + }) + .len(); self.spans_by_parent_id .get_mut(&span.parent_span_id) .expect("capacity of cache was zero") - .push(span); + .push(len, span.into()); } } let mut report = telemetry::apollo::Report::default(); @@ -690,11 +770,12 @@ mod test { use opentelemetry::Value; use prost::Message; use serde_json::json; + use crate::plugins::telemetry::apollo::{ErrorConfiguration}; use crate::plugins::telemetry::apollo_exporter::proto::reports::Trace; use crate::plugins::telemetry::apollo_exporter::proto::reports::trace::query_plan_node::{DeferNodePrimary, DeferredNode, ResponsePathElement}; - use crate::plugins::telemetry::apollo_exporter::proto::reports::trace::QueryPlanNode; + use crate::plugins::telemetry::apollo_exporter::proto::reports::trace::{QueryPlanNode, Node, Error}; use crate::plugins::telemetry::apollo_exporter::proto::reports::trace::query_plan_node::response_path_element::Id; - use crate::plugins::telemetry::tracing::apollo_telemetry::{ChildNodes, extract_ftv1_trace, extract_i64, extract_json, extract_path, extract_string, TreeData}; + use crate::plugins::telemetry::tracing::apollo_telemetry::{ChildNodes, extract_ftv1_trace, extract_i64, extract_json, extract_path, extract_string, TreeData, redact_node_errors}; fn elements(tree_data: Vec) -> Vec<&'static str> { let mut elements = Vec::new(); @@ -708,6 +789,7 @@ mod test { TreeData::ConditionIf(_) => elements.push("condition_if"), TreeData::ConditionElse(_) => elements.push("condition_else"), TreeData::Trace(_) => elements.push("trace"), + TreeData::Execution(_) => elements.push("execution"), TreeData::Router { .. 
} => elements.push("router"), } } @@ -847,10 +929,94 @@ mod test { let trace = Trace::default(); let encoded = base64::encode(trace.encode_to_vec()); assert_eq!( - *extract_ftv1_trace(&Value::String(encoded.into())) - .expect("there was a trace here") - .expect("the trace must be decoded"), + *extract_ftv1_trace( + &Value::String(encoded.into()), + &ErrorConfiguration::default() + ) + .expect("there was a trace here") + .expect("the trace must be decoded"), trace ); } + + #[test] + fn test_redact_node_errors() { + let sub_node = Node { + error: vec![Error { + message: "this is my error".to_string(), + location: Vec::new(), + time_ns: 5, + json: String::from(r#"{"foo": "bar"}"#), + }], + ..Default::default() + }; + let mut node = Node { + error: vec![ + Error { + message: "this is my error".to_string(), + location: Vec::new(), + time_ns: 5, + json: String::from(r#"{"foo": "bar"}"#), + }, + Error { + message: "this is my other error".to_string(), + location: Vec::new(), + time_ns: 5, + json: String::from(r#"{"foo": "bar"}"#), + }, + ], + ..Default::default() + }; + node.child.push(sub_node); + + redact_node_errors(&mut node, false); + assert!(node.error[0].json.is_empty()); + assert!(node.error[0].location.is_empty()); + assert_eq!(node.error[0].message.as_str(), ""); + assert_eq!(node.error[0].time_ns, 5u64); + assert!(node.error[1].json.is_empty()); + assert!(node.error[1].location.is_empty()); + assert_eq!(node.error[1].message.as_str(), ""); + assert_eq!(node.error[1].time_ns, 5u64); + + assert!(node.child[0].error[0].json.is_empty()); + assert!(node.child[0].error[0].location.is_empty()); + assert_eq!(node.child[0].error[0].message.as_str(), ""); + assert_eq!(node.child[0].error[0].time_ns, 5u64); + } + + #[test] + fn test_delete_node_errors() { + let sub_node = Node { + error: vec![Error { + message: "this is my error".to_string(), + location: Vec::new(), + time_ns: 5, + json: String::from(r#"{"foo": "bar"}"#), + }], + ..Default::default() + }; + let mut node = 
Node { + error: vec![ + Error { + message: "this is my error".to_string(), + location: Vec::new(), + time_ns: 5, + json: String::from(r#"{"foo": "bar"}"#), + }, + Error { + message: "this is my other error".to_string(), + location: Vec::new(), + time_ns: 5, + json: String::from(r#"{"foo": "bar"}"#), + }, + ], + ..Default::default() + }; + node.child.push(sub_node); + + redact_node_errors(&mut node, true); + assert!(node.error.is_empty()); + assert!(node.child[0].error.is_empty()); + } } diff --git a/apollo-router/src/plugins/traffic_shaping/mod.rs b/apollo-router/src/plugins/traffic_shaping/mod.rs index 635382ec23..5b8a9da2de 100644 --- a/apollo-router/src/plugins/traffic_shaping/mod.rs +++ b/apollo-router/src/plugins/traffic_shaping/mod.rs @@ -451,11 +451,12 @@ impl TrafficShaping { } pub(crate) fn enable_subgraph_http2(&self, service_name: &str) -> bool { - self.config - .subgraphs - .get(service_name) - .and_then(|subgraph| subgraph.shaping.experimental_enable_http2) - .unwrap_or(true) + Self::merge_config( + self.config.all.as_ref(), + self.config.subgraphs.get(service_name), + ) + .and_then(|config| config.shaping.experimental_enable_http2) + .unwrap_or(true) } } @@ -692,6 +693,72 @@ mod test { ); } + #[test] + fn test_merge_http2_all() { + let config = serde_yaml::from_str::( + r#" + all: + experimental_enable_http2: false + subgraphs: + products: + experimental_enable_http2: true + reviews: + experimental_enable_http2: false + router: + timeout: 65s + "#, + ) + .unwrap(); + + assert!(TrafficShaping::merge_config( + config.all.as_ref(), + config.subgraphs.get("products") + ) + .unwrap() + .shaping + .experimental_enable_http2 + .unwrap()); + assert!(!TrafficShaping::merge_config( + config.all.as_ref(), + config.subgraphs.get("reviews") + ) + .unwrap() + .shaping + .experimental_enable_http2 + .unwrap()); + assert!(!TrafficShaping::merge_config(config.all.as_ref(), None) + .unwrap() + .shaping + .experimental_enable_http2 + .unwrap()); + } + + #[tokio::test] + 
async fn test_enable_subgraph_http2() { + let config = serde_yaml::from_str::( + r#" + all: + experimental_enable_http2: false + subgraphs: + products: + experimental_enable_http2: true + reviews: + experimental_enable_http2: false + router: + timeout: 65s + "#, + ) + .unwrap(); + + let shaping_config = TrafficShaping::new(PluginInit::new(config, Default::default())) + .await + .unwrap(); + + assert!(shaping_config.enable_subgraph_http2("products")); + assert!(!shaping_config.enable_subgraph_http2("reviews")); + assert!(!shaping_config.enable_subgraph_http2("this_doesnt_exist")); + } + #[tokio::test(flavor = "multi_thread")] async fn it_rate_limit_subgraph_requests() { let config = serde_yaml::from_str::( diff --git a/apollo-router/src/query_planner/fetch.rs b/apollo-router/src/query_planner/fetch.rs index 25f9f61d0e..d8db47e076 100644 --- a/apollo-router/src/query_planner/fetch.rs +++ b/apollo-router/src/query_planner/fetch.rs @@ -50,6 +50,15 @@ impl OperationKind { OperationKind::Subscription => "Subscription", } } + + /// Only for apollo studio exporter + pub(crate) const fn as_apollo_operation_type(&self) -> &'static str { + match self { + OperationKind::Query => "query", + OperationKind::Mutation => "mutation", + OperationKind::Subscription => "subscription", + } + } } impl Default for OperationKind { diff --git a/apollo-router/src/router.rs b/apollo-router/src/router.rs index db88135e18..ef2c4d2c9a 100644 --- a/apollo-router/src/router.rs +++ b/apollo-router/src/router.rs @@ -10,6 +10,8 @@ use std::path::Path; use std::path::PathBuf; use std::pin::Pin; use std::str::FromStr; +use std::sync::atomic::AtomicU64; +use std::sync::atomic::Ordering; use std::sync::Arc; use std::task::Context; use std::task::Poll; @@ -32,6 +34,7 @@ use tower::ServiceExt; use tracing_futures::WithSubscriber; use url::Url; +use self::Event::ForcedHotReload; use self::Event::NoMoreConfiguration; use self::Event::NoMoreSchema; use self::Event::Shutdown; @@ -577,6 +580,67 @@ enum 
ReadConfigError { Validation(crate::configuration::ConfigurationError), } +pub(crate) struct ForcedHotReloadSource { + config: Arc, + timer: Option, +} + +#[derive(Default)] +pub(crate) struct ForcedHotReloadConfig { + interval_seconds: AtomicU64, // zero: disabled +} + +impl ForcedHotReloadConfig { + pub(crate) fn period(&self) -> Option { + match self.interval_seconds.load(Ordering::Acquire) { + 0 => None, + secs => Some(Duration::from_secs(secs)), + } + } + + pub(crate) fn set_period(&self, new_period: Option) { + let new_seconds = if let Some(duration) = new_period { + duration.as_secs().max(1) // round to at least 1 second + } else { + 0 + }; + self.interval_seconds.store(new_seconds, Ordering::Release) + } +} + +impl ForcedHotReloadSource { + pub(crate) fn new(config: Arc) -> Self { + let mut new = Self { + config, + timer: None, + }; + new.check_config(); + new + } + + fn check_config(&mut self) { + let configured = self.config.period(); + if configured != self.timer.as_ref().map(|t| t.period()) { + self.timer = configured.map(|period| { + let mut interval = tokio::time::interval(period); + interval.set_missed_tick_behavior(tokio::time::MissedTickBehavior::Skip); + interval + }) + } + } + + fn into_stream(mut self) -> impl Stream { + futures::stream::poll_fn(move |ctx: &mut Context<'_>| { + self.check_config(); + if let Some(timer) = &mut self.timer { + timer.poll_tick(ctx).map(|_| Some(Event::ForcedHotReload)) + } else { + Poll::Pending + } + }) + } +} + type ShutdownFuture = Pin + Send>>; /// Specifies when the Router’s HTTP server should gracefully shutdown @@ -725,7 +789,7 @@ impl RouterHttpServer { shutdown: Option, ) -> RouterHttpServer { let (shutdown_sender, shutdown_receiver) = oneshot::channel::<()>(); - let event_stream = generate_event_stream( + let (event_stream, forced_hot_reload_config) = generate_event_stream( shutdown.unwrap_or(ShutdownSource::CtrlC), configuration.unwrap_or_default(), schema, @@ -734,7 +798,8 @@ impl RouterHttpServer { ); 
let server_factory = AxumHttpServerFactory::new(); let router_factory = OrbiterRouterSuperServiceFactory::new(YamlRouterFactory::default()); - let state_machine = StateMachine::new(server_factory, router_factory); + let state_machine = + StateMachine::new(server_factory, router_factory, forced_hot_reload_config); let listen_addresses = state_machine.listen_addresses.clone(); let result = spawn( async move { state_machine.process_events(event_stream).await } @@ -814,6 +879,9 @@ pub(crate) enum Event { /// There were no more updates to entitlement. NoMoreEntitlement, + /// Artificial hot reload for chaos testing + ForcedHotReload, + /// The server should gracefully shutdown. Shutdown, } @@ -839,6 +907,9 @@ impl Debug for Event { NoMoreEntitlement => { write!(f, "NoMoreEntitlement") } + ForcedHotReload => { + write!(f, "ForcedHotReload") + } Shutdown => { write!(f, "Shutdown") } @@ -871,18 +942,23 @@ fn generate_event_stream( schema: SchemaSource, entitlement: EntitlementSource, shutdown_receiver: oneshot::Receiver<()>, -) -> impl Stream { +) -> (impl Stream, Arc) { + let forced_hot_reload_config = Arc::new(ForcedHotReloadConfig::default()); // Chain is required so that the final shutdown message is sent. 
- stream::select_all(vec![ + let stream = stream::select_all(vec![ shutdown.into_stream().boxed(), configuration.into_stream().boxed(), schema.into_stream().boxed(), entitlement.into_stream().boxed(), + ForcedHotReloadSource::new(forced_hot_reload_config.clone()) + .into_stream() + .boxed(), shutdown_receiver.into_stream().map(|_| Shutdown).boxed(), ]) .take_while(|msg| future::ready(!matches!(msg, Shutdown))) .chain(stream::iter(vec![Shutdown])) - .boxed() + .boxed(); + (stream, forced_hot_reload_config) } #[cfg(test)] diff --git a/apollo-router/src/services/router_service.rs b/apollo-router/src/services/router_service.rs index 70df7bf7cc..305b6702bb 100644 --- a/apollo-router/src/services/router_service.rs +++ b/apollo-router/src/services/router_service.rs @@ -196,7 +196,7 @@ where }) }) .unwrap_or_else(|| { - Err(("missing query string", "missing query string".to_string())) + Err(("There was no GraphQL operation to execute. Use the `query` parameter to send an operation, using either GET or POST.", "There was no GraphQL operation to execute. Use the `query` parameter to send an operation, using either GET or POST.".to_string())) }) } else { hyper::body::to_bytes(body) diff --git a/apollo-router/src/services/supergraph_service.rs b/apollo-router/src/services/supergraph_service.rs index 920120473e..bf0469973c 100644 --- a/apollo-router/src/services/supergraph_service.rs +++ b/apollo-router/src/services/supergraph_service.rs @@ -691,13 +691,13 @@ mod tests { { query: Query } - + directive @join__field(graph: join__Graph!, requires: join__FieldSet, provides: join__FieldSet, type: String, external: Boolean, override: String, usedOverridden: Boolean) repeatable on FIELD_DEFINITION | INPUT_FIELD_DEFINITION directive @join__graph(name: String!, url: String!) on ENUM_VALUE directive @join__implements(graph: join__Graph!, interface: String!) repeatable on OBJECT | INTERFACE directive @join__type(graph: join__Graph!, key: join__FieldSet, extension: Boolean! 
= false, resolvable: Boolean! = true) repeatable on OBJECT | INTERFACE | UNION | ENUM | INPUT_OBJECT | SCALAR directive @link(url: String, as: String, for: link__Purpose, import: [link__Import]) repeatable on SCHEMA - + scalar link__Import enum link__Purpose { SECURITY @@ -711,9 +711,9 @@ mod tests { errorField: String nonNullErrorField: String! } - + scalar join__FieldSet - + enum join__Graph { COMPUTERS @join__graph(name: "computers", url: "http://localhost:4001/") } @@ -768,8 +768,8 @@ mod tests { let request = supergraph::Request::fake_builder() .context(defer_context()) .query( - r#"query { - computer(id: "Computer1") { + r#"query { + computer(id: "Computer1") { id ...ComputerErrorField @defer } @@ -1173,55 +1173,55 @@ mod tests { query: Query mutation: Mutation } - + directive @inaccessible on FIELD_DEFINITION | OBJECT | INTERFACE | UNION | ARGUMENT_DEFINITION | SCALAR | ENUM | ENUM_VALUE | INPUT_OBJECT | INPUT_FIELD_DEFINITION - + directive @join__field(graph: join__Graph!, requires: join__FieldSet, provides: join__FieldSet, type: String, external: Boolean, override: String, usedOverridden: Boolean) repeatable on FIELD_DEFINITION | INPUT_FIELD_DEFINITION - + directive @join__graph(name: String!, url: String!) on ENUM_VALUE - + directive @join__implements(graph: join__Graph!, interface: String!) repeatable on OBJECT | INTERFACE - + directive @join__type(graph: join__Graph!, key: join__FieldSet, extension: Boolean! = false, resolvable: Boolean! = true) repeatable on OBJECT | INTERFACE | UNION | ENUM | INPUT_OBJECT | SCALAR - + directive @link(url: String, as: String, for: link__Purpose, import: [link__Import]) repeatable on SCHEMA - + directive @tag(name: String!) 
repeatable on FIELD_DEFINITION | OBJECT | INTERFACE | UNION | ARGUMENT_DEFINITION | SCALAR | ENUM | ENUM_VALUE | INPUT_OBJECT | INPUT_FIELD_DEFINITION - + scalar join__FieldSet - + enum join__Graph { PRODUCTS @join__graph(name: "products", url: "http://products:4000/graphql") USERS @join__graph(name: "users", url: "http://users:4000/graphql") } - + scalar link__Import - + enum link__Purpose { SECURITY EXECUTION } - + type MakePaymentResult @join__type(graph: USERS) { id: ID! paymentStatus: PaymentStatus } - + type Mutation @join__type(graph: USERS) { makePayment(userId: ID!): MakePaymentResult! } - - + + type PaymentStatus @join__type(graph: USERS) { id: ID! } - + type Query @join__type(graph: PRODUCTS) @join__type(graph: USERS) @@ -1399,7 +1399,7 @@ mod tests { { query: Query } - + directive @inaccessible on FIELD_DEFINITION | OBJECT | INTERFACE | UNION | ARGUMENT_DEFINITION | SCALAR | ENUM | ENUM_VALUE | INPUT_OBJECT | INPUT_FIELD_DEFINITION directive @join__field(graph: join__Graph!, requires: join__FieldSet, provides: join__FieldSet, type: String, external: Boolean, override: String, usedOverridden: Boolean) repeatable on FIELD_DEFINITION | INPUT_FIELD_DEFINITION directive @join__graph(name: String!, url: String!) on ENUM_VALUE @@ -1407,7 +1407,7 @@ mod tests { directive @join__type(graph: join__Graph!, key: join__FieldSet, extension: Boolean! = false, resolvable: Boolean! = true) repeatable on OBJECT | INTERFACE | UNION | ENUM | INPUT_OBJECT | SCALAR directive @link(url: String, as: String, for: link__Purpose, import: [link__Import]) repeatable on SCHEMA directive @tag(name: String!) repeatable on FIELD_DEFINITION | OBJECT | INTERFACE | UNION | ARGUMENT_DEFINITION | SCALAR | ENUM | ENUM_VALUE | INPUT_OBJECT | INPUT_FIELD_DEFINITION - + scalar join__FieldSet enum join__Graph { USER @join__graph(name: "user", url: "http://localhost:4000/graphql") @@ -1428,7 +1428,7 @@ mod tests { id: ID! name: String! 
} - + type User implements Identity @join__implements(graph: USER, interface: "Identity") @join__type(graph: USER, key: "id") @@ -2254,7 +2254,7 @@ mod tests { { foo: Foo! @join__field(graph: S1) } - + type Foo @join__owner(graph: S1) @join__type(graph: S1) @@ -2262,7 +2262,7 @@ mod tests { id: ID! @join__field(graph: S1) bar: Bar! @join__field(graph: S1) } - + type Bar @join__owner(graph: S1) @join__type(graph: S1, key: "id") diff --git a/apollo-router/src/spec/query.rs b/apollo-router/src/spec/query.rs index a9e32f82fb..c4974ab86c 100644 --- a/apollo-router/src/spec/query.rs +++ b/apollo-router/src/spec/query.rs @@ -960,7 +960,7 @@ impl Query { }) } - fn operation(&self, operation_name: Option<&str>) -> Option<&Operation> { + pub(crate) fn operation(&self, operation_name: Option<&str>) -> Option<&Operation> { match operation_name { Some(name) => self .operations diff --git a/apollo-router/src/state_machine.rs b/apollo-router/src/state_machine.rs index 8d24a732f7..026b0e7439 100644 --- a/apollo-router/src/state_machine.rs +++ b/apollo-router/src/state_machine.rs @@ -9,6 +9,7 @@ use tokio::sync::mpsc; use tokio::sync::OwnedRwLockWriteGuard; use tokio::sync::RwLock; use ApolloRouterError::ServiceCreationError; +use Event::ForcedHotReload; use Event::NoMoreConfiguration; use Event::NoMoreEntitlement; use Event::NoMoreSchema; @@ -29,6 +30,7 @@ use super::state_machine::State::Stopped; use crate::configuration::Configuration; use crate::configuration::ListenAddr; use crate::router::Event::UpdateEntitlement; +use crate::router::ForcedHotReloadConfig; use crate::router_factory::RouterFactory; use crate::router_factory::RouterSuperServiceFactory; use crate::spec::Schema; @@ -294,6 +296,10 @@ impl State { entitlement }; + state_machine + .forced_hot_reload_config + .set_period(configuration.experimental_chaos.force_hot_reload); + let router_service_factory = state_machine .router_configurator .create( @@ -369,6 +375,7 @@ where router_configurator: FA, pub(crate) 
listen_addresses: Arc>, listen_addresses_guard: Option>, + forced_hot_reload_config: Arc, } impl StateMachine @@ -377,7 +384,11 @@ where FA: RouterSuperServiceFactory + Send, FA::RouterFactory: RouterFactory, { - pub(crate) fn new(http_server_factory: S, router_factory: FA) -> Self { + pub(crate) fn new( + http_server_factory: S, + router_factory: FA, + forced_hot_reload_config: Arc, + ) -> Self { // Listen address is created locked so that if a consumer tries to examine the listen address before the state machine has reached running state they are blocked. let listen_addresses: Arc> = Default::default(); let listen_addresses_guard = Some( @@ -391,6 +402,7 @@ where router_configurator: router_factory, listen_addresses, listen_addresses_guard, + forced_hot_reload_config, } } @@ -432,6 +444,7 @@ where .update_inputs(&mut self, None, None, Some(entitlement)) .await } + ForcedHotReload => state.update_inputs(&mut self, None, None, None).await, NoMoreEntitlement => state.no_more_entitlement().await, Shutdown => state.shutdown().await, }; @@ -677,7 +690,7 @@ mod tests { async fn listen_addresses_are_locked() { let router_factory = create_mock_router_configurator(0); let (server_factory, _) = create_mock_server_factory(0); - let state_machine = StateMachine::new(server_factory, router_factory); + let state_machine = StateMachine::new(server_factory, router_factory, Default::default()); assert!(state_machine.listen_addresses.try_read().is_err()); } @@ -1036,7 +1049,7 @@ mod tests { router_factory: MockMyRouterConfigurator, events: Vec, ) -> Result<(), ApolloRouterError> { - let state_machine = StateMachine::new(server_factory, router_factory); + let state_machine = StateMachine::new(server_factory, router_factory, Default::default()); state_machine .process_events(stream::iter(events).boxed()) .await diff --git a/apollo-router/src/uplink/entitlement_stream.rs b/apollo-router/src/uplink/entitlement_stream.rs index eed52514be..17e668eee7 100644 --- 
a/apollo-router/src/uplink/entitlement_stream.rs +++ b/apollo-router/src/uplink/entitlement_stream.rs @@ -439,6 +439,7 @@ mod test { HaltEntitlement, WarnEntitlement, NoMoreEntitlement, + ForcedHotReload, Shutdown, } @@ -457,6 +458,7 @@ mod test { } Event::UpdateEntitlement(_) => SimpleEvent::UpdateEntitlement, Event::NoMoreEntitlement => SimpleEvent::NoMoreEntitlement, + Event::ForcedHotReload => SimpleEvent::ForcedHotReload, Event::Shutdown => SimpleEvent::Shutdown, } } diff --git a/apollo-router/tests/common.rs b/apollo-router/tests/common.rs index d374c8f77f..94b1695c4f 100644 --- a/apollo-router/tests/common.rs +++ b/apollo-router/tests/common.rs @@ -52,6 +52,7 @@ pub struct IntegrationTest { _lock: tokio::sync::OwnedMutexGuard, stdio_tx: tokio::sync::mpsc::Sender, stdio_rx: tokio::sync::mpsc::Receiver, + collect_stdio: Option<(tokio::sync::oneshot::Sender, regex::Regex)>, _subgraphs: wiremock::MockServer, } @@ -92,6 +93,7 @@ impl IntegrationTest { config: &'static str, telemetry: Option, responder: Option, + collect_stdio: Option>, ) -> Self { Self::init_telemetry(telemetry); @@ -130,19 +132,27 @@ impl IntegrationTest { fs::write(&test_config_location, config).expect("could not write config"); - let router_location = PathBuf::from(env!("CARGO_BIN_EXE_router")); let (stdio_tx, stdio_rx) = tokio::sync::mpsc::channel(2000); + let collect_stdio = collect_stdio.map(|sender| { + let version_line_re = regex::Regex::new("Apollo Router v[^ ]+ ").unwrap(); + (sender, version_line_re) + }); Self { router: None, - router_location, + router_location: Self::router_location(), test_config_location, _lock: lock, stdio_tx, stdio_rx, + collect_stdio, _subgraphs: subgraphs, } } + pub fn router_location() -> PathBuf { + PathBuf::from(env!("CARGO_BIN_EXE_router")) + } + #[allow(dead_code)] pub async fn start(&mut self) { let mut router = Command::new(&self.router_location) @@ -161,13 +171,38 @@ impl IntegrationTest { .expect("router should start"); let reader = 
BufReader::new(router.stdout.take().expect("out")); let stdio_tx = self.stdio_tx.clone(); + let collect_stdio = self.collect_stdio.take(); // We need to read from stdout otherwise we will hang task::spawn(async move { + let mut collected = Vec::new(); let mut lines = reader.lines(); while let Ok(Some(line)) = lines.next_line().await { println!("{line}"); + if let Some((_sender, version_line_re)) = &collect_stdio { + #[derive(serde::Deserialize)] + struct Log { + #[allow(unused)] + timestamp: String, + level: String, + message: String, + } + let log = serde_json::from_str::(&line).unwrap(); + // Omit this message from snapshots since it depends on external environment + if !log.message.starts_with("RUST_BACKTRACE=full detected") { + collected.push(format!( + "{}: {}", + log.level, + // Redacted so we don't need to update snapshots every release + version_line_re + .replace(&log.message, "Apollo Router [version number] ") + )) + } + } let _ = stdio_tx.send(line).await; } + if let Some((sender, _version_line_re)) = collect_stdio { + let _ = sender.send(collected.join("\n")); + } }); self.router = Some(router); @@ -340,7 +375,12 @@ impl IntegrationTest { #[cfg(target_os = "windows")] pub async fn graceful_shutdown(&mut self) { - // On windows we have to do complicated things to gracefully shutdown. One day we may get around to this, but it's not a priority. 
+ // We don’t have SIGTERM on Windows, so do a non-graceful kill instead + self.kill().await + } + + #[allow(dead_code)] + pub async fn kill(&mut self) { let _ = self .router .as_mut() diff --git a/apollo-router/tests/fixtures/request_response_test.rhai b/apollo-router/tests/fixtures/request_response_test.rhai index 1b09327289..63a848ee02 100644 --- a/apollo-router/tests/fixtures/request_response_test.rhai +++ b/apollo-router/tests/fixtures/request_response_test.rhai @@ -161,3 +161,40 @@ fn process_subgraph_response_om_missing_message(response) { status: 400, }; } + +fn process_supergraph_response_is_primary(response) { + if !response.is_primary() { + throw #{ + status: 500, + message: "should be primary" + }; + } +} + +fn process_execution_response_is_primary(response) { + if !response.is_primary() { + throw #{ + status: 500, + message: "should be primary" + }; + } +} + +fn process_supergraph_deferred_response_is_not_primary(response) { + if response.is_primary() { + throw #{ + status: 500, + message: "should not be primary" + }; + } +} + +fn process_execution_deferred_response_is_not_primary(response) { + if response.is_primary() { + throw #{ + status: 500, + message: "should not be primary" + }; + } +} + diff --git a/apollo-router/tests/lifecycle_tests.rs b/apollo-router/tests/lifecycle_tests.rs index 5f08ccac99..8f04209a21 100644 --- a/apollo-router/tests/lifecycle_tests.rs +++ b/apollo-router/tests/lifecycle_tests.rs @@ -3,6 +3,7 @@ use std::time::Duration; use apollo_router::graphql; use futures::FutureExt; use serde_json::json; +use tokio::process::Command; use tower::BoxError; use wiremock::ResponseTemplate; @@ -122,3 +123,83 @@ async fn test_graceful_shutdown() -> Result<(), BoxError> { Ok(()) } + +#[tokio::test(flavor = "multi_thread")] +async fn test_force_hot_reload() -> Result<(), BoxError> { + let mut router = IntegrationTest::builder() + .config( + "experimental_chaos: + force_hot_reload: 10s", + ) + .build() + .await; + router.start().await; + 
router.assert_started().await; + tokio::time::sleep(Duration::from_secs(11)).await; + router.assert_reloaded().await; + router.graceful_shutdown().await; + Ok(()) +} + +async fn command_output(command: &mut Command) -> String { + let output = command.output().await.unwrap(); + let success = output.status.success(); + let exit_code = output.status.code(); + let stderr = String::from_utf8_lossy(&output.stderr); + let stdout = String::from_utf8_lossy(&output.stdout); + format!( + "Success: {success:?}\n\ + Exit code: {exit_code:?}\n\ + stderr:\n\ + {stderr}\n\ + stdout:\n\ + {stdout}" + ) +} + +#[tokio::test(flavor = "multi_thread")] +async fn test_cli_config_experimental() { + insta::assert_snapshot!( + command_output( + Command::new(IntegrationTest::router_location()) + .arg("config") + .arg("experimental") + .env("RUST_BACKTRACE", "") // Avoid "RUST_BACKTRACE=full detected" log on CI + ) + .await + ); +} + +#[tokio::test(flavor = "multi_thread")] +async fn test_cli_config_preview() { + insta::assert_snapshot!( + command_output( + Command::new(IntegrationTest::router_location()) + .arg("config") + .arg("preview") + .env("RUST_BACKTRACE", "") // Avoid "RUST_BACKTRACE=full detected" log on CI + ) + .await + ); +} + +#[tokio::test(flavor = "multi_thread")] +async fn test_experimental_notice() { + let (tx, rx) = tokio::sync::oneshot::channel(); + let mut router = IntegrationTest::builder() + .config( + " + telemetry: + experimental_logging: + format: json + ", + ) + .collect_stdio(tx) + .build() + .await; + router.start().await; + router.assert_started().await; + router.kill().await; + + insta::assert_snapshot!(rx.await.unwrap()); +} diff --git a/apollo-router/tests/snapshots/apollo_reports__client_name.snap b/apollo-router/tests/snapshots/apollo_reports__client_name.snap index 265464a1ec..2b9d7e05d7 100644 --- a/apollo-router/tests/snapshots/apollo_reports__client_name.snap +++ b/apollo-router/tests/snapshots/apollo_reports__client_name.snap @@ -30,7 +30,7 @@ 
traces_per_query: operation_name: "" client_name: my client client_version: "" - operation_type: "" + operation_type: query operation_subtype: "" agent_version: "[agent_version]" http: diff --git a/apollo-router/tests/snapshots/apollo_reports__client_version.snap b/apollo-router/tests/snapshots/apollo_reports__client_version.snap index d53fd9188e..0b5f7ff021 100644 --- a/apollo-router/tests/snapshots/apollo_reports__client_version.snap +++ b/apollo-router/tests/snapshots/apollo_reports__client_version.snap @@ -30,7 +30,7 @@ traces_per_query: operation_name: "" client_name: "" client_version: my client version - operation_type: "" + operation_type: query operation_subtype: "" agent_version: "[agent_version]" http: diff --git a/apollo-router/tests/snapshots/apollo_reports__condition_else.snap b/apollo-router/tests/snapshots/apollo_reports__condition_else.snap index 5629a26ee3..e6e0da0eb7 100644 --- a/apollo-router/tests/snapshots/apollo_reports__condition_else.snap +++ b/apollo-router/tests/snapshots/apollo_reports__condition_else.snap @@ -31,7 +31,7 @@ traces_per_query: operation_name: "" client_name: "" client_version: "" - operation_type: "" + operation_type: query operation_subtype: "" agent_version: "[agent_version]" http: diff --git a/apollo-router/tests/snapshots/apollo_reports__condition_if.snap b/apollo-router/tests/snapshots/apollo_reports__condition_if.snap index 985ac36783..fd1f507097 100644 --- a/apollo-router/tests/snapshots/apollo_reports__condition_if.snap +++ b/apollo-router/tests/snapshots/apollo_reports__condition_if.snap @@ -31,7 +31,7 @@ traces_per_query: operation_name: "" client_name: "" client_version: "" - operation_type: "" + operation_type: query operation_subtype: "" agent_version: "[agent_version]" http: diff --git a/apollo-router/tests/snapshots/apollo_reports__non_defer.snap b/apollo-router/tests/snapshots/apollo_reports__non_defer.snap index e098df6315..68ea38e80d 100644 --- 
a/apollo-router/tests/snapshots/apollo_reports__non_defer.snap +++ b/apollo-router/tests/snapshots/apollo_reports__non_defer.snap @@ -30,7 +30,7 @@ traces_per_query: operation_name: "" client_name: "" client_version: "" - operation_type: "" + operation_type: query operation_subtype: "" agent_version: "[agent_version]" http: diff --git a/apollo-router/tests/snapshots/apollo_reports__send_header.snap b/apollo-router/tests/snapshots/apollo_reports__send_header.snap index 103157c66d..3042d275b8 100644 --- a/apollo-router/tests/snapshots/apollo_reports__send_header.snap +++ b/apollo-router/tests/snapshots/apollo_reports__send_header.snap @@ -30,7 +30,7 @@ traces_per_query: operation_name: "" client_name: "" client_version: "" - operation_type: "" + operation_type: query operation_subtype: "" agent_version: "[agent_version]" http: diff --git a/apollo-router/tests/snapshots/apollo_reports__send_variable_value.snap b/apollo-router/tests/snapshots/apollo_reports__send_variable_value.snap index eeef70f1af..d5dfc4ea7b 100644 --- a/apollo-router/tests/snapshots/apollo_reports__send_variable_value.snap +++ b/apollo-router/tests/snapshots/apollo_reports__send_variable_value.snap @@ -32,7 +32,7 @@ traces_per_query: operation_name: "" client_name: "" client_version: "" - operation_type: "" + operation_type: query operation_subtype: "" agent_version: "[agent_version]" http: diff --git a/apollo-router/tests/snapshots/apollo_reports__stats.snap b/apollo-router/tests/snapshots/apollo_reports__stats.snap index 361f5aab13..fd23c8238b 100644 --- a/apollo-router/tests/snapshots/apollo_reports__stats.snap +++ b/apollo-router/tests/snapshots/apollo_reports__stats.snap @@ -17,7 +17,7 @@ traces_per_query: - context: client_name: "" client_version: "" - operation_type: "" + operation_type: query operation_subtype: "" query_latency_stats: latency_count: "[latency_count]" @@ -121,7 +121,10 @@ traces_per_query: is_interface: false internal_traces_contributing_to_stats: [] end_time: "[end_time]" 
-operation_count: 1 -operation_count_by_type: [] +operation_count: 0 +operation_count_by_type: + - type: query + subtype: "" + operation_count: 1 traces_pre_aggregated: true diff --git a/apollo-router/tests/snapshots/apollo_reports__stats_mocked.snap b/apollo-router/tests/snapshots/apollo_reports__stats_mocked.snap index bdf30200dc..2a872357fa 100644 --- a/apollo-router/tests/snapshots/apollo_reports__stats_mocked.snap +++ b/apollo-router/tests/snapshots/apollo_reports__stats_mocked.snap @@ -5,7 +5,7 @@ expression: stats context: client_name: "" client_version: "" - operation_type: "" + operation_type: query operation_subtype: "" query_latency_stats: latency_count: "[latency_count]" diff --git a/apollo-router/tests/snapshots/apollo_reports__trace_id.snap b/apollo-router/tests/snapshots/apollo_reports__trace_id.snap index e098df6315..68ea38e80d 100644 --- a/apollo-router/tests/snapshots/apollo_reports__trace_id.snap +++ b/apollo-router/tests/snapshots/apollo_reports__trace_id.snap @@ -30,7 +30,7 @@ traces_per_query: operation_name: "" client_name: "" client_version: "" - operation_type: "" + operation_type: query operation_subtype: "" agent_version: "[agent_version]" http: diff --git a/apollo-router/tests/snapshots/lifecycle_tests__cli_config_experimental.snap b/apollo-router/tests/snapshots/lifecycle_tests__cli_config_experimental.snap new file mode 100644 index 0000000000..14210cfc15 --- /dev/null +++ b/apollo-router/tests/snapshots/lifecycle_tests__cli_config_experimental.snap @@ -0,0 +1,15 @@ +--- +source: apollo-router/tests/lifecycle_tests.rs +expression: "command_output(Command::new(IntegrationTest::router_location()).arg(\"config\").arg(\"experimental\")).await" +--- +Success: true +Exit code: Some(0) +stderr: + +stdout: +List of all experimental configurations with related GitHub discussions: + + - experimental_logging: https://github.com/apollographql/router/discussions/1961 + - experimental_response_trace_id: 
https://github.com/apollographql/router/discussions/2147 + - experimental_retry: https://github.com/apollographql/router/discussions/2241 + diff --git a/apollo-router/tests/snapshots/lifecycle_tests__cli_config_preview.snap b/apollo-router/tests/snapshots/lifecycle_tests__cli_config_preview.snap new file mode 100644 index 0000000000..498fe8f2cc --- /dev/null +++ b/apollo-router/tests/snapshots/lifecycle_tests__cli_config_preview.snap @@ -0,0 +1,11 @@ +--- +source: apollo-router/tests/lifecycle_tests.rs +expression: "command_output(Command::new(IntegrationTest::router_location()).arg(\"config\").arg(\"preview\")).await" +--- +Success: true +Exit code: Some(0) +stderr: + +stdout: +This Router version has no preview configuration + diff --git a/apollo-router/tests/snapshots/lifecycle_tests__experimental_notice.snap b/apollo-router/tests/snapshots/lifecycle_tests__experimental_notice.snap new file mode 100644 index 0000000000..cd4b8216c4 --- /dev/null +++ b/apollo-router/tests/snapshots/lifecycle_tests__experimental_notice.snap @@ -0,0 +1,15 @@ +--- +source: apollo-router/tests/lifecycle_tests.rs +expression: rx.await.unwrap() +--- +INFO: Apollo Router [version number] // (c) Apollo Graph, Inc. // Licensed as ELv2 (https://go.apollo.dev/elv2) +INFO: Anonymous usage data collection is disabled. +INFO: You're using some "experimental" features of the Apollo Router (those which have their configuration prefixed by "experimental_"). +We may make breaking changes in future releases. To help us design the stable version we need your feedback. 
+Here is a list of links where you can give your opinion: + + - experimental_logging: https://github.com/apollographql/router/discussions/1961 + +For more information about launch stages, please see the documentation here: https://www.apollographql.com/docs/resources/product-launch-stages/ +INFO: Health check endpoint exposed at http://127.0.0.1:8088/health +INFO: GraphQL endpoint exposed at http://127.0.0.1:4000/ 🚀 diff --git a/apollo-router/tests/snapshots/tracing_tests__traced_basic_composition.snap b/apollo-router/tests/snapshots/tracing_tests__traced_basic_composition.snap index 8c41630bf8..7b74128302 100644 --- a/apollo-router/tests/snapshots/tracing_tests__traced_basic_composition.snap +++ b/apollo-router/tests/snapshots/tracing_tests__traced_basic_composition.snap @@ -275,6 +275,10 @@ expression: get_spans() [ "otel.kind", "INTERNAL" + ], + [ + "graphql.operation.type", + "query" ] ], "metadata": { @@ -284,7 +288,8 @@ expression: get_spans() "module_path": "apollo_router::plugins::telemetry", "fields": { "names": [ - "otel.kind" + "otel.kind", + "graphql.operation.type" ] } } diff --git a/apollo-router/tests/snapshots/tracing_tests__traced_basic_request.snap b/apollo-router/tests/snapshots/tracing_tests__traced_basic_request.snap index bc0e7758e4..f3a09de41b 100644 --- a/apollo-router/tests/snapshots/tracing_tests__traced_basic_request.snap +++ b/apollo-router/tests/snapshots/tracing_tests__traced_basic_request.snap @@ -275,6 +275,10 @@ expression: get_spans() [ "otel.kind", "INTERNAL" + ], + [ + "graphql.operation.type", + "query" ] ], "metadata": { @@ -284,7 +288,8 @@ expression: get_spans() "module_path": "apollo_router::plugins::telemetry", "fields": { "names": [ - "otel.kind" + "otel.kind", + "graphql.operation.type" ] } } diff --git a/dockerfiles/tracing/docker-compose.datadog.yml b/dockerfiles/tracing/docker-compose.datadog.yml index 66a789a9c7..cf639623f4 100644 --- a/dockerfiles/tracing/docker-compose.datadog.yml +++ 
b/dockerfiles/tracing/docker-compose.datadog.yml @@ -3,7 +3,7 @@ services: apollo-router: container_name: apollo-router - image: ghcr.io/apollographql/router:v1.15.1 + image: ghcr.io/apollographql/router:v1.16.0 volumes: - ./supergraph.graphql:/etc/config/supergraph.graphql - ./router/datadog.router.yaml:/etc/config/configuration.yaml diff --git a/dockerfiles/tracing/docker-compose.jaeger.yml b/dockerfiles/tracing/docker-compose.jaeger.yml index 484daa8af0..68a29e2069 100644 --- a/dockerfiles/tracing/docker-compose.jaeger.yml +++ b/dockerfiles/tracing/docker-compose.jaeger.yml @@ -4,7 +4,7 @@ services: apollo-router: container_name: apollo-router #build: ./router - image: ghcr.io/apollographql/router:v1.15.1 + image: ghcr.io/apollographql/router:v1.16.0 volumes: - ./supergraph.graphql:/etc/config/supergraph.graphql - ./router/jaeger.router.yaml:/etc/config/configuration.yaml diff --git a/dockerfiles/tracing/docker-compose.zipkin.yml b/dockerfiles/tracing/docker-compose.zipkin.yml index 3241fbc7d9..f0536d174d 100644 --- a/dockerfiles/tracing/docker-compose.zipkin.yml +++ b/dockerfiles/tracing/docker-compose.zipkin.yml @@ -4,7 +4,7 @@ services: apollo-router: container_name: apollo-router build: ./router - image: ghcr.io/apollographql/router:v1.15.1 + image: ghcr.io/apollographql/router:v1.16.0 volumes: - ./supergraph.graphql:/etc/config/supergraph.graphql - ./router/zipkin.router.yaml:/etc/config/configuration.yaml diff --git a/docs/source/config.json b/docs/source/config.json index 80ce936c7b..d8adda4bb8 100644 --- a/docs/source/config.json +++ b/docs/source/config.json @@ -1,5 +1,5 @@ { - "title": "Router", + "title": "Router (self-hosted)", "algoliaFilters": [ "docset:router" ], diff --git a/docs/source/configuration/apollo-telemetry.mdx b/docs/source/configuration/apollo-telemetry.mdx index 20adfa960e..70ccfe4302 100644 --- a/docs/source/configuration/apollo-telemetry.mdx +++ b/docs/source/configuration/apollo-telemetry.mdx @@ -17,20 +17,20 @@ More information 
on usage reporting is available in the [Studio documentation](/ ## Enabling field-level instrumentation -[Apollo field-level instrumentation](https://www.apollographql.com/docs/federation/metrics) (also known as FTV1 tracing) is set to a sampling rate of `0.01` by default, which means it will request traces for 1% of traffic. To change it to a higher rate, set the `field_level_instrumentation_sampler` value to a rate higher than `0.1`. To completely disable it, set it to `always_off`. +[Apollo field-level instrumentation](https://www.apollographql.com/docs/federation/metrics) (also known as FTV1 tracing) is set to a sampling rate of `0.01` by default, which means it will request traces for 1% of traffic. To change it to a higher rate, set the `field_level_instrumentation_sampler` value to a rate higher than `0.1`. To completely disable it, set it to `always_off`. > **Note** > Since field-level instrumentation is _dependent_ OpenTelemetry tracing, it is necessary to set the `sampler` value for traces to a value the _same or higher_ than the `field_level_instrumentation_sampler` value. The following example sets both Apollo field-level tracing and OpenTelemetry tracing to attempt to sample 50% of requests: - ```yaml telemetry: apollo: # This example will trace half of requests. This number can't # be higher than tracing.trace_config.sampler. field_level_instrumentation_sampler: 0.5 + tracing: trace_config: # FTV1 uses the same trace sampling as other tracing options, @@ -75,3 +75,22 @@ telemetry: trace_config: sampler: 0.5 # The percentage of requests that will generate traces (a rate or `always_on` or `always_off`) ``` + +### `errors` + +If you don't want to redact errors coming from subgraphs that the Router is sending to Apollo Studio you can set `redact` to `false` (enabled by default). +If you don't want to **transmit** errors coming from subgraphs that the Router is sending to Apollo Studio you can set `send` to `false` (enabled by default). 
+ +```yaml title="router.yaml" +telemetry: + apollo: + errors: + subgraph: + all: + # Send errors to Apollo Studio + send: true # (default: true) + redact: false # (default: true) + subgraphs: + account: # Override the default behavior only for the "account" subgraph + send: false +``` diff --git a/docs/source/containerization/docker.mdx b/docs/source/containerization/docker.mdx index 404a9523d7..46a068e49e 100644 --- a/docs/source/containerization/docker.mdx +++ b/docs/source/containerization/docker.mdx @@ -11,7 +11,7 @@ The default behaviour of the router images is suitable for a quickstart or devel Note: The [docker documentation](https://docs.docker.com/engine/reference/run/) for the run command may be helpful when reading through the examples. -Note: The exact image version to use is your choice depending on which release you wish to use. In the following examples, replace `` with your chosen version. e.g.: `v1.15.1` +Note: The exact image version to use is your choice depending on which release you wish to use. In the following examples, replace `` with your chosen version. e.g.: `v1.16.0` ## Override the configuration diff --git a/docs/source/containerization/kubernetes.mdx b/docs/source/containerization/kubernetes.mdx index 8d606d06ea..9f8c343d59 100644 --- a/docs/source/containerization/kubernetes.mdx +++ b/docs/source/containerization/kubernetes.mdx @@ -13,7 +13,7 @@ import { Link } from 'gatsby'; [Helm](https://helm.sh) is the package manager for kubernetes. -There is a complete [helm chart definition](https://github.com/apollographql/router/tree/v1.15.1/helm/chart/router) in the repo which illustrates how to use helm to deploy the router in kubernetes. +There is a complete [helm chart definition](https://github.com/apollographql/router/tree/v1.16.0/helm/chart/router) in the repo which illustrates how to use helm to deploy the router in kubernetes. 
In both the following examples, we are using helm to install the router: - into namespace "router-deploy" (create namespace if it doesn't exist) @@ -64,10 +64,10 @@ kind: ServiceAccount metadata: name: release-name-router labels: - helm.sh/chart: router-1.15.1 + helm.sh/chart: router-1.16.0 app.kubernetes.io/name: router app.kubernetes.io/instance: release-name - app.kubernetes.io/version: "v1.15.1" + app.kubernetes.io/version: "v1.16.0" app.kubernetes.io/managed-by: Helm --- # Source: router/templates/secret.yaml @@ -76,10 +76,10 @@ kind: Secret metadata: name: "release-name-router" labels: - helm.sh/chart: router-1.15.1 + helm.sh/chart: router-1.16.0 app.kubernetes.io/name: router app.kubernetes.io/instance: release-name - app.kubernetes.io/version: "v1.15.1" + app.kubernetes.io/version: "v1.16.0" app.kubernetes.io/managed-by: Helm data: managedFederationApiKey: "UkVEQUNURUQ=" @@ -90,10 +90,10 @@ kind: ConfigMap metadata: name: release-name-router labels: - helm.sh/chart: router-1.15.1 + helm.sh/chart: router-1.16.0 app.kubernetes.io/name: router app.kubernetes.io/instance: release-name - app.kubernetes.io/version: "v1.15.1" + app.kubernetes.io/version: "v1.16.0" app.kubernetes.io/managed-by: Helm data: configuration.yaml: | @@ -117,10 +117,10 @@ kind: Service metadata: name: release-name-router labels: - helm.sh/chart: router-1.15.1 + helm.sh/chart: router-1.16.0 app.kubernetes.io/name: router app.kubernetes.io/instance: release-name - app.kubernetes.io/version: "v1.15.1" + app.kubernetes.io/version: "v1.16.0" app.kubernetes.io/managed-by: Helm spec: type: ClusterIP @@ -143,10 +143,10 @@ kind: Deployment metadata: name: release-name-router labels: - helm.sh/chart: router-1.15.1 + helm.sh/chart: router-1.16.0 app.kubernetes.io/name: router app.kubernetes.io/instance: release-name - app.kubernetes.io/version: "v1.15.1" + app.kubernetes.io/version: "v1.16.0" app.kubernetes.io/managed-by: Helm annotations: @@ -172,7 +172,7 @@ spec: - name: router securityContext: {} 
- image: "ghcr.io/apollographql/router:v1.15.1" + image: "ghcr.io/apollographql/router:v1.16.0" imagePullPolicy: IfNotPresent args: - --hot-reload @@ -223,10 +223,10 @@ kind: Pod metadata: name: "release-name-router-test-connection" labels: - helm.sh/chart: router-1.15.1 + helm.sh/chart: router-1.16.0 app.kubernetes.io/name: router app.kubernetes.io/instance: release-name - app.kubernetes.io/version: "v1.15.1" + app.kubernetes.io/version: "v1.16.0" app.kubernetes.io/managed-by: Helm annotations: "helm.sh/hook": test diff --git a/docs/source/customizations/rhai-api.mdx b/docs/source/customizations/rhai-api.mdx index 55bf711ffd..c181f88255 100644 --- a/docs/source/customizations/rhai-api.mdx +++ b/docs/source/customizations/rhai-api.mdx @@ -435,6 +435,16 @@ The following fields are identical in behavior to their `request` counterparts: * [`headers`](#requestheaders) * [`body.extensions`](#requestbodyextensions) +Note: Be particularly careful when interacting with headers in a response context. For supergraph_service() and execution_service(), response headers only exist for the first response in a deferred response stream. You can handle this by making use of the `is_primary()` function which will return true if a response is the first (or primary) response. If you do try to access the headers in a non-primary response, then you'll raise an exception which can be handled like any other rhai exception, but is not so convenient as using the `is_primary()` method. + +```rhai + if response.is_primary() { + print(`all response headers: ${response.headers}`); + } else { + print(`don't try to access headers`); + } +``` + Other fields are described below. 
### `response.body.label` diff --git a/docs/source/federation-version-support.mdx b/docs/source/federation-version-support.mdx index 13ee9cf879..6aac18baa1 100644 --- a/docs/source/federation-version-support.mdx +++ b/docs/source/federation-version-support.mdx @@ -27,7 +27,15 @@ The table below shows which version of federation each router release is compile - v1.15.0 and later (see latest releases) + v1.16.0 and later (see latest releases) + + + 2.4.2 + + + + + v1.15.0 - v1.15.1 2.4.1 diff --git a/helm/chart/router/Chart.yaml b/helm/chart/router/Chart.yaml index ac36c044c6..68101ab17f 100644 --- a/helm/chart/router/Chart.yaml +++ b/helm/chart/router/Chart.yaml @@ -20,10 +20,10 @@ type: application # so it matches the shape of our release process and release automation. # By proxy of that decision, this version uses SemVer 2.0.0, though the prefix # of "v" is not included. -version: 1.15.1 +version: 1.16.0 # This is the version number of the application being deployed. This version number should be # incremented each time you make changes to the application. Versions are not expected to # follow Semantic Versioning. They should reflect the version the application is using. # It is recommended to use it with quotes. 
-appVersion: "v1.15.1" +appVersion: "v1.16.0" diff --git a/helm/chart/router/README.md b/helm/chart/router/README.md index c5d1358130..73daa742a0 100644 --- a/helm/chart/router/README.md +++ b/helm/chart/router/README.md @@ -2,7 +2,7 @@ [router](https://github.com/apollographql/router) Rust Graph Routing runtime for Apollo Federation -![Version: 1.15.1](https://img.shields.io/badge/Version-1.15.1-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: v1.15.1](https://img.shields.io/badge/AppVersion-v1.15.1-informational?style=flat-square) +![Version: 1.16.0](https://img.shields.io/badge/Version-1.16.0-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: v1.16.0](https://img.shields.io/badge/AppVersion-v1.16.0-informational?style=flat-square) ## Prerequisites @@ -11,7 +11,7 @@ ## Get Repo Info ```console -helm pull oci://ghcr.io/apollographql/helm-charts/router --version 1.15.1 +helm pull oci://ghcr.io/apollographql/helm-charts/router --version 1.16.0 ``` ## Install Chart @@ -19,7 +19,7 @@ helm pull oci://ghcr.io/apollographql/helm-charts/router --version 1.15.1 **Important:** only helm3 is supported ```console -helm upgrade --install [RELEASE_NAME] oci://ghcr.io/apollographql/helm-charts/router --version 1.15.1 --values my-values.yaml +helm upgrade --install [RELEASE_NAME] oci://ghcr.io/apollographql/helm-charts/router --version 1.16.0 --values my-values.yaml ``` _See [configuration](#configuration) below._ diff --git a/licenses.html b/licenses.html index cf9d6dcbab..a7fbbe67b5 100644 --- a/licenses.html +++ b/licenses.html @@ -44,10 +44,10 @@

Third Party Licenses

Overview of licenses:

                                  Apache License
@@ -10310,7 +10312,6 @@ 

Used by:

  • askama_shared
  • backtrace-ext
  • block-modes
  • -
  • fiat-crypto
  • graphql-introspection-query
  • graphql_client
  • graphql_client_codegen
  • @@ -10693,6 +10694,39 @@

    Used by:

    MIT OR Apache-2.0
    +
  • +

    Apache License 2.0

    +

    Used by:

    + +
    MIT or Apache-2.0
    +
    +
  • +
  • +

    Apache License 2.0

    +

    Used by:

    + +
    The Apache License, Version 2.0 (Apache-2.0)
    +
    +Copyright 2015-2020 the fiat-crypto authors (see the AUTHORS file)
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +
    +
  • BSD 2-Clause "Simplified" License

    Used by:

    @@ -10866,6 +10900,79 @@

    Used by:

    contributors may be used to endorse or promote products derived from this software without specific prior written permission. +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS +IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED +TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A +PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER +OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, +EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, +PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR +PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF +LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING +NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +
  • + +
  • +

    BSD 3-Clause "New" or "Revised" License

    +

    Used by:

    + +
    Copyright (c) 2016-2021 isis agora lovecruft. All rights reserved.
    +Copyright (c) 2016-2021 Henry de Valence. All rights reserved.
    +
    +Redistribution and use in source and binary forms, with or without
    +modification, are permitted provided that the following conditions are
    +met:
    +
    +1. Redistributions of source code must retain the above copyright
    +notice, this list of conditions and the following disclaimer.
    +
    +2. Redistributions in binary form must reproduce the above copyright
    +notice, this list of conditions and the following disclaimer in the
    +documentation and/or other materials provided with the distribution.
    +
    +3. Neither the name of the copyright holder nor the names of its
    +contributors may be used to endorse or promote products derived from
    +this software without specific prior written permission.
    +
    +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
    +IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
    +TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
    +PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
    +HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
    +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
    +TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
    +PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
    +LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
    +NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
    +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
    +
    +========================================================================
    +
    +Portions of curve25519-dalek were originally derived from Adam Langley's
    +Go ed25519 implementation, found at <https://github.com/agl/ed25519/>,
    +under the following licence:
    +
    +========================================================================
    +
    +Copyright (c) 2012 The Go Authors. All rights reserved.
    +
    +Redistribution and use in source and binary forms, with or without
    +modification, are permitted provided that the following conditions are
    +met:
    +
    +   * Redistributions of source code must retain the above copyright
    +notice, this list of conditions and the following disclaimer.
    +   * Redistributions in binary form must reproduce the above
    +copyright notice, this list of conditions and the following disclaimer
    +in the documentation and/or other materials provided with the
    +distribution.
    +   * Neither the name of Google Inc. nor the names of its
    +contributors may be used to endorse or promote products derived from
    +this software without specific prior written permission.
    +
     THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
     IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
     TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
    @@ -10956,7 +11063,6 @@ 

    BSD 3-Clause "New" or "Revised" Licens

    Used by:

    Copyright (c) <year> <owner>. 
     
    @@ -13525,6 +13631,7 @@ 

    MIT License

    Used by:

    The MIT License (MIT)
     
    @@ -14182,6 +14289,22 @@ 

    Used by:

    LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
    +
  • +
  • +

    MIT License

    +

    Used by:

    + +
    The MIT License (MIT)
    +Copyright (c) 2016 Alexandre Bury
    +
    +Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
    +
    +The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
    +
    +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
    +
  • MIT License

    diff --git a/scripts/install.sh b/scripts/install.sh index 6ef6e9d7d5..1f9acaf756 100755 --- a/scripts/install.sh +++ b/scripts/install.sh @@ -11,7 +11,7 @@ BINARY_DOWNLOAD_PREFIX="https://github.com/apollographql/router/releases/downloa # Router version defined in apollo-router's Cargo.toml # Note: Change this line manually during the release steps. -PACKAGE_VERSION="v1.15.1" +PACKAGE_VERSION="v1.16.0" download_binary() { downloader --check diff --git a/xtask/Cargo.lock b/xtask/Cargo.lock index cd7a0d71a3..07d60c7734 100644 --- a/xtask/Cargo.lock +++ b/xtask/Cargo.lock @@ -26,15 +26,6 @@ dependencies = [ "libc", ] -[[package]] -name = "ansi_term" -version = "0.12.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d52a9bb7ec0cf484c551830a7ce27bd20d67eac647e1befb56b0be4ee39a55d2" -dependencies = [ - "winapi", -] - [[package]] name = "anstream" version = "0.2.6" @@ -1107,6 +1098,15 @@ dependencies = [ "windows-sys 0.45.0", ] +[[package]] +name = "nu-ansi-term" +version = "0.47.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1df031e117bca634c262e9bd3173776844b6c17a90b3741c9163663b4385af76" +dependencies = [ + "windows-sys 0.45.0", +] + [[package]] name = "num-bigint" version = "0.4.3" @@ -2604,7 +2604,6 @@ dependencies = [ name = "xtask" version = "1.5.0" dependencies = [ - "ansi_term", "anyhow", "base64 0.20.0", "camino", @@ -2619,6 +2618,7 @@ dependencies = [ "itertools", "libc", "memorable-wordlist", + "nu-ansi-term", "octorust", "once_cell", "regex", diff --git a/xtask/Cargo.toml b/xtask/Cargo.toml index d40190d141..577d83744a 100644 --- a/xtask/Cargo.toml +++ b/xtask/Cargo.toml @@ -9,7 +9,6 @@ license = "LicenseRef-ELv2" publish = false [dependencies] -ansi_term = "0.12" anyhow = "1" base64 = "0.20" camino = "1" @@ -24,6 +23,7 @@ git2 = { version = "0.16.1", features = ["vendored-openssl"] } itertools = "0.10.5" libc = "0.2" memorable-wordlist = "0.1.7" +nu-ansi-term = "0.47" octorust = "0.2.2" 
once_cell = "1" regex="1.7.1" diff --git a/xtask/src/main.rs b/xtask/src/main.rs index 737d71992c..38036e66df 100644 --- a/xtask/src/main.rs +++ b/xtask/src/main.rs @@ -1,8 +1,8 @@ mod commands; -use ansi_term::Colour::Green; use anyhow::Result; use clap::Parser; +use nu_ansi_term::Color::Green; fn main() -> Result<()> { let app = Xtask::parse();